/* if_myx.c revision 1.77 */
/*	$OpenBSD: if_myx.c,v 1.77 2015/05/17 02:33:09 chris Exp $	*/

/*
 * Copyright (c) 2007 Reyk Floeter <reyk@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for the Myricom Myri-10G Lanai-Z8E Ethernet chipsets.
 */
23#include "bpfilter.h"
24
25#include <sys/param.h>
26#include <sys/systm.h>
27#include <sys/sockio.h>
28#include <sys/mbuf.h>
29#include <sys/kernel.h>
30#include <sys/socket.h>
31#include <sys/malloc.h>
32#include <sys/pool.h>
33#include <sys/timeout.h>
34#include <sys/device.h>
35#include <sys/queue.h>
36#include <sys/atomic.h>
37
38#include <machine/bus.h>
39#include <machine/intr.h>
40
41#include <net/if.h>
42#include <net/if_dl.h>
43#include <net/if_media.h>
44
45#if NBPFILTER > 0
46#include <net/bpf.h>
47#endif
48
49#include <netinet/in.h>
50#include <netinet/if_ether.h>
51
52#include <dev/pci/pcireg.h>
53#include <dev/pci/pcivar.h>
54#include <dev/pci/pcidevs.h>
55
56#include <dev/pci/if_myxreg.h>
57
#ifdef MYX_DEBUG
/*
 * Debug levels are independent bit flags tested against myx_debug with
 * a bitwise AND in DPRINTF(); each level therefore must occupy its own
 * bit.  The previous values (2<<0) and (3<<0) evaluated to 2 and 3, so
 * MYXDBG_INTR (3) overlapped both MYXDBG_INIT and MYXDBG_CMD.
 */
#define MYXDBG_INIT	(1<<0)	/* chipset initialization */
#define MYXDBG_CMD	(1<<1)	/* commands */
#define MYXDBG_INTR	(1<<2)	/* interrupts */
#define MYXDBG_ALL	0xffff	/* enable all debugging messages */
int myx_debug = MYXDBG_ALL;
#define DPRINTF(_lvl, _arg...)	do {					\
	if (myx_debug & (_lvl))						\
		printf(_arg);						\
} while (0)
#else
#define DPRINTF(_lvl, arg...)
#endif
71
#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

/*
 * A single bus_dma allocation: map, backing segment, and the kernel
 * virtual address it is mapped at.  Filled in by myx_dmamem_alloc()
 * and torn down by myx_dmamem_free().
 */
struct myx_dmamem {
	bus_dmamap_t		 mxm_map;	/* loaded DMA map */
	bus_dma_segment_t	 mxm_seg;	/* single backing segment */
	int			 mxm_nsegs;	/* segment count (1) */
	size_t			 mxm_size;	/* allocation size in bytes */
	caddr_t			 mxm_kva;	/* kva of the mapped segment */
};

/*
 * Per-packet buffer: an mbuf and the DMA map it is loaded into,
 * linked onto a myx_buf_list.
 */
struct myx_buf {
	SIMPLEQ_ENTRY(myx_buf)	 mb_entry;
	bus_dmamap_t		 mb_map;
	struct mbuf		*mb_m;
};

/* Mutex-protected queue of myx_bufs (see myx_buf_get/myx_buf_put). */
struct myx_buf_list {
	SIMPLEQ_HEAD(, myx_buf)	mbl_q;
	struct mutex		mbl_mtx;
};

/* Pools shared by all myx(4) instances; created once in myx_attach(). */
struct pool *myx_buf_pool;
struct pool *myx_mcl_pool;

/*
 * Ring serialization: mrl_mtx guards mrl_running, which is used by
 * myx_ring_enter()/myx_ring_leave() to let only one context work a
 * ring at a time.
 */
struct myx_ring_lock {
	struct mutex		mrl_mtx;
	u_int			mrl_running;
};
100
/*
 * Interface run state kept in sc_state (declared volatile in the softc).
 * NOTE(review): the transitions happen in myx_up()/myx_down()/myx_intr(),
 * which are outside this chunk — confirm against the full file.
 */
enum myx_state {
	MYX_S_OFF = 0,
	MYX_S_RUNNING,
	MYX_S_DOWN
};
106
/*
 * Per-device softc.  One instance per attached Myri-10G adapter.
 */
struct myx_softc {
	struct device		 sc_dev;
	struct arpcom		 sc_ac;

	/* PCI glue */
	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	 sc_ih;
	pcitag_t		 sc_tag;

	/* register window mapping (BAR0) */
	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;	/* mapped size; 0 if unmapped */

	struct myx_dmamem	 sc_zerodma;	/* 64-byte zero pad */
	struct myx_dmamem	 sc_cmddma;	/* command/response buffer */
	struct myx_dmamem	 sc_paddma;	/* dummy RDMA target */

	/* firmware-updated status block, guarded by sc_sts_mtx */
	struct myx_dmamem	 sc_sts_dma;
	volatile struct myx_status	*sc_sts;
	struct mutex		 sc_sts_mtx;

	int			 sc_intx;	/* 1 if using INTx, 0 if MSI */
	void			*sc_irqh;
	/* register offsets obtained from the firmware via myx_cmd() */
	u_int32_t		 sc_irqcoaloff;
	u_int32_t		 sc_irqclaimoff;
	u_int32_t		 sc_irqdeassertoff;

	/* interrupt (event) queue shared with the firmware */
	struct myx_dmamem	 sc_intrq_dma;
	struct myx_intrq_desc	*sc_intrq;
	u_int			 sc_intrq_count;
	u_int			 sc_intrq_idx;

	/* two receive rings: small (cluster) and big (jumbo) buffers */
	u_int			 sc_rx_ring_count;
	struct myx_ring_lock	 sc_rx_ring_lock[2];
	u_int32_t		 sc_rx_ring_offset[2];
	struct myx_buf_list	 sc_rx_buf_free[2];
	struct myx_buf_list	 sc_rx_buf_list[2];
	u_int			 sc_rx_ring_idx[2];
	struct if_rxring	 sc_rx_ring[2];
#define  MYX_RXSMALL		 0
#define  MYX_RXBIG		 1
	struct timeout		 sc_refill;

	/* transmit ring state */
	bus_size_t		 sc_tx_boundary;	/* 4096, or 2048 for
							 * unaligned firmware */
	u_int			 sc_tx_ring_count;
	struct myx_ring_lock	 sc_tx_ring_lock;
	u_int32_t		 sc_tx_ring_offset;
	u_int			 sc_tx_nsegs;
	u_int32_t		 sc_tx_count; /* shadows ms_txdonecnt */
	u_int			 sc_tx_free;
	struct myx_buf_list	 sc_tx_buf_free;
	struct myx_buf_list	 sc_tx_buf_list;
	u_int			 sc_tx_ring_idx;

	struct ifmedia		 sc_media;

	volatile enum myx_state	 sc_state;
	volatile u_int8_t	 sc_linkdown;
};

/* rx buffer sizes for the two rings */
#define MYX_RXSMALL_SIZE	MCLBYTES
#define MYX_RXBIG_SIZE		(9 * 1024)
169
/* autoconf glue */
int	 myx_match(struct device *, void *, void *);
void	 myx_attach(struct device *, struct device *, void *);
int	 myx_pcie_dc(struct myx_softc *, struct pci_attach_args *);
int	 myx_query(struct myx_softc *sc, char *, size_t);
u_int	 myx_ether_aton(char *, u_int8_t *, u_int);
void	 myx_attachhook(void *);
int	 myx_loadfirmware(struct myx_softc *, const char *);
int	 myx_probe_firmware(struct myx_softc *);

/* register window accessors */
void	 myx_read(struct myx_softc *, bus_size_t, void *, bus_size_t);
void	 myx_write(struct myx_softc *, bus_size_t, void *, bus_size_t);

/*
 * Widest raw bus write available: 64-bit on LP64 platforms, 32-bit
 * otherwise.  Used for copying descriptors into the register window.
 */
#if defined(__LP64__)
#define myx_bus_space_write bus_space_write_raw_region_8
typedef u_int64_t myx_bus_t;
#else
#define myx_bus_space_write bus_space_write_raw_region_4
typedef u_int32_t myx_bus_t;
#endif

/* firmware command/boot interface */
int	 myx_cmd(struct myx_softc *, u_int32_t, struct myx_cmd *, u_int32_t *);
int	 myx_boot(struct myx_softc *, u_int32_t);

int	 myx_rdma(struct myx_softc *, u_int);
int	 myx_dmamem_alloc(struct myx_softc *, struct myx_dmamem *,
	    bus_size_t, u_int align);
void	 myx_dmamem_free(struct myx_softc *, struct myx_dmamem *);
int	 myx_media_change(struct ifnet *);
void	 myx_media_status(struct ifnet *, struct ifmediareq *);
void	 myx_link_state(struct myx_softc *, u_int32_t);
void	 myx_watchdog(struct ifnet *);
int	 myx_ioctl(struct ifnet *, u_long, caddr_t);
int	 myx_rxrinfo(struct myx_softc *, struct if_rxrinfo *);
void	 myx_up(struct myx_softc *);
void	 myx_iff(struct myx_softc *);
void	 myx_down(struct myx_softc *);

/* transmit path */
void	 myx_start(struct ifnet *);
void	 myx_write_txd_tail(struct myx_softc *, struct myx_buf *, u_int8_t,
	    u_int32_t, u_int);
int	 myx_load_buf(struct myx_softc *, struct myx_buf *, struct mbuf *);
int	 myx_setlladdr(struct myx_softc *, u_int32_t, u_int8_t *);
int	 myx_intr(void *);
void	 myx_rxeof(struct myx_softc *);
void	 myx_txeof(struct myx_softc *, u_int32_t);

/* buffer and ring helpers */
struct myx_buf *	myx_buf_alloc(struct myx_softc *, bus_size_t, int,
			    bus_size_t, bus_size_t);
void			myx_buf_free(struct myx_softc *, struct myx_buf *);
void			myx_bufs_init(struct myx_buf_list *);
int			myx_bufs_empty(struct myx_buf_list *);
struct myx_buf *	myx_buf_get(struct myx_buf_list *);
void			myx_buf_put(struct myx_buf_list *, struct myx_buf *);
struct myx_buf *	myx_buf_fill(struct myx_softc *, int);
struct mbuf *		myx_mcl_small(void);
struct mbuf *		myx_mcl_big(void);

void			myx_rx_zero(struct myx_softc *, int);
int			myx_rx_fill(struct myx_softc *, int);
void			myx_refill(void *);

void			myx_ring_lock_init(struct myx_ring_lock *);
int			myx_ring_enter(struct myx_ring_lock *);
int			myx_ring_leave(struct myx_ring_lock *);
234
/*
 * Take the status-block mutex and sync the status DMA memory for CPU
 * access.  The POSTREAD|POSTWRITE sync makes the firmware's most recent
 * writes to sc_sts visible before the caller reads them.  Paired with
 * myx_sts_leave().
 */
static inline void
myx_sts_enter(struct myx_softc *sc)
{
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;

	mtx_enter(&sc->sc_sts_mtx);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
}
244
/*
 * Hand the status block back to the device: sync for device access
 * (PREREAD|PREWRITE) and drop the status-block mutex.  Paired with
 * myx_sts_enter().
 */
static inline void
myx_sts_leave(struct myx_softc *sc)
{
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	mtx_leave(&sc->sc_sts_mtx);
}
254
/* autoconf driver/attach descriptors */
struct cfdriver myx_cd = {
	NULL, "myx", DV_IFNET
};
struct cfattach myx_ca = {
	sizeof(struct myx_softc), myx_match, myx_attach
};

/* PCI IDs this driver attaches to (Z8E and Z8E dual-port variants) */
const struct pci_matchid myx_devices[] = {
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E },
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E_9 }
};
266
267int
268myx_match(struct device *parent, void *match, void *aux)
269{
270	return (pci_matchbyid(aux, myx_devices, nitems(myx_devices)));
271}
272
/*
 * Autoconf attach: map BAR0, query the board for its MAC address and
 * part number, hook the interrupt, create the shared buffer pools and
 * defer the rest of bring-up (which needs firmware from the filesystem)
 * to myx_attachhook() via a mountroot hook.
 */
void
myx_attach(struct device *parent, struct device *self, void *aux)
{
	struct myx_softc	*sc = (struct myx_softc *)self;
	struct pci_attach_args	*pa = aux;
	char			 part[32];
	pcireg_t		 memtype;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	/* initialize ring locks and buffer queues before anything can run */
	myx_ring_lock_init(&sc->sc_rx_ring_lock[MYX_RXSMALL]);
	myx_bufs_init(&sc->sc_rx_buf_free[MYX_RXSMALL]);
	myx_bufs_init(&sc->sc_rx_buf_list[MYX_RXSMALL]);
	myx_ring_lock_init(&sc->sc_rx_ring_lock[MYX_RXBIG]);
	myx_bufs_init(&sc->sc_rx_buf_free[MYX_RXBIG]);
	myx_bufs_init(&sc->sc_rx_buf_list[MYX_RXBIG]);

	myx_ring_lock_init(&sc->sc_tx_ring_lock);
	myx_bufs_init(&sc->sc_tx_buf_free);
	myx_bufs_init(&sc->sc_tx_buf_list);

	timeout_set(&sc->sc_refill, myx_refill, sc);

	mtx_init(&sc->sc_sts_mtx, IPL_NET);


	/* Map the PCI memory space */
	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MYXBAR0);
	if (pci_mapreg_map(pa, MYXBAR0, memtype, BUS_SPACE_MAP_PREFETCHABLE,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
		printf(": unable to map register memory\n");
		return;
	}

	/* Get board details (mac/part); fills sc_ac.ac_enaddr */
	memset(part, 0, sizeof(part));
	if (myx_query(sc, part, sizeof(part)) != 0)
		goto unmap;

	/* Map the interrupt: prefer MSI, fall back to INTx */
	if (pci_intr_map_msi(pa, &sc->sc_ih) != 0) {
		if (pci_intr_map(pa, &sc->sc_ih) != 0) {
			printf(": unable to map interrupt\n");
			goto unmap;
		}
		sc->sc_intx = 1;
	}

	printf(": %s, model %s, address %s\n",
	    pci_intr_string(pa->pa_pc, sc->sc_ih),
	    part[0] == '\0' ? "(unknown)" : part,
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	/* this is sort of racy */
	if (myx_buf_pool == NULL) {
		extern struct kmem_pa_mode kp_dma_contig;

		/*
		 * NOTE(review): malloc(9) with M_WAITOK sleeps rather than
		 * failing, so these NULL checks should be unreachable.
		 */
		myx_buf_pool = malloc(sizeof(*myx_buf_pool), M_DEVBUF,
		    M_WAITOK);
		if (myx_buf_pool == NULL) {
			printf("%s: unable to allocate buf pool\n",
			    DEVNAME(sc));
			goto unmap;
		}
		pool_init(myx_buf_pool, sizeof(struct myx_buf),
		    0, 0, 0, "myxbufs", &pool_allocator_nointr);
		pool_setipl(myx_buf_pool, IPL_NONE);

		myx_mcl_pool = malloc(sizeof(*myx_mcl_pool), M_DEVBUF,
		    M_WAITOK);
		if (myx_mcl_pool == NULL) {
			printf("%s: unable to allocate mcl pool\n",
			    DEVNAME(sc));
			goto unmap;
		}
		/* jumbo clusters must be physically contiguous for DMA */
		pool_init(myx_mcl_pool, MYX_RXBIG_SIZE, MYX_BOUNDARY, 0,
		    0, "myxmcl", NULL);
		pool_setipl(myx_mcl_pool, IPL_NET);
		pool_set_constraints(myx_mcl_pool, &kp_dma_contig);
	}

	if (myx_pcie_dc(sc, pa) != 0)
		printf("%s: unable to configure PCI Express\n", DEVNAME(sc));

	/* firmware lives on disk; finish attach once root is mounted */
	if (mountroothook_establish(myx_attachhook, sc) == NULL) {
		printf("%s: unable to establish mountroot hook\n", DEVNAME(sc));
		goto unmap;
	}

	return;

 unmap:
	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
	sc->sc_mems = 0;
}
370
/*
 * Program the PCIe Device Control register: set the Max Payload Size
 * field to 4096 bytes (fls(4096) - 8 == 5, placed in bits 14:12) and
 * enable relaxed ordering.  Returns -1 if the device has no PCIe
 * capability, 0 otherwise.  The register is only rewritten when the
 * current value differs.
 */
int
myx_pcie_dc(struct myx_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t dcsr;
	pcireg_t mask = PCI_PCIE_DCSR_MPS | PCI_PCIE_DCSR_ERO;
	pcireg_t dc = ((fls(4096) - 8) << 12) | PCI_PCIE_DCSR_ERO;
	int reg;

	if (pci_get_capability(sc->sc_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &reg, NULL) == 0)
		return (-1);

	reg += PCI_PCIE_DCSR;
	dcsr = pci_conf_read(sc->sc_pc, pa->pa_tag, reg);
	if ((dcsr & mask) != dc) {
		/* read-modify-write only the MPS and ERO fields */
		CLR(dcsr, mask);
		SET(dcsr, dc);
		pci_conf_write(sc->sc_pc, pa->pa_tag, reg, dcsr);
	}

	return (0);
}
393
394u_int
395myx_ether_aton(char *mac, u_int8_t *lladdr, u_int maxlen)
396{
397	u_int		i, j;
398	u_int8_t	digit;
399
400	memset(lladdr, 0, ETHER_ADDR_LEN);
401	for (i = j = 0; mac[i] != '\0' && i < maxlen; i++) {
402		if (mac[i] >= '0' && mac[i] <= '9')
403			digit = mac[i] - '0';
404		else if (mac[i] >= 'A' && mac[i] <= 'F')
405			digit = mac[i] - 'A' + 10;
406		else if (mac[i] >= 'a' && mac[i] <= 'f')
407			digit = mac[i] - 'a' + 10;
408		else
409			continue;
410		if ((j & 1) == 0)
411			digit <<= 4;
412		lladdr[j++/2] |= digit;
413	}
414
415	return (i);
416}
417
/*
 * Read the board's string-specs area out of the register window and
 * extract the factory MAC address ("MAC=...") and part number
 * ("PC=...").  The specs are a sequence of NUL-terminated "KEY=value"
 * strings; an empty string ends the list.  Returns 1 if the header
 * pointer falls outside the mapped window, 0 otherwise.
 */
int
myx_query(struct myx_softc *sc, char *part, size_t partlen)
{
	struct myx_gen_hdr hdr;
	u_int32_t	offset;
	u_int8_t	strings[MYX_STRING_SPECS_SIZE];
	u_int		i, len, maxlen;

	/* the header pointer itself lives at a fixed location */
	myx_read(sc, MYX_HEADER_POS, &offset, sizeof(offset));
	offset = betoh32(offset);
	if (offset + sizeof(hdr) > sc->sc_mems) {
		printf(": header is outside register window\n");
		return (1);
	}

	myx_read(sc, offset, &hdr, sizeof(hdr));
	/*
	 * NOTE(review): fw_specs is not bounds-checked against sc_mems
	 * the way the header offset above is — confirm this is safe.
	 */
	offset = betoh32(hdr.fw_specs);
	len = min(betoh32(hdr.fw_specs_len), sizeof(strings));

	bus_space_read_region_1(sc->sc_memt, sc->sc_memh, offset, strings, len);

	for (i = 0; i < len; i++) {
		maxlen = len - i;
		if (strings[i] == '\0')
			break;
		if (maxlen > 4 && memcmp("MAC=", &strings[i], 4) == 0) {
			i += 4;
			i += myx_ether_aton(&strings[i],
			    sc->sc_ac.ac_enaddr, maxlen);
		} else if (maxlen > 3 && memcmp("PC=", &strings[i], 3) == 0) {
			i += 3;
			i += strlcpy(part, &strings[i], min(maxlen, partlen));
		}
		/* skip to the NUL ending the current string */
		for (; i < len; i++) {
			if (strings[i] == '\0')
				break;
		}
	}

	return (0);
}
459
/*
 * Load a firmware image from the filesystem, validate its header
 * (size, type, version prefix), copy it into the card's SRAM in
 * 256-byte chunks and boot it.  Returns 0 on success, 1 on any
 * failure; the firmware buffer is always freed.
 */
int
myx_loadfirmware(struct myx_softc *sc, const char *filename)
{
	struct myx_gen_hdr	hdr;
	u_int8_t		*fw;
	size_t			fwlen;
	u_int32_t		offset;
	u_int			i, ret = 1;

	if (loadfirmware(filename, &fw, &fwlen) != 0) {
		printf("%s: could not load firmware %s\n", DEVNAME(sc),
		    filename);
		return (1);
	}
	if (fwlen > MYX_SRAM_SIZE || fwlen < MYXFW_MIN_LEN) {
		printf("%s: invalid firmware %s size\n", DEVNAME(sc), filename);
		goto err;
	}

	/* the image embeds a pointer to its own header */
	memcpy(&offset, fw + MYX_HEADER_POS, sizeof(offset));
	offset = betoh32(offset);
	if ((offset + sizeof(hdr)) > fwlen) {
		printf("%s: invalid firmware %s\n", DEVNAME(sc), filename);
		goto err;
	}

	memcpy(&hdr, fw + offset, sizeof(hdr));
	DPRINTF(MYXDBG_INIT, "%s: "
	    "fw hdr off %u, length %u, type 0x%x, version %s\n",
	    DEVNAME(sc), offset, betoh32(hdr.fw_hdrlength),
	    betoh32(hdr.fw_type), hdr.fw_version);

	if (betoh32(hdr.fw_type) != MYXFW_TYPE_ETH ||
	    memcmp(MYXFW_VER, hdr.fw_version, strlen(MYXFW_VER)) != 0) {
		printf("%s: invalid firmware type 0x%x version %s\n",
		    DEVNAME(sc), betoh32(hdr.fw_type), hdr.fw_version);
		goto err;
	}

	/* Write the firmware to the card's SRAM */
	for (i = 0; i < fwlen; i += 256)
		myx_write(sc, i + MYX_FW, fw + i, min(256, fwlen - i));

	if (myx_boot(sc, fwlen) != 0) {
		printf("%s: failed to boot %s\n", DEVNAME(sc), filename);
		goto err;
	}

	ret = 0;

err:
	free(fw, M_DEVBUF, 0);
	return (ret);
}
514
/*
 * Deferred attach, run from the mountroot hook once the root
 * filesystem (and thus the firmware files) is available: allocate the
 * command buffer, load and boot the firmware, pick aligned/unaligned
 * firmware, establish the interrupt and attach the network interface.
 */
void
myx_attachhook(void *arg)
{
	struct myx_softc	*sc = (struct myx_softc *)arg;
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	struct myx_cmd		 mc;

	/* Allocate command DMA memory */
	if (myx_dmamem_alloc(sc, &sc->sc_cmddma, MYXALIGN_CMD,
	    MYXALIGN_CMD) != 0) {
		printf("%s: failed to allocate command DMA memory\n",
		    DEVNAME(sc));
		return;
	}

	/* Try the firmware stored on disk */
	if (myx_loadfirmware(sc, MYXFW_ALIGNED) != 0) {
		/* error printed by myx_loadfirmware */
		goto freecmd;
	}

	memset(&mc, 0, sizeof(mc));

	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
		goto freecmd;
	}

	/* aligned firmware can cross up to 4k boundaries in tx DMA */
	sc->sc_tx_boundary = 4096;

	/* may switch to the unaligned firmware and 2k boundary */
	if (myx_probe_firmware(sc) != 0) {
		printf("%s: error while selecting firmware\n", DEVNAME(sc));
		goto freecmd;
	}

	sc->sc_irqh = pci_intr_establish(sc->sc_pc, sc->sc_ih,
	    IPL_NET | IPL_MPSAFE, myx_intr, sc, DEVNAME(sc));
	if (sc->sc_irqh == NULL) {
		printf("%s: unable to establish interrupt\n", DEVNAME(sc));
		goto freecmd;
	}

	/* set up and attach the network interface */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = myx_ioctl;
	ifp->if_start = myx_start;
	ifp->if_watchdog = myx_watchdog;
	ifp->if_hardmtu = 9000;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	IFQ_SET_MAXLEN(&ifp->if_snd, 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
#if 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

	ifmedia_init(&sc->sc_media, 0, myx_media_change, myx_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	return;

freecmd:
	myx_dmamem_free(sc, &sc->sc_cmddma);
}
586
/*
 * Decide whether the already-loaded aligned firmware can be kept.  On
 * narrow PCIe links (width <= 4) it always can.  Otherwise run the
 * firmware's unaligned DMA self-tests (read, write, read/write); if
 * any fails, fall back to the unaligned firmware and shrink the tx
 * boundary to 2048.  Returns 0 on success, 1 on fatal error.
 */
int
myx_probe_firmware(struct myx_softc *sc)
{
	struct myx_dmamem test;
	bus_dmamap_t map;
	struct myx_cmd mc;
	pcireg_t csr;
	int offset;
	int width = 0;

	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    offset + PCI_PCIE_LCSR);
		/* negotiated link width field of the Link Status register */
		width = (csr >> 20) & 0x3f;

		if (width <= 4) {
			/*
			 * if the link width is 4 or less we can use the
			 * aligned firmware.
			 */
			return (0);
		}
	}

	/* scratch page the firmware DMAs against during the tests */
	if (myx_dmamem_alloc(sc, &test, 4096, 4096) != 0)
		return (1);
	map = test.mxm_map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* mc_data2 encodes transfer size and test direction */
	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10000);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x1);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA write test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10001);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read/write test failed\n", DEVNAME(sc));
		goto fail;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);
	return (0);

fail:
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);

	/* tests failed: switch to the unaligned firmware */
	if (myx_loadfirmware(sc, MYXFW_UNALIGNED) != 0) {
		printf("%s: unable to load %s\n", DEVNAME(sc),
		    MYXFW_UNALIGNED);
		return (1);
	}

	sc->sc_tx_boundary = 2048;

	printf("%s: using unaligned firmware\n", DEVNAME(sc));
	return (0);
}
667
/*
 * Raw 32-bit-chunked read of len bytes at offset off in the register
 * window.  The read barrier orders this against preceding accesses
 * before the data is copied out.
 */
void
myx_read(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
{
	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
}
675
/*
 * Raw 32-bit-chunked write of len bytes to offset off in the register
 * window, followed by a write barrier so the data is pushed out before
 * any subsequent access.
 */
void
myx_write(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
{
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
	    BUS_SPACE_BARRIER_WRITE);
}
683
/*
 * Allocate, map and load a single-segment, zeroed DMA area of the
 * given size and alignment into mxm.  Returns 0 on success, 1 on
 * failure (partially-acquired resources are released via the goto
 * chain).  Undone by myx_dmamem_free().
 */
int
myx_dmamem_alloc(struct myx_softc *sc, struct myx_dmamem *mxm,
    bus_size_t size, u_int align)
{
	mxm->mxm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
	    mxm->mxm_size, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
	    &mxm->mxm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (0);
 unmap:
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
 free:
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
 destroy:
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
	return (1);
}
714
/*
 * Release a DMA area set up by myx_dmamem_alloc(), in the reverse
 * order of acquisition: unload, unmap, free the segment, destroy the
 * map.
 */
void
myx_dmamem_free(struct myx_softc *sc, struct myx_dmamem *mxm)
{
	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
}
723
/*
 * Issue a firmware command: point the command at the shared response
 * buffer, write it into the MYX_CMD window and poll the response for
 * up to ~20ms (20 x 1ms).  On success the 32-bit response datum is
 * returned via *r (if non-NULL).  Returns 0 on success, -1 on a
 * non-zero result or timeout (a timed-out result stays 0xffffffff and
 * thus also returns -1).
 */
int
myx_cmd(struct myx_softc *sc, u_int32_t cmd, struct myx_cmd *mc, u_int32_t *r)
{
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	struct myx_response	*mr;
	u_int			 i;
	u_int32_t		 result, data;
#ifdef MYX_DEBUG
	static const char *cmds[MYXCMD_MAX] = {
		"CMD_NONE",
		"CMD_RESET",
		"CMD_GET_VERSION",
		"CMD_SET_INTRQDMA",
		"CMD_SET_BIGBUFSZ",
		"CMD_SET_SMALLBUFSZ",
		"CMD_GET_TXRINGOFF",
		"CMD_GET_RXSMALLRINGOFF",
		"CMD_GET_RXBIGRINGOFF",
		"CMD_GET_INTRACKOFF",
		"CMD_GET_INTRDEASSERTOFF",
		"CMD_GET_TXRINGSZ",
		"CMD_GET_RXRINGSZ",
		"CMD_SET_INTRQSZ",
		"CMD_SET_IFUP",
		"CMD_SET_IFDOWN",
		"CMD_SET_MTU",
		"CMD_GET_INTRCOALDELAYOFF",
		"CMD_SET_STATSINTVL",
		"CMD_SET_STATSDMA_OLD",
		"CMD_SET_PROMISC",
		"CMD_UNSET_PROMISC",
		"CMD_SET_LLADDR",
		"CMD_SET_FC",
		"CMD_UNSET_FC",
		"CMD_DMA_TEST",
		"CMD_SET_ALLMULTI",
		"CMD_UNSET_ALLMULTI",
		"CMD_SET_MCASTGROUP",
		"CMD_UNSET_MCASTGROUP",
		"CMD_UNSET_MCAST",
		"CMD_SET_STATSDMA",
		"CMD_UNALIGNED_DMA_TEST",
		"CMD_GET_UNALIGNED_STATUS"
	};
#endif

	mc->mc_cmd = htobe32(cmd);
	/* tell the firmware where to DMA the response */
	mc->mc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc->mc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));

	/* sentinel; the firmware overwrites it when it responds */
	mr = (struct myx_response *)sc->sc_cmddma.mxm_kva;
	mr->mr_result = 0xffffffff;

	/* Send command */
	myx_write(sc, MYX_CMD, (u_int8_t *)mc, sizeof(struct myx_cmd));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < 20; i++) {
		/* sync to see the firmware's DMA write, then re-arm */
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		result = betoh32(mr->mr_result);
		data = betoh32(mr->mr_data);

		if (result != 0xffffffff)
			break;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): %s completed, i %d, "
	    "result 0x%x, data 0x%x (%u)\n", DEVNAME(sc), __func__,
	    cmds[cmd], i, result, data, data);

	if (result != 0)
		return (-1);

	if (r != NULL)
		*r = data;
	return (0);
}
807
/*
 * Boot the firmware image previously written to SRAM: issue the boot
 * command through the MYX_BOOT window and poll the shared status word
 * for up to ~200ms (200 x 1ms).  The firmware writes 0xffffffff there
 * when it has started.  Returns 0 on success, 1 on timeout.
 */
int
myx_boot(struct myx_softc *sc, u_int32_t length)
{
	struct myx_bootcmd	 bc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	u_int32_t		*status;
	u_int			 i, ret = 1;

	memset(&bc, 0, sizeof(bc));
	bc.bc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	bc.bc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	bc.bc_result = 0xffffffff;
	bc.bc_offset = htobe32(MYX_FW_BOOT);
	/* skip the first 8 bytes of the image (copied to offset 8) */
	bc.bc_length = htobe32(length - 8);
	bc.bc_copyto = htobe32(8);
	bc.bc_jumpto = htobe32(0);

	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	/* Send command */
	myx_write(sc, MYX_BOOT, &bc, sizeof(bc));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < 200; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s: boot completed, i %d, result %d\n",
	    DEVNAME(sc), i, ret);

	return (ret);
}
851
/*
 * Enable or disable the firmware's dummy RDMA.  Like myx_boot(), the
 * firmware acknowledges by writing 0xffffffff into the shared status
 * word; poll for up to ~20ms.  Returns 0 on success, 1 on timeout.
 */
int
myx_rdma(struct myx_softc *sc, u_int do_enable)
{
	struct myx_rdmacmd	 rc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	bus_dmamap_t		 pad = sc->sc_paddma.mxm_map;
	u_int32_t		*status;
	int			 ret = 1;
	u_int			 i;

	/*
	 * It is required to setup a _dummy_ RDMA address. It also makes
	 * some PCI-E chipsets resend dropped messages.
	 */
	/*
	 * NOTE(review): rc is not memset like bc in myx_boot(); every
	 * visible field is assigned, but confirm struct myx_rdmacmd has
	 * no additional fields or padding the firmware inspects.
	 */
	rc.rc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	rc.rc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	rc.rc_result = 0xffffffff;
	rc.rc_rdma_high = htobe32(MYX_ADDRHIGH(pad->dm_segs[0].ds_addr));
	rc.rc_rdma_low = htobe32(MYX_ADDRLOW(pad->dm_segs[0].ds_addr));
	rc.rc_enable = htobe32(do_enable);

	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Send command */
	myx_write(sc, MYX_RDMA, &rc, sizeof(rc));

	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): dummy RDMA %s, i %d, result 0x%x\n",
	    DEVNAME(sc), __func__,
	    do_enable ? "enabled" : "disabled", i, betoh32(*status));

	return (ret);
}
902
/*
 * ifmedia change callback.  The hardware is fixed 10GbaseX so there is
 * nothing to reconfigure; always succeed.
 */
int
myx_media_change(struct ifnet *ifp)
{
	return (0);
}
909
/*
 * ifmedia status callback: report the link as seen in the firmware's
 * status block.  When the interface is not running no status is valid.
 * A link that is up is reported as full-duplex with flow control, as
 * set up by myx_up().
 */
void
myx_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
	u_int32_t		 sts;

	imr->ifm_active = IFM_ETHER | IFM_AUTO;
	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		imr->ifm_status = 0;
		return;
	}

	/* snapshot the (big-endian) link state under the status mutex */
	myx_sts_enter(sc);
	sts = sc->sc_sts->ms_linkstate;
	myx_sts_leave(sc);

	myx_link_state(sc, sts);

	imr->ifm_status = IFM_AVALID;
	if (!LINK_STATE_IS_UP(ifp->if_link_state))
		return;

	imr->ifm_active |= IFM_FDX | IFM_FLOW |
	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
	imr->ifm_status |= IFM_ACTIVE;
}
936
/*
 * Propagate the firmware link state (a big-endian word from the status
 * block) into the ifnet, announcing the change and updating the
 * baudrate only when the state actually changed.
 */
void
myx_link_state(struct myx_softc *sc, u_int32_t sts)
{
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	int			 link_state = LINK_STATE_DOWN;

	if (betoh32(sts) == MYXSTS_LINKUP)
		link_state = LINK_STATE_FULL_DUPLEX;
	if (ifp->if_link_state != link_state) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
		ifp->if_baudrate = LINK_STATE_IS_UP(ifp->if_link_state) ?
		    IF_Gbps(10) : 0;
	}
}
952
/*
 * ifnet watchdog callback.  Intentionally a no-op: no transmit-timeout
 * recovery is implemented for this device.
 */
void
myx_watchdog(struct ifnet *ifp)
{
}
958
/*
 * ifnet ioctl handler.  Runs at splnet.  SIOCSIFADDR/SIOCSIFFLAGS
 * bring the interface up or down; media ioctls go to ifmedia;
 * SIOCGIFRXR reports rx ring fill; everything else falls through to
 * ether_ioctl().  ENETRESET from any path is translated into a
 * multicast-filter reload via myx_iff() when the interface is running.
 */
int
myx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
	struct ifaddr		*ifa = (struct ifaddr *)data;
	struct ifreq		*ifr = (struct ifreq *)data;
	int			 s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_ac, ifa);
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;
			else
				myx_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				myx_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = myx_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			myx_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
1011
1012int
1013myx_rxrinfo(struct myx_softc *sc, struct if_rxrinfo *ifri)
1014{
1015	struct if_rxring_info ifr[2];
1016
1017	memset(ifr, 0, sizeof(ifr));
1018
1019	ifr[0].ifr_size = MYX_RXSMALL_SIZE;
1020	mtx_enter(&sc->sc_rx_ring_lock[0].mrl_mtx);
1021	ifr[0].ifr_info = sc->sc_rx_ring[0];
1022	mtx_leave(&sc->sc_rx_ring_lock[0].mrl_mtx);
1023
1024	ifr[1].ifr_size = MYX_RXBIG_SIZE;
1025	mtx_enter(&sc->sc_rx_ring_lock[1].mrl_mtx);
1026	ifr[1].ifr_info = sc->sc_rx_ring[1];
1027	mtx_leave(&sc->sc_rx_ring_lock[1].mrl_mtx);
1028
1029	return (if_rxr_info_ioctl(ifri, nitems(ifr), ifr));
1030}
1031
1032void
1033myx_up(struct myx_softc *sc)
1034{
1035	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1036	struct myx_buf		*mb;
1037	struct myx_cmd		mc;
1038	bus_dmamap_t		map;
1039	size_t			size;
1040	u_int			maxpkt;
1041	u_int32_t		r;
1042	int			i;
1043
1044	memset(&mc, 0, sizeof(mc));
1045	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
1046		printf("%s: failed to reset the device\n", DEVNAME(sc));
1047		return;
1048	}
1049
1050	if (myx_dmamem_alloc(sc, &sc->sc_zerodma,
1051	    64, MYXALIGN_CMD) != 0) {
1052		printf("%s: failed to allocate zero pad memory\n",
1053		    DEVNAME(sc));
1054		return;
1055	}
1056	memset(sc->sc_zerodma.mxm_kva, 0, 64);
1057	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1058	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1059
1060	if (myx_dmamem_alloc(sc, &sc->sc_paddma,
1061	    MYXALIGN_CMD, MYXALIGN_CMD) != 0) {
1062		printf("%s: failed to allocate pad DMA memory\n",
1063		    DEVNAME(sc));
1064		goto free_zero;
1065	}
1066	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1067	    sc->sc_paddma.mxm_map->dm_mapsize,
1068	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1069
1070	if (myx_rdma(sc, MYXRDMA_ON) != 0) {
1071		printf("%s: failed to enable dummy RDMA\n", DEVNAME(sc));
1072		goto free_pad;
1073	}
1074
1075	if (myx_cmd(sc, MYXCMD_GET_RXRINGSZ, &mc, &r) != 0) {
1076		printf("%s: unable to get rx ring size\n", DEVNAME(sc));
1077		goto free_pad;
1078	}
1079	sc->sc_rx_ring_count = r / sizeof(struct myx_rx_desc);
1080
1081	memset(&mc, 0, sizeof(mc));
1082	if (myx_cmd(sc, MYXCMD_GET_TXRINGSZ, &mc, &r) != 0) {
1083		printf("%s: unable to get tx ring size\n", DEVNAME(sc));
1084		goto free_pad;
1085	}
1086	sc->sc_tx_ring_idx = 0;
1087	sc->sc_tx_ring_count = r / sizeof(struct myx_tx_desc);
1088	sc->sc_tx_free = sc->sc_tx_ring_count - 1;
1089	sc->sc_tx_nsegs = min(16, sc->sc_tx_ring_count / 4); /* magic */
1090	sc->sc_tx_count = 0;
1091	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_count - 1);
1092	IFQ_SET_READY(&ifp->if_snd);
1093
1094	/* Allocate Interrupt Queue */
1095
1096	sc->sc_intrq_count = sc->sc_rx_ring_count * 2;
1097	sc->sc_intrq_idx = 0;
1098
1099	size = sc->sc_intrq_count * sizeof(struct myx_intrq_desc);
1100	if (myx_dmamem_alloc(sc, &sc->sc_intrq_dma,
1101	    size, MYXALIGN_DATA) != 0) {
1102		goto free_pad;
1103	}
1104	sc->sc_intrq = (struct myx_intrq_desc *)sc->sc_intrq_dma.mxm_kva;
1105	map = sc->sc_intrq_dma.mxm_map;
1106	memset(sc->sc_intrq, 0, size);
1107	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1108	    BUS_DMASYNC_PREREAD);
1109
1110	memset(&mc, 0, sizeof(mc));
1111	mc.mc_data0 = htobe32(size);
1112	if (myx_cmd(sc, MYXCMD_SET_INTRQSZ, &mc, NULL) != 0) {
1113		printf("%s: failed to set intrq size\n", DEVNAME(sc));
1114		goto free_intrq;
1115	}
1116
1117	memset(&mc, 0, sizeof(mc));
1118	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1119	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1120	if (myx_cmd(sc, MYXCMD_SET_INTRQDMA, &mc, NULL) != 0) {
1121		printf("%s: failed to set intrq address\n", DEVNAME(sc));
1122		goto free_intrq;
1123	}
1124
1125	/*
1126	 * get interrupt offsets
1127	 */
1128
1129	memset(&mc, 0, sizeof(mc));
1130	if (myx_cmd(sc, MYXCMD_GET_INTRACKOFF, &mc,
1131	    &sc->sc_irqclaimoff) != 0) {
1132		printf("%s: failed to get IRQ ack offset\n", DEVNAME(sc));
1133		goto free_intrq;
1134	}
1135
1136	memset(&mc, 0, sizeof(mc));
1137	if (myx_cmd(sc, MYXCMD_GET_INTRDEASSERTOFF, &mc,
1138	    &sc->sc_irqdeassertoff) != 0) {
1139		printf("%s: failed to get IRQ deassert offset\n", DEVNAME(sc));
1140		goto free_intrq;
1141	}
1142
1143	memset(&mc, 0, sizeof(mc));
1144	if (myx_cmd(sc, MYXCMD_GET_INTRCOALDELAYOFF, &mc,
1145	    &sc->sc_irqcoaloff) != 0) {
1146		printf("%s: failed to get IRQ coal offset\n", DEVNAME(sc));
1147		goto free_intrq;
1148	}
1149
1150	/* Set an appropriate interrupt coalescing period */
1151	r = htobe32(MYX_IRQCOALDELAY);
1152	myx_write(sc, sc->sc_irqcoaloff, &r, sizeof(r));
1153
1154	if (myx_setlladdr(sc, MYXCMD_SET_LLADDR, LLADDR(ifp->if_sadl)) != 0) {
1155		printf("%s: failed to configure lladdr\n", DEVNAME(sc));
1156		goto free_intrq;
1157	}
1158
1159	memset(&mc, 0, sizeof(mc));
1160	if (myx_cmd(sc, MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1161		printf("%s: failed to disable promisc mode\n", DEVNAME(sc));
1162		goto free_intrq;
1163	}
1164
1165	memset(&mc, 0, sizeof(mc));
1166	if (myx_cmd(sc, MYXCMD_FC_DEFAULT, &mc, NULL) != 0) {
1167		printf("%s: failed to configure flow control\n", DEVNAME(sc));
1168		goto free_intrq;
1169	}
1170
1171	memset(&mc, 0, sizeof(mc));
1172	if (myx_cmd(sc, MYXCMD_GET_TXRINGOFF, &mc,
1173	    &sc->sc_tx_ring_offset) != 0) {
1174		printf("%s: unable to get tx ring offset\n", DEVNAME(sc));
1175		goto free_intrq;
1176	}
1177
1178	memset(&mc, 0, sizeof(mc));
1179	if (myx_cmd(sc, MYXCMD_GET_RXSMALLRINGOFF, &mc,
1180	    &sc->sc_rx_ring_offset[MYX_RXSMALL]) != 0) {
1181		printf("%s: unable to get small rx ring offset\n", DEVNAME(sc));
1182		goto free_intrq;
1183	}
1184
1185	memset(&mc, 0, sizeof(mc));
1186	if (myx_cmd(sc, MYXCMD_GET_RXBIGRINGOFF, &mc,
1187	    &sc->sc_rx_ring_offset[MYX_RXBIG]) != 0) {
1188		printf("%s: unable to get big rx ring offset\n", DEVNAME(sc));
1189		goto free_intrq;
1190	}
1191
1192	/* Allocate Interrupt Data */
1193	if (myx_dmamem_alloc(sc, &sc->sc_sts_dma,
1194	    sizeof(struct myx_status), MYXALIGN_DATA) != 0) {
1195		printf("%s: failed to allocate status DMA memory\n",
1196		    DEVNAME(sc));
1197		goto free_intrq;
1198	}
1199	sc->sc_sts = (struct myx_status *)sc->sc_sts_dma.mxm_kva;
1200	map = sc->sc_sts_dma.mxm_map;
1201	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1202	    BUS_DMASYNC_PREREAD);
1203
1204	memset(&mc, 0, sizeof(mc));
1205	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1206	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1207	mc.mc_data2 = htobe32(sizeof(struct myx_status));
1208	if (myx_cmd(sc, MYXCMD_SET_STATSDMA, &mc, NULL) != 0) {
1209		printf("%s: failed to set status DMA offset\n", DEVNAME(sc));
1210		goto free_sts;
1211	}
1212
1213	maxpkt = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1214
1215	memset(&mc, 0, sizeof(mc));
1216	mc.mc_data0 = htobe32(maxpkt);
1217	if (myx_cmd(sc, MYXCMD_SET_MTU, &mc, NULL) != 0) {
1218		printf("%s: failed to set MTU size %d\n", DEVNAME(sc), maxpkt);
1219		goto free_sts;
1220	}
1221
1222	for (i = 0; i < sc->sc_tx_ring_count; i++) {
1223		mb = myx_buf_alloc(sc, maxpkt, sc->sc_tx_nsegs,
1224		    sc->sc_tx_boundary, sc->sc_tx_boundary);
1225		if (mb == NULL)
1226			goto free_tx_bufs;
1227
1228		myx_buf_put(&sc->sc_tx_buf_free, mb);
1229	}
1230
1231	for (i = 0; i < sc->sc_rx_ring_count; i++) {
1232		mb = myx_buf_alloc(sc, MYX_RXSMALL_SIZE, 1, 4096, 4096);
1233		if (mb == NULL)
1234			goto free_rxsmall_bufs;
1235
1236		myx_buf_put(&sc->sc_rx_buf_free[MYX_RXSMALL], mb);
1237	}
1238
1239	for (i = 0; i < sc->sc_rx_ring_count; i++) {
1240		mb = myx_buf_alloc(sc, 12 * 1024, 1, 12 * 1024, 0);
1241		if (mb == NULL)
1242			goto free_rxbig_bufs;
1243
1244		myx_buf_put(&sc->sc_rx_buf_free[MYX_RXBIG], mb);
1245	}
1246
1247	if_rxr_init(&sc->sc_rx_ring[MYX_RXBIG], 2, sc->sc_rx_ring_count - 2);
1248	if_rxr_init(&sc->sc_rx_ring[MYX_RXSMALL], 2, sc->sc_rx_ring_count - 2);
1249
1250	myx_rx_zero(sc, MYX_RXSMALL);
1251	if (myx_rx_fill(sc, MYX_RXSMALL) != 0) {
1252		printf("%s: failed to fill small rx ring\n", DEVNAME(sc));
1253		goto free_rxbig_bufs;
1254	}
1255
1256	myx_rx_zero(sc, MYX_RXBIG);
1257	if (myx_rx_fill(sc, MYX_RXBIG) != 0) {
1258		printf("%s: failed to fill big rx ring\n", DEVNAME(sc));
1259		goto free_rxsmall;
1260	}
1261
1262	memset(&mc, 0, sizeof(mc));
1263	mc.mc_data0 = htobe32(MYX_RXSMALL_SIZE - ETHER_ALIGN);
1264	if (myx_cmd(sc, MYXCMD_SET_SMALLBUFSZ, &mc, NULL) != 0) {
1265		printf("%s: failed to set small buf size\n", DEVNAME(sc));
1266		goto free_rxbig;
1267	}
1268
1269	memset(&mc, 0, sizeof(mc));
1270	mc.mc_data0 = htobe32(16384);
1271	if (myx_cmd(sc, MYXCMD_SET_BIGBUFSZ, &mc, NULL) != 0) {
1272		printf("%s: failed to set big buf size\n", DEVNAME(sc));
1273		goto free_rxbig;
1274	}
1275
1276	mtx_enter(&sc->sc_sts_mtx);
1277	sc->sc_state = MYX_S_RUNNING;
1278	mtx_leave(&sc->sc_sts_mtx);
1279
1280	if (myx_cmd(sc, MYXCMD_SET_IFUP, &mc, NULL) != 0) {
1281		printf("%s: failed to start the device\n", DEVNAME(sc));
1282		goto free_rxbig;
1283	}
1284
1285	CLR(ifp->if_flags, IFF_OACTIVE);
1286	SET(ifp->if_flags, IFF_RUNNING);
1287	myx_iff(sc);
1288	myx_start(ifp);
1289
1290	return;
1291
1292free_rxbig:
1293	while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXBIG])) != NULL) {
1294		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
1295		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1296		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
1297		m_freem(mb->mb_m);
1298		myx_buf_free(sc, mb);
1299	}
1300free_rxsmall:
1301	while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXSMALL])) != NULL) {
1302		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
1303		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1304		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
1305		m_freem(mb->mb_m);
1306		myx_buf_free(sc, mb);
1307	}
1308free_rxbig_bufs:
1309	while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXBIG])) != NULL)
1310		myx_buf_free(sc, mb);
1311free_rxsmall_bufs:
1312	while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXSMALL])) != NULL)
1313		myx_buf_free(sc, mb);
1314free_tx_bufs:
1315	while ((mb = myx_buf_get(&sc->sc_tx_buf_free)) != NULL)
1316		myx_buf_free(sc, mb);
1317free_sts:
1318	bus_dmamap_sync(sc->sc_dmat, sc->sc_sts_dma.mxm_map, 0,
1319	    sc->sc_sts_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1320	myx_dmamem_free(sc, &sc->sc_sts_dma);
1321free_intrq:
1322	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
1323	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1324	myx_dmamem_free(sc, &sc->sc_intrq_dma);
1325free_pad:
1326	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1327	    sc->sc_paddma.mxm_map->dm_mapsize,
1328	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1329	myx_dmamem_free(sc, &sc->sc_paddma);
1330
1331	memset(&mc, 0, sizeof(mc));
1332	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
1333		printf("%s: failed to reset the device\n", DEVNAME(sc));
1334	}
1335free_zero:
1336	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1337	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1338	myx_dmamem_free(sc, &sc->sc_zerodma);
1339}
1340
1341int
1342myx_setlladdr(struct myx_softc *sc, u_int32_t cmd, u_int8_t *addr)
1343{
1344	struct myx_cmd		 mc;
1345
1346	memset(&mc, 0, sizeof(mc));
1347	mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1348	    addr[2] << 8 | addr[3]);
1349	mc.mc_data1 = htobe32(addr[4] << 8 | addr[5]);
1350
1351	if (myx_cmd(sc, cmd, &mc, NULL) != 0) {
1352		printf("%s: failed to set the lladdr\n", DEVNAME(sc));
1353		return (-1);
1354	}
1355	return (0);
1356}
1357
1358void
1359myx_iff(struct myx_softc *sc)
1360{
1361	struct myx_cmd		mc;
1362	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1363	struct ether_multi	*enm;
1364	struct ether_multistep	step;
1365	u_int8_t *addr;
1366
1367	CLR(ifp->if_flags, IFF_ALLMULTI);
1368
1369	if (myx_cmd(sc, ISSET(ifp->if_flags, IFF_PROMISC) ?
1370	    MYXCMD_SET_PROMISC : MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1371		printf("%s: failed to configure promisc mode\n", DEVNAME(sc));
1372		return;
1373	}
1374
1375	if (myx_cmd(sc, MYXCMD_SET_ALLMULTI, &mc, NULL) != 0) {
1376		printf("%s: failed to enable ALLMULTI\n", DEVNAME(sc));
1377		return;
1378	}
1379
1380	if (myx_cmd(sc, MYXCMD_UNSET_MCAST, &mc, NULL) != 0) {
1381		printf("%s: failed to leave all mcast groups \n", DEVNAME(sc));
1382		return;
1383	}
1384
1385	if (ISSET(ifp->if_flags, IFF_PROMISC) ||
1386	    sc->sc_ac.ac_multirangecnt > 0) {
1387		SET(ifp->if_flags, IFF_ALLMULTI);
1388		return;
1389	}
1390
1391	ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
1392	while (enm != NULL) {
1393		addr = enm->enm_addrlo;
1394
1395		memset(&mc, 0, sizeof(mc));
1396		mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1397		    addr[2] << 8 | addr[3]);
1398		mc.mc_data1 = htobe32(addr[4] << 24 | addr[5] << 16);
1399		if (myx_cmd(sc, MYXCMD_SET_MCASTGROUP, &mc, NULL) != 0) {
1400			printf("%s: failed to join mcast group\n", DEVNAME(sc));
1401			return;
1402		}
1403
1404		ETHER_NEXT_MULTI(step, enm);
1405	}
1406
1407	memset(&mc, 0, sizeof(mc));
1408	if (myx_cmd(sc, MYXCMD_UNSET_ALLMULTI, &mc, NULL) != 0) {
1409		printf("%s: failed to disable ALLMULTI\n", DEVNAME(sc));
1410		return;
1411	}
1412}
1413
/*
 * Bring the interface down: tell the firmware to stop, wait for the
 * interrupt handler to acknowledge the shutdown, then tear down every
 * buffer and dma region the up path allocated.
 */
void
myx_down(struct myx_softc *sc)
{
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	volatile struct myx_status *sts = sc->sc_sts;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	struct myx_buf		*mb;
	struct myx_cmd		 mc;
	int			 s;

	myx_sts_enter(sc);
	/* remember the link-down count so the handler can spot the change */
	sc->sc_linkdown = sts->ms_linkdown;
	sc->sc_state = MYX_S_DOWN;

	memset(&mc, 0, sizeof(mc));
	(void)myx_cmd(sc, MYXCMD_SET_IFDOWN, &mc, NULL);

	/*
	 * myx_intr() moves us from MYX_S_DOWN to MYX_S_OFF and wakes us
	 * up; msleep drops and retakes the sts mutex around each wait.
	 */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	while (sc->sc_state != MYX_S_OFF)
		msleep(sts, &sc->sc_sts_mtx, 0, "myxdown", 0);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mtx_leave(&sc->sc_sts_mtx);

	timeout_del(&sc->sc_refill);

	/* report the link as unknown while the device is down */
	s = splnet();
	if (ifp->if_link_state != LINK_STATE_UNKNOWN) {
		ifp->if_link_state = LINK_STATE_UNKNOWN;
		ifp->if_baudrate = 0;
		if_link_state_change(ifp);
	}
	splx(s);

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
	}

	CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);

	/* free rx buffers still posted to the hardware */
	while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXBIG])) != NULL) {
		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
		m_freem(mb->mb_m);
		myx_buf_free(sc, mb);
	}

	while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXSMALL])) != NULL) {
		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
		m_freem(mb->mb_m);
		myx_buf_free(sc, mb);
	}

	/* then the never-posted spares on the free lists */
	while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXBIG])) != NULL)
		myx_buf_free(sc, mb);

	while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXSMALL])) != NULL)
		myx_buf_free(sc, mb);

	/* tx buffers still in flight carry mbufs that must be freed */
	while ((mb = myx_buf_get(&sc->sc_tx_buf_list)) != NULL) {
		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
		m_freem(mb->mb_m);
		myx_buf_free(sc, mb);
	}

	while ((mb = myx_buf_get(&sc->sc_tx_buf_free)) != NULL)
		myx_buf_free(sc, mb);

	/* the sleep shizz above already synced this dmamem */
	myx_dmamem_free(sc, &sc->sc_sts_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_intrq_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
	    sc->sc_paddma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_paddma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_zerodma);
}
1504
/*
 * Write the tail (non-first) tx descriptors for a buffer into the
 * NIC's tx ring, and append one extra descriptor pointing at the
 * shared zero buffer when the frame is shorter than the 60-byte
 * minimum ethernet payload.  The first descriptor is written
 * separately by the caller.
 */
void
myx_write_txd_tail(struct myx_softc *sc, struct myx_buf *mb, u_int8_t flags,
    u_int32_t offset, u_int idx)
{
	struct myx_tx_desc		txd;
	bus_dmamap_t			zmap = sc->sc_zerodma.mxm_map;
	bus_dmamap_t			map = mb->mb_map;
	int				i;

	/* segment 0 belongs to the first descriptor; start at 1 */
	for (i = 1; i < map->dm_nsegs; i++) {
		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(map->dm_segs[i].ds_addr);
		txd.tx_length = htobe16(map->dm_segs[i].ds_len);
		txd.tx_flags = flags;

		myx_bus_space_write(sc->sc_memt, sc->sc_memh,
		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
		    &txd, sizeof(txd));
	}

	/* pad runt frames */
	if (map->dm_mapsize < 60) {
		/* i still indexes the slot after the last segment */
		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(zmap->dm_segs[0].ds_addr);
		txd.tx_length = htobe16(60 - map->dm_mapsize);
		txd.tx_flags = flags;

		myx_bus_space_write(sc->sc_memt, sc->sc_memh,
		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
		    &txd, sizeof(txd));
	}
}
1537
1538void
1539myx_start(struct ifnet *ifp)
1540{
1541	struct myx_tx_desc		txd;
1542	SIMPLEQ_HEAD(, myx_buf)		list = SIMPLEQ_HEAD_INITIALIZER(list);
1543	struct myx_softc		*sc = ifp->if_softc;
1544	bus_dmamap_t			map;
1545	struct myx_buf			*mb, *firstmb;
1546	struct mbuf			*m;
1547	u_int32_t			offset = sc->sc_tx_ring_offset;
1548	u_int				idx, firstidx;
1549	u_int8_t			flags;
1550
1551	if (!ISSET(ifp->if_flags, IFF_RUNNING) ||
1552	    ISSET(ifp->if_flags, IFF_OACTIVE) ||
1553	    IFQ_IS_EMPTY(&ifp->if_snd))
1554		return;
1555
1556	for (;;) {
1557		if (sc->sc_tx_free <= sc->sc_tx_nsegs ||
1558		    (mb = myx_buf_get(&sc->sc_tx_buf_free)) == NULL) {
1559			SET(ifp->if_flags, IFF_OACTIVE);
1560			break;
1561		}
1562
1563		IFQ_DEQUEUE(&ifp->if_snd, m);
1564		if (m == NULL) {
1565			myx_buf_put(&sc->sc_tx_buf_free, mb);
1566			break;
1567		}
1568
1569		if (myx_load_buf(sc, mb, m) != 0) {
1570			m_freem(m);
1571			myx_buf_put(&sc->sc_tx_buf_free, mb);
1572			ifp->if_oerrors++;
1573			continue;
1574		}
1575
1576#if NBPFILTER > 0
1577		if (ifp->if_bpf)
1578			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1579#endif
1580
1581		mb->mb_m = m;
1582
1583		map = mb->mb_map;
1584		bus_dmamap_sync(sc->sc_dmat, map, 0,
1585		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1586
1587		SIMPLEQ_INSERT_TAIL(&list, mb, mb_entry);
1588
1589		sc->sc_tx_free -= map->dm_nsegs +
1590		    (map->dm_mapsize < 60 ? 1 : 0);
1591	}
1592
1593	/* post the first descriptor last */
1594	firstmb = SIMPLEQ_FIRST(&list);
1595	if (firstmb == NULL)
1596		return;
1597
1598	SIMPLEQ_REMOVE_HEAD(&list, mb_entry);
1599	myx_buf_put(&sc->sc_tx_buf_list, firstmb);
1600
1601	idx = firstidx = sc->sc_tx_ring_idx;
1602	idx += firstmb->mb_map->dm_nsegs +
1603	    (firstmb->mb_map->dm_mapsize < 60 ? 1 : 0);
1604	idx %= sc->sc_tx_ring_count;
1605
1606	while ((mb = SIMPLEQ_FIRST(&list)) != NULL) {
1607		SIMPLEQ_REMOVE_HEAD(&list, mb_entry);
1608		myx_buf_put(&sc->sc_tx_buf_list, mb);
1609
1610		map = mb->mb_map;
1611
1612		flags = MYXTXD_FLAGS_NO_TSO;
1613		if (map->dm_mapsize < 1520)
1614			flags |= MYXTXD_FLAGS_SMALL;
1615
1616		memset(&txd, 0, sizeof(txd));
1617		txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
1618		txd.tx_length = htobe16(map->dm_segs[0].ds_len);
1619		txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
1620		txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;
1621		myx_bus_space_write(sc->sc_memt, sc->sc_memh,
1622		    offset + sizeof(txd) * idx, &txd, sizeof(txd));
1623
1624		myx_write_txd_tail(sc, mb, flags, offset, idx);
1625
1626		idx += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
1627		idx %= sc->sc_tx_ring_count;
1628	}
1629	sc->sc_tx_ring_idx = idx;
1630
1631	/* go back and post first mb */
1632	map = firstmb->mb_map;
1633
1634	flags = MYXTXD_FLAGS_NO_TSO;
1635	if (map->dm_mapsize < 1520)
1636		flags |= MYXTXD_FLAGS_SMALL;
1637
1638	txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
1639	txd.tx_length = htobe16(map->dm_segs[0].ds_len);
1640	txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
1641	txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;
1642
1643	/* make sure the first descriptor is seen after the others */
1644	myx_write_txd_tail(sc, firstmb, flags, offset, firstidx);
1645
1646	myx_bus_space_write(sc->sc_memt, sc->sc_memh,
1647	    offset + sizeof(txd) * firstidx, &txd,
1648	    sizeof(txd) - sizeof(myx_bus_t));
1649
1650	bus_space_barrier(sc->sc_memt, sc->sc_memh, offset,
1651	    sizeof(txd) * sc->sc_tx_ring_count, BUS_SPACE_BARRIER_WRITE);
1652
1653	myx_bus_space_write(sc->sc_memt, sc->sc_memh,
1654	    offset + sizeof(txd) * (firstidx + 1) - sizeof(myx_bus_t),
1655	    (u_int8_t *)&txd + sizeof(txd) - sizeof(myx_bus_t),
1656	    sizeof(myx_bus_t));
1657
1658	bus_space_barrier(sc->sc_memt, sc->sc_memh,
1659	    offset + sizeof(txd) * firstidx, sizeof(txd),
1660	    BUS_SPACE_BARRIER_WRITE);
1661}
1662
/*
 * Load an mbuf chain into the buffer's dma map, defragmenting the
 * chain once if it has more segments than the map accepts.
 * Returns 0 on success; on failure returns 1 and the caller keeps
 * ownership of (and must free) the mbuf.
 */
int
myx_load_buf(struct myx_softc *sc, struct myx_buf *mb, struct mbuf *m)
{
	bus_dma_tag_t			dmat = sc->sc_dmat;
	bus_dmamap_t			dmap = mb->mb_map;

	switch (bus_dmamap_load_mbuf(dmat, dmap, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
	case 0:
		break;

	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(dmat, dmap, m,
		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
			break;
		/* FALLTHROUGH */
	default:
		return (1);
	}

	mb->mb_m = m;
	return (0);
}
1686
/*
 * Interrupt handler.  The firmware dmas a myx_status block into host
 * memory and marks it valid; this handler consumes it, reaps tx
 * completions and rx packets, and handles link and shutdown events.
 */
int
myx_intr(void *arg)
{
	struct myx_softc	*sc = (struct myx_softc *)arg;
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	volatile struct myx_status *sts = sc->sc_sts;
	enum myx_state		 state = MYX_S_RUNNING;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	u_int32_t		 data, link = 0xffffffff;
	u_int8_t		 valid = 0;

	mtx_enter(&sc->sc_sts_mtx);
	if (sc->sc_state == MYX_S_OFF) {
		/* the device has been shut down; nothing to do */
		mtx_leave(&sc->sc_sts_mtx);
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	/* not our interrupt if the status block is not marked valid */
	valid = sts->ms_isvalid;
	if (valid == 0x0) {
		myx_sts_leave(sc);
		return (0);
	}

	/* legacy INTx needs an explicit deassert write */
	if (sc->sc_intx) {
		data = htobe32(0);
		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqdeassertoff, &data, sizeof(data));
	}
	sts->ms_isvalid = 0;

	/*
	 * re-read the tx done count until the firmware stops updating
	 * the status block underneath us
	 */
	do {
		data = sts->ms_txdonecnt;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE |
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	} while (sts->ms_isvalid);

	if (sts->ms_statusupdated) {
		link = sts->ms_linkstate;

		/*
		 * a changed linkdown count while in the DOWN state is
		 * the firmware acknowledging the shutdown
		 */
		if (sc->sc_state == MYX_S_DOWN &&
		    sc->sc_linkdown != sts->ms_linkdown)
			state = MYX_S_DOWN;
	}
	myx_sts_leave(sc);

	data = betoh32(data);
	if (data != sc->sc_tx_count)
		myx_txeof(sc, data);

	data = htobe32(3);
	if (valid & 0x1) {
		myx_rxeof(sc);

		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqclaimoff, &data, sizeof(data));
	}
	/* claim the interrupt so the firmware can raise the next one */
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
	    sc->sc_irqclaimoff + sizeof(data), &data, sizeof(data));

	if (state == MYX_S_DOWN) {
		/* myx_down is waiting for us */
		mtx_enter(&sc->sc_sts_mtx);
		sc->sc_state = MYX_S_OFF;
		wakeup(sts);
		mtx_leave(&sc->sc_sts_mtx);

		return (1);
	}

	if (link != 0xffffffff) {
		KERNEL_LOCK();
		myx_link_state(sc, link);
		KERNEL_UNLOCK();
	}

	/* tx slots were freed; kick the transmit path again */
	if (ISSET(ifp->if_flags, IFF_OACTIVE)) {
		KERNEL_LOCK();
		CLR(ifp->if_flags, IFF_OACTIVE);
		myx_start(ifp);
		KERNEL_UNLOCK();
	}

	return (1);
}
1776
1777void
1778myx_refill(void *xsc)
1779{
1780	struct myx_softc *sc = xsc;
1781	int i;
1782
1783	for (i = 0; i < 2; i++) {
1784		if (myx_rx_fill(sc, i) >= 0 &&
1785		    myx_bufs_empty(&sc->sc_rx_buf_list[i]))
1786			timeout_add(&sc->sc_refill, 1);
1787	}
1788}
1789
/*
 * Transmit completion: reap buffers from the in-flight tx list until
 * our completion counter catches up with the firmware's done count,
 * then return the reclaimed descriptor slots to sc_tx_free.
 */
void
myx_txeof(struct myx_softc *sc, u_int32_t done_count)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct myx_buf *mb;
	struct mbuf *m;
	bus_dmamap_t map;
	u_int free = 0;

	do {
		mb = myx_buf_get(&sc->sc_tx_buf_list);
		if (mb == NULL) {
			/* should not happen: firmware completed more
			 * packets than we have in flight */
			printf("oh noes, no mb!\n");
			break;
		}

		m = mb->mb_m;
		map = mb->mb_map;

		/* a runt frame consumed one extra slot for its pad */
		free += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);

		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, map);
		ifp->if_opackets++;

		m_freem(m);
		myx_buf_put(&sc->sc_tx_buf_free, mb);
	} while (++sc->sc_tx_count != done_count);

	if (free) {
		/* NOTE(review): sc_tx_free appears to be serialised by
		 * the kernel lock (myx_start runs under it) — confirm */
		KERNEL_LOCK();
		sc->sc_tx_free += free;
		KERNEL_UNLOCK();
	}
}
1827
/*
 * Receive completion: drain the interrupt queue, hand finished
 * packets to the network stack, and refill the rx rings.
 */
void
myx_rxeof(struct myx_softc *sc)
{
	static const struct myx_intrq_desc zerodesc = { 0, 0 };
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct myx_buf *mb;
	struct mbuf *m;
	int ring;
	u_int rxfree[2] = { 0 , 0 };
	u_int len;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	/* a zero length terminates the queue; clear entries as we go */
	while ((len = betoh16(sc->sc_intrq[sc->sc_intrq_idx].iq_length)) != 0) {
		sc->sc_intrq[sc->sc_intrq_idx] = zerodesc;

		if (++sc->sc_intrq_idx >= sc->sc_intrq_count)
			sc->sc_intrq_idx = 0;

		/* short frames were received into the small ring */
		ring = (len <= (MYX_RXSMALL_SIZE - ETHER_ALIGN)) ?
		    MYX_RXSMALL : MYX_RXBIG;

		mb = myx_buf_get(&sc->sc_rx_buf_list[ring]);
		if (mb == NULL) {
			/* should not happen: completion with no posted buf */
			printf("oh noes, no mb!\n");
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);

		m = mb->mb_m;
		/* skip the alignment pad so the ip header is aligned */
		m->m_data += ETHER_ALIGN;
		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);

		myx_buf_put(&sc->sc_rx_buf_free[ring], mb);

		rxfree[ring]++;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	for (ring = MYX_RXSMALL; ring <= MYX_RXBIG; ring++) {
		if (rxfree[ring] == 0)
			continue;

		mtx_enter(&sc->sc_rx_ring_lock[ring].mrl_mtx);
		if_rxr_put(&sc->sc_rx_ring[ring], rxfree[ring]);
		mtx_leave(&sc->sc_rx_ring_lock[ring].mrl_mtx);

		/* if the ring is still empty after a fill, retry later */
		if (myx_rx_fill(sc, ring) >= 0 &&
		    myx_bufs_empty(&sc->sc_rx_buf_list[ring]))
			timeout_add(&sc->sc_refill, 0);
	}

	ifp->if_ipackets += ml_len(&ml);

	if_input(ifp, &ml);
}
1893
1894void
1895myx_rx_zero(struct myx_softc *sc, int ring)
1896{
1897	struct myx_rx_desc rxd;
1898	u_int32_t offset = sc->sc_rx_ring_offset[ring];
1899	int idx;
1900
1901	sc->sc_rx_ring_idx[ring] = 0;
1902
1903	memset(&rxd, 0xff, sizeof(rxd));
1904	for (idx = 0; idx < sc->sc_rx_ring_count; idx++) {
1905		myx_write(sc, offset + idx * sizeof(rxd),
1906		    &rxd, sizeof(rxd));
1907	}
1908}
1909
/*
 * Post up to "slots" freshly filled rx buffers into the ring.  The
 * first descriptor is written last, behind a write barrier, so the
 * firmware never sees a partially posted batch.  Returns the number
 * of slots that could not be filled.
 */
static inline int
myx_rx_fill_slots(struct myx_softc *sc, int ring, u_int slots)
{
	struct myx_rx_desc rxd;
	struct myx_buf *mb, *firstmb;
	u_int32_t offset = sc->sc_rx_ring_offset[ring];
	u_int idx, firstidx;

	firstmb = myx_buf_fill(sc, ring);
	if (firstmb == NULL)
		return (slots);

	myx_buf_put(&sc->sc_rx_buf_list[ring], firstmb);

	/* skip the first slot here; it is written at the end */
	firstidx = sc->sc_rx_ring_idx[ring];
	idx = firstidx + 1;
	idx %= sc->sc_rx_ring_count;
	slots--;

	while (slots > 0 && (mb = myx_buf_fill(sc, ring)) != NULL) {
		myx_buf_put(&sc->sc_rx_buf_list[ring], mb);

		rxd.rx_addr = htobe64(mb->mb_map->dm_segs[0].ds_addr);
		myx_bus_space_write(sc->sc_memt, sc->sc_memh,
		    offset + idx * sizeof(rxd), &rxd, sizeof(rxd));

		idx++;
		idx %= sc->sc_rx_ring_count;
		slots--;
	}

	/* make sure the first descriptor is seen after the others */
	if (idx != firstidx + 1) {
		bus_space_barrier(sc->sc_memt, sc->sc_memh,
		    offset, sizeof(rxd) * sc->sc_rx_ring_count,
		    BUS_SPACE_BARRIER_WRITE);
	}

	rxd.rx_addr = htobe64(firstmb->mb_map->dm_segs[0].ds_addr);
	myx_write(sc, offset + firstidx * sizeof(rxd),
	    &rxd, sizeof(rxd));

	sc->sc_rx_ring_idx[ring] = idx;

	return (slots);
}
1956
/*
 * Refill an rx ring.  The myx_ring_enter/leave pair elects one
 * context as the filler; contexts that lose the race return -1 and
 * the winner loops on their behalf.  Returns -1 when another context
 * owns the ring, 1 when no slots were available, 0 when buffers were
 * posted.
 */
int
myx_rx_fill(struct myx_softc *sc, int ring)
{
	u_int slots;
	int rv = 1;

	if (!myx_ring_enter(&sc->sc_rx_ring_lock[ring]))
		return (-1);

	do {
		mtx_enter(&sc->sc_rx_ring_lock[ring].mrl_mtx);
		slots = if_rxr_get(&sc->sc_rx_ring[ring], sc->sc_rx_ring_count);
		mtx_leave(&sc->sc_rx_ring_lock[ring].mrl_mtx);

		if (slots == 0)
			continue;

		slots = myx_rx_fill_slots(sc, ring, slots);
		rv = 0;

		/* hand back any slots we could not use */
		mtx_enter(&sc->sc_rx_ring_lock[ring].mrl_mtx);
		if_rxr_put(&sc->sc_rx_ring[ring], slots);
		mtx_leave(&sc->sc_rx_ring_lock[ring].mrl_mtx);
	} while (!myx_ring_leave(&sc->sc_rx_ring_lock[ring]));

	return (rv);
}
1984
1985struct mbuf *
1986myx_mcl_small(void)
1987{
1988	struct mbuf *m;
1989
1990	m = MCLGETI(NULL, M_DONTWAIT, NULL, MYX_RXSMALL_SIZE);
1991	if (m == NULL)
1992		return (NULL);
1993
1994	m->m_len = m->m_pkthdr.len = MYX_RXSMALL_SIZE;
1995
1996	return (m);
1997}
1998
1999struct mbuf *
2000myx_mcl_big(void)
2001{
2002	struct mbuf *m;
2003	void *mcl;
2004
2005	MGETHDR(m, M_DONTWAIT, MT_DATA);
2006	if (m == NULL)
2007		return (NULL);
2008
2009	mcl = pool_get(myx_mcl_pool, PR_NOWAIT);
2010	if (mcl == NULL) {
2011		m_free(m);
2012		return (NULL);
2013	}
2014
2015	MEXTADD(m, mcl, MYX_RXBIG_SIZE, M_EXTWR, m_extfree_pool, myx_mcl_pool);
2016	m->m_len = m->m_pkthdr.len = MYX_RXBIG_SIZE;
2017
2018	return (m);
2019}
2020
2021struct myx_buf *
2022myx_buf_fill(struct myx_softc *sc, int ring)
2023{
2024	struct mbuf *(*mclget[2])(void) = { myx_mcl_small, myx_mcl_big };
2025	struct myx_buf *mb;
2026	struct mbuf *m;
2027	int rv;
2028
2029	m = (*mclget[ring])();
2030	if (m == NULL)
2031		return (NULL);
2032
2033	mb = myx_buf_get(&sc->sc_rx_buf_free[ring]);
2034	if (mb == NULL)
2035		goto mfree;
2036
2037	rv = bus_dmamap_load_mbuf(sc->sc_dmat, mb->mb_map, m, BUS_DMA_NOWAIT);
2038	if (rv != 0)
2039		goto put;
2040
2041	mb->mb_m = m;
2042	bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0, mb->mb_map->dm_mapsize,
2043	    BUS_DMASYNC_PREREAD);
2044
2045	return (mb);
2046
2047put:
2048	myx_buf_put(&sc->sc_rx_buf_free[ring], mb);
2049mfree:
2050	m_freem(m);
2051
2052	return (NULL);
2053}
2054
2055struct myx_buf *
2056myx_buf_alloc(struct myx_softc *sc, bus_size_t size, int nsegs,
2057    bus_size_t maxsegsz, bus_size_t boundary)
2058{
2059	struct myx_buf *mb;
2060
2061	mb = pool_get(myx_buf_pool, PR_WAITOK);
2062	if (mb == NULL)
2063		return (NULL);
2064
2065	if (bus_dmamap_create(sc->sc_dmat, size, nsegs, maxsegsz, boundary,
2066	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mb->mb_map) != 0) {
2067		pool_put(myx_buf_pool, mb);
2068		return (NULL);
2069	}
2070
2071	return (mb);
2072}
2073
/*
 * Release a buffer wrapper: destroy its dma map, then return the
 * wrapper to the pool.
 */
void
myx_buf_free(struct myx_softc *sc, struct myx_buf *mb)
{
	bus_dmamap_destroy(sc->sc_dmat, mb->mb_map);
	pool_put(myx_buf_pool, mb);
}
2080
2081struct myx_buf *
2082myx_buf_get(struct myx_buf_list *mbl)
2083{
2084	struct myx_buf *mb;
2085
2086	mtx_enter(&mbl->mbl_mtx);
2087	mb = SIMPLEQ_FIRST(&mbl->mbl_q);
2088	if (mb != NULL)
2089		SIMPLEQ_REMOVE_HEAD(&mbl->mbl_q, mb_entry);
2090	mtx_leave(&mbl->mbl_mtx);
2091
2092	return (mb);
2093}
2094
2095int
2096myx_bufs_empty(struct myx_buf_list *mbl)
2097{
2098	int rv;
2099
2100	mtx_enter(&mbl->mbl_mtx);
2101	rv = SIMPLEQ_EMPTY(&mbl->mbl_q);
2102	mtx_leave(&mbl->mbl_mtx);
2103
2104	return (rv);
2105}
2106
/*
 * Append a buffer to the tail of a buffer list under its mutex.
 */
void
myx_buf_put(struct myx_buf_list *mbl, struct myx_buf *mb)
{
	mtx_enter(&mbl->mbl_mtx);
	SIMPLEQ_INSERT_TAIL(&mbl->mbl_q, mb, mb_entry);
	mtx_leave(&mbl->mbl_mtx);
}
2114
/*
 * Initialise an empty buffer list and its protecting mutex.
 */
void
myx_bufs_init(struct myx_buf_list *mbl)
{
	SIMPLEQ_INIT(&mbl->mbl_q);
	mtx_init(&mbl->mbl_mtx, IPL_NET);
}
2121
/*
 * Initialise a ring lock: the mutex guards the if_rxr accounting and
 * mrl_running implements the single-filler election (see
 * myx_ring_enter/myx_ring_leave).
 */
void
myx_ring_lock_init(struct myx_ring_lock *mrl)
{
	mtx_init(&mrl->mrl_mtx, IPL_NET);
	mrl->mrl_running = 0;
}
2128
/*
 * Try to become the context responsible for filling a ring.  Every
 * caller bumps mrl_running; only the caller that moved it from 0 to 1
 * becomes the owner and gets a non-zero return.  Losers rely on the
 * owner noticing the extra count in myx_ring_leave() and looping.
 */
int
myx_ring_enter(struct myx_ring_lock *mrl)
{
	return (atomic_inc_int_nv(&mrl->mrl_running) == 1);
}
2134
/*
 * Attempt to give up ring ownership.  If nobody called
 * myx_ring_enter() while we worked, the counter is still 1 and the
 * cas drops it to 0: return non-zero so the owner stops.  Otherwise
 * collapse the counter back to 1 and return 0, telling the owner to
 * loop once more and absorb the delegated work.
 */
int
myx_ring_leave(struct myx_ring_lock *mrl)
{
	if (atomic_cas_uint(&mrl->mrl_running, 1, 0) == 1)
		return (1);

	mrl->mrl_running = 1;

	return (0);
}
2145