/* if_myx.c revision 1.92 */
1/*	$OpenBSD: if_myx.c,v 1.92 2015/12/11 16:07:02 mpi Exp $	*/
2
3/*
4 * Copyright (c) 2007 Reyk Floeter <reyk@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19/*
20 * Driver for the Myricom Myri-10G Lanai-Z8E Ethernet chipsets.
21 */
22
23#include "bpfilter.h"
24
25#include <sys/param.h>
26#include <sys/systm.h>
27#include <sys/sockio.h>
28#include <sys/mbuf.h>
29#include <sys/kernel.h>
30#include <sys/socket.h>
31#include <sys/malloc.h>
32#include <sys/pool.h>
33#include <sys/timeout.h>
34#include <sys/device.h>
35#include <sys/proc.h>
36#include <sys/queue.h>
37
38#include <machine/bus.h>
39#include <machine/intr.h>
40
41#include <net/if.h>
42#include <net/if_dl.h>
43#include <net/if_media.h>
44
45#if NBPFILTER > 0
46#include <net/bpf.h>
47#endif
48
49#include <netinet/in.h>
50#include <netinet/if_ether.h>
51
52#include <dev/pci/pcireg.h>
53#include <dev/pci/pcivar.h>
54#include <dev/pci/pcidevs.h>
55
56#include <dev/pci/if_myxreg.h>
57
#ifdef MYX_DEBUG
/*
 * Debug message categories.  These are independent bit flags so that
 * individual categories can be enabled in myx_debug; the original
 * definitions used (2<<0) and (3<<0), making MYXDBG_INTR (== 3)
 * overlap MYXDBG_INIT|MYXDBG_CMD and defeating per-category filtering.
 */
#define MYXDBG_INIT	(1<<0)	/* chipset initialization */
#define MYXDBG_CMD	(1<<1)	/* commands */
#define MYXDBG_INTR	(1<<2)	/* interrupts */
#define MYXDBG_ALL	0xffff	/* enable all debugging messages */
int myx_debug = MYXDBG_ALL;
#define DPRINTF(_lvl, _arg...)	do {					\
	if (myx_debug & (_lvl))						\
		printf(_arg);						\
} while (0)
#else
#define DPRINTF(_lvl, arg...)
#endif

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
73
/*
 * A region of DMA-able memory: the dmamap used to load/sync it, its
 * single backing segment, and the kernel virtual address it is mapped
 * at.  Allocated and torn down by myx_dmamem_alloc()/myx_dmamem_free().
 */
struct myx_dmamem {
	bus_dmamap_t		 mxm_map;	/* load/sync handle */
	bus_dma_segment_t	 mxm_seg;	/* single backing segment */
	int			 mxm_nsegs;	/* number of segments (1) */
	size_t			 mxm_size;	/* size of the region */
	caddr_t			 mxm_kva;	/* kernel virtual mapping */
};
81
/* Shared mbuf-cluster pool for big rx buffers; created once at first attach. */
struct pool *myx_mcl_pool;

/* One descriptor-ring slot: the DMA map and the mbuf currently loaded in it. */
struct myx_slot {
	bus_dmamap_t		 ms_map;
	struct mbuf		*ms_m;
};
88
/*
 * State for one of the two receive rings (small/big buffers).
 * The refill timeout retries buffer allocation when it fails.
 */
struct myx_rx_ring {
	struct myx_softc	*mrr_softc;	/* back pointer */
	struct timeout		 mrr_refill;	/* deferred refill via myx_refill() */
	struct if_rxring	 mrr_rxr;	/* rx ring accounting */
	struct myx_slot		*mrr_slots;	/* per-descriptor mbuf slots */
	u_int32_t		 mrr_offset;	/* ring offset in NIC memory */
	u_int			 mrr_running;
	u_int			 mrr_prod;	/* producer index */
	u_int			 mrr_cons;	/* consumer index */
	struct mbuf		*(*mrr_mclget)(void); /* buffer allocator for this ring */
};
100
/* Interface run state; declared volatile in the softc as it is shared
 * between the up/down paths and the interrupt side. */
enum myx_state {
	MYX_S_OFF = 0,
	MYX_S_RUNNING,
	MYX_S_DOWN
};
106
/* Per-device driver state. */
struct myx_softc {
	struct device		 sc_dev;
	struct arpcom		 sc_ac;		/* ethernet common data */

	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	 sc_ih;
	pcitag_t		 sc_tag;

	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;	/* BAR0 register window */
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;	/* size of mapped window */

	struct myx_dmamem	 sc_zerodma;	/* zero pad for short tx */
	struct myx_dmamem	 sc_cmddma;	/* command/response buffer */
	struct myx_dmamem	 sc_paddma;	/* dummy RDMA target */

	struct myx_dmamem	 sc_sts_dma;	/* status block DMA memory */
	volatile struct myx_status	*sc_sts;

	int			 sc_intx;	/* 1 if using INTx, 0 if MSI */
	void			*sc_irqh;
	/* offsets into NIC memory for interrupt control registers */
	u_int32_t		 sc_irqcoaloff;
	u_int32_t		 sc_irqclaimoff;
	u_int32_t		 sc_irqdeassertoff;

	struct myx_dmamem	 sc_intrq_dma;	/* interrupt (event) queue */
	struct myx_intrq_desc	*sc_intrq;
	u_int			 sc_intrq_count;
	u_int			 sc_intrq_idx;

	u_int			 sc_rx_ring_count;
#define  MYX_RXSMALL		 0
#define  MYX_RXBIG		 1
	struct myx_rx_ring	 sc_rx_ring[2];

	bus_size_t		 sc_tx_boundary;	/* 4096 aligned fw, 2048 unaligned */
	u_int			 sc_tx_ring_count;
	u_int32_t		 sc_tx_ring_offset;	/* tx ring offset in NIC memory */
	u_int			 sc_tx_nsegs;
	u_int32_t		 sc_tx_count; /* shadows ms_txdonecnt */
	u_int			 sc_tx_ring_prod;
	u_int			 sc_tx_ring_cons;

	u_int			 sc_tx_prod;
	u_int			 sc_tx_cons;
	struct myx_slot		*sc_tx_slots;

	struct ifmedia		 sc_media;

	volatile enum myx_state	 sc_state;	/* shared with interrupt side */
	volatile u_int8_t	 sc_linkdown;
};
160
161#define MYX_RXSMALL_SIZE	MCLBYTES
162#define MYX_RXBIG_SIZE		(9 * 1024)
163
164int	 myx_match(struct device *, void *, void *);
165void	 myx_attach(struct device *, struct device *, void *);
166int	 myx_pcie_dc(struct myx_softc *, struct pci_attach_args *);
167int	 myx_query(struct myx_softc *sc, char *, size_t);
168u_int	 myx_ether_aton(char *, u_int8_t *, u_int);
169void	 myx_attachhook(struct device *);
170int	 myx_loadfirmware(struct myx_softc *, const char *);
171int	 myx_probe_firmware(struct myx_softc *);
172
173void	 myx_read(struct myx_softc *, bus_size_t, void *, bus_size_t);
174void	 myx_write(struct myx_softc *, bus_size_t, void *, bus_size_t);
175
176#if defined(__LP64__)
177#define _myx_bus_space_write bus_space_write_raw_region_8
178typedef u_int64_t myx_bus_t;
179#else
180#define _myx_bus_space_write bus_space_write_raw_region_4
181typedef u_int32_t myx_bus_t;
182#endif
183#define myx_bus_space_write(_sc, _o, _a, _l) \
184    _myx_bus_space_write((_sc)->sc_memt, (_sc)->sc_memh, (_o), (_a), (_l))
185
186int	 myx_cmd(struct myx_softc *, u_int32_t, struct myx_cmd *, u_int32_t *);
187int	 myx_boot(struct myx_softc *, u_int32_t);
188
189int	 myx_rdma(struct myx_softc *, u_int);
190int	 myx_dmamem_alloc(struct myx_softc *, struct myx_dmamem *,
191	    bus_size_t, u_int align);
192void	 myx_dmamem_free(struct myx_softc *, struct myx_dmamem *);
193int	 myx_media_change(struct ifnet *);
194void	 myx_media_status(struct ifnet *, struct ifmediareq *);
195void	 myx_link_state(struct myx_softc *, u_int32_t);
196void	 myx_watchdog(struct ifnet *);
197int	 myx_ioctl(struct ifnet *, u_long, caddr_t);
198int	 myx_rxrinfo(struct myx_softc *, struct if_rxrinfo *);
199void	 myx_up(struct myx_softc *);
200void	 myx_iff(struct myx_softc *);
201void	 myx_down(struct myx_softc *);
202
203void	 myx_start(struct ifnet *);
204void	 myx_write_txd_tail(struct myx_softc *, struct myx_slot *, u_int8_t,
205	    u_int32_t, u_int);
206int	 myx_load_mbuf(struct myx_softc *, struct myx_slot *, struct mbuf *);
207int	 myx_setlladdr(struct myx_softc *, u_int32_t, u_int8_t *);
208int	 myx_intr(void *);
209void	 myx_rxeof(struct myx_softc *);
210void	 myx_txeof(struct myx_softc *, u_int32_t);
211
212int			myx_buf_fill(struct myx_softc *, struct myx_slot *,
213			    struct mbuf *(*)(void));
214struct mbuf *		myx_mcl_small(void);
215struct mbuf *		myx_mcl_big(void);
216
217int			myx_rx_init(struct myx_softc *, int, bus_size_t);
218int			myx_rx_fill(struct myx_softc *, struct myx_rx_ring *);
219void			myx_rx_empty(struct myx_softc *, struct myx_rx_ring *);
220void			myx_rx_free(struct myx_softc *, struct myx_rx_ring *);
221
222int			myx_tx_init(struct myx_softc *, bus_size_t);
223void			myx_tx_empty(struct myx_softc *);
224void			myx_tx_free(struct myx_softc *);
225
226void			myx_refill(void *);
227
/* autoconf(9) glue */
struct cfdriver myx_cd = {
	NULL, "myx", DV_IFNET
};
struct cfattach myx_ca = {
	sizeof(struct myx_softc), myx_match, myx_attach
};
234
/* PCI ids of the supported Myri-10G (Lanai-Z8E) variants */
const struct pci_matchid myx_devices[] = {
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E },
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E_9 }
};
239
/* Match any device from the supported PCI id table. */
int
myx_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, myx_devices, nitems(myx_devices)));
}
245
/*
 * Attach: map BAR0, read the MAC/part number from the board EEPROM,
 * map the interrupt and set up the shared cluster pool.  The rest of
 * the bring-up (firmware load, ifnet attach) is deferred to
 * myx_attachhook() at mountroot time, when the filesystem holding the
 * firmware image is available.
 */
void
myx_attach(struct device *parent, struct device *self, void *aux)
{
	struct myx_softc	*sc = (struct myx_softc *)self;
	struct pci_attach_args	*pa = aux;
	char			 part[32];
	pcireg_t		 memtype;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	/* Wire up both rx rings and their refill timeouts. */
	sc->sc_rx_ring[MYX_RXSMALL].mrr_softc = sc;
	sc->sc_rx_ring[MYX_RXSMALL].mrr_mclget = myx_mcl_small;
	timeout_set(&sc->sc_rx_ring[MYX_RXSMALL].mrr_refill, myx_refill,
	    &sc->sc_rx_ring[MYX_RXSMALL]);
	sc->sc_rx_ring[MYX_RXBIG].mrr_softc = sc;
	sc->sc_rx_ring[MYX_RXBIG].mrr_mclget = myx_mcl_big;
	timeout_set(&sc->sc_rx_ring[MYX_RXBIG].mrr_refill, myx_refill,
	    &sc->sc_rx_ring[MYX_RXBIG]);

	/* Map the PCI memory space */
	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MYXBAR0);
	if (pci_mapreg_map(pa, MYXBAR0, memtype, BUS_SPACE_MAP_PREFETCHABLE,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
		printf(": unable to map register memory\n");
		return;
	}

	/* Get board details (mac/part) */
	memset(part, 0, sizeof(part));
	if (myx_query(sc, part, sizeof(part)) != 0)
		goto unmap;

	/* Map the interrupt; prefer MSI, fall back to INTx. */
	if (pci_intr_map_msi(pa, &sc->sc_ih) != 0) {
		if (pci_intr_map(pa, &sc->sc_ih) != 0) {
			printf(": unable to map interrupt\n");
			goto unmap;
		}
		sc->sc_intx = 1;
	}

	printf(": %s, model %s, address %s\n",
	    pci_intr_string(pa->pa_pc, sc->sc_ih),
	    part[0] == '\0' ? "(unknown)" : part,
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	/* this is sort of racy */
	if (myx_mcl_pool == NULL) {
		extern struct kmem_pa_mode kp_dma_contig;

		/* NOTE(review): M_WAITOK malloc should not return NULL;
		 * the check below is kept as belt-and-braces. */
		myx_mcl_pool = malloc(sizeof(*myx_mcl_pool), M_DEVBUF,
		    M_WAITOK);
		if (myx_mcl_pool == NULL) {
			printf("%s: unable to allocate mcl pool\n",
			    DEVNAME(sc));
			goto unmap;
		}
		pool_init(myx_mcl_pool, MYX_RXBIG_SIZE, MYX_BOUNDARY, 0,
		    0, "myxmcl", NULL);
		pool_setipl(myx_mcl_pool, IPL_NET);
		pool_set_constraints(myx_mcl_pool, &kp_dma_contig);
	}

	if (myx_pcie_dc(sc, pa) != 0)
		printf("%s: unable to configure PCI Express\n", DEVNAME(sc));

	config_mountroot(self, myx_attachhook);

	return;

 unmap:
	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
	sc->sc_mems = 0;
}
322
323int
324myx_pcie_dc(struct myx_softc *sc, struct pci_attach_args *pa)
325{
326	pcireg_t dcsr;
327	pcireg_t mask = PCI_PCIE_DCSR_MPS | PCI_PCIE_DCSR_ERO;
328	pcireg_t dc = ((fls(4096) - 8) << 12) | PCI_PCIE_DCSR_ERO;
329	int reg;
330
331	if (pci_get_capability(sc->sc_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
332	    &reg, NULL) == 0)
333		return (-1);
334
335	reg += PCI_PCIE_DCSR;
336	dcsr = pci_conf_read(sc->sc_pc, pa->pa_tag, reg);
337	if ((dcsr & mask) != dc) {
338		CLR(dcsr, mask);
339		SET(dcsr, dc);
340		pci_conf_write(sc->sc_pc, pa->pa_tag, reg, dcsr);
341	}
342
343	return (0);
344}
345
346u_int
347myx_ether_aton(char *mac, u_int8_t *lladdr, u_int maxlen)
348{
349	u_int		i, j;
350	u_int8_t	digit;
351
352	memset(lladdr, 0, ETHER_ADDR_LEN);
353	for (i = j = 0; mac[i] != '\0' && i < maxlen; i++) {
354		if (mac[i] >= '0' && mac[i] <= '9')
355			digit = mac[i] - '0';
356		else if (mac[i] >= 'A' && mac[i] <= 'F')
357			digit = mac[i] - 'A' + 10;
358		else if (mac[i] >= 'a' && mac[i] <= 'f')
359			digit = mac[i] - 'a' + 10;
360		else
361			continue;
362		if ((j & 1) == 0)
363			digit <<= 4;
364		lladdr[j++/2] |= digit;
365	}
366
367	return (i);
368}
369
/*
 * Read the board's "specs" string block out of the register window and
 * extract the MAC address (MAC=...) and part number (PC=...) from it.
 * Returns 0 on success, 1 if the header lies outside the mapped window.
 */
int
myx_query(struct myx_softc *sc, char *part, size_t partlen)
{
	struct myx_gen_hdr hdr;
	u_int32_t	offset;
	u_int8_t	strings[MYX_STRING_SPECS_SIZE];
	u_int		i, len, maxlen;

	/* MYX_HEADER_POS holds the offset of the generic header. */
	myx_read(sc, MYX_HEADER_POS, &offset, sizeof(offset));
	offset = betoh32(offset);
	if (offset + sizeof(hdr) > sc->sc_mems) {
		printf(": header is outside register window\n");
		return (1);
	}

	myx_read(sc, offset, &hdr, sizeof(hdr));
	offset = betoh32(hdr.fw_specs);
	len = min(betoh32(hdr.fw_specs_len), sizeof(strings));
	/* NOTE(review): fw_specs is not range-checked against sc_mems the
	 * way the header offset was -- presumably trusted; confirm. */

	bus_space_read_region_1(sc->sc_memt, sc->sc_memh, offset, strings, len);

	/*
	 * The block is a sequence of NUL-terminated "KEY=value" strings;
	 * an empty string ends the list.  The inner loop skips to the end
	 * of the current string after a key has been consumed.
	 */
	for (i = 0; i < len; i++) {
		maxlen = len - i;
		if (strings[i] == '\0')
			break;
		if (maxlen > 4 && memcmp("MAC=", &strings[i], 4) == 0) {
			i += 4;
			i += myx_ether_aton(&strings[i],
			    sc->sc_ac.ac_enaddr, maxlen);
		} else if (maxlen > 3 && memcmp("PC=", &strings[i], 3) == 0) {
			i += 3;
			i += strlcpy(part, &strings[i], min(maxlen, partlen));
		}
		for (; i < len; i++) {
			if (strings[i] == '\0')
				break;
		}
	}

	return (0);
}
411
/*
 * Load a firmware image from disk, validate its header (type and
 * version string), copy it into the card's SRAM in 256-byte chunks and
 * boot it.  Returns 0 on success, 1 on any failure; the firmware
 * buffer is always freed.
 */
int
myx_loadfirmware(struct myx_softc *sc, const char *filename)
{
	struct myx_gen_hdr	hdr;
	u_int8_t		*fw;
	size_t			fwlen;
	u_int32_t		offset;
	u_int			i, ret = 1;

	if (loadfirmware(filename, &fw, &fwlen) != 0) {
		printf("%s: could not load firmware %s\n", DEVNAME(sc),
		    filename);
		return (1);
	}
	if (fwlen > MYX_SRAM_SIZE || fwlen < MYXFW_MIN_LEN) {
		printf("%s: invalid firmware %s size\n", DEVNAME(sc), filename);
		goto err;
	}

	/* The header offset is stored at a fixed position in the image. */
	memcpy(&offset, fw + MYX_HEADER_POS, sizeof(offset));
	offset = betoh32(offset);
	if ((offset + sizeof(hdr)) > fwlen) {
		printf("%s: invalid firmware %s\n", DEVNAME(sc), filename);
		goto err;
	}

	memcpy(&hdr, fw + offset, sizeof(hdr));
	DPRINTF(MYXDBG_INIT, "%s: "
	    "fw hdr off %u, length %u, type 0x%x, version %s\n",
	    DEVNAME(sc), offset, betoh32(hdr.fw_hdrlength),
	    betoh32(hdr.fw_type), hdr.fw_version);

	/* Reject anything that is not the expected ethernet firmware. */
	if (betoh32(hdr.fw_type) != MYXFW_TYPE_ETH ||
	    memcmp(MYXFW_VER, hdr.fw_version, strlen(MYXFW_VER)) != 0) {
		printf("%s: invalid firmware type 0x%x version %s\n",
		    DEVNAME(sc), betoh32(hdr.fw_type), hdr.fw_version);
		goto err;
	}

	/* Write the firmware to the card's SRAM */
	for (i = 0; i < fwlen; i += 256)
		myx_write(sc, i + MYX_FW, fw + i, min(256, fwlen - i));

	if (myx_boot(sc, fwlen) != 0) {
		printf("%s: failed to boot %s\n", DEVNAME(sc), filename);
		goto err;
	}

	ret = 0;

err:
	free(fw, M_DEVBUF, fwlen);
	return (ret);
}
466
/*
 * Deferred attach, run at mountroot time when the firmware file is
 * readable: allocate the command buffer, load and reset the firmware,
 * pick aligned vs. unaligned firmware, establish the interrupt and
 * attach the network interface.
 */
void
myx_attachhook(struct device *self)
{
	struct myx_softc	*sc = (struct myx_softc *)self;
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	struct myx_cmd		 mc;

	/* Allocate command DMA memory */
	if (myx_dmamem_alloc(sc, &sc->sc_cmddma, MYXALIGN_CMD,
	    MYXALIGN_CMD) != 0) {
		printf("%s: failed to allocate command DMA memory\n",
		    DEVNAME(sc));
		return;
	}

	/* Try the firmware stored on disk */
	if (myx_loadfirmware(sc, MYXFW_ALIGNED) != 0) {
		/* error printed by myx_loadfirmware */
		goto freecmd;
	}

	memset(&mc, 0, sizeof(mc));

	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
		goto freecmd;
	}

	/* aligned firmware supports 4k tx boundaries; may be lowered
	 * to 2048 by myx_probe_firmware() if it falls back. */
	sc->sc_tx_boundary = 4096;

	if (myx_probe_firmware(sc) != 0) {
		printf("%s: error while selecting firmware\n", DEVNAME(sc));
		goto freecmd;
	}

	sc->sc_irqh = pci_intr_establish(sc->sc_pc, sc->sc_ih,
	    IPL_NET | IPL_MPSAFE, myx_intr, sc, DEVNAME(sc));
	if (sc->sc_irqh == NULL) {
		printf("%s: unable to establish interrupt\n", DEVNAME(sc));
		goto freecmd;
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = myx_ioctl;
	ifp->if_start = myx_start;
	ifp->if_watchdog = myx_watchdog;
	ifp->if_hardmtu = 9000;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	/* real queue length is set in myx_up() once the ring size is known */
	IFQ_SET_MAXLEN(&ifp->if_snd, 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
#if 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

	/* 10GbaseT-ish fixed media; changes are ignored */
	ifmedia_init(&sc->sc_media, 0, myx_media_change, myx_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	return;

freecmd:
	myx_dmamem_free(sc, &sc->sc_cmddma);
}
539
/*
 * Decide whether the aligned firmware already loaded can be kept.  For
 * narrow links (width <= 4) it always can.  Otherwise run the card's
 * unaligned DMA self tests against a scratch buffer; if any test fails,
 * fall back to the unaligned firmware and a 2048-byte tx boundary.
 * Returns 0 on success (either firmware), 1 on failure.
 */
int
myx_probe_firmware(struct myx_softc *sc)
{
	struct myx_dmamem test;
	bus_dmamap_t map;
	struct myx_cmd mc;
	pcireg_t csr;
	int offset;
	int width = 0;

	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    offset + PCI_PCIE_LCSR);
		/* negotiated link width field of the link status */
		width = (csr >> 20) & 0x3f;

		if (width <= 4) {
			/*
			 * if the link width is 4 or less we can use the
			 * aligned firmware.
			 */
			return (0);
		}
	}

	if (myx_dmamem_alloc(sc, &test, 4096, 4096) != 0)
		return (1);
	map = test.mxm_map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* mc_data2 encodes the test length and mode in the high/low
	 * halves -- presumably read/write/both; confirm against the
	 * Myri-10G firmware interface spec. */
	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10000);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x1);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA write test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10001);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read/write test failed\n", DEVNAME(sc));
		goto fail;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);
	return (0);

fail:
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);

	/* aligned firmware failed the DMA tests: use the unaligned one */
	if (myx_loadfirmware(sc, MYXFW_UNALIGNED) != 0) {
		printf("%s: unable to load %s\n", DEVNAME(sc),
		    MYXFW_UNALIGNED);
		return (1);
	}

	sc->sc_tx_boundary = 2048;

	printf("%s: using unaligned firmware\n", DEVNAME(sc));
	return (0);
}
620
/*
 * Read len bytes from the register window at off into ptr.  The read
 * barrier is issued first so the region read observes device state in
 * order; do not reorder these two calls.
 */
void
myx_read(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
{
	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
}
628
/*
 * Write len bytes from ptr into the register window at off, then issue
 * a write barrier so the data is pushed out before subsequent accesses;
 * do not reorder these two calls.
 */
void
myx_write(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
{
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
	    BUS_SPACE_BARRIER_WRITE);
}
636
637int
638myx_dmamem_alloc(struct myx_softc *sc, struct myx_dmamem *mxm,
639    bus_size_t size, u_int align)
640{
641	mxm->mxm_size = size;
642
643	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
644	    mxm->mxm_size, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
645	    &mxm->mxm_map) != 0)
646		return (1);
647	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
648	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
649	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
650		goto destroy;
651	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
652	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
653		goto free;
654	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
655	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
656		goto unmap;
657
658	return (0);
659 unmap:
660	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
661 free:
662	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
663 destroy:
664	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
665	return (1);
666}
667
/*
 * Tear down a region created by myx_dmamem_alloc(); the unload, unmap,
 * free, destroy order is the reverse of construction and must stay so.
 */
void
myx_dmamem_free(struct myx_softc *sc, struct myx_dmamem *mxm)
{
	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
}
676
/*
 * Issue a command to the firmware and poll (up to ~20ms) for its
 * response, which the card DMAs into the command buffer.  On success
 * the response data word is returned through *r (if non-NULL).
 * Returns 0 on success, -1 if the command failed or timed out (a
 * timeout leaves mr_result at 0xffffffff, which is non-zero and thus
 * also reported as failure).
 */
int
myx_cmd(struct myx_softc *sc, u_int32_t cmd, struct myx_cmd *mc, u_int32_t *r)
{
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	struct myx_response	*mr;
	u_int			 i;
	u_int32_t		 result, data;
#ifdef MYX_DEBUG
	static const char *cmds[MYXCMD_MAX] = {
		"CMD_NONE",
		"CMD_RESET",
		"CMD_GET_VERSION",
		"CMD_SET_INTRQDMA",
		"CMD_SET_BIGBUFSZ",
		"CMD_SET_SMALLBUFSZ",
		"CMD_GET_TXRINGOFF",
		"CMD_GET_RXSMALLRINGOFF",
		"CMD_GET_RXBIGRINGOFF",
		"CMD_GET_INTRACKOFF",
		"CMD_GET_INTRDEASSERTOFF",
		"CMD_GET_TXRINGSZ",
		"CMD_GET_RXRINGSZ",
		"CMD_SET_INTRQSZ",
		"CMD_SET_IFUP",
		"CMD_SET_IFDOWN",
		"CMD_SET_MTU",
		"CMD_GET_INTRCOALDELAYOFF",
		"CMD_SET_STATSINTVL",
		"CMD_SET_STATSDMA_OLD",
		"CMD_SET_PROMISC",
		"CMD_UNSET_PROMISC",
		"CMD_SET_LLADDR",
		"CMD_SET_FC",
		"CMD_UNSET_FC",
		"CMD_DMA_TEST",
		"CMD_SET_ALLMULTI",
		"CMD_UNSET_ALLMULTI",
		"CMD_SET_MCASTGROUP",
		"CMD_UNSET_MCASTGROUP",
		"CMD_UNSET_MCAST",
		"CMD_SET_STATSDMA",
		"CMD_UNALIGNED_DMA_TEST",
		"CMD_GET_UNALIGNED_STATUS"
	};
#endif

	/* tell the card where to DMA the response */
	mc->mc_cmd = htobe32(cmd);
	mc->mc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc->mc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));

	/* sentinel value: overwritten by the card when it responds */
	mr = (struct myx_response *)sc->sc_cmddma.mxm_kva;
	mr->mr_result = 0xffffffff;

	/* Send command */
	myx_write(sc, MYX_CMD, (u_int8_t *)mc, sizeof(struct myx_cmd));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < 20; i++) {
		/* re-sync around each peek at the device-written buffer */
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		result = betoh32(mr->mr_result);
		data = betoh32(mr->mr_data);

		if (result != 0xffffffff)
			break;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): %s completed, i %d, "
	    "result 0x%x, data 0x%x (%u)\n", DEVNAME(sc), __func__,
	    cmds[cmd], i, result, data, data);

	if (result != 0)
		return (-1);

	if (r != NULL)
		*r = data;
	return (0);
}
760
/*
 * Boot the firmware previously copied to SRAM: hand the card a boot
 * descriptor and poll (up to ~200ms) for the 0xffffffff completion
 * token it DMAs back into the command buffer.  Returns 0 on success,
 * 1 on timeout.
 */
int
myx_boot(struct myx_softc *sc, u_int32_t length)
{
	struct myx_bootcmd	 bc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	u_int32_t		*status;
	u_int			 i, ret = 1;

	memset(&bc, 0, sizeof(bc));
	bc.bc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	bc.bc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	bc.bc_result = 0xffffffff;
	bc.bc_offset = htobe32(MYX_FW_BOOT);
	/* skip the first 8 bytes of the image -- presumably the jump
	 * vector the bootloader provides itself; confirm against spec */
	bc.bc_length = htobe32(length - 8);
	bc.bc_copyto = htobe32(8);
	bc.bc_jumpto = htobe32(0);

	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	/* Send command */
	myx_write(sc, MYX_BOOT, &bc, sizeof(bc));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < 200; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s: boot completed, i %d, result %d\n",
	    DEVNAME(sc), i, ret);

	return (ret);
}
804
/*
 * Enable or disable the dummy RDMA engine, polling (up to ~20ms) for
 * the 0xffffffff completion token in the command buffer.  Returns 0 on
 * success, 1 on timeout.
 */
int
myx_rdma(struct myx_softc *sc, u_int do_enable)
{
	struct myx_rdmacmd	 rc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	bus_dmamap_t		 pad = sc->sc_paddma.mxm_map;
	u_int32_t		*status;
	int			 ret = 1;
	u_int			 i;

	/*
	 * It is required to setup a _dummy_ RDMA address. It also makes
	 * some PCI-E chipsets resend dropped messages.
	 */
	/* NOTE(review): rc is not memset; every named field is assigned
	 * below, but any padding in struct myx_rdmacmd goes out
	 * uninitialized -- confirm the firmware ignores it. */
	rc.rc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	rc.rc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	rc.rc_result = 0xffffffff;
	rc.rc_rdma_high = htobe32(MYX_ADDRHIGH(pad->dm_segs[0].ds_addr));
	rc.rc_rdma_low = htobe32(MYX_ADDRLOW(pad->dm_segs[0].ds_addr));
	rc.rc_enable = htobe32(do_enable);

	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Send command */
	myx_write(sc, MYX_RDMA, &rc, sizeof(rc));

	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): dummy RDMA %s, i %d, result 0x%x\n",
	    DEVNAME(sc), __func__,
	    do_enable ? "enabled" : "disabled", i, betoh32(*status));

	return (ret);
}
855
/* Media is fixed on this hardware; media change requests are accepted
 * but have no effect. */
int
myx_media_change(struct ifnet *ifp)
{
	/* ignore */
	return (0);
}
862
/*
 * Report media status: read the link state word out of the
 * device-written status block (with sync bracketing), refresh the
 * driver's link state from it, and fill in the ifmedia request.
 */
void
myx_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	u_int32_t		 sts;

	imr->ifm_active = IFM_ETHER | IFM_AUTO;
	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		imr->ifm_status = 0;
		return;
	}

	/* snapshot the link state between POSTREAD/PREREAD syncs */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	sts = sc->sc_sts->ms_linkstate;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	myx_link_state(sc, sts);

	imr->ifm_status = IFM_AVALID;
	if (!LINK_STATE_IS_UP(ifp->if_link_state))
		return;

	/* link up: 10G full duplex with flow control in both directions */
	imr->ifm_active |= IFM_FDX | IFM_FLOW |
	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
	imr->ifm_status |= IFM_ACTIVE;
}
892
893void
894myx_link_state(struct myx_softc *sc, u_int32_t sts)
895{
896	struct ifnet		*ifp = &sc->sc_ac.ac_if;
897	int			 link_state = LINK_STATE_DOWN;
898
899	if (betoh32(sts) == MYXSTS_LINKUP)
900		link_state = LINK_STATE_FULL_DUPLEX;
901	if (ifp->if_link_state != link_state) {
902		ifp->if_link_state = link_state;
903		if_link_state_change(ifp);
904		ifp->if_baudrate = LINK_STATE_IS_UP(ifp->if_link_state) ?
905		    IF_Gbps(10) : 0;
906	}
907}
908
/* Intentionally empty watchdog handler; tx timeouts are not acted on. */
void
myx_watchdog(struct ifnet *ifp)
{
	return;
}
914
/*
 * Interface ioctl handler.  Runs at splnet; ENETRESET from any case is
 * translated into a multicast-filter reload (myx_iff) when the
 * interface is up and running.
 */
int
myx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *)data;
	int			 s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		/* bring the interface up/down to match IFF_UP */
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;
			else
				myx_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				myx_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = myx_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			myx_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
964
965int
966myx_rxrinfo(struct myx_softc *sc, struct if_rxrinfo *ifri)
967{
968	struct if_rxring_info ifr[2];
969
970	memset(ifr, 0, sizeof(ifr));
971
972	ifr[0].ifr_size = MYX_RXSMALL_SIZE;
973	ifr[0].ifr_info = sc->sc_rx_ring[0].mrr_rxr;
974
975	ifr[1].ifr_size = MYX_RXBIG_SIZE;
976	ifr[1].ifr_info = sc->sc_rx_ring[1].mrr_rxr;
977
978	return (if_rxr_info_ioctl(ifri, nitems(ifr), ifr));
979}
980
981void
982myx_up(struct myx_softc *sc)
983{
984	struct ifnet		*ifp = &sc->sc_ac.ac_if;
985	struct myx_cmd		mc;
986	bus_dmamap_t		map;
987	size_t			size;
988	u_int			maxpkt;
989	u_int32_t		r;
990
991	memset(&mc, 0, sizeof(mc));
992	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
993		printf("%s: failed to reset the device\n", DEVNAME(sc));
994		return;
995	}
996
997	if (myx_dmamem_alloc(sc, &sc->sc_zerodma,
998	    64, MYXALIGN_CMD) != 0) {
999		printf("%s: failed to allocate zero pad memory\n",
1000		    DEVNAME(sc));
1001		return;
1002	}
1003	memset(sc->sc_zerodma.mxm_kva, 0, 64);
1004	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1005	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1006
1007	if (myx_dmamem_alloc(sc, &sc->sc_paddma,
1008	    MYXALIGN_CMD, MYXALIGN_CMD) != 0) {
1009		printf("%s: failed to allocate pad DMA memory\n",
1010		    DEVNAME(sc));
1011		goto free_zero;
1012	}
1013	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1014	    sc->sc_paddma.mxm_map->dm_mapsize,
1015	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1016
1017	if (myx_rdma(sc, MYXRDMA_ON) != 0) {
1018		printf("%s: failed to enable dummy RDMA\n", DEVNAME(sc));
1019		goto free_pad;
1020	}
1021
1022	if (myx_cmd(sc, MYXCMD_GET_RXRINGSZ, &mc, &r) != 0) {
1023		printf("%s: unable to get rx ring size\n", DEVNAME(sc));
1024		goto free_pad;
1025	}
1026	sc->sc_rx_ring_count = r / sizeof(struct myx_rx_desc);
1027
1028	memset(&mc, 0, sizeof(mc));
1029	if (myx_cmd(sc, MYXCMD_GET_TXRINGSZ, &mc, &r) != 0) {
1030		printf("%s: unable to get tx ring size\n", DEVNAME(sc));
1031		goto free_pad;
1032	}
1033	sc->sc_tx_ring_prod = 0;
1034	sc->sc_tx_ring_cons = 0;
1035	sc->sc_tx_ring_count = r / sizeof(struct myx_tx_desc);
1036	sc->sc_tx_nsegs = min(16, sc->sc_tx_ring_count / 4); /* magic */
1037	sc->sc_tx_count = 0;
1038	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_count - 1);
1039	IFQ_SET_READY(&ifp->if_snd);
1040
1041	/* Allocate Interrupt Queue */
1042
1043	sc->sc_intrq_count = sc->sc_rx_ring_count * 2;
1044	sc->sc_intrq_idx = 0;
1045
1046	size = sc->sc_intrq_count * sizeof(struct myx_intrq_desc);
1047	if (myx_dmamem_alloc(sc, &sc->sc_intrq_dma,
1048	    size, MYXALIGN_DATA) != 0) {
1049		goto free_pad;
1050	}
1051	sc->sc_intrq = (struct myx_intrq_desc *)sc->sc_intrq_dma.mxm_kva;
1052	map = sc->sc_intrq_dma.mxm_map;
1053	memset(sc->sc_intrq, 0, size);
1054	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1055	    BUS_DMASYNC_PREREAD);
1056
1057	memset(&mc, 0, sizeof(mc));
1058	mc.mc_data0 = htobe32(size);
1059	if (myx_cmd(sc, MYXCMD_SET_INTRQSZ, &mc, NULL) != 0) {
1060		printf("%s: failed to set intrq size\n", DEVNAME(sc));
1061		goto free_intrq;
1062	}
1063
1064	memset(&mc, 0, sizeof(mc));
1065	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1066	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1067	if (myx_cmd(sc, MYXCMD_SET_INTRQDMA, &mc, NULL) != 0) {
1068		printf("%s: failed to set intrq address\n", DEVNAME(sc));
1069		goto free_intrq;
1070	}
1071
1072	/*
1073	 * get interrupt offsets
1074	 */
1075
1076	memset(&mc, 0, sizeof(mc));
1077	if (myx_cmd(sc, MYXCMD_GET_INTRACKOFF, &mc,
1078	    &sc->sc_irqclaimoff) != 0) {
1079		printf("%s: failed to get IRQ ack offset\n", DEVNAME(sc));
1080		goto free_intrq;
1081	}
1082
1083	memset(&mc, 0, sizeof(mc));
1084	if (myx_cmd(sc, MYXCMD_GET_INTRDEASSERTOFF, &mc,
1085	    &sc->sc_irqdeassertoff) != 0) {
1086		printf("%s: failed to get IRQ deassert offset\n", DEVNAME(sc));
1087		goto free_intrq;
1088	}
1089
1090	memset(&mc, 0, sizeof(mc));
1091	if (myx_cmd(sc, MYXCMD_GET_INTRCOALDELAYOFF, &mc,
1092	    &sc->sc_irqcoaloff) != 0) {
1093		printf("%s: failed to get IRQ coal offset\n", DEVNAME(sc));
1094		goto free_intrq;
1095	}
1096
1097	/* Set an appropriate interrupt coalescing period */
1098	r = htobe32(MYX_IRQCOALDELAY);
1099	myx_write(sc, sc->sc_irqcoaloff, &r, sizeof(r));
1100
1101	if (myx_setlladdr(sc, MYXCMD_SET_LLADDR, LLADDR(ifp->if_sadl)) != 0) {
1102		printf("%s: failed to configure lladdr\n", DEVNAME(sc));
1103		goto free_intrq;
1104	}
1105
1106	memset(&mc, 0, sizeof(mc));
1107	if (myx_cmd(sc, MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1108		printf("%s: failed to disable promisc mode\n", DEVNAME(sc));
1109		goto free_intrq;
1110	}
1111
1112	memset(&mc, 0, sizeof(mc));
1113	if (myx_cmd(sc, MYXCMD_FC_DEFAULT, &mc, NULL) != 0) {
1114		printf("%s: failed to configure flow control\n", DEVNAME(sc));
1115		goto free_intrq;
1116	}
1117
1118	memset(&mc, 0, sizeof(mc));
1119	if (myx_cmd(sc, MYXCMD_GET_TXRINGOFF, &mc,
1120	    &sc->sc_tx_ring_offset) != 0) {
1121		printf("%s: unable to get tx ring offset\n", DEVNAME(sc));
1122		goto free_intrq;
1123	}
1124
1125	memset(&mc, 0, sizeof(mc));
1126	if (myx_cmd(sc, MYXCMD_GET_RXSMALLRINGOFF, &mc,
1127	    &sc->sc_rx_ring[MYX_RXSMALL].mrr_offset) != 0) {
1128		printf("%s: unable to get small rx ring offset\n", DEVNAME(sc));
1129		goto free_intrq;
1130	}
1131
1132	memset(&mc, 0, sizeof(mc));
1133	if (myx_cmd(sc, MYXCMD_GET_RXBIGRINGOFF, &mc,
1134	    &sc->sc_rx_ring[MYX_RXBIG].mrr_offset) != 0) {
1135		printf("%s: unable to get big rx ring offset\n", DEVNAME(sc));
1136		goto free_intrq;
1137	}
1138
1139	/* Allocate Interrupt Data */
1140	if (myx_dmamem_alloc(sc, &sc->sc_sts_dma,
1141	    sizeof(struct myx_status), MYXALIGN_DATA) != 0) {
1142		printf("%s: failed to allocate status DMA memory\n",
1143		    DEVNAME(sc));
1144		goto free_intrq;
1145	}
1146	sc->sc_sts = (struct myx_status *)sc->sc_sts_dma.mxm_kva;
1147	map = sc->sc_sts_dma.mxm_map;
1148	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1149	    BUS_DMASYNC_PREREAD);
1150
1151	memset(&mc, 0, sizeof(mc));
1152	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1153	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1154	mc.mc_data2 = htobe32(sizeof(struct myx_status));
1155	if (myx_cmd(sc, MYXCMD_SET_STATSDMA, &mc, NULL) != 0) {
1156		printf("%s: failed to set status DMA offset\n", DEVNAME(sc));
1157		goto free_sts;
1158	}
1159
1160	maxpkt = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1161
1162	memset(&mc, 0, sizeof(mc));
1163	mc.mc_data0 = htobe32(maxpkt);
1164	if (myx_cmd(sc, MYXCMD_SET_MTU, &mc, NULL) != 0) {
1165		printf("%s: failed to set MTU size %d\n", DEVNAME(sc), maxpkt);
1166		goto free_sts;
1167	}
1168
1169	if (myx_tx_init(sc, maxpkt) != 0)
1170		goto free_sts;
1171
1172	if (myx_rx_init(sc, MYX_RXSMALL, MCLBYTES) != 0)
1173		goto free_tx_ring;
1174
1175	if (myx_rx_fill(sc, &sc->sc_rx_ring[MYX_RXSMALL]) != 0)
1176		goto free_rx_ring_small;
1177
1178	if (myx_rx_init(sc, MYX_RXBIG, MYX_RXBIG_SIZE) != 0)
1179		goto empty_rx_ring_small;
1180
1181	if (myx_rx_fill(sc, &sc->sc_rx_ring[MYX_RXBIG]) != 0)
1182		goto free_rx_ring_big;
1183
1184	memset(&mc, 0, sizeof(mc));
1185	mc.mc_data0 = htobe32(MYX_RXSMALL_SIZE - ETHER_ALIGN);
1186	if (myx_cmd(sc, MYXCMD_SET_SMALLBUFSZ, &mc, NULL) != 0) {
1187		printf("%s: failed to set small buf size\n", DEVNAME(sc));
1188		goto empty_rx_ring_big;
1189	}
1190
1191	memset(&mc, 0, sizeof(mc));
1192	mc.mc_data0 = htobe32(16384);
1193	if (myx_cmd(sc, MYXCMD_SET_BIGBUFSZ, &mc, NULL) != 0) {
1194		printf("%s: failed to set big buf size\n", DEVNAME(sc));
1195		goto empty_rx_ring_big;
1196	}
1197
1198	sc->sc_state = MYX_S_RUNNING;
1199
1200	if (myx_cmd(sc, MYXCMD_SET_IFUP, &mc, NULL) != 0) {
1201		printf("%s: failed to start the device\n", DEVNAME(sc));
1202		goto empty_rx_ring_big;
1203	}
1204
1205	ifq_clr_oactive(&ifp->if_snd);
1206	SET(ifp->if_flags, IFF_RUNNING);
1207	myx_iff(sc);
1208	if_start(ifp);
1209
1210	return;
1211
1212empty_rx_ring_big:
1213	myx_rx_empty(sc, &sc->sc_rx_ring[MYX_RXBIG]);
1214free_rx_ring_big:
1215	myx_rx_free(sc, &sc->sc_rx_ring[MYX_RXBIG]);
1216empty_rx_ring_small:
1217	myx_rx_empty(sc, &sc->sc_rx_ring[MYX_RXSMALL]);
1218free_rx_ring_small:
1219	myx_rx_free(sc, &sc->sc_rx_ring[MYX_RXSMALL]);
1220free_tx_ring:
1221	myx_tx_free(sc);
1222free_sts:
1223	bus_dmamap_sync(sc->sc_dmat, sc->sc_sts_dma.mxm_map, 0,
1224	    sc->sc_sts_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1225	myx_dmamem_free(sc, &sc->sc_sts_dma);
1226free_intrq:
1227	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
1228	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1229	myx_dmamem_free(sc, &sc->sc_intrq_dma);
1230free_pad:
1231	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1232	    sc->sc_paddma.mxm_map->dm_mapsize,
1233	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1234	myx_dmamem_free(sc, &sc->sc_paddma);
1235
1236	memset(&mc, 0, sizeof(mc));
1237	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
1238		printf("%s: failed to reset the device\n", DEVNAME(sc));
1239	}
1240free_zero:
1241	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1242	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1243	myx_dmamem_free(sc, &sc->sc_zerodma);
1244}
1245
1246int
1247myx_setlladdr(struct myx_softc *sc, u_int32_t cmd, u_int8_t *addr)
1248{
1249	struct myx_cmd		 mc;
1250
1251	memset(&mc, 0, sizeof(mc));
1252	mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1253	    addr[2] << 8 | addr[3]);
1254	mc.mc_data1 = htobe32(addr[4] << 8 | addr[5]);
1255
1256	if (myx_cmd(sc, cmd, &mc, NULL) != 0) {
1257		printf("%s: failed to set the lladdr\n", DEVNAME(sc));
1258		return (-1);
1259	}
1260	return (0);
1261}
1262
1263void
1264myx_iff(struct myx_softc *sc)
1265{
1266	struct myx_cmd		mc;
1267	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1268	struct ether_multi	*enm;
1269	struct ether_multistep	step;
1270	u_int8_t *addr;
1271
1272	CLR(ifp->if_flags, IFF_ALLMULTI);
1273
1274	if (myx_cmd(sc, ISSET(ifp->if_flags, IFF_PROMISC) ?
1275	    MYXCMD_SET_PROMISC : MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1276		printf("%s: failed to configure promisc mode\n", DEVNAME(sc));
1277		return;
1278	}
1279
1280	if (myx_cmd(sc, MYXCMD_SET_ALLMULTI, &mc, NULL) != 0) {
1281		printf("%s: failed to enable ALLMULTI\n", DEVNAME(sc));
1282		return;
1283	}
1284
1285	if (myx_cmd(sc, MYXCMD_UNSET_MCAST, &mc, NULL) != 0) {
1286		printf("%s: failed to leave all mcast groups \n", DEVNAME(sc));
1287		return;
1288	}
1289
1290	if (ISSET(ifp->if_flags, IFF_PROMISC) ||
1291	    sc->sc_ac.ac_multirangecnt > 0) {
1292		SET(ifp->if_flags, IFF_ALLMULTI);
1293		return;
1294	}
1295
1296	ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
1297	while (enm != NULL) {
1298		addr = enm->enm_addrlo;
1299
1300		memset(&mc, 0, sizeof(mc));
1301		mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
1302		    addr[2] << 8 | addr[3]);
1303		mc.mc_data1 = htobe32(addr[4] << 24 | addr[5] << 16);
1304		if (myx_cmd(sc, MYXCMD_SET_MCASTGROUP, &mc, NULL) != 0) {
1305			printf("%s: failed to join mcast group\n", DEVNAME(sc));
1306			return;
1307		}
1308
1309		ETHER_NEXT_MULTI(step, enm);
1310	}
1311
1312	memset(&mc, 0, sizeof(mc));
1313	if (myx_cmd(sc, MYXCMD_UNSET_ALLMULTI, &mc, NULL) != 0) {
1314		printf("%s: failed to disable ALLMULTI\n", DEVNAME(sc));
1315		return;
1316	}
1317}
1318
/*
 * Bring the interface down: tell the firmware to stop, wait for the
 * interrupt handler to observe completion, then tear down and free all
 * rings and DMA memory allocated by myx_up().
 */
void
myx_down(struct myx_softc *sc)
{
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	volatile struct myx_status *sts = sc->sc_sts;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	struct sleep_state	 sls;
	struct myx_cmd		 mc;
	int			 s;
	int			 ring;

	CLR(ifp->if_flags, IFF_RUNNING);

	/*
	 * Snapshot the firmware's link-down counter.  myx_intr() detects
	 * completion of the IFDOWN command by seeing ms_linkdown change
	 * from this value while sc_state is MYX_S_DOWN.
	 */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	sc->sc_linkdown = sts->ms_linkdown;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* publish the state change before the command is issued */
	sc->sc_state = MYX_S_DOWN;
	membar_producer();

	memset(&mc, 0, sizeof(mc));
	(void)myx_cmd(sc, MYXCMD_SET_IFDOWN, &mc, NULL);

	/* wait for myx_intr() to move the state to MYX_S_OFF */
	while (sc->sc_state != MYX_S_OFF) {
		sleep_setup(&sls, sts, PWAIT, "myxdown");
		membar_consumer();
		sleep_finish(&sls, sc->sc_state != MYX_S_OFF);
	}

	/* the device is stopped, so the link state is no longer known */
	s = splnet();
	if (ifp->if_link_state != LINK_STATE_UNKNOWN) {
		ifp->if_link_state = LINK_STATE_UNKNOWN;
		ifp->if_baudrate = 0;
		if_link_state_change(ifp);
	}
	splx(s);

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
	}

	/* drain any concurrent if_start() before freeing the tx slots */
	ifq_clr_oactive(&ifp->if_snd);
	ifq_barrier(&ifp->if_snd);

	for (ring = 0; ring < 2; ring++) {
		struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];

		timeout_del(&mrr->mrr_refill);
		myx_rx_empty(sc, mrr);
		myx_rx_free(sc, mrr);
	}

	myx_tx_empty(sc);
	myx_tx_free(sc);

	/* the sleep shizz above already synced this dmamem */
	myx_dmamem_free(sc, &sc->sc_sts_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_intrq_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
	    sc->sc_paddma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_paddma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_zerodma);
}
1392
/*
 * Write the second and subsequent tx descriptors of one packet into the
 * NIC tx ring at slot idx.  The first descriptor is written separately
 * by myx_start() so the whole batch can be committed last.  Frames
 * shorter than 60 bytes get one extra descriptor pointing into the
 * zero-filled DMA buffer to pad them to the minimum frame size.
 */
void
myx_write_txd_tail(struct myx_softc *sc, struct myx_slot *ms, u_int8_t flags,
    u_int32_t offset, u_int idx)
{
	struct myx_tx_desc		txd;
	bus_dmamap_t			zmap = sc->sc_zerodma.mxm_map;
	bus_dmamap_t			map = ms->ms_map;
	int				i;

	/* one descriptor per DMA segment after the first */
	for (i = 1; i < map->dm_nsegs; i++) {
		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(map->dm_segs[i].ds_addr);
		txd.tx_length = htobe16(map->dm_segs[i].ds_len);
		txd.tx_flags = flags;

		myx_bus_space_write(sc,
		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
		    &txd, sizeof(txd));
	}

	/* pad runt frames */
	if (map->dm_mapsize < 60) {
		/* i == dm_nsegs here, so this lands after the last segment */
		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(zmap->dm_segs[0].ds_addr);
		txd.tx_length = htobe16(60 - map->dm_mapsize);
		txd.tx_flags = flags;

		myx_bus_space_write(sc,
		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
		    &txd, sizeof(txd));
	}
}
1425
/*
 * Transmit start routine.  Dequeues as many packets as fit in the free
 * space of the NIC tx ring and loads them for DMA, then writes the
 * descriptors for all packets after the first.  The very first
 * descriptor of the batch is written last (and its final word last of
 * all, between write barriers) so the NIC does not begin processing
 * the batch until every descriptor is in place.
 */
void
myx_start(struct ifnet *ifp)
{
	struct myx_tx_desc		txd;
	struct myx_softc		*sc = ifp->if_softc;
	struct myx_slot			*ms;
	bus_dmamap_t			map;
	struct mbuf			*m;
	u_int32_t			offset = sc->sc_tx_ring_offset;
	u_int				idx, cons, prod;
	u_int				free, used;
	u_int8_t			flags;

	idx = sc->sc_tx_ring_prod;

	/* figure out space */
	free = sc->sc_tx_ring_cons;
	if (free <= idx)
		free += sc->sc_tx_ring_count;
	free -= idx;

	cons = prod = sc->sc_tx_prod;

	used = 0;

	/*
	 * Pass 1: dequeue packets and load their DMA maps.  A packet may
	 * need up to sc_tx_nsegs descriptors plus one pad descriptor if
	 * it is a runt (< 60 bytes), so that is what gets reserved.
	 */
	for (;;) {
		if (used + sc->sc_tx_nsegs + 1 > free) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		ms = &sc->sc_tx_slots[prod];

		if (myx_load_mbuf(sc, ms, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		map = ms->ms_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		used += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);

		if (++prod >= sc->sc_tx_ring_count)
			prod = 0;
	}

	/* nothing was dequeued */
	if (cons == prod)
		return;

	ms = &sc->sc_tx_slots[cons];

	/*
	 * Pass 2: write the descriptors for every packet except the
	 * first one dequeued; idx walks the hardware ring by the number
	 * of descriptors each packet occupies.
	 */
	for (;;) {
		idx += ms->ms_map->dm_nsegs +
		    (ms->ms_map->dm_mapsize < 60 ? 1 : 0);
		if (idx >= sc->sc_tx_ring_count)
			idx -= sc->sc_tx_ring_count;

		if (++cons >= sc->sc_tx_ring_count)
			cons = 0;

		if (cons == prod)
			break;

		ms = &sc->sc_tx_slots[cons];
		map = ms->ms_map;

		flags = MYXTXD_FLAGS_NO_TSO;
		if (map->dm_mapsize < 1520)
			flags |= MYXTXD_FLAGS_SMALL;

		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
		txd.tx_length = htobe16(map->dm_segs[0].ds_len);
		txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
		txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;
		myx_bus_space_write(sc,
		    offset + sizeof(txd) * idx, &txd, sizeof(txd));

		myx_write_txd_tail(sc, ms, flags, offset, idx);
	}

	/* go back and post first packet */
	ms = &sc->sc_tx_slots[sc->sc_tx_prod];
	map = ms->ms_map;

	flags = MYXTXD_FLAGS_NO_TSO;
	if (map->dm_mapsize < 1520)
		flags |= MYXTXD_FLAGS_SMALL;

	memset(&txd, 0, sizeof(txd));
	txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
	txd.tx_length = htobe16(map->dm_segs[0].ds_len);
	txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
	txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;

	/* make sure the first descriptor is seen after the others */
	myx_write_txd_tail(sc, ms, flags, offset, sc->sc_tx_ring_prod);

	/* write all of the first descriptor except its last word ... */
	myx_bus_space_write(sc,
	    offset + sizeof(txd) * sc->sc_tx_ring_prod, &txd,
	    sizeof(txd) - sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh, offset,
	    sizeof(txd) * sc->sc_tx_ring_count, BUS_SPACE_BARRIER_WRITE);

	/* ... then the final word, which completes the descriptor */
	myx_bus_space_write(sc,
	    offset + sizeof(txd) * (sc->sc_tx_ring_prod + 1) -
	    sizeof(myx_bus_t),
	    (u_int8_t *)&txd + sizeof(txd) - sizeof(myx_bus_t),
	    sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh,
	    offset + sizeof(txd) * sc->sc_tx_ring_prod, sizeof(txd),
	    BUS_SPACE_BARRIER_WRITE);

	/* commit */
	sc->sc_tx_ring_prod = idx;
	sc->sc_tx_prod = prod;
}
1557
/*
 * Load an mbuf chain into the slot's transmit DMA map.  If the chain
 * has too many segments, defragment it into a single cluster and retry
 * once.  Returns 0 on success (slot takes ownership of m), 1 on
 * failure (caller frees m).
 */
int
myx_load_mbuf(struct myx_softc *sc, struct myx_slot *ms, struct mbuf *m)
{
	bus_dma_tag_t			dmat = sc->sc_dmat;
	bus_dmamap_t			dmap = ms->ms_map;

	switch (bus_dmamap_load_mbuf(dmat, dmap, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
	case 0:
		break;

	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(dmat, dmap, m,
		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
			break;
		/* FALLTHROUGH */
	default:
		return (1);
	}

	ms->ms_m = m;
	return (0);
}
1581
/*
 * Interrupt handler.  The firmware communicates through the DMA'd
 * status block: ms_isvalid flags a pending interrupt, ms_txdonecnt
 * reports completed transmits, and ms_statusupdated/ms_linkdown carry
 * link and shutdown events.  Returns 0 if the interrupt was not ours.
 */
int
myx_intr(void *arg)
{
	struct myx_softc	*sc = (struct myx_softc *)arg;
	volatile struct myx_status *sts = sc->sc_sts;
	enum myx_state		 state;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	u_int32_t		 data;
	u_int8_t		 valid = 0;

	state = sc->sc_state;
	if (state == MYX_S_OFF)
		return (0);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	/* not our interrupt if the status block is not marked valid */
	valid = sts->ms_isvalid;
	if (valid == 0x0) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		return (0);
	}

	if (sc->sc_intx) {
		/* legacy INTx: explicitly deassert the interrupt line */
		data = htobe32(0);
		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqdeassertoff, &data, sizeof(data));
	}
	sts->ms_isvalid = 0;

	/*
	 * Re-read the tx done count until the firmware stops re-marking
	 * the status block valid, so we pick up updates that raced with
	 * clearing ms_isvalid above.
	 */
	do {
		data = sts->ms_txdonecnt;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE |
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	} while (sts->ms_isvalid);

	data = betoh32(data);
	if (data != sc->sc_tx_count)
		myx_txeof(sc, data);

	/* acknowledge the interrupt to the firmware */
	data = htobe32(3);
	if (valid & 0x1) {
		myx_rxeof(sc);

		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqclaimoff, &data, sizeof(data));
	}
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
	    sc->sc_irqclaimoff + sizeof(data), &data, sizeof(data));

	if (sts->ms_statusupdated) {
		/*
		 * A change in ms_linkdown while we are in MYX_S_DOWN
		 * means the IFDOWN command completed; move to MYX_S_OFF
		 * and wake the thread sleeping in myx_down().
		 */
		if (state == MYX_S_DOWN &&
		    sc->sc_linkdown != sts->ms_linkdown) {
			sc->sc_state = MYX_S_OFF;
			membar_producer();
			wakeup(sts);
		} else {
			data = sts->ms_linkstate;
			if (data != 0xffffffff) {
				KERNEL_LOCK();
				myx_link_state(sc, data);
				KERNEL_UNLOCK();
			}
		}
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return (1);
}
1656
1657void
1658myx_refill(void *xmrr)
1659{
1660	struct myx_rx_ring *mrr = xmrr;
1661	struct myx_softc *sc = mrr->mrr_softc;
1662
1663	myx_rx_fill(sc, mrr);
1664
1665	if (mrr->mrr_prod == mrr->mrr_cons)
1666		timeout_add(&mrr->mrr_refill, 1);
1667}
1668
/*
 * Reclaim completed transmit slots.  done_count is the firmware's
 * free-running (wrapping) count of transmitted packets; slots are
 * reclaimed until the local sc_tx_count catches up with it, which is
 * why the loop condition uses != rather than <.
 */
void
myx_txeof(struct myx_softc *sc, u_int32_t done_count)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct myx_slot *ms;
	bus_dmamap_t map;
	u_int idx, cons;

	idx = sc->sc_tx_ring_cons;
	cons = sc->sc_tx_cons;

	do {
		ms = &sc->sc_tx_slots[cons];
		map = ms->ms_map;

		/* account descriptors used, including runt padding */
		idx += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);

		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);
		m_freem(ms->ms_m);

		ifp->if_opackets++;

		if (++cons >= sc->sc_tx_ring_count)
			cons = 0;
	} while (++sc->sc_tx_count != done_count);

	if (idx >= sc->sc_tx_ring_count)
		idx -= sc->sc_tx_ring_count;

	sc->sc_tx_ring_cons = idx;
	sc->sc_tx_cons = cons;

	/* ring space was freed; restart the send queue if it was full */
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}
1706
/*
 * Receive completion.  The firmware posts one interrupt-queue entry
 * per received packet; a non-zero iq_length marks a valid entry.
 * Packets of at most MYX_RXSMALL_SIZE - ETHER_ALIGN bytes came from
 * the small ring (that is the buffer size programmed in myx_up()),
 * larger ones from the big ring.  Consumed slots are refilled and the
 * packets handed to the network stack in one batch.
 */
void
myx_rxeof(struct myx_softc *sc)
{
	static const struct myx_intrq_desc zerodesc = { 0, 0 };
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct myx_rx_ring *mrr;
	struct myx_slot *ms;
	struct mbuf *m;
	int ring;
	u_int rxfree[2] = { 0 , 0 };
	u_int len;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	while ((len = betoh16(sc->sc_intrq[sc->sc_intrq_idx].iq_length)) != 0) {
		/* clear the entry so it can be reused by the firmware */
		sc->sc_intrq[sc->sc_intrq_idx] = zerodesc;

		if (++sc->sc_intrq_idx >= sc->sc_intrq_count)
			sc->sc_intrq_idx = 0;

		/* the packet length tells us which ring it came from */
		ring = (len <= (MYX_RXSMALL_SIZE - ETHER_ALIGN)) ?
		    MYX_RXSMALL : MYX_RXBIG;

		mrr = &sc->sc_rx_ring[ring];
		ms = &mrr->mrr_slots[mrr->mrr_cons];

		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
			mrr->mrr_cons = 0;

		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);

		m = ms->ms_m;
		/* skip the alignment padding in front of the frame */
		m->m_data += ETHER_ALIGN;
		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);

		rxfree[ring]++;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* return the consumed slots and refill each affected ring */
	for (ring = MYX_RXSMALL; ring <= MYX_RXBIG; ring++) {
		if (rxfree[ring] == 0)
			continue;

		mrr = &sc->sc_rx_ring[ring];

		if_rxr_put(&mrr->mrr_rxr, rxfree[ring]);
		myx_rx_fill(sc, mrr);
		/* ring still empty: retry the refill from a timeout */
		if (mrr->mrr_prod == mrr->mrr_cons)
			timeout_add(&mrr->mrr_refill, 0);
	}

	if_input(ifp, &ml);
}
1768
/*
 * Fill up to "slots" rx slots with fresh mbufs and post their
 * descriptors to the NIC.  The first slot's descriptor is written
 * last, after a write barrier, so the firmware never sees the head of
 * a batch before the rest of it is in place.  Returns the number of
 * requested slots that were NOT filled.
 */
static int
myx_rx_fill_slots(struct myx_softc *sc, struct myx_rx_ring *mrr, u_int slots)
{
	struct myx_rx_desc rxd;
	struct myx_slot *ms;
	u_int32_t offset = mrr->mrr_offset;
	u_int p, first, fills;

	/* fill the first slot, but defer writing its descriptor */
	first = p = mrr->mrr_prod;
	if (myx_buf_fill(sc, &mrr->mrr_slots[first], mrr->mrr_mclget) != 0)
		return (slots);

	if (++p >= sc->sc_rx_ring_count)
		p = 0;

	for (fills = 1; fills < slots; fills++) {
		ms = &mrr->mrr_slots[p];

		if (myx_buf_fill(sc, ms, mrr->mrr_mclget) != 0)
			break;

		rxd.rx_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
		myx_bus_space_write(sc, offset + p * sizeof(rxd),
		    &rxd, sizeof(rxd));

		if (++p >= sc->sc_rx_ring_count)
			p = 0;
	}

	mrr->mrr_prod = p;

	/* make sure the first descriptor is seen after the others */
	if (fills > 1) {
		bus_space_barrier(sc->sc_memt, sc->sc_memh,
		    offset, sizeof(rxd) * sc->sc_rx_ring_count,
		    BUS_SPACE_BARRIER_WRITE);
	}

	ms = &mrr->mrr_slots[first];
	rxd.rx_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
	myx_bus_space_write(sc, offset + first * sizeof(rxd),
	    &rxd, sizeof(rxd));

	return (slots - fills);
}
1814
/*
 * Set up one rx ring: allocate the slot array, create one DMA map per
 * slot, and write all-ones descriptors into the NIC ring.
 * NOTE(review): the 0xff fill presumably marks the ring entries as
 * invalid/empty to the firmware -- confirm against the firmware
 * interface docs.  Returns 0 or an errno; on failure everything
 * created here is torn down again.
 */
int
myx_rx_init(struct myx_softc *sc, int ring, bus_size_t size)
{
	struct myx_rx_desc rxd;
	struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];
	struct myx_slot *ms;
	u_int32_t offset = mrr->mrr_offset;
	int rv;
	int i;

	mrr->mrr_slots = mallocarray(sizeof(*ms), sc->sc_rx_ring_count,
	    M_DEVBUF, M_WAITOK);
	if (mrr->mrr_slots == NULL)
		return (ENOMEM);

	memset(&rxd, 0xff, sizeof(rxd));
	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		ms = &mrr->mrr_slots[i];
		rv = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ms->ms_map);
		if (rv != 0)
			goto destroy;

		myx_bus_space_write(sc, offset + i * sizeof(rxd),
		    &rxd, sizeof(rxd));
	}

	/* reserve 2 slots so the ring never runs completely dry */
	if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
	mrr->mrr_prod = mrr->mrr_cons = 0;

	return (0);

destroy:
	/* unwind the maps created before the failure */
	while (i-- > 0) {
		ms = &mrr->mrr_slots[i];
		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
	}
	free(mrr->mrr_slots, M_DEVBUF, sizeof(*ms) * sc->sc_rx_ring_count);
	return (rv);
}
1855
1856int
1857myx_rx_fill(struct myx_softc *sc, struct myx_rx_ring *mrr)
1858{
1859	u_int slots;
1860
1861	slots = if_rxr_get(&mrr->mrr_rxr, sc->sc_rx_ring_count);
1862	if (slots == 0)
1863		return (1);
1864
1865	slots = myx_rx_fill_slots(sc, mrr, slots);
1866	if (slots > 0)
1867		if_rxr_put(&mrr->mrr_rxr, slots);
1868
1869	return (0);
1870}
1871
1872void
1873myx_rx_empty(struct myx_softc *sc, struct myx_rx_ring *mrr)
1874{
1875	struct myx_slot *ms;
1876
1877	while (mrr->mrr_cons != mrr->mrr_prod) {
1878		ms = &mrr->mrr_slots[mrr->mrr_cons];
1879
1880		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
1881			mrr->mrr_cons = 0;
1882
1883		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
1884		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1885		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
1886		m_freem(ms->ms_m);
1887	}
1888
1889	if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
1890}
1891
1892void
1893myx_rx_free(struct myx_softc *sc, struct myx_rx_ring *mrr)
1894{
1895	struct myx_slot *ms;
1896	int i;
1897
1898	for (i = 0; i < sc->sc_rx_ring_count; i++) {
1899		ms = &mrr->mrr_slots[i];
1900		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
1901	}
1902
1903	free(mrr->mrr_slots, M_DEVBUF, sizeof(*ms) * sc->sc_rx_ring_count);
1904}
1905
1906struct mbuf *
1907myx_mcl_small(void)
1908{
1909	struct mbuf *m;
1910
1911	m = MCLGETI(NULL, M_DONTWAIT, NULL, MYX_RXSMALL_SIZE);
1912	if (m == NULL)
1913		return (NULL);
1914
1915	m->m_len = m->m_pkthdr.len = MYX_RXSMALL_SIZE;
1916
1917	return (m);
1918}
1919
1920struct mbuf *
1921myx_mcl_big(void)
1922{
1923	struct mbuf *m;
1924	void *mcl;
1925
1926	MGETHDR(m, M_DONTWAIT, MT_DATA);
1927	if (m == NULL)
1928		return (NULL);
1929
1930	mcl = pool_get(myx_mcl_pool, PR_NOWAIT);
1931	if (mcl == NULL) {
1932		m_free(m);
1933		return (NULL);
1934	}
1935
1936	MEXTADD(m, mcl, MYX_RXBIG_SIZE, M_EXTWR, m_extfree_pool, myx_mcl_pool);
1937	m->m_len = m->m_pkthdr.len = MYX_RXBIG_SIZE;
1938
1939	return (m);
1940}
1941
1942int
1943myx_buf_fill(struct myx_softc *sc, struct myx_slot *ms,
1944    struct mbuf *(*mclget)(void))
1945{
1946	struct mbuf *m;
1947	int rv;
1948
1949	m = (*mclget)();
1950	if (m == NULL)
1951		return (ENOMEM);
1952
1953	rv = bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m, BUS_DMA_NOWAIT);
1954	if (rv != 0) {
1955		m_freem(m);
1956		return (rv);
1957	}
1958
1959	bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
1960	    ms->ms_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1961
1962	ms->ms_m = m;
1963
1964	return (0);
1965}
1966
/*
 * Allocate the tx slot array and create one DMA map per slot, sized
 * for up to sc_tx_nsegs segments with the chip's segment boundary
 * restriction.  Returns 0 or an errno; on failure all maps created so
 * far are destroyed and the slot array is freed.
 */
int
myx_tx_init(struct myx_softc *sc, bus_size_t size)
{
	struct myx_slot *ms;
	int rv;
	int i;

	sc->sc_tx_slots = mallocarray(sizeof(*ms), sc->sc_tx_ring_count,
	    M_DEVBUF, M_WAITOK);
	if (sc->sc_tx_slots == NULL)
		return (ENOMEM);

	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		ms = &sc->sc_tx_slots[i];
		rv = bus_dmamap_create(sc->sc_dmat, size, sc->sc_tx_nsegs,
		    sc->sc_tx_boundary, sc->sc_tx_boundary,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ms->ms_map);
		if (rv != 0)
			goto destroy;
	}

	sc->sc_tx_prod = sc->sc_tx_cons = 0;

	return (0);

destroy:
	/* unwind the maps created before the failure */
	while (i-- > 0) {
		ms = &sc->sc_tx_slots[i];
		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
	}
	free(sc->sc_tx_slots, M_DEVBUF, sizeof(*ms) * sc->sc_tx_ring_count);
	return (rv);
}
2000
2001void
2002myx_tx_empty(struct myx_softc *sc)
2003{
2004	struct myx_slot *ms;
2005	u_int cons = sc->sc_tx_cons;
2006	u_int prod = sc->sc_tx_prod;
2007
2008	while (cons != prod) {
2009		ms = &sc->sc_tx_slots[cons];
2010
2011		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
2012		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2013		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
2014		m_freem(ms->ms_m);
2015
2016		if (++cons >= sc->sc_tx_ring_count)
2017			cons = 0;
2018	}
2019
2020	sc->sc_tx_cons = cons;
2021}
2022
2023void
2024myx_tx_free(struct myx_softc *sc)
2025{
2026	struct myx_slot *ms;
2027	int i;
2028
2029	for (i = 0; i < sc->sc_tx_ring_count; i++) {
2030		ms = &sc->sc_tx_slots[i];
2031		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
2032	}
2033
2034	free(sc->sc_tx_slots, M_DEVBUF, sizeof(*ms) * sc->sc_tx_ring_count);
2035}
2036