if_myx.c revision 1.50
1/*	$OpenBSD: if_myx.c,v 1.50 2014/01/21 23:26:50 dlg Exp $	*/
2
3/*
4 * Copyright (c) 2007 Reyk Floeter <reyk@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19/*
20 * Driver for the Myricom Myri-10G Lanai-Z8E Ethernet chipsets.
21 */
22
23#include "bpfilter.h"
24
25#include <sys/param.h>
26#include <sys/systm.h>
27#include <sys/sockio.h>
28#include <sys/mbuf.h>
29#include <sys/kernel.h>
30#include <sys/socket.h>
31#include <sys/malloc.h>
32#include <sys/timeout.h>
33#include <sys/proc.h>
34#include <sys/device.h>
35#include <sys/queue.h>
36
37#include <machine/bus.h>
38#include <machine/intr.h>
39
40#include <net/if.h>
41#include <net/if_dl.h>
42#include <net/if_media.h>
43#include <net/if_types.h>
44
45#if NBPFILTER > 0
46#include <net/bpf.h>
47#endif
48
49#ifdef INET
50#include <netinet/in.h>
51#include <netinet/if_ether.h>
52#endif
53
54#include <dev/pci/pcireg.h>
55#include <dev/pci/pcivar.h>
56#include <dev/pci/pcidevs.h>
57
58#include <dev/pci/if_myxreg.h>
59
60#ifdef MYX_DEBUG
61#define MYXDBG_INIT	(1<<0)	/* chipset initialization */
62#define MYXDBG_CMD	(2<<0)	/* commands */
63#define MYXDBG_INTR	(3<<0)	/* interrupts */
64#define MYXDBG_ALL	0xffff	/* enable all debugging messages */
65int myx_debug = MYXDBG_ALL;
66#define DPRINTF(_lvl, _arg...)	do {					\
67	if (myx_debug & (_lvl))						\
68		printf(_arg);						\
69} while (0)
70#else
71#define DPRINTF(_lvl, arg...)
72#endif
73
74#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
75
/*
 * A single contiguous bus_dma allocation: the dmamap, its backing
 * segment, and the kernel virtual address it is mapped at.  Allocated
 * and torn down by myx_dmamem_alloc()/myx_dmamem_free().
 */
struct myx_dmamem {
	bus_dmamap_t		 mxm_map;	/* loaded dmamap */
	bus_dma_segment_t	 mxm_seg;	/* single backing segment */
	int			 mxm_nsegs;	/* always 1 (alloc asks for 1) */
	size_t			 mxm_size;	/* allocation size in bytes */
	caddr_t			 mxm_kva;	/* kva the segment is mapped at */
};
83
/*
 * An rx or tx packet buffer: an mbuf and the dmamap it is loaded into,
 * queued on one of the myx_buf_list queues in the softc.
 */
struct myx_buf {
	SIMPLEQ_ENTRY(myx_buf)	 mb_entry;	/* myx_buf_list linkage */
	bus_dmamap_t		 mb_map;	/* map for mb_m's data */
	struct mbuf		*mb_m;		/* packet data, or NULL when free */
};
89
/* A mutex-protected SIMPLEQ of myx_bufs (see myx_buf_get/myx_buf_put). */
struct myx_buf_list {
	SIMPLEQ_HEAD(, myx_buf)	mbl_q;
	struct mutex		mbl_mtx;
};
94
95struct pool *myx_buf_pool;
96
/*
 * Per-ring serialization used via myx_ring_enter()/myx_ring_leave()
 * (not in this view).  NOTE(review): mrl_running appears to count
 * pending enters so concurrent callers can hand work to the holder —
 * confirm against the enter/leave implementations.
 */
struct myx_ring_lock {
	struct mutex		mrl_mtx;
	u_int			mrl_running;
};
101
/* Per-device driver state. */
struct myx_softc {
	struct device		 sc_dev;
	struct arpcom		 sc_ac;		/* must follow sc_dev */

	/* PCI attachment */
	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	 sc_ih;
	pcitag_t		 sc_tag;
	u_int			 sc_function;

	/* register window (BAR0) */
	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;	/* mapped size; 0 when unmapped */

	struct myx_dmamem	 sc_zerodma;	/* zero pad read by the chip */
	struct myx_dmamem	 sc_cmddma;	/* command response/status buffer */
	struct myx_dmamem	 sc_paddma;	/* dummy RDMA target (myx_rdma) */

	/* firmware-updated status block, guarded by sc_sts_mtx */
	struct myx_dmamem	 sc_sts_dma;
	volatile struct myx_status	*sc_sts;
	struct mutex		 sc_sts_mtx;

	/* interrupt state; offsets are queried from the firmware */
	int			 sc_intx;	/* 1 if using INTx, 0 for MSI */
	void			*sc_irqh;
	u_int32_t		 sc_irqcoaloff;
	u_int32_t		 sc_irqclaimoff;
	u_int32_t		 sc_irqdeassertoff;

	/* interrupt (completion) queue written by the firmware */
	struct myx_dmamem	 sc_intrq_dma;
	struct myx_intrq_desc	*sc_intrq;
	u_int			 sc_intrq_count;
	u_int			 sc_intrq_idx;

	/* two receive rings: small (MCLBYTES) and big (jumbo) buffers */
	u_int			 sc_rx_ring_count;
	struct myx_ring_lock	 sc_rx_ring_lock[2];
	u_int32_t		 sc_rx_ring_offset[2];
	struct myx_buf_list	 sc_rx_buf_free[2];
	struct myx_buf_list	 sc_rx_buf_list[2];
	u_int			 sc_rx_ring_idx[2];
#define  MYX_RXSMALL		 0
#define  MYX_RXBIG		 1
	struct timeout		 sc_refill;	/* retry rx refill on mbuf shortage */

	/* transmit ring */
	bus_size_t		 sc_tx_boundary; /* 4096, or 2048 for unaligned fw */
	u_int			 sc_tx_ring_count;
	struct myx_ring_lock	 sc_tx_ring_lock;
	u_int32_t		 sc_tx_ring_offset;
	u_int			 sc_tx_nsegs;
	u_int32_t		 sc_tx_count; /* shadows ms_txdonecnt */
	u_int			 sc_tx_free;
	struct myx_buf_list	 sc_tx_buf_free;
	struct myx_buf_list	 sc_tx_buf_list;
	u_int			 sc_tx_ring_idx;

	u_int8_t		 sc_lladdr[ETHER_ADDR_LEN];
	struct ifmedia		 sc_media;

	volatile u_int8_t	 sc_linkdown;
};
161
162int	 myx_match(struct device *, void *, void *);
163void	 myx_attach(struct device *, struct device *, void *);
164int	 myx_query(struct myx_softc *sc, char *, size_t);
165u_int	 myx_ether_aton(char *, u_int8_t *, u_int);
166void	 myx_attachhook(void *);
167int	 myx_loadfirmware(struct myx_softc *, const char *);
168int	 myx_probe_firmware(struct myx_softc *);
169
170void	 myx_read(struct myx_softc *, bus_size_t, void *, bus_size_t);
171void	 myx_write(struct myx_softc *, bus_size_t, void *, bus_size_t);
172
173#if defined(__LP64__)
174#define myx_bus_space_write bus_space_write_raw_region_8
175typedef u_int64_t myx_bus_t;
176#else
177#define myx_bus_space_write bus_space_write_raw_region_4
178typedef u_int32_t myx_bus_t;
179#endif
180
181int	 myx_cmd(struct myx_softc *, u_int32_t, struct myx_cmd *, u_int32_t *);
182int	 myx_boot(struct myx_softc *, u_int32_t);
183
184int	 myx_rdma(struct myx_softc *, u_int);
185int	 myx_dmamem_alloc(struct myx_softc *, struct myx_dmamem *,
186	    bus_size_t, u_int align);
187void	 myx_dmamem_free(struct myx_softc *, struct myx_dmamem *);
188int	 myx_media_change(struct ifnet *);
189void	 myx_media_status(struct ifnet *, struct ifmediareq *);
190void	 myx_link_state(struct myx_softc *, u_int32_t);
191void	 myx_watchdog(struct ifnet *);
192int	 myx_ioctl(struct ifnet *, u_long, caddr_t);
193void	 myx_up(struct myx_softc *);
194void	 myx_iff(struct myx_softc *);
195void	 myx_down(struct myx_softc *);
196
197void	 myx_start(struct ifnet *);
198void	 myx_write_txd_tail(struct myx_softc *, struct myx_buf *, u_int8_t,
199	    u_int32_t, u_int);
200int	 myx_load_buf(struct myx_softc *, struct myx_buf *, struct mbuf *);
201int	 myx_setlladdr(struct myx_softc *, u_int32_t, u_int8_t *);
202int	 myx_intr(void *);
203int	 myx_rxeof(struct myx_softc *);
204void	 myx_txeof(struct myx_softc *, u_int32_t);
205
206struct myx_buf *	myx_buf_alloc(struct myx_softc *, bus_size_t, int,
207			    bus_size_t, bus_size_t);
208void			myx_buf_free(struct myx_softc *, struct myx_buf *);
209void			myx_bufs_init(struct myx_buf_list *);
210int			myx_bufs_empty(struct myx_buf_list *);
211struct myx_buf *	myx_buf_get(struct myx_buf_list *);
212void			myx_buf_put(struct myx_buf_list *, struct myx_buf *);
213struct myx_buf *	myx_buf_fill(struct myx_softc *, int);
214
215void			myx_rx_zero(struct myx_softc *, int);
216int			myx_rx_fill(struct myx_softc *, int);
217void			myx_refill(void *);
218
219void			myx_ring_lock_init(struct myx_ring_lock *);
220int			myx_ring_enter(struct myx_ring_lock *);
221int			myx_ring_leave(struct myx_ring_lock *);
222
/* autoconf(9) glue */
struct cfdriver myx_cd = {
	NULL, "myx", DV_IFNET
};
struct cfattach myx_ca = {
	sizeof(struct myx_softc), myx_match, myx_attach
};

/* PCI IDs of the supported Myri-10G adapters */
const struct pci_matchid myx_devices[] = {
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E },
	{ PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E_9 }
};
234
235int
236myx_match(struct device *parent, void *match, void *aux)
237{
238	return (pci_matchbyid(aux, myx_devices, nitems(myx_devices)));
239}
240
/*
 * Attach: map BAR0, read the board's MAC address and part number from
 * the EEPROM strings, map the interrupt (MSI preferred, INTx fallback)
 * and defer the rest of initialization to myx_attachhook() at
 * mountroot time so the firmware can be loaded from disk.
 */
void
myx_attach(struct device *parent, struct device *self, void *aux)
{
	struct myx_softc	*sc = (struct myx_softc *)self;
	struct pci_attach_args	*pa = aux;
	char			 part[32];
	pcireg_t		 memtype;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_function = pa->pa_function;

	/* rings/queues must exist before the interrupt can ever fire */
	myx_ring_lock_init(&sc->sc_rx_ring_lock[MYX_RXSMALL]);
	myx_bufs_init(&sc->sc_rx_buf_free[MYX_RXSMALL]);
	myx_bufs_init(&sc->sc_rx_buf_list[MYX_RXSMALL]);
	myx_ring_lock_init(&sc->sc_rx_ring_lock[MYX_RXBIG]);
	myx_bufs_init(&sc->sc_rx_buf_free[MYX_RXBIG]);
	myx_bufs_init(&sc->sc_rx_buf_list[MYX_RXBIG]);

	myx_ring_lock_init(&sc->sc_tx_ring_lock);
	myx_bufs_init(&sc->sc_tx_buf_free);
	myx_bufs_init(&sc->sc_tx_buf_list);

	timeout_set(&sc->sc_refill, myx_refill, sc);

	mtx_init(&sc->sc_sts_mtx, IPL_NET);


	/* Map the PCI memory space */
	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MYXBAR0);
	if (pci_mapreg_map(pa, MYXBAR0, memtype, BUS_SPACE_MAP_PREFETCHABLE,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
		printf(": unable to map register memory\n");
		return;
	}

	/* Get board details (mac/part) */
	memset(part, 0, sizeof(part));
	if (myx_query(sc, part, sizeof(part)) != 0)
		goto unmap;

	/* Map the interrupt */
	if (pci_intr_map_msi(pa, &sc->sc_ih) != 0) {
		if (pci_intr_map(pa, &sc->sc_ih) != 0) {
			printf(": unable to map interrupt\n");
			goto unmap;
		}
		sc->sc_intx = 1;
	}

	printf(": %s, model %s, address %s\n",
	    pci_intr_string(pa->pa_pc, sc->sc_ih),
	    part[0] == '\0' ? "(unknown)" : part,
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	/* this is sort of racy */
	if (myx_buf_pool == NULL) {
		/* shared by all myx(4) instances; allocated on first attach */
		myx_buf_pool = malloc(sizeof(*myx_buf_pool), M_DEVBUF,
		    M_WAITOK);
		if (myx_buf_pool == NULL) {
			printf("%s: unable to allocate buf pool\n",
			    DEVNAME(sc));
			goto unmap;
		}
		pool_init(myx_buf_pool, sizeof(struct myx_buf),
		    0, 0, 0, "myxbufs", &pool_allocator_nointr);
	}

	/* finish init from myx_attachhook() once the root fs is mounted */
	if (mountroothook_establish(myx_attachhook, sc) == NULL) {
		printf("%s: unable to establish mountroot hook\n", DEVNAME(sc));
		goto unmap;
	}

	return;

 unmap:
	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
	sc->sc_mems = 0;
}
321
322u_int
323myx_ether_aton(char *mac, u_int8_t *lladdr, u_int maxlen)
324{
325	u_int		i, j;
326	u_int8_t	digit;
327
328	memset(lladdr, 0, ETHER_ADDR_LEN);
329	for (i = j = 0; mac[i] != '\0' && i < maxlen; i++) {
330		if (mac[i] >= '0' && mac[i] <= '9')
331			digit = mac[i] - '0';
332		else if (mac[i] >= 'A' && mac[i] <= 'F')
333			digit = mac[i] - 'A' + 10;
334		else if (mac[i] >= 'a' && mac[i] <= 'f')
335			digit = mac[i] - 'a' + 10;
336		else
337			continue;
338		if ((j & 1) == 0)
339			digit <<= 4;
340		lladdr[j++/2] |= digit;
341	}
342
343	return (i);
344}
345
/*
 * Read the board's "string specs" out of the EEPROM image in SRAM and
 * extract the MAC address (into sc_ac.ac_enaddr) and the part number
 * (into part).  The specs are a sequence of NUL-terminated "KEY=value"
 * strings; an empty string ends the list.
 *
 * Returns 0 on success, 1 if the firmware header pointer is bogus.
 */
int
myx_query(struct myx_softc *sc, char *part, size_t partlen)
{
	struct myx_gen_hdr hdr;
	u_int32_t	offset;
	u_int8_t	strings[MYX_STRING_SPECS_SIZE];
	u_int		i, len, maxlen;

	/* the header pointer lives at a fixed location in SRAM */
	myx_read(sc, MYX_HEADER_POS, &offset, sizeof(offset));
	offset = betoh32(offset);
	if (offset + sizeof(hdr) > sc->sc_mems) {
		printf(": header is outside register window\n");
		return (1);
	}

	myx_read(sc, offset, &hdr, sizeof(hdr));
	offset = betoh32(hdr.fw_specs);
	len = min(betoh32(hdr.fw_specs_len), sizeof(strings));

	bus_space_read_region_1(sc->sc_memt, sc->sc_memh, offset, strings, len);

	for (i = 0; i < len; i++) {
		maxlen = len - i;
		/* an empty string terminates the spec list */
		if (strings[i] == '\0')
			break;
		if (maxlen > 4 && memcmp("MAC=", &strings[i], 4) == 0) {
			i += 4;
			i += myx_ether_aton(&strings[i],
			    sc->sc_ac.ac_enaddr, maxlen);
		} else if (maxlen > 3 && memcmp("PC=", &strings[i], 3) == 0) {
			i += 3;
			i += strlcpy(part, &strings[i], min(maxlen, partlen));
		}
		/* skip to the NUL ending this string; outer i++ steps past it */
		for (; i < len; i++) {
			if (strings[i] == '\0')
				break;
		}
	}

	return (0);
}
387
/*
 * Load the named firmware image from disk, sanity-check its embedded
 * header (type and version), copy it into the chip's SRAM in 256-byte
 * chunks and boot it via myx_boot().
 *
 * Returns 0 on success, 1 on any failure (message already printed).
 */
int
myx_loadfirmware(struct myx_softc *sc, const char *filename)
{
	struct myx_gen_hdr	hdr;
	u_int8_t		*fw;
	size_t			fwlen;
	u_int32_t		offset;
	u_int			i, ret = 1;

	if (loadfirmware(filename, &fw, &fwlen) != 0) {
		printf("%s: could not load firmware %s\n", DEVNAME(sc),
		    filename);
		return (1);
	}
	if (fwlen > MYX_SRAM_SIZE || fwlen < MYXFW_MIN_LEN) {
		printf("%s: invalid firmware %s size\n", DEVNAME(sc), filename);
		goto err;
	}

	/* the image embeds a pointer to its own header, like the SRAM copy */
	memcpy(&offset, fw + MYX_HEADER_POS, sizeof(offset));
	offset = betoh32(offset);
	if ((offset + sizeof(hdr)) > fwlen) {
		printf("%s: invalid firmware %s\n", DEVNAME(sc), filename);
		goto err;
	}

	memcpy(&hdr, fw + offset, sizeof(hdr));
	DPRINTF(MYXDBG_INIT, "%s: "
	    "fw hdr off %u, length %u, type 0x%x, version %s\n",
	    DEVNAME(sc), offset, betoh32(hdr.fw_hdrlength),
	    betoh32(hdr.fw_type), hdr.fw_version);

	if (betoh32(hdr.fw_type) != MYXFW_TYPE_ETH ||
	    memcmp(MYXFW_VER, hdr.fw_version, strlen(MYXFW_VER)) != 0) {
		printf("%s: invalid firmware type 0x%x version %s\n",
		    DEVNAME(sc), betoh32(hdr.fw_type), hdr.fw_version);
		goto err;
	}

	/* Write the firmware to the card's SRAM */
	for (i = 0; i < fwlen; i += 256)
		myx_write(sc, i + MYX_FW, fw + i, min(256, fwlen - i));

	if (myx_boot(sc, fwlen) != 0) {
		printf("%s: failed to boot %s\n", DEVNAME(sc), filename);
		goto err;
	}

	ret = 0;

err:
	free(fw, M_DEVBUF);
	return (ret);
}
442
/*
 * Deferred attach, run once the root filesystem is mounted so the
 * firmware can be read from disk: allocate the command buffer, load
 * and boot the firmware, pick aligned vs unaligned firmware, hook the
 * interrupt and attach the network interface.
 */
void
myx_attachhook(void *arg)
{
	struct myx_softc	*sc = (struct myx_softc *)arg;
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	struct myx_cmd		 mc;

	/* Allocate command DMA memory */
	if (myx_dmamem_alloc(sc, &sc->sc_cmddma, MYXALIGN_CMD,
	    MYXALIGN_CMD) != 0) {
		printf("%s: failed to allocate command DMA memory\n",
		    DEVNAME(sc));
		return;
	}

	/* Try the firmware stored on disk */
	if (myx_loadfirmware(sc, MYXFW_ALIGNED) != 0) {
		/* error printed by myx_loadfirmware */
		goto freecmd;
	}

	memset(&mc, 0, sizeof(mc));

	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
		goto freecmd;
	}

	/* aligned firmware default; myx_probe_firmware may drop it to 2048 */
	sc->sc_tx_boundary = 4096;

	if (myx_probe_firmware(sc) != 0) {
		printf("%s: error while selecting firmware\n", DEVNAME(sc));
		goto freecmd;
	}

	sc->sc_irqh = pci_intr_establish(sc->sc_pc, sc->sc_ih,
	    IPL_NET | IPL_MPSAFE, myx_intr, sc, DEVNAME(sc));
	if (sc->sc_irqh == NULL) {
		printf("%s: unable to establish interrupt\n", DEVNAME(sc));
		goto freecmd;
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = myx_ioctl;
	ifp->if_start = myx_start;
	ifp->if_watchdog = myx_watchdog;
	ifp->if_hardmtu = 9000;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	/* real sendq depth is set in myx_up() once the tx ring size is known */
	IFQ_SET_MAXLEN(&ifp->if_snd, 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
#if 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

	ifmedia_init(&sc->sc_media, 0, myx_media_change, myx_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	return;

freecmd:
	myx_dmamem_free(sc, &sc->sc_cmddma);
}
514
/*
 * Decide whether the already-loaded aligned firmware can stay, or
 * whether the unaligned variant must be loaded instead.  PCIe links of
 * width <= 4 are known-good; otherwise run the firmware's unaligned
 * DMA self-tests against a scratch buffer and fall back to the
 * unaligned firmware (with a 2048-byte tx boundary) if any test fails.
 *
 * Returns 0 when a usable firmware is running, 1 on failure.
 */
int
myx_probe_firmware(struct myx_softc *sc)
{
	struct myx_dmamem test;
	bus_dmamap_t map;
	struct myx_cmd mc;
	pcireg_t csr;
	int offset;
	int width = 0;

	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    offset + PCI_PCIE_LCSR);
		/* negotiated link width field of the link control/status reg */
		width = (csr >> 20) & 0x3f;

		if (width <= 4) {
			/*
			 * if the link width is 4 or less we can use the
			 * aligned firmware.
			 */
			return (0);
		}
	}

	if (myx_dmamem_alloc(sc, &test, 4096, 4096) != 0)
		return (1);
	map = test.mxm_map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * mc_data2 packs the buffer size with a test selector in the
	 * upper half (0x10000 read, 0x1 write, 0x10001 both) —
	 * NOTE(review): encoding taken from usage below; confirm against
	 * the Myri-10G firmware interface spec.
	 */
	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10000);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x1);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA write test failed\n", DEVNAME(sc));
		goto fail;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc.mc_data2 = htobe32(4096 * 0x10001);
	if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) {
		printf("%s: DMA read/write test failed\n", DEVNAME(sc));
		goto fail;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);
	return (0);

fail:
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	myx_dmamem_free(sc, &test);

	/* the aligned firmware failed; switch to the unaligned variant */
	if (myx_loadfirmware(sc, MYXFW_UNALIGNED) != 0) {
		printf("%s: unable to load %s\n", DEVNAME(sc),
		    MYXFW_UNALIGNED);
		return (1);
	}

	sc->sc_tx_boundary = 2048;

	printf("%s: using unaligned firmware\n", DEVNAME(sc));
	return (0);
}
595
596void
597myx_read(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
598{
599	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
600	    BUS_SPACE_BARRIER_READ);
601	bus_space_read_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
602}
603
604void
605myx_write(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len)
606{
607	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len);
608	bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len,
609	    BUS_SPACE_BARRIER_WRITE);
610}
611
/*
 * Allocate, map and load a single-segment DMA buffer of the given size
 * and alignment into mxm.  On failure everything acquired so far is
 * torn down via the goto chain.
 *
 * Returns 0 on success, 1 on failure.
 */
int
myx_dmamem_alloc(struct myx_softc *sc, struct myx_dmamem *mxm,
    bus_size_t size, u_int align)
{
	mxm->mxm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
	    mxm->mxm_size, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
	    &mxm->mxm_map) != 0)
		return (1);
	/* BUS_DMA_ZERO: callers rely on the memory starting zeroed */
	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (0);
 unmap:
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
 free:
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
 destroy:
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
	return (1);
}
642
643void
644myx_dmamem_free(struct myx_softc *sc, struct myx_dmamem *mxm)
645{
646	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
647	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
648	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
649	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
650}
651
/*
 * Issue a command to the firmware: write the command block to the MYX_CMD
 * window and poll the DMA'd response buffer (sc_cmddma) for up to ~20ms
 * until the firmware overwrites the 0xffffffff sentinel with a result.
 *
 * On success (result 0) the response data word is stored through r if
 * r is non-NULL.  Returns 0 on success, -1 on error or timeout.
 */
int
myx_cmd(struct myx_softc *sc, u_int32_t cmd, struct myx_cmd *mc, u_int32_t *r)
{
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	struct myx_response	*mr;
	u_int			 i;
	u_int32_t		 result, data;
#ifdef MYX_DEBUG
	/* names indexed by command code, for DPRINTF only */
	static const char *cmds[MYXCMD_MAX] = {
		"CMD_NONE",
		"CMD_RESET",
		"CMD_GET_VERSION",
		"CMD_SET_INTRQDMA",
		"CMD_SET_BIGBUFSZ",
		"CMD_SET_SMALLBUFSZ",
		"CMD_GET_TXRINGOFF",
		"CMD_GET_RXSMALLRINGOFF",
		"CMD_GET_RXBIGRINGOFF",
		"CMD_GET_INTRACKOFF",
		"CMD_GET_INTRDEASSERTOFF",
		"CMD_GET_TXRINGSZ",
		"CMD_GET_RXRINGSZ",
		"CMD_SET_INTRQSZ",
		"CMD_SET_IFUP",
		"CMD_SET_IFDOWN",
		"CMD_SET_MTU",
		"CMD_GET_INTRCOALDELAYOFF",
		"CMD_SET_STATSINTVL",
		"CMD_SET_STATSDMA_OLD",
		"CMD_SET_PROMISC",
		"CMD_UNSET_PROMISC",
		"CMD_SET_LLADDR",
		"CMD_SET_FC",
		"CMD_UNSET_FC",
		"CMD_DMA_TEST",
		"CMD_SET_ALLMULTI",
		"CMD_UNSET_ALLMULTI",
		"CMD_SET_MCASTGROUP",
		"CMD_UNSET_MCASTGROUP",
		"CMD_UNSET_MCAST",
		"CMD_SET_STATSDMA",
		"CMD_UNALIGNED_DMA_TEST",
		"CMD_GET_UNALIGNED_STATUS"
	};
#endif

	mc->mc_cmd = htobe32(cmd);
	/* tell the firmware where to DMA its response */
	mc->mc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	mc->mc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));

	/* sentinel the firmware will overwrite when it completes */
	mr = (struct myx_response *)sc->sc_cmddma.mxm_kva;
	mr->mr_result = 0xffffffff;

	/* Send command */
	myx_write(sc, MYX_CMD, (u_int8_t *)mc, sizeof(struct myx_cmd));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* poll the response at 1ms intervals, up to 20ms */
	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		result = betoh32(mr->mr_result);
		data = betoh32(mr->mr_data);

		if (result != 0xffffffff)
			break;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): %s completed, i %d, "
	    "result 0x%x, data 0x%x (%u)\n", DEVNAME(sc), __func__,
	    cmds[cmd], i, result, data, data);

	if (result != 0)
		return (-1);

	if (r != NULL)
		*r = data;
	return (0);
}
735
/*
 * Start the firmware just written to SRAM: post a boot command at
 * MYX_BOOT and poll the DMA'd status word for the 0xffffffff
 * acknowledgement, for up to ~200ms.  The first 8 bytes of the image
 * are skipped (bc_copyto/bc_length) per the boot protocol.
 *
 * Returns 0 on success, 1 on timeout.
 */
int
myx_boot(struct myx_softc *sc, u_int32_t length)
{
	struct myx_bootcmd	 bc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	u_int32_t		*status;
	u_int			 i, ret = 1;

	memset(&bc, 0, sizeof(bc));
	bc.bc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	bc.bc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	bc.bc_result = 0xffffffff;	/* all-ones: endian-neutral sentinel */
	bc.bc_offset = htobe32(MYX_FW_BOOT);
	bc.bc_length = htobe32(length - 8);
	bc.bc_copyto = htobe32(8);
	bc.bc_jumpto = htobe32(0);

	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	/* Send command */
	myx_write(sc, MYX_BOOT, &bc, sizeof(bc));
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* poll at 1ms intervals, up to 200ms */
	for (i = 0; i < 200; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s: boot completed, i %d, result %d\n",
	    DEVNAME(sc), i, ret);

	return (ret);
}
779
/*
 * Enable or disable the firmware's dummy RDMA against sc_paddma and
 * poll the DMA'd status word for acknowledgement (up to ~20ms).
 *
 * Returns 0 on success, 1 on timeout.
 */
int
myx_rdma(struct myx_softc *sc, u_int do_enable)
{
	struct myx_rdmacmd	 rc;
	bus_dmamap_t		 map = sc->sc_cmddma.mxm_map;
	bus_dmamap_t		 pad = sc->sc_paddma.mxm_map;
	u_int32_t		*status;
	int			 ret = 1;
	u_int			 i;

	/*
	 * It is required to setup a _dummy_ RDMA address. It also makes
	 * some PCI-E chipsets resend dropped messages.
	 */
	rc.rc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
	rc.rc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
	rc.rc_result = 0xffffffff;	/* all-ones: endian-neutral sentinel */
	rc.rc_rdma_high = htobe32(MYX_ADDRHIGH(pad->dm_segs[0].ds_addr));
	rc.rc_rdma_low = htobe32(MYX_ADDRLOW(pad->dm_segs[0].ds_addr));
	rc.rc_enable = htobe32(do_enable);

	status = (u_int32_t *)sc->sc_cmddma.mxm_kva;
	*status = 0;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Send command */
	myx_write(sc, MYX_RDMA, &rc, sizeof(rc));

	/* poll at 1ms intervals, up to 20ms */
	for (i = 0; i < 20; i++) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		if (*status == 0xffffffff) {
			ret = 0;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		delay(1000);
	}

	DPRINTF(MYXDBG_CMD, "%s(%s): dummy RDMA %s, i %d, result 0x%x\n",
	    DEVNAME(sc), __func__,
	    do_enable ? "enabled" : "disabled", i, betoh32(*status));

	return (ret);
}
830
/*
 * ifmedia change callback.  Media selection is not configurable on
 * this hardware, so the request is accepted without doing anything.
 */
int
myx_media_change(struct ifnet *ifp)
{
	/* nothing to do; report success */
	return (0);
}
837
/*
 * ifmedia status callback: read the firmware-updated link state out of
 * the status block (under sc_sts_mtx, with POSTREAD/PREREAD syncs
 * around the access) and report 10G full duplex with flow control when
 * the link is up.
 */
void
myx_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	u_int32_t		 sts;

	imr->ifm_active = IFM_ETHER | IFM_AUTO;
	/* status block only exists while the interface is running */
	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		imr->ifm_status = 0;
		return;
	}

	mtx_enter(&sc->sc_sts_mtx);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	sts = sc->sc_sts->ms_linkstate;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
	mtx_leave(&sc->sc_sts_mtx);

	/* push the freshly read state into the ifnet before reporting it */
	myx_link_state(sc, sts);

	imr->ifm_status = IFM_AVALID;
	if (!LINK_STATE_IS_UP(ifp->if_link_state))
		return;

	imr->ifm_active |= IFM_FDX | IFM_FLOW |
	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
	imr->ifm_status |= IFM_ACTIVE;
}
869
870void
871myx_link_state(struct myx_softc *sc, u_int32_t sts)
872{
873	struct ifnet		*ifp = &sc->sc_ac.ac_if;
874	int			 link_state = LINK_STATE_DOWN;
875
876	if (betoh32(sts) == MYXSTS_LINKUP)
877		link_state = LINK_STATE_FULL_DUPLEX;
878	if (ifp->if_link_state != link_state) {
879		ifp->if_link_state = link_state;
880		if_link_state_change(ifp);
881		ifp->if_baudrate = LINK_STATE_IS_UP(ifp->if_link_state) ?
882		    IF_Gbps(10) : 0;
883	}
884}
885
/*
 * Transmit watchdog callback; intentionally a no-op.
 */
void
myx_watchdog(struct ifnet *ifp)
{
	/* nothing to do */
}
891
/*
 * Interface ioctl handler, run at splnet.  Brings the interface up or
 * down, dispatches media ioctls to ifmedia, and hands everything else
 * to ether_ioctl().  ENETRESET from any path is absorbed by reloading
 * the rx filter via myx_iff() while the interface stays running.
 */
int
myx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct myx_softc	*sc = (struct myx_softc *)ifp->if_softc;
	struct ifaddr		*ifa = (struct ifaddr *)data;
	struct ifreq		*ifr = (struct ifreq *)data;
	int			 s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_ac, ifa);
#endif
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;	/* just reprogram */
			else
				myx_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				myx_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	/* reload the rx filter instead of a full reinit */
	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			myx_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
942
943void
944myx_up(struct myx_softc *sc)
945{
946	struct ifnet		*ifp = &sc->sc_ac.ac_if;
947	struct myx_buf		*mb;
948	struct myx_cmd		mc;
949	bus_dmamap_t		map;
950	size_t			size;
951	u_int			maxpkt;
952	u_int32_t		r;
953	int			i;
954
955	memset(&mc, 0, sizeof(mc));
956	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
957		printf("%s: failed to reset the device\n", DEVNAME(sc));
958		return;
959	}
960
961	if (myx_dmamem_alloc(sc, &sc->sc_zerodma,
962	    64, MYXALIGN_CMD) != 0) {
963		printf("%s: failed to allocate zero pad memory\n",
964		    DEVNAME(sc));
965		return;
966	}
967	memset(sc->sc_zerodma.mxm_kva, 0, 64);
968	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
969	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);
970
971	if (myx_dmamem_alloc(sc, &sc->sc_paddma,
972	    MYXALIGN_CMD, MYXALIGN_CMD) != 0) {
973		printf("%s: failed to allocate pad DMA memory\n",
974		    DEVNAME(sc));
975		goto free_zero;
976	}
977	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
978	    sc->sc_paddma.mxm_map->dm_mapsize,
979	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
980
981	if (myx_rdma(sc, MYXRDMA_ON) != 0) {
982		printf("%s: failed to enable dummy RDMA\n", DEVNAME(sc));
983		goto free_pad;
984	}
985
986	if (myx_cmd(sc, MYXCMD_GET_RXRINGSZ, &mc, &r) != 0) {
987		printf("%s: unable to get rx ring size\n", DEVNAME(sc));
988		goto free_pad;
989	}
990	sc->sc_rx_ring_count = r / sizeof(struct myx_rx_desc);
991
992	m_clsetwms(ifp, MCLBYTES, 2, sc->sc_rx_ring_count - 2);
993	m_clsetwms(ifp, 12 * 1024, 2, sc->sc_rx_ring_count - 2);
994
995	memset(&mc, 0, sizeof(mc));
996	if (myx_cmd(sc, MYXCMD_GET_TXRINGSZ, &mc, &r) != 0) {
997		printf("%s: unable to get tx ring size\n", DEVNAME(sc));
998		goto free_pad;
999	}
1000	sc->sc_tx_ring_idx = 0;
1001	sc->sc_tx_ring_count = r / sizeof(struct myx_tx_desc);
1002	sc->sc_tx_free = sc->sc_tx_ring_count - 1;
1003	sc->sc_tx_nsegs = min(16, sc->sc_tx_ring_count / 4); /* magic */
1004	sc->sc_tx_count = 0;
1005	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_count - 1);
1006	IFQ_SET_READY(&ifp->if_snd);
1007
1008	/* Allocate Interrupt Queue */
1009
1010	sc->sc_intrq_count = sc->sc_rx_ring_count * 2;
1011	sc->sc_intrq_idx = 0;
1012
1013	size = sc->sc_intrq_count * sizeof(struct myx_intrq_desc);
1014	if (myx_dmamem_alloc(sc, &sc->sc_intrq_dma,
1015	    size, MYXALIGN_DATA) != 0) {
1016		goto free_pad;
1017	}
1018	sc->sc_intrq = (struct myx_intrq_desc *)sc->sc_intrq_dma.mxm_kva;
1019	map = sc->sc_intrq_dma.mxm_map;
1020	memset(sc->sc_intrq, 0, size);
1021	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1022	    BUS_DMASYNC_PREREAD);
1023
1024	memset(&mc, 0, sizeof(mc));
1025	mc.mc_data0 = htobe32(size);
1026	if (myx_cmd(sc, MYXCMD_SET_INTRQSZ, &mc, NULL) != 0) {
1027		printf("%s: failed to set intrq size\n", DEVNAME(sc));
1028		goto free_intrq;
1029	}
1030
1031	memset(&mc, 0, sizeof(mc));
1032	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1033	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1034	if (myx_cmd(sc, MYXCMD_SET_INTRQDMA, &mc, NULL) != 0) {
1035		printf("%s: failed to set intrq address\n", DEVNAME(sc));
1036		goto free_intrq;
1037	}
1038
1039	/*
1040	 * get interrupt offsets
1041	 */
1042
1043	memset(&mc, 0, sizeof(mc));
1044	if (myx_cmd(sc, MYXCMD_GET_INTRACKOFF, &mc,
1045	    &sc->sc_irqclaimoff) != 0) {
1046		printf("%s: failed to get IRQ ack offset\n", DEVNAME(sc));
1047		goto free_intrq;
1048	}
1049
1050	memset(&mc, 0, sizeof(mc));
1051	if (myx_cmd(sc, MYXCMD_GET_INTRDEASSERTOFF, &mc,
1052	    &sc->sc_irqdeassertoff) != 0) {
1053		printf("%s: failed to get IRQ deassert offset\n", DEVNAME(sc));
1054		goto free_intrq;
1055	}
1056
1057	memset(&mc, 0, sizeof(mc));
1058	if (myx_cmd(sc, MYXCMD_GET_INTRCOALDELAYOFF, &mc,
1059	    &sc->sc_irqcoaloff) != 0) {
1060		printf("%s: failed to get IRQ coal offset\n", DEVNAME(sc));
1061		goto free_intrq;
1062	}
1063
1064	/* Set an appropriate interrupt coalescing period */
1065	r = htobe32(MYX_IRQCOALDELAY);
1066	myx_write(sc, sc->sc_irqcoaloff, &r, sizeof(r));
1067
1068	if (myx_setlladdr(sc, MYXCMD_SET_LLADDR, LLADDR(ifp->if_sadl)) != 0) {
1069		printf("%s: failed to configure lladdr\n", DEVNAME(sc));
1070		goto free_intrq;
1071	}
1072
1073	memset(&mc, 0, sizeof(mc));
1074	if (myx_cmd(sc, MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1075		printf("%s: failed to disable promisc mode\n", DEVNAME(sc));
1076		goto free_intrq;
1077	}
1078
1079	memset(&mc, 0, sizeof(mc));
1080	if (myx_cmd(sc, MYXCMD_FC_DEFAULT, &mc, NULL) != 0) {
1081		printf("%s: failed to configure flow control\n", DEVNAME(sc));
1082		goto free_intrq;
1083	}
1084
1085	memset(&mc, 0, sizeof(mc));
1086	if (myx_cmd(sc, MYXCMD_GET_TXRINGOFF, &mc,
1087	    &sc->sc_tx_ring_offset) != 0) {
1088		printf("%s: unable to get tx ring offset\n", DEVNAME(sc));
1089		goto free_intrq;
1090	}
1091
1092	memset(&mc, 0, sizeof(mc));
1093	if (myx_cmd(sc, MYXCMD_GET_RXSMALLRINGOFF, &mc,
1094	    &sc->sc_rx_ring_offset[MYX_RXSMALL]) != 0) {
1095		printf("%s: unable to get small rx ring offset\n", DEVNAME(sc));
1096		goto free_intrq;
1097	}
1098
1099	memset(&mc, 0, sizeof(mc));
1100	if (myx_cmd(sc, MYXCMD_GET_RXBIGRINGOFF, &mc,
1101	    &sc->sc_rx_ring_offset[MYX_RXBIG]) != 0) {
1102		printf("%s: unable to get big rx ring offset\n", DEVNAME(sc));
1103		goto free_intrq;
1104	}
1105
1106	/* Allocate Interrupt Data */
1107	if (myx_dmamem_alloc(sc, &sc->sc_sts_dma,
1108	    sizeof(struct myx_status), MYXALIGN_DATA) != 0) {
1109		printf("%s: failed to allocate status DMA memory\n",
1110		    DEVNAME(sc));
1111		goto free_intrq;
1112	}
1113	sc->sc_sts = (struct myx_status *)sc->sc_sts_dma.mxm_kva;
1114	map = sc->sc_sts_dma.mxm_map;
1115	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1116	    BUS_DMASYNC_PREREAD);
1117
1118	memset(&mc, 0, sizeof(mc));
1119	mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
1120	mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
1121	mc.mc_data2 = htobe32(sizeof(struct myx_status));
1122	if (myx_cmd(sc, MYXCMD_SET_STATSDMA, &mc, NULL) != 0) {
1123		printf("%s: failed to set status DMA offset\n", DEVNAME(sc));
1124		goto free_sts;
1125	}
1126
1127	maxpkt = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1128
1129	memset(&mc, 0, sizeof(mc));
1130	mc.mc_data0 = htobe32(maxpkt);
1131	if (myx_cmd(sc, MYXCMD_SET_MTU, &mc, NULL) != 0) {
1132		printf("%s: failed to set MTU size %d\n", DEVNAME(sc), maxpkt);
1133		goto free_sts;
1134	}
1135
1136	for (i = 0; i < sc->sc_tx_ring_count; i++) {
1137		mb = myx_buf_alloc(sc, maxpkt, sc->sc_tx_nsegs,
1138		    sc->sc_tx_boundary, sc->sc_tx_boundary);
1139		if (mb == NULL)
1140			goto free_tx_bufs;
1141
1142		myx_buf_put(&sc->sc_tx_buf_free, mb);
1143	}
1144
1145	for (i = 0; i < sc->sc_rx_ring_count; i++) {
1146		mb = myx_buf_alloc(sc, MCLBYTES, 1, 4096, 4096);
1147		if (mb == NULL)
1148			goto free_rxsmall_bufs;
1149
1150		myx_buf_put(&sc->sc_rx_buf_free[MYX_RXSMALL], mb);
1151	}
1152
1153	for (i = 0; i < sc->sc_rx_ring_count; i++) {
1154		mb = myx_buf_alloc(sc, 12 * 1024, 1, 12 * 1024, 0);
1155		if (mb == NULL)
1156			goto free_rxbig_bufs;
1157
1158		myx_buf_put(&sc->sc_rx_buf_free[MYX_RXBIG], mb);
1159	}
1160
1161	myx_rx_zero(sc, MYX_RXSMALL);
1162	if (myx_rx_fill(sc, MYX_RXSMALL) != 0) {
1163		printf("%s: failed to fill small rx ring\n", DEVNAME(sc));
1164		goto free_rxbig_bufs;
1165	}
1166
1167	myx_rx_zero(sc, MYX_RXBIG);
1168	if (myx_rx_fill(sc, MYX_RXBIG) != 0) {
1169		printf("%s: failed to fill big rx ring\n", DEVNAME(sc));
1170		goto free_rxsmall;
1171	}
1172
1173	memset(&mc, 0, sizeof(mc));
1174	mc.mc_data0 = htobe32(MCLBYTES - ETHER_ALIGN);
1175	if (myx_cmd(sc, MYXCMD_SET_SMALLBUFSZ, &mc, NULL) != 0) {
1176		printf("%s: failed to set small buf size\n", DEVNAME(sc));
1177		goto free_rxbig;
1178	}
1179
1180	memset(&mc, 0, sizeof(mc));
1181	mc.mc_data0 = htobe32(16384);
1182	if (myx_cmd(sc, MYXCMD_SET_BIGBUFSZ, &mc, NULL) != 0) {
1183		printf("%s: failed to set big buf size\n", DEVNAME(sc));
1184		goto free_rxbig;
1185	}
1186
1187	if (myx_cmd(sc, MYXCMD_SET_IFUP, &mc, NULL) != 0) {
1188		printf("%s: failed to start the device\n", DEVNAME(sc));
1189		goto free_rxbig;
1190	}
1191
1192	CLR(ifp->if_flags, IFF_OACTIVE);
1193	SET(ifp->if_flags, IFF_RUNNING);
1194
1195	myx_iff(sc);
1196
1197	return;
1198
1199free_rxbig:
1200	while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXBIG])) != NULL) {
1201		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
1202		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1203		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
1204		m_freem(mb->mb_m);
1205		myx_buf_free(sc, mb);
1206	}
1207free_rxsmall:
1208	while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXSMALL])) != NULL) {
1209		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
1210		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1211		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
1212		m_freem(mb->mb_m);
1213		myx_buf_free(sc, mb);
1214	}
1215free_rxbig_bufs:
1216	while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXBIG])) != NULL)
1217		myx_buf_free(sc, mb);
1218free_rxsmall_bufs:
1219	while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXSMALL])) != NULL)
1220		myx_buf_free(sc, mb);
1221free_tx_bufs:
1222	while ((mb = myx_buf_get(&sc->sc_tx_buf_free)) != NULL)
1223		myx_buf_free(sc, mb);
1224free_sts:
1225	bus_dmamap_sync(sc->sc_dmat, sc->sc_sts_dma.mxm_map, 0,
1226	    sc->sc_sts_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1227	myx_dmamem_free(sc, &sc->sc_sts_dma);
1228free_intrq:
1229	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
1230	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1231	myx_dmamem_free(sc, &sc->sc_intrq_dma);
1232free_pad:
1233	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
1234	    sc->sc_paddma.mxm_map->dm_mapsize,
1235	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1236	myx_dmamem_free(sc, &sc->sc_paddma);
1237
1238	memset(&mc, 0, sizeof(mc));
1239	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
1240		printf("%s: failed to reset the device\n", DEVNAME(sc));
1241	}
1242free_zero:
1243	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
1244	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1245	myx_dmamem_free(sc, &sc->sc_zerodma);
1246}
1247
1248int
1249myx_setlladdr(struct myx_softc *sc, u_int32_t cmd, u_int8_t *addr)
1250{
1251	struct myx_cmd		 mc;
1252
1253	memset(&mc, 0, sizeof(mc));
1254	mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3]);
1255	mc.mc_data1 = htobe32(addr[4] << 8 | addr[5]);
1256
1257	if (myx_cmd(sc, cmd, &mc, NULL) != 0) {
1258		printf("%s: failed to set the lladdr\n", DEVNAME(sc));
1259		return (-1);
1260	}
1261	return (0);
1262}
1263
1264void
1265myx_iff(struct myx_softc *sc)
1266{
1267	struct myx_cmd		mc;
1268	struct ifnet		*ifp = &sc->sc_ac.ac_if;
1269	struct ether_multi	*enm;
1270	struct ether_multistep	step;
1271
1272	CLR(ifp->if_flags, IFF_ALLMULTI);
1273
1274	if (myx_cmd(sc, ISSET(ifp->if_flags, IFF_PROMISC) ?
1275	    MYXCMD_SET_PROMISC : MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
1276		printf("%s: failed to configure promisc mode\n", DEVNAME(sc));
1277		return;
1278	}
1279
1280	if (myx_cmd(sc, MYXCMD_SET_ALLMULTI, &mc, NULL) != 0) {
1281		printf("%s: failed to enable ALLMULTI\n", DEVNAME(sc));
1282		return;
1283	}
1284
1285	if (myx_cmd(sc, MYXCMD_UNSET_MCAST, &mc, NULL) != 0) {
1286		printf("%s: failed to leave all mcast groups \n", DEVNAME(sc));
1287		return;
1288	}
1289
1290	if (ISSET(ifp->if_flags, IFF_PROMISC) || sc->sc_ac.ac_multirangecnt > 0) {
1291		SET(ifp->if_flags, IFF_ALLMULTI);
1292		return;
1293	}
1294
1295	ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
1296	while (enm != NULL) {
1297		if (myx_setlladdr(sc, MYXCMD_SET_MCASTGROUP,
1298		    enm->enm_addrlo) != 0) {
1299			printf("%s: failed to join mcast group\n", DEVNAME(sc));
1300			return;
1301		}
1302
1303		ETHER_NEXT_MULTI(step, enm);
1304	}
1305
1306	memset(&mc, 0, sizeof(mc));
1307	if (myx_cmd(sc, MYXCMD_UNSET_ALLMULTI, &mc, NULL) != 0) {
1308		printf("%s: failed to disable ALLMULTI\n", DEVNAME(sc));
1309		return;
1310	}
1311}
1312
/*
 * myx_down: stop the interface and release all tx/rx resources.
 *
 * Waits for the firmware to acknowledge the IFDOWN command (visible
 * as a change of the link-down counter in the shared status block),
 * resets the chip, and frees everything the up path allocated.
 */
void
myx_down(struct myx_softc *sc)
{
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	struct myx_buf		*mb;
	struct myx_cmd		 mc;
	int			 s;

	/* snapshot the firmware link-down counter before sending IFDOWN */
	mtx_enter(&sc->sc_sts_mtx);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	sc->sc_linkdown = sc->sc_sts->ms_linkdown;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
	mtx_leave(&sc->sc_sts_mtx);

	memset(&mc, 0, sizeof(mc));
	(void)myx_cmd(sc, MYXCMD_SET_IFDOWN, &mc, NULL);

	/*
	 * Sleep until the counter changes; myx_intr() does a
	 * wakeup_one(sc->sc_sts) when it sees the change while the
	 * interface is no longer IFF_UP.  Each read of the status
	 * block is bracketed by POSTREAD/PREREAD syncs to keep the
	 * DMA memory coherent with the firmware's updates.
	 */
	mtx_enter(&sc->sc_sts_mtx);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	while (sc->sc_linkdown == sc->sc_sts->ms_linkdown) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);

		msleep(sc->sc_sts, &sc->sc_sts_mtx, 0, "myxdown", 0);

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
	}
	mtx_leave(&sc->sc_sts_mtx);

	/* stop the rx refill timeout before tearing the rings down */
	timeout_del(&sc->sc_refill);

	s = splnet();
	/* NOTE(review): IFF_RUNNING is cleared again below together
	 * with IFF_OACTIVE; this first CLR looks redundant. */
	CLR(ifp->if_flags, IFF_RUNNING);

	/* report the link as gone */
	if (ifp->if_link_state != LINK_STATE_UNKNOWN) {
		ifp->if_link_state = LINK_STATE_UNKNOWN;
		ifp->if_baudrate = 0;
		if_link_state_change(ifp);
	}
	splx(s);

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
	}

	CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);

	/* rx buffers still posted to the rings carry loaded mbufs */
	while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXBIG])) != NULL) {
		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
		m_freem(mb->mb_m);
		myx_buf_free(sc, mb);
	}

	while ((mb = myx_buf_get(&sc->sc_rx_buf_list[MYX_RXSMALL])) != NULL) {
		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
		m_freem(mb->mb_m);
		myx_buf_free(sc, mb);
	}

	/* spare rx bufs on the free lists are unloaded; just free them */
	while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXBIG])) != NULL)
		myx_buf_free(sc, mb);

	while ((mb = myx_buf_get(&sc->sc_rx_buf_free[MYX_RXSMALL])) != NULL)
		myx_buf_free(sc, mb);

	/* in-flight tx buffers also carry mbufs */
	while ((mb = myx_buf_get(&sc->sc_tx_buf_list)) != NULL) {
		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
		m_freem(mb->mb_m);
		myx_buf_free(sc, mb);
	}

	while ((mb = myx_buf_get(&sc->sc_tx_buf_free)) != NULL)
		myx_buf_free(sc, mb);

	/* the msleep loop above already synced this dmamem POSTREAD */
	myx_dmamem_free(sc, &sc->sc_sts_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_intrq_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
	    sc->sc_paddma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_paddma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_zerodma);
}
1414
1415void
1416myx_write_txd_tail(struct myx_softc *sc, struct myx_buf *mb, u_int8_t flags,
1417    u_int32_t offset, u_int idx)
1418{
1419	struct myx_tx_desc		txd;
1420	bus_dmamap_t			zmap = sc->sc_zerodma.mxm_map;
1421	bus_dmamap_t			map = mb->mb_map;
1422	int				i;
1423
1424	for (i = 1; i < map->dm_nsegs; i++) {
1425		memset(&txd, 0, sizeof(txd));
1426		txd.tx_addr = htobe64(map->dm_segs[i].ds_addr);
1427		txd.tx_length = htobe16(map->dm_segs[i].ds_len);
1428		txd.tx_flags = flags;
1429
1430		myx_bus_space_write(sc->sc_memt, sc->sc_memh,
1431		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
1432		    &txd, sizeof(txd));
1433	}
1434
1435	/* pad runt frames */
1436	if (map->dm_mapsize < 60) {
1437		memset(&txd, 0, sizeof(txd));
1438		txd.tx_addr = htobe64(zmap->dm_segs[0].ds_addr);
1439		txd.tx_length = htobe16(60 - map->dm_mapsize);
1440		txd.tx_flags = flags;
1441
1442		myx_bus_space_write(sc->sc_memt, sc->sc_memh,
1443		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
1444		    &txd, sizeof(txd));
1445	}
1446}
1447
1448void
1449myx_start(struct ifnet *ifp)
1450{
1451	struct myx_tx_desc		txd;
1452	SIMPLEQ_HEAD(, myx_buf)		list = SIMPLEQ_HEAD_INITIALIZER(list);
1453	struct myx_softc		*sc = ifp->if_softc;
1454	bus_dmamap_t			map;
1455	struct myx_buf			*mb, *firstmb;
1456	struct mbuf			*m;
1457	u_int32_t			offset = sc->sc_tx_ring_offset;
1458	u_int				idx, firstidx;
1459	u_int8_t			flags;
1460
1461	if (!ISSET(ifp->if_flags, IFF_RUNNING) ||
1462	    ISSET(ifp->if_flags, IFF_OACTIVE) ||
1463	    IFQ_IS_EMPTY(&ifp->if_snd))
1464		return;
1465
1466	for (;;) {
1467		if (sc->sc_tx_free <= sc->sc_tx_nsegs) {
1468			SET(ifp->if_flags, IFF_OACTIVE);
1469			break;
1470		}
1471
1472		IFQ_POLL(&ifp->if_snd, m);
1473		if (m == NULL)
1474			break;
1475
1476		mb = myx_buf_get(&sc->sc_tx_buf_free);
1477		if (mb == NULL) {
1478			SET(ifp->if_flags, IFF_OACTIVE);
1479			break;
1480		}
1481
1482		IFQ_DEQUEUE(&ifp->if_snd, m);
1483		if (myx_load_buf(sc, mb, m) != 0) {
1484			m_freem(m);
1485			myx_buf_put(&sc->sc_tx_buf_free, mb);
1486			ifp->if_oerrors++;
1487			break;
1488		}
1489
1490#if NBPFILTER > 0
1491		if (ifp->if_bpf)
1492			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1493#endif
1494
1495		mb->mb_m = m;
1496
1497		map = mb->mb_map;
1498		bus_dmamap_sync(sc->sc_dmat, map, 0,
1499		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1500
1501		SIMPLEQ_INSERT_TAIL(&list, mb, mb_entry);
1502
1503		sc->sc_tx_free -= map->dm_nsegs +
1504		    (map->dm_mapsize < 60 ? 1 : 0);
1505	}
1506
1507	/* post the first descriptor last */
1508	firstmb = SIMPLEQ_FIRST(&list);
1509	if (firstmb == NULL)
1510		return;
1511
1512	SIMPLEQ_REMOVE_HEAD(&list, mb_entry);
1513	myx_buf_put(&sc->sc_tx_buf_list, firstmb);
1514
1515	idx = firstidx = sc->sc_tx_ring_idx;
1516	idx += firstmb->mb_map->dm_nsegs +
1517	    (firstmb->mb_map->dm_mapsize < 60 ? 1 : 0);
1518	idx %= sc->sc_tx_ring_count;
1519
1520	while ((mb = SIMPLEQ_FIRST(&list)) != NULL) {
1521		SIMPLEQ_REMOVE_HEAD(&list, mb_entry);
1522		myx_buf_put(&sc->sc_tx_buf_list, mb);
1523
1524		map = mb->mb_map;
1525
1526		flags = MYXTXD_FLAGS_NO_TSO;
1527		if (map->dm_mapsize < 1520)
1528			flags |= MYXTXD_FLAGS_SMALL;
1529
1530		memset(&txd, 0, sizeof(txd));
1531		txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
1532		txd.tx_length = htobe16(map->dm_segs[0].ds_len);
1533		txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
1534		txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;
1535		myx_bus_space_write(sc->sc_memt, sc->sc_memh,
1536		    offset + sizeof(txd) * idx, &txd, sizeof(txd));
1537
1538		myx_write_txd_tail(sc, mb, flags, offset, idx);
1539
1540		idx += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
1541		idx %= sc->sc_tx_ring_count;
1542	}
1543	sc->sc_tx_ring_idx = idx;
1544
1545	/* go back and post first mb */
1546	map = firstmb->mb_map;
1547
1548	flags = MYXTXD_FLAGS_NO_TSO;
1549	if (map->dm_mapsize < 1520)
1550		flags |= MYXTXD_FLAGS_SMALL;
1551
1552	txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
1553	txd.tx_length = htobe16(map->dm_segs[0].ds_len);
1554	txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
1555	txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;
1556
1557	/* make sure the first descriptor is seen after the others */
1558	myx_write_txd_tail(sc, firstmb, flags, offset, firstidx);
1559
1560	myx_bus_space_write(sc->sc_memt, sc->sc_memh,
1561	    offset + sizeof(txd) * firstidx, &txd,
1562	    sizeof(txd) - sizeof(myx_bus_t));
1563
1564	bus_space_barrier(sc->sc_memt, sc->sc_memh, offset,
1565	    sizeof(txd) * sc->sc_tx_ring_count, BUS_SPACE_BARRIER_WRITE);
1566
1567	myx_bus_space_write(sc->sc_memt, sc->sc_memh,
1568	    offset + sizeof(txd) * (firstidx + 1) - sizeof(myx_bus_t),
1569	    (u_int8_t *)&txd + sizeof(txd) - sizeof(myx_bus_t),
1570	    sizeof(myx_bus_t));
1571
1572	bus_space_barrier(sc->sc_memt, sc->sc_memh,
1573	    offset + sizeof(txd) * firstidx, sizeof(txd),
1574	    BUS_SPACE_BARRIER_WRITE);
1575}
1576
1577int
1578myx_load_buf(struct myx_softc *sc, struct myx_buf *mb, struct mbuf *m)
1579{
1580	bus_dma_tag_t			dmat = sc->sc_dmat;
1581	bus_dmamap_t			dmap = mb->mb_map;
1582
1583	switch (bus_dmamap_load_mbuf(dmat, dmap, m,
1584	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
1585	case 0:
1586		break;
1587
1588	case EFBIG: /* mbuf chain is too fragmented */
1589		if (m_defrag(m, M_DONTWAIT) == 0 &&
1590		    bus_dmamap_load_mbuf(dmat, dmap, m,
1591		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
1592			break;
1593	default:
1594		return (1);
1595	}
1596
1597	mb->mb_m = m;
1598	return (0);
1599}
1600
/*
 * myx_intr: interrupt handler.
 *
 * The firmware reports events through the shared status block
 * (sc->sc_sts) and the interrupt queue.  Acknowledge the interrupt,
 * reap tx completions and received packets for as long as the
 * firmware keeps re-marking the status block valid, claim the
 * interrupt, and finally kick the transmit and rx-refill paths.
 */
int
myx_intr(void *arg)
{
	struct myx_softc	*sc = (struct myx_softc *)arg;
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	volatile struct myx_status *sts = sc->sc_sts;
	bus_dmamap_t		 map = sc->sc_sts_dma.mxm_map;
	u_int32_t		 data, link;
	int			 refill = 0;
	u_int8_t		 valid = 0;
	u_int			 if_flags;
	int			 i;

	/* if_flags is read under the kernel lock */
	KERNEL_LOCK();
	if_flags = ifp->if_flags;
	KERNEL_UNLOCK();

	if (!ISSET(if_flags, IFF_RUNNING))
		return (0);

	mtx_enter(&sc->sc_sts_mtx);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

	valid = sts->ms_isvalid;
	if (valid == 0x0) {
		/* not our interrupt */
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		mtx_leave(&sc->sc_sts_mtx);
		return (0);
	}
	sts->ms_isvalid = 0;

	if (sc->sc_intx) {
		/* legacy INTx requires an explicit deassert write */
		data = htobe32(0);
		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqdeassertoff, &data, sizeof(data));
	}

	if (!ISSET(if_flags, IFF_UP) &&
	    sc->sc_linkdown != sts->ms_linkdown) {
		/* myx_down is waiting for us */
		wakeup_one(sc->sc_sts);
	}

	/* cache the link state; 0xffffffff means "no change reported" */
	link = sts->ms_statusupdated ? sts->ms_linkstate : 0xffffffff;

	/*
	 * Drop the status mutex while running the completion paths and
	 * loop while the firmware keeps flagging the block valid again.
	 */
	do {
		data = betoh32(sts->ms_txdonecnt);
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		mtx_leave(&sc->sc_sts_mtx);

		if (data != sc->sc_tx_count)
			myx_txeof(sc, data);

		/* accumulate the mask of rx rings that need refilling */
		refill |= myx_rxeof(sc);

		mtx_enter(&sc->sc_sts_mtx);
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
	} while (sts->ms_isvalid);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
	mtx_leave(&sc->sc_sts_mtx);

	if (link != 0xffffffff) {
		KERNEL_LOCK();
		myx_link_state(sc, link);
		KERNEL_UNLOCK();
	}

	/*
	 * Claim the interrupt.  NOTE(review): the first claim write is
	 * conditional on bit 0 of ms_isvalid while the second is
	 * unconditional; presumably these are the firmware's two claim
	 * slots -- confirm against the Myri-10G firmware interface.
	 */
	data = htobe32(3);
	if (valid & 0x1) {
		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqclaimoff, &data, sizeof(data));
	}
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
	    sc->sc_irqclaimoff + sizeof(data), &data, sizeof(data));
	bus_space_barrier(sc->sc_memt, sc->sc_memh,
	    sc->sc_irqclaimoff, sizeof(data) * 2, BUS_SPACE_BARRIER_WRITE);

	/* tx completions may have freed ring space; restart output */
	if (ISSET(if_flags, IFF_OACTIVE)) {
		KERNEL_LOCK();
		CLR(ifp->if_flags, IFF_OACTIVE);
		myx_start(ifp);
		KERNEL_UNLOCK();
	}

	/* refill the rings we consumed; defer to the timeout if empty */
	for (i = 0; i < 2; i++) {
		if (ISSET(refill, 1 << i)) {
			if (myx_rx_fill(sc, i) >= 0 &&
			    myx_bufs_empty(&sc->sc_rx_buf_list[i]))
				timeout_add(&sc->sc_refill, 0);
		}
	}

	return (1);
}
1701
1702void
1703myx_refill(void *xsc)
1704{
1705	struct myx_softc *sc = xsc;
1706	int i;
1707
1708	for (i = 0; i < 2; i++) {
1709		if (myx_rx_fill(sc, i) >= 0 &&
1710		    myx_bufs_empty(&sc->sc_rx_buf_list[i]))
1711			timeout_add(&sc->sc_refill, 1);
1712	}
1713}
1714
/*
 * myx_txeof: reclaim transmit buffers completed by the firmware.
 *
 * done_count is the firmware's running total of completed sends;
 * buffers are reaped in order until our own counter catches up
 * (the do/while comparison handles counter wraparound).
 */
void
myx_txeof(struct myx_softc *sc, u_int32_t done_count)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct myx_buf *mb;
	struct mbuf *m;
	bus_dmamap_t map;

	do {
		mb = myx_buf_get(&sc->sc_tx_buf_list);
		if (mb == NULL) {
			/* firmware reported more completions than we
			 * have outstanding buffers; should not happen */
			printf("oh noes, no mb!\n");
			break;
		}

		m = mb->mb_m;
		map = mb->mb_map;

		/* return the ring credits, incl. the runt pad slot */
		sc->sc_tx_free += map->dm_nsegs;
		if (map->dm_mapsize < 60)
			sc->sc_tx_free += 1;

		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

		/* unload/m_freem and if counters under the kernel lock */
		KERNEL_LOCK();
		bus_dmamap_unload(sc->sc_dmat, map);
		m_freem(m);
		ifp->if_opackets++;
		KERNEL_UNLOCK();

		myx_buf_put(&sc->sc_tx_buf_free, mb);
	} while (++sc->sc_tx_count != done_count);
}
1749
/*
 * myx_rxeof: process rx completion events from the interrupt queue.
 *
 * Every entry with a non-zero iq_length describes one received frame;
 * frames fitting in MCLBYTES - ETHER_ALIGN came from the small ring,
 * larger ones from the big ring.  Returns a bitmask of the rings that
 * had buffers consumed so the caller knows which ones to refill.
 */
int
myx_rxeof(struct myx_softc *sc)
{
	static const struct myx_intrq_desc zerodesc = { 0, 0 };
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct myx_buf *mb;
	struct mbuf *m;
	int ring;
	int rings = 0;
	u_int len;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	while ((len = betoh16(sc->sc_intrq[sc->sc_intrq_idx].iq_length)) != 0) {
		/* clear the slot so it reads as empty next time around */
		sc->sc_intrq[sc->sc_intrq_idx] = zerodesc;

		if (++sc->sc_intrq_idx >= sc->sc_intrq_count)
			sc->sc_intrq_idx = 0;

		/* length decides which ring the buffer came from */
		ring = (len <= (MCLBYTES - ETHER_ALIGN)) ?
		    MYX_RXSMALL : MYX_RXBIG;

		mb = myx_buf_get(&sc->sc_rx_buf_list[ring]);
		if (mb == NULL) {
			/* completion event without a posted buffer */
			printf("oh noes, no mb!\n");
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0,
		    mb->mb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/* NOTE(review): assumes the firmware deposits the frame
		 * ETHER_ALIGN bytes into the cluster so the IP header
		 * ends up aligned -- confirm against the firmware docs */
		m = mb->mb_m;
		m->m_data += ETHER_ALIGN;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

		/* unload/bpf/ether_input run under the kernel lock */
		KERNEL_LOCK();
		bus_dmamap_unload(sc->sc_dmat, mb->mb_map);
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		ether_input_mbuf(ifp, m);
		ifp->if_ipackets++;
		KERNEL_UNLOCK();

		myx_buf_put(&sc->sc_rx_buf_free[ring], mb);

		SET(rings, 1 << ring);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	return (rings);
}
1808
1809void
1810myx_rx_zero(struct myx_softc *sc, int ring)
1811{
1812	struct myx_rx_desc rxd;
1813	u_int32_t offset = sc->sc_rx_ring_offset[ring];
1814	int idx;
1815
1816	sc->sc_rx_ring_idx[ring] = 0;
1817
1818	memset(&rxd, 0xff, sizeof(rxd));
1819	for (idx = 0; idx < sc->sc_rx_ring_count; idx++) {
1820		myx_write(sc, offset + idx * sizeof(rxd),
1821		    &rxd, sizeof(rxd));
1822	}
1823}
1824
/*
 * myx_rx_fill: top up an rx ring with freshly loaded buffers.
 *
 * sc_rx_ring_lock serializes fillers without blocking: if another
 * context already owns the ring we return -1 and the owner loops
 * again on our behalf (see myx_ring_enter/myx_ring_leave).  The
 * first descriptor of each batch is written last so the chip never
 * sees a partially posted run.  Returns 0 if at least one buffer was
 * posted, 1 if none could be.
 */
int
myx_rx_fill(struct myx_softc *sc, int ring)
{
	struct myx_rx_desc rxd;
	struct myx_buf *mb, *firstmb;
	u_int32_t offset = sc->sc_rx_ring_offset[ring];
	u_int idx, firstidx;
	int rv = 1;

	if (!myx_ring_enter(&sc->sc_rx_ring_lock[ring]))
		return (-1);

	do {
		/* no buffer at all: jump to the ring-leave check below */
		firstmb = myx_buf_fill(sc, ring);
		if (firstmb == NULL)
			continue;

		rv = 0;
		myx_buf_put(&sc->sc_rx_buf_list[ring], firstmb);

		/* reserve the first slot; it is written last */
		firstidx = sc->sc_rx_ring_idx[ring];
		idx = firstidx + 1;
		idx %= sc->sc_rx_ring_count;

		/* post as many further buffers as we can allocate */
		while ((mb = myx_buf_fill(sc, ring)) != NULL) {
			myx_buf_put(&sc->sc_rx_buf_list[ring], mb);

			rxd.rx_addr = htobe64(mb->mb_map->dm_segs[0].ds_addr);
			myx_bus_space_write(sc->sc_memt, sc->sc_memh,
			    offset + idx * sizeof(rxd), &rxd, sizeof(rxd));

			idx++;
			idx %= sc->sc_rx_ring_count;
		}

		/* make sure the first descriptor is seen after the others */
		if (idx != firstidx + 1) {
			bus_space_barrier(sc->sc_memt, sc->sc_memh,
			    offset, sizeof(rxd) * sc->sc_rx_ring_count,
			    BUS_SPACE_BARRIER_WRITE);
		}

		rxd.rx_addr = htobe64(firstmb->mb_map->dm_segs[0].ds_addr);
		myx_write(sc, offset + firstidx * sizeof(rxd),
		    &rxd, sizeof(rxd));

		sc->sc_rx_ring_idx[ring] = idx;
	} while (!myx_ring_leave(&sc->sc_rx_ring_lock[ring]));

	return (rv);
}
1876
/*
 * myx_buf_fill: prepare one rx buffer for the given ring.
 *
 * Grabs an mbuf cluster (MCLBYTES for the small ring, 12k for the
 * big ring), pairs it with a spare buf from the ring's free list and
 * DMA-loads it, leaving the map synced for the device to write into.
 * Returns NULL on any failure; the mbuf and buf are given back.
 */
struct myx_buf *
myx_buf_fill(struct myx_softc *sc, int ring)
{
	/* per-ring cluster sizes, indexed by MYX_RXSMALL/MYX_RXBIG */
	static size_t sizes[2] = { MCLBYTES, 12 * 1024 };
	struct myx_buf *mb;
	struct mbuf *m;
	int rv;

	/* NOTE(review): mbuf allocation and map load are done under
	 * the kernel lock; presumably they are not mpsafe -- confirm */
	KERNEL_LOCK();
	m = MCLGETI(NULL, M_DONTWAIT, &sc->sc_ac.ac_if, sizes[ring]);
	KERNEL_UNLOCK();
	if (m == NULL)
		return (NULL);
	m->m_len = m->m_pkthdr.len = sizes[ring];

	mb = myx_buf_get(&sc->sc_rx_buf_free[ring]);
	if (mb == NULL)
		goto mfree;

	KERNEL_LOCK();
	rv = bus_dmamap_load_mbuf(sc->sc_dmat, mb->mb_map, m, BUS_DMA_NOWAIT);
	KERNEL_UNLOCK();
	if (rv != 0)
		goto put;

	mb->mb_m = m;
	/* hand the buffer memory over to the device */
	bus_dmamap_sync(sc->sc_dmat, mb->mb_map, 0, mb->mb_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	return (mb);

put:
	myx_buf_put(&sc->sc_rx_buf_free[ring], mb);
mfree:
	KERNEL_LOCK();
	m_freem(m);
	KERNEL_UNLOCK();

	return (NULL);
}
1917
1918struct myx_buf *
1919myx_buf_alloc(struct myx_softc *sc, bus_size_t size, int nsegs,
1920    bus_size_t maxsegsz, bus_size_t boundary)
1921{
1922	struct myx_buf *mb;
1923
1924	mb = pool_get(myx_buf_pool, PR_WAITOK);
1925	if (mb == NULL)
1926		return (NULL);
1927
1928	if (bus_dmamap_create(sc->sc_dmat, size, nsegs, maxsegsz, boundary,
1929	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mb->mb_map) != 0) {
1930		pool_put(myx_buf_pool, mb);
1931		return (NULL);
1932	}
1933
1934	return (mb);
1935}
1936
/*
 * Destroy a buf's DMA map and return the buf to the shared pool.
 * Callers unload the map and free mb->mb_m first where applicable.
 */
void
myx_buf_free(struct myx_softc *sc, struct myx_buf *mb)
{
	bus_dmamap_destroy(sc->sc_dmat, mb->mb_map);
	pool_put(myx_buf_pool, mb);
}
1943
1944struct myx_buf *
1945myx_buf_get(struct myx_buf_list *mbl)
1946{
1947	struct myx_buf *mb;
1948
1949	mtx_enter(&mbl->mbl_mtx);
1950	mb = SIMPLEQ_FIRST(&mbl->mbl_q);
1951	if (mb != NULL)
1952		SIMPLEQ_REMOVE_HEAD(&mbl->mbl_q, mb_entry);
1953	mtx_leave(&mbl->mbl_mtx);
1954
1955	return (mb);
1956}
1957
1958int
1959myx_bufs_empty(struct myx_buf_list *mbl)
1960{
1961	int rv;
1962
1963	mtx_enter(&mbl->mbl_mtx);
1964	rv = SIMPLEQ_EMPTY(&mbl->mbl_q);
1965	mtx_leave(&mbl->mbl_mtx);
1966
1967	return (rv);
1968}
1969
/*
 * Append a buf to the tail of a buf list under its mutex.
 */
void
myx_buf_put(struct myx_buf_list *mbl, struct myx_buf *mb)
{
	mtx_enter(&mbl->mbl_mtx);
	SIMPLEQ_INSERT_TAIL(&mbl->mbl_q, mb, mb_entry);
	mtx_leave(&mbl->mbl_mtx);
}
1977
/*
 * Initialize a buf list: an empty queue plus the mutex that protects
 * it against the interrupt path (IPL_NET).
 */
void
myx_bufs_init(struct myx_buf_list *mbl)
{
	SIMPLEQ_INIT(&mbl->mbl_q);
	mtx_init(&mbl->mbl_mtx, IPL_NET);
}
1984
/*
 * Initialize a ring lock: no filler is running yet.  See
 * myx_ring_enter()/myx_ring_leave() for the usage protocol.
 */
void
myx_ring_lock_init(struct myx_ring_lock *mrl)
{
	mtx_init(&mrl->mrl_mtx, IPL_NET);
	mrl->mrl_running = 0;
}
1991
1992int
1993myx_ring_enter(struct myx_ring_lock *mrl)
1994{
1995	int rv = 1;
1996
1997	mtx_enter(&mrl->mrl_mtx);
1998	if (++mrl->mrl_running > 1)
1999		rv = 0;
2000	mtx_leave(&mrl->mrl_mtx);
2001
2002	return (rv);
2003}
2004
2005int
2006myx_ring_leave(struct myx_ring_lock *mrl)
2007{
2008	int rv = 1;
2009
2010	mtx_enter(&mrl->mrl_mtx);
2011	if (--mrl->mrl_running > 0) {
2012		mrl->mrl_running = 1;
2013		rv = 0;
2014	}
2015	mtx_leave(&mrl->mrl_mtx);
2016
2017	return (rv);
2018}
2019