if_msk.c revision 165883
1/******************************************************************************
2 *
3 * Name   : sky2.c
4 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
5 * Version: $Revision: 1.23 $
6 * Date   : $Date: 2005/12/22 09:04:11 $
7 * Purpose: Main driver source file
8 *
9 *****************************************************************************/
10
11/******************************************************************************
12 *
13 *	LICENSE:
14 *	Copyright (C) Marvell International Ltd. and/or its affiliates
15 *
16 *	The computer program files contained in this folder ("Files")
17 *	are provided to you under the BSD-type license terms provided
18 *	below, and any use of such Files and any derivative works
19 *	thereof created by you shall be governed by the following terms
20 *	and conditions:
21 *
22 *	- Redistributions of source code must retain the above copyright
23 *	  notice, this list of conditions and the following disclaimer.
24 *	- Redistributions in binary form must reproduce the above
25 *	  copyright notice, this list of conditions and the following
26 *	  disclaimer in the documentation and/or other materials provided
27 *	  with the distribution.
28 *	- Neither the name of Marvell nor the names of its contributors
29 *	  may be used to endorse or promote products derived from this
30 *	  software without specific prior written permission.
31 *
32 *	THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
33 *	"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
34 *	LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 *	FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
36 *	COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
37 *	INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
38 *	BUT NOT LIMITED TO, PROCUREMENT OF  SUBSTITUTE GOODS OR SERVICES;
39 *	LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40 *	HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
41 *	STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42 *	ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
43 *	OF THE POSSIBILITY OF SUCH DAMAGE.
44 *	/LICENSE
45 *
46 *****************************************************************************/
47
48/*-
49 * Copyright (c) 1997, 1998, 1999, 2000
50 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
51 *
52 * Redistribution and use in source and binary forms, with or without
53 * modification, are permitted provided that the following conditions
54 * are met:
55 * 1. Redistributions of source code must retain the above copyright
56 *    notice, this list of conditions and the following disclaimer.
57 * 2. Redistributions in binary form must reproduce the above copyright
58 *    notice, this list of conditions and the following disclaimer in the
59 *    documentation and/or other materials provided with the distribution.
60 * 3. All advertising materials mentioning features or use of this software
61 *    must display the following acknowledgement:
62 *	This product includes software developed by Bill Paul.
63 * 4. Neither the name of the author nor the names of any co-contributors
64 *    may be used to endorse or promote products derived from this software
65 *    without specific prior written permission.
66 *
67 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
68 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
69 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
70 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
71 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
72 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
73 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
74 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
75 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
76 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
77 * THE POSSIBILITY OF SUCH DAMAGE.
78 */
79/*-
80 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
81 *
82 * Permission to use, copy, modify, and distribute this software for any
83 * purpose with or without fee is hereby granted, provided that the above
84 * copyright notice and this permission notice appear in all copies.
85 *
86 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
87 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
88 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
89 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
90 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
91 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
92 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
93 */
94
95/*
96 * Device driver for the Marvell Yukon II Ethernet controller.
97 * Due to lack of documentation, this driver is based on the code from
98 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
99 */
100
101#include <sys/cdefs.h>
102__FBSDID("$FreeBSD: head/sys/dev/msk/if_msk.c 165883 2007-01-08 00:58:00Z yongari $");
103
104#include <sys/param.h>
105#include <sys/systm.h>
106#include <sys/bus.h>
107#include <sys/endian.h>
108#include <sys/mbuf.h>
109#include <sys/malloc.h>
110#include <sys/kernel.h>
111#include <sys/module.h>
112#include <sys/socket.h>
113#include <sys/sockio.h>
114#include <sys/queue.h>
115#include <sys/sysctl.h>
116#include <sys/taskqueue.h>
117
118#include <net/bpf.h>
119#include <net/ethernet.h>
120#include <net/if.h>
121#include <net/if_arp.h>
122#include <net/if_dl.h>
123#include <net/if_media.h>
124#include <net/if_types.h>
125#include <net/if_vlan_var.h>
126
127#include <netinet/in.h>
128#include <netinet/in_systm.h>
129#include <netinet/ip.h>
130#include <netinet/tcp.h>
131#include <netinet/udp.h>
132
133#include <machine/bus.h>
134#include <machine/resource.h>
135#include <sys/rman.h>
136
137#include <dev/mii/mii.h>
138#include <dev/mii/miivar.h>
139#include <dev/mii/brgphyreg.h>
140
141#include <dev/pci/pcireg.h>
142#include <dev/pci/pcivar.h>
143
144#include <dev/msk/if_mskreg.h>
145
146MODULE_DEPEND(msk, pci, 1, 1, 1);
147MODULE_DEPEND(msk, ether, 1, 1, 1);
148MODULE_DEPEND(msk, miibus, 1, 1, 1);
149
150/* "device miibus" required.  See GENERIC if you get errors here. */
151#include "miibus_if.h"
152
153/* Tunables. */
154static int msi_disable = 0;
155TUNABLE_INT("hw.msk.msi_disable", &msi_disable);
156
157#define MSK_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
158
159/*
160 * Devices supported by this driver.
161 */
162static struct msk_product {
163	uint16_t	msk_vendorid;
164	uint16_t	msk_deviceid;
165	const char	*msk_name;
166} msk_products[] = {
167	{ VENDORID_SK, DEVICEID_SK_YUKON2,
168	    "SK-9Sxx Gigabit Ethernet" },
169	{ VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
170	    "SK-9Exx Gigabit Ethernet"},
171	{ VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
172	    "Marvell Yukon 88E8021CU Gigabit Ethernet" },
173	{ VENDORID_MARVELL, DEVICEID_MRVL_8021X,
174	    "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
175	{ VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
176	    "Marvell Yukon 88E8022CU Gigabit Ethernet" },
177	{ VENDORID_MARVELL, DEVICEID_MRVL_8022X,
178	    "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
179	{ VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
180	    "Marvell Yukon 88E8061CU Gigabit Ethernet" },
181	{ VENDORID_MARVELL, DEVICEID_MRVL_8061X,
182	    "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
183	{ VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
184	    "Marvell Yukon 88E8062CU Gigabit Ethernet" },
185	{ VENDORID_MARVELL, DEVICEID_MRVL_8062X,
186	    "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
187	{ VENDORID_MARVELL, DEVICEID_MRVL_8035,
188	    "Marvell Yukon 88E8035 Gigabit Ethernet" },
189	{ VENDORID_MARVELL, DEVICEID_MRVL_8036,
190	    "Marvell Yukon 88E8036 Gigabit Ethernet" },
191	{ VENDORID_MARVELL, DEVICEID_MRVL_8038,
192	    "Marvell Yukon 88E8038 Gigabit Ethernet" },
193	{ VENDORID_MARVELL, DEVICEID_MRVL_4361,
194	    "Marvell Yukon 88E8050 Gigabit Ethernet" },
195	{ VENDORID_MARVELL, DEVICEID_MRVL_4360,
196	    "Marvell Yukon 88E8052 Gigabit Ethernet" },
197	{ VENDORID_MARVELL, DEVICEID_MRVL_4362,
198	    "Marvell Yukon 88E8053 Gigabit Ethernet" },
199	{ VENDORID_MARVELL, DEVICEID_MRVL_4363,
200	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
201	{ VENDORID_MARVELL, DEVICEID_MRVL_4364,
202	    "Marvell Yukon 88E8056 Gigabit Ethernet" },
203	{ VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
204	    "D-Link 550SX Gigabit Ethernet" },
205	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
206	    "D-Link 560T Gigabit Ethernet" }
207};
208
209static const char *model_name[] = {
210	"Yukon XL",
211	"Yukon EC Ultra",
212	"Yukon Unknown",
213	"Yukon EC",
214	"Yukon FE"
215};
216
217static int mskc_probe(device_t);
218static int mskc_attach(device_t);
219static int mskc_detach(device_t);
220static void mskc_shutdown(device_t);
221static int mskc_setup_rambuffer(struct msk_softc *);
222static int mskc_suspend(device_t);
223static int mskc_resume(device_t);
224static void mskc_reset(struct msk_softc *);
225
226static int msk_probe(device_t);
227static int msk_attach(device_t);
228static int msk_detach(device_t);
229
230static void msk_tick(void *);
231static void msk_intr(void *);
232static void msk_int_task(void *, int);
233static void msk_intr_phy(struct msk_if_softc *);
234static void msk_intr_gmac(struct msk_if_softc *);
235static __inline void msk_rxput(struct msk_if_softc *);
236static int msk_handle_events(struct msk_softc *);
237static void msk_handle_hwerr(struct msk_if_softc *, uint32_t);
238static void msk_intr_hwerr(struct msk_softc *);
239static void msk_rxeof(struct msk_if_softc *, uint32_t, int);
240static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int);
241static void msk_txeof(struct msk_if_softc *, int);
242static struct mbuf *msk_defrag(struct mbuf *, int, int);
243static int msk_encap(struct msk_if_softc *, struct mbuf **);
244static void msk_tx_task(void *, int);
245static void msk_start(struct ifnet *);
246static int msk_ioctl(struct ifnet *, u_long, caddr_t);
247static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
248static void msk_set_rambuffer(struct msk_if_softc *);
249static void msk_init(void *);
250static void msk_init_locked(struct msk_if_softc *);
251static void msk_stop(struct msk_if_softc *);
252static void msk_watchdog(struct msk_if_softc *);
253static int msk_mediachange(struct ifnet *);
254static void msk_mediastatus(struct ifnet *, struct ifmediareq *);
255static void msk_phy_power(struct msk_softc *, int);
256static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
257static int msk_status_dma_alloc(struct msk_softc *);
258static void msk_status_dma_free(struct msk_softc *);
259static int msk_txrx_dma_alloc(struct msk_if_softc *);
260static void msk_txrx_dma_free(struct msk_if_softc *);
261static void *msk_jalloc(struct msk_if_softc *);
262static void msk_jfree(void *, void *);
263static int msk_init_rx_ring(struct msk_if_softc *);
264static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
265static void msk_init_tx_ring(struct msk_if_softc *);
266static __inline void msk_discard_rxbuf(struct msk_if_softc *, int);
267static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
268static int msk_newbuf(struct msk_if_softc *, int);
269static int msk_jumbo_newbuf(struct msk_if_softc *, int);
270
271static int msk_phy_readreg(struct msk_if_softc *, int, int);
272static int msk_phy_writereg(struct msk_if_softc *, int, int, int);
273static int msk_miibus_readreg(device_t, int, int);
274static int msk_miibus_writereg(device_t, int, int, int);
275static void msk_miibus_statchg(device_t);
276static void msk_link_task(void *, int);
277
278static void msk_setmulti(struct msk_if_softc *);
279static void msk_setvlan(struct msk_if_softc *, struct ifnet *);
280static void msk_setpromisc(struct msk_if_softc *);
281
282static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
283static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS);
284
285static device_method_t mskc_methods[] = {
286	/* Device interface */
287	DEVMETHOD(device_probe,		mskc_probe),
288	DEVMETHOD(device_attach,	mskc_attach),
289	DEVMETHOD(device_detach,	mskc_detach),
290	DEVMETHOD(device_suspend,	mskc_suspend),
291	DEVMETHOD(device_resume,	mskc_resume),
292	DEVMETHOD(device_shutdown,	mskc_shutdown),
293
294	/* bus interface */
295	DEVMETHOD(bus_print_child,	bus_generic_print_child),
296	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
297
298	{ NULL, NULL }
299};
300
301static driver_t mskc_driver = {
302	"mskc",
303	mskc_methods,
304	sizeof(struct msk_softc)
305};
306
307static devclass_t mskc_devclass;
308
309static device_method_t msk_methods[] = {
310	/* Device interface */
311	DEVMETHOD(device_probe,		msk_probe),
312	DEVMETHOD(device_attach,	msk_attach),
313	DEVMETHOD(device_detach,	msk_detach),
314	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
315
316	/* bus interface */
317	DEVMETHOD(bus_print_child,	bus_generic_print_child),
318	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
319
320	/* MII interface */
321	DEVMETHOD(miibus_readreg,	msk_miibus_readreg),
322	DEVMETHOD(miibus_writereg,	msk_miibus_writereg),
323	DEVMETHOD(miibus_statchg,	msk_miibus_statchg),
324
325	{ NULL, NULL }
326};
327
328static driver_t msk_driver = {
329	"msk",
330	msk_methods,
331	sizeof(struct msk_if_softc)
332};
333
334static devclass_t msk_devclass;
335
336DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, 0, 0);
337DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, 0, 0);
338DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, 0, 0);
339
340static struct resource_spec msk_res_spec_io[] = {
341	{ SYS_RES_IOPORT,	PCIR_BAR(1),	RF_ACTIVE },
342	{ -1,			0,		0 }
343};
344
345static struct resource_spec msk_res_spec_mem[] = {
346	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
347	{ -1,			0,		0 }
348};
349
350static struct resource_spec msk_irq_spec_legacy[] = {
351	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
352	{ -1,			0,		0 }
353};
354
355static struct resource_spec msk_irq_spec_msi[] = {
356	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
357	{ SYS_RES_IRQ,		2,		RF_ACTIVE },
358	{ -1,			0,		0 }
359};
360
361static int
362msk_miibus_readreg(device_t dev, int phy, int reg)
363{
364	struct msk_if_softc *sc_if;
365
366	sc_if = device_get_softc(dev);
367
368	return (msk_phy_readreg(sc_if, phy, reg));
369}
370
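/*
 * Read a PHY register through the GMAC SMI interface: issue the read
 * command, then busy-wait (up to MSK_TIMEOUT iterations) for the
 * read-valid bit before fetching the result from the data register.
 */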
371static int
372msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
373{
374	struct msk_softc *sc;
375	int i, val;
376
377	sc = sc_if->msk_softc;
378
379	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
380	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
381
382	for (i = 0; i < MSK_TIMEOUT; i++) {
383		DELAY(1);
384		val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
385		if ((val & GM_SMI_CT_RD_VAL) != 0) {
386			val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
387			break;
388		}
389	}
390
391	if (i == MSK_TIMEOUT) {
392		if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
393		val = 0;
394	}
395
396	return (val);
397}
398
399static int
400msk_miibus_writereg(device_t dev, int phy, int reg, int val)
401{
402	struct msk_if_softc *sc_if;
403
404	sc_if = device_get_softc(dev);
405
406	return (msk_phy_writereg(sc_if, phy, reg, val));
407}
408
409static int
410msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
411{
412	struct msk_softc *sc;
413	int i;
414
415	sc = sc_if->msk_softc;
416
417	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
418	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
419	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
420	for (i = 0; i < MSK_TIMEOUT; i++) {
421		DELAY(1);
422		if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
423		    GM_SMI_CT_BUSY) == 0)
424			break;
425	}
426	if (i == MSK_TIMEOUT)
427		if_printf(sc_if->msk_ifp, "phy write timeout\n");
428
429	return (0);
430}
431
432static void
433msk_miibus_statchg(device_t dev)
434{
435	struct msk_if_softc *sc_if;
436
437	sc_if = device_get_softc(dev);
438	taskqueue_enqueue(taskqueue_swi, &sc_if->msk_link_task);
439}
440
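/*
 * Deferred link-state handler, run from the swi taskqueue when mii(4)
 * reports a status change.  On link up the GMAC is programmed with the
 * negotiated speed, duplex and flow-control settings and Rx/Tx are
 * enabled; on link down PHY interrupts are masked and the MAC disabled.
 */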
441static void
442msk_link_task(void *arg, int pending)
443{
444	struct msk_softc *sc;
445	struct msk_if_softc *sc_if;
446	struct mii_data *mii;
447	struct ifnet *ifp;
448	uint32_t gmac;
449
450	sc_if = (struct msk_if_softc *)arg;
451	sc = sc_if->msk_softc;
452
453	MSK_IF_LOCK(sc_if);
454
455	mii = device_get_softc(sc_if->msk_miibus);
456	ifp = sc_if->msk_ifp;
457	if (mii == NULL || ifp == NULL) {
458		MSK_IF_UNLOCK(sc_if);
459		return;
460	}
461
462	if (mii->mii_media_status & IFM_ACTIVE) {
463		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
464			sc_if->msk_link = 1;
465	} else
466		sc_if->msk_link = 0;
467
468	if (sc_if->msk_link != 0) {
469		/* Enable Tx FIFO Underrun. */
470		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
471		    GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
472		/*
473		 * Because mii(4) notifies msk(4) when it detects a link state
474		 * change, there is no need to enable automatic
475		 * speed/flow-control/duplex updates.
476		 */
477		gmac = GM_GPCR_AU_ALL_DIS;
478		switch (IFM_SUBTYPE(mii->mii_media_active)) {
479		case IFM_1000_SX:
480		case IFM_1000_T:
481			gmac |= GM_GPCR_SPEED_1000;
482			break;
483		case IFM_100_TX:
484			gmac |= GM_GPCR_SPEED_100;
485			break;
486		case IFM_10_T:
487			break;
488		}
489
490		if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
491			gmac |= GM_GPCR_DUP_FULL;
492		/* Disable Rx flow control. */
493		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0)
494			gmac |= GM_GPCR_FC_RX_DIS;
495		/* Disable Tx flow control. */
496		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0)
497			gmac |= GM_GPCR_FC_TX_DIS;
498		gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
499		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
500		/* Read back to ensure the write completed. */
501		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
502
503		gmac = GMC_PAUSE_ON;
504		if (((mii->mii_media_active & IFM_GMASK) &
505		    (IFM_FLAG0 | IFM_FLAG1)) == 0)
506			gmac = GMC_PAUSE_OFF;
507		/* Disable pause for 10/100 Mbps in half-duplex mode. */
508		if ((((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) &&
509		    (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX ||
510		    IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T))
511			gmac = GMC_PAUSE_OFF;
512		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);
513
514		/* Enable PHY interrupt for FIFO underrun/overflow. */
515		if (sc->msk_marvell_phy)
516			msk_phy_writereg(sc_if, PHY_ADDR_MARV,
517			    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
518	} else {
519		/*
520		 * Link state changed to down.
521		 * Disable PHY interrupts.
522		 */
523		if (sc->msk_marvell_phy)
524			msk_phy_writereg(sc_if, PHY_ADDR_MARV,
525			    PHY_MARV_INT_MASK, 0);
526		/* Disable Rx/Tx MAC. */
527		gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
528		gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
529		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
530		/* Read back to ensure the write completed. */
531		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
532	}
533
534	MSK_IF_UNLOCK(sc_if);
535}
536
537static void
538msk_setmulti(struct msk_if_softc *sc_if)
539{
540	struct msk_softc *sc;
541	struct ifnet *ifp;
542	struct ifmultiaddr *ifma;
543	uint32_t mchash[2];
544	uint32_t crc;
545	uint16_t mode;
546
547	sc = sc_if->msk_softc;
548
549	MSK_IF_LOCK_ASSERT(sc_if);
550
551	ifp = sc_if->msk_ifp;
552
553	bzero(mchash, sizeof(mchash));
554	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
555	mode |= GM_RXCR_UCF_ENA;
556	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
557		if ((ifp->if_flags & IFF_PROMISC) != 0)
558			mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
559		else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
560			mchash[0] = 0xffffffff;
561			mchash[1] = 0xffffffff;
562		}
563	} else {
564		IF_ADDR_LOCK(ifp);
565		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
566			if (ifma->ifma_addr->sa_family != AF_LINK)
567				continue;
568			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
569			    ifma->ifma_addr), ETHER_ADDR_LEN);
570			/* Just want the 6 least significant bits. */
571			crc &= 0x3f;
572			/* Set the corresponding bit in the hash table. */
573			mchash[crc >> 5] |= 1 << (crc & 0x1f);
574		}
575		IF_ADDR_UNLOCK(ifp);
576		mode |= GM_RXCR_MCF_ENA;
577	}
578
579	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
580	    mchash[0] & 0xffff);
581	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
582	    (mchash[0] >> 16) & 0xffff);
583	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
584	    mchash[1] & 0xffff);
585	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
586	    (mchash[1] >> 16) & 0xffff);
587	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
588}
589
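/*
 * Program the GMAC multicast filter.  Promiscuous mode clears the
 * unicast/multicast filter enables, ALLMULTI sets every bit of the
 * 64-bit hash, and otherwise each multicast address contributes one
 * hash bit derived from the low 6 bits of its CRC-32.
 */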
590static void
591msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
592{
593	struct msk_softc *sc;
594
595	sc = sc_if->msk_softc;
596	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
597		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
598		    RX_VLAN_STRIP_ON);
599		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
600		    TX_VLAN_TAG_ON);
601	} else {
602		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
603		    RX_VLAN_STRIP_OFF);
604		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
605		    TX_VLAN_TAG_OFF);
606	}
607}
608
609static void
610msk_setpromisc(struct msk_if_softc *sc_if)
611{
612	struct msk_softc *sc;
613	struct ifnet *ifp;
614	uint16_t mode;
615
616	MSK_IF_LOCK_ASSERT(sc_if);
617
618	sc = sc_if->msk_softc;
619	ifp = sc_if->msk_ifp;
620
621	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
622	if (ifp->if_flags & IFF_PROMISC)
623		mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
624	else
625		mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
626	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
627}
628
629static int
630msk_init_rx_ring(struct msk_if_softc *sc_if)
631{
632	struct msk_ring_data *rd;
633	struct msk_rxdesc *rxd;
634	int i, prod;
635
636	MSK_IF_LOCK_ASSERT(sc_if);
637
638	sc_if->msk_cdata.msk_rx_cons = 0;
639	sc_if->msk_cdata.msk_rx_prod = 0;
640	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
641
642	rd = &sc_if->msk_rdata;
643	bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
644	prod = sc_if->msk_cdata.msk_rx_prod;
645	for (i = 0; i < MSK_RX_RING_CNT; i++) {
646		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
647		rxd->rx_m = NULL;
648		rxd->rx_le = &rd->msk_rx_ring[prod];
649		if (msk_newbuf(sc_if, prod) != 0)
650			return (ENOBUFS);
651		MSK_INC(prod, MSK_RX_RING_CNT);
652	}
653
654	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
655	    sc_if->msk_cdata.msk_rx_ring_map,
656	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
657
658	/* Update prefetch unit. */
659	sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
660	CSR_WRITE_2(sc_if->msk_softc,
661	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
662	    sc_if->msk_cdata.msk_rx_prod);
663
664	return (0);
665}
666
667static int
668msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
669{
670	struct msk_ring_data *rd;
671	struct msk_rxdesc *rxd;
672	int i, prod;
673
674	MSK_IF_LOCK_ASSERT(sc_if);
675
676	sc_if->msk_cdata.msk_rx_cons = 0;
677	sc_if->msk_cdata.msk_rx_prod = 0;
678	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
679
680	rd = &sc_if->msk_rdata;
681	bzero(rd->msk_jumbo_rx_ring,
682	    sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
683	prod = sc_if->msk_cdata.msk_rx_prod;
684	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
685		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
686		rxd->rx_m = NULL;
687		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
688		if (msk_jumbo_newbuf(sc_if, prod) != 0)
689			return (ENOBUFS);
690		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
691	}
692
693	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
694	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
695	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
696
697	sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
698	CSR_WRITE_2(sc_if->msk_softc,
699	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
700	    sc_if->msk_cdata.msk_rx_prod);
701
702	return (0);
703}
704
705static void
706msk_init_tx_ring(struct msk_if_softc *sc_if)
707{
708	struct msk_ring_data *rd;
709	struct msk_txdesc *txd;
710	int i;
711
712	sc_if->msk_cdata.msk_tso_mtu = 0;
713	sc_if->msk_cdata.msk_tx_prod = 0;
714	sc_if->msk_cdata.msk_tx_cons = 0;
715	sc_if->msk_cdata.msk_tx_cnt = 0;
716
717	rd = &sc_if->msk_rdata;
718	bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
719	for (i = 0; i < MSK_TX_RING_CNT; i++) {
720		txd = &sc_if->msk_cdata.msk_txdesc[i];
721		txd->tx_m = NULL;
722		txd->tx_le = &rd->msk_tx_ring[i];
723	}
724
725	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
726	    sc_if->msk_cdata.msk_tx_ring_map,
727	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
728}
729
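/*
 * Re-arm an Rx descriptor with the mbuf it already holds; only the
 * control word needs to be rewritten to hand the buffer back to the
 * hardware.
 */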
730static __inline void
731msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
732{
733	struct msk_rx_desc *rx_le;
734	struct msk_rxdesc *rxd;
735	struct mbuf *m;
736
737	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
738	m = rxd->rx_m;
739	rx_le = rxd->rx_le;
740	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
741}
742
743static __inline void
744msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int	idx)
745{
746	struct msk_rx_desc *rx_le;
747	struct msk_rxdesc *rxd;
748	struct mbuf *m;
749
750	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
751	m = rxd->rx_m;
752	rx_le = rxd->rx_le;
753	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
754}
755
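/*
 * Attach a fresh mbuf cluster to the given Rx descriptor.  The cluster
 * is DMA-loaded with the spare map so that a mapping failure leaves the
 * currently attached buffer untouched; on success the descriptor map
 * and the spare map are swapped.
 */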
756static int
757msk_newbuf(struct msk_if_softc *sc_if, int idx)
758{
759	struct msk_rx_desc *rx_le;
760	struct msk_rxdesc *rxd;
761	struct mbuf *m;
762	bus_dma_segment_t segs[1];
763	bus_dmamap_t map;
764	int nsegs;
765
766	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
767	if (m == NULL)
768		return (ENOBUFS);
769
770	m->m_len = m->m_pkthdr.len = MCLBYTES;
771	m_adj(m, ETHER_ALIGN);
772
773	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag,
774	    sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs,
775	    BUS_DMA_NOWAIT) != 0) {
776		m_freem(m);
777		return (ENOBUFS);
778	}
779	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
780
781	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
782	if (rxd->rx_m != NULL) {
783		bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
784		    BUS_DMASYNC_POSTREAD);
785		bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
786	}
787	map = rxd->rx_dmamap;
788	rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
789	sc_if->msk_cdata.msk_rx_sparemap = map;
790	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
791	    BUS_DMASYNC_PREREAD);
792	rxd->rx_m = m;
793	rx_le = rxd->rx_le;
794	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
795	rx_le->msk_control =
796	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
797
798	return (0);
799}
800
801static int
802msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
803{
804	struct msk_rx_desc *rx_le;
805	struct msk_rxdesc *rxd;
806	struct mbuf *m;
807	bus_dma_segment_t segs[1];
808	bus_dmamap_t map;
809	int nsegs;
810	void *buf;
811
812	MGETHDR(m, M_DONTWAIT, MT_DATA);
813	if (m == NULL)
814		return (ENOBUFS);
815	buf = msk_jalloc(sc_if);
816	if (buf == NULL) {
817		m_freem(m);
818		return (ENOBUFS);
819	}
820	/* Attach the buffer to the mbuf. */
821	MEXTADD(m, buf, MSK_JLEN, msk_jfree, (struct msk_if_softc *)sc_if, 0,
822	    EXT_NET_DRV);
823	if ((m->m_flags & M_EXT) == 0) {
824		m_freem(m);
825		return (ENOBUFS);
826	}
827	m->m_pkthdr.len = m->m_len = MSK_JLEN;
828	m_adj(m, ETHER_ALIGN);
829
830	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
831	    sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
832	    BUS_DMA_NOWAIT) != 0) {
833		m_freem(m);
834		return (ENOBUFS);
835	}
836	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
837
838	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
839	if (rxd->rx_m != NULL) {
840		bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
841		    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
842		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
843		    rxd->rx_dmamap);
844	}
845	map = rxd->rx_dmamap;
846	rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
847	sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
848	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
849	    BUS_DMASYNC_PREREAD);
850	rxd->rx_m = m;
851	rx_le = rxd->rx_le;
852	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
853	rx_le->msk_control =
854	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
855
856	return (0);
857}
858
859/*
860 * Set media options.
861 */
862static int
863msk_mediachange(struct ifnet *ifp)
864{
865	struct msk_if_softc *sc_if;
866	struct mii_data	*mii;
867
868	sc_if = ifp->if_softc;
869
870	MSK_IF_LOCK(sc_if);
871	mii = device_get_softc(sc_if->msk_miibus);
872	mii_mediachg(mii);
873	MSK_IF_UNLOCK(sc_if);
874
875	return (0);
876}
877
878/*
879 * Report current media status.
880 */
881static void
882msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
883{
884	struct msk_if_softc *sc_if;
885	struct mii_data	*mii;
886
887	sc_if = ifp->if_softc;
888	MSK_IF_LOCK(sc_if);
889	mii = device_get_softc(sc_if->msk_miibus);
890
891	mii_pollstat(mii);
892	MSK_IF_UNLOCK(sc_if);
893	ifmr->ifm_active = mii->mii_media_active;
894	ifmr->ifm_status = mii->mii_media_status;
895}
896
897static int
898msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
899{
900	struct msk_if_softc *sc_if;
901	struct ifreq *ifr;
902	struct mii_data	*mii;
903	int error, mask;
904
905	sc_if = ifp->if_softc;
906	ifr = (struct ifreq *)data;
907	error = 0;
908
909	switch(command) {
910	case SIOCSIFMTU:
911		if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) {
912			error = EINVAL;
913			break;
914		}
915		if (sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
916		    ifr->ifr_mtu > MSK_MAX_FRAMELEN) {
917			error = EINVAL;
918			break;
919		}
920		MSK_IF_LOCK(sc_if);
921		ifp->if_mtu = ifr->ifr_mtu;
922		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
923			msk_init_locked(sc_if);
924		MSK_IF_UNLOCK(sc_if);
925		break;
926	case SIOCSIFFLAGS:
927		MSK_IF_LOCK(sc_if);
928		if ((ifp->if_flags & IFF_UP) != 0) {
929			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
930				if (((ifp->if_flags ^ sc_if->msk_if_flags)
931				    & IFF_PROMISC) != 0) {
932					msk_setpromisc(sc_if);
933					msk_setmulti(sc_if);
934				}
935			} else {
936				if (sc_if->msk_detach == 0)
937					msk_init_locked(sc_if);
938			}
939		} else {
940			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
941				msk_stop(sc_if);
942		}
943		sc_if->msk_if_flags = ifp->if_flags;
944		MSK_IF_UNLOCK(sc_if);
945		break;
946	case SIOCADDMULTI:
947	case SIOCDELMULTI:
948		MSK_IF_LOCK(sc_if);
949		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
950			msk_setmulti(sc_if);
951		MSK_IF_UNLOCK(sc_if);
952		break;
953	case SIOCGIFMEDIA:
954	case SIOCSIFMEDIA:
955		mii = device_get_softc(sc_if->msk_miibus);
956		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
957		break;
958	case SIOCSIFCAP:
959		MSK_IF_LOCK(sc_if);
960		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
961		if ((mask & IFCAP_TXCSUM) != 0) {
962			ifp->if_capenable ^= IFCAP_TXCSUM;
963			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
964			    (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
965				ifp->if_hwassist |= MSK_CSUM_FEATURES;
966			else
967				ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
968		}
969		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
970			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
971			msk_setvlan(sc_if, ifp);
972		}
973
974		if ((mask & IFCAP_TSO4) != 0) {
975			ifp->if_capenable ^= IFCAP_TSO4;
976			if ((IFCAP_TSO4 & ifp->if_capenable) != 0 &&
977			    (IFCAP_TSO4 & ifp->if_capabilities) != 0)
978				ifp->if_hwassist |= CSUM_TSO;
979			else
980				ifp->if_hwassist &= ~CSUM_TSO;
981		}
982		VLAN_CAPABILITIES(ifp);
983		MSK_IF_UNLOCK(sc_if);
984		break;
985	default:
986		error = ether_ioctl(ifp, command, data);
987		break;
988	}
989
990	return (error);
991}
992
993static int
994mskc_probe(device_t dev)
995{
996	struct msk_product *mp;
997	uint16_t vendor, devid;
998	int i;
999
1000	vendor = pci_get_vendor(dev);
1001	devid = pci_get_device(dev);
1002	mp = msk_products;
1003	for (i = 0; i < sizeof(msk_products)/sizeof(msk_products[0]);
1004	    i++, mp++) {
1005		if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
1006			device_set_desc(dev, mp->msk_name);
1007			return (BUS_PROBE_DEFAULT);
1008		}
1009	}
1010
1011	return (ENXIO);
1012}
1013
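/*
 * Carve the adapter SRAM into per-port Rx and Tx FIFO regions.  Each
 * queue is given its minimum size and the Rx queue additionally gets
 * 80% of whatever space remains.
 */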
1014static int
1015mskc_setup_rambuffer(struct msk_softc *sc)
1016{
1017	int totqsize, minqsize;
1018	int avail, next;
1019	int i;
1020	uint8_t val;
1021
1022	/* Get adapter SRAM size. */
1023	val = CSR_READ_1(sc, B2_E_0);
1024	sc->msk_ramsize = (val == 0) ? 128 : val * 4;
1025	if (sc->msk_hw_id == CHIP_ID_YUKON_FE)
1026		sc->msk_ramsize = 4 * 4;
1027	if (bootverbose)
1028		device_printf(sc->msk_dev,
1029		    "RAM buffer size : %dKB\n", sc->msk_ramsize);
1030
1031	totqsize = sc->msk_ramsize * sc->msk_num_port;
1032	minqsize = MSK_MIN_RXQ_SIZE + MSK_MIN_TXQ_SIZE;
1033	if (minqsize > sc->msk_ramsize)
1034		minqsize = sc->msk_ramsize;
1035
1036	if (minqsize * sc->msk_num_port > totqsize) {
1037		device_printf(sc->msk_dev,
1038		    "not enough RAM buffer memory : %d/%dKB\n",
1039		    minqsize * sc->msk_num_port, totqsize);
1040		return (ENOSPC);
1041	}
1042
1043	avail = totqsize;
1044	if (sc->msk_num_port > 1) {
1045		/*
1046		 * Divide up the memory evenly so that everyone gets a
1047		 * fair share for dual port adapters.
1048		 */
1049		avail = sc->msk_ramsize;
1050	}
1051
1052	/* Take away the minimum memory for active queues. */
1053	avail -= minqsize;
1054	/* Rx queue gets the minimum + 80% of the rest. */
1055	sc->msk_rxqsize =
1056	    (avail * MSK_RAM_QUOTA_RX) / 100 + MSK_MIN_RXQ_SIZE;
1057	avail -= (sc->msk_rxqsize - MSK_MIN_RXQ_SIZE);
1058	sc->msk_txqsize = avail + MSK_MIN_TXQ_SIZE;
1059
1060	for (i = 0, next = 0; i < sc->msk_num_port; i++) {
1061		sc->msk_rxqstart[i] = next;
1062		sc->msk_rxqend[i] = next + (sc->msk_rxqsize * 1024) - 1;
1063		next = sc->msk_rxqend[i] + 1;
1064		sc->msk_txqstart[i] = next;
1065		sc->msk_txqend[i] = next + (sc->msk_txqsize * 1024) - 1;
1066		next = sc->msk_txqend[i] + 1;
1067		if (bootverbose) {
1068			device_printf(sc->msk_dev,
1069			    "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
1070			    sc->msk_rxqsize, sc->msk_rxqstart[i],
1071			    sc->msk_rxqend[i]);
1072			device_printf(sc->msk_dev,
1073			    "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
1074			    sc->msk_txqsize, sc->msk_txqstart[i],
1075			    sc->msk_txqend[i]);
1076		}
1077	}
1078
1079	return (0);
1080}
1081
1082static void
1083msk_phy_power(struct msk_softc *sc, int mode)
1084{
1085	uint32_t val;
1086	int i;
1087
1088	switch (mode) {
1089	case MSK_PHY_POWERUP:
1090		/* Switch power to VCC (WA for VAUX problem). */
1091		CSR_WRITE_1(sc, B0_POWER_CTRL,
1092		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
1093		/* Disable Core Clock Division, set Clock Select to 0. */
1094		CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
1095
1096		val = 0;
1097		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1098		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1099			/* Enable bits are inverted. */
1100			val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1101			      Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1102			      Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1103		}
1104		/*
1105		 * Enable PCI & Core Clock, enable clock gating for both Links.
1106		 */
1107		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1108
1109		val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1110		val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
1111		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1112		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1113			/* Deassert Low Power for 1st PHY. */
1114			val |= PCI_Y2_PHY1_COMA;
1115			if (sc->msk_num_port > 1)
1116				val |= PCI_Y2_PHY2_COMA;
1117		} else if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
1118			uint32_t our;
1119
1120			CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
1121
1122			/* Enable all clocks. */
1123			pci_write_config(sc->msk_dev, PCI_OUR_REG_3, 0, 4);
1124			our = pci_read_config(sc->msk_dev, PCI_OUR_REG_4, 4);
1125			our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN|
1126			    PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST);
1127			/* Set all bits to 0 except bits 15..12. */
1128			pci_write_config(sc->msk_dev, PCI_OUR_REG_4, our, 4);
1129			/* Set to default value. */
1130			pci_write_config(sc->msk_dev, PCI_OUR_REG_5, 0, 4);
1131		}
1132		/* Release PHY from PowerDown/COMA mode. */
1133		pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1134		for (i = 0; i < sc->msk_num_port; i++) {
1135			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1136			    GMLC_RST_SET);
1137			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1138			    GMLC_RST_CLR);
1139		}
1140		break;
1141	case MSK_PHY_POWERDOWN:
1142		val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1143		val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
1144		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1145		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1146			val &= ~PCI_Y2_PHY1_COMA;
1147			if (sc->msk_num_port > 1)
1148				val &= ~PCI_Y2_PHY2_COMA;
1149		}
1150		pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1151
1152		val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1153		      Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1154		      Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1155		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1156		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1157			/* Enable bits are inverted. */
1158			val = 0;
1159		}
1160		/*
1161		 * Disable PCI & Core Clock, disable clock gating for
1162		 * both Links.
1163		 */
1164		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1165		CSR_WRITE_1(sc, B0_POWER_CTRL,
1166		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
1167		break;
1168	default:
1169		break;
1170	}
1171}
1172
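/*
 * Full controller reset: disable ASF, clear PCI/PCIe error state, power
 * up the PHYs, reset the GMACs, program the RAM interface timeouts and
 * (re)initialize the status list unit.
 */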
1173static void
1174mskc_reset(struct msk_softc *sc)
1175{
1176	bus_addr_t addr;
1177	uint16_t status;
1178	uint32_t val;
1179	int i;
1180
1181	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1182
1183	/* Disable ASF. */
1184	if (sc->msk_hw_id < CHIP_ID_YUKON_XL) {
1185		CSR_WRITE_4(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
1186		CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
1187	}
1188	/*
1189	 * Since we disabled ASF, S/W reset is required for Power Management.
1190	 */
1191	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1192	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1193
1194	/* Clear all error bits in the PCI status register. */
1195	status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
1196	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1197
1198	pci_write_config(sc->msk_dev, PCIR_STATUS, status |
1199	    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
1200	    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
1201	CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);
1202
1203	switch (sc->msk_bustype) {
1204	case MSK_PEX_BUS:
1205		/* Clear all PEX errors. */
1206		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
1207		val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
1208		if ((val & PEX_RX_OV) != 0) {
1209			sc->msk_intrmask &= ~Y2_IS_HW_ERR;
1210			sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
1211		}
1212		break;
1213	case MSK_PCI_BUS:
1214	case MSK_PCIX_BUS:
1215		/* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
1216		val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
1217		if (val == 0)
1218			pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
1219		if (sc->msk_bustype == MSK_PCIX_BUS) {
1220			/* Set Cache Line Size opt. */
1221			val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1222			val |= PCI_CLS_OPT;
1223			pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1224		}
1225		break;
1226	}
1227	/* Set PHY power state. */
1228	msk_phy_power(sc, MSK_PHY_POWERUP);
1229
1230	/* Reset GPHY/GMAC Control */
1231	for (i = 0; i < sc->msk_num_port; i++) {
1232		/* GPHY Control reset. */
1233		CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
1234		CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
1235		/* GMAC Control reset. */
1236		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
1237		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
1238		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
1239	}
1240	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1241
1242	/* LED On. */
1243	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);
1244
1245	/* Clear TWSI IRQ. */
1246	CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);
1247
1248	/* Turn off hardware timer. */
1249	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
1250	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);
1251
1252	/* Turn off descriptor polling. */
1253	CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);
1254
1255	/* Turn off time stamps. */
1256	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
1257	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
1258
1259	/* Configure timeout values. */
1260	for (i = 0; i < sc->msk_num_port; i++) {
1261		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
1262		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
1263		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
1264		    MSK_RI_TO_53);
1265		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
1266		    MSK_RI_TO_53);
1267		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
1268		    MSK_RI_TO_53);
1269		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
1270		    MSK_RI_TO_53);
1271		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
1272		    MSK_RI_TO_53);
1273		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
1274		    MSK_RI_TO_53);
1275		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
1276		    MSK_RI_TO_53);
1277		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
1278		    MSK_RI_TO_53);
1279		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
1280		    MSK_RI_TO_53);
1281		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
1282		    MSK_RI_TO_53);
1283		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
1284		    MSK_RI_TO_53);
1285		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
1286		    MSK_RI_TO_53);
1287	}
1288
1289	/* Disable all interrupts. */
1290	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1291	CSR_READ_4(sc, B0_HWE_IMSK);
1292	CSR_WRITE_4(sc, B0_IMSK, 0);
1293	CSR_READ_4(sc, B0_IMSK);
1294
1295	/*
1296	 * On dual port PCI-X cards, there is a problem where status
1297	 * can be received out of order due to split transactions.
1298	 */
1299	if (sc->msk_bustype == MSK_PCIX_BUS && sc->msk_num_port > 1) {
1300		int pcix;
1301		uint16_t pcix_cmd;
1302
1303		if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &pcix) == 0) {
1304			pcix_cmd = pci_read_config(sc->msk_dev, pcix + 2, 2);
1305			/* Clear Max Outstanding Split Transactions. */
1306			pcix_cmd &= ~0x70;
1307			CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1308			pci_write_config(sc->msk_dev, pcix + 2, pcix_cmd, 2);
1309			CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1310		}
1311	}
1312	if (sc->msk_bustype == MSK_PEX_BUS) {
1313		uint16_t v, width;
1314
1315		v = pci_read_config(sc->msk_dev, PEX_DEV_CTRL, 2);
1316		/* Change Max. Read Request Size to 4096 bytes. */
1317		v &= ~PEX_DC_MAX_RRS_MSK;
1318		v |= PEX_DC_MAX_RD_RQ_SIZE(5);
1319		pci_write_config(sc->msk_dev, PEX_DEV_CTRL, v, 2);
1320		width = pci_read_config(sc->msk_dev, PEX_LNK_STAT, 2);
1321		width = (width & PEX_LS_LINK_WI_MSK) >> 4;
1322		v = pci_read_config(sc->msk_dev, PEX_LNK_CAP, 2);
1323		v = (v & PEX_LS_LINK_WI_MSK) >> 4;
1324		if (v != width)
1325			device_printf(sc->msk_dev,
1326			    "negotiated width of link(x%d) != "
1327			    "max. width of link(x%d)\n", width, v);
1328	}
1329
1330	/* Clear status list. */
1331	bzero(sc->msk_stat_ring,
1332	    sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
1333	sc->msk_stat_cons = 0;
1334	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
1335	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1336	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
1337	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
1338	/* Set the status list base address. */
1339	addr = sc->msk_stat_ring_paddr;
1340	CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
1341	CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
1342	/* Set the status list last index. */
1343	CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
1344	if (HW_FEATURE(sc, HWF_WA_DEV_43_418)) {
1345		/* WA for dev. #4.3 */
1346		CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
1347		/* WA for dev. #4.18 */
1348		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
1349		CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
1350	} else {
1351		CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
1352		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
1353		CSR_WRITE_1(sc, STAT_FIFO_ISR_WM,
1354		    HW_FEATURE(sc, HWF_WA_DEV_4109) ? 0x10 : 0x04);
1355		CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
1356	}
1357	/*
1358	 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
1359	 */
1360	CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));
1361
1362	/* Enable status unit. */
1363	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);
1364
1365	CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
1366	CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
1367	CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
1368}
1369
1370static int
1371msk_probe(device_t dev)
1372{
1373	struct msk_softc *sc;
1374	char desc[100];
1375
1376	sc = device_get_softc(device_get_parent(dev));
1377	/*
1378	 * Not much to do here. We always know there will be
1379	 * at least one GMAC present, and if there are two,
1380	 * mskc_attach() will create a second device instance
1381	 * for us.
1382	 */
1383	snprintf(desc, sizeof(desc),
1384	    "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
1385	    model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
1386	    sc->msk_hw_rev);
1387	device_set_desc_copy(dev, desc);
1388
1389	return (BUS_PROBE_DEFAULT);
1390}
1391
1392static int
1393msk_attach(device_t dev)
1394{
1395	struct msk_softc *sc;
1396	struct msk_if_softc *sc_if;
1397	struct ifnet *ifp;
1398	int i, port, error;
1399	uint8_t eaddr[6];
1400
1401	if (dev == NULL)
1402		return (EINVAL);
1403
1404	error = 0;
1405	sc_if = device_get_softc(dev);
1406	sc = device_get_softc(device_get_parent(dev));
1407	port = *(int *)device_get_ivars(dev);
1408
1409	sc_if->msk_if_dev = dev;
1410	sc_if->msk_port = port;
1411	sc_if->msk_softc = sc;
1412	sc->msk_if[port] = sc_if;
1413	/* Setup Tx/Rx queue register offsets. */
1414	if (port == MSK_PORT_A) {
1415		sc_if->msk_txq = Q_XA1;
1416		sc_if->msk_txsq = Q_XS1;
1417		sc_if->msk_rxq = Q_R1;
1418	} else {
1419		sc_if->msk_txq = Q_XA2;
1420		sc_if->msk_txsq = Q_XS2;
1421		sc_if->msk_rxq = Q_R2;
1422	}
1423
1424	callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
1425	TASK_INIT(&sc_if->msk_link_task, 0, msk_link_task, sc_if);
1426
1427	if ((error = msk_txrx_dma_alloc(sc_if)) != 0)
1428		goto fail;
1429
1430	ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
1431	if (ifp == NULL) {
1432		device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
1433		error = ENOSPC;
1434		goto fail;
1435	}
1436	ifp->if_softc = sc_if;
1437	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1438	ifp->if_mtu = ETHERMTU;
1439	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1440	/*
1441	 * IFCAP_RXCSUM is intentionally disabled because Rx checksum
1442	 * offloading is seriously broken on all Yukon II family hardware.
1443	 * There is a workaround that makes it work some of the time, but it
1444	 * requires checking OP code sequences to verify that the OP code is
1445	 * correct, and sometimes computing the IP/TCP/UDP checksum in the
1446	 * driver to verify the checksum computed by the hardware.  If the
1447	 * checksum has to be computed in software anyway to verify the
1448	 * hardware's result, there is no point in having the hardware compute
1449	 * it, so no effort is spent making Rx checksum offload work on
1450	 * Yukon II hardware.
1451	 */
1452	ifp->if_capabilities = IFCAP_TXCSUM;
1453	ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO;
1454	if (sc->msk_hw_id != CHIP_ID_YUKON_EC_U) {
1455		/* It seems Yukon EC Ultra doesn't support TSO. */
1456		ifp->if_capabilities |= IFCAP_TSO4;
1457		ifp->if_hwassist |= CSUM_TSO;
1458	}
1459	ifp->if_capenable = ifp->if_capabilities;
1460	ifp->if_ioctl = msk_ioctl;
1461	ifp->if_start = msk_start;
1462	ifp->if_timer = 0;
1463	ifp->if_watchdog = NULL;
1464	ifp->if_init = msk_init;
1465	IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
1466	ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1;
1467	IFQ_SET_READY(&ifp->if_snd);
1468
1469	TASK_INIT(&sc_if->msk_tx_task, 1, msk_tx_task, ifp);
1470
1471	/*
1472	 * Get station address for this interface. Note that
1473	 * dual port cards actually come with three station
1474	 * addresses: one for each port, plus an extra. The
1475	 * extra one is used by the SysKonnect driver software
1476	 * as a 'virtual' station address for when both ports
1477	 * are operating in failover mode. Currently we don't
1478	 * use this extra address.
1479	 */
1480	MSK_IF_LOCK(sc_if);
1481	for (i = 0; i < ETHER_ADDR_LEN; i++)
1482		eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);
1483
1484	/*
1485	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
1486	 */
1487	MSK_IF_UNLOCK(sc_if);
1488	ether_ifattach(ifp, eaddr);
1489	MSK_IF_LOCK(sc_if);
1490
1491	/* VLAN capability setup */
1492	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
1493	if (ifp->if_capabilities & IFCAP_HWCSUM)
1494		ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
1495	ifp->if_capenable = ifp->if_capabilities;
1496
1497	/*
1498	 * Tell the upper layer(s) we support long frames.
1499	 * Must appear after the call to ether_ifattach() because
1500	 * ether_ifattach() sets ifi_hdrlen to the default value.
1501	 */
1502	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1503
1504	/*
1505	 * Do miibus setup.
1506	 */
1507	MSK_IF_UNLOCK(sc_if);
1508	error = mii_phy_probe(dev, &sc_if->msk_miibus, msk_mediachange,
1509	    msk_mediastatus);
1510	if (error != 0) {
1511		device_printf(sc_if->msk_if_dev, "no PHY found!\n");
1512		ether_ifdetach(ifp);
1513		error = ENXIO;
1514		goto fail;
1515	}
1516	/* Check whether PHY Id is MARVELL. */
1517	if (msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_ID0)
1518	    == PHY_MARV_ID0_VAL)
1519		sc->msk_marvell_phy = 1;
1520
1521fail:
1522	if (error != 0) {
1523		/* Access should be ok even though lock has been dropped */
1524		sc->msk_if[port] = NULL;
1525		msk_detach(dev);
1526	}
1527
1528	return (error);
1529}
1530
1531/*
1532 * Attach the interface. Allocate softc structures, do ifmedia
1533 * setup and ethernet/BPF attach.
1534 */
1535static int
1536mskc_attach(device_t dev)
1537{
1538	struct msk_softc *sc;
1539	int error, msic, *port, reg;
1540
1541	sc = device_get_softc(dev);
1542	sc->msk_dev = dev;
1543	mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1544	    MTX_DEF);
1545
1546	/*
1547	 * Map control/status registers.
1548	 */
1549	pci_enable_busmaster(dev);
1550
1551	/* Allocate I/O resource */
1552#ifdef MSK_USEIOSPACE
1553	sc->msk_res_spec = msk_res_spec_io;
1554#else
1555	sc->msk_res_spec = msk_res_spec_mem;
1556#endif
1557	sc->msk_irq_spec = msk_irq_spec_legacy;
1558	error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
1559	if (error) {
1560		if (sc->msk_res_spec == msk_res_spec_mem)
1561			sc->msk_res_spec = msk_res_spec_io;
1562		else
1563			sc->msk_res_spec = msk_res_spec_mem;
1564		error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
1565		if (error) {
1566			device_printf(dev, "couldn't allocate %s resources\n",
1567			    sc->msk_res_spec == msk_res_spec_mem ? "memory" :
1568			    "I/O");
1569			mtx_destroy(&sc->msk_mtx);
1570			return (ENXIO);
1571		}
1572	}
1573
1574	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1575	sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
1576	sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
1577	/* Bail out if chip is not recognized. */
1578	if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
1579	    sc->msk_hw_id > CHIP_ID_YUKON_FE) {
1580		device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
1581		    sc->msk_hw_id, sc->msk_hw_rev);
1582		error = ENXIO;
1583		goto fail;
1584	}
1585
1586	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1587	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1588	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
1589	    &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
1590	    "max number of Rx events to process");
1591
1592	sc->msk_process_limit = MSK_PROC_DEFAULT;
1593	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
1594	    "process_limit", &sc->msk_process_limit);
1595	if (error == 0) {
1596		if (sc->msk_process_limit < MSK_PROC_MIN ||
1597		    sc->msk_process_limit > MSK_PROC_MAX) {
1598			device_printf(dev, "process_limit value out of range; "
1599			    "using default: %d\n", MSK_PROC_DEFAULT);
1600			sc->msk_process_limit = MSK_PROC_DEFAULT;
1601		}
1602	}
1603
1604	/* Soft reset. */
1605	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1606	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1607	sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
1608	if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
1609		sc->msk_coppertype = 0;
1610	else
1611		sc->msk_coppertype = 1;
1612	/* Check number of MACs. */
1613	sc->msk_num_port = 1;
1614	if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
1615	    CFG_DUAL_MAC_MSK) {
1616		if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
1617			sc->msk_num_port++;
1618	}
1619
1620	/* Check bus type. */
1621	if (pci_find_extcap(sc->msk_dev, PCIY_EXPRESS, &reg) == 0)
1622		sc->msk_bustype = MSK_PEX_BUS;
1623	else if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &reg) == 0)
1624		sc->msk_bustype = MSK_PCIX_BUS;
1625	else
1626		sc->msk_bustype = MSK_PCI_BUS;
1627
1628	/* Get H/W features(bugs). */
1629	switch (sc->msk_hw_id) {
1630	case CHIP_ID_YUKON_EC:
1631		sc->msk_clock = 125;	/* 125 MHz */
1632		if (sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
1633			sc->msk_hw_feature =
1634			    HWF_WA_DEV_42  | HWF_WA_DEV_46 | HWF_WA_DEV_43_418 |
1635			    HWF_WA_DEV_420 | HWF_WA_DEV_423 |
1636			    HWF_WA_DEV_424 | HWF_WA_DEV_425 | HWF_WA_DEV_427 |
1637			    HWF_WA_DEV_428 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 |
1638			    HWF_WA_DEV_4152 | HWF_WA_DEV_4167;
1639		} else {
1640			/* A2/A3 */
1641			sc->msk_hw_feature =
1642			    HWF_WA_DEV_424 | HWF_WA_DEV_425 | HWF_WA_DEV_427 |
1643			    HWF_WA_DEV_428 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 |
1644			    HWF_WA_DEV_4152 | HWF_WA_DEV_4167;
1645		}
1646		break;
1647	case CHIP_ID_YUKON_EC_U:
1648		sc->msk_clock = 125;	/* 125 MHz */
1649		if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
1650			sc->msk_hw_feature = HWF_WA_DEV_427 | HWF_WA_DEV_483 |
1651			    HWF_WA_DEV_4109;
1652		} else if (sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
1653			uint16_t v;
1654
1655			sc->msk_hw_feature = HWF_WA_DEV_427 | HWF_WA_DEV_4109 |
1656			    HWF_WA_DEV_4185;
1657			v = CSR_READ_2(sc, Q_ADDR(Q_XA1, Q_WM));
1658			if (v == 0)
1659				sc->msk_hw_feature |= HWF_WA_DEV_4185CS |
1660				    HWF_WA_DEV_4200;
1661		}
1662		break;
1663	case CHIP_ID_YUKON_FE:
1664		sc->msk_clock = 100;	/* 100 MHz */
1665		sc->msk_hw_feature = HWF_WA_DEV_427 | HWF_WA_DEV_4109 |
1666		    HWF_WA_DEV_4152 | HWF_WA_DEV_4167;
1667		break;
1668	case CHIP_ID_YUKON_XL:
1669		sc->msk_clock = 156;	/* 156 MHz */
1670		switch (sc->msk_hw_rev) {
1671		case CHIP_REV_YU_XL_A0:
1672			sc->msk_hw_feature =
1673			    HWF_WA_DEV_427 | HWF_WA_DEV_463 | HWF_WA_DEV_472 |
1674			    HWF_WA_DEV_479 | HWF_WA_DEV_483 | HWF_WA_DEV_4115 |
1675			    HWF_WA_DEV_4152 | HWF_WA_DEV_4167;
1676			break;
1677		case CHIP_REV_YU_XL_A1:
1678			sc->msk_hw_feature =
1679			    HWF_WA_DEV_427 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 |
1680			    HWF_WA_DEV_4115 | HWF_WA_DEV_4152 | HWF_WA_DEV_4167;
1681			break;
1682		case CHIP_REV_YU_XL_A2:
1683			sc->msk_hw_feature =
1684			    HWF_WA_DEV_427 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 |
1685			    HWF_WA_DEV_4115 | HWF_WA_DEV_4167;
1686			break;
1687		case CHIP_REV_YU_XL_A3:
1688			sc->msk_hw_feature =
1689			    HWF_WA_DEV_427 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 |
1690			    HWF_WA_DEV_4115;
1691		}
1692		break;
1693	default:
1694		sc->msk_clock = 156;	/* 156 MHz */
1695		sc->msk_hw_feature = 0;
1696	}
1697
1698	/* Allocate IRQ resources. */
1699	msic = pci_msi_count(dev);
1700	if (bootverbose)
1701		device_printf(dev, "MSI count : %d\n", msic);
1702	/*
1703	 * The Yukon II reports it can handle two messages, one for each
1704	 * possible port.  We go ahead and allocate two messages and only
	 * set up a handler for both if we have a dual port card.
1706	 *
1707	 * XXX: I haven't untangled the interrupt handler to handle dual
1708	 * port cards with separate MSI messages, so for now I disable MSI
1709	 * on dual port cards.
1710	 */
1711	if (msic == 2 && msi_disable == 0 && sc->msk_num_port == 1 &&
1712	    pci_alloc_msi(dev, &msic) == 0) {
1713		if (msic == 2) {
1714			sc->msk_msi = 1;
1715			sc->msk_irq_spec = msk_irq_spec_msi;
1716		} else {
1717			pci_release_msi(dev);
1718			sc->msk_irq_spec = msk_irq_spec_legacy;
1719		}
1720	}
1721
1722	error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq);
1723	if (error) {
1724		device_printf(dev, "couldn't allocate IRQ resources\n");
1725		goto fail;
1726	}
1727
1728	if ((error = msk_status_dma_alloc(sc)) != 0)
1729		goto fail;
1730
1731	/* Set base interrupt mask. */
1732	sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
1733	sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
1734	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;
1735
1736	/* Reset the adapter. */
1737	mskc_reset(sc);
1738
1739	if ((error = mskc_setup_rambuffer(sc)) != 0)
1740		goto fail;
1741
1742	sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
1743	if (sc->msk_devs[MSK_PORT_A] == NULL) {
1744		device_printf(dev, "failed to add child for PORT_A\n");
1745		error = ENXIO;
1746		goto fail;
1747	}
1748	port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
1749	if (port == NULL) {
1750		device_printf(dev, "failed to allocate memory for "
1751		    "ivars of PORT_A\n");
1752		error = ENXIO;
1753		goto fail;
1754	}
1755	*port = MSK_PORT_A;
1756	device_set_ivars(sc->msk_devs[MSK_PORT_A], port);
1757
1758	if (sc->msk_num_port > 1) {
1759		sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
1760		if (sc->msk_devs[MSK_PORT_B] == NULL) {
1761			device_printf(dev, "failed to add child for PORT_B\n");
1762			error = ENXIO;
1763			goto fail;
1764		}
1765		port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
1766		if (port == NULL) {
1767			device_printf(dev, "failed to allocate memory for "
1768			    "ivars of PORT_B\n");
1769			error = ENXIO;
1770			goto fail;
1771		}
1772		*port = MSK_PORT_B;
1773		device_set_ivars(sc->msk_devs[MSK_PORT_B], port);
1774	}
1775
1776	error = bus_generic_attach(dev);
1777	if (error) {
1778		device_printf(dev, "failed to attach port(s)\n");
1779		goto fail;
1780	}
1781
1782	TASK_INIT(&sc->msk_int_task, 0, msk_int_task, sc);
1783	sc->msk_tq = taskqueue_create_fast("msk_taskq", M_WAITOK,
1784	    taskqueue_thread_enqueue, &sc->msk_tq);
1785	taskqueue_start_threads(&sc->msk_tq, 1, PI_NET, "%s taskq",
1786	    device_get_nameunit(sc->msk_dev));
1787	/* Hook interrupt last to avoid having to lock softc. */
1788	error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
1789	    INTR_MPSAFE | INTR_FAST, msk_intr, sc, &sc->msk_intrhand[0]);
1790
1791	if (error != 0) {
1792		device_printf(dev, "couldn't set up interrupt handler\n");
1793		taskqueue_free(sc->msk_tq);
1794		sc->msk_tq = NULL;
1795		goto fail;
1796	}
1797fail:
1798	if (error != 0)
1799		mskc_detach(dev);
1800
1801	return (error);
1802}
1803
1804/*
1805 * Shutdown hardware and free up resources. This can be called any
1806 * time after the mutex has been initialized. It is called in both
1807 * the error case in attach and the normal detach case so it needs
1808 * to be careful about only freeing resources that have actually been
1809 * allocated.
1810 */
1811static int
1812msk_detach(device_t dev)
1813{
1814	struct msk_softc *sc;
1815	struct msk_if_softc *sc_if;
1816	struct ifnet *ifp;
1817
1818	sc_if = device_get_softc(dev);
1819	KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx),
1820	    ("msk mutex not initialized in msk_detach"));
1821	MSK_IF_LOCK(sc_if);
1822
1823	ifp = sc_if->msk_ifp;
1824	if (device_is_attached(dev)) {
1825		/* XXX */
1826		sc_if->msk_detach = 1;
1827		msk_stop(sc_if);
1828		/* Can't hold locks while calling detach. */
1829		MSK_IF_UNLOCK(sc_if);
1830		callout_drain(&sc_if->msk_tick_ch);
1831		taskqueue_drain(taskqueue_fast, &sc_if->msk_tx_task);
1832		taskqueue_drain(taskqueue_swi, &sc_if->msk_link_task);
1833		ether_ifdetach(ifp);
1834		MSK_IF_LOCK(sc_if);
1835	}
1836
1837	/*
1838	 * We're generally called from mskc_detach() which is using
1839	 * device_delete_child() to get to here. It's already trashed
1840	 * miibus for us, so don't do it here or we'll panic.
1841	 *
1842	 * if (sc_if->msk_miibus != NULL) {
1843	 * 	device_delete_child(dev, sc_if->msk_miibus);
1844	 * 	sc_if->msk_miibus = NULL;
1845	 * }
1846	 */
1847
1848	msk_txrx_dma_free(sc_if);
1849	bus_generic_detach(dev);
1850
1851	if (ifp)
1852		if_free(ifp);
1853	sc = sc_if->msk_softc;
1854	sc->msk_if[sc_if->msk_port] = NULL;
1855	MSK_IF_UNLOCK(sc_if);
1856
1857	return (0);
1858}
1859
1860static int
1861mskc_detach(device_t dev)
1862{
1863	struct msk_softc *sc;
1864
1865	sc = device_get_softc(dev);
1866	KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized"));
1867
1868	if (device_is_alive(dev)) {
1869		if (sc->msk_devs[MSK_PORT_A] != NULL) {
1870			free(device_get_ivars(sc->msk_devs[MSK_PORT_A]),
1871			    M_DEVBUF);
1872			device_delete_child(dev, sc->msk_devs[MSK_PORT_A]);
1873		}
1874		if (sc->msk_devs[MSK_PORT_B] != NULL) {
1875			free(device_get_ivars(sc->msk_devs[MSK_PORT_B]),
1876			    M_DEVBUF);
1877			device_delete_child(dev, sc->msk_devs[MSK_PORT_B]);
1878		}
1879		bus_generic_detach(dev);
1880	}
1881
1882	/* Disable all interrupts. */
1883	CSR_WRITE_4(sc, B0_IMSK, 0);
1884	CSR_READ_4(sc, B0_IMSK);
1885	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1886	CSR_READ_4(sc, B0_HWE_IMSK);
1887
1888	/* LED Off. */
1889	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);
1890
1891	/* Put hardware reset. */
1892	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1893
1894	msk_status_dma_free(sc);
1895
1896	if (sc->msk_tq != NULL) {
1897		taskqueue_drain(sc->msk_tq, &sc->msk_int_task);
1898		taskqueue_free(sc->msk_tq);
1899		sc->msk_tq = NULL;
1900	}
1901	if (sc->msk_intrhand[0]) {
1902		bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand[0]);
1903		sc->msk_intrhand[0] = NULL;
1904	}
1905	if (sc->msk_intrhand[1]) {
		bus_teardown_intr(dev, sc->msk_irq[1], sc->msk_intrhand[1]);
1907		sc->msk_intrhand[1] = NULL;
1908	}
1909	bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq);
1910	if (sc->msk_msi)
1911		pci_release_msi(dev);
1912	bus_release_resources(dev, sc->msk_res_spec, sc->msk_res);
1913	mtx_destroy(&sc->msk_mtx);
1914
1915	return (0);
1916}
1917
1918struct msk_dmamap_arg {
1919	bus_addr_t	msk_busaddr;
1920};
1921
1922static void
1923msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1924{
1925	struct msk_dmamap_arg *ctx;
1926
1927	if (error != 0)
1928		return;
1929	ctx = arg;
1930	ctx->msk_busaddr = segs[0].ds_addr;
1931}
1932
1933/* Create status DMA region. */
1934static int
1935msk_status_dma_alloc(struct msk_softc *sc)
1936{
1937	struct msk_dmamap_arg ctx;
1938	int error;
1939
1940	error = bus_dma_tag_create(
1941		    bus_get_dma_tag(sc->msk_dev),	/* parent */
1942		    MSK_STAT_ALIGN, 0,		/* alignment, boundary */
1943		    BUS_SPACE_MAXADDR,		/* lowaddr */
1944		    BUS_SPACE_MAXADDR,		/* highaddr */
1945		    NULL, NULL,			/* filter, filterarg */
1946		    MSK_STAT_RING_SZ,		/* maxsize */
1947		    1,				/* nsegments */
1948		    MSK_STAT_RING_SZ,		/* maxsegsize */
1949		    0,				/* flags */
1950		    NULL, NULL,			/* lockfunc, lockarg */
1951		    &sc->msk_stat_tag);
1952	if (error != 0) {
1953		device_printf(sc->msk_dev,
1954		    "failed to create status DMA tag\n");
1955		return (error);
1956	}
1957
1958	/* Allocate DMA'able memory and load the DMA map for status ring. */
1959	error = bus_dmamem_alloc(sc->msk_stat_tag,
1960	    (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
1961	    BUS_DMA_ZERO, &sc->msk_stat_map);
1962	if (error != 0) {
1963		device_printf(sc->msk_dev,
1964		    "failed to allocate DMA'able memory for status ring\n");
1965		return (error);
1966	}
1967
1968	ctx.msk_busaddr = 0;
1969	error = bus_dmamap_load(sc->msk_stat_tag,
1970	    sc->msk_stat_map, sc->msk_stat_ring, MSK_STAT_RING_SZ,
1971	    msk_dmamap_cb, &ctx, 0);
1972	if (error != 0) {
1973		device_printf(sc->msk_dev,
1974		    "failed to load DMA'able memory for status ring\n");
1975		return (error);
1976	}
1977	sc->msk_stat_ring_paddr = ctx.msk_busaddr;
1978
1979	return (0);
1980}
1981
1982static void
1983msk_status_dma_free(struct msk_softc *sc)
1984{
1985
1986	/* Destroy status block. */
1987	if (sc->msk_stat_tag) {
1988		if (sc->msk_stat_map) {
1989			bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
1990			if (sc->msk_stat_ring) {
1991				bus_dmamem_free(sc->msk_stat_tag,
1992				    sc->msk_stat_ring, sc->msk_stat_map);
1993				sc->msk_stat_ring = NULL;
1994			}
1995			sc->msk_stat_map = NULL;
1996		}
1997		bus_dma_tag_destroy(sc->msk_stat_tag);
1998		sc->msk_stat_tag = NULL;
1999	}
2000}
2001
2002static int
2003msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
2004{
2005	struct msk_dmamap_arg ctx;
2006	struct msk_txdesc *txd;
2007	struct msk_rxdesc *rxd;
2008	struct msk_rxdesc *jrxd;
2009	struct msk_jpool_entry *entry;
2010	uint8_t *ptr;
2011	int error, i;
2012
2013	mtx_init(&sc_if->msk_jlist_mtx, "msk_jlist_mtx", NULL, MTX_DEF);
2014	SLIST_INIT(&sc_if->msk_jfree_listhead);
2015	SLIST_INIT(&sc_if->msk_jinuse_listhead);
2016
2017	/* Create parent DMA tag. */
2018	/*
2019	 * XXX
2020	 * It seems that Yukon II supports full 64bits DMA operations. But
2021	 * it needs two descriptors(list elements) for 64bits DMA operations.
2022	 * Since we don't know what DMA address mappings(32bits or 64bits)
2023	 * would be used in advance for each mbufs, we limits its DMA space
2024	 * to be in range of 32bits address space. Otherwise, we should check
2025	 * what DMA address is used and chain another descriptor for the
2026	 * 64bits DMA operation. This also means descriptor ring size is
2027	 * variable. Limiting DMA address to be in 32bit address space greatly
2028	 * simplyfies descriptor handling and possibly would increase
2029	 * performance a bit due to efficient handling of descriptors.
2030	 * Apart from harassing checksum offloading mechanisms, it seems
2031	 * it's really bad idea to use a seperate descriptor for 64bit
2032	 * DMA operation to save small descriptor memory. Anyway, I've
2033	 * never seen these exotic scheme on ethernet interface hardware.
2034	 */
2035	error = bus_dma_tag_create(
2036		    bus_get_dma_tag(sc_if->msk_if_dev),	/* parent */
2037		    1, 0,			/* alignment, boundary */
2038		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2039		    BUS_SPACE_MAXADDR,		/* highaddr */
2040		    NULL, NULL,			/* filter, filterarg */
2041		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
2042		    0,				/* nsegments */
2043		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
2044		    0,				/* flags */
2045		    NULL, NULL,			/* lockfunc, lockarg */
2046		    &sc_if->msk_cdata.msk_parent_tag);
2047	if (error != 0) {
2048		device_printf(sc_if->msk_if_dev,
2049		    "failed to create parent DMA tag\n");
2050		goto fail;
2051	}
2052	/* Create tag for Tx ring. */
2053	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2054		    MSK_RING_ALIGN, 0,		/* alignment, boundary */
2055		    BUS_SPACE_MAXADDR,		/* lowaddr */
2056		    BUS_SPACE_MAXADDR,		/* highaddr */
2057		    NULL, NULL,			/* filter, filterarg */
2058		    MSK_TX_RING_SZ,		/* maxsize */
2059		    1,				/* nsegments */
2060		    MSK_TX_RING_SZ,		/* maxsegsize */
2061		    0,				/* flags */
2062		    NULL, NULL,			/* lockfunc, lockarg */
2063		    &sc_if->msk_cdata.msk_tx_ring_tag);
2064	if (error != 0) {
2065		device_printf(sc_if->msk_if_dev,
2066		    "failed to create Tx ring DMA tag\n");
2067		goto fail;
2068	}
2069
2070	/* Create tag for Rx ring. */
2071	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2072		    MSK_RING_ALIGN, 0,		/* alignment, boundary */
2073		    BUS_SPACE_MAXADDR,		/* lowaddr */
2074		    BUS_SPACE_MAXADDR,		/* highaddr */
2075		    NULL, NULL,			/* filter, filterarg */
2076		    MSK_RX_RING_SZ,		/* maxsize */
2077		    1,				/* nsegments */
2078		    MSK_RX_RING_SZ,		/* maxsegsize */
2079		    0,				/* flags */
2080		    NULL, NULL,			/* lockfunc, lockarg */
2081		    &sc_if->msk_cdata.msk_rx_ring_tag);
2082	if (error != 0) {
2083		device_printf(sc_if->msk_if_dev,
2084		    "failed to create Rx ring DMA tag\n");
2085		goto fail;
2086	}
2087
2088	/* Create tag for jumbo Rx ring. */
2089	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2090		    MSK_RING_ALIGN, 0,		/* alignment, boundary */
2091		    BUS_SPACE_MAXADDR,		/* lowaddr */
2092		    BUS_SPACE_MAXADDR,		/* highaddr */
2093		    NULL, NULL,			/* filter, filterarg */
2094		    MSK_JUMBO_RX_RING_SZ,	/* maxsize */
2095		    1,				/* nsegments */
2096		    MSK_JUMBO_RX_RING_SZ,	/* maxsegsize */
2097		    0,				/* flags */
2098		    NULL, NULL,			/* lockfunc, lockarg */
2099		    &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2100	if (error != 0) {
2101		device_printf(sc_if->msk_if_dev,
2102		    "failed to create jumbo Rx ring DMA tag\n");
2103		goto fail;
2104	}
2105
2106	/* Create tag for jumbo buffer blocks. */
2107	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2108		    PAGE_SIZE, 0,		/* alignment, boundary */
2109		    BUS_SPACE_MAXADDR,		/* lowaddr */
2110		    BUS_SPACE_MAXADDR,		/* highaddr */
2111		    NULL, NULL,			/* filter, filterarg */
2112		    MSK_JMEM,			/* maxsize */
2113		    1,				/* nsegments */
2114		    MSK_JMEM,			/* maxsegsize */
2115		    0,				/* flags */
2116		    NULL, NULL,			/* lockfunc, lockarg */
2117		    &sc_if->msk_cdata.msk_jumbo_tag);
2118	if (error != 0) {
2119		device_printf(sc_if->msk_if_dev,
2120		    "failed to create jumbo Rx buffer block DMA tag\n");
2121		goto fail;
2122	}
2123
2124	/* Create tag for Tx buffers. */
2125	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2126		    1, 0,			/* alignment, boundary */
2127		    BUS_SPACE_MAXADDR,		/* lowaddr */
2128		    BUS_SPACE_MAXADDR,		/* highaddr */
2129		    NULL, NULL,			/* filter, filterarg */
2130		    MCLBYTES * MSK_MAXTXSEGS,	/* maxsize */
2131		    MSK_MAXTXSEGS,		/* nsegments */
2132		    MCLBYTES,			/* maxsegsize */
2133		    0,				/* flags */
2134		    NULL, NULL,			/* lockfunc, lockarg */
2135		    &sc_if->msk_cdata.msk_tx_tag);
2136	if (error != 0) {
2137		device_printf(sc_if->msk_if_dev,
2138		    "failed to create Tx DMA tag\n");
2139		goto fail;
2140	}
2141
2142	/* Create tag for Rx buffers. */
2143	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2144		    1, 0,			/* alignment, boundary */
2145		    BUS_SPACE_MAXADDR,		/* lowaddr */
2146		    BUS_SPACE_MAXADDR,		/* highaddr */
2147		    NULL, NULL,			/* filter, filterarg */
2148		    MCLBYTES,			/* maxsize */
2149		    1,				/* nsegments */
2150		    MCLBYTES,			/* maxsegsize */
2151		    0,				/* flags */
2152		    NULL, NULL,			/* lockfunc, lockarg */
2153		    &sc_if->msk_cdata.msk_rx_tag);
2154	if (error != 0) {
2155		device_printf(sc_if->msk_if_dev,
2156		    "failed to create Rx DMA tag\n");
2157		goto fail;
2158	}
2159
2160	/* Create tag for jumbo Rx buffers. */
2161	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2162		    PAGE_SIZE, 0,		/* alignment, boundary */
2163		    BUS_SPACE_MAXADDR,		/* lowaddr */
2164		    BUS_SPACE_MAXADDR,		/* highaddr */
2165		    NULL, NULL,			/* filter, filterarg */
2166		    MCLBYTES * MSK_MAXRXSEGS,	/* maxsize */
2167		    MSK_MAXRXSEGS,		/* nsegments */
2168		    MSK_JLEN,			/* maxsegsize */
2169		    0,				/* flags */
2170		    NULL, NULL,			/* lockfunc, lockarg */
2171		    &sc_if->msk_cdata.msk_jumbo_rx_tag);
2172	if (error != 0) {
2173		device_printf(sc_if->msk_if_dev,
2174		    "failed to create jumbo Rx DMA tag\n");
2175		goto fail;
2176	}
2177
2178	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
2179	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag,
2180	    (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK |
2181	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map);
2182	if (error != 0) {
2183		device_printf(sc_if->msk_if_dev,
2184		    "failed to allocate DMA'able memory for Tx ring\n");
2185		goto fail;
2186	}
2187
2188	ctx.msk_busaddr = 0;
2189	error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag,
2190	    sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring,
2191	    MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, 0);
2192	if (error != 0) {
2193		device_printf(sc_if->msk_if_dev,
2194		    "failed to load DMA'able memory for Tx ring\n");
2195		goto fail;
2196	}
2197	sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr;
2198
2199	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
2200	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag,
2201	    (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK |
2202	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map);
2203	if (error != 0) {
2204		device_printf(sc_if->msk_if_dev,
2205		    "failed to allocate DMA'able memory for Rx ring\n");
2206		goto fail;
2207	}
2208
2209	ctx.msk_busaddr = 0;
2210	error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag,
2211	    sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring,
2212	    MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, 0);
2213	if (error != 0) {
2214		device_printf(sc_if->msk_if_dev,
2215		    "failed to load DMA'able memory for Rx ring\n");
2216		goto fail;
2217	}
2218	sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr;
2219
2220	/* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
2221	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2222	    (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
2223	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2224	    &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2225	if (error != 0) {
2226		device_printf(sc_if->msk_if_dev,
2227		    "failed to allocate DMA'able memory for jumbo Rx ring\n");
2228		goto fail;
2229	}
2230
2231	ctx.msk_busaddr = 0;
2232	error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2233	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
2234	    sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
2235	    msk_dmamap_cb, &ctx, 0);
2236	if (error != 0) {
2237		device_printf(sc_if->msk_if_dev,
2238		    "failed to load DMA'able memory for jumbo Rx ring\n");
2239		goto fail;
2240	}
2241	sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
2242
2243	/* Create DMA maps for Tx buffers. */
2244	for (i = 0; i < MSK_TX_RING_CNT; i++) {
2245		txd = &sc_if->msk_cdata.msk_txdesc[i];
2246		txd->tx_m = NULL;
2247		txd->tx_dmamap = NULL;
2248		error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0,
2249		    &txd->tx_dmamap);
2250		if (error != 0) {
2251			device_printf(sc_if->msk_if_dev,
2252			    "failed to create Tx dmamap\n");
2253			goto fail;
2254		}
2255	}
2256	/* Create DMA maps for Rx buffers. */
2257	if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2258	    &sc_if->msk_cdata.msk_rx_sparemap)) != 0) {
2259		device_printf(sc_if->msk_if_dev,
2260		    "failed to create spare Rx dmamap\n");
2261		goto fail;
2262	}
2263	for (i = 0; i < MSK_RX_RING_CNT; i++) {
2264		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2265		rxd->rx_m = NULL;
2266		rxd->rx_dmamap = NULL;
2267		error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2268		    &rxd->rx_dmamap);
2269		if (error != 0) {
2270			device_printf(sc_if->msk_if_dev,
2271			    "failed to create Rx dmamap\n");
2272			goto fail;
2273		}
2274	}
2275	/* Create DMA maps for jumbo Rx buffers. */
2276	if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2277	    &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
2278		device_printf(sc_if->msk_if_dev,
2279		    "failed to create spare jumbo Rx dmamap\n");
2280		goto fail;
2281	}
2282	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2283		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2284		jrxd->rx_m = NULL;
2285		jrxd->rx_dmamap = NULL;
2286		error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2287		    &jrxd->rx_dmamap);
2288		if (error != 0) {
2289			device_printf(sc_if->msk_if_dev,
2290			    "failed to create jumbo Rx dmamap\n");
2291			goto fail;
2292		}
2293	}
2294
2295	/* Allocate DMA'able memory and load the DMA map for jumbo buf. */
2296	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_tag,
2297	    (void **)&sc_if->msk_rdata.msk_jumbo_buf,
2298	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2299	    &sc_if->msk_cdata.msk_jumbo_map);
2300	if (error != 0) {
2301		device_printf(sc_if->msk_if_dev,
2302		    "failed to allocate DMA'able memory for jumbo buf\n");
2303		goto fail;
2304	}
2305
2306	ctx.msk_busaddr = 0;
2307	error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_tag,
2308	    sc_if->msk_cdata.msk_jumbo_map, sc_if->msk_rdata.msk_jumbo_buf,
2309	    MSK_JMEM, msk_dmamap_cb, &ctx, 0);
2310	if (error != 0) {
2311		device_printf(sc_if->msk_if_dev,
2312		    "failed to load DMA'able memory for jumbobuf\n");
2313		goto fail;
2314	}
2315	sc_if->msk_rdata.msk_jumbo_buf_paddr = ctx.msk_busaddr;
2316
2317	/*
2318	 * Now divide it up into 9K pieces and save the addresses
2319	 * in an array.
2320	 */
2321	ptr = sc_if->msk_rdata.msk_jumbo_buf;
2322	for (i = 0; i < MSK_JSLOTS; i++) {
2323		sc_if->msk_cdata.msk_jslots[i] = ptr;
2324		ptr += MSK_JLEN;
2325		entry = malloc(sizeof(struct msk_jpool_entry),
2326		    M_DEVBUF, M_WAITOK);
2327		if (entry == NULL) {
2328			device_printf(sc_if->msk_if_dev,
2329			    "no memory for jumbo buffers!\n");
2330			error = ENOMEM;
2331			goto fail;
2332		}
2333		entry->slot = i;
2334		SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
2335		    jpool_entries);
2336	}
2337
2338fail:
2339	return (error);
2340}
2341
2342static void
2343msk_txrx_dma_free(struct msk_if_softc *sc_if)
2344{
2345	struct msk_txdesc *txd;
2346	struct msk_rxdesc *rxd;
2347	struct msk_rxdesc *jrxd;
2348	struct msk_jpool_entry *entry;
2349	int i;
2350
2351	MSK_JLIST_LOCK(sc_if);
2352	while ((entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead))) {
2353		device_printf(sc_if->msk_if_dev,
2354		    "asked to free buffer that is in use!\n");
2355		SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
2356		SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
2357		    jpool_entries);
2358	}
2359
2360	while (!SLIST_EMPTY(&sc_if->msk_jfree_listhead)) {
2361		entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
2362		SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
2363		free(entry, M_DEVBUF);
2364	}
2365	MSK_JLIST_UNLOCK(sc_if);
2366
2367	/* Destroy jumbo buffer block. */
2368	if (sc_if->msk_cdata.msk_jumbo_map)
2369		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_tag,
2370		    sc_if->msk_cdata.msk_jumbo_map);
2371
2372	if (sc_if->msk_rdata.msk_jumbo_buf) {
2373		bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_tag,
2374		    sc_if->msk_rdata.msk_jumbo_buf,
2375		    sc_if->msk_cdata.msk_jumbo_map);
2376		sc_if->msk_rdata.msk_jumbo_buf = NULL;
2377		sc_if->msk_cdata.msk_jumbo_map = NULL;
2378	}
2379
2380	/* Tx ring. */
2381	if (sc_if->msk_cdata.msk_tx_ring_tag) {
2382		if (sc_if->msk_cdata.msk_tx_ring_map)
2383			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag,
2384			    sc_if->msk_cdata.msk_tx_ring_map);
2385		if (sc_if->msk_cdata.msk_tx_ring_map &&
2386		    sc_if->msk_rdata.msk_tx_ring)
2387			bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag,
2388			    sc_if->msk_rdata.msk_tx_ring,
2389			    sc_if->msk_cdata.msk_tx_ring_map);
2390		sc_if->msk_rdata.msk_tx_ring = NULL;
2391		sc_if->msk_cdata.msk_tx_ring_map = NULL;
2392		bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag);
2393		sc_if->msk_cdata.msk_tx_ring_tag = NULL;
2394	}
2395	/* Rx ring. */
2396	if (sc_if->msk_cdata.msk_rx_ring_tag) {
2397		if (sc_if->msk_cdata.msk_rx_ring_map)
2398			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag,
2399			    sc_if->msk_cdata.msk_rx_ring_map);
2400		if (sc_if->msk_cdata.msk_rx_ring_map &&
2401		    sc_if->msk_rdata.msk_rx_ring)
2402			bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag,
2403			    sc_if->msk_rdata.msk_rx_ring,
2404			    sc_if->msk_cdata.msk_rx_ring_map);
2405		sc_if->msk_rdata.msk_rx_ring = NULL;
2406		sc_if->msk_cdata.msk_rx_ring_map = NULL;
2407		bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag);
2408		sc_if->msk_cdata.msk_rx_ring_tag = NULL;
2409	}
2410	/* Jumbo Rx ring. */
2411	if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
2412		if (sc_if->msk_cdata.msk_jumbo_rx_ring_map)
2413			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2414			    sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2415		if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
2416		    sc_if->msk_rdata.msk_jumbo_rx_ring)
2417			bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2418			    sc_if->msk_rdata.msk_jumbo_rx_ring,
2419			    sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2420		sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
2421		sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
2422		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2423		sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
2424	}
2425	/* Tx buffers. */
2426	if (sc_if->msk_cdata.msk_tx_tag) {
2427		for (i = 0; i < MSK_TX_RING_CNT; i++) {
2428			txd = &sc_if->msk_cdata.msk_txdesc[i];
2429			if (txd->tx_dmamap) {
2430				bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
2431				    txd->tx_dmamap);
2432				txd->tx_dmamap = NULL;
2433			}
2434		}
2435		bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
2436		sc_if->msk_cdata.msk_tx_tag = NULL;
2437	}
2438	/* Rx buffers. */
2439	if (sc_if->msk_cdata.msk_rx_tag) {
2440		for (i = 0; i < MSK_RX_RING_CNT; i++) {
2441			rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2442			if (rxd->rx_dmamap) {
2443				bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2444				    rxd->rx_dmamap);
2445				rxd->rx_dmamap = NULL;
2446			}
2447		}
2448		if (sc_if->msk_cdata.msk_rx_sparemap) {
2449			bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2450			    sc_if->msk_cdata.msk_rx_sparemap);
			sc_if->msk_cdata.msk_rx_sparemap = NULL;
2452		}
2453		bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2454		sc_if->msk_cdata.msk_rx_tag = NULL;
2455	}
2456	/* Jumbo Rx buffers. */
2457	if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
2458		for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2459			jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2460			if (jrxd->rx_dmamap) {
2461				bus_dmamap_destroy(
2462				    sc_if->msk_cdata.msk_jumbo_rx_tag,
2463				    jrxd->rx_dmamap);
2464				jrxd->rx_dmamap = NULL;
2465			}
2466		}
2467		if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
2468			bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
2469			    sc_if->msk_cdata.msk_jumbo_rx_sparemap);
			sc_if->msk_cdata.msk_jumbo_rx_sparemap = NULL;
2471		}
2472		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
2473		sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
2474	}
2475
2476	if (sc_if->msk_cdata.msk_parent_tag) {
2477		bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
2478		sc_if->msk_cdata.msk_parent_tag = NULL;
2479	}
2480	mtx_destroy(&sc_if->msk_jlist_mtx);
2481}
2482
2483/*
2484 * Allocate a jumbo buffer.
2485 */
2486static void *
2487msk_jalloc(struct msk_if_softc *sc_if)
2488{
2489	struct msk_jpool_entry *entry;
2490
2491	MSK_JLIST_LOCK(sc_if);
2492
2493	entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
2494
2495	if (entry == NULL) {
2496		MSK_JLIST_UNLOCK(sc_if);
2497		return (NULL);
2498	}
2499
2500	SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
2501	SLIST_INSERT_HEAD(&sc_if->msk_jinuse_listhead, entry, jpool_entries);
2502
2503	MSK_JLIST_UNLOCK(sc_if);
2504
2505	return (sc_if->msk_cdata.msk_jslots[entry->slot]);
2506}
2507
2508/*
2509 * Release a jumbo buffer.
2510 */
2511static void
2512msk_jfree(void *buf, void *args)
2513{
2514	struct msk_if_softc *sc_if;
2515	struct msk_jpool_entry *entry;
2516	int i;
2517
2518	/* Extract the softc struct pointer. */
2519	sc_if = (struct msk_if_softc *)args;
2520	KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__));
2521
2522	MSK_JLIST_LOCK(sc_if);
2523	/* Calculate the slot this buffer belongs to. */
2524	i = ((vm_offset_t)buf
2525	     - (vm_offset_t)sc_if->msk_rdata.msk_jumbo_buf) / MSK_JLEN;
2526	KASSERT(i >= 0 && i < MSK_JSLOTS,
2527	    ("%s: asked to free buffer that we don't manage!", __func__));
2528
2529	entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead);
2530	KASSERT(entry != NULL, ("%s: buffer not in use!", __func__));
2531	entry->slot = i;
2532	SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
2533	SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, jpool_entries);
2534	if (SLIST_EMPTY(&sc_if->msk_jinuse_listhead))
2535		wakeup(sc_if);
2536
2537	MSK_JLIST_UNLOCK(sc_if);
2538}
2539
2540/*
2541 * It's copy of ath_defrag(ath(4)).
2542 *
2543 * Defragment an mbuf chain, returning at most maxfrags separate
2544 * mbufs+clusters.  If this is not possible NULL is returned and
2545 * the original mbuf chain is left in it's present (potentially
2546 * modified) state.  We use two techniques: collapsing consecutive
2547 * mbufs and replacing consecutive mbufs by a cluster.
2548 */
2549static struct mbuf *
2550msk_defrag(struct mbuf *m0, int how, int maxfrags)
2551{
2552	struct mbuf *m, *n, *n2, **prev;
2553	u_int curfrags;
2554
2555	/*
2556	 * Calculate the current number of frags.
2557	 */
2558	curfrags = 0;
2559	for (m = m0; m != NULL; m = m->m_next)
2560		curfrags++;
2561	/*
2562	 * First, try to collapse mbufs.  Note that we always collapse
2563	 * towards the front so we don't need to deal with moving the
2564	 * pkthdr.  This may be suboptimal if the first mbuf has much
2565	 * less data than the following.
2566	 */
2567	m = m0;
2568again:
2569	for (;;) {
2570		n = m->m_next;
2571		if (n == NULL)
2572			break;
2573		if ((m->m_flags & M_RDONLY) == 0 &&
2574		    n->m_len < M_TRAILINGSPACE(m)) {
2575			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
2576				n->m_len);
2577			m->m_len += n->m_len;
2578			m->m_next = n->m_next;
2579			m_free(n);
2580			if (--curfrags <= maxfrags)
2581				return (m0);
2582		} else
2583			m = n;
2584	}
2585	KASSERT(maxfrags > 1,
2586		("maxfrags %u, but normal collapse failed", maxfrags));
2587	/*
2588	 * Collapse consecutive mbufs to a cluster.
2589	 */
2590	prev = &m0->m_next;		/* NB: not the first mbuf */
2591	while ((n = *prev) != NULL) {
2592		if ((n2 = n->m_next) != NULL &&
2593		    n->m_len + n2->m_len < MCLBYTES) {
2594			m = m_getcl(how, MT_DATA, 0);
2595			if (m == NULL)
2596				goto bad;
2597			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
2598			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
2599				n2->m_len);
2600			m->m_len = n->m_len + n2->m_len;
2601			m->m_next = n2->m_next;
2602			*prev = m;
2603			m_free(n);
2604			m_free(n2);
2605			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
				return (m0);
2607			/*
2608			 * Still not there, try the normal collapse
2609			 * again before we allocate another cluster.
2610			 */
2611			goto again;
2612		}
2613		prev = &n->m_next;
2614	}
2615	/*
2616	 * No place where we can collapse to a cluster; punt.
2617	 * This can occur if, for example, you request 2 frags
2618	 * but the packet requires that both be clusters (we
2619	 * never reallocate the first mbuf to avoid moving the
2620	 * packet header).
2621	 */
2622bad:
2623	return (NULL);
2624}
2625
2626static int
2627msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
2628{
2629	struct msk_txdesc *txd, *txd_last;
2630	struct msk_tx_desc *tx_le;
2631	struct mbuf *m;
2632	bus_dmamap_t map;
2633	bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
2634	uint32_t control, prod, si;
2635	uint16_t offset, tcp_offset, tso_mtu;
2636	int error, i, nseg, tso;
2637
2638	MSK_IF_LOCK_ASSERT(sc_if);
2639
2640	tcp_offset = offset = 0;
2641	m = *m_head;
2642	if ((m->m_pkthdr.csum_flags & (MSK_CSUM_FEATURES | CSUM_TSO)) != 0) {
2643		/*
2644		 * Since mbuf has no protocol specific structure information
2645		 * in it we have to inspect protocol information here to
2646		 * setup TSO and checksum offload. I don't know why Marvell
2647		 * made a such decision in chip design because other GigE
2648		 * hardwares normally takes care of all these chores in
2649		 * hardware. However, TSO performance of Yukon II is very
2650		 * good such that it's worth to implement it.
2651		 */
2652		struct ether_vlan_header *evh;
2653		struct ether_header *eh;
2654		struct ip *ip;
2655		struct tcphdr *tcp;
2656
2657		/* TODO check for M_WRITABLE(m) */
2658
2659		offset = sizeof(struct ether_header);
2660		m = m_pullup(m, offset);
2661		if (m == NULL) {
2662			*m_head = NULL;
2663			return (ENOBUFS);
2664		}
2665		eh = mtod(m, struct ether_header *);
2666		/* Check if hardware VLAN insertion is off. */
2667		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2668			offset = sizeof(struct ether_vlan_header);
2669			m = m_pullup(m, offset);
2670			if (m == NULL) {
2671				*m_head = NULL;
2672				return (ENOBUFS);
2673			}
2674			evh = mtod(m, struct ether_vlan_header *);
2675			ip = (struct ip *)(evh + 1);
2676		} else
2677			ip = (struct ip *)(eh + 1);
2678		m = m_pullup(m, offset + sizeof(struct ip));
2679		if (m == NULL) {
2680			*m_head = NULL;
2681			return (ENOBUFS);
2682		}
2683		offset += (ip->ip_hl << 2);
2684		tcp_offset = offset;
2685		if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2686			m = m_pullup(m, offset + sizeof(struct tcphdr));
2687			if (m == NULL) {
2688				*m_head = NULL;
2689				return (ENOBUFS);
2690			}
2691			tcp = mtod(m, struct tcphdr *);
2692			offset += (tcp->th_off << 2);
2693		}
2694		*m_head = m;
2695	}
2696
2697	prod = sc_if->msk_cdata.msk_tx_prod;
2698	txd = &sc_if->msk_cdata.msk_txdesc[prod];
2699	txd_last = txd;
2700	map = txd->tx_dmamap;
2701	error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map,
2702	    *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2703	if (error == EFBIG) {
2704		m = msk_defrag(*m_head, M_DONTWAIT, MSK_MAXTXSEGS);
2705		if (m == NULL) {
2706			m_freem(*m_head);
2707			*m_head = NULL;
2708			return (ENOBUFS);
2709		}
2710		*m_head = m;
2711		error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag,
2712		    map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2713		if (error != 0) {
2714			m_freem(*m_head);
2715			*m_head = NULL;
2716			return (error);
2717		}
2718	} else if (error != 0)
2719		return (error);
2720	if (nseg == 0) {
2721		m_freem(*m_head);
2722		*m_head = NULL;
2723		return (EIO);
2724	}
2725
2726	/* Check number of available descriptors. */
2727	if (sc_if->msk_cdata.msk_tx_cnt + nseg >=
2728	    (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) {
2729		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
2730		return (ENOBUFS);
2731	}
2732
2733	control = 0;
2734	tso = 0;
2735	tx_le = NULL;
2736
2737	/* Check TSO support. */
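	/*
	 * An OP_LRGLEN list element is only queued when the TSO MTU
	 * (header length plus MSS) differs from the value programmed last
	 * time; otherwise the previously programmed value is reused.
	 */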
2738	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2739		tso_mtu = offset + m->m_pkthdr.tso_segsz;
2740		if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) {
2741			tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2742			tx_le->msk_addr = htole32(tso_mtu);
2743			tx_le->msk_control = htole32(OP_LRGLEN | HW_OWNER);
2744			sc_if->msk_cdata.msk_tx_cnt++;
2745			MSK_INC(prod, MSK_TX_RING_CNT);
2746			sc_if->msk_cdata.msk_tso_mtu = tso_mtu;
2747		}
2748		tso++;
2749	}
2750	/* Check if we have a VLAN tag to insert. */
2751	if ((m->m_flags & M_VLANTAG) != 0) {
2752		if (tso == 0) {
2753			tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2754			tx_le->msk_addr = htole32(0);
2755			tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
2756			    htons(m->m_pkthdr.ether_vtag));
2757			sc_if->msk_cdata.msk_tx_cnt++;
2758			MSK_INC(prod, MSK_TX_RING_CNT);
2759		} else {
2760			tx_le->msk_control |= htole32(OP_VLAN |
2761			    htons(m->m_pkthdr.ether_vtag));
2762		}
2763		control |= INS_VLAN;
2764	}
2765	/* Check if we have to handle checksum offload. */
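	/*
	 * The checksum LE's address field carries the position where the
	 * computed checksum is stored (tcp_offset + csum_data) in the low
	 * 16 bits and the checksum start offset in the high 16 bits.
	 */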
2766	if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) {
2767		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2768		tx_le->msk_addr = htole32(((tcp_offset + m->m_pkthdr.csum_data)
2769		    & 0xffff) | ((uint32_t)tcp_offset << 16));
2770		tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER));
2771		control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
2772		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2773			control |= UDPTCP;
2774		sc_if->msk_cdata.msk_tx_cnt++;
2775		MSK_INC(prod, MSK_TX_RING_CNT);
2776	}
2777
2778	si = prod;
2779	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2780	tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
2781	if (tso == 0)
2782		tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2783		    OP_PACKET);
2784	else
2785		tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2786		    OP_LARGESEND);
2787	sc_if->msk_cdata.msk_tx_cnt++;
2788	MSK_INC(prod, MSK_TX_RING_CNT);
2789
2790	for (i = 1; i < nseg; i++) {
2791		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2792		tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
2793		tx_le->msk_control = htole32(txsegs[i].ds_len | control |
2794		    OP_BUFFER | HW_OWNER);
2795		sc_if->msk_cdata.msk_tx_cnt++;
2796		MSK_INC(prod, MSK_TX_RING_CNT);
2797	}
2798	/* Update producer index. */
2799	sc_if->msk_cdata.msk_tx_prod = prod;
2800
	/* Set EOP on the last descriptor. */
2802	prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
2803	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2804	tx_le->msk_control |= htole32(EOP);
2805
2806	/* Turn the first descriptor ownership to hardware. */
2807	tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
2808	tx_le->msk_control |= htole32(HW_OWNER);
2809
2810	txd = &sc_if->msk_cdata.msk_txdesc[prod];
2811	map = txd_last->tx_dmamap;
2812	txd_last->tx_dmamap = txd->tx_dmamap;
2813	txd->tx_dmamap = map;
2814	txd->tx_m = m;
2815
2816	/* Sync descriptors. */
2817	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
2818	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
2819	    sc_if->msk_cdata.msk_tx_ring_map,
2820	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2821
2822	return (0);
2823}
2824
2825static void
2826msk_tx_task(void *arg, int pending)
2827{
2828	struct ifnet *ifp;
2829
2830	ifp = arg;
2831	msk_start(ifp);
2832}
2833
2834static void
2835msk_start(struct ifnet *ifp)
2836{
	struct msk_if_softc *sc_if;
	struct mbuf *m_head;
2839	int enq;
2840
2841	sc_if = ifp->if_softc;
2842
2843	MSK_IF_LOCK(sc_if);
2844
2845	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2846	    IFF_DRV_RUNNING || sc_if->msk_link == 0) {
2847		MSK_IF_UNLOCK(sc_if);
2848		return;
2849	}
2850
2851	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2852	    sc_if->msk_cdata.msk_tx_cnt <
2853	    (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
2854		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2855		if (m_head == NULL)
2856			break;
2857		/*
2858		 * Pack the data into the transmit ring. If we
2859		 * don't have room, set the OACTIVE flag and wait
2860		 * for the NIC to drain the ring.
2861		 */
2862		if (msk_encap(sc_if, &m_head) != 0) {
2863			if (m_head == NULL)
2864				break;
2865			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2866			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2867			break;
2868		}
2869
2870		enq++;
2871		/*
2872		 * If there's a BPF listener, bounce a copy of this frame
2873		 * to him.
2874		 */
2875		BPF_MTAP(ifp, m_head);
2876	}
2877
2878	if (enq > 0) {
2879		/* Transmit */
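		/*
		 * Writing the new producer index to the Tx queue's
		 * prefetch unit starts DMA of the freshly queued
		 * descriptors.
		 */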
2880		CSR_WRITE_2(sc_if->msk_softc,
2881		    Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
2882		    sc_if->msk_cdata.msk_tx_prod);
2883
2884		/* Set a timeout in case the chip goes out to lunch. */
2885		sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT;
2886	}
2887
2888	MSK_IF_UNLOCK(sc_if);
2889}
2890
2891static void
2892msk_watchdog(struct msk_if_softc *sc_if)
2893{
2894	struct ifnet *ifp;
2895	uint32_t ridx;
2896	int idx;
2897
2898	MSK_IF_LOCK_ASSERT(sc_if);
2899
2900	if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer)
2901		return;
2902	ifp = sc_if->msk_ifp;
2903	if (sc_if->msk_link == 0) {
2904		if (bootverbose)
2905			if_printf(sc_if->msk_ifp, "watchdog timeout "
2906			   "(missed link)\n");
2907		ifp->if_oerrors++;
2908		msk_init_locked(sc_if);
2909		return;
2910	}
2911
2912	/*
2913	 * Reclaim first as there is a possibility of losing Tx completion
2914	 * interrupts.
2915	 */
2916	ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX;
2917	idx = CSR_READ_2(sc_if->msk_softc, ridx);
2918	if (sc_if->msk_cdata.msk_tx_cons != idx) {
2919		msk_txeof(sc_if, idx);
2920		if (sc_if->msk_cdata.msk_tx_cnt == 0) {
2921			if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2922			    "-- recovering\n");
2923			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2924				taskqueue_enqueue(taskqueue_fast,
2925				    &sc_if->msk_tx_task);
2926			return;
2927		}
2928	}
2929
2930	if_printf(ifp, "watchdog timeout\n");
2931	ifp->if_oerrors++;
2932	msk_init_locked(sc_if);
2933	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2934		taskqueue_enqueue(taskqueue_fast, &sc_if->msk_tx_task);
2935}
2936
2937static void
2938mskc_shutdown(device_t dev)
2939{
2940	struct msk_softc *sc;
2941	int i;
2942
2943	sc = device_get_softc(dev);
2944	MSK_LOCK(sc);
2945	for (i = 0; i < sc->msk_num_port; i++) {
2946		if (sc->msk_if[i] != NULL)
2947			msk_stop(sc->msk_if[i]);
2948	}
2949
2950	/* Disable all interrupts. */
2951	CSR_WRITE_4(sc, B0_IMSK, 0);
2952	CSR_READ_4(sc, B0_IMSK);
2953	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2954	CSR_READ_4(sc, B0_HWE_IMSK);
2955
2956	/* Put hardware reset. */
2957	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2958
2959	MSK_UNLOCK(sc);
2960}
2961
2962static int
2963mskc_suspend(device_t dev)
2964{
2965	struct msk_softc *sc;
2966	int i;
2967
2968	sc = device_get_softc(dev);
2969
2970	MSK_LOCK(sc);
2971
2972	for (i = 0; i < sc->msk_num_port; i++) {
2973		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2974		    ((sc->msk_if[i]->msk_ifp->if_drv_flags &
2975		    IFF_DRV_RUNNING) != 0))
2976			msk_stop(sc->msk_if[i]);
2977	}
2978
2979	/* Disable all interrupts. */
2980	CSR_WRITE_4(sc, B0_IMSK, 0);
2981	CSR_READ_4(sc, B0_IMSK);
2982	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2983	CSR_READ_4(sc, B0_HWE_IMSK);
2984
2985	msk_phy_power(sc, MSK_PHY_POWERDOWN);
2986
2987	/* Put hardware reset. */
2988	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2989	sc->msk_suspended = 1;
2990
2991	MSK_UNLOCK(sc);
2992
2993	return (0);
2994}
2995
2996static int
2997mskc_resume(device_t dev)
2998{
2999	struct msk_softc *sc;
3000	int i;
3001
3002	sc = device_get_softc(dev);
3003
3004	MSK_LOCK(sc);
3005
3006	mskc_reset(sc);
3007	for (i = 0; i < sc->msk_num_port; i++) {
3008		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
3009		    ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0))
3010			msk_init_locked(sc->msk_if[i]);
3011	}
3012	sc->msk_suspended = 0;
3013
3014	MSK_UNLOCK(sc);
3015
3016	return (0);
3017}
3018
3019static void
3020msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
3021{
3022	struct mbuf *m;
3023	struct ifnet *ifp;
3024	struct msk_rxdesc *rxd;
3025	int cons, rxlen;
3026
3027	ifp = sc_if->msk_ifp;
3028
3029	MSK_IF_LOCK_ASSERT(sc_if);
3030
3031	cons = sc_if->msk_cdata.msk_rx_cons;
3032	do {
3033		rxlen = status >> 16;
3034		if ((status & GMR_FS_VLAN) != 0)
3035			rxlen -= ETHER_VLAN_ENCAP_LEN;
3036		if (len > sc_if->msk_framesize ||
3037		    ((status & GMR_FS_ANY_ERR) != 0) ||
3038		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
			/* Don't count flow-control packets as errors. */
3040			if ((status & GMR_FS_GOOD_FC) == 0)
3041				ifp->if_ierrors++;
3042			msk_discard_rxbuf(sc_if, cons);
3043			break;
3044		}
3045		rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
3046		m = rxd->rx_m;
3047		if (msk_newbuf(sc_if, cons) != 0) {
3048			ifp->if_iqdrops++;
3049			/* Reuse old buffer. */
3050			msk_discard_rxbuf(sc_if, cons);
3051			break;
3052		}
3053		m->m_pkthdr.rcvif = ifp;
3054		m->m_pkthdr.len = m->m_len = len;
3055		ifp->if_ipackets++;
3056		/* Check for VLAN tagged packets. */
3057		if ((status & GMR_FS_VLAN) != 0 &&
3058		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3059			m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
3060			m->m_flags |= M_VLANTAG;
3061		}
3062		MSK_IF_UNLOCK(sc_if);
3063		(*ifp->if_input)(ifp, m);
3064		MSK_IF_LOCK(sc_if);
3065	} while (0);
3066
3067	MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
3068	MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
3069}
3070
3071static void
3072msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
3073{
3074	struct mbuf *m;
3075	struct ifnet *ifp;
3076	struct msk_rxdesc *jrxd;
3077	int cons, rxlen;
3078
3079	ifp = sc_if->msk_ifp;
3080
3081	MSK_IF_LOCK_ASSERT(sc_if);
3082
3083	cons = sc_if->msk_cdata.msk_rx_cons;
3084	do {
3085		rxlen = status >> 16;
3086		if ((status & GMR_FS_VLAN) != 0)
3087			rxlen -= ETHER_VLAN_ENCAP_LEN;
3088		if (len > sc_if->msk_framesize ||
3089		    ((status & GMR_FS_ANY_ERR) != 0) ||
3090		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
			/* Don't count flow-control packets as errors. */
3092			if ((status & GMR_FS_GOOD_FC) == 0)
3093				ifp->if_ierrors++;
3094			msk_discard_jumbo_rxbuf(sc_if, cons);
3095			break;
3096		}
3097		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
3098		m = jrxd->rx_m;
3099		if (msk_jumbo_newbuf(sc_if, cons) != 0) {
3100			ifp->if_iqdrops++;
3101			/* Reuse old buffer. */
3102			msk_discard_jumbo_rxbuf(sc_if, cons);
3103			break;
3104		}
3105		m->m_pkthdr.rcvif = ifp;
3106		m->m_pkthdr.len = m->m_len = len;
3107		ifp->if_ipackets++;
3108		/* Check for VLAN tagged packets. */
3109		if ((status & GMR_FS_VLAN) != 0 &&
3110		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3111			m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
3112			m->m_flags |= M_VLANTAG;
3113		}
3114		MSK_IF_UNLOCK(sc_if);
3115		(*ifp->if_input)(ifp, m);
3116		MSK_IF_LOCK(sc_if);
3117	} while (0);
3118
3119	MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
3120	MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
3121}
3122
3123static void
3124msk_txeof(struct msk_if_softc *sc_if, int idx)
3125{
3126	struct msk_txdesc *txd;
3127	struct msk_tx_desc *cur_tx;
3128	struct ifnet *ifp;
3129	uint32_t control;
3130	int cons, prog;
3131
3132	MSK_IF_LOCK_ASSERT(sc_if);
3133
3134	ifp = sc_if->msk_ifp;
3135
3136	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
3137	    sc_if->msk_cdata.msk_tx_ring_map,
3138	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3139	/*
3140	 * Go through our tx ring and free mbufs for those
3141	 * frames that have been sent.
3142	 */
3143	cons = sc_if->msk_cdata.msk_tx_cons;
3144	prog = 0;
3145	for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
3146		if (sc_if->msk_cdata.msk_tx_cnt <= 0)
3147			break;
3148		prog++;
3149		cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
3150		control = le32toh(cur_tx->msk_control);
3151		sc_if->msk_cdata.msk_tx_cnt--;
3152		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3153		if ((control & EOP) == 0)
3154			continue;
3155		txd = &sc_if->msk_cdata.msk_txdesc[cons];
3156		bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap,
3157		    BUS_DMASYNC_POSTWRITE);
3158		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);
3159
3160		ifp->if_opackets++;
3161		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
3162		    __func__));
3163		m_freem(txd->tx_m);
3164		txd->tx_m = NULL;
3165	}
3166
3167	if (prog > 0) {
3168		sc_if->msk_cdata.msk_tx_cons = cons;
3169		if (sc_if->msk_cdata.msk_tx_cnt == 0)
3170			sc_if->msk_watchdog_timer = 0;
3171		/* No need to sync LEs as we didn't update LEs. */
3172	}
3173}
3174
3175static void
3176msk_tick(void *xsc_if)
3177{
3178	struct msk_if_softc *sc_if;
3179	struct mii_data *mii;
3180
3181	sc_if = xsc_if;
3182
3183	MSK_IF_LOCK_ASSERT(sc_if);
3184
3185	mii = device_get_softc(sc_if->msk_miibus);
3186
3187	mii_tick(mii);
3188	msk_watchdog(sc_if);
3189	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3190}
3191
3192static void
3193msk_intr_phy(struct msk_if_softc *sc_if)
3194{
3195	uint16_t status;
3196
3197	if (sc_if->msk_softc->msk_marvell_phy) {
3198		msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3199		status = msk_phy_readreg(sc_if, PHY_ADDR_MARV,
3200		    PHY_MARV_INT_STAT);
3201		/* Handle FIFO Underrun/Overflow? */
3202		if ((status & PHY_M_IS_FIFO_ERROR))
3203			device_printf(sc_if->msk_if_dev,
3204			    "PHY FIFO underrun/overflow.\n");
3205	}
3206}
3207
3208static void
3209msk_intr_gmac(struct msk_if_softc *sc_if)
3210{
3211	struct msk_softc *sc;
3212	uint8_t status;
3213
3214	sc = sc_if->msk_softc;
3215	status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3216
3217	/* GMAC Rx FIFO overrun. */
3218	if ((status & GM_IS_RX_FF_OR) != 0) {
3219		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3220		    GMF_CLI_RX_FO);
3221		device_printf(sc_if->msk_if_dev, "Rx FIFO overrun!\n");
3222	}
3223	/* GMAC Tx FIFO underrun. */
3224	if ((status & GM_IS_TX_FF_UR) != 0) {
3225		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3226		    GMF_CLI_TX_FU);
3227		device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
3228		/*
3229		 * XXX
3230		 * In case of Tx underrun, we may need to flush/reset
3231		 * Tx MAC but that would also require resynchronization
3232		 * with status LEs. Reintializing status LEs would
3233		 * affect other port in dual MAC configuration so it
3234		 * should be avoided as possible as we can.
3235		 * Due to lack of documentation it's all vague guess but
3236		 * it needs more investigation.
3237		 */
3238	}
3239}
3240
3241static void
3242msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
3243{
3244	struct msk_softc *sc;
3245
3246	sc = sc_if->msk_softc;
3247	if ((status & Y2_IS_PAR_RD1) != 0) {
3248		device_printf(sc_if->msk_if_dev,
3249		    "RAM buffer read parity error\n");
3250		/* Clear IRQ. */
3251		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3252		    RI_CLR_RD_PERR);
3253	}
3254	if ((status & Y2_IS_PAR_WR1) != 0) {
3255		device_printf(sc_if->msk_if_dev,
3256		    "RAM buffer write parity error\n");
3257		/* Clear IRQ. */
3258		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3259		    RI_CLR_WR_PERR);
3260	}
3261	if ((status & Y2_IS_PAR_MAC1) != 0) {
3262		device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
3263		/* Clear IRQ. */
3264		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3265		    GMF_CLI_TX_PE);
3266	}
3267	if ((status & Y2_IS_PAR_RX1) != 0) {
3268		device_printf(sc_if->msk_if_dev, "Rx parity error\n");
3269		/* Clear IRQ. */
3270		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
3271	}
3272	if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
3273		device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
3274		/* Clear IRQ. */
3275		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
3276	}
3277}
3278
3279static void
3280msk_intr_hwerr(struct msk_softc *sc)
3281{
3282	uint32_t status;
3283	uint32_t tlphead[4];
3284
3285	status = CSR_READ_4(sc, B0_HWE_ISRC);
3286	/* Time Stamp timer overflow. */
3287	if ((status & Y2_IS_TIST_OV) != 0)
3288		CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
3289	if ((status & Y2_IS_PCI_NEXP) != 0) {
3290		/*
3291		 * PCI Express Error occured which is not described in PEX
3292		 * spec.
3293		 * This error is also mapped either to Master Abort(
3294		 * Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
3295		 * can only be cleared there.
3296                 */
3297		device_printf(sc->msk_dev,
3298		    "PCI Express protocol violation error\n");
3299	}
3300
3301	if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
3302		uint16_t v16;
3303
		if ((status & Y2_IS_MST_ERR) != 0)
			device_printf(sc->msk_dev,
			    "unexpected IRQ Master error\n");
		else
			device_printf(sc->msk_dev,
			    "unexpected IRQ Status error\n");
3310		/* Reset all bits in the PCI status register. */
3311		v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
3312		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3313		pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
3314		    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
3315		    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
3316		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3317	}
3318
3319	/* Check for PCI Express Uncorrectable Error. */
3320	if ((status & Y2_IS_PCI_EXP) != 0) {
3321		uint32_t v32;
3322
3323		/*
3324		 * On PCI Express bus bridges are called root complexes (RC).
3325		 * PCI Express errors are recognized by the root complex too,
3326		 * which requests the system to handle the problem. After
3327		 * error occurence it may be that no access to the adapter
3328		 * may be performed any longer.
3329		 */
3330
3331		v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
3332		if ((v32 & PEX_UNSUP_REQ) != 0) {
3333			/* Ignore unsupported request error. */
3334			device_printf(sc->msk_dev,
3335			    "Uncorrectable PCI Express error\n");
3336		}
3337		if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
3338			int i;
3339
			/* Get TLP header from the Log Registers. */
3341			for (i = 0; i < 4; i++)
3342				tlphead[i] = CSR_PCI_READ_4(sc,
3343				    PEX_HEADER_LOG + i * 4);
3344			/* Check for vendor defined broadcast message. */
3345			if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
3346				sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
3347				CSR_WRITE_4(sc, B0_HWE_IMSK,
3348				    sc->msk_intrhwemask);
3349				CSR_READ_4(sc, B0_HWE_IMSK);
3350			}
3351		}
3352		/* Clear the interrupt. */
3353		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3354		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
3355		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3356	}
3357
3358	if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
3359		msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
3360	if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
3361		msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
3362}
3363
3364static __inline void
3365msk_rxput(struct msk_if_softc *sc_if)
3366{
3367	struct msk_softc *sc;
3368
3369	sc = sc_if->msk_softc;
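	/*
	 * Sync whichever Rx ring is in use (the jumbo ring when the frame
	 * size exceeds a standard cluster) and then hand the new producer
	 * index to the Rx queue's prefetch unit.
	 */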
	if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN))
3371		bus_dmamap_sync(
3372		    sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
3373		    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
3374		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3375	else
3376		bus_dmamap_sync(
3377		    sc_if->msk_cdata.msk_rx_ring_tag,
3378		    sc_if->msk_cdata.msk_rx_ring_map,
3379		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3380	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
3381	    PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
3382}
3383
3384static int
3385msk_handle_events(struct msk_softc *sc)
3386{
3387	struct msk_if_softc *sc_if;
3388	int rxput[2];
3389	struct msk_stat_desc *sd;
3390	uint32_t control, status;
3391	int cons, idx, len, port, rxprog;
3392
3393	idx = CSR_READ_2(sc, STAT_PUT_IDX);
3394	if (idx == sc->msk_stat_cons)
3395		return (0);
3396
3397	/* Sync status LEs. */
3398	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
3399	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3400	/* XXX Sync Rx LEs here. */
3401
3402	rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
3403
3404	rxprog = 0;
3405	for (cons = sc->msk_stat_cons; cons != idx;) {
3406		sd = &sc->msk_stat_ring[cons];
3407		control = le32toh(sd->msk_control);
3408		if ((control & HW_OWNER) == 0)
3409			break;
3410		/*
3411		 * Marvell's FreeBSD driver updates status LE after clearing
3412		 * HW_OWNER. However we don't have a way to sync single LE
3413		 * with bus_dma(9) API. bus_dma(9) provides a way to sync
3414		 * an entire DMA map. So don't sync LE until we have a better
3415		 * way to sync LEs.
3416		 */
3417		control &= ~HW_OWNER;
3418		sd->msk_control = htole32(control);
3419		status = le32toh(sd->msk_status);
3420		len = control & STLE_LEN_MASK;
3421		port = (control >> 16) & 0x01;
3422		sc_if = sc->msk_if[port];
3423		if (sc_if == NULL) {
3424			device_printf(sc->msk_dev, "invalid port opcode "
3425			    "0x%08x\n", control & STLE_OP_MASK);
3426			continue;
3427		}
3428
3429		switch (control & STLE_OP_MASK) {
3430		case OP_RXVLAN:
3431			sc_if->msk_vtag = ntohs(len);
3432			break;
3433		case OP_RXCHKSVLAN:
3434			sc_if->msk_vtag = ntohs(len);
3435			break;
3436		case OP_RXSTAT:
3437			if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN))
3438				msk_jumbo_rxeof(sc_if, status, len);
3439			else
3440				msk_rxeof(sc_if, status, len);
3441			rxprog++;
3442			/*
3443			 * Because there is no way to sync single Rx LE
3444			 * put the DMA sync operation off until the end of
3445			 * event processing.
3446			 */
3447			rxput[port]++;
			/* Update prefetch unit if we've passed the water mark. */
3449			if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
3450				msk_rxput(sc_if);
3451				rxput[port] = 0;
3452			}
3453			break;
3454		case OP_TXINDEXLE:
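			/*
			 * A single Tx index LE reports the consumer index
			 * for both ports; port B's index is split between
			 * the status and length fields.
			 */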
3455			if (sc->msk_if[MSK_PORT_A] != NULL)
3456				msk_txeof(sc->msk_if[MSK_PORT_A],
3457				    status & STLE_TXA1_MSKL);
3458			if (sc->msk_if[MSK_PORT_B] != NULL)
3459				msk_txeof(sc->msk_if[MSK_PORT_B],
3460				    ((status & STLE_TXA2_MSKL) >>
3461				    STLE_TXA2_SHIFTL) |
3462				    ((len & STLE_TXA2_MSKH) <<
3463				    STLE_TXA2_SHIFTH));
3464			break;
3465		default:
3466			device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
3467			    control & STLE_OP_MASK);
3468			break;
3469		}
3470		MSK_INC(cons, MSK_STAT_RING_CNT);
3471		if (rxprog > sc->msk_process_limit)
3472			break;
3473	}
3474
3475	sc->msk_stat_cons = cons;
3476	/* XXX We should sync status LEs here. See above notes. */
3477
3478	if (rxput[MSK_PORT_A] > 0)
3479		msk_rxput(sc->msk_if[MSK_PORT_A]);
3480	if (rxput[MSK_PORT_B] > 0)
3481		msk_rxput(sc->msk_if[MSK_PORT_B]);
3482
3483	return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
3484}
3485
3486static void
3487msk_intr(void *xsc)
3488{
3489	struct msk_softc *sc;
3490	uint32_t status;
3491
3492	sc = xsc;
3493	status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3494	/* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
3495	if (status == 0 || status == 0xffffffff) {
3496		CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3497		return;
3498	}
3499
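	/*
	 * Defer processing to the taskqueue; interrupts stay masked until
	 * msk_int_task re-enables them.
	 */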
3500	taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task);
3501}
3502
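/*
 * Taskqueue handler that performs the actual interrupt processing with
 * the softc lock held.
 */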
3503static void
3504msk_int_task(void *arg, int pending)
3505{
3506	struct msk_softc *sc;
3507	struct msk_if_softc *sc_if0, *sc_if1;
3508	struct ifnet *ifp0, *ifp1;
3509	uint32_t status;
3510	int domore;
3511
3512	sc = arg;
3513	MSK_LOCK(sc);
3514
3515	/* Get interrupt source. */
3516	status = CSR_READ_4(sc, B0_ISRC);
3517	if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 ||
3518	    (status & sc->msk_intrmask) == 0)
3519		goto done;
3520
3521	sc_if0 = sc->msk_if[MSK_PORT_A];
3522	sc_if1 = sc->msk_if[MSK_PORT_B];
3523	ifp0 = ifp1 = NULL;
3524	if (sc_if0 != NULL)
3525		ifp0 = sc_if0->msk_ifp;
3526	if (sc_if1 != NULL)
3527		ifp1 = sc_if1->msk_ifp;
3528
3529	if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3530		msk_intr_phy(sc_if0);
3531	if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3532		msk_intr_phy(sc_if1);
3533	if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3534		msk_intr_gmac(sc_if0);
3535	if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3536		msk_intr_gmac(sc_if1);
3537	if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3538		device_printf(sc->msk_dev, "Rx descriptor error\n");
3539		sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3540		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3541		CSR_READ_4(sc, B0_IMSK);
3542	}
3543	if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3544		device_printf(sc->msk_dev, "Tx descriptor error\n");
3545		sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3546		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3547		CSR_READ_4(sc, B0_IMSK);
3548	}
3549	if ((status & Y2_IS_HW_ERR) != 0)
3550		msk_intr_hwerr(sc);
3551
3552	domore = msk_handle_events(sc);
3553	if ((status & Y2_IS_STAT_BMU) != 0)
3554		CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3555
3556	if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3557	    !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3558		taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task);
3559	if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3560	    !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3561		taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task);
3562
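	/* More events pending; reschedule and keep interrupts masked. */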
3563	if (domore > 0) {
3564		taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task);
3565		MSK_UNLOCK(sc);
3566		return;
3567	}
3568done:
3569	MSK_UNLOCK(sc);
3570
3571	/* Reenable interrupts. */
3572	CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3573}
3574
3575static void
3576msk_init(void *xsc)
3577{
3578	struct msk_if_softc *sc_if = xsc;
3579
3580	MSK_IF_LOCK(sc_if);
3581	msk_init_locked(sc_if);
3582	MSK_IF_UNLOCK(sc_if);
3583}
3584
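/*
 * Bring the interface up: program the GMAC and MAC FIFOs, set up the RAM
 * buffers, BMUs and prefetch units, and (re)initialize the Tx/Rx rings.
 */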
3585static void
3586msk_init_locked(struct msk_if_softc *sc_if)
3587{
3588	struct msk_softc *sc;
3589	struct ifnet *ifp;
3590	struct mii_data	 *mii;
3591	uint16_t eaddr[ETHER_ADDR_LEN / 2];
3592	uint16_t gmac;
3593	int error, i;
3594
3595	MSK_IF_LOCK_ASSERT(sc_if);
3596
3597	ifp = sc_if->msk_ifp;
3598	sc = sc_if->msk_softc;
3599	mii = device_get_softc(sc_if->msk_miibus);
3600
3601	error = 0;
3602	/* Cancel pending I/O and free all Rx/Tx buffers. */
3603	msk_stop(sc_if);
3604
3605	sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN +
3606	    ETHER_VLAN_ENCAP_LEN;
3607
3608	/*
3609	 * Initialize the GMAC first.
3610	 * Without this initialization the Rx MAC did not work as expected:
3611	 * it garbled status LEs, which resulted in out-of-order or
3612	 * duplicated frame delivery and, in turn, very poor Rx performance.
3613	 * (I had to write packet analysis code that could be embedded in
3614	 * the driver to diagnose this issue.)
3615	 * I spent almost 2 months fixing this issue. If I had had a
3616	 * datasheet for the Yukon II I wouldn't have encountered it. :-(
3617	 */
3618	gmac = GM_GPCR_SPEED_100 | GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL;
3619	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
3620
3621	/* Dummy read the Interrupt Source Register. */
3622	CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3623
3624	/* Set MIB Clear Counter Mode. */
3625	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
3626	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
3627	/* Read all MIB Counters with Clear Mode set. */
3628	for (i = 0; i < GM_MIB_CNT_SIZE; i++)
3629		GMAC_READ_2(sc, sc_if->msk_port, GM_MIB_CNT_BASE + 8 * i);
3630	/* Clear MIB Clear Counter Mode. */
3631	gmac &= ~GM_PAR_MIB_CLR;
3632	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
3633
3634	/* Disable FCS. */
3635	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);
3636
3637	/* Setup Transmit Control Register. */
3638	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
3639
3640	/* Setup Transmit Flow Control Register. */
3641	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
3642
3643	/* Setup Transmit Parameter Register. */
3644	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
3645	    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
3646	    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
3647
3648	gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
3649	    GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
3650
3651	if (sc_if->msk_framesize > MSK_MAX_FRAMELEN)
3652		gmac |= GM_SMOD_JUMBO_ENA;
3653	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
3654
3655	/* Set station address. */
3656	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
3657	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3658		GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4,
3659		    eaddr[i]);
3660	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3661		GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4,
3662		    eaddr[i]);
3663
3664	/* Disable interrupts for counter overflows. */
3665	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
3666	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
3667	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
3668
3669	/* Configure Rx MAC FIFO. */
3670	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3671	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
3672	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3673	    GMF_OPER_ON | GMF_RX_F_FL_ON);
3674
3675	/* Set promiscuous mode. */
3676	msk_setpromisc(sc_if);
3677
3678	/* Set multicast filter. */
3679	msk_setmulti(sc_if);
3680
3681	/* Flush Rx MAC FIFO on any flow control or error. */
3682	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
3683	    GMR_FS_ANY_ERR);
3684
3685	/* Set Rx FIFO flush threshold to 64 bytes. */
3686	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR),
3687	    RX_GMF_FL_THR_DEF);
3688
3689	/* Configure Tx MAC FIFO. */
3690	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3691	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
3692	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
3693
3694	/* Configure hardware VLAN tag insertion/stripping. */
3695	msk_setvlan(sc_if, ifp);
3696
3697	/* XXX It seems STFW is required for all cases. */
3698	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), TX_STFW_ENA);
3699
3700	if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
3701		/* Set Rx Pause thresholds. */
3702		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
3703		    MSK_ECU_LLPP);
3704		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
3705		    MSK_ECU_ULPP);
3706		if (sc_if->msk_framesize > MSK_MAX_FRAMELEN) {
3707			/*
3708			 * It is not clear that the following code is needed,
3709			 * as Yukon EC Ultra may not support jumbo frames.
3710			 *
3711			 * Set Tx GMAC FIFO Almost Empty Threshold.
3712			 */
3713			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
3714			    MSK_ECU_AE_THR);
3715			/* Disable Store & Forward mode for Tx. */
3716			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3717			    TX_STFW_DIS);
3718		}
3719	}
3720
3721	/*
3722	 * Disable Force Sync bit and Alloc bit in Tx RAM interface
3723	 * arbiter as we don't use Sync Tx queue.
3724	 */
3725	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
3726	    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
3727	/* Enable the RAM Interface Arbiter. */
3728	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
3729
3730	/* Setup RAM buffer. */
3731	msk_set_rambuffer(sc_if);
3732
3733	/* Disable Tx sync Queue. */
3734	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
3735
3736	/* Setup Tx Queue Bus Memory Interface. */
3737	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
3738	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
3739	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
3740	/* Increase IPID when hardware generates IP packets in TSO. */
3741	if ((ifp->if_hwassist & CSUM_TSO) != 0)
3742		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3743		    BMU_TX_IPIDINCR_ON);
3744	else
3745		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3746		    BMU_TX_IPIDINCR_OFF);
3747	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
3748	if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3749	    sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
3750		/* Fix for Yukon-EC Ultra: set BMU FIFO level */
3751		CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), MSK_ECU_TXFF_LEV);
3752	}
3753
3754	/* Setup Rx Queue Bus Memory Interface. */
3755	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
3756	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
3757	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
3758	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
3759	if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3760	    sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
3761		/* MAC Rx RAM Read is controlled by hardware. */
3762		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
3763	}
3764
3765	msk_set_prefetch(sc, sc_if->msk_txq,
3766	    sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
3767	msk_init_tx_ring(sc_if);
3768
3769	/* Disable Rx checksum offload and RSS hash. */
3770	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3771	    BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);
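	/* Use the jumbo Rx ring when frames exceed a standard cluster. */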
3772	if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
3773		msk_set_prefetch(sc, sc_if->msk_rxq,
3774		    sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
3775		    MSK_JUMBO_RX_RING_CNT - 1);
3776		error = msk_init_jumbo_rx_ring(sc_if);
3777	} else {
3778		msk_set_prefetch(sc, sc_if->msk_rxq,
3779		    sc_if->msk_rdata.msk_rx_ring_paddr,
3780		    MSK_RX_RING_CNT - 1);
3781		error = msk_init_rx_ring(sc_if);
3782	}
3783	if (error != 0) {
3784		device_printf(sc_if->msk_if_dev,
3785		    "initialization failed: no memory for Rx buffers\n");
3786		msk_stop(sc_if);
3787		return;
3788	}
3789
3790	/* Configure interrupt handling. */
3791	if (sc_if->msk_port == MSK_PORT_A) {
3792		sc->msk_intrmask |= Y2_IS_PORT_A;
3793		sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
3794	} else {
3795		sc->msk_intrmask |= Y2_IS_PORT_B;
3796		sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
3797	}
3798	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3799	CSR_READ_4(sc, B0_HWE_IMSK);
3800	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3801	CSR_READ_4(sc, B0_IMSK);
3802
3803	sc_if->msk_link = 0;
3804	mii_mediachg(mii);
3805
3806	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3807	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3808
3809	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3810}
3811
3812static void
3813msk_set_rambuffer(struct msk_if_softc *sc_if)
3814{
3815	struct msk_softc *sc;
3816	int ltpp, utpp;
3817
3818	sc = sc_if->msk_softc;
3819
3820	/* Setup Rx Queue. */
3821	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
3822	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
3823	    sc->msk_rxqstart[sc_if->msk_port] / 8);
3824	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
3825	    sc->msk_rxqend[sc_if->msk_port] / 8);
3826	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
3827	    sc->msk_rxqstart[sc_if->msk_port] / 8);
3828	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
3829	    sc->msk_rxqstart[sc_if->msk_port] / 8);
3830
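	/*
	 * Compute the RB_RX_UTPP/RB_RX_LTPP thresholds (in 8-byte units)
	 * from the Rx RAM buffer size.
	 */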
3831	utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3832	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
3833	ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3834	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
3835	if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
3836		ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
3837	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
3838	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
3839	/* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */
3840
3841	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
3842	CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));
3843
3844	/* Setup Tx Queue. */
3845	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
3846	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
3847	    sc->msk_txqstart[sc_if->msk_port] / 8);
3848	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
3849	    sc->msk_txqend[sc_if->msk_port] / 8);
3850	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
3851	    sc->msk_txqstart[sc_if->msk_port] / 8);
3852	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
3853	    sc->msk_txqstart[sc_if->msk_port] / 8);
3854	/* Enable Store & Forward for Tx side. */
3855	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
3856	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
3857	CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
3858}
3859
3860static void
3861msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
3862    uint32_t count)
3863{
3864
3865	/* Reset the prefetch unit. */
3866	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3867	    PREF_UNIT_RST_SET);
3868	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3869	    PREF_UNIT_RST_CLR);
3870	/* Set LE base address. */
3871	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
3872	    MSK_ADDR_LO(addr));
3873	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
3874	    MSK_ADDR_HI(addr));
3875	/* Set the list last index. */
3876	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
3877	    count);
3878	/* Turn on prefetch unit. */
3879	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3880	    PREF_UNIT_OP_ON);
3881	/* Dummy read to ensure write. */
3882	CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
3883}
3884
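/*
 * Quiesce the port: mask its interrupts, stop the MAC, BMUs and RAM
 * buffers, reset the FIFOs and prefetch units, and free any mbufs still
 * held by the rings.
 */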
3885static void
3886msk_stop(struct msk_if_softc *sc_if)
3887{
3888	struct msk_softc *sc;
3889	struct msk_txdesc *txd;
3890	struct msk_rxdesc *rxd;
3891	struct msk_rxdesc *jrxd;
3892	struct ifnet *ifp;
3893	uint32_t val;
3894	int i;
3895
3896	MSK_IF_LOCK_ASSERT(sc_if);
3897	sc = sc_if->msk_softc;
3898	ifp = sc_if->msk_ifp;
3899
3900	callout_stop(&sc_if->msk_tick_ch);
3901	sc_if->msk_watchdog_timer = 0;
3902
3903	/* Disable interrupts. */
3904	if (sc_if->msk_port == MSK_PORT_A) {
3905		sc->msk_intrmask &= ~Y2_IS_PORT_A;
3906		sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
3907	} else {
3908		sc->msk_intrmask &= ~Y2_IS_PORT_B;
3909		sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
3910	}
3911	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3912	CSR_READ_4(sc, B0_HWE_IMSK);
3913	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3914	CSR_READ_4(sc, B0_IMSK);
3915
3916	/* Disable Tx/Rx MAC. */
3917	val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3918	val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
3919	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
3920	/* Read back to ensure the write has completed. */
3921	GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3922
3923	/* Stop Tx BMU. */
3924	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
3925	val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
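	/* Poll until the BMU is stopped or idle, reissuing stop as needed. */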
3926	for (i = 0; i < MSK_TIMEOUT; i++) {
3927		if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
3928			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3929			    BMU_STOP);
3930			val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
3931		} else
3932			break;
3933		DELAY(1);
3934	}
3935	if (i == MSK_TIMEOUT)
3936		device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
3937	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
3938	    RB_RST_SET | RB_DIS_OP_MD);
3939
3940	/* Disable all GMAC interrupts. */
3941	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
3942	/* Disable PHY interrupt. */
3943	if (sc->msk_marvell_phy)
3944		msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
3945
3946	/* Disable the RAM Interface Arbiter. */
3947	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);
3948
3949	/* Reset the PCI FIFO of the async Tx queue. */
3950	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3951	    BMU_RST_SET | BMU_FIFO_RST);
3952
3953	/* Reset the Tx prefetch units. */
3954	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
3955	    PREF_UNIT_RST_SET);
3956
3957	/* Reset the RAM Buffer async Tx queue. */
3958	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);
3959
3960	/* Reset Tx MAC FIFO. */
3961	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3962	/* Set Pause Off. */
3963	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
3964
3965	/*
3966	 * The Rx Stop command will not work for Yukon-2 if the BMU has not
3967	 * reached the end of a packet, and since we cannot be sure whether
3968	 * data is incoming, we must reset the BMU while it is not in the
3969	 * middle of a DMA transfer. Because the Rx path may still be
3970	 * active, the Rx RAM buffer is stopped first so that any incoming
3971	 * data cannot trigger a DMA. After the RAM buffer is stopped, the
3972	 * BMU is polled until any DMA in progress has ended, and only then
3973	 * is it reset.
3974	 */
3975
3976	/* Disable the RAM Buffer receive queue. */
3977	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
3978	for (i = 0; i < MSK_TIMEOUT; i++) {
3979		if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
3980		    CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
3981			break;
3982		DELAY(1);
3983	}
3984	if (i == MSK_TIMEOUT)
3985		device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
3986	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3987	    BMU_RST_SET | BMU_FIFO_RST);
3988	/* Reset the Rx prefetch unit. */
3989	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
3990	    PREF_UNIT_RST_SET);
3991	/* Reset the RAM Buffer receive queue. */
3992	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
3993	/* Reset Rx MAC FIFO. */
3994	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3995
3996	/* Free Rx and Tx mbufs still in the queues. */
3997	for (i = 0; i < MSK_RX_RING_CNT; i++) {
3998		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
3999		if (rxd->rx_m != NULL) {
4000			bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
4001			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
4002			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
4003			    rxd->rx_dmamap);
4004			m_freem(rxd->rx_m);
4005			rxd->rx_m = NULL;
4006		}
4007	}
4008	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
4009		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
4010		if (jrxd->rx_m != NULL) {
4011			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
4012			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
4013			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
4014			    jrxd->rx_dmamap);
4015			m_freem(jrxd->rx_m);
4016			jrxd->rx_m = NULL;
4017		}
4018	}
4019	for (i = 0; i < MSK_TX_RING_CNT; i++) {
4020		txd = &sc_if->msk_cdata.msk_txdesc[i];
4021		if (txd->tx_m != NULL) {
4022			bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
4023			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
4024			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
4025			    txd->tx_dmamap);
4026			m_freem(txd->tx_m);
4027			txd->tx_m = NULL;
4028		}
4029	}
4030
4031	/*
4032	 * Mark the interface down.
4033	 */
4034	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4035	sc_if->msk_link = 0;
4036}
4037
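/*
 * Sysctl helper that accepts a new integer value only if it lies within
 * [low, high].
 */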
4038static int
4039sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
4040{
4041	int error, value;
4042
4043	if (!arg1)
4044		return (EINVAL);
4045	value = *(int *)arg1;
4046	error = sysctl_handle_int(oidp, &value, 0, req);
4047	if (error || !req->newptr)
4048		return (error);
4049	if (value < low || value > high)
4050		return (EINVAL);
4051	*(int *)arg1 = value;
4052
4053	return (0);
4054}
4055
4056static int
4057sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
4058{
4059
4060	return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
4061	    MSK_PROC_MAX));
4062}
4063