if_msk.c revision 166737
1/******************************************************************************
2 *
3 * Name   : sky2.c
4 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
5 * Version: $Revision: 1.23 $
6 * Date   : $Date: 2005/12/22 09:04:11 $
7 * Purpose: Main driver source file
8 *
9 *****************************************************************************/
10
11/******************************************************************************
12 *
13 *	LICENSE:
14 *	Copyright (C) Marvell International Ltd. and/or its affiliates
15 *
16 *	The computer program files contained in this folder ("Files")
17 *	are provided to you under the BSD-type license terms provided
18 *	below, and any use of such Files and any derivative works
19 *	thereof created by you shall be governed by the following terms
20 *	and conditions:
21 *
22 *	- Redistributions of source code must retain the above copyright
23 *	  notice, this list of conditions and the following disclaimer.
24 *	- Redistributions in binary form must reproduce the above
25 *	  copyright notice, this list of conditions and the following
26 *	  disclaimer in the documentation and/or other materials provided
27 *	  with the distribution.
28 *	- Neither the name of Marvell nor the names of its contributors
29 *	  may be used to endorse or promote products derived from this
30 *	  software without specific prior written permission.
31 *
32 *	THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
33 *	"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
34 *	LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 *	FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
36 *	COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
37 *	INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
38 *	BUT NOT LIMITED TO, PROCUREMENT OF  SUBSTITUTE GOODS OR SERVICES;
39 *	LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40 *	HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
41 *	STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42 *	ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
43 *	OF THE POSSIBILITY OF SUCH DAMAGE.
44 *	/LICENSE
45 *
46 *****************************************************************************/
47
48/*-
49 * Copyright (c) 1997, 1998, 1999, 2000
50 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
51 *
52 * Redistribution and use in source and binary forms, with or without
53 * modification, are permitted provided that the following conditions
54 * are met:
55 * 1. Redistributions of source code must retain the above copyright
56 *    notice, this list of conditions and the following disclaimer.
57 * 2. Redistributions in binary form must reproduce the above copyright
58 *    notice, this list of conditions and the following disclaimer in the
59 *    documentation and/or other materials provided with the distribution.
60 * 3. All advertising materials mentioning features or use of this software
61 *    must display the following acknowledgement:
62 *	This product includes software developed by Bill Paul.
63 * 4. Neither the name of the author nor the names of any co-contributors
64 *    may be used to endorse or promote products derived from this software
65 *    without specific prior written permission.
66 *
67 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
68 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
69 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
70 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
71 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
72 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
73 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
74 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
75 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
76 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
77 * THE POSSIBILITY OF SUCH DAMAGE.
78 */
79/*-
80 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
81 *
82 * Permission to use, copy, modify, and distribute this software for any
83 * purpose with or without fee is hereby granted, provided that the above
84 * copyright notice and this permission notice appear in all copies.
85 *
86 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
87 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
88 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
89 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
90 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
91 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
92 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
93 */
94
95/*
96 * Device driver for the Marvell Yukon II Ethernet controller.
97 * Due to lack of documentation, this driver is based on the code from
98 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
99 */
100
101#include <sys/cdefs.h>
102__FBSDID("$FreeBSD: head/sys/dev/msk/if_msk.c 166737 2007-02-15 06:21:34Z yongari $");
103
104#include <sys/param.h>
105#include <sys/systm.h>
106#include <sys/bus.h>
107#include <sys/endian.h>
108#include <sys/mbuf.h>
109#include <sys/malloc.h>
110#include <sys/kernel.h>
111#include <sys/module.h>
112#include <sys/socket.h>
113#include <sys/sockio.h>
114#include <sys/queue.h>
115#include <sys/sysctl.h>
116#include <sys/taskqueue.h>
117
118#include <net/bpf.h>
119#include <net/ethernet.h>
120#include <net/if.h>
121#include <net/if_arp.h>
122#include <net/if_dl.h>
123#include <net/if_media.h>
124#include <net/if_types.h>
125#include <net/if_vlan_var.h>
126
127#include <netinet/in.h>
128#include <netinet/in_systm.h>
129#include <netinet/ip.h>
130#include <netinet/tcp.h>
131#include <netinet/udp.h>
132
133#include <machine/bus.h>
134#include <machine/resource.h>
135#include <sys/rman.h>
136
137#include <dev/mii/mii.h>
138#include <dev/mii/miivar.h>
139#include <dev/mii/brgphyreg.h>
140
141#include <dev/pci/pcireg.h>
142#include <dev/pci/pcivar.h>
143
144#include <dev/msk/if_mskreg.h>
145
146MODULE_DEPEND(msk, pci, 1, 1, 1);
147MODULE_DEPEND(msk, ether, 1, 1, 1);
148MODULE_DEPEND(msk, miibus, 1, 1, 1);
149
150/* "device miibus" required.  See GENERIC if you get errors here. */
151#include "miibus_if.h"
152
153/* Tunables. */
154static int msi_disable = 0;
155TUNABLE_INT("hw.msk.msi_disable", &msi_disable);
156
157#define MSK_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
158
159/*
160 * Devices supported by this driver.
161 */
162static struct msk_product {
163	uint16_t	msk_vendorid;
164	uint16_t	msk_deviceid;
165	const char	*msk_name;
166} msk_products[] = {
167	{ VENDORID_SK, DEVICEID_SK_YUKON2,
168	    "SK-9Sxx Gigabit Ethernet" },
169	{ VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
170	    "SK-9Exx Gigabit Ethernet"},
171	{ VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
172	    "Marvell Yukon 88E8021CU Gigabit Ethernet" },
173	{ VENDORID_MARVELL, DEVICEID_MRVL_8021X,
174	    "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
175	{ VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
176	    "Marvell Yukon 88E8022CU Gigabit Ethernet" },
177	{ VENDORID_MARVELL, DEVICEID_MRVL_8022X,
178	    "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
179	{ VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
180	    "Marvell Yukon 88E8061CU Gigabit Ethernet" },
181	{ VENDORID_MARVELL, DEVICEID_MRVL_8061X,
182	    "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
183	{ VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
184	    "Marvell Yukon 88E8062CU Gigabit Ethernet" },
185	{ VENDORID_MARVELL, DEVICEID_MRVL_8062X,
186	    "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
187	{ VENDORID_MARVELL, DEVICEID_MRVL_8035,
188	    "Marvell Yukon 88E8035 Gigabit Ethernet" },
189	{ VENDORID_MARVELL, DEVICEID_MRVL_8036,
190	    "Marvell Yukon 88E8036 Gigabit Ethernet" },
191	{ VENDORID_MARVELL, DEVICEID_MRVL_8038,
192	    "Marvell Yukon 88E8038 Gigabit Ethernet" },
193	{ VENDORID_MARVELL, DEVICEID_MRVL_4361,
194	    "Marvell Yukon 88E8050 Gigabit Ethernet" },
195	{ VENDORID_MARVELL, DEVICEID_MRVL_4360,
196	    "Marvell Yukon 88E8052 Gigabit Ethernet" },
197	{ VENDORID_MARVELL, DEVICEID_MRVL_4362,
198	    "Marvell Yukon 88E8053 Gigabit Ethernet" },
199	{ VENDORID_MARVELL, DEVICEID_MRVL_4363,
200	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
201	{ VENDORID_MARVELL, DEVICEID_MRVL_4364,
202	    "Marvell Yukon 88E8056 Gigabit Ethernet" },
203	{ VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
204	    "D-Link 550SX Gigabit Ethernet" },
205	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
206	    "D-Link 560T Gigabit Ethernet" }
207};
208
209static const char *model_name[] = {
210	"Yukon XL",
211        "Yukon EC Ultra",
212        "Yukon Unknown",
213        "Yukon EC",
214        "Yukon FE"
215};
216
217static int mskc_probe(device_t);
218static int mskc_attach(device_t);
219static int mskc_detach(device_t);
220static void mskc_shutdown(device_t);
221static int mskc_setup_rambuffer(struct msk_softc *);
222static int mskc_suspend(device_t);
223static int mskc_resume(device_t);
224static void mskc_reset(struct msk_softc *);
225
226static int msk_probe(device_t);
227static int msk_attach(device_t);
228static int msk_detach(device_t);
229
230static void msk_tick(void *);
231static void msk_intr(void *);
232static void msk_int_task(void *, int);
233static void msk_intr_phy(struct msk_if_softc *);
234static void msk_intr_gmac(struct msk_if_softc *);
235static __inline void msk_rxput(struct msk_if_softc *);
236static int msk_handle_events(struct msk_softc *);
237static void msk_handle_hwerr(struct msk_if_softc *, uint32_t);
238static void msk_intr_hwerr(struct msk_softc *);
239static void msk_rxeof(struct msk_if_softc *, uint32_t, int);
240static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int);
241static void msk_txeof(struct msk_if_softc *, int);
242static struct mbuf *msk_defrag(struct mbuf *, int, int);
243static int msk_encap(struct msk_if_softc *, struct mbuf **);
244static void msk_tx_task(void *, int);
245static void msk_start(struct ifnet *);
246static int msk_ioctl(struct ifnet *, u_long, caddr_t);
247static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
248static void msk_set_rambuffer(struct msk_if_softc *);
249static void msk_init(void *);
250static void msk_init_locked(struct msk_if_softc *);
251static void msk_stop(struct msk_if_softc *);
252static void msk_watchdog(struct msk_if_softc *);
253static int msk_mediachange(struct ifnet *);
254static void msk_mediastatus(struct ifnet *, struct ifmediareq *);
255static void msk_phy_power(struct msk_softc *, int);
256static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
257static int msk_status_dma_alloc(struct msk_softc *);
258static void msk_status_dma_free(struct msk_softc *);
259static int msk_txrx_dma_alloc(struct msk_if_softc *);
260static void msk_txrx_dma_free(struct msk_if_softc *);
261static void *msk_jalloc(struct msk_if_softc *);
262static void msk_jfree(void *, void *);
263static int msk_init_rx_ring(struct msk_if_softc *);
264static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
265static void msk_init_tx_ring(struct msk_if_softc *);
266static __inline void msk_discard_rxbuf(struct msk_if_softc *, int);
267static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
268static int msk_newbuf(struct msk_if_softc *, int);
269static int msk_jumbo_newbuf(struct msk_if_softc *, int);
270
271static int msk_phy_readreg(struct msk_if_softc *, int, int);
272static int msk_phy_writereg(struct msk_if_softc *, int, int, int);
273static int msk_miibus_readreg(device_t, int, int);
274static int msk_miibus_writereg(device_t, int, int, int);
275static void msk_miibus_statchg(device_t);
276static void msk_link_task(void *, int);
277
278static void msk_setmulti(struct msk_if_softc *);
279static void msk_setvlan(struct msk_if_softc *, struct ifnet *);
280static void msk_setpromisc(struct msk_if_softc *);
281
282static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
283static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS);
284
285static device_method_t mskc_methods[] = {
286	/* Device interface */
287	DEVMETHOD(device_probe,		mskc_probe),
288	DEVMETHOD(device_attach,	mskc_attach),
289	DEVMETHOD(device_detach,	mskc_detach),
290	DEVMETHOD(device_suspend,	mskc_suspend),
291	DEVMETHOD(device_resume,	mskc_resume),
292	DEVMETHOD(device_shutdown,	mskc_shutdown),
293
294	/* bus interface */
295	DEVMETHOD(bus_print_child,	bus_generic_print_child),
296	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
297
298	{ NULL, NULL }
299};
300
301static driver_t mskc_driver = {
302	"mskc",
303	mskc_methods,
304	sizeof(struct msk_softc)
305};
306
307static devclass_t mskc_devclass;
308
309static device_method_t msk_methods[] = {
310	/* Device interface */
311	DEVMETHOD(device_probe,		msk_probe),
312	DEVMETHOD(device_attach,	msk_attach),
313	DEVMETHOD(device_detach,	msk_detach),
314	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
315
316	/* bus interface */
317	DEVMETHOD(bus_print_child,	bus_generic_print_child),
318	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
319
320	/* MII interface */
321	DEVMETHOD(miibus_readreg,	msk_miibus_readreg),
322	DEVMETHOD(miibus_writereg,	msk_miibus_writereg),
323	DEVMETHOD(miibus_statchg,	msk_miibus_statchg),
324
325	{ NULL, NULL }
326};
327
328static driver_t msk_driver = {
329	"msk",
330	msk_methods,
331	sizeof(struct msk_if_softc)
332};
333
334static devclass_t msk_devclass;
335
336DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, 0, 0);
337DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, 0, 0);
338DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, 0, 0);
339
340static struct resource_spec msk_res_spec_io[] = {
341	{ SYS_RES_IOPORT,	PCIR_BAR(1),	RF_ACTIVE },
342	{ -1,			0,		0 }
343};
344
345static struct resource_spec msk_res_spec_mem[] = {
346	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
347	{ -1,			0,		0 }
348};
349
350static struct resource_spec msk_irq_spec_legacy[] = {
351	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
352	{ -1,			0,		0 }
353};
354
355static struct resource_spec msk_irq_spec_msi[] = {
356	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
357	{ SYS_RES_IRQ,		2,		RF_ACTIVE },
358	{ -1,			0,		0 }
359};
360
361static int
362msk_miibus_readreg(device_t dev, int phy, int reg)
363{
364	struct msk_if_softc *sc_if;
365
366	sc_if = device_get_softc(dev);
367
368	return (msk_phy_readreg(sc_if, phy, reg));
369}
370
371static int
372msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
373{
374	struct msk_softc *sc;
375	int i, val;
376
377	sc = sc_if->msk_softc;
378
379        GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
380	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
381
382	for (i = 0; i < MSK_TIMEOUT; i++) {
383		DELAY(1);
384		val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
385		if ((val & GM_SMI_CT_RD_VAL) != 0) {
386			val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
387			break;
388		}
389	}
390
391	if (i == MSK_TIMEOUT) {
392		if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
393		val = 0;
394	}
395
396	return (val);
397}
398
399static int
400msk_miibus_writereg(device_t dev, int phy, int reg, int val)
401{
402	struct msk_if_softc *sc_if;
403
404	sc_if = device_get_softc(dev);
405
406	return (msk_phy_writereg(sc_if, phy, reg, val));
407}
408
409static int
410msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
411{
412	struct msk_softc *sc;
413	int i;
414
415	sc = sc_if->msk_softc;
416
417	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
418        GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
419	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
420	for (i = 0; i < MSK_TIMEOUT; i++) {
421		DELAY(1);
422		if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
423		    GM_SMI_CT_BUSY) == 0)
424			break;
425	}
426	if (i == MSK_TIMEOUT)
427		if_printf(sc_if->msk_ifp, "phy write timeout\n");
428
429	return (0);
430}
431
432static void
433msk_miibus_statchg(device_t dev)
434{
435	struct msk_if_softc *sc_if;
436
437	sc_if = device_get_softc(dev);
438	taskqueue_enqueue(taskqueue_swi, &sc_if->msk_link_task);
439}
440
441static void
442msk_link_task(void *arg, int pending)
443{
444	struct msk_softc *sc;
445	struct msk_if_softc *sc_if;
446	struct mii_data *mii;
447	struct ifnet *ifp;
448	uint32_t gmac;
449
450	sc_if = (struct msk_if_softc *)arg;
451	sc = sc_if->msk_softc;
452
453	MSK_IF_LOCK(sc_if);
454
455	mii = device_get_softc(sc_if->msk_miibus);
456	ifp = sc_if->msk_ifp;
457	if (mii == NULL || ifp == NULL) {
458		MSK_IF_UNLOCK(sc_if);
459		return;
460	}
461
462	if (mii->mii_media_status & IFM_ACTIVE) {
463		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
464			sc_if->msk_link = 1;
465	} else
466		sc_if->msk_link = 0;
467
468	if (sc_if->msk_link != 0) {
469		/* Enable Tx FIFO Underrun. */
470		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
471		    GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
472		/*
473		 * Because mii(4) notify msk(4) that it detected link status
474		 * change, there is no need to enable automatic
475		 * speed/flow-control/duplex updates.
476		 */
477		gmac = GM_GPCR_AU_ALL_DIS;
478		switch (IFM_SUBTYPE(mii->mii_media_active)) {
479		case IFM_1000_SX:
480		case IFM_1000_T:
481			gmac |= GM_GPCR_SPEED_1000;
482			break;
483		case IFM_100_TX:
484			gmac |= GM_GPCR_SPEED_100;
485			break;
486		case IFM_10_T:
487			break;
488		}
489
490		if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
491			gmac |= GM_GPCR_DUP_FULL;
492		/* Disable Rx flow control. */
493		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0)
494			gmac |= GM_GPCR_FC_RX_DIS;
495		/* Disable Tx flow control. */
496		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0)
497			gmac |= GM_GPCR_FC_TX_DIS;
498		gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
499		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
500		/* Read again to ensure writing. */
501		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
502
503		gmac = GMC_PAUSE_ON;
504		if (((mii->mii_media_active & IFM_GMASK) &
505		    (IFM_FLAG0 | IFM_FLAG1)) == 0)
506			gmac = GMC_PAUSE_OFF;
507		/* Diable pause for 10/100 Mbps in half-duplex mode. */
508		if ((((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) &&
509		    (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX ||
510		    IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T))
511			gmac = GMC_PAUSE_OFF;
512		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);
513
514		/* Enable PHY interrupt for FIFO underrun/overflow. */
515		if (sc->msk_marvell_phy)
516			msk_phy_writereg(sc_if, PHY_ADDR_MARV,
517			    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
518	} else {
519		/*
520		 * Link state changed to down.
521		 * Disable PHY interrupts.
522		 */
523		if (sc->msk_marvell_phy)
524			msk_phy_writereg(sc_if, PHY_ADDR_MARV,
525			    PHY_MARV_INT_MASK, 0);
526		/* Disable Rx/Tx MAC. */
527		gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
528		gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
529		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
530		/* Read again to ensure writing. */
531		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
532	}
533
534	MSK_IF_UNLOCK(sc_if);
535}
536
537static void
538msk_setmulti(struct msk_if_softc *sc_if)
539{
540	struct msk_softc *sc;
541	struct ifnet *ifp;
542	struct ifmultiaddr *ifma;
543	uint32_t mchash[2];
544	uint32_t crc;
545	uint16_t mode;
546
547	sc = sc_if->msk_softc;
548
549	MSK_IF_LOCK_ASSERT(sc_if);
550
551	ifp = sc_if->msk_ifp;
552
553	bzero(mchash, sizeof(mchash));
554	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
555	mode |= GM_RXCR_UCF_ENA;
556	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
557		if ((ifp->if_flags & IFF_PROMISC) != 0)
558			mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
559		else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
560			mchash[0] = 0xffff;
561			mchash[1] = 0xffff;
562		}
563	} else {
564		IF_ADDR_LOCK(ifp);
565		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
566			if (ifma->ifma_addr->sa_family != AF_LINK)
567				continue;
568			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
569			    ifma->ifma_addr), ETHER_ADDR_LEN);
570			/* Just want the 6 least significant bits. */
571			crc &= 0x3f;
572			/* Set the corresponding bit in the hash table. */
573			mchash[crc >> 5] |= 1 << (crc & 0x1f);
574		}
575		IF_ADDR_UNLOCK(ifp);
576		mode |= GM_RXCR_MCF_ENA;
577	}
578
579	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
580	    mchash[0] & 0xffff);
581	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
582	    (mchash[0] >> 16) & 0xffff);
583	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
584	    mchash[1] & 0xffff);
585	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
586	    (mchash[1] >> 16) & 0xffff);
587	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
588}
589
590static void
591msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
592{
593	struct msk_softc *sc;
594
595	sc = sc_if->msk_softc;
596	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
597		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
598		    RX_VLAN_STRIP_ON);
599		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
600		    TX_VLAN_TAG_ON);
601	} else {
602		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
603		    RX_VLAN_STRIP_OFF);
604		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
605		    TX_VLAN_TAG_OFF);
606	}
607}
608
609static void
610msk_setpromisc(struct msk_if_softc *sc_if)
611{
612	struct msk_softc *sc;
613	struct ifnet *ifp;
614	uint16_t mode;
615
616	MSK_IF_LOCK_ASSERT(sc_if);
617
618	sc = sc_if->msk_softc;
619	ifp = sc_if->msk_ifp;
620
621	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
622	if (ifp->if_flags & IFF_PROMISC)
623		mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
624	else
625		mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
626	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
627}
628
629static int
630msk_init_rx_ring(struct msk_if_softc *sc_if)
631{
632	struct msk_ring_data *rd;
633	struct msk_rxdesc *rxd;
634	int i, prod;
635
636	MSK_IF_LOCK_ASSERT(sc_if);
637
638	sc_if->msk_cdata.msk_rx_cons = 0;
639	sc_if->msk_cdata.msk_rx_prod = 0;
640	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
641
642	rd = &sc_if->msk_rdata;
643	bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
644	prod = sc_if->msk_cdata.msk_rx_prod;
645	for (i = 0; i < MSK_RX_RING_CNT; i++) {
646		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
647		rxd->rx_m = NULL;
648		rxd->rx_le = &rd->msk_rx_ring[prod];
649		if (msk_newbuf(sc_if, prod) != 0)
650			return (ENOBUFS);
651		MSK_INC(prod, MSK_RX_RING_CNT);
652	}
653
654	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
655	    sc_if->msk_cdata.msk_rx_ring_map,
656	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
657
658	/* Update prefetch unit. */
659	sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
660	CSR_WRITE_2(sc_if->msk_softc,
661	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
662	    sc_if->msk_cdata.msk_rx_prod);
663
664	return (0);
665}
666
667static int
668msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
669{
670	struct msk_ring_data *rd;
671	struct msk_rxdesc *rxd;
672	int i, prod;
673
674	MSK_IF_LOCK_ASSERT(sc_if);
675
676	sc_if->msk_cdata.msk_rx_cons = 0;
677	sc_if->msk_cdata.msk_rx_prod = 0;
678	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
679
680	rd = &sc_if->msk_rdata;
681	bzero(rd->msk_jumbo_rx_ring,
682	    sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
683	prod = sc_if->msk_cdata.msk_rx_prod;
684	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
685		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
686		rxd->rx_m = NULL;
687		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
688		if (msk_jumbo_newbuf(sc_if, prod) != 0)
689			return (ENOBUFS);
690		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
691	}
692
693	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
694	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
695	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
696
697	sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
698	CSR_WRITE_2(sc_if->msk_softc,
699	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
700	    sc_if->msk_cdata.msk_rx_prod);
701
702	return (0);
703}
704
705static void
706msk_init_tx_ring(struct msk_if_softc *sc_if)
707{
708	struct msk_ring_data *rd;
709	struct msk_txdesc *txd;
710	int i;
711
712	sc_if->msk_cdata.msk_tso_mtu = 0;
713	sc_if->msk_cdata.msk_tx_prod = 0;
714	sc_if->msk_cdata.msk_tx_cons = 0;
715	sc_if->msk_cdata.msk_tx_cnt = 0;
716
717	rd = &sc_if->msk_rdata;
718	bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
719	for (i = 0; i < MSK_TX_RING_CNT; i++) {
720		txd = &sc_if->msk_cdata.msk_txdesc[i];
721		txd->tx_m = NULL;
722		txd->tx_le = &rd->msk_tx_ring[i];
723	}
724
725	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
726	    sc_if->msk_cdata.msk_tx_ring_map,
727	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
728}
729
730static __inline void
731msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
732{
733	struct msk_rx_desc *rx_le;
734	struct msk_rxdesc *rxd;
735	struct mbuf *m;
736
737	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
738	m = rxd->rx_m;
739	rx_le = rxd->rx_le;
740	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
741}
742
743static __inline void
744msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int	idx)
745{
746	struct msk_rx_desc *rx_le;
747	struct msk_rxdesc *rxd;
748	struct mbuf *m;
749
750	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
751	m = rxd->rx_m;
752	rx_le = rxd->rx_le;
753	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
754}
755
756static int
757msk_newbuf(struct msk_if_softc *sc_if, int idx)
758{
759	struct msk_rx_desc *rx_le;
760	struct msk_rxdesc *rxd;
761	struct mbuf *m;
762	bus_dma_segment_t segs[1];
763	bus_dmamap_t map;
764	int nsegs;
765
766	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
767	if (m == NULL)
768		return (ENOBUFS);
769
770	m->m_len = m->m_pkthdr.len = MCLBYTES;
771	m_adj(m, ETHER_ALIGN);
772
773	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag,
774	    sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs,
775	    BUS_DMA_NOWAIT) != 0) {
776		m_freem(m);
777		return (ENOBUFS);
778	}
779	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
780
781	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
782	if (rxd->rx_m != NULL) {
783		bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
784		    BUS_DMASYNC_POSTREAD);
785		bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
786	}
787	map = rxd->rx_dmamap;
788	rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
789	sc_if->msk_cdata.msk_rx_sparemap = map;
790	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
791	    BUS_DMASYNC_PREREAD);
792	rxd->rx_m = m;
793	rx_le = rxd->rx_le;
794	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
795	rx_le->msk_control =
796	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
797
798	return (0);
799}
800
801static int
802msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
803{
804	struct msk_rx_desc *rx_le;
805	struct msk_rxdesc *rxd;
806	struct mbuf *m;
807	bus_dma_segment_t segs[1];
808	bus_dmamap_t map;
809	int nsegs;
810	void *buf;
811
812	MGETHDR(m, M_DONTWAIT, MT_DATA);
813	if (m == NULL)
814		return (ENOBUFS);
815	buf = msk_jalloc(sc_if);
816	if (buf == NULL) {
817		m_freem(m);
818		return (ENOBUFS);
819	}
820	/* Attach the buffer to the mbuf. */
821	MEXTADD(m, buf, MSK_JLEN, msk_jfree, (struct msk_if_softc *)sc_if, 0,
822	    EXT_NET_DRV);
823	if ((m->m_flags & M_EXT) == 0) {
824		m_freem(m);
825		return (ENOBUFS);
826	}
827	m->m_pkthdr.len = m->m_len = MSK_JLEN;
828	m_adj(m, ETHER_ALIGN);
829
830	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
831	    sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
832	    BUS_DMA_NOWAIT) != 0) {
833		m_freem(m);
834		return (ENOBUFS);
835	}
836	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
837
838	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
839	if (rxd->rx_m != NULL) {
840		bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
841		    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
842		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
843		    rxd->rx_dmamap);
844	}
845	map = rxd->rx_dmamap;
846	rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
847	sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
848	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
849	    BUS_DMASYNC_PREREAD);
850	rxd->rx_m = m;
851	rx_le = rxd->rx_le;
852	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
853	rx_le->msk_control =
854	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
855
856	return (0);
857}
858
859/*
860 * Set media options.
861 */
862static int
863msk_mediachange(struct ifnet *ifp)
864{
865	struct msk_if_softc *sc_if;
866	struct mii_data	*mii;
867
868	sc_if = ifp->if_softc;
869
870	MSK_IF_LOCK(sc_if);
871	mii = device_get_softc(sc_if->msk_miibus);
872	mii_mediachg(mii);
873	MSK_IF_UNLOCK(sc_if);
874
875	return (0);
876}
877
878/*
879 * Report current media status.
880 */
881static void
882msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
883{
884	struct msk_if_softc *sc_if;
885	struct mii_data	*mii;
886
887	sc_if = ifp->if_softc;
888	MSK_IF_LOCK(sc_if);
889	mii = device_get_softc(sc_if->msk_miibus);
890
891	mii_pollstat(mii);
892	MSK_IF_UNLOCK(sc_if);
893	ifmr->ifm_active = mii->mii_media_active;
894	ifmr->ifm_status = mii->mii_media_status;
895}
896
897static int
898msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
899{
900	struct msk_if_softc *sc_if;
901	struct ifreq *ifr;
902	struct mii_data	*mii;
903	int error, mask;
904
905	sc_if = ifp->if_softc;
906	ifr = (struct ifreq *)data;
907	error = 0;
908
909	switch(command) {
910	case SIOCSIFMTU:
911		if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) {
912			error = EINVAL;
913			break;
914		}
915		if (sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
916		    ifr->ifr_mtu > MSK_MAX_FRAMELEN) {
917			error = EINVAL;
918			break;
919		}
920		MSK_IF_LOCK(sc_if);
921		ifp->if_mtu = ifr->ifr_mtu;
922		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
923			msk_init_locked(sc_if);
924		MSK_IF_UNLOCK(sc_if);
925		break;
926	case SIOCSIFFLAGS:
927		MSK_IF_LOCK(sc_if);
928		if ((ifp->if_flags & IFF_UP) != 0) {
929			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
930				if (((ifp->if_flags ^ sc_if->msk_if_flags)
931				    & IFF_PROMISC) != 0) {
932					msk_setpromisc(sc_if);
933					msk_setmulti(sc_if);
934				}
935			} else {
936				if (sc_if->msk_detach == 0)
937					msk_init_locked(sc_if);
938			}
939		} else {
940			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
941				msk_stop(sc_if);
942		}
943		sc_if->msk_if_flags = ifp->if_flags;
944		MSK_IF_UNLOCK(sc_if);
945		break;
946	case SIOCADDMULTI:
947	case SIOCDELMULTI:
948		MSK_IF_LOCK(sc_if);
949		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
950			msk_setmulti(sc_if);
951		MSK_IF_UNLOCK(sc_if);
952		break;
953	case SIOCGIFMEDIA:
954	case SIOCSIFMEDIA:
955		mii = device_get_softc(sc_if->msk_miibus);
956		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
957		break;
958	case SIOCSIFCAP:
959		MSK_IF_LOCK(sc_if);
960		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
961		if ((mask & IFCAP_TXCSUM) != 0) {
962			ifp->if_capenable ^= IFCAP_TXCSUM;
963			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
964			    (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
965				ifp->if_hwassist |= MSK_CSUM_FEATURES;
966			else
967				ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
968		}
969		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
970			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
971			msk_setvlan(sc_if, ifp);
972		}
973
974		if ((mask & IFCAP_TSO4) != 0) {
975			ifp->if_capenable ^= IFCAP_TSO4;
976			if ((IFCAP_TSO4 & ifp->if_capenable) != 0 &&
977			    (IFCAP_TSO4 & ifp->if_capabilities) != 0)
978				ifp->if_hwassist |= CSUM_TSO;
979			else
980				ifp->if_hwassist &= ~CSUM_TSO;
981		}
982		VLAN_CAPABILITIES(ifp);
983		MSK_IF_UNLOCK(sc_if);
984		break;
985	default:
986		error = ether_ioctl(ifp, command, data);
987		break;
988	}
989
990	return (error);
991}
992
993static int
994mskc_probe(device_t dev)
995{
996	struct msk_product *mp;
997	uint16_t vendor, devid;
998	int i;
999
1000	vendor = pci_get_vendor(dev);
1001	devid = pci_get_device(dev);
1002	mp = msk_products;
1003	for (i = 0; i < sizeof(msk_products)/sizeof(msk_products[0]);
1004	    i++, mp++) {
1005		if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
1006			device_set_desc(dev, mp->msk_name);
1007			return (BUS_PROBE_DEFAULT);
1008		}
1009	}
1010
1011	return (ENXIO);
1012}
1013
1014static int
1015mskc_setup_rambuffer(struct msk_softc *sc)
1016{
1017	int totqsize, minqsize;
1018	int avail, next;
1019	int i;
1020	uint8_t val;
1021
1022	/* Get adapter SRAM size. */
1023	val = CSR_READ_1(sc, B2_E_0);
1024	sc->msk_ramsize = (val == 0) ? 128 : val * 4;
1025	if (sc->msk_hw_id == CHIP_ID_YUKON_FE)
1026		sc->msk_ramsize = 4 * 4;
1027	if (bootverbose)
1028		device_printf(sc->msk_dev,
1029		    "RAM buffer size : %dKB\n", sc->msk_ramsize);
1030
1031	totqsize = sc->msk_ramsize * sc->msk_num_port;
1032	minqsize = MSK_MIN_RXQ_SIZE + MSK_MIN_TXQ_SIZE;
1033	if (minqsize > sc->msk_ramsize)
1034		minqsize = sc->msk_ramsize;
1035
1036	if (minqsize * sc->msk_num_port > totqsize) {
1037		device_printf(sc->msk_dev,
1038		    "not enough RAM buffer memory : %d/%dKB\n",
1039		    minqsize * sc->msk_num_port, totqsize);
1040		return (ENOSPC);
1041	}
1042
1043	avail = totqsize;
1044	if (sc->msk_num_port > 1) {
1045		/*
1046		 * Divide up the memory evenly so that everyone gets a
1047		 * fair share for dual port adapters.
1048		 */
1049		avail = sc->msk_ramsize;
1050	}
1051
1052	/* Take away the minimum memory for active queues. */
1053	avail -= minqsize;
1054	/* Rx queue gets the minimum + 80% of the rest. */
1055	sc->msk_rxqsize =
1056	    (avail * MSK_RAM_QUOTA_RX) / 100 + MSK_MIN_RXQ_SIZE;
1057	avail -= (sc->msk_rxqsize - MSK_MIN_RXQ_SIZE);
1058	sc->msk_txqsize = avail + MSK_MIN_TXQ_SIZE;
1059
1060	for (i = 0, next = 0; i < sc->msk_num_port; i++) {
1061		sc->msk_rxqstart[i] = next;
1062		sc->msk_rxqend[i] = next + (sc->msk_rxqsize * 1024) - 1;
1063		next = sc->msk_rxqend[i] + 1;
1064		sc->msk_txqstart[i] = next;
1065		sc->msk_txqend[i] = next + (sc->msk_txqsize * 1024) - 1;
1066		next = sc->msk_txqend[i] + 1;
1067		if (bootverbose) {
1068			device_printf(sc->msk_dev,
1069			    "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
1070			    sc->msk_rxqsize, sc->msk_rxqstart[i],
1071			    sc->msk_rxqend[i]);
1072			device_printf(sc->msk_dev,
1073			    "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
1074			    sc->msk_txqsize, sc->msk_txqstart[i],
1075			    sc->msk_txqend[i]);
1076		}
1077	}
1078
1079	return (0);
1080}
1081
1082static void
1083msk_phy_power(struct msk_softc *sc, int mode)
1084{
1085	uint32_t val;
1086	int i;
1087
1088	switch (mode) {
1089	case MSK_PHY_POWERUP:
1090		/* Switch power to VCC (WA for VAUX problem). */
1091		CSR_WRITE_1(sc, B0_POWER_CTRL,
1092		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
1093		/* Disable Core Clock Division, set Clock Select to 0. */
1094		CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
1095
1096		val = 0;
1097		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1098		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1099			/* Enable bits are inverted. */
1100			val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1101			      Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1102			      Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1103		}
1104		/*
1105		 * Enable PCI & Core Clock, enable clock gating for both Links.
1106		 */
1107		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1108
1109		val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1110		val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
1111		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1112		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1113			/* Deassert Low Power for 1st PHY. */
1114			val |= PCI_Y2_PHY1_COMA;
1115			if (sc->msk_num_port > 1)
1116				val |= PCI_Y2_PHY2_COMA;
1117		} else if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
1118			uint32_t our;
1119
1120			CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
1121
1122			/* Enable all clocks. */
1123			pci_write_config(sc->msk_dev, PCI_OUR_REG_3, 0, 4);
1124			our = pci_read_config(sc->msk_dev, PCI_OUR_REG_4, 4);
1125			our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN|
1126			    PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST);
1127			/* Set all bits to 0 except bits 15..12. */
1128			pci_write_config(sc->msk_dev, PCI_OUR_REG_4, our, 4);
1129			/* Set to default value. */
1130			pci_write_config(sc->msk_dev, PCI_OUR_REG_5, 0, 4);
1131		}
1132		/* Release PHY from PowerDown/COMA mode. */
1133		pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1134		for (i = 0; i < sc->msk_num_port; i++) {
1135			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1136			    GMLC_RST_SET);
1137			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1138			    GMLC_RST_CLR);
1139		}
1140		break;
1141	case MSK_PHY_POWERDOWN:
1142		val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1143		val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
1144		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1145		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1146			val &= ~PCI_Y2_PHY1_COMA;
1147			if (sc->msk_num_port > 1)
1148				val &= ~PCI_Y2_PHY2_COMA;
1149		}
1150		pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1151
1152		val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1153		      Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1154		      Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1155		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1156		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1157			/* Enable bits are inverted. */
1158			val = 0;
1159		}
1160		/*
1161		 * Disable PCI & Core Clock, disable clock gating for
1162		 * both Links.
1163		 */
1164		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1165		CSR_WRITE_1(sc, B0_POWER_CTRL,
1166		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
1167		break;
1168	default:
1169		break;
1170	}
1171}
1172
1173static void
1174mskc_reset(struct msk_softc *sc)
1175{
1176	bus_addr_t addr;
1177	uint16_t status;
1178	uint32_t val;
1179	int i;
1180
1181	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1182
1183	/* Disable ASF. */
1184	if (sc->msk_hw_id < CHIP_ID_YUKON_XL) {
1185		CSR_WRITE_4(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
1186		CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
1187	}
1188	/*
1189	 * Since we disabled ASF, S/W reset is required for Power Management.
1190	 */
1191	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1192	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1193
1194	/* Clear all error bits in the PCI status register. */
1195	status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
1196	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1197
1198	pci_write_config(sc->msk_dev, PCIR_STATUS, status |
1199	    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
1200	    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
1201	CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);
1202
1203	switch (sc->msk_bustype) {
1204	case MSK_PEX_BUS:
1205		/* Clear all PEX errors. */
1206		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
1207		val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
1208		if ((val & PEX_RX_OV) != 0) {
1209			sc->msk_intrmask &= ~Y2_IS_HW_ERR;
1210			sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
1211		}
1212		break;
1213	case MSK_PCI_BUS:
1214	case MSK_PCIX_BUS:
1215		/* Set Cache Line Size to 2(8bytes) if configured to 0. */
1216		val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
1217		if (val == 0)
1218			pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
1219		if (sc->msk_bustype == MSK_PCIX_BUS) {
1220			/* Set Cache Line Size opt. */
1221			val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1222			val |= PCI_CLS_OPT;
1223			pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1224		}
1225		break;
1226	}
1227	/* Set PHY power state. */
1228	msk_phy_power(sc, MSK_PHY_POWERUP);
1229
1230	/* Reset GPHY/GMAC Control */
1231	for (i = 0; i < sc->msk_num_port; i++) {
1232		/* GPHY Control reset. */
1233		CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
1234		CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
1235		/* GMAC Control reset. */
1236		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
1237		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
1238		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
1239	}
1240	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1241
1242	/* LED On. */
1243	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);
1244
1245	/* Clear TWSI IRQ. */
1246	CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);
1247
1248	/* Turn off hardware timer. */
1249	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
1250	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);
1251
1252	/* Turn off descriptor polling. */
1253	CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);
1254
1255	/* Turn off time stamps. */
1256	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
1257	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
1258
1259	/* Configure timeout values. */
1260	for (i = 0; i < sc->msk_num_port; i++) {
1261		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
1262		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
1263		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
1264		    MSK_RI_TO_53);
1265		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
1266		    MSK_RI_TO_53);
1267		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
1268		    MSK_RI_TO_53);
1269		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
1270		    MSK_RI_TO_53);
1271		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
1272		    MSK_RI_TO_53);
1273		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
1274		    MSK_RI_TO_53);
1275		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
1276		    MSK_RI_TO_53);
1277		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
1278		    MSK_RI_TO_53);
1279		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
1280		    MSK_RI_TO_53);
1281		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
1282		    MSK_RI_TO_53);
1283		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
1284		    MSK_RI_TO_53);
1285		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
1286		    MSK_RI_TO_53);
1287	}
1288
1289	/* Disable all interrupts. */
1290	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1291	CSR_READ_4(sc, B0_HWE_IMSK);
1292	CSR_WRITE_4(sc, B0_IMSK, 0);
1293	CSR_READ_4(sc, B0_IMSK);
1294
1295        /*
1296         * On dual port PCI-X card, there is an problem where status
1297         * can be received out of order due to split transactions.
1298         */
1299	if (sc->msk_bustype == MSK_PCIX_BUS && sc->msk_num_port > 1) {
1300		int pcix;
1301		uint16_t pcix_cmd;
1302
1303		if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &pcix) == 0) {
1304			pcix_cmd = pci_read_config(sc->msk_dev, pcix + 2, 2);
1305			/* Clear Max Outstanding Split Transactions. */
1306			pcix_cmd &= ~0x70;
1307			CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1308			pci_write_config(sc->msk_dev, pcix + 2, pcix_cmd, 2);
1309			CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1310		}
1311        }
1312	if (sc->msk_bustype == MSK_PEX_BUS) {
1313		uint16_t v, width;
1314
1315		v = pci_read_config(sc->msk_dev, PEX_DEV_CTRL, 2);
1316		/* Change Max. Read Request Size to 4096 bytes. */
1317		v &= ~PEX_DC_MAX_RRS_MSK;
1318		v |= PEX_DC_MAX_RD_RQ_SIZE(5);
1319		pci_write_config(sc->msk_dev, PEX_DEV_CTRL, v, 2);
1320		width = pci_read_config(sc->msk_dev, PEX_LNK_STAT, 2);
1321		width = (width & PEX_LS_LINK_WI_MSK) >> 4;
1322		v = pci_read_config(sc->msk_dev, PEX_LNK_CAP, 2);
1323		v = (v & PEX_LS_LINK_WI_MSK) >> 4;
1324		if (v != width)
1325			device_printf(sc->msk_dev,
1326			    "negotiated width of link(x%d) != "
1327			    "max. width of link(x%d)\n", width, v);
1328	}
1329
1330	/* Clear status list. */
1331	bzero(sc->msk_stat_ring,
1332	    sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
1333	sc->msk_stat_cons = 0;
1334	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
1335	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1336	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
1337	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
1338	/* Set the status list base address. */
1339	addr = sc->msk_stat_ring_paddr;
1340	CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
1341	CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
1342	/* Set the status list last index. */
1343	CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
1344	if (HW_FEATURE(sc, HWF_WA_DEV_43_418)) {
1345		/* WA for dev. #4.3 */
1346		CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
1347		/* WA for dev. #4.18 */
1348		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
1349		CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
1350	} else {
1351		CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
1352		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
1353		CSR_WRITE_1(sc, STAT_FIFO_ISR_WM,
1354		    HW_FEATURE(sc, HWF_WA_DEV_4109) ? 0x10 : 0x04);
1355		CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
1356	}
1357	/*
1358	 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
1359	 */
1360	CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));
1361
1362	/* Enable status unit. */
1363	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);
1364
1365	CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
1366	CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
1367	CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
1368}
1369
1370static int
1371msk_probe(device_t dev)
1372{
1373	struct msk_softc *sc;
1374	char desc[100];
1375
1376	sc = device_get_softc(device_get_parent(dev));
1377	/*
1378	 * Not much to do here. We always know there will be
1379	 * at least one GMAC present, and if there are two,
1380	 * mskc_attach() will create a second device instance
1381	 * for us.
1382	 */
1383	snprintf(desc, sizeof(desc),
1384	    "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
1385	    model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
1386	    sc->msk_hw_rev);
1387	device_set_desc_copy(dev, desc);
1388
1389	return (BUS_PROBE_DEFAULT);
1390}
1391
1392static int
1393msk_attach(device_t dev)
1394{
1395	struct msk_softc *sc;
1396	struct msk_if_softc *sc_if;
1397	struct ifnet *ifp;
1398	int i, port, error;
1399	uint8_t eaddr[6];
1400
1401	if (dev == NULL)
1402		return (EINVAL);
1403
1404	error = 0;
1405	sc_if = device_get_softc(dev);
1406	sc = device_get_softc(device_get_parent(dev));
1407	port = *(int *)device_get_ivars(dev);
1408
1409	sc_if->msk_if_dev = dev;
1410	sc_if->msk_port = port;
1411	sc_if->msk_softc = sc;
1412	sc->msk_if[port] = sc_if;
1413	/* Setup Tx/Rx queue register offsets. */
1414	if (port == MSK_PORT_A) {
1415		sc_if->msk_txq = Q_XA1;
1416		sc_if->msk_txsq = Q_XS1;
1417		sc_if->msk_rxq = Q_R1;
1418	} else {
1419		sc_if->msk_txq = Q_XA2;
1420		sc_if->msk_txsq = Q_XS2;
1421		sc_if->msk_rxq = Q_R2;
1422	}
1423
1424	callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
1425	TASK_INIT(&sc_if->msk_link_task, 0, msk_link_task, sc_if);
1426
1427	if ((error = msk_txrx_dma_alloc(sc_if) != 0))
1428		goto fail;
1429
1430	ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
1431	if (ifp == NULL) {
1432		device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
1433		error = ENOSPC;
1434		goto fail;
1435	}
1436	ifp->if_softc = sc_if;
1437	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1438	ifp->if_mtu = ETHERMTU;
1439	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1440	/*
1441	 * IFCAP_RXCSUM capability is intentionally disabled as the hardware
1442	 * has serious bug in Rx checksum offload for all Yukon II family
1443	 * hardware. It seems there is a workaround to make it work somtimes.
1444	 * However, the workaround also have to check OP code sequences to
1445	 * verify whether the OP code is correct. Sometimes it should compute
1446	 * IP/TCP/UDP checksum in driver in order to verify correctness of
1447	 * checksum computed by hardware. If you have to compute checksum
1448	 * with software to verify the hardware's checksum why have hardware
1449	 * compute the checksum? I think there is no reason to spend time to
1450	 * make Rx checksum offload work on Yukon II hardware.
1451	 */
1452	ifp->if_capabilities = IFCAP_TXCSUM;
1453	ifp->if_hwassist = MSK_CSUM_FEATURES;
1454	if (sc->msk_hw_id != CHIP_ID_YUKON_EC_U) {
1455		/* It seems Yukon EC Ultra doesn't support TSO. */
1456		ifp->if_capabilities |= IFCAP_TSO4;
1457		ifp->if_hwassist |= CSUM_TSO;
1458	}
1459	ifp->if_capenable = ifp->if_capabilities;
1460	ifp->if_ioctl = msk_ioctl;
1461	ifp->if_start = msk_start;
1462	ifp->if_timer = 0;
1463	ifp->if_watchdog = NULL;
1464	ifp->if_init = msk_init;
1465	IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
1466	ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1;
1467	IFQ_SET_READY(&ifp->if_snd);
1468
1469	TASK_INIT(&sc_if->msk_tx_task, 1, msk_tx_task, ifp);
1470
1471	/*
1472	 * Get station address for this interface. Note that
1473	 * dual port cards actually come with three station
1474	 * addresses: one for each port, plus an extra. The
1475	 * extra one is used by the SysKonnect driver software
1476	 * as a 'virtual' station address for when both ports
1477	 * are operating in failover mode. Currently we don't
1478	 * use this extra address.
1479	 */
1480	MSK_IF_LOCK(sc_if);
1481	for (i = 0; i < ETHER_ADDR_LEN; i++)
1482		eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);
1483
1484	/*
1485	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
1486	 */
1487	MSK_IF_UNLOCK(sc_if);
1488	ether_ifattach(ifp, eaddr);
1489	MSK_IF_LOCK(sc_if);
1490
1491	/* VLAN capability setup */
1492        ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
1493	if (ifp->if_capabilities & IFCAP_HWCSUM)
1494		ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
1495	ifp->if_capenable = ifp->if_capabilities;
1496
1497	/*
1498	 * Tell the upper layer(s) we support long frames.
1499	 * Must appear after the call to ether_ifattach() because
1500	 * ether_ifattach() sets ifi_hdrlen to the default value.
1501	 */
1502        ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1503
1504	/*
1505	 * Do miibus setup.
1506	 */
1507	MSK_IF_UNLOCK(sc_if);
1508	error = mii_phy_probe(dev, &sc_if->msk_miibus, msk_mediachange,
1509	    msk_mediastatus);
1510	if (error != 0) {
1511		device_printf(sc_if->msk_if_dev, "no PHY found!\n");
1512		ether_ifdetach(ifp);
1513		error = ENXIO;
1514		goto fail;
1515	}
1516	/* Check whether PHY Id is MARVELL. */
1517	if (msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_ID0)
1518	    == PHY_MARV_ID0_VAL)
1519		sc->msk_marvell_phy = 1;
1520
1521fail:
1522	if (error != 0) {
1523		/* Access should be ok even though lock has been dropped */
1524		sc->msk_if[port] = NULL;
1525		msk_detach(dev);
1526	}
1527
1528	return (error);
1529}
1530
1531/*
1532 * Attach the interface. Allocate softc structures, do ifmedia
1533 * setup and ethernet/BPF attach.
1534 */
1535static int
1536mskc_attach(device_t dev)
1537{
1538	struct msk_softc *sc;
1539	int error, msic, *port, reg;
1540
1541	sc = device_get_softc(dev);
1542	sc->msk_dev = dev;
1543	mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1544	    MTX_DEF);
1545
1546	/*
1547	 * Map control/status registers.
1548	 */
1549	pci_enable_busmaster(dev);
1550
1551	/* Allocate I/O resource */
1552#ifdef MSK_USEIOSPACE
1553	sc->msk_res_spec = msk_res_spec_io;
1554#else
1555	sc->msk_res_spec = msk_res_spec_mem;
1556#endif
1557	sc->msk_irq_spec = msk_irq_spec_legacy;
1558	error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
1559	if (error) {
1560		if (sc->msk_res_spec == msk_res_spec_mem)
1561			sc->msk_res_spec = msk_res_spec_io;
1562		else
1563			sc->msk_res_spec = msk_res_spec_mem;
1564		error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
1565		if (error) {
1566			device_printf(dev, "couldn't allocate %s resources\n",
1567			    sc->msk_res_spec == msk_res_spec_mem ? "memory" :
1568			    "I/O");
1569			mtx_destroy(&sc->msk_mtx);
1570			return (ENXIO);
1571		}
1572	}
1573
1574	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1575	sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
1576	sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
1577	/* Bail out if chip is not recognized. */
1578	if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
1579	    sc->msk_hw_id > CHIP_ID_YUKON_FE) {
1580		device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
1581		    sc->msk_hw_id, sc->msk_hw_rev);
1582		error = ENXIO;
1583		goto fail;
1584	}
1585
1586	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1587	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1588	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
1589	    &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
1590	    "max number of Rx events to process");
1591
1592	sc->msk_process_limit = MSK_PROC_DEFAULT;
1593	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
1594	    "process_limit", &sc->msk_process_limit);
1595	if (error == 0) {
1596		if (sc->msk_process_limit < MSK_PROC_MIN ||
1597		    sc->msk_process_limit > MSK_PROC_MAX) {
1598			device_printf(dev, "process_limit value out of range; "
1599			    "using default: %d\n", MSK_PROC_DEFAULT);
1600			sc->msk_process_limit = MSK_PROC_DEFAULT;
1601		}
1602	}
1603
1604	/* Soft reset. */
1605	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1606	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1607	sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
1608	 if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
1609		 sc->msk_coppertype = 0;
1610	 else
1611		 sc->msk_coppertype = 1;
1612	/* Check number of MACs. */
1613	sc->msk_num_port = 1;
1614	if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
1615	    CFG_DUAL_MAC_MSK) {
1616		if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
1617			sc->msk_num_port++;
1618	}
1619
1620	/* Check bus type. */
1621	if (pci_find_extcap(sc->msk_dev, PCIY_EXPRESS, &reg) == 0)
1622		sc->msk_bustype = MSK_PEX_BUS;
1623	else if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &reg) == 0)
1624		sc->msk_bustype = MSK_PCIX_BUS;
1625	else
1626		sc->msk_bustype = MSK_PCI_BUS;
1627
1628	/* Get H/W features(bugs). */
1629	switch (sc->msk_hw_id) {
1630	case CHIP_ID_YUKON_EC:
1631		sc->msk_clock = 125;	/* 125 Mhz */
1632		if (sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
1633			sc->msk_hw_feature =
1634			    HWF_WA_DEV_42  | HWF_WA_DEV_46 | HWF_WA_DEV_43_418 |
1635			    HWF_WA_DEV_420 | HWF_WA_DEV_423 |
1636			    HWF_WA_DEV_424 | HWF_WA_DEV_425 | HWF_WA_DEV_427 |
1637			    HWF_WA_DEV_428 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 |
1638			    HWF_WA_DEV_4152 | HWF_WA_DEV_4167;
1639		} else {
1640			/* A2/A3 */
1641			sc->msk_hw_feature =
1642			    HWF_WA_DEV_424 | HWF_WA_DEV_425 | HWF_WA_DEV_427 |
1643			    HWF_WA_DEV_428 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 |
1644			    HWF_WA_DEV_4152 | HWF_WA_DEV_4167;
1645		}
1646		break;
1647	case CHIP_ID_YUKON_EC_U:
1648		sc->msk_clock = 125;	/* 125 Mhz */
1649		if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
1650			sc->msk_hw_feature = HWF_WA_DEV_427 | HWF_WA_DEV_483 |
1651			    HWF_WA_DEV_4109;
1652		} else if (sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
1653			uint16_t v;
1654
1655			sc->msk_hw_feature = HWF_WA_DEV_427 | HWF_WA_DEV_4109 |
1656			    HWF_WA_DEV_4185;
1657			v = CSR_READ_2(sc, Q_ADDR(Q_XA1, Q_WM));
1658			if (v == 0)
1659				sc->msk_hw_feature |= HWF_WA_DEV_4185CS |
1660				    HWF_WA_DEV_4200;
1661		}
1662		break;
1663	case CHIP_ID_YUKON_FE:
1664		sc->msk_clock = 100;	/* 100 Mhz */
1665		sc->msk_hw_feature = HWF_WA_DEV_427 | HWF_WA_DEV_4109 |
1666		    HWF_WA_DEV_4152 | HWF_WA_DEV_4167;
1667		break;
1668	case CHIP_ID_YUKON_XL:
1669		sc->msk_clock = 156;	/* 156 Mhz */
1670		switch (sc->msk_hw_rev) {
1671		case CHIP_REV_YU_XL_A0:
1672			sc->msk_hw_feature =
1673			    HWF_WA_DEV_427 | HWF_WA_DEV_463 | HWF_WA_DEV_472 |
1674			    HWF_WA_DEV_479 | HWF_WA_DEV_483 | HWF_WA_DEV_4115 |
1675			    HWF_WA_DEV_4152 | HWF_WA_DEV_4167;
1676			break;
1677		case CHIP_REV_YU_XL_A1:
1678			sc->msk_hw_feature =
1679			    HWF_WA_DEV_427 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 |
1680			    HWF_WA_DEV_4115 | HWF_WA_DEV_4152 | HWF_WA_DEV_4167;
1681			break;
1682		case CHIP_REV_YU_XL_A2:
1683			sc->msk_hw_feature =
1684			    HWF_WA_DEV_427 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 |
1685			    HWF_WA_DEV_4115 | HWF_WA_DEV_4167;
1686			break;
1687		case CHIP_REV_YU_XL_A3:
1688			sc->msk_hw_feature =
1689			    HWF_WA_DEV_427 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 |
1690			    HWF_WA_DEV_4115;
1691		}
1692		break;
1693	default:
1694		sc->msk_clock = 156;	/* 156 Mhz */
1695		sc->msk_hw_feature = 0;
1696	}
1697
1698	/* Allocate IRQ resources. */
1699	msic = pci_msi_count(dev);
1700	if (bootverbose)
1701		device_printf(dev, "MSI count : %d\n", msic);
1702	/*
1703	 * The Yukon II reports it can handle two messages, one for each
1704	 * possible port.  We go ahead and allocate two messages and only
1705	 * setup a handler for both if we have a dual port card.
1706	 *
1707	 * XXX: I haven't untangled the interrupt handler to handle dual
1708	 * port cards with separate MSI messages, so for now I disable MSI
1709	 * on dual port cards.
1710	 */
1711	if (msic == 2 && msi_disable == 0 && sc->msk_num_port == 1 &&
1712	    pci_alloc_msi(dev, &msic) == 0) {
1713		if (msic == 2) {
1714			sc->msk_msi = 1;
1715			sc->msk_irq_spec = msk_irq_spec_msi;
1716		} else
1717			pci_release_msi(dev);
1718	}
1719
1720	error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq);
1721	if (error) {
1722		device_printf(dev, "couldn't allocate IRQ resources\n");
1723		goto fail;
1724	}
1725
1726	if ((error = msk_status_dma_alloc(sc)) != 0)
1727		goto fail;
1728
1729	/* Set base interrupt mask. */
1730	sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
1731	sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
1732	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;
1733
1734	/* Reset the adapter. */
1735	mskc_reset(sc);
1736
1737	if ((error = mskc_setup_rambuffer(sc)) != 0)
1738		goto fail;
1739
1740	sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
1741	if (sc->msk_devs[MSK_PORT_A] == NULL) {
1742		device_printf(dev, "failed to add child for PORT_A\n");
1743		error = ENXIO;
1744		goto fail;
1745	}
1746	port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
1747	if (port == NULL) {
1748		device_printf(dev, "failed to allocate memory for "
1749		    "ivars of PORT_A\n");
1750		error = ENXIO;
1751		goto fail;
1752	}
1753	*port = MSK_PORT_A;
1754	device_set_ivars(sc->msk_devs[MSK_PORT_A], port);
1755
1756	if (sc->msk_num_port > 1) {
1757		sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
1758		if (sc->msk_devs[MSK_PORT_B] == NULL) {
1759			device_printf(dev, "failed to add child for PORT_B\n");
1760			error = ENXIO;
1761			goto fail;
1762		}
1763		port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
1764		if (port == NULL) {
1765			device_printf(dev, "failed to allocate memory for "
1766			    "ivars of PORT_B\n");
1767			error = ENXIO;
1768			goto fail;
1769		}
1770		*port = MSK_PORT_B;
1771		device_set_ivars(sc->msk_devs[MSK_PORT_B], port);
1772	}
1773
1774	error = bus_generic_attach(dev);
1775	if (error) {
1776		device_printf(dev, "failed to attach port(s)\n");
1777		goto fail;
1778	}
1779
1780	TASK_INIT(&sc->msk_int_task, 0, msk_int_task, sc);
1781	sc->msk_tq = taskqueue_create_fast("msk_taskq", M_WAITOK,
1782	    taskqueue_thread_enqueue, &sc->msk_tq);
1783	taskqueue_start_threads(&sc->msk_tq, 1, PI_NET, "%s taskq",
1784	    device_get_nameunit(sc->msk_dev));
1785	/* Hook interrupt last to avoid having to lock softc. */
1786	error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
1787	    INTR_MPSAFE | INTR_FAST, msk_intr, sc, &sc->msk_intrhand[0]);
1788
1789	if (error != 0) {
1790		device_printf(dev, "couldn't set up interrupt handler\n");
1791		taskqueue_free(sc->msk_tq);
1792		sc->msk_tq = NULL;
1793		goto fail;
1794	}
1795fail:
1796	if (error != 0)
1797		mskc_detach(dev);
1798
1799	return (error);
1800}
1801
1802/*
1803 * Shutdown hardware and free up resources. This can be called any
1804 * time after the mutex has been initialized. It is called in both
1805 * the error case in attach and the normal detach case so it needs
1806 * to be careful about only freeing resources that have actually been
1807 * allocated.
1808 */
1809static int
1810msk_detach(device_t dev)
1811{
1812	struct msk_softc *sc;
1813	struct msk_if_softc *sc_if;
1814	struct ifnet *ifp;
1815
1816	sc_if = device_get_softc(dev);
1817	KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx),
1818	    ("msk mutex not initialized in msk_detach"));
1819	MSK_IF_LOCK(sc_if);
1820
1821	ifp = sc_if->msk_ifp;
1822	if (device_is_attached(dev)) {
1823		/* XXX */
1824		sc_if->msk_detach = 1;
1825		msk_stop(sc_if);
1826		/* Can't hold locks while calling detach. */
1827		MSK_IF_UNLOCK(sc_if);
1828		callout_drain(&sc_if->msk_tick_ch);
1829		taskqueue_drain(taskqueue_fast, &sc_if->msk_tx_task);
1830		taskqueue_drain(taskqueue_swi, &sc_if->msk_link_task);
1831		ether_ifdetach(ifp);
1832		MSK_IF_LOCK(sc_if);
1833	}
1834
1835	/*
1836	 * We're generally called from mskc_detach() which is using
1837	 * device_delete_child() to get to here. It's already trashed
1838	 * miibus for us, so don't do it here or we'll panic.
1839	 *
1840	 * if (sc_if->msk_miibus != NULL) {
1841	 * 	device_delete_child(dev, sc_if->msk_miibus);
1842	 * 	sc_if->msk_miibus = NULL;
1843	 * }
1844	 */
1845
1846	msk_txrx_dma_free(sc_if);
1847	bus_generic_detach(dev);
1848
1849	if (ifp)
1850		if_free(ifp);
1851	sc = sc_if->msk_softc;
1852	sc->msk_if[sc_if->msk_port] = NULL;
1853	MSK_IF_UNLOCK(sc_if);
1854
1855	return (0);
1856}
1857
1858static int
1859mskc_detach(device_t dev)
1860{
1861	struct msk_softc *sc;
1862
1863	sc = device_get_softc(dev);
1864	KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized"));
1865
1866	if (device_is_alive(dev)) {
1867		if (sc->msk_devs[MSK_PORT_A] != NULL) {
1868			free(device_get_ivars(sc->msk_devs[MSK_PORT_A]),
1869			    M_DEVBUF);
1870			device_delete_child(dev, sc->msk_devs[MSK_PORT_A]);
1871		}
1872		if (sc->msk_devs[MSK_PORT_B] != NULL) {
1873			free(device_get_ivars(sc->msk_devs[MSK_PORT_B]),
1874			    M_DEVBUF);
1875			device_delete_child(dev, sc->msk_devs[MSK_PORT_B]);
1876		}
1877		bus_generic_detach(dev);
1878	}
1879
1880	/* Disable all interrupts. */
1881	CSR_WRITE_4(sc, B0_IMSK, 0);
1882	CSR_READ_4(sc, B0_IMSK);
1883	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1884	CSR_READ_4(sc, B0_HWE_IMSK);
1885
1886	/* LED Off. */
1887	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);
1888
1889	/* Put hardware reset. */
1890	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1891
1892	msk_status_dma_free(sc);
1893
1894	if (sc->msk_tq != NULL) {
1895		taskqueue_drain(sc->msk_tq, &sc->msk_int_task);
1896		taskqueue_free(sc->msk_tq);
1897		sc->msk_tq = NULL;
1898	}
1899	if (sc->msk_intrhand[0]) {
1900		bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand[0]);
1901		sc->msk_intrhand[0] = NULL;
1902	}
1903	if (sc->msk_intrhand[1]) {
1904		bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand[0]);
1905		sc->msk_intrhand[1] = NULL;
1906	}
1907	bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq);
1908	if (sc->msk_msi)
1909		pci_release_msi(dev);
1910	bus_release_resources(dev, sc->msk_res_spec, sc->msk_res);
1911	mtx_destroy(&sc->msk_mtx);
1912
1913	return (0);
1914}
1915
1916struct msk_dmamap_arg {
1917	bus_addr_t	msk_busaddr;
1918};
1919
1920static void
1921msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1922{
1923	struct msk_dmamap_arg *ctx;
1924
1925	if (error != 0)
1926		return;
1927	ctx = arg;
1928	ctx->msk_busaddr = segs[0].ds_addr;
1929}
1930
1931/* Create status DMA region. */
1932static int
1933msk_status_dma_alloc(struct msk_softc *sc)
1934{
1935	struct msk_dmamap_arg ctx;
1936	int error;
1937
1938	error = bus_dma_tag_create(
1939		    bus_get_dma_tag(sc->msk_dev),	/* parent */
1940		    MSK_STAT_ALIGN, 0,		/* alignment, boundary */
1941		    BUS_SPACE_MAXADDR,		/* lowaddr */
1942		    BUS_SPACE_MAXADDR,		/* highaddr */
1943		    NULL, NULL,			/* filter, filterarg */
1944		    MSK_STAT_RING_SZ,		/* maxsize */
1945		    1,				/* nsegments */
1946		    MSK_STAT_RING_SZ,		/* maxsegsize */
1947		    0,				/* flags */
1948		    NULL, NULL,			/* lockfunc, lockarg */
1949		    &sc->msk_stat_tag);
1950	if (error != 0) {
1951		device_printf(sc->msk_dev,
1952		    "failed to create status DMA tag\n");
1953		return (error);
1954	}
1955
1956	/* Allocate DMA'able memory and load the DMA map for status ring. */
1957	error = bus_dmamem_alloc(sc->msk_stat_tag,
1958	    (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
1959	    BUS_DMA_ZERO, &sc->msk_stat_map);
1960	if (error != 0) {
1961		device_printf(sc->msk_dev,
1962		    "failed to allocate DMA'able memory for status ring\n");
1963		return (error);
1964	}
1965
1966	ctx.msk_busaddr = 0;
1967	error = bus_dmamap_load(sc->msk_stat_tag,
1968	    sc->msk_stat_map, sc->msk_stat_ring, MSK_STAT_RING_SZ,
1969	    msk_dmamap_cb, &ctx, 0);
1970	if (error != 0) {
1971		device_printf(sc->msk_dev,
1972		    "failed to load DMA'able memory for status ring\n");
1973		return (error);
1974	}
1975	sc->msk_stat_ring_paddr = ctx.msk_busaddr;
1976
1977	return (0);
1978}
1979
1980static void
1981msk_status_dma_free(struct msk_softc *sc)
1982{
1983
1984	/* Destroy status block. */
1985	if (sc->msk_stat_tag) {
1986		if (sc->msk_stat_map) {
1987			bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
1988			if (sc->msk_stat_ring) {
1989				bus_dmamem_free(sc->msk_stat_tag,
1990				    sc->msk_stat_ring, sc->msk_stat_map);
1991				sc->msk_stat_ring = NULL;
1992			}
1993			sc->msk_stat_map = NULL;
1994		}
1995		bus_dma_tag_destroy(sc->msk_stat_tag);
1996		sc->msk_stat_tag = NULL;
1997	}
1998}
1999
2000static int
2001msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
2002{
2003	struct msk_dmamap_arg ctx;
2004	struct msk_txdesc *txd;
2005	struct msk_rxdesc *rxd;
2006	struct msk_rxdesc *jrxd;
2007	struct msk_jpool_entry *entry;
2008	uint8_t *ptr;
2009	int error, i;
2010
2011	mtx_init(&sc_if->msk_jlist_mtx, "msk_jlist_mtx", NULL, MTX_DEF);
2012	SLIST_INIT(&sc_if->msk_jfree_listhead);
2013	SLIST_INIT(&sc_if->msk_jinuse_listhead);
2014
2015	/* Create parent DMA tag. */
2016	/*
2017	 * XXX
2018	 * It seems that Yukon II supports full 64bits DMA operations. But
2019	 * it needs two descriptors(list elements) for 64bits DMA operations.
2020	 * Since we don't know what DMA address mappings(32bits or 64bits)
2021	 * would be used in advance for each mbufs, we limits its DMA space
2022	 * to be in range of 32bits address space. Otherwise, we should check
2023	 * what DMA address is used and chain another descriptor for the
2024	 * 64bits DMA operation. This also means descriptor ring size is
2025	 * variable. Limiting DMA address to be in 32bit address space greatly
2026	 * simplyfies descriptor handling and possibly would increase
2027	 * performance a bit due to efficient handling of descriptors.
2028	 * Apart from harassing checksum offloading mechanisms, it seems
2029	 * it's really bad idea to use a seperate descriptor for 64bit
2030	 * DMA operation to save small descriptor memory. Anyway, I've
2031	 * never seen these exotic scheme on ethernet interface hardware.
2032	 */
2033	error = bus_dma_tag_create(
2034		    bus_get_dma_tag(sc_if->msk_if_dev),	/* parent */
2035		    1, 0,			/* alignment, boundary */
2036		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2037		    BUS_SPACE_MAXADDR,		/* highaddr */
2038		    NULL, NULL,			/* filter, filterarg */
2039		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
2040		    0,				/* nsegments */
2041		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
2042		    0,				/* flags */
2043		    NULL, NULL,			/* lockfunc, lockarg */
2044		    &sc_if->msk_cdata.msk_parent_tag);
2045	if (error != 0) {
2046		device_printf(sc_if->msk_if_dev,
2047		    "failed to create parent DMA tag\n");
2048		goto fail;
2049	}
2050	/* Create tag for Tx ring. */
2051	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2052		    MSK_RING_ALIGN, 0,		/* alignment, boundary */
2053		    BUS_SPACE_MAXADDR,		/* lowaddr */
2054		    BUS_SPACE_MAXADDR,		/* highaddr */
2055		    NULL, NULL,			/* filter, filterarg */
2056		    MSK_TX_RING_SZ,		/* maxsize */
2057		    1,				/* nsegments */
2058		    MSK_TX_RING_SZ,		/* maxsegsize */
2059		    0,				/* flags */
2060		    NULL, NULL,			/* lockfunc, lockarg */
2061		    &sc_if->msk_cdata.msk_tx_ring_tag);
2062	if (error != 0) {
2063		device_printf(sc_if->msk_if_dev,
2064		    "failed to create Tx ring DMA tag\n");
2065		goto fail;
2066	}
2067
2068	/* Create tag for Rx ring. */
2069	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2070		    MSK_RING_ALIGN, 0,		/* alignment, boundary */
2071		    BUS_SPACE_MAXADDR,		/* lowaddr */
2072		    BUS_SPACE_MAXADDR,		/* highaddr */
2073		    NULL, NULL,			/* filter, filterarg */
2074		    MSK_RX_RING_SZ,		/* maxsize */
2075		    1,				/* nsegments */
2076		    MSK_RX_RING_SZ,		/* maxsegsize */
2077		    0,				/* flags */
2078		    NULL, NULL,			/* lockfunc, lockarg */
2079		    &sc_if->msk_cdata.msk_rx_ring_tag);
2080	if (error != 0) {
2081		device_printf(sc_if->msk_if_dev,
2082		    "failed to create Rx ring DMA tag\n");
2083		goto fail;
2084	}
2085
2086	/* Create tag for jumbo Rx ring. */
2087	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2088		    MSK_RING_ALIGN, 0,		/* alignment, boundary */
2089		    BUS_SPACE_MAXADDR,		/* lowaddr */
2090		    BUS_SPACE_MAXADDR,		/* highaddr */
2091		    NULL, NULL,			/* filter, filterarg */
2092		    MSK_JUMBO_RX_RING_SZ,	/* maxsize */
2093		    1,				/* nsegments */
2094		    MSK_JUMBO_RX_RING_SZ,	/* maxsegsize */
2095		    0,				/* flags */
2096		    NULL, NULL,			/* lockfunc, lockarg */
2097		    &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2098	if (error != 0) {
2099		device_printf(sc_if->msk_if_dev,
2100		    "failed to create jumbo Rx ring DMA tag\n");
2101		goto fail;
2102	}
2103
2104	/* Create tag for jumbo buffer blocks. */
2105	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2106		    PAGE_SIZE, 0,		/* alignment, boundary */
2107		    BUS_SPACE_MAXADDR,		/* lowaddr */
2108		    BUS_SPACE_MAXADDR,		/* highaddr */
2109		    NULL, NULL,			/* filter, filterarg */
2110		    MSK_JMEM,			/* maxsize */
2111		    1,				/* nsegments */
2112		    MSK_JMEM,			/* maxsegsize */
2113		    0,				/* flags */
2114		    NULL, NULL,			/* lockfunc, lockarg */
2115		    &sc_if->msk_cdata.msk_jumbo_tag);
2116	if (error != 0) {
2117		device_printf(sc_if->msk_if_dev,
2118		    "failed to create jumbo Rx buffer block DMA tag\n");
2119		goto fail;
2120	}
2121
2122	/* Create tag for Tx buffers. */
2123	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2124		    1, 0,			/* alignment, boundary */
2125		    BUS_SPACE_MAXADDR,		/* lowaddr */
2126		    BUS_SPACE_MAXADDR,		/* highaddr */
2127		    NULL, NULL,			/* filter, filterarg */
2128		    MCLBYTES * MSK_MAXTXSEGS,	/* maxsize */
2129		    MSK_MAXTXSEGS,		/* nsegments */
2130		    MCLBYTES,			/* maxsegsize */
2131		    0,				/* flags */
2132		    NULL, NULL,			/* lockfunc, lockarg */
2133		    &sc_if->msk_cdata.msk_tx_tag);
2134	if (error != 0) {
2135		device_printf(sc_if->msk_if_dev,
2136		    "failed to create Tx DMA tag\n");
2137		goto fail;
2138	}
2139
2140	/* Create tag for Rx buffers. */
2141	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2142		    1, 0,			/* alignment, boundary */
2143		    BUS_SPACE_MAXADDR,		/* lowaddr */
2144		    BUS_SPACE_MAXADDR,		/* highaddr */
2145		    NULL, NULL,			/* filter, filterarg */
2146		    MCLBYTES,			/* maxsize */
2147		    1,				/* nsegments */
2148		    MCLBYTES,			/* maxsegsize */
2149		    0,				/* flags */
2150		    NULL, NULL,			/* lockfunc, lockarg */
2151		    &sc_if->msk_cdata.msk_rx_tag);
2152	if (error != 0) {
2153		device_printf(sc_if->msk_if_dev,
2154		    "failed to create Rx DMA tag\n");
2155		goto fail;
2156	}
2157
2158	/* Create tag for jumbo Rx buffers. */
2159	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2160		    PAGE_SIZE, 0,		/* alignment, boundary */
2161		    BUS_SPACE_MAXADDR,		/* lowaddr */
2162		    BUS_SPACE_MAXADDR,		/* highaddr */
2163		    NULL, NULL,			/* filter, filterarg */
2164		    MCLBYTES * MSK_MAXRXSEGS,	/* maxsize */
2165		    MSK_MAXRXSEGS,		/* nsegments */
2166		    MSK_JLEN,			/* maxsegsize */
2167		    0,				/* flags */
2168		    NULL, NULL,			/* lockfunc, lockarg */
2169		    &sc_if->msk_cdata.msk_jumbo_rx_tag);
2170	if (error != 0) {
2171		device_printf(sc_if->msk_if_dev,
2172		    "failed to create jumbo Rx DMA tag\n");
2173		goto fail;
2174	}
2175
2176	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
2177	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag,
2178	    (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK |
2179	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map);
2180	if (error != 0) {
2181		device_printf(sc_if->msk_if_dev,
2182		    "failed to allocate DMA'able memory for Tx ring\n");
2183		goto fail;
2184	}
2185
2186	ctx.msk_busaddr = 0;
2187	error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag,
2188	    sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring,
2189	    MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, 0);
2190	if (error != 0) {
2191		device_printf(sc_if->msk_if_dev,
2192		    "failed to load DMA'able memory for Tx ring\n");
2193		goto fail;
2194	}
2195	sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr;
2196
2197	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
2198	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag,
2199	    (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK |
2200	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map);
2201	if (error != 0) {
2202		device_printf(sc_if->msk_if_dev,
2203		    "failed to allocate DMA'able memory for Rx ring\n");
2204		goto fail;
2205	}
2206
2207	ctx.msk_busaddr = 0;
2208	error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag,
2209	    sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring,
2210	    MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, 0);
2211	if (error != 0) {
2212		device_printf(sc_if->msk_if_dev,
2213		    "failed to load DMA'able memory for Rx ring\n");
2214		goto fail;
2215	}
2216	sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr;
2217
2218	/* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
2219	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2220	    (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
2221	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2222	    &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2223	if (error != 0) {
2224		device_printf(sc_if->msk_if_dev,
2225		    "failed to allocate DMA'able memory for jumbo Rx ring\n");
2226		goto fail;
2227	}
2228
2229	ctx.msk_busaddr = 0;
2230	error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2231	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
2232	    sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
2233	    msk_dmamap_cb, &ctx, 0);
2234	if (error != 0) {
2235		device_printf(sc_if->msk_if_dev,
2236		    "failed to load DMA'able memory for jumbo Rx ring\n");
2237		goto fail;
2238	}
2239	sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
2240
2241	/* Create DMA maps for Tx buffers. */
2242	for (i = 0; i < MSK_TX_RING_CNT; i++) {
2243		txd = &sc_if->msk_cdata.msk_txdesc[i];
2244		txd->tx_m = NULL;
2245		txd->tx_dmamap = NULL;
2246		error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0,
2247		    &txd->tx_dmamap);
2248		if (error != 0) {
2249			device_printf(sc_if->msk_if_dev,
2250			    "failed to create Tx dmamap\n");
2251			goto fail;
2252		}
2253	}
2254	/* Create DMA maps for Rx buffers. */
2255	if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2256	    &sc_if->msk_cdata.msk_rx_sparemap)) != 0) {
2257		device_printf(sc_if->msk_if_dev,
2258		    "failed to create spare Rx dmamap\n");
2259		goto fail;
2260	}
2261	for (i = 0; i < MSK_RX_RING_CNT; i++) {
2262		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2263		rxd->rx_m = NULL;
2264		rxd->rx_dmamap = NULL;
2265		error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2266		    &rxd->rx_dmamap);
2267		if (error != 0) {
2268			device_printf(sc_if->msk_if_dev,
2269			    "failed to create Rx dmamap\n");
2270			goto fail;
2271		}
2272	}
2273	/* Create DMA maps for jumbo Rx buffers. */
2274	if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2275	    &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
2276		device_printf(sc_if->msk_if_dev,
2277		    "failed to create spare jumbo Rx dmamap\n");
2278		goto fail;
2279	}
2280	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2281		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2282		jrxd->rx_m = NULL;
2283		jrxd->rx_dmamap = NULL;
2284		error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2285		    &jrxd->rx_dmamap);
2286		if (error != 0) {
2287			device_printf(sc_if->msk_if_dev,
2288			    "failed to create jumbo Rx dmamap\n");
2289			goto fail;
2290		}
2291	}
2292
2293	/* Allocate DMA'able memory and load the DMA map for jumbo buf. */
2294	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_tag,
2295	    (void **)&sc_if->msk_rdata.msk_jumbo_buf,
2296	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2297	    &sc_if->msk_cdata.msk_jumbo_map);
2298	if (error != 0) {
2299		device_printf(sc_if->msk_if_dev,
2300		    "failed to allocate DMA'able memory for jumbo buf\n");
2301		goto fail;
2302	}
2303
2304	ctx.msk_busaddr = 0;
2305	error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_tag,
2306	    sc_if->msk_cdata.msk_jumbo_map, sc_if->msk_rdata.msk_jumbo_buf,
2307	    MSK_JMEM, msk_dmamap_cb, &ctx, 0);
2308	if (error != 0) {
2309		device_printf(sc_if->msk_if_dev,
2310		    "failed to load DMA'able memory for jumbobuf\n");
2311		goto fail;
2312	}
2313	sc_if->msk_rdata.msk_jumbo_buf_paddr = ctx.msk_busaddr;
2314
2315	/*
2316	 * Now divide it up into 9K pieces and save the addresses
2317	 * in an array.
2318	 */
2319	ptr = sc_if->msk_rdata.msk_jumbo_buf;
2320	for (i = 0; i < MSK_JSLOTS; i++) {
2321		sc_if->msk_cdata.msk_jslots[i] = ptr;
2322		ptr += MSK_JLEN;
2323		entry = malloc(sizeof(struct msk_jpool_entry),
2324		    M_DEVBUF, M_WAITOK);
2325		if (entry == NULL) {
2326			device_printf(sc_if->msk_if_dev,
2327			    "no memory for jumbo buffers!\n");
2328			error = ENOMEM;
2329			goto fail;
2330		}
2331		entry->slot = i;
2332		SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
2333		    jpool_entries);
2334	}
2335
2336fail:
2337	return (error);
2338}
2339
2340static void
2341msk_txrx_dma_free(struct msk_if_softc *sc_if)
2342{
2343	struct msk_txdesc *txd;
2344	struct msk_rxdesc *rxd;
2345	struct msk_rxdesc *jrxd;
2346	struct msk_jpool_entry *entry;
2347	int i;
2348
2349	MSK_JLIST_LOCK(sc_if);
2350	while ((entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead))) {
2351		device_printf(sc_if->msk_if_dev,
2352		    "asked to free buffer that is in use!\n");
2353		SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
2354		SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
2355		    jpool_entries);
2356	}
2357
2358	while (!SLIST_EMPTY(&sc_if->msk_jfree_listhead)) {
2359		entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
2360		SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
2361		free(entry, M_DEVBUF);
2362	}
2363	MSK_JLIST_UNLOCK(sc_if);
2364
2365	/* Destroy jumbo buffer block. */
2366	if (sc_if->msk_cdata.msk_jumbo_map)
2367		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_tag,
2368		    sc_if->msk_cdata.msk_jumbo_map);
2369
2370	if (sc_if->msk_rdata.msk_jumbo_buf) {
2371		bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_tag,
2372		    sc_if->msk_rdata.msk_jumbo_buf,
2373		    sc_if->msk_cdata.msk_jumbo_map);
2374		sc_if->msk_rdata.msk_jumbo_buf = NULL;
2375		sc_if->msk_cdata.msk_jumbo_map = NULL;
2376	}
2377
2378	/* Tx ring. */
2379	if (sc_if->msk_cdata.msk_tx_ring_tag) {
2380		if (sc_if->msk_cdata.msk_tx_ring_map)
2381			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag,
2382			    sc_if->msk_cdata.msk_tx_ring_map);
2383		if (sc_if->msk_cdata.msk_tx_ring_map &&
2384		    sc_if->msk_rdata.msk_tx_ring)
2385			bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag,
2386			    sc_if->msk_rdata.msk_tx_ring,
2387			    sc_if->msk_cdata.msk_tx_ring_map);
2388		sc_if->msk_rdata.msk_tx_ring = NULL;
2389		sc_if->msk_cdata.msk_tx_ring_map = NULL;
2390		bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag);
2391		sc_if->msk_cdata.msk_tx_ring_tag = NULL;
2392	}
2393	/* Rx ring. */
2394	if (sc_if->msk_cdata.msk_rx_ring_tag) {
2395		if (sc_if->msk_cdata.msk_rx_ring_map)
2396			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag,
2397			    sc_if->msk_cdata.msk_rx_ring_map);
2398		if (sc_if->msk_cdata.msk_rx_ring_map &&
2399		    sc_if->msk_rdata.msk_rx_ring)
2400			bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag,
2401			    sc_if->msk_rdata.msk_rx_ring,
2402			    sc_if->msk_cdata.msk_rx_ring_map);
2403		sc_if->msk_rdata.msk_rx_ring = NULL;
2404		sc_if->msk_cdata.msk_rx_ring_map = NULL;
2405		bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag);
2406		sc_if->msk_cdata.msk_rx_ring_tag = NULL;
2407	}
2408	/* Jumbo Rx ring. */
2409	if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
2410		if (sc_if->msk_cdata.msk_jumbo_rx_ring_map)
2411			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2412			    sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2413		if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
2414		    sc_if->msk_rdata.msk_jumbo_rx_ring)
2415			bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2416			    sc_if->msk_rdata.msk_jumbo_rx_ring,
2417			    sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2418		sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
2419		sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
2420		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2421		sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
2422	}
2423	/* Tx buffers. */
2424	if (sc_if->msk_cdata.msk_tx_tag) {
2425		for (i = 0; i < MSK_TX_RING_CNT; i++) {
2426			txd = &sc_if->msk_cdata.msk_txdesc[i];
2427			if (txd->tx_dmamap) {
2428				bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
2429				    txd->tx_dmamap);
2430				txd->tx_dmamap = NULL;
2431			}
2432		}
2433		bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
2434		sc_if->msk_cdata.msk_tx_tag = NULL;
2435	}
2436	/* Rx buffers. */
2437	if (sc_if->msk_cdata.msk_rx_tag) {
2438		for (i = 0; i < MSK_RX_RING_CNT; i++) {
2439			rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2440			if (rxd->rx_dmamap) {
2441				bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2442				    rxd->rx_dmamap);
2443				rxd->rx_dmamap = NULL;
2444			}
2445		}
2446		if (sc_if->msk_cdata.msk_rx_sparemap) {
2447			bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2448			    sc_if->msk_cdata.msk_rx_sparemap);
2449			sc_if->msk_cdata.msk_rx_sparemap = 0;
2450		}
2451		bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2452		sc_if->msk_cdata.msk_rx_tag = NULL;
2453	}
2454	/* Jumbo Rx buffers. */
2455	if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
2456		for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2457			jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2458			if (jrxd->rx_dmamap) {
2459				bus_dmamap_destroy(
2460				    sc_if->msk_cdata.msk_jumbo_rx_tag,
2461				    jrxd->rx_dmamap);
2462				jrxd->rx_dmamap = NULL;
2463			}
2464		}
2465		if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
2466			bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
2467			    sc_if->msk_cdata.msk_jumbo_rx_sparemap);
2468			sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
2469		}
2470		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
2471		sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
2472	}
2473
2474	if (sc_if->msk_cdata.msk_parent_tag) {
2475		bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
2476		sc_if->msk_cdata.msk_parent_tag = NULL;
2477	}
2478	mtx_destroy(&sc_if->msk_jlist_mtx);
2479}
2480
2481/*
2482 * Allocate a jumbo buffer.
2483 */
2484static void *
2485msk_jalloc(struct msk_if_softc *sc_if)
2486{
2487	struct msk_jpool_entry *entry;
2488
2489	MSK_JLIST_LOCK(sc_if);
2490
2491	entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
2492
2493	if (entry == NULL) {
2494		MSK_JLIST_UNLOCK(sc_if);
2495		return (NULL);
2496	}
2497
2498	SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
2499	SLIST_INSERT_HEAD(&sc_if->msk_jinuse_listhead, entry, jpool_entries);
2500
2501	MSK_JLIST_UNLOCK(sc_if);
2502
2503	return (sc_if->msk_cdata.msk_jslots[entry->slot]);
2504}
2505
2506/*
2507 * Release a jumbo buffer.
2508 */
2509static void
2510msk_jfree(void *buf, void *args)
2511{
2512	struct msk_if_softc *sc_if;
2513	struct msk_jpool_entry *entry;
2514	int i;
2515
2516	/* Extract the softc struct pointer. */
2517	sc_if = (struct msk_if_softc *)args;
2518	KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__));
2519
2520	MSK_JLIST_LOCK(sc_if);
2521	/* Calculate the slot this buffer belongs to. */
2522	i = ((vm_offset_t)buf
2523	     - (vm_offset_t)sc_if->msk_rdata.msk_jumbo_buf) / MSK_JLEN;
2524	KASSERT(i >= 0 && i < MSK_JSLOTS,
2525	    ("%s: asked to free buffer that we don't manage!", __func__));
2526
2527	entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead);
2528	KASSERT(entry != NULL, ("%s: buffer not in use!", __func__));
2529	entry->slot = i;
2530	SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
2531	SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, jpool_entries);
2532	if (SLIST_EMPTY(&sc_if->msk_jinuse_listhead))
2533		wakeup(sc_if);
2534
2535	MSK_JLIST_UNLOCK(sc_if);
2536}
2537
2538/*
2539 * It's copy of ath_defrag(ath(4)).
2540 *
2541 * Defragment an mbuf chain, returning at most maxfrags separate
2542 * mbufs+clusters.  If this is not possible NULL is returned and
2543 * the original mbuf chain is left in it's present (potentially
2544 * modified) state.  We use two techniques: collapsing consecutive
2545 * mbufs and replacing consecutive mbufs by a cluster.
2546 */
2547static struct mbuf *
2548msk_defrag(struct mbuf *m0, int how, int maxfrags)
2549{
2550	struct mbuf *m, *n, *n2, **prev;
2551	u_int curfrags;
2552
2553	/*
2554	 * Calculate the current number of frags.
2555	 */
2556	curfrags = 0;
2557	for (m = m0; m != NULL; m = m->m_next)
2558		curfrags++;
2559	/*
2560	 * First, try to collapse mbufs.  Note that we always collapse
2561	 * towards the front so we don't need to deal with moving the
2562	 * pkthdr.  This may be suboptimal if the first mbuf has much
2563	 * less data than the following.
2564	 */
2565	m = m0;
2566again:
2567	for (;;) {
2568		n = m->m_next;
2569		if (n == NULL)
2570			break;
2571		if ((m->m_flags & M_RDONLY) == 0 &&
2572		    n->m_len < M_TRAILINGSPACE(m)) {
2573			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
2574				n->m_len);
2575			m->m_len += n->m_len;
2576			m->m_next = n->m_next;
2577			m_free(n);
2578			if (--curfrags <= maxfrags)
2579				return (m0);
2580		} else
2581			m = n;
2582	}
2583	KASSERT(maxfrags > 1,
2584		("maxfrags %u, but normal collapse failed", maxfrags));
2585	/*
2586	 * Collapse consecutive mbufs to a cluster.
2587	 */
2588	prev = &m0->m_next;		/* NB: not the first mbuf */
2589	while ((n = *prev) != NULL) {
2590		if ((n2 = n->m_next) != NULL &&
2591		    n->m_len + n2->m_len < MCLBYTES) {
2592			m = m_getcl(how, MT_DATA, 0);
2593			if (m == NULL)
2594				goto bad;
2595			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
2596			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
2597				n2->m_len);
2598			m->m_len = n->m_len + n2->m_len;
2599			m->m_next = n2->m_next;
2600			*prev = m;
2601			m_free(n);
2602			m_free(n2);
2603			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
2604				return m0;
2605			/*
2606			 * Still not there, try the normal collapse
2607			 * again before we allocate another cluster.
2608			 */
2609			goto again;
2610		}
2611		prev = &n->m_next;
2612	}
2613	/*
2614	 * No place where we can collapse to a cluster; punt.
2615	 * This can occur if, for example, you request 2 frags
2616	 * but the packet requires that both be clusters (we
2617	 * never reallocate the first mbuf to avoid moving the
2618	 * packet header).
2619	 */
2620bad:
2621	return (NULL);
2622}
2623
2624static int
2625msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
2626{
2627	struct msk_txdesc *txd, *txd_last;
2628	struct msk_tx_desc *tx_le;
2629	struct mbuf *m;
2630	bus_dmamap_t map;
2631	bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
2632	uint32_t control, prod, si;
2633	uint16_t offset, tcp_offset, tso_mtu;
2634	int error, i, nseg, tso;
2635
2636	MSK_IF_LOCK_ASSERT(sc_if);
2637
2638	tcp_offset = offset = 0;
2639	m = *m_head;
2640	if ((m->m_pkthdr.csum_flags & (MSK_CSUM_FEATURES | CSUM_TSO)) != 0) {
2641		/*
2642		 * Since mbuf has no protocol specific structure information
2643		 * in it we have to inspect protocol information here to
2644		 * setup TSO and checksum offload. I don't know why Marvell
2645		 * made a such decision in chip design because other GigE
2646		 * hardwares normally takes care of all these chores in
2647		 * hardware. However, TSO performance of Yukon II is very
2648		 * good such that it's worth to implement it.
2649		 */
2650		struct ether_vlan_header *evh;
2651		struct ether_header *eh;
2652		struct ip *ip;
2653		struct tcphdr *tcp;
2654
2655		/* TODO check for M_WRITABLE(m) */
2656
2657		offset = sizeof(struct ether_header);
2658		m = m_pullup(m, offset);
2659		if (m == NULL) {
2660			*m_head = NULL;
2661			return (ENOBUFS);
2662		}
2663		eh = mtod(m, struct ether_header *);
2664		/* Check if hardware VLAN insertion is off. */
2665		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2666			offset = sizeof(struct ether_vlan_header);
2667			m = m_pullup(m, offset);
2668			if (m == NULL) {
2669				*m_head = NULL;
2670				return (ENOBUFS);
2671			}
2672			evh = mtod(m, struct ether_vlan_header *);
2673			ip = (struct ip *)(evh + 1);
2674		} else
2675			ip = (struct ip *)(eh + 1);
2676		m = m_pullup(m, offset + sizeof(struct ip));
2677		if (m == NULL) {
2678			*m_head = NULL;
2679			return (ENOBUFS);
2680		}
2681		offset += (ip->ip_hl << 2);
2682		tcp_offset = offset;
2683		if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2684			m = m_pullup(m, offset + sizeof(struct tcphdr));
2685			if (m == NULL) {
2686				*m_head = NULL;
2687				return (ENOBUFS);
2688			}
2689			tcp = mtod(m, struct tcphdr *);
2690			offset += (tcp->th_off << 2);
2691		}
2692		*m_head = m;
2693	}
2694
2695	prod = sc_if->msk_cdata.msk_tx_prod;
2696	txd = &sc_if->msk_cdata.msk_txdesc[prod];
2697	txd_last = txd;
2698	map = txd->tx_dmamap;
2699	error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map,
2700	    *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2701	if (error == EFBIG) {
2702		m = msk_defrag(*m_head, M_DONTWAIT, MSK_MAXTXSEGS);
2703		if (m == NULL) {
2704			m_freem(*m_head);
2705			*m_head = NULL;
2706			return (ENOBUFS);
2707		}
2708		*m_head = m;
2709		error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag,
2710		    map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2711		if (error != 0) {
2712			m_freem(*m_head);
2713			*m_head = NULL;
2714			return (error);
2715		}
2716	} else if (error != 0)
2717		return (error);
2718	if (nseg == 0) {
2719		m_freem(*m_head);
2720		*m_head = NULL;
2721		return (EIO);
2722	}
2723
2724	/* Check number of available descriptors. */
2725	if (sc_if->msk_cdata.msk_tx_cnt + nseg >=
2726	    (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) {
2727		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
2728		return (ENOBUFS);
2729	}
2730
2731	control = 0;
2732	tso = 0;
2733	tx_le = NULL;
2734
2735	/* Check TSO support. */
2736	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2737		tso_mtu = offset + m->m_pkthdr.tso_segsz;
2738		if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) {
2739			tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2740			tx_le->msk_addr = htole32(tso_mtu);
2741			tx_le->msk_control = htole32(OP_LRGLEN | HW_OWNER);
2742			sc_if->msk_cdata.msk_tx_cnt++;
2743			MSK_INC(prod, MSK_TX_RING_CNT);
2744			sc_if->msk_cdata.msk_tso_mtu = tso_mtu;
2745		}
2746		tso++;
2747	}
2748	/* Check if we have a VLAN tag to insert. */
2749	if ((m->m_flags & M_VLANTAG) != 0) {
2750		if (tso == 0) {
2751			tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2752			tx_le->msk_addr = htole32(0);
2753			tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
2754			    htons(m->m_pkthdr.ether_vtag));
2755			sc_if->msk_cdata.msk_tx_cnt++;
2756			MSK_INC(prod, MSK_TX_RING_CNT);
2757		} else {
2758			tx_le->msk_control |= htole32(OP_VLAN |
2759			    htons(m->m_pkthdr.ether_vtag));
2760		}
2761		control |= INS_VLAN;
2762	}
2763	/* Check if we have to handle checksum offload. */
2764	if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) {
2765		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2766		tx_le->msk_addr = htole32(((tcp_offset + m->m_pkthdr.csum_data)
2767		    & 0xffff) | ((uint32_t)tcp_offset << 16));
2768		tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER));
2769		control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
2770		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2771			control |= UDPTCP;
2772		sc_if->msk_cdata.msk_tx_cnt++;
2773		MSK_INC(prod, MSK_TX_RING_CNT);
2774	}
2775
2776	si = prod;
2777	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2778	tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
2779	if (tso == 0)
2780		tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2781		    OP_PACKET);
2782	else
2783		tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2784		    OP_LARGESEND);
2785	sc_if->msk_cdata.msk_tx_cnt++;
2786	MSK_INC(prod, MSK_TX_RING_CNT);
2787
2788	for (i = 1; i < nseg; i++) {
2789		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2790		tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
2791		tx_le->msk_control = htole32(txsegs[i].ds_len | control |
2792		    OP_BUFFER | HW_OWNER);
2793		sc_if->msk_cdata.msk_tx_cnt++;
2794		MSK_INC(prod, MSK_TX_RING_CNT);
2795	}
2796	/* Update producer index. */
2797	sc_if->msk_cdata.msk_tx_prod = prod;
2798
2799	/* Set EOP on the last desciptor. */
2800	prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
2801	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2802	tx_le->msk_control |= htole32(EOP);
2803
2804	/* Turn the first descriptor ownership to hardware. */
2805	tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
2806	tx_le->msk_control |= htole32(HW_OWNER);
2807
2808	txd = &sc_if->msk_cdata.msk_txdesc[prod];
2809	map = txd_last->tx_dmamap;
2810	txd_last->tx_dmamap = txd->tx_dmamap;
2811	txd->tx_dmamap = map;
2812	txd->tx_m = m;
2813
2814	/* Sync descriptors. */
2815	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
2816	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
2817	    sc_if->msk_cdata.msk_tx_ring_map,
2818	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2819
2820	return (0);
2821}
2822
2823static void
2824msk_tx_task(void *arg, int pending)
2825{
2826	struct ifnet *ifp;
2827
2828	ifp = arg;
2829	msk_start(ifp);
2830}
2831
2832static void
2833msk_start(struct ifnet *ifp)
2834{
2835        struct msk_if_softc *sc_if;
2836        struct mbuf *m_head;
2837	int enq;
2838
2839	sc_if = ifp->if_softc;
2840
2841	MSK_IF_LOCK(sc_if);
2842
2843	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2844	    IFF_DRV_RUNNING || sc_if->msk_link == 0) {
2845		MSK_IF_UNLOCK(sc_if);
2846		return;
2847	}
2848
2849	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2850	    sc_if->msk_cdata.msk_tx_cnt <
2851	    (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
2852		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2853		if (m_head == NULL)
2854			break;
2855		/*
2856		 * Pack the data into the transmit ring. If we
2857		 * don't have room, set the OACTIVE flag and wait
2858		 * for the NIC to drain the ring.
2859		 */
2860		if (msk_encap(sc_if, &m_head) != 0) {
2861			if (m_head == NULL)
2862				break;
2863			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2864			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2865			break;
2866		}
2867
2868		enq++;
2869		/*
2870		 * If there's a BPF listener, bounce a copy of this frame
2871		 * to him.
2872		 */
2873		BPF_MTAP(ifp, m_head);
2874	}
2875
2876	if (enq > 0) {
2877		/* Transmit */
2878		CSR_WRITE_2(sc_if->msk_softc,
2879		    Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
2880		    sc_if->msk_cdata.msk_tx_prod);
2881
2882		/* Set a timeout in case the chip goes out to lunch. */
2883		sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT;
2884	}
2885
2886	MSK_IF_UNLOCK(sc_if);
2887}
2888
2889static void
2890msk_watchdog(struct msk_if_softc *sc_if)
2891{
2892	struct ifnet *ifp;
2893	uint32_t ridx;
2894	int idx;
2895
2896	MSK_IF_LOCK_ASSERT(sc_if);
2897
2898	if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer)
2899		return;
2900	ifp = sc_if->msk_ifp;
2901	if (sc_if->msk_link == 0) {
2902		if (bootverbose)
2903			if_printf(sc_if->msk_ifp, "watchdog timeout "
2904			   "(missed link)\n");
2905		ifp->if_oerrors++;
2906		msk_init_locked(sc_if);
2907		return;
2908	}
2909
2910	/*
2911	 * Reclaim first as there is a possibility of losing Tx completion
2912	 * interrupts.
2913	 */
2914	ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX;
2915	idx = CSR_READ_2(sc_if->msk_softc, ridx);
2916	if (sc_if->msk_cdata.msk_tx_cons != idx) {
2917		msk_txeof(sc_if, idx);
2918		if (sc_if->msk_cdata.msk_tx_cnt == 0) {
2919			if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2920			    "-- recovering\n");
2921			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2922				taskqueue_enqueue(taskqueue_fast,
2923				    &sc_if->msk_tx_task);
2924			return;
2925		}
2926	}
2927
2928	if_printf(ifp, "watchdog timeout\n");
2929	ifp->if_oerrors++;
2930	msk_init_locked(sc_if);
2931	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2932		taskqueue_enqueue(taskqueue_fast, &sc_if->msk_tx_task);
2933}
2934
2935static void
2936mskc_shutdown(device_t dev)
2937{
2938	struct msk_softc *sc;
2939	int i;
2940
2941	sc = device_get_softc(dev);
2942	MSK_LOCK(sc);
2943	for (i = 0; i < sc->msk_num_port; i++) {
2944		if (sc->msk_if[i] != NULL)
2945			msk_stop(sc->msk_if[i]);
2946	}
2947
2948	/* Disable all interrupts. */
2949	CSR_WRITE_4(sc, B0_IMSK, 0);
2950	CSR_READ_4(sc, B0_IMSK);
2951	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2952	CSR_READ_4(sc, B0_HWE_IMSK);
2953
2954	/* Put hardware reset. */
2955	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2956
2957	MSK_UNLOCK(sc);
2958}
2959
2960static int
2961mskc_suspend(device_t dev)
2962{
2963	struct msk_softc *sc;
2964	int i;
2965
2966	sc = device_get_softc(dev);
2967
2968	MSK_LOCK(sc);
2969
2970	for (i = 0; i < sc->msk_num_port; i++) {
2971		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2972		    ((sc->msk_if[i]->msk_ifp->if_drv_flags &
2973		    IFF_DRV_RUNNING) != 0))
2974			msk_stop(sc->msk_if[i]);
2975	}
2976
2977	/* Disable all interrupts. */
2978	CSR_WRITE_4(sc, B0_IMSK, 0);
2979	CSR_READ_4(sc, B0_IMSK);
2980	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2981	CSR_READ_4(sc, B0_HWE_IMSK);
2982
2983	msk_phy_power(sc, MSK_PHY_POWERDOWN);
2984
2985	/* Put hardware reset. */
2986	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2987	sc->msk_suspended = 1;
2988
2989	MSK_UNLOCK(sc);
2990
2991	return (0);
2992}
2993
2994static int
2995mskc_resume(device_t dev)
2996{
2997	struct msk_softc *sc;
2998	int i;
2999
3000	sc = device_get_softc(dev);
3001
3002	MSK_LOCK(sc);
3003
3004	mskc_reset(sc);
3005	for (i = 0; i < sc->msk_num_port; i++) {
3006		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
3007		    ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0))
3008			msk_init_locked(sc->msk_if[i]);
3009	}
3010	sc->msk_suspended = 0;
3011
3012	MSK_UNLOCK(sc);
3013
3014	return (0);
3015}
3016
3017static void
3018msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
3019{
3020	struct mbuf *m;
3021	struct ifnet *ifp;
3022	struct msk_rxdesc *rxd;
3023	int cons, rxlen;
3024
3025	ifp = sc_if->msk_ifp;
3026
3027	MSK_IF_LOCK_ASSERT(sc_if);
3028
3029	cons = sc_if->msk_cdata.msk_rx_cons;
3030	do {
3031		rxlen = status >> 16;
3032		if ((status & GMR_FS_VLAN) != 0 &&
3033		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3034			rxlen -= ETHER_VLAN_ENCAP_LEN;
3035		if (len > sc_if->msk_framesize ||
3036		    ((status & GMR_FS_ANY_ERR) != 0) ||
3037		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
3038			/* Don't count flow-control packet as errors. */
3039			if ((status & GMR_FS_GOOD_FC) == 0)
3040				ifp->if_ierrors++;
3041			msk_discard_rxbuf(sc_if, cons);
3042			break;
3043		}
3044		rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
3045		m = rxd->rx_m;
3046		if (msk_newbuf(sc_if, cons) != 0) {
3047			ifp->if_iqdrops++;
3048			/* Reuse old buffer. */
3049			msk_discard_rxbuf(sc_if, cons);
3050			break;
3051		}
3052		m->m_pkthdr.rcvif = ifp;
3053		m->m_pkthdr.len = m->m_len = len;
3054		ifp->if_ipackets++;
3055		/* Check for VLAN tagged packets. */
3056		if ((status & GMR_FS_VLAN) != 0 &&
3057		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3058			m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
3059			m->m_flags |= M_VLANTAG;
3060		}
3061		MSK_IF_UNLOCK(sc_if);
3062		(*ifp->if_input)(ifp, m);
3063		MSK_IF_LOCK(sc_if);
3064	} while (0);
3065
3066	MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
3067	MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
3068}
3069
3070static void
3071msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
3072{
3073	struct mbuf *m;
3074	struct ifnet *ifp;
3075	struct msk_rxdesc *jrxd;
3076	int cons, rxlen;
3077
3078	ifp = sc_if->msk_ifp;
3079
3080	MSK_IF_LOCK_ASSERT(sc_if);
3081
3082	cons = sc_if->msk_cdata.msk_rx_cons;
3083	do {
3084		rxlen = status >> 16;
3085		if ((status & GMR_FS_VLAN) != 0 &&
3086		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3087			rxlen -= ETHER_VLAN_ENCAP_LEN;
3088		if (len > sc_if->msk_framesize ||
3089		    ((status & GMR_FS_ANY_ERR) != 0) ||
3090		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
3091			/* Don't count flow-control packet as errors. */
3092			if ((status & GMR_FS_GOOD_FC) == 0)
3093				ifp->if_ierrors++;
3094			msk_discard_jumbo_rxbuf(sc_if, cons);
3095			break;
3096		}
3097		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
3098		m = jrxd->rx_m;
3099		if (msk_jumbo_newbuf(sc_if, cons) != 0) {
3100			ifp->if_iqdrops++;
3101			/* Reuse old buffer. */
3102			msk_discard_jumbo_rxbuf(sc_if, cons);
3103			break;
3104		}
3105		m->m_pkthdr.rcvif = ifp;
3106		m->m_pkthdr.len = m->m_len = len;
3107		ifp->if_ipackets++;
3108		/* Check for VLAN tagged packets. */
3109		if ((status & GMR_FS_VLAN) != 0 &&
3110		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3111			m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
3112			m->m_flags |= M_VLANTAG;
3113		}
3114		MSK_IF_UNLOCK(sc_if);
3115		(*ifp->if_input)(ifp, m);
3116		MSK_IF_LOCK(sc_if);
3117	} while (0);
3118
3119	MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
3120	MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
3121}
3122
3123static void
3124msk_txeof(struct msk_if_softc *sc_if, int idx)
3125{
3126	struct msk_txdesc *txd;
3127	struct msk_tx_desc *cur_tx;
3128	struct ifnet *ifp;
3129	uint32_t control;
3130	int cons, prog;
3131
3132	MSK_IF_LOCK_ASSERT(sc_if);
3133
3134	ifp = sc_if->msk_ifp;
3135
3136	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
3137	    sc_if->msk_cdata.msk_tx_ring_map,
3138	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3139	/*
3140	 * Go through our tx ring and free mbufs for those
3141	 * frames that have been sent.
3142	 */
3143	cons = sc_if->msk_cdata.msk_tx_cons;
3144	prog = 0;
3145	for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
3146		if (sc_if->msk_cdata.msk_tx_cnt <= 0)
3147			break;
3148		prog++;
3149		cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
3150		control = le32toh(cur_tx->msk_control);
3151		sc_if->msk_cdata.msk_tx_cnt--;
3152		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3153		if ((control & EOP) == 0)
3154			continue;
3155		txd = &sc_if->msk_cdata.msk_txdesc[cons];
3156		bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap,
3157		    BUS_DMASYNC_POSTWRITE);
3158		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);
3159
3160		ifp->if_opackets++;
3161		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
3162		    __func__));
3163		m_freem(txd->tx_m);
3164		txd->tx_m = NULL;
3165	}
3166
3167	if (prog > 0) {
3168		sc_if->msk_cdata.msk_tx_cons = cons;
3169		if (sc_if->msk_cdata.msk_tx_cnt == 0)
3170			sc_if->msk_watchdog_timer = 0;
3171		/* No need to sync LEs as we didn't update LEs. */
3172	}
3173}
3174
3175static void
3176msk_tick(void *xsc_if)
3177{
3178	struct msk_if_softc *sc_if;
3179	struct mii_data *mii;
3180
3181	sc_if = xsc_if;
3182
3183	MSK_IF_LOCK_ASSERT(sc_if);
3184
3185	mii = device_get_softc(sc_if->msk_miibus);
3186
3187	mii_tick(mii);
3188	msk_watchdog(sc_if);
3189	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3190}
3191
3192static void
3193msk_intr_phy(struct msk_if_softc *sc_if)
3194{
3195	uint16_t status;
3196
3197	if (sc_if->msk_softc->msk_marvell_phy) {
3198		msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3199		status = msk_phy_readreg(sc_if, PHY_ADDR_MARV,
3200		    PHY_MARV_INT_STAT);
3201		/* Handle FIFO Underrun/Overflow? */
3202		if ((status & PHY_M_IS_FIFO_ERROR))
3203			device_printf(sc_if->msk_if_dev,
3204			    "PHY FIFO underrun/overflow.\n");
3205	}
3206}
3207
3208static void
3209msk_intr_gmac(struct msk_if_softc *sc_if)
3210{
3211	struct msk_softc *sc;
3212	uint8_t status;
3213
3214	sc = sc_if->msk_softc;
3215	status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3216
3217	/* GMAC Rx FIFO overrun. */
3218	if ((status & GM_IS_RX_FF_OR) != 0) {
3219		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3220		    GMF_CLI_RX_FO);
3221		device_printf(sc_if->msk_if_dev, "Rx FIFO overrun!\n");
3222	}
3223	/* GMAC Tx FIFO underrun. */
3224	if ((status & GM_IS_TX_FF_UR) != 0) {
3225		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3226		    GMF_CLI_TX_FU);
3227		device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
3228		/*
3229		 * XXX
3230		 * In case of Tx underrun, we may need to flush/reset
3231		 * Tx MAC but that would also require resynchronization
3232		 * with status LEs. Reintializing status LEs would
3233		 * affect other port in dual MAC configuration so it
3234		 * should be avoided as possible as we can.
3235		 * Due to lack of documentation it's all vague guess but
3236		 * it needs more investigation.
3237		 */
3238	}
3239}
3240
3241static void
3242msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
3243{
3244	struct msk_softc *sc;
3245
3246	sc = sc_if->msk_softc;
3247	if ((status & Y2_IS_PAR_RD1) != 0) {
3248		device_printf(sc_if->msk_if_dev,
3249		    "RAM buffer read parity error\n");
3250		/* Clear IRQ. */
3251		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3252		    RI_CLR_RD_PERR);
3253	}
3254	if ((status & Y2_IS_PAR_WR1) != 0) {
3255		device_printf(sc_if->msk_if_dev,
3256		    "RAM buffer write parity error\n");
3257		/* Clear IRQ. */
3258		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3259		    RI_CLR_WR_PERR);
3260	}
3261	if ((status & Y2_IS_PAR_MAC1) != 0) {
3262		device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
3263		/* Clear IRQ. */
3264		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3265		    GMF_CLI_TX_PE);
3266	}
3267	if ((status & Y2_IS_PAR_RX1) != 0) {
3268		device_printf(sc_if->msk_if_dev, "Rx parity error\n");
3269		/* Clear IRQ. */
3270		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
3271	}
3272	if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
3273		device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
3274		/* Clear IRQ. */
3275		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
3276	}
3277}
3278
3279static void
3280msk_intr_hwerr(struct msk_softc *sc)
3281{
3282	uint32_t status;
3283	uint32_t tlphead[4];
3284
3285	status = CSR_READ_4(sc, B0_HWE_ISRC);
3286	/* Time Stamp timer overflow. */
3287	if ((status & Y2_IS_TIST_OV) != 0)
3288		CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
3289	if ((status & Y2_IS_PCI_NEXP) != 0) {
3290		/*
3291		 * PCI Express Error occured which is not described in PEX
3292		 * spec.
3293		 * This error is also mapped either to Master Abort(
3294		 * Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
3295		 * can only be cleared there.
3296                 */
3297		device_printf(sc->msk_dev,
3298		    "PCI Express protocol violation error\n");
3299	}
3300
3301	if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
3302		uint16_t v16;
3303
3304		if ((status & Y2_IS_MST_ERR) != 0)
3305			device_printf(sc->msk_dev,
3306			    "unexpected IRQ Status error\n");
3307		else
3308			device_printf(sc->msk_dev,
3309			    "unexpected IRQ Master error\n");
3310		/* Reset all bits in the PCI status register. */
3311		v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
3312		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3313		pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
3314		    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
3315		    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
3316		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3317	}
3318
3319	/* Check for PCI Express Uncorrectable Error. */
3320	if ((status & Y2_IS_PCI_EXP) != 0) {
3321		uint32_t v32;
3322
3323		/*
3324		 * On PCI Express bus bridges are called root complexes (RC).
3325		 * PCI Express errors are recognized by the root complex too,
3326		 * which requests the system to handle the problem. After
3327		 * error occurence it may be that no access to the adapter
3328		 * may be performed any longer.
3329		 */
3330
3331		v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
3332		if ((v32 & PEX_UNSUP_REQ) != 0) {
3333			/* Ignore unsupported request error. */
3334			device_printf(sc->msk_dev,
3335			    "Uncorrectable PCI Express error\n");
3336		}
3337		if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
3338			int i;
3339
3340			/* Get TLP header form Log Registers. */
3341			for (i = 0; i < 4; i++)
3342				tlphead[i] = CSR_PCI_READ_4(sc,
3343				    PEX_HEADER_LOG + i * 4);
3344			/* Check for vendor defined broadcast message. */
3345			if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
3346				sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
3347				CSR_WRITE_4(sc, B0_HWE_IMSK,
3348				    sc->msk_intrhwemask);
3349				CSR_READ_4(sc, B0_HWE_IMSK);
3350			}
3351		}
3352		/* Clear the interrupt. */
3353		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3354		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
3355		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3356	}
3357
3358	if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
3359		msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
3360	if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
3361		msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
3362}
3363
3364static __inline void
3365msk_rxput(struct msk_if_softc *sc_if)
3366{
3367	struct msk_softc *sc;
3368
3369	sc = sc_if->msk_softc;
3370	if (sc_if->msk_framesize >(MCLBYTES - ETHER_HDR_LEN))
3371		bus_dmamap_sync(
3372		    sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
3373		    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
3374		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3375	else
3376		bus_dmamap_sync(
3377		    sc_if->msk_cdata.msk_rx_ring_tag,
3378		    sc_if->msk_cdata.msk_rx_ring_map,
3379		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3380	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
3381	    PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
3382}
3383
3384static int
3385msk_handle_events(struct msk_softc *sc)
3386{
3387	struct msk_if_softc *sc_if;
3388	int rxput[2];
3389	struct msk_stat_desc *sd;
3390	uint32_t control, status;
3391	int cons, idx, len, port, rxprog;
3392
3393	idx = CSR_READ_2(sc, STAT_PUT_IDX);
3394	if (idx == sc->msk_stat_cons)
3395		return (0);
3396
3397	/* Sync status LEs. */
3398	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
3399	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3400	/* XXX Sync Rx LEs here. */
3401
3402	rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
3403
3404	rxprog = 0;
3405	for (cons = sc->msk_stat_cons; cons != idx;) {
3406		sd = &sc->msk_stat_ring[cons];
3407		control = le32toh(sd->msk_control);
3408		if ((control & HW_OWNER) == 0)
3409			break;
3410		/*
3411		 * Marvell's FreeBSD driver updates status LE after clearing
3412		 * HW_OWNER. However we don't have a way to sync single LE
3413		 * with bus_dma(9) API. bus_dma(9) provides a way to sync
3414		 * an entire DMA map. So don't sync LE until we have a better
3415		 * way to sync LEs.
3416		 */
3417		control &= ~HW_OWNER;
3418		sd->msk_control = htole32(control);
3419		status = le32toh(sd->msk_status);
3420		len = control & STLE_LEN_MASK;
3421		port = (control >> 16) & 0x01;
3422		sc_if = sc->msk_if[port];
3423		if (sc_if == NULL) {
3424			device_printf(sc->msk_dev, "invalid port opcode "
3425			    "0x%08x\n", control & STLE_OP_MASK);
3426			continue;
3427		}
3428
3429		switch (control & STLE_OP_MASK) {
3430		case OP_RXVLAN:
3431			sc_if->msk_vtag = ntohs(len);
3432			break;
3433		case OP_RXCHKSVLAN:
3434			sc_if->msk_vtag = ntohs(len);
3435			break;
3436		case OP_RXSTAT:
3437			if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN))
3438				msk_jumbo_rxeof(sc_if, status, len);
3439			else
3440				msk_rxeof(sc_if, status, len);
3441			rxprog++;
3442			/*
3443			 * Because there is no way to sync single Rx LE
3444			 * put the DMA sync operation off until the end of
3445			 * event processing.
3446			 */
3447			rxput[port]++;
3448			/* Update prefetch unit if we've passed water mark. */
3449			if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
3450				msk_rxput(sc_if);
3451				rxput[port] = 0;
3452			}
3453			break;
3454		case OP_TXINDEXLE:
3455			if (sc->msk_if[MSK_PORT_A] != NULL)
3456				msk_txeof(sc->msk_if[MSK_PORT_A],
3457				    status & STLE_TXA1_MSKL);
3458			if (sc->msk_if[MSK_PORT_B] != NULL)
3459				msk_txeof(sc->msk_if[MSK_PORT_B],
3460				    ((status & STLE_TXA2_MSKL) >>
3461				    STLE_TXA2_SHIFTL) |
3462				    ((len & STLE_TXA2_MSKH) <<
3463				    STLE_TXA2_SHIFTH));
3464			break;
3465		default:
3466			device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
3467			    control & STLE_OP_MASK);
3468			break;
3469		}
3470		MSK_INC(cons, MSK_STAT_RING_CNT);
3471		if (rxprog > sc->msk_process_limit)
3472			break;
3473	}
3474
3475	sc->msk_stat_cons = cons;
3476	/* XXX We should sync status LEs here. See above notes. */
3477
3478	if (rxput[MSK_PORT_A] > 0)
3479		msk_rxput(sc->msk_if[MSK_PORT_A]);
3480	if (rxput[MSK_PORT_B] > 0)
3481		msk_rxput(sc->msk_if[MSK_PORT_B]);
3482
3483	return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
3484}
3485
3486static void
3487msk_intr(void *xsc)
3488{
3489	struct msk_softc *sc;
3490	uint32_t status;
3491
3492	sc = xsc;
3493	status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3494	/* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
3495	if (status == 0 || status == 0xffffffff) {
3496		CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3497		return;
3498	}
3499
3500	taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task);
3501}
3502
3503static void
3504msk_int_task(void *arg, int pending)
3505{
3506	struct msk_softc *sc;
3507	struct msk_if_softc *sc_if0, *sc_if1;
3508	struct ifnet *ifp0, *ifp1;
3509	uint32_t status;
3510	int domore;
3511
3512	sc = arg;
3513	MSK_LOCK(sc);
3514
3515	/* Get interrupt source. */
3516	status = CSR_READ_4(sc, B0_ISRC);
3517	if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 ||
3518	    (status & sc->msk_intrmask) == 0)
3519		goto done;
3520
3521	sc_if0 = sc->msk_if[MSK_PORT_A];
3522	sc_if1 = sc->msk_if[MSK_PORT_B];
3523	ifp0 = ifp1 = NULL;
3524	if (sc_if0 != NULL)
3525		ifp0 = sc_if0->msk_ifp;
3526	if (sc_if1 != NULL)
3527		ifp1 = sc_if1->msk_ifp;
3528
3529	if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3530		msk_intr_phy(sc_if0);
3531	if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3532		msk_intr_phy(sc_if1);
3533	if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3534		msk_intr_gmac(sc_if0);
3535	if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3536		msk_intr_gmac(sc_if1);
3537	if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3538		device_printf(sc->msk_dev, "Rx descriptor error\n");
3539		sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3540		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3541		CSR_READ_4(sc, B0_IMSK);
3542	}
3543        if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3544		device_printf(sc->msk_dev, "Tx descriptor error\n");
3545		sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3546		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3547		CSR_READ_4(sc, B0_IMSK);
3548	}
3549	if ((status & Y2_IS_HW_ERR) != 0)
3550		msk_intr_hwerr(sc);
3551
3552	domore = msk_handle_events(sc);
3553	if ((status & Y2_IS_STAT_BMU) != 0)
3554		CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3555
3556	if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3557	    !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3558		taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task);
3559	if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3560	    !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3561		taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task);
3562
3563	if (domore > 0) {
3564		taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task);
3565		MSK_UNLOCK(sc);
3566		return;
3567	}
3568done:
3569	MSK_UNLOCK(sc);
3570
3571	/* Reenable interrupts. */
3572	CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3573}
3574
3575static void
3576msk_init(void *xsc)
3577{
3578	struct msk_if_softc *sc_if = xsc;
3579
3580	MSK_IF_LOCK(sc_if);
3581	msk_init_locked(sc_if);
3582	MSK_IF_UNLOCK(sc_if);
3583}
3584
3585static void
3586msk_init_locked(struct msk_if_softc *sc_if)
3587{
3588	struct msk_softc *sc;
3589	struct ifnet *ifp;
3590	struct mii_data	 *mii;
3591	uint16_t eaddr[ETHER_ADDR_LEN / 2];
3592	uint16_t gmac;
3593	int error, i;
3594
3595	MSK_IF_LOCK_ASSERT(sc_if);
3596
3597	ifp = sc_if->msk_ifp;
3598	sc = sc_if->msk_softc;
3599	mii = device_get_softc(sc_if->msk_miibus);
3600
3601	error = 0;
3602	/* Cancel pending I/O and free all Rx/Tx buffers. */
3603	msk_stop(sc_if);
3604
3605	sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN +
3606	    ETHER_VLAN_ENCAP_LEN;
3607
3608	/*
3609	 * Initialize GMAC first.
3610	 * Without this initialization, Rx MAC did not work as expected
3611	 * and Rx MAC garbled status LEs and it resulted in out-of-order
3612	 * or duplicated frame delivery which in turn showed very poor
3613	 * Rx performance.(I had to write a packet analysis code that
3614	 * could be embeded in driver to diagnose this issue.)
3615	 * I've spent almost 2 months to fix this issue. If I have had
3616	 * datasheet for Yukon II I wouldn't have encountered this. :-(
3617	 */
3618	gmac = GM_GPCR_SPEED_100 | GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL;
3619	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
3620
3621	/* Dummy read the Interrupt Source Register. */
3622	CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3623
3624	/* Set MIB Clear Counter Mode. */
3625	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
3626	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
3627	/* Read all MIB Counters with Clear Mode set. */
3628	for (i = 0; i < GM_MIB_CNT_SIZE; i++)
3629		GMAC_READ_2(sc, sc_if->msk_port, GM_MIB_CNT_BASE + 8 * i);
3630	/* Clear MIB Clear Counter Mode. */
3631	gmac &= ~GM_PAR_MIB_CLR;
3632	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
3633
3634	/* Disable FCS. */
3635	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);
3636
3637	/* Setup Transmit Control Register. */
3638	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
3639
3640	/* Setup Transmit Flow Control Register. */
3641	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
3642
3643	/* Setup Transmit Parameter Register. */
3644	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
3645	    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
3646	    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
3647
3648	gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
3649	    GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
3650
3651	if (sc_if->msk_framesize > MSK_MAX_FRAMELEN)
3652		gmac |= GM_SMOD_JUMBO_ENA;
3653	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
3654
3655	/* Set station address. */
3656        bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
3657        for (i = 0; i < ETHER_ADDR_LEN /2; i++)
3658		GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4,
3659		    eaddr[i]);
3660        for (i = 0; i < ETHER_ADDR_LEN /2; i++)
3661		GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4,
3662		    eaddr[i]);
3663
3664	/* Disable interrupts for counter overflows. */
3665	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
3666	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
3667	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
3668
3669	/* Configure Rx MAC FIFO. */
3670	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3671	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
3672	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3673	    GMF_OPER_ON | GMF_RX_F_FL_ON);
3674
3675	/* Set promiscuous mode. */
3676	msk_setpromisc(sc_if);
3677
3678	/* Set multicast filter. */
3679	msk_setmulti(sc_if);
3680
3681	/* Flush Rx MAC FIFO on any flow control or error. */
3682	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
3683	    GMR_FS_ANY_ERR);
3684
3685	/* Set Rx FIFO flush threshold to 64 bytes. */
3686	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR),
3687	    RX_GMF_FL_THR_DEF);
3688
3689	/* Configure Tx MAC FIFO. */
3690	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3691	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
3692	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
3693
3694	/* Configure hardware VLAN tag insertion/stripping. */
3695	msk_setvlan(sc_if, ifp);
3696
3697	/* XXX It seems STFW is requried for all cases. */
3698	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), TX_STFW_ENA);
3699
3700	if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
3701		/* Set Rx Pause threshould. */
3702		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
3703		    MSK_ECU_LLPP);
3704		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
3705		    MSK_ECU_ULPP);
3706		if (sc_if->msk_framesize > MSK_MAX_FRAMELEN) {
3707			/*
3708			 * Can't sure the following code is needed as Yukon
3709			 * Yukon EC Ultra may not support jumbo frames.
3710			 *
3711			 * Set Tx GMAC FIFO Almost Empty Threshold.
3712			 */
3713			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
3714			    MSK_ECU_AE_THR);
3715			/* Disable Store & Forward mode for Tx. */
3716			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3717			    TX_STFW_DIS);
3718		}
3719	}
3720
3721	/*
3722	 * Disable Force Sync bit and Alloc bit in Tx RAM interface
3723	 * arbiter as we don't use Sync Tx queue.
3724	 */
3725	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
3726	    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
3727	/* Enable the RAM Interface Arbiter. */
3728	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
3729
3730	/* Setup RAM buffer. */
3731	msk_set_rambuffer(sc_if);
3732
3733	/* Disable Tx sync Queue. */
3734	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
3735
3736	/* Setup Tx Queue Bus Memory Interface. */
3737	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
3738	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
3739	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
3740	/* Increase IPID when hardware generates IP packets in TSO. */
3741	if ((ifp->if_hwassist & CSUM_TSO) != 0)
3742		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3743		    BMU_TX_IPIDINCR_ON);
3744	else
3745		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3746		    BMU_TX_IPIDINCR_OFF);
3747	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
3748	if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3749	    sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
3750		/* Fix for Yukon-EC Ultra: set BMU FIFO level */
3751		CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), MSK_ECU_TXFF_LEV);
3752	}
3753
3754	/* Setup Rx Queue Bus Memory Interface. */
3755	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
3756	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
3757	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
3758	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
3759        if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3760	    sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
3761		/* MAC Rx RAM Read is controlled by hardware. */
3762                CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
3763	}
3764
3765	msk_set_prefetch(sc, sc_if->msk_txq,
3766	    sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
3767	msk_init_tx_ring(sc_if);
3768
3769	/* Disable Rx checksum offload and RSS hash. */
3770	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3771	    BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);
3772	if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
3773		msk_set_prefetch(sc, sc_if->msk_rxq,
3774		    sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
3775		    MSK_JUMBO_RX_RING_CNT - 1);
3776		error = msk_init_jumbo_rx_ring(sc_if);
3777	 } else {
3778		msk_set_prefetch(sc, sc_if->msk_rxq,
3779		    sc_if->msk_rdata.msk_rx_ring_paddr,
3780		    MSK_RX_RING_CNT - 1);
3781		error = msk_init_rx_ring(sc_if);
3782	}
3783	if (error != 0) {
3784		device_printf(sc_if->msk_if_dev,
3785		    "initialization failed: no memory for Rx buffers\n");
3786		msk_stop(sc_if);
3787		return;
3788	}
3789
3790	/* Configure interrupt handling. */
3791	if (sc_if->msk_port == MSK_PORT_A) {
3792		sc->msk_intrmask |= Y2_IS_PORT_A;
3793		sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
3794	} else {
3795		sc->msk_intrmask |= Y2_IS_PORT_B;
3796		sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
3797	}
3798	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3799	CSR_READ_4(sc, B0_HWE_IMSK);
3800	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3801	CSR_READ_4(sc, B0_IMSK);
3802
3803	sc_if->msk_link = 0;
3804	mii_mediachg(mii);
3805
3806	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3807	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3808
3809	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3810}
3811
3812static void
3813msk_set_rambuffer(struct msk_if_softc *sc_if)
3814{
3815	struct msk_softc *sc;
3816	int ltpp, utpp;
3817
3818	sc = sc_if->msk_softc;
3819
3820	/* Setup Rx Queue. */
3821	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
3822	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
3823	    sc->msk_rxqstart[sc_if->msk_port] / 8);
3824	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
3825	    sc->msk_rxqend[sc_if->msk_port] / 8);
3826	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
3827	    sc->msk_rxqstart[sc_if->msk_port] / 8);
3828	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
3829	    sc->msk_rxqstart[sc_if->msk_port] / 8);
3830
3831	utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3832	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
3833	ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3834	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
3835	if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
3836		ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
3837	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
3838	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
3839	/* Set Rx priority(RB_RX_UTHP/RB_RX_LTHP) thresholds? */
3840
3841	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
3842	CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));
3843
3844	/* Setup Tx Queue. */
3845	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
3846	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
3847	    sc->msk_txqstart[sc_if->msk_port] / 8);
3848	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
3849	    sc->msk_txqend[sc_if->msk_port] / 8);
3850	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
3851	    sc->msk_txqstart[sc_if->msk_port] / 8);
3852	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
3853	    sc->msk_txqstart[sc_if->msk_port] / 8);
3854	/* Enable Store & Forward for Tx side. */
3855	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
3856	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
3857	CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
3858}
3859
3860static void
3861msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
3862    uint32_t count)
3863{
3864
3865	/* Reset the prefetch unit. */
3866	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3867	    PREF_UNIT_RST_SET);
3868	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3869	    PREF_UNIT_RST_CLR);
3870	/* Set LE base address. */
3871	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
3872	    MSK_ADDR_LO(addr));
3873	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
3874	    MSK_ADDR_HI(addr));
3875	/* Set the list last index. */
3876	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
3877	    count);
3878	/* Turn on prefetch unit. */
3879	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3880	    PREF_UNIT_OP_ON);
3881	/* Dummy read to ensure write. */
3882	CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
3883}
3884
3885static void
3886msk_stop(struct msk_if_softc *sc_if)
3887{
3888	struct msk_softc *sc;
3889	struct msk_txdesc *txd;
3890	struct msk_rxdesc *rxd;
3891	struct msk_rxdesc *jrxd;
3892	struct ifnet *ifp;
3893	uint32_t val;
3894	int i;
3895
3896	MSK_IF_LOCK_ASSERT(sc_if);
3897	sc = sc_if->msk_softc;
3898	ifp = sc_if->msk_ifp;
3899
3900	callout_stop(&sc_if->msk_tick_ch);
3901	sc_if->msk_watchdog_timer = 0;
3902
3903	/* Disable interrupts. */
3904	if (sc_if->msk_port == MSK_PORT_A) {
3905		sc->msk_intrmask &= ~Y2_IS_PORT_A;
3906		sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
3907	} else {
3908		sc->msk_intrmask &= ~Y2_IS_PORT_B;
3909		sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
3910	}
3911	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3912	CSR_READ_4(sc, B0_HWE_IMSK);
3913	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3914	CSR_READ_4(sc, B0_IMSK);
3915
3916	/* Disable Tx/Rx MAC. */
3917	val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3918	val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
3919	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
3920	/* Read again to ensure writing. */
3921	GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3922
3923	/* Stop Tx BMU. */
3924	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
3925	val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
3926	for (i = 0; i < MSK_TIMEOUT; i++) {
3927		if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
3928			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3929			    BMU_STOP);
3930			CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
3931		} else
3932			break;
3933		DELAY(1);
3934	}
3935	if (i == MSK_TIMEOUT)
3936		device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
3937	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
3938	    RB_RST_SET | RB_DIS_OP_MD);
3939
3940	/* Disable all GMAC interrupt. */
3941	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
3942	/* Disable PHY interrupt. */
3943	if (sc->msk_marvell_phy)
3944		msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
3945
3946	/* Disable the RAM Interface Arbiter. */
3947	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);
3948
3949	/* Reset the PCI FIFO of the async Tx queue */
3950	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3951	    BMU_RST_SET | BMU_FIFO_RST);
3952
3953	/* Reset the Tx prefetch units. */
3954	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
3955	    PREF_UNIT_RST_SET);
3956
3957	/* Reset the RAM Buffer async Tx queue. */
3958	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);
3959
3960	/* Reset Tx MAC FIFO. */
3961	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3962	/* Set Pause Off. */
3963	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
3964
3965	/*
3966	 * The Rx Stop command will not work for Yukon-2 if the BMU does not
3967	 * reach the end of packet and since we can't make sure that we have
3968	 * incoming data, we must reset the BMU while it is not during a DMA
3969	 * transfer. Since it is possible that the Rx path is still active,
3970	 * the Rx RAM buffer will be stopped first, so any possible incoming
3971	 * data will not trigger a DMA. After the RAM buffer is stopped, the
3972	 * BMU is polled until any DMA in progress is ended and only then it
3973	 * will be reset.
3974	 */
3975
3976	/* Disable the RAM Buffer receive queue. */
3977	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
3978	for (i = 0; i < MSK_TIMEOUT; i++) {
3979		if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
3980		    CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
3981			break;
3982		DELAY(1);
3983	}
3984	if (i == MSK_TIMEOUT)
3985		device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
3986	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3987	    BMU_RST_SET | BMU_FIFO_RST);
3988	/* Reset the Rx prefetch unit. */
3989	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
3990	    PREF_UNIT_RST_SET);
3991	/* Reset the RAM Buffer receive queue. */
3992	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
3993	/* Reset Rx MAC FIFO. */
3994	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3995
3996	/* Free Rx and Tx mbufs still in the queues. */
3997	for (i = 0; i < MSK_RX_RING_CNT; i++) {
3998		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
3999		if (rxd->rx_m != NULL) {
4000			bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
4001			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
4002			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
4003			    rxd->rx_dmamap);
4004			m_freem(rxd->rx_m);
4005			rxd->rx_m = NULL;
4006		}
4007	}
4008	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
4009		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
4010		if (jrxd->rx_m != NULL) {
4011			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
4012			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
4013			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
4014			    jrxd->rx_dmamap);
4015			m_freem(jrxd->rx_m);
4016			jrxd->rx_m = NULL;
4017		}
4018	}
4019	for (i = 0; i < MSK_TX_RING_CNT; i++) {
4020		txd = &sc_if->msk_cdata.msk_txdesc[i];
4021		if (txd->tx_m != NULL) {
4022			bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
4023			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
4024			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
4025			    txd->tx_dmamap);
4026			m_freem(txd->tx_m);
4027			txd->tx_m = NULL;
4028		}
4029	}
4030
4031	/*
4032	 * Mark the interface down.
4033	 */
4034	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4035	sc_if->msk_link = 0;
4036}
4037
4038static int
4039sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
4040{
4041	int error, value;
4042
4043	if (!arg1)
4044		return (EINVAL);
4045	value = *(int *)arg1;
4046	error = sysctl_handle_int(oidp, &value, 0, req);
4047	if (error || !req->newptr)
4048		return (error);
4049	if (value < low || value > high)
4050		return (EINVAL);
4051	*(int *)arg1 = value;
4052
4053	return (0);
4054}
4055
4056static int
4057sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
4058{
4059
4060	return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
4061	    MSK_PROC_MAX));
4062}
4063