if_msk.c revision 185244
1/******************************************************************************
2 *
3 * Name   : sky2.c
4 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
5 * Version: $Revision: 1.23 $
6 * Date   : $Date: 2005/12/22 09:04:11 $
7 * Purpose: Main driver source file
8 *
9 *****************************************************************************/
10
11/******************************************************************************
12 *
13 *	LICENSE:
14 *	Copyright (C) Marvell International Ltd. and/or its affiliates
15 *
16 *	The computer program files contained in this folder ("Files")
17 *	are provided to you under the BSD-type license terms provided
18 *	below, and any use of such Files and any derivative works
19 *	thereof created by you shall be governed by the following terms
20 *	and conditions:
21 *
22 *	- Redistributions of source code must retain the above copyright
23 *	  notice, this list of conditions and the following disclaimer.
24 *	- Redistributions in binary form must reproduce the above
25 *	  copyright notice, this list of conditions and the following
26 *	  disclaimer in the documentation and/or other materials provided
27 *	  with the distribution.
28 *	- Neither the name of Marvell nor the names of its contributors
29 *	  may be used to endorse or promote products derived from this
30 *	  software without specific prior written permission.
31 *
32 *	THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
33 *	"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
34 *	LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 *	FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
36 *	COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
37 *	INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
38 *	BUT NOT LIMITED TO, PROCUREMENT OF  SUBSTITUTE GOODS OR SERVICES;
39 *	LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40 *	HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
41 *	STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42 *	ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
43 *	OF THE POSSIBILITY OF SUCH DAMAGE.
44 *	/LICENSE
45 *
46 *****************************************************************************/
47
48/*-
49 * Copyright (c) 1997, 1998, 1999, 2000
50 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
51 *
52 * Redistribution and use in source and binary forms, with or without
53 * modification, are permitted provided that the following conditions
54 * are met:
55 * 1. Redistributions of source code must retain the above copyright
56 *    notice, this list of conditions and the following disclaimer.
57 * 2. Redistributions in binary form must reproduce the above copyright
58 *    notice, this list of conditions and the following disclaimer in the
59 *    documentation and/or other materials provided with the distribution.
60 * 3. All advertising materials mentioning features or use of this software
61 *    must display the following acknowledgement:
62 *	This product includes software developed by Bill Paul.
63 * 4. Neither the name of the author nor the names of any co-contributors
64 *    may be used to endorse or promote products derived from this software
65 *    without specific prior written permission.
66 *
67 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
68 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
69 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
70 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
71 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
72 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
73 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
74 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
75 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
76 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
77 * THE POSSIBILITY OF SUCH DAMAGE.
78 */
79/*-
80 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
81 *
82 * Permission to use, copy, modify, and distribute this software for any
83 * purpose with or without fee is hereby granted, provided that the above
84 * copyright notice and this permission notice appear in all copies.
85 *
86 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
87 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
88 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
89 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
90 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
91 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
92 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
93 */
94
95/*
96 * Device driver for the Marvell Yukon II Ethernet controller.
97 * Due to lack of documentation, this driver is based on the code from
98 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
99 */
100
101#include <sys/cdefs.h>
102__FBSDID("$FreeBSD: head/sys/dev/msk/if_msk.c 185244 2008-11-24 02:21:50Z yongari $");
103
104#include <sys/param.h>
105#include <sys/systm.h>
106#include <sys/bus.h>
107#include <sys/endian.h>
108#include <sys/mbuf.h>
109#include <sys/malloc.h>
110#include <sys/kernel.h>
111#include <sys/module.h>
112#include <sys/socket.h>
113#include <sys/sockio.h>
114#include <sys/queue.h>
115#include <sys/sysctl.h>
116#include <sys/taskqueue.h>
117
118#include <net/bpf.h>
119#include <net/ethernet.h>
120#include <net/if.h>
121#include <net/if_arp.h>
122#include <net/if_dl.h>
123#include <net/if_media.h>
124#include <net/if_types.h>
125#include <net/if_vlan_var.h>
126
127#include <netinet/in.h>
128#include <netinet/in_systm.h>
129#include <netinet/ip.h>
130#include <netinet/tcp.h>
131#include <netinet/udp.h>
132
133#include <machine/bus.h>
134#include <machine/in_cksum.h>
135#include <machine/resource.h>
136#include <sys/rman.h>
137
138#include <dev/mii/mii.h>
139#include <dev/mii/miivar.h>
140#include <dev/mii/brgphyreg.h>
141
142#include <dev/pci/pcireg.h>
143#include <dev/pci/pcivar.h>
144
145#include <dev/msk/if_mskreg.h>
146
147MODULE_DEPEND(msk, pci, 1, 1, 1);
148MODULE_DEPEND(msk, ether, 1, 1, 1);
149MODULE_DEPEND(msk, miibus, 1, 1, 1);
150
151/* "device miibus" required.  See GENERIC if you get errors here. */
152#include "miibus_if.h"
153
154/* Tunables. */
155static int msi_disable = 0;
156TUNABLE_INT("hw.msk.msi_disable", &msi_disable);
157static int legacy_intr = 0;
158TUNABLE_INT("hw.msk.legacy_intr", &legacy_intr);
159
160#define MSK_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
161
162/*
163 * Devices supported by this driver.
164 */
165static struct msk_product {
166	uint16_t	msk_vendorid;
167	uint16_t	msk_deviceid;
168	const char	*msk_name;
169} msk_products[] = {
170	{ VENDORID_SK, DEVICEID_SK_YUKON2,
171	    "SK-9Sxx Gigabit Ethernet" },
172	{ VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
173	    "SK-9Exx Gigabit Ethernet"},
174	{ VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
175	    "Marvell Yukon 88E8021CU Gigabit Ethernet" },
176	{ VENDORID_MARVELL, DEVICEID_MRVL_8021X,
177	    "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
178	{ VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
179	    "Marvell Yukon 88E8022CU Gigabit Ethernet" },
180	{ VENDORID_MARVELL, DEVICEID_MRVL_8022X,
181	    "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
182	{ VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
183	    "Marvell Yukon 88E8061CU Gigabit Ethernet" },
184	{ VENDORID_MARVELL, DEVICEID_MRVL_8061X,
185	    "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
186	{ VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
187	    "Marvell Yukon 88E8062CU Gigabit Ethernet" },
188	{ VENDORID_MARVELL, DEVICEID_MRVL_8062X,
189	    "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
190	{ VENDORID_MARVELL, DEVICEID_MRVL_8035,
191	    "Marvell Yukon 88E8035 Gigabit Ethernet" },
192	{ VENDORID_MARVELL, DEVICEID_MRVL_8036,
193	    "Marvell Yukon 88E8036 Gigabit Ethernet" },
194	{ VENDORID_MARVELL, DEVICEID_MRVL_8038,
195	    "Marvell Yukon 88E8038 Gigabit Ethernet" },
196	{ VENDORID_MARVELL, DEVICEID_MRVL_8039,
197	    "Marvell Yukon 88E8039 Gigabit Ethernet" },
198	{ VENDORID_MARVELL, DEVICEID_MRVL_4361,
199	    "Marvell Yukon 88E8050 Gigabit Ethernet" },
200	{ VENDORID_MARVELL, DEVICEID_MRVL_4360,
201	    "Marvell Yukon 88E8052 Gigabit Ethernet" },
202	{ VENDORID_MARVELL, DEVICEID_MRVL_4362,
203	    "Marvell Yukon 88E8053 Gigabit Ethernet" },
204	{ VENDORID_MARVELL, DEVICEID_MRVL_4363,
205	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
206	{ VENDORID_MARVELL, DEVICEID_MRVL_4364,
207	    "Marvell Yukon 88E8056 Gigabit Ethernet" },
208	{ VENDORID_MARVELL, DEVICEID_MRVL_436A,
209	    "Marvell Yukon 88E8058 Gigabit Ethernet" },
210	{ VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
211	    "D-Link 550SX Gigabit Ethernet" },
212	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
213	    "D-Link 560T Gigabit Ethernet" }
214};
215
216static const char *model_name[] = {
217	"Yukon XL",
218        "Yukon EC Ultra",
219        "Yukon Unknown",
220        "Yukon EC",
221        "Yukon FE"
222};
223
224static int mskc_probe(device_t);
225static int mskc_attach(device_t);
226static int mskc_detach(device_t);
227static int mskc_shutdown(device_t);
228static int mskc_setup_rambuffer(struct msk_softc *);
229static int mskc_suspend(device_t);
230static int mskc_resume(device_t);
231static void mskc_reset(struct msk_softc *);
232
233static int msk_probe(device_t);
234static int msk_attach(device_t);
235static int msk_detach(device_t);
236
237static void msk_tick(void *);
238static void msk_legacy_intr(void *);
239static int msk_intr(void *);
240static void msk_int_task(void *, int);
241static void msk_intr_phy(struct msk_if_softc *);
242static void msk_intr_gmac(struct msk_if_softc *);
243static __inline void msk_rxput(struct msk_if_softc *);
244static int msk_handle_events(struct msk_softc *);
245static void msk_handle_hwerr(struct msk_if_softc *, uint32_t);
246static void msk_intr_hwerr(struct msk_softc *);
247#ifndef __NO_STRICT_ALIGNMENT
248static __inline void msk_fixup_rx(struct mbuf *);
249#endif
250static void msk_rxeof(struct msk_if_softc *, uint32_t, int);
251static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int);
252static void msk_txeof(struct msk_if_softc *, int);
253static int msk_encap(struct msk_if_softc *, struct mbuf **);
254static void msk_tx_task(void *, int);
255static void msk_start(struct ifnet *);
256static int msk_ioctl(struct ifnet *, u_long, caddr_t);
257static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
258static void msk_set_rambuffer(struct msk_if_softc *);
259static void msk_init(void *);
260static void msk_init_locked(struct msk_if_softc *);
261static void msk_stop(struct msk_if_softc *);
262static void msk_watchdog(struct msk_if_softc *);
263static int msk_mediachange(struct ifnet *);
264static void msk_mediastatus(struct ifnet *, struct ifmediareq *);
265static void msk_phy_power(struct msk_softc *, int);
266static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
267static int msk_status_dma_alloc(struct msk_softc *);
268static void msk_status_dma_free(struct msk_softc *);
269static int msk_txrx_dma_alloc(struct msk_if_softc *);
270static void msk_txrx_dma_free(struct msk_if_softc *);
271static void *msk_jalloc(struct msk_if_softc *);
272static void msk_jfree(void *, void *);
273static int msk_init_rx_ring(struct msk_if_softc *);
274static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
275static void msk_init_tx_ring(struct msk_if_softc *);
276static __inline void msk_discard_rxbuf(struct msk_if_softc *, int);
277static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
278static int msk_newbuf(struct msk_if_softc *, int);
279static int msk_jumbo_newbuf(struct msk_if_softc *, int);
280
281static int msk_phy_readreg(struct msk_if_softc *, int, int);
282static int msk_phy_writereg(struct msk_if_softc *, int, int, int);
283static int msk_miibus_readreg(device_t, int, int);
284static int msk_miibus_writereg(device_t, int, int, int);
285static void msk_miibus_statchg(device_t);
286static void msk_link_task(void *, int);
287
288static void msk_setmulti(struct msk_if_softc *);
289static void msk_setvlan(struct msk_if_softc *, struct ifnet *);
290static void msk_setpromisc(struct msk_if_softc *);
291
292static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
293static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS);
294
295static device_method_t mskc_methods[] = {
296	/* Device interface */
297	DEVMETHOD(device_probe,		mskc_probe),
298	DEVMETHOD(device_attach,	mskc_attach),
299	DEVMETHOD(device_detach,	mskc_detach),
300	DEVMETHOD(device_suspend,	mskc_suspend),
301	DEVMETHOD(device_resume,	mskc_resume),
302	DEVMETHOD(device_shutdown,	mskc_shutdown),
303
304	/* bus interface */
305	DEVMETHOD(bus_print_child,	bus_generic_print_child),
306	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
307
308	{ NULL, NULL }
309};
310
311static driver_t mskc_driver = {
312	"mskc",
313	mskc_methods,
314	sizeof(struct msk_softc)
315};
316
317static devclass_t mskc_devclass;
318
319static device_method_t msk_methods[] = {
320	/* Device interface */
321	DEVMETHOD(device_probe,		msk_probe),
322	DEVMETHOD(device_attach,	msk_attach),
323	DEVMETHOD(device_detach,	msk_detach),
324	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
325
326	/* bus interface */
327	DEVMETHOD(bus_print_child,	bus_generic_print_child),
328	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
329
330	/* MII interface */
331	DEVMETHOD(miibus_readreg,	msk_miibus_readreg),
332	DEVMETHOD(miibus_writereg,	msk_miibus_writereg),
333	DEVMETHOD(miibus_statchg,	msk_miibus_statchg),
334
335	{ NULL, NULL }
336};
337
338static driver_t msk_driver = {
339	"msk",
340	msk_methods,
341	sizeof(struct msk_if_softc)
342};
343
344static devclass_t msk_devclass;
345
346DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, 0, 0);
347DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, 0, 0);
348DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, 0, 0);
349
350static struct resource_spec msk_res_spec_io[] = {
351	{ SYS_RES_IOPORT,	PCIR_BAR(1),	RF_ACTIVE },
352	{ -1,			0,		0 }
353};
354
355static struct resource_spec msk_res_spec_mem[] = {
356	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
357	{ -1,			0,		0 }
358};
359
360static struct resource_spec msk_irq_spec_legacy[] = {
361	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
362	{ -1,			0,		0 }
363};
364
365static struct resource_spec msk_irq_spec_msi[] = {
366	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
367	{ -1,			0,		0 }
368};
369
370static struct resource_spec msk_irq_spec_msi2[] = {
371	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
372	{ SYS_RES_IRQ,		2,		RF_ACTIVE },
373	{ -1,			0,		0 }
374};
375
376static int
377msk_miibus_readreg(device_t dev, int phy, int reg)
378{
379	struct msk_if_softc *sc_if;
380
381	if (phy != PHY_ADDR_MARV)
382		return (0);
383
384	sc_if = device_get_softc(dev);
385
386	return (msk_phy_readreg(sc_if, phy, reg));
387}
388
389static int
390msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
391{
392	struct msk_softc *sc;
393	int i, val;
394
395	sc = sc_if->msk_softc;
396
397        GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
398	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
399
400	for (i = 0; i < MSK_TIMEOUT; i++) {
401		DELAY(1);
402		val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
403		if ((val & GM_SMI_CT_RD_VAL) != 0) {
404			val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
405			break;
406		}
407	}
408
409	if (i == MSK_TIMEOUT) {
410		if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
411		val = 0;
412	}
413
414	return (val);
415}
416
417static int
418msk_miibus_writereg(device_t dev, int phy, int reg, int val)
419{
420	struct msk_if_softc *sc_if;
421
422	if (phy != PHY_ADDR_MARV)
423		return (0);
424
425	sc_if = device_get_softc(dev);
426
427	return (msk_phy_writereg(sc_if, phy, reg, val));
428}
429
430static int
431msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
432{
433	struct msk_softc *sc;
434	int i;
435
436	sc = sc_if->msk_softc;
437
438	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
439        GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
440	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
441	for (i = 0; i < MSK_TIMEOUT; i++) {
442		DELAY(1);
443		if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
444		    GM_SMI_CT_BUSY) == 0)
445			break;
446	}
447	if (i == MSK_TIMEOUT)
448		if_printf(sc_if->msk_ifp, "phy write timeout\n");
449
450	return (0);
451}
452
453static void
454msk_miibus_statchg(device_t dev)
455{
456	struct msk_if_softc *sc_if;
457
458	sc_if = device_get_softc(dev);
459	taskqueue_enqueue(taskqueue_swi, &sc_if->msk_link_task);
460}
461
462static void
463msk_link_task(void *arg, int pending)
464{
465	struct msk_softc *sc;
466	struct msk_if_softc *sc_if;
467	struct mii_data *mii;
468	struct ifnet *ifp;
469	uint32_t gmac;
470
471	sc_if = (struct msk_if_softc *)arg;
472	sc = sc_if->msk_softc;
473
474	MSK_IF_LOCK(sc_if);
475
476	mii = device_get_softc(sc_if->msk_miibus);
477	ifp = sc_if->msk_ifp;
478	if (mii == NULL || ifp == NULL) {
479		MSK_IF_UNLOCK(sc_if);
480		return;
481	}
482
483	if (mii->mii_media_status & IFM_ACTIVE) {
484		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
485			sc_if->msk_link = 1;
486	} else
487		sc_if->msk_link = 0;
488
489	if (sc_if->msk_link != 0) {
490		/* Enable Tx FIFO Underrun. */
491		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
492		    GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
493		/*
494		 * Because mii(4) notify msk(4) that it detected link status
495		 * change, there is no need to enable automatic
496		 * speed/flow-control/duplex updates.
497		 */
498		gmac = GM_GPCR_AU_ALL_DIS;
499		switch (IFM_SUBTYPE(mii->mii_media_active)) {
500		case IFM_1000_SX:
501		case IFM_1000_T:
502			gmac |= GM_GPCR_SPEED_1000;
503			break;
504		case IFM_100_TX:
505			gmac |= GM_GPCR_SPEED_100;
506			break;
507		case IFM_10_T:
508			break;
509		}
510
511		if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
512			gmac |= GM_GPCR_DUP_FULL;
513		/* Disable Rx flow control. */
514		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0)
515			gmac |= GM_GPCR_FC_RX_DIS;
516		/* Disable Tx flow control. */
517		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0)
518			gmac |= GM_GPCR_FC_TX_DIS;
519		gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
520		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
521		/* Read again to ensure writing. */
522		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
523
524		gmac = GMC_PAUSE_ON;
525		if (((mii->mii_media_active & IFM_GMASK) &
526		    (IFM_FLAG0 | IFM_FLAG1)) == 0)
527			gmac = GMC_PAUSE_OFF;
528		/* Diable pause for 10/100 Mbps in half-duplex mode. */
529		if ((((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) &&
530		    (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX ||
531		    IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T))
532			gmac = GMC_PAUSE_OFF;
533		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);
534
535		/* Enable PHY interrupt for FIFO underrun/overflow. */
536		msk_phy_writereg(sc_if, PHY_ADDR_MARV,
537		    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
538	} else {
539		/*
540		 * Link state changed to down.
541		 * Disable PHY interrupts.
542		 */
543		msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
544		/* Disable Rx/Tx MAC. */
545		gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
546		gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
547		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
548		/* Read again to ensure writing. */
549		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
550	}
551
552	MSK_IF_UNLOCK(sc_if);
553}
554
555static void
556msk_setmulti(struct msk_if_softc *sc_if)
557{
558	struct msk_softc *sc;
559	struct ifnet *ifp;
560	struct ifmultiaddr *ifma;
561	uint32_t mchash[2];
562	uint32_t crc;
563	uint16_t mode;
564
565	sc = sc_if->msk_softc;
566
567	MSK_IF_LOCK_ASSERT(sc_if);
568
569	ifp = sc_if->msk_ifp;
570
571	bzero(mchash, sizeof(mchash));
572	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
573	mode |= GM_RXCR_UCF_ENA;
574	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
575		if ((ifp->if_flags & IFF_PROMISC) != 0)
576			mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
577		else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
578			mchash[0] = 0xffff;
579			mchash[1] = 0xffff;
580		}
581	} else {
582		IF_ADDR_LOCK(ifp);
583		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
584			if (ifma->ifma_addr->sa_family != AF_LINK)
585				continue;
586			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
587			    ifma->ifma_addr), ETHER_ADDR_LEN);
588			/* Just want the 6 least significant bits. */
589			crc &= 0x3f;
590			/* Set the corresponding bit in the hash table. */
591			mchash[crc >> 5] |= 1 << (crc & 0x1f);
592		}
593		IF_ADDR_UNLOCK(ifp);
594		mode |= GM_RXCR_MCF_ENA;
595	}
596
597	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
598	    mchash[0] & 0xffff);
599	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
600	    (mchash[0] >> 16) & 0xffff);
601	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
602	    mchash[1] & 0xffff);
603	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
604	    (mchash[1] >> 16) & 0xffff);
605	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
606}
607
608static void
609msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
610{
611	struct msk_softc *sc;
612
613	sc = sc_if->msk_softc;
614	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
615		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
616		    RX_VLAN_STRIP_ON);
617		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
618		    TX_VLAN_TAG_ON);
619	} else {
620		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
621		    RX_VLAN_STRIP_OFF);
622		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
623		    TX_VLAN_TAG_OFF);
624	}
625}
626
627static void
628msk_setpromisc(struct msk_if_softc *sc_if)
629{
630	struct msk_softc *sc;
631	struct ifnet *ifp;
632	uint16_t mode;
633
634	MSK_IF_LOCK_ASSERT(sc_if);
635
636	sc = sc_if->msk_softc;
637	ifp = sc_if->msk_ifp;
638
639	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
640	if (ifp->if_flags & IFF_PROMISC)
641		mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
642	else
643		mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
644	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
645}
646
647static int
648msk_init_rx_ring(struct msk_if_softc *sc_if)
649{
650	struct msk_ring_data *rd;
651	struct msk_rxdesc *rxd;
652	int i, prod;
653
654	MSK_IF_LOCK_ASSERT(sc_if);
655
656	sc_if->msk_cdata.msk_rx_cons = 0;
657	sc_if->msk_cdata.msk_rx_prod = 0;
658	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
659
660	rd = &sc_if->msk_rdata;
661	bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
662	prod = sc_if->msk_cdata.msk_rx_prod;
663	for (i = 0; i < MSK_RX_RING_CNT; i++) {
664		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
665		rxd->rx_m = NULL;
666		rxd->rx_le = &rd->msk_rx_ring[prod];
667		if (msk_newbuf(sc_if, prod) != 0)
668			return (ENOBUFS);
669		MSK_INC(prod, MSK_RX_RING_CNT);
670	}
671
672	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
673	    sc_if->msk_cdata.msk_rx_ring_map,
674	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
675
676	/* Update prefetch unit. */
677	sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
678	CSR_WRITE_2(sc_if->msk_softc,
679	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
680	    sc_if->msk_cdata.msk_rx_prod);
681
682	return (0);
683}
684
685static int
686msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
687{
688	struct msk_ring_data *rd;
689	struct msk_rxdesc *rxd;
690	int i, prod;
691
692	MSK_IF_LOCK_ASSERT(sc_if);
693
694	sc_if->msk_cdata.msk_rx_cons = 0;
695	sc_if->msk_cdata.msk_rx_prod = 0;
696	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
697
698	rd = &sc_if->msk_rdata;
699	bzero(rd->msk_jumbo_rx_ring,
700	    sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
701	prod = sc_if->msk_cdata.msk_rx_prod;
702	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
703		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
704		rxd->rx_m = NULL;
705		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
706		if (msk_jumbo_newbuf(sc_if, prod) != 0)
707			return (ENOBUFS);
708		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
709	}
710
711	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
712	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
713	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
714
715	sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
716	CSR_WRITE_2(sc_if->msk_softc,
717	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
718	    sc_if->msk_cdata.msk_rx_prod);
719
720	return (0);
721}
722
723static void
724msk_init_tx_ring(struct msk_if_softc *sc_if)
725{
726	struct msk_ring_data *rd;
727	struct msk_txdesc *txd;
728	int i;
729
730	sc_if->msk_cdata.msk_tso_mtu = 0;
731	sc_if->msk_cdata.msk_tx_prod = 0;
732	sc_if->msk_cdata.msk_tx_cons = 0;
733	sc_if->msk_cdata.msk_tx_cnt = 0;
734
735	rd = &sc_if->msk_rdata;
736	bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
737	for (i = 0; i < MSK_TX_RING_CNT; i++) {
738		txd = &sc_if->msk_cdata.msk_txdesc[i];
739		txd->tx_m = NULL;
740		txd->tx_le = &rd->msk_tx_ring[i];
741	}
742
743	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
744	    sc_if->msk_cdata.msk_tx_ring_map,
745	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
746}
747
748static __inline void
749msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
750{
751	struct msk_rx_desc *rx_le;
752	struct msk_rxdesc *rxd;
753	struct mbuf *m;
754
755	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
756	m = rxd->rx_m;
757	rx_le = rxd->rx_le;
758	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
759}
760
761static __inline void
762msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int	idx)
763{
764	struct msk_rx_desc *rx_le;
765	struct msk_rxdesc *rxd;
766	struct mbuf *m;
767
768	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
769	m = rxd->rx_m;
770	rx_le = rxd->rx_le;
771	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
772}
773
774static int
775msk_newbuf(struct msk_if_softc *sc_if, int idx)
776{
777	struct msk_rx_desc *rx_le;
778	struct msk_rxdesc *rxd;
779	struct mbuf *m;
780	bus_dma_segment_t segs[1];
781	bus_dmamap_t map;
782	int nsegs;
783
784	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
785	if (m == NULL)
786		return (ENOBUFS);
787
788	m->m_len = m->m_pkthdr.len = MCLBYTES;
789	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
790		m_adj(m, ETHER_ALIGN);
791#ifndef __NO_STRICT_ALIGNMENT
792	else
793		m_adj(m, MSK_RX_BUF_ALIGN);
794#endif
795
796	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag,
797	    sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs,
798	    BUS_DMA_NOWAIT) != 0) {
799		m_freem(m);
800		return (ENOBUFS);
801	}
802	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
803
804	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
805	if (rxd->rx_m != NULL) {
806		bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
807		    BUS_DMASYNC_POSTREAD);
808		bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
809	}
810	map = rxd->rx_dmamap;
811	rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
812	sc_if->msk_cdata.msk_rx_sparemap = map;
813	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
814	    BUS_DMASYNC_PREREAD);
815	rxd->rx_m = m;
816	rx_le = rxd->rx_le;
817	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
818	rx_le->msk_control =
819	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
820
821	return (0);
822}
823
824static int
825msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
826{
827	struct msk_rx_desc *rx_le;
828	struct msk_rxdesc *rxd;
829	struct mbuf *m;
830	bus_dma_segment_t segs[1];
831	bus_dmamap_t map;
832	int nsegs;
833	void *buf;
834
835	MGETHDR(m, M_DONTWAIT, MT_DATA);
836	if (m == NULL)
837		return (ENOBUFS);
838	buf = msk_jalloc(sc_if);
839	if (buf == NULL) {
840		m_freem(m);
841		return (ENOBUFS);
842	}
843	/* Attach the buffer to the mbuf. */
844	MEXTADD(m, buf, MSK_JLEN, msk_jfree, buf,
845	    (struct msk_if_softc *)sc_if, 0, EXT_NET_DRV);
846	if ((m->m_flags & M_EXT) == 0) {
847		m_freem(m);
848		return (ENOBUFS);
849	}
850	m->m_pkthdr.len = m->m_len = MSK_JLEN;
851	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
852		m_adj(m, ETHER_ALIGN);
853#ifndef __NO_STRICT_ALIGNMENT
854	else
855		m_adj(m, MSK_RX_BUF_ALIGN);
856#endif
857
858	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
859	    sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
860	    BUS_DMA_NOWAIT) != 0) {
861		m_freem(m);
862		return (ENOBUFS);
863	}
864	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
865
866	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
867	if (rxd->rx_m != NULL) {
868		bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
869		    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
870		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
871		    rxd->rx_dmamap);
872	}
873	map = rxd->rx_dmamap;
874	rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
875	sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
876	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
877	    BUS_DMASYNC_PREREAD);
878	rxd->rx_m = m;
879	rx_le = rxd->rx_le;
880	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
881	rx_le->msk_control =
882	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
883
884	return (0);
885}
886
887/*
888 * Set media options.
889 */
890static int
891msk_mediachange(struct ifnet *ifp)
892{
893	struct msk_if_softc *sc_if;
894	struct mii_data	*mii;
895
896	sc_if = ifp->if_softc;
897
898	MSK_IF_LOCK(sc_if);
899	mii = device_get_softc(sc_if->msk_miibus);
900	mii_mediachg(mii);
901	MSK_IF_UNLOCK(sc_if);
902
903	return (0);
904}
905
906/*
907 * Report current media status.
908 */
909static void
910msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
911{
912	struct msk_if_softc *sc_if;
913	struct mii_data	*mii;
914
915	sc_if = ifp->if_softc;
916	MSK_IF_LOCK(sc_if);
917	mii = device_get_softc(sc_if->msk_miibus);
918
919	mii_pollstat(mii);
920	MSK_IF_UNLOCK(sc_if);
921	ifmr->ifm_active = mii->mii_media_active;
922	ifmr->ifm_status = mii->mii_media_status;
923}
924
925static int
926msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
927{
928	struct msk_if_softc *sc_if;
929	struct ifreq *ifr;
930	struct mii_data	*mii;
931	int error, mask;
932
933	sc_if = ifp->if_softc;
934	ifr = (struct ifreq *)data;
935	error = 0;
936
937	switch(command) {
938	case SIOCSIFMTU:
939		if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) {
940			error = EINVAL;
941			break;
942		}
943		if (sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_FE &&
944		    ifr->ifr_mtu > MSK_MAX_FRAMELEN) {
945			error = EINVAL;
946			break;
947		}
948		MSK_IF_LOCK(sc_if);
949		ifp->if_mtu = ifr->ifr_mtu;
950		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
951			msk_init_locked(sc_if);
952		MSK_IF_UNLOCK(sc_if);
953		break;
954	case SIOCSIFFLAGS:
955		MSK_IF_LOCK(sc_if);
956		if ((ifp->if_flags & IFF_UP) != 0) {
957			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
958				if (((ifp->if_flags ^ sc_if->msk_if_flags)
959				    & IFF_PROMISC) != 0) {
960					msk_setpromisc(sc_if);
961					msk_setmulti(sc_if);
962				}
963			} else {
964				if (sc_if->msk_detach == 0)
965					msk_init_locked(sc_if);
966			}
967		} else {
968			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
969				msk_stop(sc_if);
970		}
971		sc_if->msk_if_flags = ifp->if_flags;
972		MSK_IF_UNLOCK(sc_if);
973		break;
974	case SIOCADDMULTI:
975	case SIOCDELMULTI:
976		MSK_IF_LOCK(sc_if);
977		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
978			msk_setmulti(sc_if);
979		MSK_IF_UNLOCK(sc_if);
980		break;
981	case SIOCGIFMEDIA:
982	case SIOCSIFMEDIA:
983		mii = device_get_softc(sc_if->msk_miibus);
984		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
985		break;
986	case SIOCSIFCAP:
987		MSK_IF_LOCK(sc_if);
988		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
989		if ((mask & IFCAP_TXCSUM) != 0) {
990			ifp->if_capenable ^= IFCAP_TXCSUM;
991			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
992			    (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
993				ifp->if_hwassist |= MSK_CSUM_FEATURES;
994			else
995				ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
996		}
997		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
998			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
999			msk_setvlan(sc_if, ifp);
1000		}
1001
1002		if ((mask & IFCAP_TSO4) != 0) {
1003			ifp->if_capenable ^= IFCAP_TSO4;
1004			if ((IFCAP_TSO4 & ifp->if_capenable) != 0 &&
1005			    (IFCAP_TSO4 & ifp->if_capabilities) != 0)
1006				ifp->if_hwassist |= CSUM_TSO;
1007			else
1008				ifp->if_hwassist &= ~CSUM_TSO;
1009		}
1010		if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
1011		    sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
1012			/*
1013			 * In Yukon EC Ultra, TSO & checksum offload is not
1014			 * supported for jumbo frame.
1015			 */
1016			ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
1017			ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
1018		}
1019
1020		VLAN_CAPABILITIES(ifp);
1021		MSK_IF_UNLOCK(sc_if);
1022		break;
1023	default:
1024		error = ether_ioctl(ifp, command, data);
1025		break;
1026	}
1027
1028	return (error);
1029}
1030
1031static int
1032mskc_probe(device_t dev)
1033{
1034	struct msk_product *mp;
1035	uint16_t vendor, devid;
1036	int i;
1037
1038	vendor = pci_get_vendor(dev);
1039	devid = pci_get_device(dev);
1040	mp = msk_products;
1041	for (i = 0; i < sizeof(msk_products)/sizeof(msk_products[0]);
1042	    i++, mp++) {
1043		if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
1044			device_set_desc(dev, mp->msk_name);
1045			return (BUS_PROBE_DEFAULT);
1046		}
1047	}
1048
1049	return (ENXIO);
1050}
1051
1052static int
1053mskc_setup_rambuffer(struct msk_softc *sc)
1054{
1055	int next;
1056	int i;
1057
1058	/* Get adapter SRAM size. */
1059	sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
1060	if (bootverbose)
1061		device_printf(sc->msk_dev,
1062		    "RAM buffer size : %dKB\n", sc->msk_ramsize);
1063	if (sc->msk_ramsize == 0)
1064		return (0);
1065
1066	sc->msk_pflags |= MSK_FLAG_RAMBUF;
1067	/*
1068	 * Give receiver 2/3 of memory and round down to the multiple
1069	 * of 1024. Tx/Rx RAM buffer size of Yukon II shoud be multiple
1070	 * of 1024.
1071	 */
1072	sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
1073	sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
1074	for (i = 0, next = 0; i < sc->msk_num_port; i++) {
1075		sc->msk_rxqstart[i] = next;
1076		sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
1077		next = sc->msk_rxqend[i] + 1;
1078		sc->msk_txqstart[i] = next;
1079		sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
1080		next = sc->msk_txqend[i] + 1;
1081		if (bootverbose) {
1082			device_printf(sc->msk_dev,
1083			    "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
1084			    sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
1085			    sc->msk_rxqend[i]);
1086			device_printf(sc->msk_dev,
1087			    "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
1088			    sc->msk_txqsize / 1024, sc->msk_txqstart[i],
1089			    sc->msk_txqend[i]);
1090		}
1091	}
1092
1093	return (0);
1094}
1095
1096static void
1097msk_phy_power(struct msk_softc *sc, int mode)
1098{
1099	uint32_t val;
1100	int i;
1101
1102	switch (mode) {
1103	case MSK_PHY_POWERUP:
1104		/* Switch power to VCC (WA for VAUX problem). */
1105		CSR_WRITE_1(sc, B0_POWER_CTRL,
1106		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
1107		/* Disable Core Clock Division, set Clock Select to 0. */
1108		CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
1109
1110		val = 0;
1111		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1112		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1113			/* Enable bits are inverted. */
1114			val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1115			      Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1116			      Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1117		}
1118		/*
1119		 * Enable PCI & Core Clock, enable clock gating for both Links.
1120		 */
1121		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1122
1123		val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1124		val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
1125		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1126		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1127			/* Deassert Low Power for 1st PHY. */
1128			val |= PCI_Y2_PHY1_COMA;
1129			if (sc->msk_num_port > 1)
1130				val |= PCI_Y2_PHY2_COMA;
1131		} else if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
1132			uint32_t our;
1133
1134			CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
1135
1136			/* Enable all clocks. */
1137			pci_write_config(sc->msk_dev, PCI_OUR_REG_3, 0, 4);
1138			our = pci_read_config(sc->msk_dev, PCI_OUR_REG_4, 4);
1139			our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN|
1140			    PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST);
1141			/* Set all bits to 0 except bits 15..12. */
1142			pci_write_config(sc->msk_dev, PCI_OUR_REG_4, our, 4);
1143			/* Set to default value. */
1144			pci_write_config(sc->msk_dev, PCI_OUR_REG_5, 0, 4);
1145		}
1146		/* Release PHY from PowerDown/COMA mode. */
1147		pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1148		for (i = 0; i < sc->msk_num_port; i++) {
1149			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1150			    GMLC_RST_SET);
1151			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1152			    GMLC_RST_CLR);
1153		}
1154		break;
1155	case MSK_PHY_POWERDOWN:
1156		val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1157		val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
1158		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1159		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1160			val &= ~PCI_Y2_PHY1_COMA;
1161			if (sc->msk_num_port > 1)
1162				val &= ~PCI_Y2_PHY2_COMA;
1163		}
1164		pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1165
1166		val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1167		      Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1168		      Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1169		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1170		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1171			/* Enable bits are inverted. */
1172			val = 0;
1173		}
1174		/*
1175		 * Disable PCI & Core Clock, disable clock gating for
1176		 * both Links.
1177		 */
1178		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1179		CSR_WRITE_1(sc, B0_POWER_CTRL,
1180		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
1181		break;
1182	default:
1183		break;
1184	}
1185}
1186
1187static void
1188mskc_reset(struct msk_softc *sc)
1189{
1190	bus_addr_t addr;
1191	uint16_t status;
1192	uint32_t val;
1193	int i;
1194
1195	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1196
1197	/* Disable ASF. */
1198	if (sc->msk_hw_id < CHIP_ID_YUKON_XL) {
1199		CSR_WRITE_4(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
1200		CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
1201	}
1202	/*
1203	 * Since we disabled ASF, S/W reset is required for Power Management.
1204	 */
1205	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1206	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1207
1208	/* Clear all error bits in the PCI status register. */
1209	status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
1210	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1211
1212	pci_write_config(sc->msk_dev, PCIR_STATUS, status |
1213	    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
1214	    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
1215	CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);
1216
1217	switch (sc->msk_bustype) {
1218	case MSK_PEX_BUS:
1219		/* Clear all PEX errors. */
1220		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
1221		val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
1222		if ((val & PEX_RX_OV) != 0) {
1223			sc->msk_intrmask &= ~Y2_IS_HW_ERR;
1224			sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
1225		}
1226		break;
1227	case MSK_PCI_BUS:
1228	case MSK_PCIX_BUS:
1229		/* Set Cache Line Size to 2(8bytes) if configured to 0. */
1230		val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
1231		if (val == 0)
1232			pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
1233		if (sc->msk_bustype == MSK_PCIX_BUS) {
1234			/* Set Cache Line Size opt. */
1235			val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1236			val |= PCI_CLS_OPT;
1237			pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1238		}
1239		break;
1240	}
1241	/* Set PHY power state. */
1242	msk_phy_power(sc, MSK_PHY_POWERUP);
1243
1244	/* Reset GPHY/GMAC Control */
1245	for (i = 0; i < sc->msk_num_port; i++) {
1246		/* GPHY Control reset. */
1247		CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
1248		CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
1249		/* GMAC Control reset. */
1250		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
1251		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
1252		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
1253	}
1254	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1255
1256	/* LED On. */
1257	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);
1258
1259	/* Clear TWSI IRQ. */
1260	CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);
1261
1262	/* Turn off hardware timer. */
1263	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
1264	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);
1265
1266	/* Turn off descriptor polling. */
1267	CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);
1268
1269	/* Turn off time stamps. */
1270	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
1271	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
1272
1273	/* Configure timeout values. */
1274	for (i = 0; i < sc->msk_num_port; i++) {
1275		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
1276		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
1277		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
1278		    MSK_RI_TO_53);
1279		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
1280		    MSK_RI_TO_53);
1281		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
1282		    MSK_RI_TO_53);
1283		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
1284		    MSK_RI_TO_53);
1285		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
1286		    MSK_RI_TO_53);
1287		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
1288		    MSK_RI_TO_53);
1289		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
1290		    MSK_RI_TO_53);
1291		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
1292		    MSK_RI_TO_53);
1293		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
1294		    MSK_RI_TO_53);
1295		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
1296		    MSK_RI_TO_53);
1297		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
1298		    MSK_RI_TO_53);
1299		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
1300		    MSK_RI_TO_53);
1301	}
1302
1303	/* Disable all interrupts. */
1304	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1305	CSR_READ_4(sc, B0_HWE_IMSK);
1306	CSR_WRITE_4(sc, B0_IMSK, 0);
1307	CSR_READ_4(sc, B0_IMSK);
1308
1309        /*
1310         * On dual port PCI-X card, there is an problem where status
1311         * can be received out of order due to split transactions.
1312         */
1313	if (sc->msk_bustype == MSK_PCIX_BUS && sc->msk_num_port > 1) {
1314		int pcix;
1315		uint16_t pcix_cmd;
1316
1317		if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &pcix) == 0) {
1318			pcix_cmd = pci_read_config(sc->msk_dev, pcix + 2, 2);
1319			/* Clear Max Outstanding Split Transactions. */
1320			pcix_cmd &= ~0x70;
1321			CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1322			pci_write_config(sc->msk_dev, pcix + 2, pcix_cmd, 2);
1323			CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1324		}
1325        }
1326	if (sc->msk_bustype == MSK_PEX_BUS) {
1327		uint16_t v, width;
1328
1329		v = pci_read_config(sc->msk_dev, PEX_DEV_CTRL, 2);
1330		/* Change Max. Read Request Size to 4096 bytes. */
1331		v &= ~PEX_DC_MAX_RRS_MSK;
1332		v |= PEX_DC_MAX_RD_RQ_SIZE(5);
1333		pci_write_config(sc->msk_dev, PEX_DEV_CTRL, v, 2);
1334		width = pci_read_config(sc->msk_dev, PEX_LNK_STAT, 2);
1335		width = (width & PEX_LS_LINK_WI_MSK) >> 4;
1336		v = pci_read_config(sc->msk_dev, PEX_LNK_CAP, 2);
1337		v = (v & PEX_LS_LINK_WI_MSK) >> 4;
1338		if (v != width)
1339			device_printf(sc->msk_dev,
1340			    "negotiated width of link(x%d) != "
1341			    "max. width of link(x%d)\n", width, v);
1342	}
1343
1344	/* Clear status list. */
1345	bzero(sc->msk_stat_ring,
1346	    sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
1347	sc->msk_stat_cons = 0;
1348	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
1349	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1350	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
1351	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
1352	/* Set the status list base address. */
1353	addr = sc->msk_stat_ring_paddr;
1354	CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
1355	CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
1356	/* Set the status list last index. */
1357	CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
1358	if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
1359	    sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
1360		/* WA for dev. #4.3 */
1361		CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
1362		/* WA for dev. #4.18 */
1363		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
1364		CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
1365	} else {
1366		CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
1367		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
1368		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1369		    sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
1370			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
1371		else
1372			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
1373		CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
1374	}
1375	/*
1376	 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
1377	 */
1378	CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));
1379
1380	/* Enable status unit. */
1381	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);
1382
1383	CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
1384	CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
1385	CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
1386}
1387
1388static int
1389msk_probe(device_t dev)
1390{
1391	struct msk_softc *sc;
1392	char desc[100];
1393
1394	sc = device_get_softc(device_get_parent(dev));
1395	/*
1396	 * Not much to do here. We always know there will be
1397	 * at least one GMAC present, and if there are two,
1398	 * mskc_attach() will create a second device instance
1399	 * for us.
1400	 */
1401	snprintf(desc, sizeof(desc),
1402	    "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
1403	    model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
1404	    sc->msk_hw_rev);
1405	device_set_desc_copy(dev, desc);
1406
1407	return (BUS_PROBE_DEFAULT);
1408}
1409
1410static int
1411msk_attach(device_t dev)
1412{
1413	struct msk_softc *sc;
1414	struct msk_if_softc *sc_if;
1415	struct ifnet *ifp;
1416	int i, port, error;
1417	uint8_t eaddr[6];
1418
1419	if (dev == NULL)
1420		return (EINVAL);
1421
1422	error = 0;
1423	sc_if = device_get_softc(dev);
1424	sc = device_get_softc(device_get_parent(dev));
1425	port = *(int *)device_get_ivars(dev);
1426
1427	sc_if->msk_if_dev = dev;
1428	sc_if->msk_port = port;
1429	sc_if->msk_softc = sc;
1430	sc_if->msk_flags = sc->msk_pflags;
1431	sc->msk_if[port] = sc_if;
1432	/* Setup Tx/Rx queue register offsets. */
1433	if (port == MSK_PORT_A) {
1434		sc_if->msk_txq = Q_XA1;
1435		sc_if->msk_txsq = Q_XS1;
1436		sc_if->msk_rxq = Q_R1;
1437	} else {
1438		sc_if->msk_txq = Q_XA2;
1439		sc_if->msk_txsq = Q_XS2;
1440		sc_if->msk_rxq = Q_R2;
1441	}
1442
1443	callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
1444	TASK_INIT(&sc_if->msk_link_task, 0, msk_link_task, sc_if);
1445
1446	if ((error = msk_txrx_dma_alloc(sc_if) != 0))
1447		goto fail;
1448
1449	ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
1450	if (ifp == NULL) {
1451		device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
1452		error = ENOSPC;
1453		goto fail;
1454	}
1455	ifp->if_softc = sc_if;
1456	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1457	ifp->if_mtu = ETHERMTU;
1458	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1459	/*
1460	 * IFCAP_RXCSUM capability is intentionally disabled as the hardware
1461	 * has serious bug in Rx checksum offload for all Yukon II family
1462	 * hardware. It seems there is a workaround to make it work somtimes.
1463	 * However, the workaround also have to check OP code sequences to
1464	 * verify whether the OP code is correct. Sometimes it should compute
1465	 * IP/TCP/UDP checksum in driver in order to verify correctness of
1466	 * checksum computed by hardware. If you have to compute checksum
1467	 * with software to verify the hardware's checksum why have hardware
1468	 * compute the checksum? I think there is no reason to spend time to
1469	 * make Rx checksum offload work on Yukon II hardware.
1470	 */
1471	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
1472	ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO;
1473	ifp->if_capenable = ifp->if_capabilities;
1474	ifp->if_ioctl = msk_ioctl;
1475	ifp->if_start = msk_start;
1476	ifp->if_timer = 0;
1477	ifp->if_watchdog = NULL;
1478	ifp->if_init = msk_init;
1479	IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
1480	ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1;
1481	IFQ_SET_READY(&ifp->if_snd);
1482
1483	TASK_INIT(&sc_if->msk_tx_task, 1, msk_tx_task, ifp);
1484
1485	/*
1486	 * Get station address for this interface. Note that
1487	 * dual port cards actually come with three station
1488	 * addresses: one for each port, plus an extra. The
1489	 * extra one is used by the SysKonnect driver software
1490	 * as a 'virtual' station address for when both ports
1491	 * are operating in failover mode. Currently we don't
1492	 * use this extra address.
1493	 */
1494	MSK_IF_LOCK(sc_if);
1495	for (i = 0; i < ETHER_ADDR_LEN; i++)
1496		eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);
1497
1498	/*
1499	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
1500	 */
1501	MSK_IF_UNLOCK(sc_if);
1502	ether_ifattach(ifp, eaddr);
1503	MSK_IF_LOCK(sc_if);
1504
1505	/*
1506	 * VLAN capability setup
1507	 * Due to Tx checksum offload hardware bugs, msk(4) manually
1508	 * computes checksum for short frames. For VLAN tagged frames
1509	 * this workaround does not work so disable checksum offload
1510	 * for VLAN interface.
1511	 */
1512	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
1513	ifp->if_capenable = ifp->if_capabilities;
1514
1515	/*
1516	 * Tell the upper layer(s) we support long frames.
1517	 * Must appear after the call to ether_ifattach() because
1518	 * ether_ifattach() sets ifi_hdrlen to the default value.
1519	 */
1520        ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1521
1522	sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN +
1523	    ETHER_VLAN_ENCAP_LEN;
1524
1525	/*
1526	 * Do miibus setup.
1527	 */
1528	MSK_IF_UNLOCK(sc_if);
1529	error = mii_phy_probe(dev, &sc_if->msk_miibus, msk_mediachange,
1530	    msk_mediastatus);
1531	if (error != 0) {
1532		device_printf(sc_if->msk_if_dev, "no PHY found!\n");
1533		ether_ifdetach(ifp);
1534		error = ENXIO;
1535		goto fail;
1536	}
1537
1538fail:
1539	if (error != 0) {
1540		/* Access should be ok even though lock has been dropped */
1541		sc->msk_if[port] = NULL;
1542		msk_detach(dev);
1543	}
1544
1545	return (error);
1546}
1547
1548/*
1549 * Attach the interface. Allocate softc structures, do ifmedia
1550 * setup and ethernet/BPF attach.
1551 */
1552static int
1553mskc_attach(device_t dev)
1554{
1555	struct msk_softc *sc;
1556	int error, msic, msir, *port, reg;
1557
1558	sc = device_get_softc(dev);
1559	sc->msk_dev = dev;
1560	mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1561	    MTX_DEF);
1562
1563	/*
1564	 * Map control/status registers.
1565	 */
1566	pci_enable_busmaster(dev);
1567
1568	/* Allocate I/O resource */
1569#ifdef MSK_USEIOSPACE
1570	sc->msk_res_spec = msk_res_spec_io;
1571#else
1572	sc->msk_res_spec = msk_res_spec_mem;
1573#endif
1574	sc->msk_irq_spec = msk_irq_spec_legacy;
1575	error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
1576	if (error) {
1577		if (sc->msk_res_spec == msk_res_spec_mem)
1578			sc->msk_res_spec = msk_res_spec_io;
1579		else
1580			sc->msk_res_spec = msk_res_spec_mem;
1581		error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
1582		if (error) {
1583			device_printf(dev, "couldn't allocate %s resources\n",
1584			    sc->msk_res_spec == msk_res_spec_mem ? "memory" :
1585			    "I/O");
1586			mtx_destroy(&sc->msk_mtx);
1587			return (ENXIO);
1588		}
1589	}
1590
1591	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1592	sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
1593	sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
1594	/* Bail out if chip is not recognized. */
1595	if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
1596	    sc->msk_hw_id > CHIP_ID_YUKON_FE) {
1597		device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
1598		    sc->msk_hw_id, sc->msk_hw_rev);
1599		mtx_destroy(&sc->msk_mtx);
1600		return (ENXIO);
1601	}
1602
1603	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1604	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1605	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
1606	    &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
1607	    "max number of Rx events to process");
1608
1609	sc->msk_process_limit = MSK_PROC_DEFAULT;
1610	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
1611	    "process_limit", &sc->msk_process_limit);
1612	if (error == 0) {
1613		if (sc->msk_process_limit < MSK_PROC_MIN ||
1614		    sc->msk_process_limit > MSK_PROC_MAX) {
1615			device_printf(dev, "process_limit value out of range; "
1616			    "using default: %d\n", MSK_PROC_DEFAULT);
1617			sc->msk_process_limit = MSK_PROC_DEFAULT;
1618		}
1619	}
1620
1621	/* Soft reset. */
1622	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1623	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1624	sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
1625	 if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
1626		 sc->msk_coppertype = 0;
1627	 else
1628		 sc->msk_coppertype = 1;
1629	/* Check number of MACs. */
1630	sc->msk_num_port = 1;
1631	if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
1632	    CFG_DUAL_MAC_MSK) {
1633		if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
1634			sc->msk_num_port++;
1635	}
1636
1637	/* Check bus type. */
1638	if (pci_find_extcap(sc->msk_dev, PCIY_EXPRESS, &reg) == 0)
1639		sc->msk_bustype = MSK_PEX_BUS;
1640	else if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &reg) == 0)
1641		sc->msk_bustype = MSK_PCIX_BUS;
1642	else
1643		sc->msk_bustype = MSK_PCI_BUS;
1644
1645	switch (sc->msk_hw_id) {
1646	case CHIP_ID_YUKON_EC:
1647	case CHIP_ID_YUKON_EC_U:
1648		sc->msk_clock = 125;	/* 125 Mhz */
1649		break;
1650	case CHIP_ID_YUKON_FE:
1651		sc->msk_clock = 100;	/* 100 Mhz */
1652		break;
1653	case CHIP_ID_YUKON_XL:
1654		sc->msk_clock = 156;	/* 156 Mhz */
1655		break;
1656	default:
1657		sc->msk_clock = 156;	/* 156 Mhz */
1658		break;
1659	}
1660
1661	/* Allocate IRQ resources. */
1662	msic = pci_msi_count(dev);
1663	if (bootverbose)
1664		device_printf(dev, "MSI count : %d\n", msic);
1665	/*
1666	 * The Yukon II reports it can handle two messages, one for each
1667	 * possible port.  We go ahead and allocate two messages and only
1668	 * setup a handler for both if we have a dual port card.
1669	 *
1670	 * XXX: I haven't untangled the interrupt handler to handle dual
1671	 * port cards with separate MSI messages, so for now I disable MSI
1672	 * on dual port cards.
1673	 */
1674	if (legacy_intr != 0)
1675		msi_disable = 1;
1676	if (msi_disable == 0) {
1677		switch (msic) {
1678		case 2:
1679		case 1: /* 88E8058 reports 1 MSI message */
1680			msir = msic;
1681			if (sc->msk_num_port == 1 &&
1682			    pci_alloc_msi(dev, &msir) == 0) {
1683				if (msic == msir) {
1684					sc->msk_msi = 1;
1685					sc->msk_irq_spec = msic == 2 ?
1686					    msk_irq_spec_msi2 :
1687					    msk_irq_spec_msi;
1688				} else
1689					pci_release_msi(dev);
1690			}
1691			break;
1692		default:
1693			device_printf(dev,
1694			    "Unexpected number of MSI messages : %d\n", msic);
1695			break;
1696		}
1697	}
1698
1699	error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq);
1700	if (error) {
1701		device_printf(dev, "couldn't allocate IRQ resources\n");
1702		goto fail;
1703	}
1704
1705	if ((error = msk_status_dma_alloc(sc)) != 0)
1706		goto fail;
1707
1708	/* Set base interrupt mask. */
1709	sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
1710	sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
1711	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;
1712
1713	/* Reset the adapter. */
1714	mskc_reset(sc);
1715
1716	if ((error = mskc_setup_rambuffer(sc)) != 0)
1717		goto fail;
1718
1719	sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
1720	if (sc->msk_devs[MSK_PORT_A] == NULL) {
1721		device_printf(dev, "failed to add child for PORT_A\n");
1722		error = ENXIO;
1723		goto fail;
1724	}
1725	port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
1726	if (port == NULL) {
1727		device_printf(dev, "failed to allocate memory for "
1728		    "ivars of PORT_A\n");
1729		error = ENXIO;
1730		goto fail;
1731	}
1732	*port = MSK_PORT_A;
1733	device_set_ivars(sc->msk_devs[MSK_PORT_A], port);
1734
1735	if (sc->msk_num_port > 1) {
1736		sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
1737		if (sc->msk_devs[MSK_PORT_B] == NULL) {
1738			device_printf(dev, "failed to add child for PORT_B\n");
1739			error = ENXIO;
1740			goto fail;
1741		}
1742		port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
1743		if (port == NULL) {
1744			device_printf(dev, "failed to allocate memory for "
1745			    "ivars of PORT_B\n");
1746			error = ENXIO;
1747			goto fail;
1748		}
1749		*port = MSK_PORT_B;
1750		device_set_ivars(sc->msk_devs[MSK_PORT_B], port);
1751	}
1752
1753	error = bus_generic_attach(dev);
1754	if (error) {
1755		device_printf(dev, "failed to attach port(s)\n");
1756		goto fail;
1757	}
1758
1759	/* Hook interrupt last to avoid having to lock softc. */
1760	if (legacy_intr)
1761		error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
1762		    INTR_MPSAFE, NULL, msk_legacy_intr, sc,
1763		    &sc->msk_intrhand[0]);
1764	else {
1765		TASK_INIT(&sc->msk_int_task, 0, msk_int_task, sc);
1766		sc->msk_tq = taskqueue_create_fast("msk_taskq", M_WAITOK,
1767		    taskqueue_thread_enqueue, &sc->msk_tq);
1768		taskqueue_start_threads(&sc->msk_tq, 1, PI_NET, "%s taskq",
1769		    device_get_nameunit(sc->msk_dev));
1770		error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
1771		    INTR_MPSAFE, msk_intr, NULL, sc, &sc->msk_intrhand[0]);
1772	}
1773
1774	if (error != 0) {
1775		device_printf(dev, "couldn't set up interrupt handler\n");
1776		if (legacy_intr == 0)
1777			taskqueue_free(sc->msk_tq);
1778		sc->msk_tq = NULL;
1779		goto fail;
1780	}
1781fail:
1782	if (error != 0)
1783		mskc_detach(dev);
1784
1785	return (error);
1786}
1787
1788/*
1789 * Shutdown hardware and free up resources. This can be called any
1790 * time after the mutex has been initialized. It is called in both
1791 * the error case in attach and the normal detach case so it needs
1792 * to be careful about only freeing resources that have actually been
1793 * allocated.
1794 */
1795static int
1796msk_detach(device_t dev)
1797{
1798	struct msk_softc *sc;
1799	struct msk_if_softc *sc_if;
1800	struct ifnet *ifp;
1801
1802	sc_if = device_get_softc(dev);
1803	KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx),
1804	    ("msk mutex not initialized in msk_detach"));
1805	MSK_IF_LOCK(sc_if);
1806
1807	ifp = sc_if->msk_ifp;
1808	if (device_is_attached(dev)) {
1809		/* XXX */
1810		sc_if->msk_detach = 1;
1811		msk_stop(sc_if);
1812		/* Can't hold locks while calling detach. */
1813		MSK_IF_UNLOCK(sc_if);
1814		callout_drain(&sc_if->msk_tick_ch);
1815		taskqueue_drain(taskqueue_fast, &sc_if->msk_tx_task);
1816		taskqueue_drain(taskqueue_swi, &sc_if->msk_link_task);
1817		ether_ifdetach(ifp);
1818		MSK_IF_LOCK(sc_if);
1819	}
1820
1821	/*
1822	 * We're generally called from mskc_detach() which is using
1823	 * device_delete_child() to get to here. It's already trashed
1824	 * miibus for us, so don't do it here or we'll panic.
1825	 *
1826	 * if (sc_if->msk_miibus != NULL) {
1827	 * 	device_delete_child(dev, sc_if->msk_miibus);
1828	 * 	sc_if->msk_miibus = NULL;
1829	 * }
1830	 */
1831
1832	msk_txrx_dma_free(sc_if);
1833	bus_generic_detach(dev);
1834
1835	if (ifp)
1836		if_free(ifp);
1837	sc = sc_if->msk_softc;
1838	sc->msk_if[sc_if->msk_port] = NULL;
1839	MSK_IF_UNLOCK(sc_if);
1840
1841	return (0);
1842}
1843
1844static int
1845mskc_detach(device_t dev)
1846{
1847	struct msk_softc *sc;
1848
1849	sc = device_get_softc(dev);
1850	KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized"));
1851
1852	if (device_is_alive(dev)) {
1853		if (sc->msk_devs[MSK_PORT_A] != NULL) {
1854			free(device_get_ivars(sc->msk_devs[MSK_PORT_A]),
1855			    M_DEVBUF);
1856			device_delete_child(dev, sc->msk_devs[MSK_PORT_A]);
1857		}
1858		if (sc->msk_devs[MSK_PORT_B] != NULL) {
1859			free(device_get_ivars(sc->msk_devs[MSK_PORT_B]),
1860			    M_DEVBUF);
1861			device_delete_child(dev, sc->msk_devs[MSK_PORT_B]);
1862		}
1863		bus_generic_detach(dev);
1864	}
1865
1866	/* Disable all interrupts. */
1867	CSR_WRITE_4(sc, B0_IMSK, 0);
1868	CSR_READ_4(sc, B0_IMSK);
1869	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1870	CSR_READ_4(sc, B0_HWE_IMSK);
1871
1872	/* LED Off. */
1873	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);
1874
1875	/* Put hardware reset. */
1876	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1877
1878	msk_status_dma_free(sc);
1879
1880	if (legacy_intr == 0 && sc->msk_tq != NULL) {
1881		taskqueue_drain(sc->msk_tq, &sc->msk_int_task);
1882		taskqueue_free(sc->msk_tq);
1883		sc->msk_tq = NULL;
1884	}
1885	if (sc->msk_intrhand[0]) {
1886		bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand[0]);
1887		sc->msk_intrhand[0] = NULL;
1888	}
1889	if (sc->msk_intrhand[1]) {
1890		bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand[0]);
1891		sc->msk_intrhand[1] = NULL;
1892	}
1893	bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq);
1894	if (sc->msk_msi)
1895		pci_release_msi(dev);
1896	bus_release_resources(dev, sc->msk_res_spec, sc->msk_res);
1897	mtx_destroy(&sc->msk_mtx);
1898
1899	return (0);
1900}
1901
1902struct msk_dmamap_arg {
1903	bus_addr_t	msk_busaddr;
1904};
1905
1906static void
1907msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1908{
1909	struct msk_dmamap_arg *ctx;
1910
1911	if (error != 0)
1912		return;
1913	ctx = arg;
1914	ctx->msk_busaddr = segs[0].ds_addr;
1915}
1916
1917/* Create status DMA region. */
1918static int
1919msk_status_dma_alloc(struct msk_softc *sc)
1920{
1921	struct msk_dmamap_arg ctx;
1922	int error;
1923
1924	error = bus_dma_tag_create(
1925		    bus_get_dma_tag(sc->msk_dev),	/* parent */
1926		    MSK_STAT_ALIGN, 0,		/* alignment, boundary */
1927		    BUS_SPACE_MAXADDR,		/* lowaddr */
1928		    BUS_SPACE_MAXADDR,		/* highaddr */
1929		    NULL, NULL,			/* filter, filterarg */
1930		    MSK_STAT_RING_SZ,		/* maxsize */
1931		    1,				/* nsegments */
1932		    MSK_STAT_RING_SZ,		/* maxsegsize */
1933		    0,				/* flags */
1934		    NULL, NULL,			/* lockfunc, lockarg */
1935		    &sc->msk_stat_tag);
1936	if (error != 0) {
1937		device_printf(sc->msk_dev,
1938		    "failed to create status DMA tag\n");
1939		return (error);
1940	}
1941
1942	/* Allocate DMA'able memory and load the DMA map for status ring. */
1943	error = bus_dmamem_alloc(sc->msk_stat_tag,
1944	    (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
1945	    BUS_DMA_ZERO, &sc->msk_stat_map);
1946	if (error != 0) {
1947		device_printf(sc->msk_dev,
1948		    "failed to allocate DMA'able memory for status ring\n");
1949		return (error);
1950	}
1951
1952	ctx.msk_busaddr = 0;
1953	error = bus_dmamap_load(sc->msk_stat_tag,
1954	    sc->msk_stat_map, sc->msk_stat_ring, MSK_STAT_RING_SZ,
1955	    msk_dmamap_cb, &ctx, 0);
1956	if (error != 0) {
1957		device_printf(sc->msk_dev,
1958		    "failed to load DMA'able memory for status ring\n");
1959		return (error);
1960	}
1961	sc->msk_stat_ring_paddr = ctx.msk_busaddr;
1962
1963	return (0);
1964}
1965
1966static void
1967msk_status_dma_free(struct msk_softc *sc)
1968{
1969
1970	/* Destroy status block. */
1971	if (sc->msk_stat_tag) {
1972		if (sc->msk_stat_map) {
1973			bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
1974			if (sc->msk_stat_ring) {
1975				bus_dmamem_free(sc->msk_stat_tag,
1976				    sc->msk_stat_ring, sc->msk_stat_map);
1977				sc->msk_stat_ring = NULL;
1978			}
1979			sc->msk_stat_map = NULL;
1980		}
1981		bus_dma_tag_destroy(sc->msk_stat_tag);
1982		sc->msk_stat_tag = NULL;
1983	}
1984}
1985
1986static int
1987msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
1988{
1989	struct msk_dmamap_arg ctx;
1990	struct msk_txdesc *txd;
1991	struct msk_rxdesc *rxd;
1992	struct msk_rxdesc *jrxd;
1993	struct msk_jpool_entry *entry;
1994	uint8_t *ptr;
1995	bus_size_t rxalign;
1996	int error, i;
1997
1998	mtx_init(&sc_if->msk_jlist_mtx, "msk_jlist_mtx", NULL, MTX_DEF);
1999	SLIST_INIT(&sc_if->msk_jfree_listhead);
2000	SLIST_INIT(&sc_if->msk_jinuse_listhead);
2001
2002	/* Create parent DMA tag. */
2003	/*
2004	 * XXX
2005	 * It seems that Yukon II supports full 64bits DMA operations. But
2006	 * it needs two descriptors(list elements) for 64bits DMA operations.
2007	 * Since we don't know what DMA address mappings(32bits or 64bits)
2008	 * would be used in advance for each mbufs, we limits its DMA space
2009	 * to be in range of 32bits address space. Otherwise, we should check
2010	 * what DMA address is used and chain another descriptor for the
2011	 * 64bits DMA operation. This also means descriptor ring size is
2012	 * variable. Limiting DMA address to be in 32bit address space greatly
2013	 * simplyfies descriptor handling and possibly would increase
2014	 * performance a bit due to efficient handling of descriptors.
2015	 * Apart from harassing checksum offloading mechanisms, it seems
2016	 * it's really bad idea to use a seperate descriptor for 64bit
2017	 * DMA operation to save small descriptor memory. Anyway, I've
2018	 * never seen these exotic scheme on ethernet interface hardware.
2019	 */
2020	error = bus_dma_tag_create(
2021		    bus_get_dma_tag(sc_if->msk_if_dev),	/* parent */
2022		    1, 0,			/* alignment, boundary */
2023		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2024		    BUS_SPACE_MAXADDR,		/* highaddr */
2025		    NULL, NULL,			/* filter, filterarg */
2026		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
2027		    0,				/* nsegments */
2028		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
2029		    0,				/* flags */
2030		    NULL, NULL,			/* lockfunc, lockarg */
2031		    &sc_if->msk_cdata.msk_parent_tag);
2032	if (error != 0) {
2033		device_printf(sc_if->msk_if_dev,
2034		    "failed to create parent DMA tag\n");
2035		goto fail;
2036	}
2037	/* Create tag for Tx ring. */
2038	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2039		    MSK_RING_ALIGN, 0,		/* alignment, boundary */
2040		    BUS_SPACE_MAXADDR,		/* lowaddr */
2041		    BUS_SPACE_MAXADDR,		/* highaddr */
2042		    NULL, NULL,			/* filter, filterarg */
2043		    MSK_TX_RING_SZ,		/* maxsize */
2044		    1,				/* nsegments */
2045		    MSK_TX_RING_SZ,		/* maxsegsize */
2046		    0,				/* flags */
2047		    NULL, NULL,			/* lockfunc, lockarg */
2048		    &sc_if->msk_cdata.msk_tx_ring_tag);
2049	if (error != 0) {
2050		device_printf(sc_if->msk_if_dev,
2051		    "failed to create Tx ring DMA tag\n");
2052		goto fail;
2053	}
2054
2055	/* Create tag for Rx ring. */
2056	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2057		    MSK_RING_ALIGN, 0,		/* alignment, boundary */
2058		    BUS_SPACE_MAXADDR,		/* lowaddr */
2059		    BUS_SPACE_MAXADDR,		/* highaddr */
2060		    NULL, NULL,			/* filter, filterarg */
2061		    MSK_RX_RING_SZ,		/* maxsize */
2062		    1,				/* nsegments */
2063		    MSK_RX_RING_SZ,		/* maxsegsize */
2064		    0,				/* flags */
2065		    NULL, NULL,			/* lockfunc, lockarg */
2066		    &sc_if->msk_cdata.msk_rx_ring_tag);
2067	if (error != 0) {
2068		device_printf(sc_if->msk_if_dev,
2069		    "failed to create Rx ring DMA tag\n");
2070		goto fail;
2071	}
2072
2073	/* Create tag for jumbo Rx ring. */
2074	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2075		    MSK_RING_ALIGN, 0,		/* alignment, boundary */
2076		    BUS_SPACE_MAXADDR,		/* lowaddr */
2077		    BUS_SPACE_MAXADDR,		/* highaddr */
2078		    NULL, NULL,			/* filter, filterarg */
2079		    MSK_JUMBO_RX_RING_SZ,	/* maxsize */
2080		    1,				/* nsegments */
2081		    MSK_JUMBO_RX_RING_SZ,	/* maxsegsize */
2082		    0,				/* flags */
2083		    NULL, NULL,			/* lockfunc, lockarg */
2084		    &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2085	if (error != 0) {
2086		device_printf(sc_if->msk_if_dev,
2087		    "failed to create jumbo Rx ring DMA tag\n");
2088		goto fail;
2089	}
2090
2091	/* Create tag for jumbo buffer blocks. */
2092	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2093		    PAGE_SIZE, 0,		/* alignment, boundary */
2094		    BUS_SPACE_MAXADDR,		/* lowaddr */
2095		    BUS_SPACE_MAXADDR,		/* highaddr */
2096		    NULL, NULL,			/* filter, filterarg */
2097		    MSK_JMEM,			/* maxsize */
2098		    1,				/* nsegments */
2099		    MSK_JMEM,			/* maxsegsize */
2100		    0,				/* flags */
2101		    NULL, NULL,			/* lockfunc, lockarg */
2102		    &sc_if->msk_cdata.msk_jumbo_tag);
2103	if (error != 0) {
2104		device_printf(sc_if->msk_if_dev,
2105		    "failed to create jumbo Rx buffer block DMA tag\n");
2106		goto fail;
2107	}
2108
2109	/* Create tag for Tx buffers. */
2110	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2111		    1, 0,			/* alignment, boundary */
2112		    BUS_SPACE_MAXADDR,		/* lowaddr */
2113		    BUS_SPACE_MAXADDR,		/* highaddr */
2114		    NULL, NULL,			/* filter, filterarg */
2115		    MSK_TSO_MAXSIZE,		/* maxsize */
2116		    MSK_MAXTXSEGS,		/* nsegments */
2117		    MSK_TSO_MAXSGSIZE,		/* maxsegsize */
2118		    0,				/* flags */
2119		    NULL, NULL,			/* lockfunc, lockarg */
2120		    &sc_if->msk_cdata.msk_tx_tag);
2121	if (error != 0) {
2122		device_printf(sc_if->msk_if_dev,
2123		    "failed to create Tx DMA tag\n");
2124		goto fail;
2125	}
2126
2127	rxalign = 1;
2128	/*
2129	 * Workaround hardware hang which seems to happen when Rx buffer
2130	 * is not aligned on multiple of FIFO word(8 bytes).
2131	 */
2132	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
2133		rxalign = MSK_RX_BUF_ALIGN;
2134	/* Create tag for Rx buffers. */
2135	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2136		    rxalign, 0,			/* alignment, boundary */
2137		    BUS_SPACE_MAXADDR,		/* lowaddr */
2138		    BUS_SPACE_MAXADDR,		/* highaddr */
2139		    NULL, NULL,			/* filter, filterarg */
2140		    MCLBYTES,			/* maxsize */
2141		    1,				/* nsegments */
2142		    MCLBYTES,			/* maxsegsize */
2143		    0,				/* flags */
2144		    NULL, NULL,			/* lockfunc, lockarg */
2145		    &sc_if->msk_cdata.msk_rx_tag);
2146	if (error != 0) {
2147		device_printf(sc_if->msk_if_dev,
2148		    "failed to create Rx DMA tag\n");
2149		goto fail;
2150	}
2151
2152	/* Create tag for jumbo Rx buffers. */
2153	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2154		    PAGE_SIZE, 0,		/* alignment, boundary */
2155		    BUS_SPACE_MAXADDR,		/* lowaddr */
2156		    BUS_SPACE_MAXADDR,		/* highaddr */
2157		    NULL, NULL,			/* filter, filterarg */
2158		    MCLBYTES * MSK_MAXRXSEGS,	/* maxsize */
2159		    MSK_MAXRXSEGS,		/* nsegments */
2160		    MSK_JLEN,			/* maxsegsize */
2161		    0,				/* flags */
2162		    NULL, NULL,			/* lockfunc, lockarg */
2163		    &sc_if->msk_cdata.msk_jumbo_rx_tag);
2164	if (error != 0) {
2165		device_printf(sc_if->msk_if_dev,
2166		    "failed to create jumbo Rx DMA tag\n");
2167		goto fail;
2168	}
2169
2170	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
2171	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag,
2172	    (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK |
2173	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map);
2174	if (error != 0) {
2175		device_printf(sc_if->msk_if_dev,
2176		    "failed to allocate DMA'able memory for Tx ring\n");
2177		goto fail;
2178	}
2179
2180	ctx.msk_busaddr = 0;
2181	error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag,
2182	    sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring,
2183	    MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, 0);
2184	if (error != 0) {
2185		device_printf(sc_if->msk_if_dev,
2186		    "failed to load DMA'able memory for Tx ring\n");
2187		goto fail;
2188	}
2189	sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr;
2190
2191	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
2192	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag,
2193	    (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK |
2194	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map);
2195	if (error != 0) {
2196		device_printf(sc_if->msk_if_dev,
2197		    "failed to allocate DMA'able memory for Rx ring\n");
2198		goto fail;
2199	}
2200
2201	ctx.msk_busaddr = 0;
2202	error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag,
2203	    sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring,
2204	    MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, 0);
2205	if (error != 0) {
2206		device_printf(sc_if->msk_if_dev,
2207		    "failed to load DMA'able memory for Rx ring\n");
2208		goto fail;
2209	}
2210	sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr;
2211
2212	/* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
2213	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2214	    (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
2215	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2216	    &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2217	if (error != 0) {
2218		device_printf(sc_if->msk_if_dev,
2219		    "failed to allocate DMA'able memory for jumbo Rx ring\n");
2220		goto fail;
2221	}
2222
2223	ctx.msk_busaddr = 0;
2224	error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2225	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
2226	    sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
2227	    msk_dmamap_cb, &ctx, 0);
2228	if (error != 0) {
2229		device_printf(sc_if->msk_if_dev,
2230		    "failed to load DMA'able memory for jumbo Rx ring\n");
2231		goto fail;
2232	}
2233	sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
2234
2235	/* Create DMA maps for Tx buffers. */
2236	for (i = 0; i < MSK_TX_RING_CNT; i++) {
2237		txd = &sc_if->msk_cdata.msk_txdesc[i];
2238		txd->tx_m = NULL;
2239		txd->tx_dmamap = NULL;
2240		error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0,
2241		    &txd->tx_dmamap);
2242		if (error != 0) {
2243			device_printf(sc_if->msk_if_dev,
2244			    "failed to create Tx dmamap\n");
2245			goto fail;
2246		}
2247	}
2248	/* Create DMA maps for Rx buffers. */
2249	if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2250	    &sc_if->msk_cdata.msk_rx_sparemap)) != 0) {
2251		device_printf(sc_if->msk_if_dev,
2252		    "failed to create spare Rx dmamap\n");
2253		goto fail;
2254	}
2255	for (i = 0; i < MSK_RX_RING_CNT; i++) {
2256		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2257		rxd->rx_m = NULL;
2258		rxd->rx_dmamap = NULL;
2259		error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2260		    &rxd->rx_dmamap);
2261		if (error != 0) {
2262			device_printf(sc_if->msk_if_dev,
2263			    "failed to create Rx dmamap\n");
2264			goto fail;
2265		}
2266	}
2267	/* Create DMA maps for jumbo Rx buffers. */
2268	if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2269	    &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
2270		device_printf(sc_if->msk_if_dev,
2271		    "failed to create spare jumbo Rx dmamap\n");
2272		goto fail;
2273	}
2274	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2275		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2276		jrxd->rx_m = NULL;
2277		jrxd->rx_dmamap = NULL;
2278		error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2279		    &jrxd->rx_dmamap);
2280		if (error != 0) {
2281			device_printf(sc_if->msk_if_dev,
2282			    "failed to create jumbo Rx dmamap\n");
2283			goto fail;
2284		}
2285	}
2286
2287	/* Allocate DMA'able memory and load the DMA map for jumbo buf. */
2288	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_tag,
2289	    (void **)&sc_if->msk_rdata.msk_jumbo_buf,
2290	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2291	    &sc_if->msk_cdata.msk_jumbo_map);
2292	if (error != 0) {
2293		device_printf(sc_if->msk_if_dev,
2294		    "failed to allocate DMA'able memory for jumbo buf\n");
2295		goto fail;
2296	}
2297
2298	ctx.msk_busaddr = 0;
2299	error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_tag,
2300	    sc_if->msk_cdata.msk_jumbo_map, sc_if->msk_rdata.msk_jumbo_buf,
2301	    MSK_JMEM, msk_dmamap_cb, &ctx, 0);
2302	if (error != 0) {
2303		device_printf(sc_if->msk_if_dev,
2304		    "failed to load DMA'able memory for jumbobuf\n");
2305		goto fail;
2306	}
2307	sc_if->msk_rdata.msk_jumbo_buf_paddr = ctx.msk_busaddr;
2308
2309	/*
2310	 * Now divide it up into 9K pieces and save the addresses
2311	 * in an array.
2312	 */
2313	ptr = sc_if->msk_rdata.msk_jumbo_buf;
2314	for (i = 0; i < MSK_JSLOTS; i++) {
2315		sc_if->msk_cdata.msk_jslots[i] = ptr;
2316		ptr += MSK_JLEN;
2317		entry = malloc(sizeof(struct msk_jpool_entry),
2318		    M_DEVBUF, M_WAITOK);
2319		if (entry == NULL) {
2320			device_printf(sc_if->msk_if_dev,
2321			    "no memory for jumbo buffers!\n");
2322			error = ENOMEM;
2323			goto fail;
2324		}
2325		entry->slot = i;
2326		SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
2327		    jpool_entries);
2328	}
2329
2330fail:
2331	return (error);
2332}
2333
2334static void
2335msk_txrx_dma_free(struct msk_if_softc *sc_if)
2336{
2337	struct msk_txdesc *txd;
2338	struct msk_rxdesc *rxd;
2339	struct msk_rxdesc *jrxd;
2340	struct msk_jpool_entry *entry;
2341	int i;
2342
2343	MSK_JLIST_LOCK(sc_if);
2344	while ((entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead))) {
2345		device_printf(sc_if->msk_if_dev,
2346		    "asked to free buffer that is in use!\n");
2347		SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
2348		SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
2349		    jpool_entries);
2350	}
2351
2352	while (!SLIST_EMPTY(&sc_if->msk_jfree_listhead)) {
2353		entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
2354		SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
2355		free(entry, M_DEVBUF);
2356	}
2357	MSK_JLIST_UNLOCK(sc_if);
2358
2359	/* Destroy jumbo buffer block. */
2360	if (sc_if->msk_cdata.msk_jumbo_map)
2361		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_tag,
2362		    sc_if->msk_cdata.msk_jumbo_map);
2363
2364	if (sc_if->msk_rdata.msk_jumbo_buf) {
2365		bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_tag,
2366		    sc_if->msk_rdata.msk_jumbo_buf,
2367		    sc_if->msk_cdata.msk_jumbo_map);
2368		sc_if->msk_rdata.msk_jumbo_buf = NULL;
2369		sc_if->msk_cdata.msk_jumbo_map = NULL;
2370	}
2371
2372	/* Tx ring. */
2373	if (sc_if->msk_cdata.msk_tx_ring_tag) {
2374		if (sc_if->msk_cdata.msk_tx_ring_map)
2375			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag,
2376			    sc_if->msk_cdata.msk_tx_ring_map);
2377		if (sc_if->msk_cdata.msk_tx_ring_map &&
2378		    sc_if->msk_rdata.msk_tx_ring)
2379			bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag,
2380			    sc_if->msk_rdata.msk_tx_ring,
2381			    sc_if->msk_cdata.msk_tx_ring_map);
2382		sc_if->msk_rdata.msk_tx_ring = NULL;
2383		sc_if->msk_cdata.msk_tx_ring_map = NULL;
2384		bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag);
2385		sc_if->msk_cdata.msk_tx_ring_tag = NULL;
2386	}
2387	/* Rx ring. */
2388	if (sc_if->msk_cdata.msk_rx_ring_tag) {
2389		if (sc_if->msk_cdata.msk_rx_ring_map)
2390			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag,
2391			    sc_if->msk_cdata.msk_rx_ring_map);
2392		if (sc_if->msk_cdata.msk_rx_ring_map &&
2393		    sc_if->msk_rdata.msk_rx_ring)
2394			bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag,
2395			    sc_if->msk_rdata.msk_rx_ring,
2396			    sc_if->msk_cdata.msk_rx_ring_map);
2397		sc_if->msk_rdata.msk_rx_ring = NULL;
2398		sc_if->msk_cdata.msk_rx_ring_map = NULL;
2399		bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag);
2400		sc_if->msk_cdata.msk_rx_ring_tag = NULL;
2401	}
2402	/* Jumbo Rx ring. */
2403	if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
2404		if (sc_if->msk_cdata.msk_jumbo_rx_ring_map)
2405			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2406			    sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2407		if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
2408		    sc_if->msk_rdata.msk_jumbo_rx_ring)
2409			bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2410			    sc_if->msk_rdata.msk_jumbo_rx_ring,
2411			    sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2412		sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
2413		sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
2414		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2415		sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
2416	}
2417	/* Tx buffers. */
2418	if (sc_if->msk_cdata.msk_tx_tag) {
2419		for (i = 0; i < MSK_TX_RING_CNT; i++) {
2420			txd = &sc_if->msk_cdata.msk_txdesc[i];
2421			if (txd->tx_dmamap) {
2422				bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
2423				    txd->tx_dmamap);
2424				txd->tx_dmamap = NULL;
2425			}
2426		}
2427		bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
2428		sc_if->msk_cdata.msk_tx_tag = NULL;
2429	}
2430	/* Rx buffers. */
2431	if (sc_if->msk_cdata.msk_rx_tag) {
2432		for (i = 0; i < MSK_RX_RING_CNT; i++) {
2433			rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2434			if (rxd->rx_dmamap) {
2435				bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2436				    rxd->rx_dmamap);
2437				rxd->rx_dmamap = NULL;
2438			}
2439		}
2440		if (sc_if->msk_cdata.msk_rx_sparemap) {
2441			bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2442			    sc_if->msk_cdata.msk_rx_sparemap);
2443			sc_if->msk_cdata.msk_rx_sparemap = 0;
2444		}
2445		bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2446		sc_if->msk_cdata.msk_rx_tag = NULL;
2447	}
2448	/* Jumbo Rx buffers. */
2449	if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
2450		for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2451			jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2452			if (jrxd->rx_dmamap) {
2453				bus_dmamap_destroy(
2454				    sc_if->msk_cdata.msk_jumbo_rx_tag,
2455				    jrxd->rx_dmamap);
2456				jrxd->rx_dmamap = NULL;
2457			}
2458		}
2459		if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
2460			bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
2461			    sc_if->msk_cdata.msk_jumbo_rx_sparemap);
2462			sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
2463		}
2464		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
2465		sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
2466	}
2467
2468	if (sc_if->msk_cdata.msk_parent_tag) {
2469		bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
2470		sc_if->msk_cdata.msk_parent_tag = NULL;
2471	}
2472	mtx_destroy(&sc_if->msk_jlist_mtx);
2473}
2474
2475/*
2476 * Allocate a jumbo buffer.
2477 */
2478static void *
2479msk_jalloc(struct msk_if_softc *sc_if)
2480{
2481	struct msk_jpool_entry *entry;
2482
2483	MSK_JLIST_LOCK(sc_if);
2484
2485	entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
2486
2487	if (entry == NULL) {
2488		MSK_JLIST_UNLOCK(sc_if);
2489		return (NULL);
2490	}
2491
2492	SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
2493	SLIST_INSERT_HEAD(&sc_if->msk_jinuse_listhead, entry, jpool_entries);
2494
2495	MSK_JLIST_UNLOCK(sc_if);
2496
2497	return (sc_if->msk_cdata.msk_jslots[entry->slot]);
2498}
2499
2500/*
2501 * Release a jumbo buffer.
2502 */
2503static void
2504msk_jfree(void *buf, void *args)
2505{
2506	struct msk_if_softc *sc_if;
2507	struct msk_jpool_entry *entry;
2508	int i;
2509
2510	/* Extract the softc struct pointer. */
2511	sc_if = (struct msk_if_softc *)args;
2512	KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__));
2513
2514	MSK_JLIST_LOCK(sc_if);
2515	/* Calculate the slot this buffer belongs to. */
2516	i = ((vm_offset_t)buf
2517	     - (vm_offset_t)sc_if->msk_rdata.msk_jumbo_buf) / MSK_JLEN;
2518	KASSERT(i >= 0 && i < MSK_JSLOTS,
2519	    ("%s: asked to free buffer that we don't manage!", __func__));
2520
2521	entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead);
2522	KASSERT(entry != NULL, ("%s: buffer not in use!", __func__));
2523	entry->slot = i;
2524	SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
2525	SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, jpool_entries);
2526	if (SLIST_EMPTY(&sc_if->msk_jinuse_listhead))
2527		wakeup(sc_if);
2528
2529	MSK_JLIST_UNLOCK(sc_if);
2530}
2531
2532static int
2533msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
2534{
2535	struct msk_txdesc *txd, *txd_last;
2536	struct msk_tx_desc *tx_le;
2537	struct mbuf *m;
2538	bus_dmamap_t map;
2539	bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
2540	uint32_t control, prod, si;
2541	uint16_t offset, tcp_offset, tso_mtu;
2542	int error, i, nseg, tso;
2543
2544	MSK_IF_LOCK_ASSERT(sc_if);
2545
2546	tcp_offset = offset = 0;
2547	m = *m_head;
2548	if ((m->m_pkthdr.csum_flags & (MSK_CSUM_FEATURES | CSUM_TSO)) != 0) {
2549		/*
2550		 * Since mbuf has no protocol specific structure information
2551		 * in it we have to inspect protocol information here to
2552		 * setup TSO and checksum offload. I don't know why Marvell
2553		 * made a such decision in chip design because other GigE
2554		 * hardwares normally takes care of all these chores in
2555		 * hardware. However, TSO performance of Yukon II is very
2556		 * good such that it's worth to implement it.
2557		 */
2558		struct ether_header *eh;
2559		struct ip *ip;
2560		struct tcphdr *tcp;
2561
2562		if (M_WRITABLE(m) == 0) {
2563			/* Get a writable copy. */
2564			m = m_dup(*m_head, M_DONTWAIT);
2565			m_freem(*m_head);
2566			if (m == NULL) {
2567				*m_head = NULL;
2568				return (ENOBUFS);
2569			}
2570			*m_head = m;
2571		}
2572
2573		offset = sizeof(struct ether_header);
2574		m = m_pullup(m, offset);
2575		if (m == NULL) {
2576			*m_head = NULL;
2577			return (ENOBUFS);
2578		}
2579		eh = mtod(m, struct ether_header *);
2580		/* Check if hardware VLAN insertion is off. */
2581		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2582			offset = sizeof(struct ether_vlan_header);
2583			m = m_pullup(m, offset);
2584			if (m == NULL) {
2585				*m_head = NULL;
2586				return (ENOBUFS);
2587			}
2588		}
2589		m = m_pullup(m, offset + sizeof(struct ip));
2590		if (m == NULL) {
2591			*m_head = NULL;
2592			return (ENOBUFS);
2593		}
2594		ip = (struct ip *)(mtod(m, char *) + offset);
2595		offset += (ip->ip_hl << 2);
2596		tcp_offset = offset;
2597		/*
2598		 * It seems that Yukon II has Tx checksum offload bug for
2599		 * small TCP packets that's less than 60 bytes in size
2600		 * (e.g. TCP window probe packet, pure ACK packet).
2601		 * Common work around like padding with zeros to make the
2602		 * frame minimum ethernet frame size didn't work at all.
2603		 * Instead of disabling checksum offload completely we
2604		 * resort to S/W checksum routine when we encounter short
2605		 * TCP frames.
2606		 * Short UDP packets appear to be handled correctly by
2607		 * Yukon II.
2608		 */
2609		if (m->m_pkthdr.len < MSK_MIN_FRAMELEN &&
2610		    (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
2611			uint16_t csum;
2612
2613			m = m_pullup(m, offset + sizeof(struct tcphdr));
2614			if (m == NULL) {
2615				*m_head = NULL;
2616				return (ENOBUFS);
2617			}
2618			csum = in_cksum_skip(m, ntohs(ip->ip_len) + offset -
2619			    (ip->ip_hl << 2), offset);
2620			*(uint16_t *)(m->m_data + offset +
2621			    m->m_pkthdr.csum_data) = csum;
2622			m->m_pkthdr.csum_flags &= ~CSUM_TCP;
2623		}
2624		if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2625			m = m_pullup(m, offset + sizeof(struct tcphdr));
2626			if (m == NULL) {
2627				*m_head = NULL;
2628				return (ENOBUFS);
2629			}
2630			tcp = (struct tcphdr *)(mtod(m, char *) + offset);
2631			offset += (tcp->th_off << 2);
2632		}
2633		*m_head = m;
2634	}
2635
2636	prod = sc_if->msk_cdata.msk_tx_prod;
2637	txd = &sc_if->msk_cdata.msk_txdesc[prod];
2638	txd_last = txd;
2639	map = txd->tx_dmamap;
2640	error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map,
2641	    *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2642	if (error == EFBIG) {
2643		m = m_collapse(*m_head, M_DONTWAIT, MSK_MAXTXSEGS);
2644		if (m == NULL) {
2645			m_freem(*m_head);
2646			*m_head = NULL;
2647			return (ENOBUFS);
2648		}
2649		*m_head = m;
2650		error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag,
2651		    map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2652		if (error != 0) {
2653			m_freem(*m_head);
2654			*m_head = NULL;
2655			return (error);
2656		}
2657	} else if (error != 0)
2658		return (error);
2659	if (nseg == 0) {
2660		m_freem(*m_head);
2661		*m_head = NULL;
2662		return (EIO);
2663	}
2664
2665	/* Check number of available descriptors. */
2666	if (sc_if->msk_cdata.msk_tx_cnt + nseg >=
2667	    (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) {
2668		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
2669		return (ENOBUFS);
2670	}
2671
2672	control = 0;
2673	tso = 0;
2674	tx_le = NULL;
2675
2676	/* Check TSO support. */
2677	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2678		tso_mtu = offset + m->m_pkthdr.tso_segsz;
2679		if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) {
2680			tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2681			tx_le->msk_addr = htole32(tso_mtu);
2682			tx_le->msk_control = htole32(OP_LRGLEN | HW_OWNER);
2683			sc_if->msk_cdata.msk_tx_cnt++;
2684			MSK_INC(prod, MSK_TX_RING_CNT);
2685			sc_if->msk_cdata.msk_tso_mtu = tso_mtu;
2686		}
2687		tso++;
2688	}
2689	/* Check if we have a VLAN tag to insert. */
2690	if ((m->m_flags & M_VLANTAG) != 0) {
2691		if (tso == 0) {
2692			tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2693			tx_le->msk_addr = htole32(0);
2694			tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
2695			    htons(m->m_pkthdr.ether_vtag));
2696			sc_if->msk_cdata.msk_tx_cnt++;
2697			MSK_INC(prod, MSK_TX_RING_CNT);
2698		} else {
2699			tx_le->msk_control |= htole32(OP_VLAN |
2700			    htons(m->m_pkthdr.ether_vtag));
2701		}
2702		control |= INS_VLAN;
2703	}
2704	/* Check if we have to handle checksum offload. */
2705	if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) {
2706		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2707		tx_le->msk_addr = htole32(((tcp_offset + m->m_pkthdr.csum_data)
2708		    & 0xffff) | ((uint32_t)tcp_offset << 16));
2709		tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER));
2710		control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
2711		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2712			control |= UDPTCP;
2713		sc_if->msk_cdata.msk_tx_cnt++;
2714		MSK_INC(prod, MSK_TX_RING_CNT);
2715	}
2716
2717	si = prod;
2718	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2719	tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
2720	if (tso == 0)
2721		tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2722		    OP_PACKET);
2723	else
2724		tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2725		    OP_LARGESEND);
2726	sc_if->msk_cdata.msk_tx_cnt++;
2727	MSK_INC(prod, MSK_TX_RING_CNT);
2728
2729	for (i = 1; i < nseg; i++) {
2730		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2731		tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
2732		tx_le->msk_control = htole32(txsegs[i].ds_len | control |
2733		    OP_BUFFER | HW_OWNER);
2734		sc_if->msk_cdata.msk_tx_cnt++;
2735		MSK_INC(prod, MSK_TX_RING_CNT);
2736	}
2737	/* Update producer index. */
2738	sc_if->msk_cdata.msk_tx_prod = prod;
2739
2740	/* Set EOP on the last desciptor. */
2741	prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
2742	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2743	tx_le->msk_control |= htole32(EOP);
2744
2745	/* Turn the first descriptor ownership to hardware. */
2746	tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
2747	tx_le->msk_control |= htole32(HW_OWNER);
2748
2749	txd = &sc_if->msk_cdata.msk_txdesc[prod];
2750	map = txd_last->tx_dmamap;
2751	txd_last->tx_dmamap = txd->tx_dmamap;
2752	txd->tx_dmamap = map;
2753	txd->tx_m = m;
2754
2755	/* Sync descriptors. */
2756	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
2757	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
2758	    sc_if->msk_cdata.msk_tx_ring_map,
2759	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2760
2761	return (0);
2762}
2763
2764static void
2765msk_tx_task(void *arg, int pending)
2766{
2767	struct ifnet *ifp;
2768
2769	ifp = arg;
2770	msk_start(ifp);
2771}
2772
2773static void
2774msk_start(struct ifnet *ifp)
2775{
2776        struct msk_if_softc *sc_if;
2777        struct mbuf *m_head;
2778	int enq;
2779
2780	sc_if = ifp->if_softc;
2781
2782	MSK_IF_LOCK(sc_if);
2783
2784	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2785	    IFF_DRV_RUNNING || sc_if->msk_link == 0) {
2786		MSK_IF_UNLOCK(sc_if);
2787		return;
2788	}
2789
2790	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2791	    sc_if->msk_cdata.msk_tx_cnt <
2792	    (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
2793		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2794		if (m_head == NULL)
2795			break;
2796		/*
2797		 * Pack the data into the transmit ring. If we
2798		 * don't have room, set the OACTIVE flag and wait
2799		 * for the NIC to drain the ring.
2800		 */
2801		if (msk_encap(sc_if, &m_head) != 0) {
2802			if (m_head == NULL)
2803				break;
2804			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2805			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2806			break;
2807		}
2808
2809		enq++;
2810		/*
2811		 * If there's a BPF listener, bounce a copy of this frame
2812		 * to him.
2813		 */
2814		ETHER_BPF_MTAP(ifp, m_head);
2815	}
2816
2817	if (enq > 0) {
2818		/* Transmit */
2819		CSR_WRITE_2(sc_if->msk_softc,
2820		    Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
2821		    sc_if->msk_cdata.msk_tx_prod);
2822
2823		/* Set a timeout in case the chip goes out to lunch. */
2824		sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT;
2825	}
2826
2827	MSK_IF_UNLOCK(sc_if);
2828}
2829
2830static void
2831msk_watchdog(struct msk_if_softc *sc_if)
2832{
2833	struct ifnet *ifp;
2834	uint32_t ridx;
2835	int idx;
2836
2837	MSK_IF_LOCK_ASSERT(sc_if);
2838
2839	if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer)
2840		return;
2841	ifp = sc_if->msk_ifp;
2842	if (sc_if->msk_link == 0) {
2843		if (bootverbose)
2844			if_printf(sc_if->msk_ifp, "watchdog timeout "
2845			   "(missed link)\n");
2846		ifp->if_oerrors++;
2847		msk_init_locked(sc_if);
2848		return;
2849	}
2850
2851	/*
2852	 * Reclaim first as there is a possibility of losing Tx completion
2853	 * interrupts.
2854	 */
2855	ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX;
2856	idx = CSR_READ_2(sc_if->msk_softc, ridx);
2857	if (sc_if->msk_cdata.msk_tx_cons != idx) {
2858		msk_txeof(sc_if, idx);
2859		if (sc_if->msk_cdata.msk_tx_cnt == 0) {
2860			if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2861			    "-- recovering\n");
2862			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2863				taskqueue_enqueue(taskqueue_fast,
2864				    &sc_if->msk_tx_task);
2865			return;
2866		}
2867	}
2868
2869	if_printf(ifp, "watchdog timeout\n");
2870	ifp->if_oerrors++;
2871	msk_init_locked(sc_if);
2872	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2873		taskqueue_enqueue(taskqueue_fast, &sc_if->msk_tx_task);
2874}
2875
2876static int
2877mskc_shutdown(device_t dev)
2878{
2879	struct msk_softc *sc;
2880	int i;
2881
2882	sc = device_get_softc(dev);
2883	MSK_LOCK(sc);
2884	for (i = 0; i < sc->msk_num_port; i++) {
2885		if (sc->msk_if[i] != NULL)
2886			msk_stop(sc->msk_if[i]);
2887	}
2888
2889	/* Disable all interrupts. */
2890	CSR_WRITE_4(sc, B0_IMSK, 0);
2891	CSR_READ_4(sc, B0_IMSK);
2892	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2893	CSR_READ_4(sc, B0_HWE_IMSK);
2894
2895	/* Put hardware reset. */
2896	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2897
2898	MSK_UNLOCK(sc);
2899	return (0);
2900}
2901
2902static int
2903mskc_suspend(device_t dev)
2904{
2905	struct msk_softc *sc;
2906	int i;
2907
2908	sc = device_get_softc(dev);
2909
2910	MSK_LOCK(sc);
2911
2912	for (i = 0; i < sc->msk_num_port; i++) {
2913		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2914		    ((sc->msk_if[i]->msk_ifp->if_drv_flags &
2915		    IFF_DRV_RUNNING) != 0))
2916			msk_stop(sc->msk_if[i]);
2917	}
2918
2919	/* Disable all interrupts. */
2920	CSR_WRITE_4(sc, B0_IMSK, 0);
2921	CSR_READ_4(sc, B0_IMSK);
2922	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2923	CSR_READ_4(sc, B0_HWE_IMSK);
2924
2925	msk_phy_power(sc, MSK_PHY_POWERDOWN);
2926
2927	/* Put hardware reset. */
2928	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2929	sc->msk_suspended = 1;
2930
2931	MSK_UNLOCK(sc);
2932
2933	return (0);
2934}
2935
2936static int
2937mskc_resume(device_t dev)
2938{
2939	struct msk_softc *sc;
2940	int i;
2941
2942	sc = device_get_softc(dev);
2943
2944	MSK_LOCK(sc);
2945
2946	mskc_reset(sc);
2947	for (i = 0; i < sc->msk_num_port; i++) {
2948		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2949		    ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0))
2950			msk_init_locked(sc->msk_if[i]);
2951	}
2952	sc->msk_suspended = 0;
2953
2954	MSK_UNLOCK(sc);
2955
2956	return (0);
2957}
2958
2959#ifndef __NO_STRICT_ALIGNMENT
2960static __inline void
2961msk_fixup_rx(struct mbuf *m)
2962{
2963        int i;
2964        uint16_t *src, *dst;
2965
2966	src = mtod(m, uint16_t *);
2967	dst = src - 3;
2968
2969	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
2970		*dst++ = *src++;
2971
2972	m->m_data -= (MSK_RX_BUF_ALIGN - ETHER_ALIGN);
2973}
2974#endif
2975
2976static void
2977msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
2978{
2979	struct mbuf *m;
2980	struct ifnet *ifp;
2981	struct msk_rxdesc *rxd;
2982	int cons, rxlen;
2983
2984	ifp = sc_if->msk_ifp;
2985
2986	MSK_IF_LOCK_ASSERT(sc_if);
2987
2988	cons = sc_if->msk_cdata.msk_rx_cons;
2989	do {
2990		rxlen = status >> 16;
2991		if ((status & GMR_FS_VLAN) != 0 &&
2992		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2993			rxlen -= ETHER_VLAN_ENCAP_LEN;
2994		if (len > sc_if->msk_framesize ||
2995		    ((status & GMR_FS_ANY_ERR) != 0) ||
2996		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
2997			/* Don't count flow-control packet as errors. */
2998			if ((status & GMR_FS_GOOD_FC) == 0)
2999				ifp->if_ierrors++;
3000			msk_discard_rxbuf(sc_if, cons);
3001			break;
3002		}
3003		rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
3004		m = rxd->rx_m;
3005		if (msk_newbuf(sc_if, cons) != 0) {
3006			ifp->if_iqdrops++;
3007			/* Reuse old buffer. */
3008			msk_discard_rxbuf(sc_if, cons);
3009			break;
3010		}
3011		m->m_pkthdr.rcvif = ifp;
3012		m->m_pkthdr.len = m->m_len = len;
3013#ifndef __NO_STRICT_ALIGNMENT
3014		if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
3015			msk_fixup_rx(m);
3016#endif
3017		ifp->if_ipackets++;
3018		/* Check for VLAN tagged packets. */
3019		if ((status & GMR_FS_VLAN) != 0 &&
3020		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3021			m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
3022			m->m_flags |= M_VLANTAG;
3023		}
3024		MSK_IF_UNLOCK(sc_if);
3025		(*ifp->if_input)(ifp, m);
3026		MSK_IF_LOCK(sc_if);
3027	} while (0);
3028
3029	MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
3030	MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
3031}
3032
3033static void
3034msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
3035{
3036	struct mbuf *m;
3037	struct ifnet *ifp;
3038	struct msk_rxdesc *jrxd;
3039	int cons, rxlen;
3040
3041	ifp = sc_if->msk_ifp;
3042
3043	MSK_IF_LOCK_ASSERT(sc_if);
3044
3045	cons = sc_if->msk_cdata.msk_rx_cons;
3046	do {
3047		rxlen = status >> 16;
3048		if ((status & GMR_FS_VLAN) != 0 &&
3049		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3050			rxlen -= ETHER_VLAN_ENCAP_LEN;
3051		if (len > sc_if->msk_framesize ||
3052		    ((status & GMR_FS_ANY_ERR) != 0) ||
3053		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
3054			/* Don't count flow-control packet as errors. */
3055			if ((status & GMR_FS_GOOD_FC) == 0)
3056				ifp->if_ierrors++;
3057			msk_discard_jumbo_rxbuf(sc_if, cons);
3058			break;
3059		}
3060		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
3061		m = jrxd->rx_m;
3062		if (msk_jumbo_newbuf(sc_if, cons) != 0) {
3063			ifp->if_iqdrops++;
3064			/* Reuse old buffer. */
3065			msk_discard_jumbo_rxbuf(sc_if, cons);
3066			break;
3067		}
3068		m->m_pkthdr.rcvif = ifp;
3069		m->m_pkthdr.len = m->m_len = len;
3070#ifndef __NO_STRICT_ALIGNMENT
3071		if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
3072			msk_fixup_rx(m);
3073#endif
3074		ifp->if_ipackets++;
3075		/* Check for VLAN tagged packets. */
3076		if ((status & GMR_FS_VLAN) != 0 &&
3077		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3078			m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
3079			m->m_flags |= M_VLANTAG;
3080		}
3081		MSK_IF_UNLOCK(sc_if);
3082		(*ifp->if_input)(ifp, m);
3083		MSK_IF_LOCK(sc_if);
3084	} while (0);
3085
3086	MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
3087	MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
3088}
3089
3090static void
3091msk_txeof(struct msk_if_softc *sc_if, int idx)
3092{
3093	struct msk_txdesc *txd;
3094	struct msk_tx_desc *cur_tx;
3095	struct ifnet *ifp;
3096	uint32_t control;
3097	int cons, prog;
3098
3099	MSK_IF_LOCK_ASSERT(sc_if);
3100
3101	ifp = sc_if->msk_ifp;
3102
3103	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
3104	    sc_if->msk_cdata.msk_tx_ring_map,
3105	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3106	/*
3107	 * Go through our tx ring and free mbufs for those
3108	 * frames that have been sent.
3109	 */
3110	cons = sc_if->msk_cdata.msk_tx_cons;
3111	prog = 0;
3112	for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
3113		if (sc_if->msk_cdata.msk_tx_cnt <= 0)
3114			break;
3115		prog++;
3116		cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
3117		control = le32toh(cur_tx->msk_control);
3118		sc_if->msk_cdata.msk_tx_cnt--;
3119		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3120		if ((control & EOP) == 0)
3121			continue;
3122		txd = &sc_if->msk_cdata.msk_txdesc[cons];
3123		bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap,
3124		    BUS_DMASYNC_POSTWRITE);
3125		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);
3126
3127		ifp->if_opackets++;
3128		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
3129		    __func__));
3130		m_freem(txd->tx_m);
3131		txd->tx_m = NULL;
3132	}
3133
3134	if (prog > 0) {
3135		sc_if->msk_cdata.msk_tx_cons = cons;
3136		if (sc_if->msk_cdata.msk_tx_cnt == 0)
3137			sc_if->msk_watchdog_timer = 0;
3138		/* No need to sync LEs as we didn't update LEs. */
3139	}
3140}
3141
3142static void
3143msk_tick(void *xsc_if)
3144{
3145	struct msk_if_softc *sc_if;
3146	struct mii_data *mii;
3147
3148	sc_if = xsc_if;
3149
3150	MSK_IF_LOCK_ASSERT(sc_if);
3151
3152	mii = device_get_softc(sc_if->msk_miibus);
3153
3154	mii_tick(mii);
3155	msk_watchdog(sc_if);
3156	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3157}
3158
3159static void
3160msk_intr_phy(struct msk_if_softc *sc_if)
3161{
3162	uint16_t status;
3163
3164	msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3165	status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3166	/* Handle FIFO Underrun/Overflow? */
3167	if ((status & PHY_M_IS_FIFO_ERROR))
3168		device_printf(sc_if->msk_if_dev,
3169		    "PHY FIFO underrun/overflow.\n");
3170}
3171
3172static void
3173msk_intr_gmac(struct msk_if_softc *sc_if)
3174{
3175	struct msk_softc *sc;
3176	uint8_t status;
3177
3178	sc = sc_if->msk_softc;
3179	status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3180
3181	/* GMAC Rx FIFO overrun. */
3182	if ((status & GM_IS_RX_FF_OR) != 0) {
3183		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3184		    GMF_CLI_RX_FO);
3185		device_printf(sc_if->msk_if_dev, "Rx FIFO overrun!\n");
3186	}
3187	/* GMAC Tx FIFO underrun. */
3188	if ((status & GM_IS_TX_FF_UR) != 0) {
3189		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3190		    GMF_CLI_TX_FU);
3191		device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
3192		/*
3193		 * XXX
3194		 * In case of Tx underrun, we may need to flush/reset
3195		 * Tx MAC but that would also require resynchronization
3196		 * with status LEs. Reintializing status LEs would
3197		 * affect other port in dual MAC configuration so it
3198		 * should be avoided as possible as we can.
3199		 * Due to lack of documentation it's all vague guess but
3200		 * it needs more investigation.
3201		 */
3202	}
3203}
3204
3205static void
3206msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
3207{
3208	struct msk_softc *sc;
3209
3210	sc = sc_if->msk_softc;
3211	if ((status & Y2_IS_PAR_RD1) != 0) {
3212		device_printf(sc_if->msk_if_dev,
3213		    "RAM buffer read parity error\n");
3214		/* Clear IRQ. */
3215		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3216		    RI_CLR_RD_PERR);
3217	}
3218	if ((status & Y2_IS_PAR_WR1) != 0) {
3219		device_printf(sc_if->msk_if_dev,
3220		    "RAM buffer write parity error\n");
3221		/* Clear IRQ. */
3222		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3223		    RI_CLR_WR_PERR);
3224	}
3225	if ((status & Y2_IS_PAR_MAC1) != 0) {
3226		device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
3227		/* Clear IRQ. */
3228		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3229		    GMF_CLI_TX_PE);
3230	}
3231	if ((status & Y2_IS_PAR_RX1) != 0) {
3232		device_printf(sc_if->msk_if_dev, "Rx parity error\n");
3233		/* Clear IRQ. */
3234		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
3235	}
3236	if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
3237		device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
3238		/* Clear IRQ. */
3239		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
3240	}
3241}
3242
3243static void
3244msk_intr_hwerr(struct msk_softc *sc)
3245{
3246	uint32_t status;
3247	uint32_t tlphead[4];
3248
3249	status = CSR_READ_4(sc, B0_HWE_ISRC);
3250	/* Time Stamp timer overflow. */
3251	if ((status & Y2_IS_TIST_OV) != 0)
3252		CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
3253	if ((status & Y2_IS_PCI_NEXP) != 0) {
3254		/*
3255		 * PCI Express Error occured which is not described in PEX
3256		 * spec.
3257		 * This error is also mapped either to Master Abort(
3258		 * Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
3259		 * can only be cleared there.
3260                 */
3261		device_printf(sc->msk_dev,
3262		    "PCI Express protocol violation error\n");
3263	}
3264
3265	if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
3266		uint16_t v16;
3267
3268		if ((status & Y2_IS_MST_ERR) != 0)
3269			device_printf(sc->msk_dev,
3270			    "unexpected IRQ Status error\n");
3271		else
3272			device_printf(sc->msk_dev,
3273			    "unexpected IRQ Master error\n");
3274		/* Reset all bits in the PCI status register. */
3275		v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
3276		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3277		pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
3278		    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
3279		    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
3280		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3281	}
3282
3283	/* Check for PCI Express Uncorrectable Error. */
3284	if ((status & Y2_IS_PCI_EXP) != 0) {
3285		uint32_t v32;
3286
3287		/*
3288		 * On PCI Express bus bridges are called root complexes (RC).
3289		 * PCI Express errors are recognized by the root complex too,
3290		 * which requests the system to handle the problem. After
3291		 * error occurence it may be that no access to the adapter
3292		 * may be performed any longer.
3293		 */
3294
3295		v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
3296		if ((v32 & PEX_UNSUP_REQ) != 0) {
3297			/* Ignore unsupported request error. */
3298			device_printf(sc->msk_dev,
3299			    "Uncorrectable PCI Express error\n");
3300		}
3301		if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
3302			int i;
3303
3304			/* Get TLP header form Log Registers. */
3305			for (i = 0; i < 4; i++)
3306				tlphead[i] = CSR_PCI_READ_4(sc,
3307				    PEX_HEADER_LOG + i * 4);
3308			/* Check for vendor defined broadcast message. */
3309			if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
3310				sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
3311				CSR_WRITE_4(sc, B0_HWE_IMSK,
3312				    sc->msk_intrhwemask);
3313				CSR_READ_4(sc, B0_HWE_IMSK);
3314			}
3315		}
3316		/* Clear the interrupt. */
3317		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3318		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
3319		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3320	}
3321
3322	if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
3323		msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
3324	if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
3325		msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
3326}
3327
3328static __inline void
3329msk_rxput(struct msk_if_softc *sc_if)
3330{
3331	struct msk_softc *sc;
3332
3333	sc = sc_if->msk_softc;
3334	if (sc_if->msk_framesize >(MCLBYTES - ETHER_HDR_LEN))
3335		bus_dmamap_sync(
3336		    sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
3337		    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
3338		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3339	else
3340		bus_dmamap_sync(
3341		    sc_if->msk_cdata.msk_rx_ring_tag,
3342		    sc_if->msk_cdata.msk_rx_ring_map,
3343		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3344	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
3345	    PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
3346}
3347
3348static int
3349msk_handle_events(struct msk_softc *sc)
3350{
3351	struct msk_if_softc *sc_if;
3352	int rxput[2];
3353	struct msk_stat_desc *sd;
3354	uint32_t control, status;
3355	int cons, idx, len, port, rxprog;
3356
3357	idx = CSR_READ_2(sc, STAT_PUT_IDX);
3358	if (idx == sc->msk_stat_cons)
3359		return (0);
3360
3361	/* Sync status LEs. */
3362	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
3363	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3364	/* XXX Sync Rx LEs here. */
3365
3366	rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
3367
3368	rxprog = 0;
3369	for (cons = sc->msk_stat_cons; cons != idx;) {
3370		sd = &sc->msk_stat_ring[cons];
3371		control = le32toh(sd->msk_control);
3372		if ((control & HW_OWNER) == 0)
3373			break;
3374		/*
3375		 * Marvell's FreeBSD driver updates status LE after clearing
3376		 * HW_OWNER. However we don't have a way to sync single LE
3377		 * with bus_dma(9) API. bus_dma(9) provides a way to sync
3378		 * an entire DMA map. So don't sync LE until we have a better
3379		 * way to sync LEs.
3380		 */
3381		control &= ~HW_OWNER;
3382		sd->msk_control = htole32(control);
3383		status = le32toh(sd->msk_status);
3384		len = control & STLE_LEN_MASK;
3385		port = (control >> 16) & 0x01;
3386		sc_if = sc->msk_if[port];
3387		if (sc_if == NULL) {
3388			device_printf(sc->msk_dev, "invalid port opcode "
3389			    "0x%08x\n", control & STLE_OP_MASK);
3390			continue;
3391		}
3392
3393		switch (control & STLE_OP_MASK) {
3394		case OP_RXVLAN:
3395			sc_if->msk_vtag = ntohs(len);
3396			break;
3397		case OP_RXCHKSVLAN:
3398			sc_if->msk_vtag = ntohs(len);
3399			break;
3400		case OP_RXSTAT:
3401			if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN))
3402				msk_jumbo_rxeof(sc_if, status, len);
3403			else
3404				msk_rxeof(sc_if, status, len);
3405			rxprog++;
3406			/*
3407			 * Because there is no way to sync single Rx LE
3408			 * put the DMA sync operation off until the end of
3409			 * event processing.
3410			 */
3411			rxput[port]++;
3412			/* Update prefetch unit if we've passed water mark. */
3413			if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
3414				msk_rxput(sc_if);
3415				rxput[port] = 0;
3416			}
3417			break;
3418		case OP_TXINDEXLE:
3419			if (sc->msk_if[MSK_PORT_A] != NULL)
3420				msk_txeof(sc->msk_if[MSK_PORT_A],
3421				    status & STLE_TXA1_MSKL);
3422			if (sc->msk_if[MSK_PORT_B] != NULL)
3423				msk_txeof(sc->msk_if[MSK_PORT_B],
3424				    ((status & STLE_TXA2_MSKL) >>
3425				    STLE_TXA2_SHIFTL) |
3426				    ((len & STLE_TXA2_MSKH) <<
3427				    STLE_TXA2_SHIFTH));
3428			break;
3429		default:
3430			device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
3431			    control & STLE_OP_MASK);
3432			break;
3433		}
3434		MSK_INC(cons, MSK_STAT_RING_CNT);
3435		if (rxprog > sc->msk_process_limit)
3436			break;
3437	}
3438
3439	sc->msk_stat_cons = cons;
3440	/* XXX We should sync status LEs here. See above notes. */
3441
3442	if (rxput[MSK_PORT_A] > 0)
3443		msk_rxput(sc->msk_if[MSK_PORT_A]);
3444	if (rxput[MSK_PORT_B] > 0)
3445		msk_rxput(sc->msk_if[MSK_PORT_B]);
3446
3447	return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
3448}
3449
3450/* Legacy interrupt handler for shared interrupt. */
3451static void
3452msk_legacy_intr(void *xsc)
3453{
3454	struct msk_softc *sc;
3455	struct msk_if_softc *sc_if0, *sc_if1;
3456	struct ifnet *ifp0, *ifp1;
3457	uint32_t status;
3458
3459	sc = xsc;
3460	MSK_LOCK(sc);
3461
3462	/* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
3463	status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3464	if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 ||
3465	    (status & sc->msk_intrmask) == 0) {
3466		CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3467		return;
3468	}
3469
3470	sc_if0 = sc->msk_if[MSK_PORT_A];
3471	sc_if1 = sc->msk_if[MSK_PORT_B];
3472	ifp0 = ifp1 = NULL;
3473	if (sc_if0 != NULL)
3474		ifp0 = sc_if0->msk_ifp;
3475	if (sc_if1 != NULL)
3476		ifp1 = sc_if1->msk_ifp;
3477
3478	if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3479		msk_intr_phy(sc_if0);
3480	if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3481		msk_intr_phy(sc_if1);
3482	if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3483		msk_intr_gmac(sc_if0);
3484	if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3485		msk_intr_gmac(sc_if1);
3486	if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3487		device_printf(sc->msk_dev, "Rx descriptor error\n");
3488		sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3489		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3490		CSR_READ_4(sc, B0_IMSK);
3491	}
3492        if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3493		device_printf(sc->msk_dev, "Tx descriptor error\n");
3494		sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3495		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3496		CSR_READ_4(sc, B0_IMSK);
3497	}
3498	if ((status & Y2_IS_HW_ERR) != 0)
3499		msk_intr_hwerr(sc);
3500
3501	while (msk_handle_events(sc) != 0)
3502		;
3503	if ((status & Y2_IS_STAT_BMU) != 0)
3504		CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3505
3506	/* Reenable interrupts. */
3507	CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3508
3509	if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3510	    !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3511		taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task);
3512	if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3513	    !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3514		taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task);
3515
3516	MSK_UNLOCK(sc);
3517}
3518
3519static int
3520msk_intr(void *xsc)
3521{
3522	struct msk_softc *sc;
3523	uint32_t status;
3524
3525	sc = xsc;
3526	status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3527	/* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
3528	if (status == 0 || status == 0xffffffff) {
3529		CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3530		return (FILTER_STRAY);
3531	}
3532
3533	taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task);
3534	return (FILTER_HANDLED);
3535}
3536
3537static void
3538msk_int_task(void *arg, int pending)
3539{
3540	struct msk_softc *sc;
3541	struct msk_if_softc *sc_if0, *sc_if1;
3542	struct ifnet *ifp0, *ifp1;
3543	uint32_t status;
3544	int domore;
3545
3546	sc = arg;
3547	MSK_LOCK(sc);
3548
3549	/* Get interrupt source. */
3550	status = CSR_READ_4(sc, B0_ISRC);
3551	if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 ||
3552	    (status & sc->msk_intrmask) == 0)
3553		goto done;
3554
3555	sc_if0 = sc->msk_if[MSK_PORT_A];
3556	sc_if1 = sc->msk_if[MSK_PORT_B];
3557	ifp0 = ifp1 = NULL;
3558	if (sc_if0 != NULL)
3559		ifp0 = sc_if0->msk_ifp;
3560	if (sc_if1 != NULL)
3561		ifp1 = sc_if1->msk_ifp;
3562
3563	if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3564		msk_intr_phy(sc_if0);
3565	if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3566		msk_intr_phy(sc_if1);
3567	if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3568		msk_intr_gmac(sc_if0);
3569	if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3570		msk_intr_gmac(sc_if1);
3571	if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3572		device_printf(sc->msk_dev, "Rx descriptor error\n");
3573		sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3574		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3575		CSR_READ_4(sc, B0_IMSK);
3576	}
3577        if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3578		device_printf(sc->msk_dev, "Tx descriptor error\n");
3579		sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3580		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3581		CSR_READ_4(sc, B0_IMSK);
3582	}
3583	if ((status & Y2_IS_HW_ERR) != 0)
3584		msk_intr_hwerr(sc);
3585
3586	domore = msk_handle_events(sc);
3587	if ((status & Y2_IS_STAT_BMU) != 0)
3588		CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3589
3590	if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3591	    !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3592		taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task);
3593	if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3594	    !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3595		taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task);
3596
3597	if (domore > 0) {
3598		taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task);
3599		MSK_UNLOCK(sc);
3600		return;
3601	}
3602done:
3603	MSK_UNLOCK(sc);
3604
3605	/* Reenable interrupts. */
3606	CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3607}
3608
3609static void
3610msk_init(void *xsc)
3611{
3612	struct msk_if_softc *sc_if = xsc;
3613
3614	MSK_IF_LOCK(sc_if);
3615	msk_init_locked(sc_if);
3616	MSK_IF_UNLOCK(sc_if);
3617}
3618
3619static void
3620msk_init_locked(struct msk_if_softc *sc_if)
3621{
3622	struct msk_softc *sc;
3623	struct ifnet *ifp;
3624	struct mii_data	 *mii;
3625	uint16_t eaddr[ETHER_ADDR_LEN / 2];
3626	uint16_t gmac;
3627	int error, i;
3628
3629	MSK_IF_LOCK_ASSERT(sc_if);
3630
3631	ifp = sc_if->msk_ifp;
3632	sc = sc_if->msk_softc;
3633	mii = device_get_softc(sc_if->msk_miibus);
3634
3635	error = 0;
3636	/* Cancel pending I/O and free all Rx/Tx buffers. */
3637	msk_stop(sc_if);
3638
3639	sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN +
3640	    ETHER_VLAN_ENCAP_LEN;
3641	if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
3642	    sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
3643		/*
3644		 * In Yukon EC Ultra, TSO & checksum offload is not
3645		 * supported for jumbo frame.
3646		 */
3647		ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
3648		ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
3649	}
3650
3651	/*
3652	 * Initialize GMAC first.
3653	 * Without this initialization, Rx MAC did not work as expected
3654	 * and Rx MAC garbled status LEs and it resulted in out-of-order
3655	 * or duplicated frame delivery which in turn showed very poor
3656	 * Rx performance.(I had to write a packet analysis code that
3657	 * could be embeded in driver to diagnose this issue.)
3658	 * I've spent almost 2 months to fix this issue. If I have had
3659	 * datasheet for Yukon II I wouldn't have encountered this. :-(
3660	 */
3661	gmac = GM_GPCR_SPEED_100 | GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL;
3662	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
3663
3664	/* Dummy read the Interrupt Source Register. */
3665	CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3666
3667	/* Set MIB Clear Counter Mode. */
3668	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
3669	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
3670	/* Read all MIB Counters with Clear Mode set. */
3671	for (i = 0; i < GM_MIB_CNT_SIZE; i++)
3672		GMAC_READ_2(sc, sc_if->msk_port, GM_MIB_CNT_BASE + 8 * i);
3673	/* Clear MIB Clear Counter Mode. */
3674	gmac &= ~GM_PAR_MIB_CLR;
3675	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
3676
3677	/* Disable FCS. */
3678	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);
3679
3680	/* Setup Transmit Control Register. */
3681	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
3682
3683	/* Setup Transmit Flow Control Register. */
3684	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
3685
3686	/* Setup Transmit Parameter Register. */
3687	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
3688	    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
3689	    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
3690
3691	gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
3692	    GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
3693
3694	if (sc_if->msk_framesize > MSK_MAX_FRAMELEN)
3695		gmac |= GM_SMOD_JUMBO_ENA;
3696	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
3697
3698	/* Set station address. */
3699        bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
3700        for (i = 0; i < ETHER_ADDR_LEN /2; i++)
3701		GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4,
3702		    eaddr[i]);
3703        for (i = 0; i < ETHER_ADDR_LEN /2; i++)
3704		GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4,
3705		    eaddr[i]);
3706
3707	/* Disable interrupts for counter overflows. */
3708	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
3709	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
3710	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
3711
3712	/* Configure Rx MAC FIFO. */
3713	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3714	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
3715	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3716	    GMF_OPER_ON | GMF_RX_F_FL_ON);
3717
3718	/* Set promiscuous mode. */
3719	msk_setpromisc(sc_if);
3720
3721	/* Set multicast filter. */
3722	msk_setmulti(sc_if);
3723
3724	/* Flush Rx MAC FIFO on any flow control or error. */
3725	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
3726	    GMR_FS_ANY_ERR);
3727
3728	/*
3729	 * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
3730	 * due to hardware hang on receipt of pause frames.
3731	 */
3732	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR),
3733	    RX_GMF_FL_THR_DEF + 1);
3734
3735	/* Configure Tx MAC FIFO. */
3736	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3737	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
3738	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
3739
3740	/* Configure hardware VLAN tag insertion/stripping. */
3741	msk_setvlan(sc_if, ifp);
3742
3743	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
3744		/* Set Rx Pause threshould. */
3745		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
3746		    MSK_ECU_LLPP);
3747		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
3748		    MSK_ECU_ULPP);
3749		if (sc_if->msk_framesize > MSK_MAX_FRAMELEN) {
3750			/*
3751			 * Set Tx GMAC FIFO Almost Empty Threshold.
3752			 */
3753			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
3754			    MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
3755			/* Disable Store & Forward mode for Tx. */
3756			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3757			    TX_JUMBO_ENA | TX_STFW_DIS);
3758		} else {
3759			/* Enable Store & Forward mode for Tx. */
3760			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3761			    TX_JUMBO_DIS | TX_STFW_ENA);
3762		}
3763	}
3764
3765	/*
3766	 * Disable Force Sync bit and Alloc bit in Tx RAM interface
3767	 * arbiter as we don't use Sync Tx queue.
3768	 */
3769	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
3770	    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
3771	/* Enable the RAM Interface Arbiter. */
3772	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
3773
3774	/* Setup RAM buffer. */
3775	msk_set_rambuffer(sc_if);
3776
3777	/* Disable Tx sync Queue. */
3778	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
3779
3780	/* Setup Tx Queue Bus Memory Interface. */
3781	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
3782	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
3783	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
3784	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
3785	if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3786	    sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
3787		/* Fix for Yukon-EC Ultra: set BMU FIFO level */
3788		CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), MSK_ECU_TXFF_LEV);
3789	}
3790
3791	/* Setup Rx Queue Bus Memory Interface. */
3792	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
3793	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
3794	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
3795	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
3796        if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3797	    sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
3798		/* MAC Rx RAM Read is controlled by hardware. */
3799                CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
3800	}
3801
3802	msk_set_prefetch(sc, sc_if->msk_txq,
3803	    sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
3804	msk_init_tx_ring(sc_if);
3805
3806	/* Disable Rx checksum offload and RSS hash. */
3807	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3808	    BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);
3809	if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
3810		msk_set_prefetch(sc, sc_if->msk_rxq,
3811		    sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
3812		    MSK_JUMBO_RX_RING_CNT - 1);
3813		error = msk_init_jumbo_rx_ring(sc_if);
3814	 } else {
3815		msk_set_prefetch(sc, sc_if->msk_rxq,
3816		    sc_if->msk_rdata.msk_rx_ring_paddr,
3817		    MSK_RX_RING_CNT - 1);
3818		error = msk_init_rx_ring(sc_if);
3819	}
3820	if (error != 0) {
3821		device_printf(sc_if->msk_if_dev,
3822		    "initialization failed: no memory for Rx buffers\n");
3823		msk_stop(sc_if);
3824		return;
3825	}
3826
3827	/* Configure interrupt handling. */
3828	if (sc_if->msk_port == MSK_PORT_A) {
3829		sc->msk_intrmask |= Y2_IS_PORT_A;
3830		sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
3831	} else {
3832		sc->msk_intrmask |= Y2_IS_PORT_B;
3833		sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
3834	}
3835	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3836	CSR_READ_4(sc, B0_HWE_IMSK);
3837	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3838	CSR_READ_4(sc, B0_IMSK);
3839
3840	sc_if->msk_link = 0;
3841	mii_mediachg(mii);
3842
3843	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3844	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3845
3846	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3847}
3848
3849static void
3850msk_set_rambuffer(struct msk_if_softc *sc_if)
3851{
3852	struct msk_softc *sc;
3853	int ltpp, utpp;
3854
3855	sc = sc_if->msk_softc;
3856	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
3857		return;
3858
3859	/* Setup Rx Queue. */
3860	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
3861	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
3862	    sc->msk_rxqstart[sc_if->msk_port] / 8);
3863	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
3864	    sc->msk_rxqend[sc_if->msk_port] / 8);
3865	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
3866	    sc->msk_rxqstart[sc_if->msk_port] / 8);
3867	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
3868	    sc->msk_rxqstart[sc_if->msk_port] / 8);
3869
3870	utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3871	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
3872	ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3873	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
3874	if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
3875		ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
3876	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
3877	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
3878	/* Set Rx priority(RB_RX_UTHP/RB_RX_LTHP) thresholds? */
3879
3880	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
3881	CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));
3882
3883	/* Setup Tx Queue. */
3884	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
3885	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
3886	    sc->msk_txqstart[sc_if->msk_port] / 8);
3887	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
3888	    sc->msk_txqend[sc_if->msk_port] / 8);
3889	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
3890	    sc->msk_txqstart[sc_if->msk_port] / 8);
3891	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
3892	    sc->msk_txqstart[sc_if->msk_port] / 8);
3893	/* Enable Store & Forward for Tx side. */
3894	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
3895	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
3896	CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
3897}
3898
3899static void
3900msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
3901    uint32_t count)
3902{
3903
3904	/* Reset the prefetch unit. */
3905	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3906	    PREF_UNIT_RST_SET);
3907	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3908	    PREF_UNIT_RST_CLR);
3909	/* Set LE base address. */
3910	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
3911	    MSK_ADDR_LO(addr));
3912	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
3913	    MSK_ADDR_HI(addr));
3914	/* Set the list last index. */
3915	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
3916	    count);
3917	/* Turn on prefetch unit. */
3918	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3919	    PREF_UNIT_OP_ON);
3920	/* Dummy read to ensure write. */
3921	CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
3922}
3923
3924static void
3925msk_stop(struct msk_if_softc *sc_if)
3926{
3927	struct msk_softc *sc;
3928	struct msk_txdesc *txd;
3929	struct msk_rxdesc *rxd;
3930	struct msk_rxdesc *jrxd;
3931	struct ifnet *ifp;
3932	uint32_t val;
3933	int i;
3934
3935	MSK_IF_LOCK_ASSERT(sc_if);
3936	sc = sc_if->msk_softc;
3937	ifp = sc_if->msk_ifp;
3938
3939	callout_stop(&sc_if->msk_tick_ch);
3940	sc_if->msk_watchdog_timer = 0;
3941
3942	/* Disable interrupts. */
3943	if (sc_if->msk_port == MSK_PORT_A) {
3944		sc->msk_intrmask &= ~Y2_IS_PORT_A;
3945		sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
3946	} else {
3947		sc->msk_intrmask &= ~Y2_IS_PORT_B;
3948		sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
3949	}
3950	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3951	CSR_READ_4(sc, B0_HWE_IMSK);
3952	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3953	CSR_READ_4(sc, B0_IMSK);
3954
3955	/* Disable Tx/Rx MAC. */
3956	val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3957	val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
3958	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
3959	/* Read again to ensure writing. */
3960	GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3961
3962	/* Stop Tx BMU. */
3963	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
3964	val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
3965	for (i = 0; i < MSK_TIMEOUT; i++) {
3966		if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
3967			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3968			    BMU_STOP);
3969			val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
3970		} else
3971			break;
3972		DELAY(1);
3973	}
3974	if (i == MSK_TIMEOUT)
3975		device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
3976	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
3977	    RB_RST_SET | RB_DIS_OP_MD);
3978
3979	/* Disable all GMAC interrupt. */
3980	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
3981	/* Disable PHY interrupt. */
3982	msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
3983
3984	/* Disable the RAM Interface Arbiter. */
3985	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);
3986
3987	/* Reset the PCI FIFO of the async Tx queue */
3988	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3989	    BMU_RST_SET | BMU_FIFO_RST);
3990
3991	/* Reset the Tx prefetch units. */
3992	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
3993	    PREF_UNIT_RST_SET);
3994
3995	/* Reset the RAM Buffer async Tx queue. */
3996	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);
3997
3998	/* Reset Tx MAC FIFO. */
3999	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
4000	/* Set Pause Off. */
4001	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
4002
4003	/*
4004	 * The Rx Stop command will not work for Yukon-2 if the BMU does not
4005	 * reach the end of packet and since we can't make sure that we have
4006	 * incoming data, we must reset the BMU while it is not during a DMA
4007	 * transfer. Since it is possible that the Rx path is still active,
4008	 * the Rx RAM buffer will be stopped first, so any possible incoming
4009	 * data will not trigger a DMA. After the RAM buffer is stopped, the
4010	 * BMU is polled until any DMA in progress is ended and only then it
4011	 * will be reset.
4012	 */
4013
4014	/* Disable the RAM Buffer receive queue. */
4015	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
4016	for (i = 0; i < MSK_TIMEOUT; i++) {
4017		if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
4018		    CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
4019			break;
4020		DELAY(1);
4021	}
4022	if (i == MSK_TIMEOUT)
4023		device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
4024	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
4025	    BMU_RST_SET | BMU_FIFO_RST);
4026	/* Reset the Rx prefetch unit. */
4027	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
4028	    PREF_UNIT_RST_SET);
4029	/* Reset the RAM Buffer receive queue. */
4030	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
4031	/* Reset Rx MAC FIFO. */
4032	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
4033
4034	/* Free Rx and Tx mbufs still in the queues. */
4035	for (i = 0; i < MSK_RX_RING_CNT; i++) {
4036		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
4037		if (rxd->rx_m != NULL) {
4038			bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
4039			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
4040			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
4041			    rxd->rx_dmamap);
4042			m_freem(rxd->rx_m);
4043			rxd->rx_m = NULL;
4044		}
4045	}
4046	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
4047		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
4048		if (jrxd->rx_m != NULL) {
4049			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
4050			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
4051			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
4052			    jrxd->rx_dmamap);
4053			m_freem(jrxd->rx_m);
4054			jrxd->rx_m = NULL;
4055		}
4056	}
4057	for (i = 0; i < MSK_TX_RING_CNT; i++) {
4058		txd = &sc_if->msk_cdata.msk_txdesc[i];
4059		if (txd->tx_m != NULL) {
4060			bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
4061			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
4062			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
4063			    txd->tx_dmamap);
4064			m_freem(txd->tx_m);
4065			txd->tx_m = NULL;
4066		}
4067	}
4068
4069	/*
4070	 * Mark the interface down.
4071	 */
4072	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4073	sc_if->msk_link = 0;
4074}
4075
4076static int
4077sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
4078{
4079	int error, value;
4080
4081	if (!arg1)
4082		return (EINVAL);
4083	value = *(int *)arg1;
4084	error = sysctl_handle_int(oidp, &value, 0, req);
4085	if (error || !req->newptr)
4086		return (error);
4087	if (value < low || value > high)
4088		return (EINVAL);
4089	*(int *)arg1 = value;
4090
4091	return (0);
4092}
4093
4094static int
4095sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
4096{
4097
4098	return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
4099	    MSK_PROC_MAX));
4100}
4101