1/******************************************************************************
2 *
3 * Name   : sky2.c
4 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
5 * Version: $Revision: 1.23 $
6 * Date   : $Date: 2005/12/22 09:04:11 $
7 * Purpose: Main driver source file
8 *
9 *****************************************************************************/
10
11/******************************************************************************
12 *
13 *	LICENSE:
14 *	Copyright (C) Marvell International Ltd. and/or its affiliates
15 *
16 *	The computer program files contained in this folder ("Files")
17 *	are provided to you under the BSD-type license terms provided
18 *	below, and any use of such Files and any derivative works
19 *	thereof created by you shall be governed by the following terms
20 *	and conditions:
21 *
22 *	- Redistributions of source code must retain the above copyright
23 *	  notice, this list of conditions and the following disclaimer.
24 *	- Redistributions in binary form must reproduce the above
25 *	  copyright notice, this list of conditions and the following
26 *	  disclaimer in the documentation and/or other materials provided
27 *	  with the distribution.
28 *	- Neither the name of Marvell nor the names of its contributors
29 *	  may be used to endorse or promote products derived from this
30 *	  software without specific prior written permission.
31 *
32 *	THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
33 *	"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
34 *	LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 *	FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
36 *	COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
37 *	INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
38 *	BUT NOT LIMITED TO, PROCUREMENT OF  SUBSTITUTE GOODS OR SERVICES;
39 *	LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40 *	HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
41 *	STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42 *	ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
43 *	OF THE POSSIBILITY OF SUCH DAMAGE.
44 *	/LICENSE
45 *
46 *****************************************************************************/
47
48/*-
49 * Copyright (c) 1997, 1998, 1999, 2000
50 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
51 *
52 * Redistribution and use in source and binary forms, with or without
53 * modification, are permitted provided that the following conditions
54 * are met:
55 * 1. Redistributions of source code must retain the above copyright
56 *    notice, this list of conditions and the following disclaimer.
57 * 2. Redistributions in binary form must reproduce the above copyright
58 *    notice, this list of conditions and the following disclaimer in the
59 *    documentation and/or other materials provided with the distribution.
60 * 3. All advertising materials mentioning features or use of this software
61 *    must display the following acknowledgement:
62 *	This product includes software developed by Bill Paul.
63 * 4. Neither the name of the author nor the names of any co-contributors
64 *    may be used to endorse or promote products derived from this software
65 *    without specific prior written permission.
66 *
67 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
68 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
69 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
70 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
71 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
72 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
73 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
74 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
75 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
76 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
77 * THE POSSIBILITY OF SUCH DAMAGE.
78 */
79/*-
80 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
81 *
82 * Permission to use, copy, modify, and distribute this software for any
83 * purpose with or without fee is hereby granted, provided that the above
84 * copyright notice and this permission notice appear in all copies.
85 *
86 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
87 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
88 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
89 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
90 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
91 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
92 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
93 */
94
95/*
96 * Device driver for the Marvell Yukon II Ethernet controller.
97 * Due to lack of documentation, this driver is based on the code from
98 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
99 */
100
101#include <sys/cdefs.h>
102__FBSDID("$FreeBSD: head/sys/dev/msk/if_msk.c 196969 2009-09-08 13:16:55Z phk $");
103
104#include <sys/param.h>
105#include <sys/systm.h>
106#include <sys/bus.h>
107#include <sys/endian.h>
108#include <sys/mbuf.h>
109#include <sys/malloc.h>
110#include <sys/kernel.h>
111#include <sys/module.h>
112#include <sys/socket.h>
113#include <sys/sockio.h>
114#include <sys/queue.h>
115#include <sys/sysctl.h>
116#include <sys/taskqueue.h>
117
118#include <net/bpf.h>
119#include <net/ethernet.h>
120#include <net/if.h>
121#include <net/if_dl.h>
122#include <net/if_media.h>
123#include <net/if_types.h>
124#include <net/if_vlan_var.h>
125
126#include <netinet/in.h>
127#include <netinet/ip.h>
128#include <netinet/tcp.h>
129
130#include <machine/bus.h>
131#include <machine/in_cksum.h>
132#include <machine/resource.h>
133#include <sys/rman.h>
134
135#include <dev/mii/miivar.h>
136
137#include <dev/pci/pcireg.h>
138#include <dev/pci/pcivar.h>
139
140#include <dev/msk/if_mskreg.h>
141
142MODULE_DEPEND(msk, pci, 1, 1, 1);
143MODULE_DEPEND(msk, ether, 1, 1, 1);
144MODULE_DEPEND(msk, miibus, 1, 1, 1);
145
146/* "device miibus" required.  See GENERIC if you get errors here. */
147#include "miibus_if.h"
148
149/* Tunables. */
150static int msi_disable = 0;
151TUNABLE_INT("hw.msk.msi_disable", &msi_disable);
152static int legacy_intr = 0;
153TUNABLE_INT("hw.msk.legacy_intr", &legacy_intr);
154static int jumbo_disable = 0;
155TUNABLE_INT("hw.msk.jumbo_disable", &jumbo_disable);
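/*
 * Usage note (illustrative): these tunables are kernel environment
 * variables and can be set from loader.conf(5) before the driver loads,
 * e.g.:
 *	hw.msk.msi_disable="1"
 *	hw.msk.legacy_intr="1"
 *	hw.msk.jumbo_disable="1"
 */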
156
157#define MSK_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
158
159/*
160 * Devices supported by this driver.
161 */
162static struct msk_product {
163	uint16_t	msk_vendorid;
164	uint16_t	msk_deviceid;
165	const char	*msk_name;
166} msk_products[] = {
167	{ VENDORID_SK, DEVICEID_SK_YUKON2,
168	    "SK-9Sxx Gigabit Ethernet" },
169	{ VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
170	    "SK-9Exx Gigabit Ethernet"},
171	{ VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
172	    "Marvell Yukon 88E8021CU Gigabit Ethernet" },
173	{ VENDORID_MARVELL, DEVICEID_MRVL_8021X,
174	    "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
175	{ VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
176	    "Marvell Yukon 88E8022CU Gigabit Ethernet" },
177	{ VENDORID_MARVELL, DEVICEID_MRVL_8022X,
178	    "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
179	{ VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
180	    "Marvell Yukon 88E8061CU Gigabit Ethernet" },
181	{ VENDORID_MARVELL, DEVICEID_MRVL_8061X,
182	    "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
183	{ VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
184	    "Marvell Yukon 88E8062CU Gigabit Ethernet" },
185	{ VENDORID_MARVELL, DEVICEID_MRVL_8062X,
186	    "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
187	{ VENDORID_MARVELL, DEVICEID_MRVL_8035,
188	    "Marvell Yukon 88E8035 Fast Ethernet" },
189	{ VENDORID_MARVELL, DEVICEID_MRVL_8036,
190	    "Marvell Yukon 88E8036 Fast Ethernet" },
191	{ VENDORID_MARVELL, DEVICEID_MRVL_8038,
192	    "Marvell Yukon 88E8038 Fast Ethernet" },
193	{ VENDORID_MARVELL, DEVICEID_MRVL_8039,
194	    "Marvell Yukon 88E8039 Fast Ethernet" },
195	{ VENDORID_MARVELL, DEVICEID_MRVL_8040,
196	    "Marvell Yukon 88E8040 Fast Ethernet" },
197	{ VENDORID_MARVELL, DEVICEID_MRVL_8040T,
198	    "Marvell Yukon 88E8040T Fast Ethernet" },
199	{ VENDORID_MARVELL, DEVICEID_MRVL_8048,
200	    "Marvell Yukon 88E8048 Fast Ethernet" },
201	{ VENDORID_MARVELL, DEVICEID_MRVL_4361,
202	    "Marvell Yukon 88E8050 Gigabit Ethernet" },
203	{ VENDORID_MARVELL, DEVICEID_MRVL_4360,
204	    "Marvell Yukon 88E8052 Gigabit Ethernet" },
205	{ VENDORID_MARVELL, DEVICEID_MRVL_4362,
206	    "Marvell Yukon 88E8053 Gigabit Ethernet" },
207	{ VENDORID_MARVELL, DEVICEID_MRVL_4363,
208	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
209	{ VENDORID_MARVELL, DEVICEID_MRVL_4364,
210	    "Marvell Yukon 88E8056 Gigabit Ethernet" },
211	{ VENDORID_MARVELL, DEVICEID_MRVL_4365,
212	    "Marvell Yukon 88E8070 Gigabit Ethernet" },
213	{ VENDORID_MARVELL, DEVICEID_MRVL_436A,
214	    "Marvell Yukon 88E8058 Gigabit Ethernet" },
215	{ VENDORID_MARVELL, DEVICEID_MRVL_436B,
216	    "Marvell Yukon 88E8071 Gigabit Ethernet" },
217	{ VENDORID_MARVELL, DEVICEID_MRVL_436C,
218	    "Marvell Yukon 88E8072 Gigabit Ethernet" },
219	{ VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
220	    "D-Link 550SX Gigabit Ethernet" },
221	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
222	    "D-Link 560T Gigabit Ethernet" }
223};
224
225static const char *model_name[] = {
226	"Yukon XL",
227	"Yukon EC Ultra",
228	"Yukon EX",
229	"Yukon EC",
230	"Yukon FE",
231	"Yukon FE+"
232};
233
234static int mskc_probe(device_t);
235static int mskc_attach(device_t);
236static int mskc_detach(device_t);
237static int mskc_shutdown(device_t);
238static int mskc_setup_rambuffer(struct msk_softc *);
239static int mskc_suspend(device_t);
240static int mskc_resume(device_t);
241static void mskc_reset(struct msk_softc *);
242
243static int msk_probe(device_t);
244static int msk_attach(device_t);
245static int msk_detach(device_t);
246
247static void msk_tick(void *);
248static void msk_legacy_intr(void *);
249static int msk_intr(void *);
250static void msk_int_task(void *, int);
251static void msk_intr_phy(struct msk_if_softc *);
252static void msk_intr_gmac(struct msk_if_softc *);
253static __inline void msk_rxput(struct msk_if_softc *);
254static int msk_handle_events(struct msk_softc *);
255static void msk_handle_hwerr(struct msk_if_softc *, uint32_t);
256static void msk_intr_hwerr(struct msk_softc *);
257#ifndef __NO_STRICT_ALIGNMENT
258static __inline void msk_fixup_rx(struct mbuf *);
259#endif
260static void msk_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
261static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
262static void msk_txeof(struct msk_if_softc *, int);
263static int msk_encap(struct msk_if_softc *, struct mbuf **);
264static void msk_tx_task(void *, int);
265static void msk_start(struct ifnet *);
266static int msk_ioctl(struct ifnet *, u_long, caddr_t);
267static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
268static void msk_set_rambuffer(struct msk_if_softc *);
269static void msk_set_tx_stfwd(struct msk_if_softc *);
270static void msk_init(void *);
271static void msk_init_locked(struct msk_if_softc *);
272static void msk_stop(struct msk_if_softc *);
273static void msk_watchdog(struct msk_if_softc *);
274static int msk_mediachange(struct ifnet *);
275static void msk_mediastatus(struct ifnet *, struct ifmediareq *);
276static void msk_phy_power(struct msk_softc *, int);
277static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
278static int msk_status_dma_alloc(struct msk_softc *);
279static void msk_status_dma_free(struct msk_softc *);
280static int msk_txrx_dma_alloc(struct msk_if_softc *);
281static int msk_rx_dma_jalloc(struct msk_if_softc *);
282static void msk_txrx_dma_free(struct msk_if_softc *);
283static void msk_rx_dma_jfree(struct msk_if_softc *);
284static int msk_init_rx_ring(struct msk_if_softc *);
285static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
286static void msk_init_tx_ring(struct msk_if_softc *);
287static __inline void msk_discard_rxbuf(struct msk_if_softc *, int);
288static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
289static int msk_newbuf(struct msk_if_softc *, int);
290static int msk_jumbo_newbuf(struct msk_if_softc *, int);
291
292static int msk_phy_readreg(struct msk_if_softc *, int, int);
293static int msk_phy_writereg(struct msk_if_softc *, int, int, int);
294static int msk_miibus_readreg(device_t, int, int);
295static int msk_miibus_writereg(device_t, int, int, int);
296static void msk_miibus_statchg(device_t);
297
298static void msk_rxfilter(struct msk_if_softc *);
299static void msk_setvlan(struct msk_if_softc *, struct ifnet *);
300
301static void msk_stats_clear(struct msk_if_softc *);
302static void msk_stats_update(struct msk_if_softc *);
303static int msk_sysctl_stat32(SYSCTL_HANDLER_ARGS);
304static int msk_sysctl_stat64(SYSCTL_HANDLER_ARGS);
305static void msk_sysctl_node(struct msk_if_softc *);
306static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
307static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS);
308
309static device_method_t mskc_methods[] = {
310	/* Device interface */
311	DEVMETHOD(device_probe,		mskc_probe),
312	DEVMETHOD(device_attach,	mskc_attach),
313	DEVMETHOD(device_detach,	mskc_detach),
314	DEVMETHOD(device_suspend,	mskc_suspend),
315	DEVMETHOD(device_resume,	mskc_resume),
316	DEVMETHOD(device_shutdown,	mskc_shutdown),
317
318	/* bus interface */
319	DEVMETHOD(bus_print_child,	bus_generic_print_child),
320	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
321
322	{ NULL, NULL }
323};
324
325static driver_t mskc_driver = {
326	"mskc",
327	mskc_methods,
328	sizeof(struct msk_softc)
329};
330
331static devclass_t mskc_devclass;
332
333static device_method_t msk_methods[] = {
334	/* Device interface */
335	DEVMETHOD(device_probe,		msk_probe),
336	DEVMETHOD(device_attach,	msk_attach),
337	DEVMETHOD(device_detach,	msk_detach),
338	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
339
340	/* bus interface */
341	DEVMETHOD(bus_print_child,	bus_generic_print_child),
342	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
343
344	/* MII interface */
345	DEVMETHOD(miibus_readreg,	msk_miibus_readreg),
346	DEVMETHOD(miibus_writereg,	msk_miibus_writereg),
347	DEVMETHOD(miibus_statchg,	msk_miibus_statchg),
348
349	{ NULL, NULL }
350};
351
352static driver_t msk_driver = {
353	"msk",
354	msk_methods,
355	sizeof(struct msk_if_softc)
356};
357
358static devclass_t msk_devclass;
359
360DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, 0, 0);
361DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, 0, 0);
362DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, 0, 0);
363
364static struct resource_spec msk_res_spec_io[] = {
365	{ SYS_RES_IOPORT,	PCIR_BAR(1),	RF_ACTIVE },
366	{ -1,			0,		0 }
367};
368
369static struct resource_spec msk_res_spec_mem[] = {
370	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
371	{ -1,			0,		0 }
372};
373
374static struct resource_spec msk_irq_spec_legacy[] = {
375	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
376	{ -1,			0,		0 }
377};
378
379static struct resource_spec msk_irq_spec_msi[] = {
380	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
381	{ -1,			0,		0 }
382};
383
384static struct resource_spec msk_irq_spec_msi2[] = {
385	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
386	{ SYS_RES_IRQ,		2,		RF_ACTIVE },
387	{ -1,			0,		0 }
388};
389
390static int
391msk_miibus_readreg(device_t dev, int phy, int reg)
392{
393	struct msk_if_softc *sc_if;
394
395	if (phy != PHY_ADDR_MARV)
396		return (0);
397
398	sc_if = device_get_softc(dev);
399
400	return (msk_phy_readreg(sc_if, phy, reg));
401}
402
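/*
 * Read a PHY register through the GMAC's SMI interface: issue a read
 * command, then busy-wait (up to MSK_TIMEOUT iterations of DELAY(1))
 * for GM_SMI_CT_RD_VAL before fetching GM_SMI_DATA.
 */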
403static int
404msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
405{
406	struct msk_softc *sc;
407	int i, val;
408
409	sc = sc_if->msk_softc;
410
411	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
412	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
413
414	for (i = 0; i < MSK_TIMEOUT; i++) {
415		DELAY(1);
416		val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
417		if ((val & GM_SMI_CT_RD_VAL) != 0) {
418			val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
419			break;
420		}
421	}
422
423	if (i == MSK_TIMEOUT) {
424		if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
425		val = 0;
426	}
427
428	return (val);
429}
430
431static int
432msk_miibus_writereg(device_t dev, int phy, int reg, int val)
433{
434	struct msk_if_softc *sc_if;
435
436	if (phy != PHY_ADDR_MARV)
437		return (0);
438
439	sc_if = device_get_softc(dev);
440
441	return (msk_phy_writereg(sc_if, phy, reg, val));
442}
443
444static int
445msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
446{
447	struct msk_softc *sc;
448	int i;
449
450	sc = sc_if->msk_softc;
451
452	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
453	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
454	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
455	for (i = 0; i < MSK_TIMEOUT; i++) {
456		DELAY(1);
457		if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
458		    GM_SMI_CT_BUSY) == 0)
459			break;
460	}
461	if (i == MSK_TIMEOUT)
462		if_printf(sc_if->msk_ifp, "phy write timeout\n");
463
464	return (0);
465}
466
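/*
 * MII status change callback: mirror the PHY's resolved speed, duplex
 * and flow-control settings into the GMAC, enable the Rx/Tx MAC when a
 * usable link is present and disable it (and the PHY FIFO interrupts)
 * when the link goes down.
 */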
467static void
468msk_miibus_statchg(device_t dev)
469{
470	struct msk_softc *sc;
471	struct msk_if_softc *sc_if;
472	struct mii_data *mii;
473	struct ifnet *ifp;
474	uint32_t gmac;
475
476	sc_if = device_get_softc(dev);
477	sc = sc_if->msk_softc;
478
479	MSK_IF_LOCK_ASSERT(sc_if);
480
481	mii = device_get_softc(sc_if->msk_miibus);
482	ifp = sc_if->msk_ifp;
483	if (mii == NULL || ifp == NULL ||
484	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
485		return;
486
487	sc_if->msk_flags &= ~MSK_FLAG_LINK;
488	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
489	    (IFM_AVALID | IFM_ACTIVE)) {
490		switch (IFM_SUBTYPE(mii->mii_media_active)) {
491		case IFM_10_T:
492		case IFM_100_TX:
493			sc_if->msk_flags |= MSK_FLAG_LINK;
494			break;
495		case IFM_1000_T:
496		case IFM_1000_SX:
497		case IFM_1000_LX:
498		case IFM_1000_CX:
499			if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0)
500				sc_if->msk_flags |= MSK_FLAG_LINK;
501			break;
502		default:
503			break;
504		}
505	}
506
507	if ((sc_if->msk_flags & MSK_FLAG_LINK) != 0) {
508		/* Enable Tx FIFO Underrun. */
509		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
510		    GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
511		/*
512		 * Because mii(4) notifies msk(4) when it detects a link
513		 * status change, there is no need to enable automatic
514		 * speed/flow-control/duplex updates.
515		 */
516		gmac = GM_GPCR_AU_ALL_DIS;
517		switch (IFM_SUBTYPE(mii->mii_media_active)) {
518		case IFM_1000_SX:
519		case IFM_1000_T:
520			gmac |= GM_GPCR_SPEED_1000;
521			break;
522		case IFM_100_TX:
523			gmac |= GM_GPCR_SPEED_100;
524			break;
525		case IFM_10_T:
526			break;
527		}
528
529		if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
530			gmac |= GM_GPCR_DUP_FULL;
531		/* Disable Rx flow control. */
532		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0)
533			gmac |= GM_GPCR_FC_RX_DIS;
534		/* Disable Tx flow control. */
535		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0)
536			gmac |= GM_GPCR_FC_TX_DIS;
537		gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
538		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
539		/* Read again to ensure writing. */
540		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
541
542		gmac = GMC_PAUSE_ON;
543		if (((mii->mii_media_active & IFM_GMASK) &
544		    (IFM_FLAG0 | IFM_FLAG1)) == 0)
545			gmac = GMC_PAUSE_OFF;
546		/* Disable pause for 10/100 Mbps in half-duplex mode. */
547		if ((((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) &&
548		    (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX ||
549		    IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T))
550			gmac = GMC_PAUSE_OFF;
551		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);
552
553		/* Enable PHY interrupt for FIFO underrun/overflow. */
554		msk_phy_writereg(sc_if, PHY_ADDR_MARV,
555		    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
556	} else {
557		/*
558		 * Link state changed to down.
559		 * Disable PHY interrupts.
560		 */
561		msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
562		/* Disable Rx/Tx MAC. */
563		gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
564		if ((gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) != 0) {
565			gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
566			GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
567			/* Read again to ensure writing. */
568			GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
569		}
570	}
571}
572
573static void
574msk_rxfilter(struct msk_if_softc *sc_if)
575{
576	struct msk_softc *sc;
577	struct ifnet *ifp;
578	struct ifmultiaddr *ifma;
579	uint32_t mchash[2];
580	uint32_t crc;
581	uint16_t mode;
582
583	sc = sc_if->msk_softc;
584
585	MSK_IF_LOCK_ASSERT(sc_if);
586
587	ifp = sc_if->msk_ifp;
588
589	bzero(mchash, sizeof(mchash));
590	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
591	if ((ifp->if_flags & IFF_PROMISC) != 0)
592		mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
593	else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
594		mode |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
595		mchash[0] = 0xffff;
596		mchash[1] = 0xffff;
597	} else {
598		mode |= GM_RXCR_UCF_ENA;
599		if_maddr_rlock(ifp);
600		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
601			if (ifma->ifma_addr->sa_family != AF_LINK)
602				continue;
603			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
604			    ifma->ifma_addr), ETHER_ADDR_LEN);
605			/* Just want the 6 least significant bits. */
606			crc &= 0x3f;
607			/* Set the corresponding bit in the hash table. */
608			mchash[crc >> 5] |= 1 << (crc & 0x1f);
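			/*
			 * Worked example (hypothetical address): a CRC whose
			 * low 6 bits are 0x2b selects word 1 (0x2b >> 5) and
			 * bit 11 (0x2b & 0x1f), i.e. bit 11 of GM_MC_ADDR_H3
			 * written below.
			 */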
609		}
610		if_maddr_runlock(ifp);
611		if (mchash[0] != 0 || mchash[1] != 0)
612			mode |= GM_RXCR_MCF_ENA;
613	}
614
615	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
616	    mchash[0] & 0xffff);
617	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
618	    (mchash[0] >> 16) & 0xffff);
619	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
620	    mchash[1] & 0xffff);
621	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
622	    (mchash[1] >> 16) & 0xffff);
623	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
624}
625
626static void
627msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
628{
629	struct msk_softc *sc;
630
631	sc = sc_if->msk_softc;
632	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
633		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
634		    RX_VLAN_STRIP_ON);
635		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
636		    TX_VLAN_TAG_ON);
637	} else {
638		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
639		    RX_VLAN_STRIP_OFF);
640		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
641		    TX_VLAN_TAG_OFF);
642	}
643}
644
645static int
646msk_init_rx_ring(struct msk_if_softc *sc_if)
647{
648	struct msk_ring_data *rd;
649	struct msk_rxdesc *rxd;
650	int i, prod;
651
652	MSK_IF_LOCK_ASSERT(sc_if);
653
654	sc_if->msk_cdata.msk_rx_cons = 0;
655	sc_if->msk_cdata.msk_rx_prod = 0;
656	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
657
658	rd = &sc_if->msk_rdata;
659	bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
660	prod = sc_if->msk_cdata.msk_rx_prod;
661	for (i = 0; i < MSK_RX_RING_CNT; i++) {
662		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
663		rxd->rx_m = NULL;
664		rxd->rx_le = &rd->msk_rx_ring[prod];
665		if (msk_newbuf(sc_if, prod) != 0)
666			return (ENOBUFS);
667		MSK_INC(prod, MSK_RX_RING_CNT);
668	}
669
670	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
671	    sc_if->msk_cdata.msk_rx_ring_map,
672	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
673
674	/* Update prefetch unit. */
675	sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
676	CSR_WRITE_2(sc_if->msk_softc,
677	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
678	    sc_if->msk_cdata.msk_rx_prod);
679
680	return (0);
681}
682
683static int
684msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
685{
686	struct msk_ring_data *rd;
687	struct msk_rxdesc *rxd;
688	int i, prod;
689
690	MSK_IF_LOCK_ASSERT(sc_if);
691
692	sc_if->msk_cdata.msk_rx_cons = 0;
693	sc_if->msk_cdata.msk_rx_prod = 0;
694	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
695
696	rd = &sc_if->msk_rdata;
697	bzero(rd->msk_jumbo_rx_ring,
698	    sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
699	prod = sc_if->msk_cdata.msk_rx_prod;
700	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
701		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
702		rxd->rx_m = NULL;
703		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
704		if (msk_jumbo_newbuf(sc_if, prod) != 0)
705			return (ENOBUFS);
706		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
707	}
708
709	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
710	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
711	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
712
713	sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
714	CSR_WRITE_2(sc_if->msk_softc,
715	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
716	    sc_if->msk_cdata.msk_rx_prod);
717
718	return (0);
719}
720
721static void
722msk_init_tx_ring(struct msk_if_softc *sc_if)
723{
724	struct msk_ring_data *rd;
725	struct msk_txdesc *txd;
726	int i;
727
728	sc_if->msk_cdata.msk_tso_mtu = 0;
729	sc_if->msk_cdata.msk_tx_prod = 0;
730	sc_if->msk_cdata.msk_tx_cons = 0;
731	sc_if->msk_cdata.msk_tx_cnt = 0;
732
733	rd = &sc_if->msk_rdata;
734	bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
735	for (i = 0; i < MSK_TX_RING_CNT; i++) {
736		txd = &sc_if->msk_cdata.msk_txdesc[i];
737		txd->tx_m = NULL;
738		txd->tx_le = &rd->msk_tx_ring[i];
739	}
740
741	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
742	    sc_if->msk_cdata.msk_tx_ring_map,
743	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
744}
745
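/*
 * Re-arm a Rx descriptor with the mbuf it already owns, handing it back
 * to the hardware without allocating a replacement (used when the
 * received frame is being dropped).
 */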
746static __inline void
747msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
748{
749	struct msk_rx_desc *rx_le;
750	struct msk_rxdesc *rxd;
751	struct mbuf *m;
752
753	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
754	m = rxd->rx_m;
755	rx_le = rxd->rx_le;
756	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
757}
758
759static __inline void
760msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int	idx)
761{
762	struct msk_rx_desc *rx_le;
763	struct msk_rxdesc *rxd;
764	struct mbuf *m;
765
766	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
767	m = rxd->rx_m;
768	rx_le = rxd->rx_le;
769	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
770}
771
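/*
 * Attach a fresh mbuf cluster to Rx slot 'idx'.  The spare DMA map is
 * loaded first so the old buffer is only unloaded once the replacement
 * mapping has succeeded; the maps are then swapped and the descriptor is
 * re-armed with the new buffer's bus address.
 */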
772static int
773msk_newbuf(struct msk_if_softc *sc_if, int idx)
774{
775	struct msk_rx_desc *rx_le;
776	struct msk_rxdesc *rxd;
777	struct mbuf *m;
778	bus_dma_segment_t segs[1];
779	bus_dmamap_t map;
780	int nsegs;
781
782	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
783	if (m == NULL)
784		return (ENOBUFS);
785
786	m->m_len = m->m_pkthdr.len = MCLBYTES;
787	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
788		m_adj(m, ETHER_ALIGN);
789#ifndef __NO_STRICT_ALIGNMENT
790	else
791		m_adj(m, MSK_RX_BUF_ALIGN);
792#endif
793
794	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag,
795	    sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs,
796	    BUS_DMA_NOWAIT) != 0) {
797		m_freem(m);
798		return (ENOBUFS);
799	}
800	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
801
802	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
803	if (rxd->rx_m != NULL) {
804		bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
805		    BUS_DMASYNC_POSTREAD);
806		bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
807	}
808	map = rxd->rx_dmamap;
809	rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
810	sc_if->msk_cdata.msk_rx_sparemap = map;
811	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
812	    BUS_DMASYNC_PREREAD);
813	rxd->rx_m = m;
814	rx_le = rxd->rx_le;
815	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
816	rx_le->msk_control =
817	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
818
819	return (0);
820}
821
822static int
823msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
824{
825	struct msk_rx_desc *rx_le;
826	struct msk_rxdesc *rxd;
827	struct mbuf *m;
828	bus_dma_segment_t segs[1];
829	bus_dmamap_t map;
830	int nsegs;
831
832	m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
833	if (m == NULL)
834		return (ENOBUFS);
835	if ((m->m_flags & M_EXT) == 0) {
836		m_freem(m);
837		return (ENOBUFS);
838	}
839	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
840	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
841		m_adj(m, ETHER_ALIGN);
842#ifndef __NO_STRICT_ALIGNMENT
843	else
844		m_adj(m, MSK_RX_BUF_ALIGN);
845#endif
846
847	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
848	    sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
849	    BUS_DMA_NOWAIT) != 0) {
850		m_freem(m);
851		return (ENOBUFS);
852	}
853	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
854
855	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
856	if (rxd->rx_m != NULL) {
857		bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
858		    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
859		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
860		    rxd->rx_dmamap);
861	}
862	map = rxd->rx_dmamap;
863	rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
864	sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
865	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
866	    BUS_DMASYNC_PREREAD);
867	rxd->rx_m = m;
868	rx_le = rxd->rx_le;
869	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
870	rx_le->msk_control =
871	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
872
873	return (0);
874}
875
876/*
877 * Set media options.
878 */
879static int
880msk_mediachange(struct ifnet *ifp)
881{
882	struct msk_if_softc *sc_if;
883	struct mii_data	*mii;
884	int error;
885
886	sc_if = ifp->if_softc;
887
888	MSK_IF_LOCK(sc_if);
889	mii = device_get_softc(sc_if->msk_miibus);
890	error = mii_mediachg(mii);
891	MSK_IF_UNLOCK(sc_if);
892
893	return (error);
894}
895
896/*
897 * Report current media status.
898 */
899static void
900msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
901{
902	struct msk_if_softc *sc_if;
903	struct mii_data	*mii;
904
905	sc_if = ifp->if_softc;
906	MSK_IF_LOCK(sc_if);
907	if ((ifp->if_flags & IFF_UP) == 0) {
908		MSK_IF_UNLOCK(sc_if);
909		return;
910	}
911	mii = device_get_softc(sc_if->msk_miibus);
912
913	mii_pollstat(mii);
914	MSK_IF_UNLOCK(sc_if);
915	ifmr->ifm_active = mii->mii_media_active;
916	ifmr->ifm_status = mii->mii_media_status;
917}
918
919static int
920msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
921{
922	struct msk_if_softc *sc_if;
923	struct ifreq *ifr;
924	struct mii_data	*mii;
925	int error, mask;
926
927	sc_if = ifp->if_softc;
928	ifr = (struct ifreq *)data;
929	error = 0;
930
931	switch(command) {
932	case SIOCSIFMTU:
933		MSK_IF_LOCK(sc_if);
934		if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN)
935			error = EINVAL;
936		else if (ifp->if_mtu != ifr->ifr_mtu) {
937 			if (ifr->ifr_mtu > ETHERMTU) {
938				if ((sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
939					error = EINVAL;
940					MSK_IF_UNLOCK(sc_if);
941					break;
942				}
943				if ((sc_if->msk_flags &
944				    MSK_FLAG_JUMBO_NOCSUM) != 0) {
945					ifp->if_hwassist &=
946					    ~(MSK_CSUM_FEATURES | CSUM_TSO);
947					ifp->if_capenable &=
948					    ~(IFCAP_TSO4 | IFCAP_TXCSUM);
949					VLAN_CAPABILITIES(ifp);
950				}
951			}
952			ifp->if_mtu = ifr->ifr_mtu;
953			msk_init_locked(sc_if);
954		}
955		MSK_IF_UNLOCK(sc_if);
956		break;
957	case SIOCSIFFLAGS:
958		MSK_IF_LOCK(sc_if);
959		if ((ifp->if_flags & IFF_UP) != 0) {
960			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
961			    ((ifp->if_flags ^ sc_if->msk_if_flags) &
962			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
963				msk_rxfilter(sc_if);
964			else if ((sc_if->msk_flags & MSK_FLAG_DETACH) == 0)
965				msk_init_locked(sc_if);
966		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
967			msk_stop(sc_if);
968		sc_if->msk_if_flags = ifp->if_flags;
969		MSK_IF_UNLOCK(sc_if);
970		break;
971	case SIOCADDMULTI:
972	case SIOCDELMULTI:
973		MSK_IF_LOCK(sc_if);
974		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
975			msk_rxfilter(sc_if);
976		MSK_IF_UNLOCK(sc_if);
977		break;
978	case SIOCGIFMEDIA:
979	case SIOCSIFMEDIA:
980		mii = device_get_softc(sc_if->msk_miibus);
981		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
982		break;
983	case SIOCSIFCAP:
984		MSK_IF_LOCK(sc_if);
985		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
986		if ((mask & IFCAP_TXCSUM) != 0 &&
987		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
988			ifp->if_capenable ^= IFCAP_TXCSUM;
989			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
990				ifp->if_hwassist |= MSK_CSUM_FEATURES;
991			else
992				ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
993		}
994		if ((mask & IFCAP_RXCSUM) != 0 &&
995		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0)
996			ifp->if_capenable ^= IFCAP_RXCSUM;
997		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
998		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
999			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1000			msk_setvlan(sc_if, ifp);
1001		}
1002		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
1003		    (IFCAP_VLAN_HWCSUM & ifp->if_capabilities) != 0)
1004			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1005		if ((mask & IFCAP_TSO4) != 0 &&
1006		    (IFCAP_TSO4 & ifp->if_capabilities) != 0) {
1007			ifp->if_capenable ^= IFCAP_TSO4;
1008			if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
1009				ifp->if_hwassist |= CSUM_TSO;
1010			else
1011				ifp->if_hwassist &= ~CSUM_TSO;
1012		}
1013		if (ifp->if_mtu > ETHERMTU &&
1014		    (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
1015			ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
1016			ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
1017		}
1018
1019		VLAN_CAPABILITIES(ifp);
1020		MSK_IF_UNLOCK(sc_if);
1021		break;
1022	default:
1023		error = ether_ioctl(ifp, command, data);
1024		break;
1025	}
1026
1027	return (error);
1028}
1029
1030static int
1031mskc_probe(device_t dev)
1032{
1033	struct msk_product *mp;
1034	uint16_t vendor, devid;
1035	int i;
1036
1037	vendor = pci_get_vendor(dev);
1038	devid = pci_get_device(dev);
1039	mp = msk_products;
1040	for (i = 0; i < sizeof(msk_products)/sizeof(msk_products[0]);
1041	    i++, mp++) {
1042		if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
1043			device_set_desc(dev, mp->msk_name);
1044			return (BUS_PROBE_DEFAULT);
1045		}
1046	}
1047
1048	return (ENXIO);
1049}
1050
1051static int
1052mskc_setup_rambuffer(struct msk_softc *sc)
1053{
1054	int next;
1055	int i;
1056
1057	/* Get adapter SRAM size. */
1058	sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
1059	if (bootverbose)
1060		device_printf(sc->msk_dev,
1061		    "RAM buffer size : %dKB\n", sc->msk_ramsize);
1062	if (sc->msk_ramsize == 0)
1063		return (0);
1064
1065	sc->msk_pflags |= MSK_FLAG_RAMBUF;
1066	/*
1067	 * Give the receiver 2/3 of the memory and round it down to a
1068	 * multiple of 1024; the Tx/Rx RAM buffer sizes of the Yukon II
1069	 * should be multiples of 1024.
1070	 */
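	/*
	 * Worked example (hypothetical size): with a 48KB RAM buffer,
	 * 2/3 of 48KB is 32KB, already a multiple of 1024, so the Rx
	 * queue gets 32KB and the Tx queue the remaining 16KB.
	 */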
1071	sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
1072	sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
1073	for (i = 0, next = 0; i < sc->msk_num_port; i++) {
1074		sc->msk_rxqstart[i] = next;
1075		sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
1076		next = sc->msk_rxqend[i] + 1;
1077		sc->msk_txqstart[i] = next;
1078		sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
1079		next = sc->msk_txqend[i] + 1;
1080		if (bootverbose) {
1081			device_printf(sc->msk_dev,
1082			    "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
1083			    sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
1084			    sc->msk_rxqend[i]);
1085			device_printf(sc->msk_dev,
1086			    "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
1087			    sc->msk_txqsize / 1024, sc->msk_txqstart[i],
1088			    sc->msk_txqend[i]);
1089		}
1090	}
1091
1092	return (0);
1093}
1094
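/*
 * Switch GPHY power state.  Powering up selects VCC, enables the PCI and
 * core clocks, releases the PHYs from power-down/COMA and resets the GMAC
 * link control; powering down reverses the sequence and parks the chip
 * on VAUX.
 */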
1095static void
1096msk_phy_power(struct msk_softc *sc, int mode)
1097{
1098	uint32_t our, val;
1099	int i;
1100
1101	switch (mode) {
1102	case MSK_PHY_POWERUP:
1103		/* Switch power to VCC (WA for VAUX problem). */
1104		CSR_WRITE_1(sc, B0_POWER_CTRL,
1105		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
1106		/* Disable Core Clock Division, set Clock Select to 0. */
1107		CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
1108
1109		val = 0;
1110		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1111		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1112			/* Enable bits are inverted. */
1113			val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1114			      Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1115			      Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1116		}
1117		/*
1118		 * Enable PCI & Core Clock, enable clock gating for both Links.
1119		 */
1120		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1121
1122		val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1123		val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
1124		if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
1125			if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1126				/* Deassert Low Power for 1st PHY. */
1127				val |= PCI_Y2_PHY1_COMA;
1128				if (sc->msk_num_port > 1)
1129					val |= PCI_Y2_PHY2_COMA;
1130			}
1131		}
1132		/* Release PHY from PowerDown/COMA mode. */
1133		pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1134		switch (sc->msk_hw_id) {
1135		case CHIP_ID_YUKON_EC_U:
1136		case CHIP_ID_YUKON_EX:
1137		case CHIP_ID_YUKON_FE_P:
1138			CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_OFF);
1139
1140			/* Enable all clocks. */
1141			pci_write_config(sc->msk_dev, PCI_OUR_REG_3, 0, 4);
1142			our = pci_read_config(sc->msk_dev, PCI_OUR_REG_4, 4);
1143			our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN|
1144			    PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST);
1145			/* Set all bits to 0 except bits 15..12. */
1146			pci_write_config(sc->msk_dev, PCI_OUR_REG_4, our, 4);
1147			our = pci_read_config(sc->msk_dev, PCI_OUR_REG_5, 4);
1148			our &= PCI_CTL_TIM_VMAIN_AV_MSK;
1149			pci_write_config(sc->msk_dev, PCI_OUR_REG_5, our, 4);
1150			pci_write_config(sc->msk_dev, PCI_CFG_REG_1, 0, 4);
1151			/*
1152			 * Disable status race, workaround for
1153			 * Yukon EC Ultra & Yukon EX.
1154			 */
1155			val = CSR_READ_4(sc, B2_GP_IO);
1156			val |= GLB_GPIO_STAT_RACE_DIS;
1157			CSR_WRITE_4(sc, B2_GP_IO, val);
1158			CSR_READ_4(sc, B2_GP_IO);
1159			break;
1160		default:
1161			break;
1162		}
1163		for (i = 0; i < sc->msk_num_port; i++) {
1164			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1165			    GMLC_RST_SET);
1166			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1167			    GMLC_RST_CLR);
1168		}
1169		break;
1170	case MSK_PHY_POWERDOWN:
1171		val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1172		val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
1173		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1174		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1175			val &= ~PCI_Y2_PHY1_COMA;
1176			if (sc->msk_num_port > 1)
1177				val &= ~PCI_Y2_PHY2_COMA;
1178		}
1179		pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1180
1181		val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1182		      Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1183		      Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1184		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1185		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1186			/* Enable bits are inverted. */
1187			val = 0;
1188		}
1189		/*
1190		 * Disable PCI & Core Clock, disable clock gating for
1191		 * both Links.
1192		 */
1193		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1194		CSR_WRITE_1(sc, B0_POWER_CTRL,
1195		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
1196		break;
1197	default:
1198		break;
1199	}
1200}
1201
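/*
 * Bring the controller into a known state: disable ASF, issue a software
 * reset, clear PCI/PCI-X/PCIe error state, power the PHYs up, reset both
 * GMACs, program the RAM interface timeouts and (re)initialize the
 * status list unit.
 */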
1202static void
1203mskc_reset(struct msk_softc *sc)
1204{
1205	bus_addr_t addr;
1206	uint16_t status;
1207	uint32_t val;
1208	int i;
1209
1210	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1211
1212	/* Disable ASF. */
1213	if (sc->msk_hw_id == CHIP_ID_YUKON_EX) {
1214		status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR);
1215		/* Clear AHB bridge & microcontroller reset. */
1216		status &= ~(Y2_ASF_HCU_CCSR_AHB_RST |
1217		    Y2_ASF_HCU_CCSR_CPU_RST_MODE);
1218		/* Clear ASF microcontroller state. */
1219		status &= ~ Y2_ASF_HCU_CCSR_UC_STATE_MSK;
1220		CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status);
1221	} else
1222		CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
1223	CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
1224
1225	/*
1226	 * Since we disabled ASF, S/W reset is required for Power Management.
1227	 */
1228	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1229	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1230
1231	/* Clear all error bits in the PCI status register. */
1232	status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
1233	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1234
1235	pci_write_config(sc->msk_dev, PCIR_STATUS, status |
1236	    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
1237	    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
1238	CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);
1239
1240	switch (sc->msk_bustype) {
1241	case MSK_PEX_BUS:
1242		/* Clear all PEX errors. */
1243		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
1244		val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
1245		if ((val & PEX_RX_OV) != 0) {
1246			sc->msk_intrmask &= ~Y2_IS_HW_ERR;
1247			sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
1248		}
1249		break;
1250	case MSK_PCI_BUS:
1251	case MSK_PCIX_BUS:
1252		/* Set Cache Line Size to 2(8bytes) if configured to 0. */
1253		val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
1254		if (val == 0)
1255			pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
1256		if (sc->msk_bustype == MSK_PCIX_BUS) {
1257			/* Set Cache Line Size opt. */
1258			val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1259			val |= PCI_CLS_OPT;
1260			pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1261		}
1262		break;
1263	}
1264	/* Set PHY power state. */
1265	msk_phy_power(sc, MSK_PHY_POWERUP);
1266
1267	/* Reset GPHY/GMAC Control */
1268	for (i = 0; i < sc->msk_num_port; i++) {
1269		/* GPHY Control reset. */
1270		CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
1271		CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
1272		/* GMAC Control reset. */
1273		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
1274		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
1275		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
1276		if (sc->msk_hw_id == CHIP_ID_YUKON_EX)
1277			CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL),
1278			    GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
1279			    GMC_BYP_RETR_ON);
1280	}
1281	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1282
1283	/* LED On. */
1284	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);
1285
1286	/* Clear TWSI IRQ. */
1287	CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);
1288
1289	/* Turn off hardware timer. */
1290	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
1291	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);
1292
1293	/* Turn off descriptor polling. */
1294	CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);
1295
1296	/* Turn off time stamps. */
1297	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
1298	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
1299
1300	/* Configure timeout values. */
1301	for (i = 0; i < sc->msk_num_port; i++) {
1302		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
1303		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
1304		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
1305		    MSK_RI_TO_53);
1306		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
1307		    MSK_RI_TO_53);
1308		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
1309		    MSK_RI_TO_53);
1310		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
1311		    MSK_RI_TO_53);
1312		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
1313		    MSK_RI_TO_53);
1314		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
1315		    MSK_RI_TO_53);
1316		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
1317		    MSK_RI_TO_53);
1318		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
1319		    MSK_RI_TO_53);
1320		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
1321		    MSK_RI_TO_53);
1322		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
1323		    MSK_RI_TO_53);
1324		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
1325		    MSK_RI_TO_53);
1326		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
1327		    MSK_RI_TO_53);
1328	}
1329
1330	/* Disable all interrupts. */
1331	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1332	CSR_READ_4(sc, B0_HWE_IMSK);
1333	CSR_WRITE_4(sc, B0_IMSK, 0);
1334	CSR_READ_4(sc, B0_IMSK);
1335
1336	/*
1337	 * On dual port PCI-X cards there is a problem where status
1338	 * can be received out of order due to split transactions.
1339	 */
1340	if (sc->msk_bustype == MSK_PCIX_BUS && sc->msk_num_port > 1) {
1341		int pcix;
1342		uint16_t pcix_cmd;
1343
1344		if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &pcix) == 0) {
1345			pcix_cmd = pci_read_config(sc->msk_dev, pcix + 2, 2);
1346			/* Clear Max Outstanding Split Transactions. */
1347			pcix_cmd &= ~0x70;
1348			CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1349			pci_write_config(sc->msk_dev, pcix + 2, pcix_cmd, 2);
1350			CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1351		}
1352	}
1353	if (sc->msk_bustype == MSK_PEX_BUS) {
1354		uint16_t v, width;
1355
1356		v = pci_read_config(sc->msk_dev, PEX_DEV_CTRL, 2);
1357		/* Change Max. Read Request Size to 4096 bytes. */
1358		v &= ~PEX_DC_MAX_RRS_MSK;
1359		v |= PEX_DC_MAX_RD_RQ_SIZE(5);
1360		pci_write_config(sc->msk_dev, PEX_DEV_CTRL, v, 2);
1361		width = pci_read_config(sc->msk_dev, PEX_LNK_STAT, 2);
1362		width = (width & PEX_LS_LINK_WI_MSK) >> 4;
1363		v = pci_read_config(sc->msk_dev, PEX_LNK_CAP, 2);
1364		v = (v & PEX_LS_LINK_WI_MSK) >> 4;
1365		if (v != width)
1366			device_printf(sc->msk_dev,
1367			    "negotiated width of link(x%d) != "
1368			    "max. width of link(x%d)\n", width, v);
1369	}
1370
1371	/* Clear status list. */
1372	bzero(sc->msk_stat_ring,
1373	    sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
1374	sc->msk_stat_cons = 0;
1375	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
1376	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1377	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
1378	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
1379	/* Set the status list base address. */
1380	addr = sc->msk_stat_ring_paddr;
1381	CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
1382	CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
1383	/* Set the status list last index. */
1384	CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
1385	if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
1386	    sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
1387		/* WA for dev. #4.3 */
1388		CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
1389		/* WA for dev. #4.18 */
1390		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
1391		CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
1392	} else {
1393		CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
1394		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
1395		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1396		    sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
1397			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
1398		else
1399			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
1400		CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
1401	}
1402	/*
1403	 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
1404	 */
1405	CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));
1406
1407	/* Enable status unit. */
1408	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);
1409
1410	CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
1411	CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
1412	CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
1413}
1414
1415static int
1416msk_probe(device_t dev)
1417{
1418	struct msk_softc *sc;
1419	char desc[100];
1420
1421	sc = device_get_softc(device_get_parent(dev));
1422	/*
1423	 * Not much to do here. We always know there will be
1424	 * at least one GMAC present, and if there are two,
1425	 * mskc_attach() will create a second device instance
1426	 * for us.
1427	 */
1428	snprintf(desc, sizeof(desc),
1429	    "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
1430	    model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
1431	    sc->msk_hw_rev);
1432	device_set_desc_copy(dev, desc);
1433
1434	return (BUS_PROBE_DEFAULT);
1435}
1436
1437static int
1438msk_attach(device_t dev)
1439{
1440	struct msk_softc *sc;
1441	struct msk_if_softc *sc_if;
1442	struct ifnet *ifp;
1443	int i, port, error;
1444	uint8_t eaddr[6];
1445
1446	if (dev == NULL)
1447		return (EINVAL);
1448
1449	error = 0;
1450	sc_if = device_get_softc(dev);
1451	sc = device_get_softc(device_get_parent(dev));
1452	port = *(int *)device_get_ivars(dev);
1453
1454	sc_if->msk_if_dev = dev;
1455	sc_if->msk_port = port;
1456	sc_if->msk_softc = sc;
1457	sc_if->msk_flags = sc->msk_pflags;
1458	sc->msk_if[port] = sc_if;
1459	/* Setup Tx/Rx queue register offsets. */
1460	if (port == MSK_PORT_A) {
1461		sc_if->msk_txq = Q_XA1;
1462		sc_if->msk_txsq = Q_XS1;
1463		sc_if->msk_rxq = Q_R1;
1464	} else {
1465		sc_if->msk_txq = Q_XA2;
1466		sc_if->msk_txsq = Q_XS2;
1467		sc_if->msk_rxq = Q_R2;
1468	}
1469
1470	callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
1471	msk_sysctl_node(sc_if);
1472
1473	if ((error = msk_txrx_dma_alloc(sc_if)) != 0)
1474		goto fail;
1475	msk_rx_dma_jalloc(sc_if);
1476
1477	ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
1478	if (ifp == NULL) {
1479		device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
1480		error = ENOSPC;
1481		goto fail;
1482	}
1483	ifp->if_softc = sc_if;
1484	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1485	ifp->if_mtu = ETHERMTU;
1486	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1487	/*
1488	 * The IFCAP_RXCSUM capability is intentionally disabled because the
1489	 * hardware has a serious bug in Rx checksum offload on all Yukon II
1490	 * family parts.  There seems to be a workaround that makes it work
1491	 * some of the time, but the workaround has to check OP code sequences
1492	 * to verify that the OP code is correct, and sometimes the driver
1493	 * still has to compute the IP/TCP/UDP checksum itself to verify the
1494	 * checksum computed by the hardware.  If software has to compute the
1495	 * checksum anyway just to check the hardware's result, there is no
1496	 * point in having the hardware compute it, so no effort is spent
1497	 * making Rx checksum offload work on Yukon II hardware.
1498	 */
1499	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
1500	/*
1501	 * Enable Rx checksum offloading if the controller supports the new
1502	 * descriptor format.
1503	 */
1504	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
1505	    (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
1506		ifp->if_capabilities |= IFCAP_RXCSUM;
1507	ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO;
1508	ifp->if_capenable = ifp->if_capabilities;
1509	ifp->if_ioctl = msk_ioctl;
1510	ifp->if_start = msk_start;
1511	ifp->if_timer = 0;
1512	ifp->if_watchdog = NULL;
1513	ifp->if_init = msk_init;
1514	IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
1515	ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1;
1516	IFQ_SET_READY(&ifp->if_snd);
1517
1518	TASK_INIT(&sc_if->msk_tx_task, 1, msk_tx_task, ifp);
1519
1520	/*
1521	 * Get station address for this interface. Note that
1522	 * dual port cards actually come with three station
1523	 * addresses: one for each port, plus an extra. The
1524	 * extra one is used by the SysKonnect driver software
1525	 * as a 'virtual' station address for when both ports
1526	 * are operating in failover mode. Currently we don't
1527	 * use this extra address.
1528	 */
1529	MSK_IF_LOCK(sc_if);
1530	for (i = 0; i < ETHER_ADDR_LEN; i++)
1531		eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);
1532
1533	/*
1534	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
1535	 */
1536	MSK_IF_UNLOCK(sc_if);
1537	ether_ifattach(ifp, eaddr);
1538	MSK_IF_LOCK(sc_if);
1539
1540	/* VLAN capability setup */
1541	ifp->if_capabilities |= IFCAP_VLAN_MTU;
1542	if ((sc_if->msk_flags & MSK_FLAG_NOHWVLAN) == 0) {
1543		/*
1544		 * Due to Tx checksum offload hardware bugs, msk(4) manually
1545		 * computes the checksum for short frames.  This workaround
1546		 * does not work for VLAN tagged frames, so checksum
1547		 * offloading is disabled for VLAN interfaces.
1548		 */
1549		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1550		/*
1551		 * Enable Rx checksum offloading for VLAN tagged frames if
1552		 * the controller supports the new descriptor format.
1553		 */
1554		if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
1555		    (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
1556			ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
1557	}
1558	ifp->if_capenable = ifp->if_capabilities;
1559
1560	/*
1561	 * Tell the upper layer(s) we support long frames.
1562	 * Must appear after the call to ether_ifattach() because
1563	 * ether_ifattach() sets ifi_hdrlen to the default value.
1564	 */
1565	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1566
1567	/*
1568	 * Do miibus setup.
1569	 */
1570	MSK_IF_UNLOCK(sc_if);
1571	error = mii_phy_probe(dev, &sc_if->msk_miibus, msk_mediachange,
1572	    msk_mediastatus);
1573	if (error != 0) {
1574		device_printf(sc_if->msk_if_dev, "no PHY found!\n");
1575		ether_ifdetach(ifp);
1576		error = ENXIO;
1577		goto fail;
1578	}
1579
1580fail:
1581	if (error != 0) {
1582		/* Access should be ok even though lock has been dropped */
1583		sc->msk_if[port] = NULL;
1584		msk_detach(dev);
1585	}
1586
1587	return (error);
1588}
1589
1590/*
1591 * Attach the interface. Allocate softc structures, do ifmedia
1592 * setup and ethernet/BPF attach.
1593 */
1594static int
1595mskc_attach(device_t dev)
1596{
1597	struct msk_softc *sc;
1598	int error, msic, msir, *port, reg;
1599
1600	sc = device_get_softc(dev);
1601	sc->msk_dev = dev;
1602	mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1603	    MTX_DEF);
1604
1605	/*
1606	 * Map control/status registers.
1607	 */
1608	pci_enable_busmaster(dev);
1609
1610	/* Allocate I/O resource */
1611#ifdef MSK_USEIOSPACE
1612	sc->msk_res_spec = msk_res_spec_io;
1613#else
1614	sc->msk_res_spec = msk_res_spec_mem;
1615#endif
1616	sc->msk_irq_spec = msk_irq_spec_legacy;
1617	error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
1618	if (error) {
1619		if (sc->msk_res_spec == msk_res_spec_mem)
1620			sc->msk_res_spec = msk_res_spec_io;
1621		else
1622			sc->msk_res_spec = msk_res_spec_mem;
1623		error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
1624		if (error) {
1625			device_printf(dev, "couldn't allocate %s resources\n",
1626			    sc->msk_res_spec == msk_res_spec_mem ? "memory" :
1627			    "I/O");
1628			mtx_destroy(&sc->msk_mtx);
1629			return (ENXIO);
1630		}
1631	}
1632
1633	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1634	sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
1635	sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
1636	/* Bail out if chip is not recognized. */
1637	if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
1638	    sc->msk_hw_id > CHIP_ID_YUKON_FE_P) {
1639		device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
1640		    sc->msk_hw_id, sc->msk_hw_rev);
1641		mtx_destroy(&sc->msk_mtx);
1642		return (ENXIO);
1643	}
1644
1645	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1646	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1647	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
1648	    &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
1649	    "max number of Rx events to process");
1650
1651	sc->msk_process_limit = MSK_PROC_DEFAULT;
1652	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
1653	    "process_limit", &sc->msk_process_limit);
1654	if (error == 0) {
1655		if (sc->msk_process_limit < MSK_PROC_MIN ||
1656		    sc->msk_process_limit > MSK_PROC_MAX) {
1657			device_printf(dev, "process_limit value out of range; "
1658			    "using default: %d\n", MSK_PROC_DEFAULT);
1659			sc->msk_process_limit = MSK_PROC_DEFAULT;
1660		}
1661	}
1662
1663	/* Soft reset. */
1664	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1665	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1666	sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
1667	if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
1668		sc->msk_coppertype = 0;
1669	else
1670		sc->msk_coppertype = 1;
1671	/* Check number of MACs. */
1672	sc->msk_num_port = 1;
1673	if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
1674	    CFG_DUAL_MAC_MSK) {
1675		if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
1676			sc->msk_num_port++;
1677	}
1678
1679	/* Check bus type. */
1680	if (pci_find_extcap(sc->msk_dev, PCIY_EXPRESS, &reg) == 0)
1681		sc->msk_bustype = MSK_PEX_BUS;
1682	else if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &reg) == 0)
1683		sc->msk_bustype = MSK_PCIX_BUS;
1684	else
1685		sc->msk_bustype = MSK_PCI_BUS;
1686
1687	switch (sc->msk_hw_id) {
1688	case CHIP_ID_YUKON_EC:
1689		sc->msk_clock = 125;	/* 125 MHz */
1690		sc->msk_pflags |= MSK_FLAG_JUMBO;
1691		break;
1692	case CHIP_ID_YUKON_EC_U:
1693		sc->msk_clock = 125;	/* 125 MHz */
1694		sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_JUMBO_NOCSUM;
1695		break;
1696	case CHIP_ID_YUKON_EX:
1697		sc->msk_clock = 125;	/* 125 MHz */
1698		sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 |
1699		    MSK_FLAG_AUTOTX_CSUM;
1700		/*
1701		 * The Yukon Extreme seems to have a silicon bug in its
1702		 * automatic Tx checksum calculation capability.
1703		 */
1704		if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
1705			sc->msk_pflags &= ~MSK_FLAG_AUTOTX_CSUM;
1706		/*
1707		 * Yukon Extreme A0 cannot use store-and-forward
1708		 * for jumbo frames, so disable Tx checksum
1709		 * offloading for jumbo frames.
1710		 */
1711		if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0)
1712			sc->msk_pflags |= MSK_FLAG_JUMBO_NOCSUM;
1713		break;
1714	case CHIP_ID_YUKON_FE:
1715		sc->msk_clock = 100;	/* 100 MHz */
1716		sc->msk_pflags |= MSK_FLAG_FASTETHER;
1717		break;
1718	case CHIP_ID_YUKON_FE_P:
1719		sc->msk_clock = 50;	/* 50 MHz */
1720		sc->msk_pflags |= MSK_FLAG_FASTETHER | MSK_FLAG_DESCV2 |
1721		    MSK_FLAG_AUTOTX_CSUM;
1722		if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
1723			/*
1724			 * XXX
1725			 * FE+ A0 has a status LE writeback bug, so msk(4)
1726			 * does not rely on the status word of received frames
1727			 * in msk_rxeof(), which in turn disables all the
1728			 * hardware assistance bits reported by the status
1729			 * word as well as the validity check of the received
1730			 * frame.  Just pass received frames to the upper stack
1731			 * with minimal tests and let the upper stack handle them.
1732			 */
1733			sc->msk_pflags |= MSK_FLAG_NOHWVLAN |
1734			    MSK_FLAG_NORXCHK | MSK_FLAG_NORX_CSUM;
1735		}
1736		break;
1737	case CHIP_ID_YUKON_XL:
1738		sc->msk_clock = 156;	/* 156 MHz */
1739		sc->msk_pflags |= MSK_FLAG_JUMBO;
1740		break;
1741	default:
1742		sc->msk_clock = 156;	/* 156 MHz */
1743		break;
1744	}
1745
1746	/* Allocate IRQ resources. */
1747	msic = pci_msi_count(dev);
1748	if (bootverbose)
1749		device_printf(dev, "MSI count : %d\n", msic);
1750	/*
1751	 * The Yukon II reports it can handle two messages, one for each
1752	 * possible port.  We go ahead and allocate two messages and only
1753	 * set up a handler for both if we have a dual port card.
1754	 *
1755	 * XXX: I haven't untangled the interrupt handler to handle dual
1756	 * port cards with separate MSI messages, so for now I disable MSI
1757	 * on dual port cards.
1758	 */
1759	if (legacy_intr != 0)
1760		msi_disable = 1;
1761	if (msi_disable == 0) {
1762		switch (msic) {
1763		case 2:
1764		case 1: /* 88E8058 reports 1 MSI message */
1765			msir = msic;
1766			if (sc->msk_num_port == 1 &&
1767			    pci_alloc_msi(dev, &msir) == 0) {
1768				if (msic == msir) {
1769					sc->msk_pflags |= MSK_FLAG_MSI;
1770					sc->msk_irq_spec = msic == 2 ?
1771					    msk_irq_spec_msi2 :
1772					    msk_irq_spec_msi;
1773				} else
1774					pci_release_msi(dev);
1775			}
1776			break;
1777		default:
1778			device_printf(dev,
1779			    "Unexpected number of MSI messages : %d\n", msic);
1780			break;
1781		}
1782	}
1783
1784	error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq);
1785	if (error) {
1786		device_printf(dev, "couldn't allocate IRQ resources\n");
1787		goto fail;
1788	}
1789
1790	if ((error = msk_status_dma_alloc(sc)) != 0)
1791		goto fail;
1792
1793	/* Set base interrupt mask. */
1794	sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
1795	sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
1796	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;
1797
1798	/* Reset the adapter. */
1799	mskc_reset(sc);
1800
1801	if ((error = mskc_setup_rambuffer(sc)) != 0)
1802		goto fail;
1803
1804	sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
1805	if (sc->msk_devs[MSK_PORT_A] == NULL) {
1806		device_printf(dev, "failed to add child for PORT_A\n");
1807		error = ENXIO;
1808		goto fail;
1809	}
1810	port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
1811	if (port == NULL) {
1812		device_printf(dev, "failed to allocate memory for "
1813		    "ivars of PORT_A\n");
1814		error = ENXIO;
1815		goto fail;
1816	}
1817	*port = MSK_PORT_A;
1818	device_set_ivars(sc->msk_devs[MSK_PORT_A], port);
1819
1820	if (sc->msk_num_port > 1) {
1821		sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
1822		if (sc->msk_devs[MSK_PORT_B] == NULL) {
1823			device_printf(dev, "failed to add child for PORT_B\n");
1824			error = ENXIO;
1825			goto fail;
1826		}
1827		port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
1828		if (port == NULL) {
1829			device_printf(dev, "failed to allocate memory for "
1830			    "ivars of PORT_B\n");
1831			error = ENXIO;
1832			goto fail;
1833		}
1834		*port = MSK_PORT_B;
1835		device_set_ivars(sc->msk_devs[MSK_PORT_B], port);
1836	}
1837
1838	error = bus_generic_attach(dev);
1839	if (error) {
1840		device_printf(dev, "failed to attach port(s)\n");
1841		goto fail;
1842	}
1843
1844	/* Hook interrupt last to avoid having to lock softc. */
1845	if (legacy_intr)
1846		error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
1847		    INTR_MPSAFE, NULL, msk_legacy_intr, sc,
1848		    &sc->msk_intrhand[0]);
1849	else {
1850		TASK_INIT(&sc->msk_int_task, 0, msk_int_task, sc);
1851		sc->msk_tq = taskqueue_create_fast("msk_taskq", M_WAITOK,
1852		    taskqueue_thread_enqueue, &sc->msk_tq);
1853		taskqueue_start_threads(&sc->msk_tq, 1, PI_NET, "%s taskq",
1854		    device_get_nameunit(sc->msk_dev));
1855		error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
1856		    INTR_MPSAFE, msk_intr, NULL, sc, &sc->msk_intrhand[0]);
1857	}
1858
1859	if (error != 0) {
1860		device_printf(dev, "couldn't set up interrupt handler\n");
1861		if (legacy_intr == 0)
1862			taskqueue_free(sc->msk_tq);
1863		sc->msk_tq = NULL;
1864		goto fail;
1865	}
1866fail:
1867	if (error != 0)
1868		mskc_detach(dev);
1869
1870	return (error);
1871}
1872
1873/*
1874 * Shutdown hardware and free up resources. This can be called any
1875 * time after the mutex has been initialized. It is called in both
1876 * the error case in attach and the normal detach case so it needs
1877 * to be careful about only freeing resources that have actually been
1878 * allocated.
1879 */
1880static int
1881msk_detach(device_t dev)
1882{
1883	struct msk_softc *sc;
1884	struct msk_if_softc *sc_if;
1885	struct ifnet *ifp;
1886
1887	sc_if = device_get_softc(dev);
1888	KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx),
1889	    ("msk mutex not initialized in msk_detach"));
1890	MSK_IF_LOCK(sc_if);
1891
1892	ifp = sc_if->msk_ifp;
1893	if (device_is_attached(dev)) {
1894		/* XXX */
1895		sc_if->msk_flags |= MSK_FLAG_DETACH;
1896		msk_stop(sc_if);
1897		/* Can't hold locks while calling detach. */
1898		MSK_IF_UNLOCK(sc_if);
1899		callout_drain(&sc_if->msk_tick_ch);
1900		taskqueue_drain(taskqueue_fast, &sc_if->msk_tx_task);
1901		ether_ifdetach(ifp);
1902		MSK_IF_LOCK(sc_if);
1903	}
1904
1905	/*
1906	 * We're generally called from mskc_detach() which is using
1907	 * device_delete_child() to get to here. It's already trashed
1908	 * miibus for us, so don't do it here or we'll panic.
1909	 *
1910	 * if (sc_if->msk_miibus != NULL) {
1911	 * 	device_delete_child(dev, sc_if->msk_miibus);
1912	 * 	sc_if->msk_miibus = NULL;
1913	 * }
1914	 */
1915
1916	msk_rx_dma_jfree(sc_if);
1917	msk_txrx_dma_free(sc_if);
1918	bus_generic_detach(dev);
1919
1920	if (ifp)
1921		if_free(ifp);
1922	sc = sc_if->msk_softc;
1923	sc->msk_if[sc_if->msk_port] = NULL;
1924	MSK_IF_UNLOCK(sc_if);
1925
1926	return (0);
1927}
1928
1929static int
1930mskc_detach(device_t dev)
1931{
1932	struct msk_softc *sc;
1933
1934	sc = device_get_softc(dev);
1935	KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized"));
1936
1937	if (device_is_alive(dev)) {
1938		if (sc->msk_devs[MSK_PORT_A] != NULL) {
1939			free(device_get_ivars(sc->msk_devs[MSK_PORT_A]),
1940			    M_DEVBUF);
1941			device_delete_child(dev, sc->msk_devs[MSK_PORT_A]);
1942		}
1943		if (sc->msk_devs[MSK_PORT_B] != NULL) {
1944			free(device_get_ivars(sc->msk_devs[MSK_PORT_B]),
1945			    M_DEVBUF);
1946			device_delete_child(dev, sc->msk_devs[MSK_PORT_B]);
1947		}
1948		bus_generic_detach(dev);
1949	}
1950
1951	/* Disable all interrupts. */
1952	CSR_WRITE_4(sc, B0_IMSK, 0);
1953	CSR_READ_4(sc, B0_IMSK);
1954	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1955	CSR_READ_4(sc, B0_HWE_IMSK);
1956
1957	/* LED Off. */
1958	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);
1959
1960	/* Put hardware reset. */
1961	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1962
1963	msk_status_dma_free(sc);
1964
1965	if (legacy_intr == 0 && sc->msk_tq != NULL) {
1966		taskqueue_drain(sc->msk_tq, &sc->msk_int_task);
1967		taskqueue_free(sc->msk_tq);
1968		sc->msk_tq = NULL;
1969	}
1970	if (sc->msk_intrhand[0]) {
1971		bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand[0]);
1972		sc->msk_intrhand[0] = NULL;
1973	}
1974	if (sc->msk_intrhand[1]) {
1975		bus_teardown_intr(dev, sc->msk_irq[1], sc->msk_intrhand[1]);
1976		sc->msk_intrhand[1] = NULL;
1977	}
1978	bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq);
1979	if ((sc->msk_pflags & MSK_FLAG_MSI) != 0)
1980		pci_release_msi(dev);
1981	bus_release_resources(dev, sc->msk_res_spec, sc->msk_res);
1982	mtx_destroy(&sc->msk_mtx);
1983
1984	return (0);
1985}
1986
1987struct msk_dmamap_arg {
1988	bus_addr_t	msk_busaddr;
1989};
1990
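/*
 * Generic bus_dmamap_load(9) callback.  Every ring in this driver is
 * loaded as a single contiguous segment, so just record the bus address
 * of the first (and only) segment.
 */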
1991static void
1992msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1993{
1994	struct msk_dmamap_arg *ctx;
1995
1996	if (error != 0)
1997		return;
1998	ctx = arg;
1999	ctx->msk_busaddr = segs[0].ds_addr;
2000}
2001
2002/* Create status DMA region. */
2003static int
2004msk_status_dma_alloc(struct msk_softc *sc)
2005{
2006	struct msk_dmamap_arg ctx;
2007	int error;
2008
2009	error = bus_dma_tag_create(
2010		    bus_get_dma_tag(sc->msk_dev),	/* parent */
2011		    MSK_STAT_ALIGN, 0,		/* alignment, boundary */
2012		    BUS_SPACE_MAXADDR,		/* lowaddr */
2013		    BUS_SPACE_MAXADDR,		/* highaddr */
2014		    NULL, NULL,			/* filter, filterarg */
2015		    MSK_STAT_RING_SZ,		/* maxsize */
2016		    1,				/* nsegments */
2017		    MSK_STAT_RING_SZ,		/* maxsegsize */
2018		    0,				/* flags */
2019		    NULL, NULL,			/* lockfunc, lockarg */
2020		    &sc->msk_stat_tag);
2021	if (error != 0) {
2022		device_printf(sc->msk_dev,
2023		    "failed to create status DMA tag\n");
2024		return (error);
2025	}
2026
2027	/* Allocate DMA'able memory and load the DMA map for status ring. */
2028	error = bus_dmamem_alloc(sc->msk_stat_tag,
2029	    (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
2030	    BUS_DMA_ZERO, &sc->msk_stat_map);
2031	if (error != 0) {
2032		device_printf(sc->msk_dev,
2033		    "failed to allocate DMA'able memory for status ring\n");
2034		return (error);
2035	}
2036
2037	ctx.msk_busaddr = 0;
2038	error = bus_dmamap_load(sc->msk_stat_tag,
2039	    sc->msk_stat_map, sc->msk_stat_ring, MSK_STAT_RING_SZ,
2040	    msk_dmamap_cb, &ctx, 0);
2041	if (error != 0) {
2042		device_printf(sc->msk_dev,
2043		    "failed to load DMA'able memory for status ring\n");
2044		return (error);
2045	}
2046	sc->msk_stat_ring_paddr = ctx.msk_busaddr;
2047
2048	return (0);
2049}
2050
2051static void
2052msk_status_dma_free(struct msk_softc *sc)
2053{
2054
2055	/* Destroy status block. */
2056	if (sc->msk_stat_tag) {
2057		if (sc->msk_stat_map) {
2058			bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
2059			if (sc->msk_stat_ring) {
2060				bus_dmamem_free(sc->msk_stat_tag,
2061				    sc->msk_stat_ring, sc->msk_stat_map);
2062				sc->msk_stat_ring = NULL;
2063			}
2064			sc->msk_stat_map = NULL;
2065		}
2066		bus_dma_tag_destroy(sc->msk_stat_tag);
2067		sc->msk_stat_tag = NULL;
2068	}
2069}
2070
2071static int
2072msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
2073{
2074	struct msk_dmamap_arg ctx;
2075	struct msk_txdesc *txd;
2076	struct msk_rxdesc *rxd;
2077	bus_size_t rxalign;
2078	int error, i;
2079
2080	/* Create parent DMA tag. */
2081	/*
2082	 * XXX
2083	 * It seems that the Yukon II supports full 64-bit DMA operations, but
2084	 * it needs two descriptors (list elements) for a 64-bit DMA transfer.
2085	 * Since we don't know in advance which DMA address mapping (32-bit or
2086	 * 64-bit) will be used for each mbuf, we limit the DMA space to the
2087	 * 32-bit address range.  Otherwise we would have to check which DMA
2088	 * address is used and chain another descriptor for the 64-bit DMA
2089	 * operation, which also means the descriptor ring size would be
2090	 * variable.  Limiting DMA addresses to the 32-bit address space greatly
2091	 * simplifies descriptor handling and possibly increases performance a
2092	 * bit due to more efficient handling of descriptors.  Apart from
2093	 * harassing the checksum offloading mechanisms, it seems like a really
2094	 * bad idea to use a separate descriptor for 64-bit DMA just to save a
2095	 * little descriptor memory.  Anyway, I've never seen such an exotic
2096	 * scheme on Ethernet interface hardware.
2097	 */
2098	error = bus_dma_tag_create(
2099		    bus_get_dma_tag(sc_if->msk_if_dev),	/* parent */
2100		    1, 0,			/* alignment, boundary */
2101		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2102		    BUS_SPACE_MAXADDR,		/* highaddr */
2103		    NULL, NULL,			/* filter, filterarg */
2104		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
2105		    0,				/* nsegments */
2106		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
2107		    0,				/* flags */
2108		    NULL, NULL,			/* lockfunc, lockarg */
2109		    &sc_if->msk_cdata.msk_parent_tag);
2110	if (error != 0) {
2111		device_printf(sc_if->msk_if_dev,
2112		    "failed to create parent DMA tag\n");
2113		goto fail;
2114	}
2115	/* Create tag for Tx ring. */
2116	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2117		    MSK_RING_ALIGN, 0,		/* alignment, boundary */
2118		    BUS_SPACE_MAXADDR,		/* lowaddr */
2119		    BUS_SPACE_MAXADDR,		/* highaddr */
2120		    NULL, NULL,			/* filter, filterarg */
2121		    MSK_TX_RING_SZ,		/* maxsize */
2122		    1,				/* nsegments */
2123		    MSK_TX_RING_SZ,		/* maxsegsize */
2124		    0,				/* flags */
2125		    NULL, NULL,			/* lockfunc, lockarg */
2126		    &sc_if->msk_cdata.msk_tx_ring_tag);
2127	if (error != 0) {
2128		device_printf(sc_if->msk_if_dev,
2129		    "failed to create Tx ring DMA tag\n");
2130		goto fail;
2131	}
2132
2133	/* Create tag for Rx ring. */
2134	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2135		    MSK_RING_ALIGN, 0,		/* alignment, boundary */
2136		    BUS_SPACE_MAXADDR,		/* lowaddr */
2137		    BUS_SPACE_MAXADDR,		/* highaddr */
2138		    NULL, NULL,			/* filter, filterarg */
2139		    MSK_RX_RING_SZ,		/* maxsize */
2140		    1,				/* nsegments */
2141		    MSK_RX_RING_SZ,		/* maxsegsize */
2142		    0,				/* flags */
2143		    NULL, NULL,			/* lockfunc, lockarg */
2144		    &sc_if->msk_cdata.msk_rx_ring_tag);
2145	if (error != 0) {
2146		device_printf(sc_if->msk_if_dev,
2147		    "failed to create Rx ring DMA tag\n");
2148		goto fail;
2149	}
2150
2151	/* Create tag for Tx buffers. */
2152	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2153		    1, 0,			/* alignment, boundary */
2154		    BUS_SPACE_MAXADDR,		/* lowaddr */
2155		    BUS_SPACE_MAXADDR,		/* highaddr */
2156		    NULL, NULL,			/* filter, filterarg */
2157		    MSK_TSO_MAXSIZE,		/* maxsize */
2158		    MSK_MAXTXSEGS,		/* nsegments */
2159		    MSK_TSO_MAXSGSIZE,		/* maxsegsize */
2160		    0,				/* flags */
2161		    NULL, NULL,			/* lockfunc, lockarg */
2162		    &sc_if->msk_cdata.msk_tx_tag);
2163	if (error != 0) {
2164		device_printf(sc_if->msk_if_dev,
2165		    "failed to create Tx DMA tag\n");
2166		goto fail;
2167	}
2168
2169	rxalign = 1;
2170	/*
2171	 * Work around a hardware hang which seems to happen when the Rx
2172	 * buffer is not aligned on a multiple of the FIFO word size (8 bytes).
2173	 */
2174	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
2175		rxalign = MSK_RX_BUF_ALIGN;
2176	/* Create tag for Rx buffers. */
2177	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2178		    rxalign, 0,			/* alignment, boundary */
2179		    BUS_SPACE_MAXADDR,		/* lowaddr */
2180		    BUS_SPACE_MAXADDR,		/* highaddr */
2181		    NULL, NULL,			/* filter, filterarg */
2182		    MCLBYTES,			/* maxsize */
2183		    1,				/* nsegments */
2184		    MCLBYTES,			/* maxsegsize */
2185		    0,				/* flags */
2186		    NULL, NULL,			/* lockfunc, lockarg */
2187		    &sc_if->msk_cdata.msk_rx_tag);
2188	if (error != 0) {
2189		device_printf(sc_if->msk_if_dev,
2190		    "failed to create Rx DMA tag\n");
2191		goto fail;
2192	}
2193
2194	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
2195	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag,
2196	    (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK |
2197	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map);
2198	if (error != 0) {
2199		device_printf(sc_if->msk_if_dev,
2200		    "failed to allocate DMA'able memory for Tx ring\n");
2201		goto fail;
2202	}
2203
2204	ctx.msk_busaddr = 0;
2205	error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag,
2206	    sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring,
2207	    MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, 0);
2208	if (error != 0) {
2209		device_printf(sc_if->msk_if_dev,
2210		    "failed to load DMA'able memory for Tx ring\n");
2211		goto fail;
2212	}
2213	sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr;
2214
2215	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
2216	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag,
2217	    (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK |
2218	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map);
2219	if (error != 0) {
2220		device_printf(sc_if->msk_if_dev,
2221		    "failed to allocate DMA'able memory for Rx ring\n");
2222		goto fail;
2223	}
2224
2225	ctx.msk_busaddr = 0;
2226	error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag,
2227	    sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring,
2228	    MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, 0);
2229	if (error != 0) {
2230		device_printf(sc_if->msk_if_dev,
2231		    "failed to load DMA'able memory for Rx ring\n");
2232		goto fail;
2233	}
2234	sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr;
2235
2236	/* Create DMA maps for Tx buffers. */
2237	for (i = 0; i < MSK_TX_RING_CNT; i++) {
2238		txd = &sc_if->msk_cdata.msk_txdesc[i];
2239		txd->tx_m = NULL;
2240		txd->tx_dmamap = NULL;
2241		error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0,
2242		    &txd->tx_dmamap);
2243		if (error != 0) {
2244			device_printf(sc_if->msk_if_dev,
2245			    "failed to create Tx dmamap\n");
2246			goto fail;
2247		}
2248	}
2249	/* Create DMA maps for Rx buffers. */
2250	if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2251	    &sc_if->msk_cdata.msk_rx_sparemap)) != 0) {
2252		device_printf(sc_if->msk_if_dev,
2253		    "failed to create spare Rx dmamap\n");
2254		goto fail;
2255	}
2256	for (i = 0; i < MSK_RX_RING_CNT; i++) {
2257		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2258		rxd->rx_m = NULL;
2259		rxd->rx_dmamap = NULL;
2260		error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2261		    &rxd->rx_dmamap);
2262		if (error != 0) {
2263			device_printf(sc_if->msk_if_dev,
2264			    "failed to create Rx dmamap\n");
2265			goto fail;
2266		}
2267	}
2268
2269fail:
2270	return (error);
2271}
2272
2273static int
2274msk_rx_dma_jalloc(struct msk_if_softc *sc_if)
2275{
2276	struct msk_dmamap_arg ctx;
2277	struct msk_rxdesc *jrxd;
2278	bus_size_t rxalign;
2279	int error, i;
2280
2281	if (jumbo_disable != 0 || (sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
2282		sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
2283		device_printf(sc_if->msk_if_dev,
2284		    "disabling jumbo frame support\n");
2285		return (0);
2286	}
2287	/* Create tag for jumbo Rx ring. */
2288	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2289		    MSK_RING_ALIGN, 0,		/* alignment, boundary */
2290		    BUS_SPACE_MAXADDR,		/* lowaddr */
2291		    BUS_SPACE_MAXADDR,		/* highaddr */
2292		    NULL, NULL,			/* filter, filterarg */
2293		    MSK_JUMBO_RX_RING_SZ,	/* maxsize */
2294		    1,				/* nsegments */
2295		    MSK_JUMBO_RX_RING_SZ,	/* maxsegsize */
2296		    0,				/* flags */
2297		    NULL, NULL,			/* lockfunc, lockarg */
2298		    &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2299	if (error != 0) {
2300		device_printf(sc_if->msk_if_dev,
2301		    "failed to create jumbo Rx ring DMA tag\n");
2302		goto jumbo_fail;
2303	}
2304
2305	rxalign = 1;
2306	/*
2307	 * Work around a hardware hang which seems to happen when the Rx
2308	 * buffer is not aligned on a multiple of the FIFO word size (8 bytes).
2309	 */
2310	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
2311		rxalign = MSK_RX_BUF_ALIGN;
2312	/* Create tag for jumbo Rx buffers. */
2313	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2314		    rxalign, 0,			/* alignment, boundary */
2315		    BUS_SPACE_MAXADDR,		/* lowaddr */
2316		    BUS_SPACE_MAXADDR,		/* highaddr */
2317		    NULL, NULL,			/* filter, filterarg */
2318		    MJUM9BYTES,			/* maxsize */
2319		    1,				/* nsegments */
2320		    MJUM9BYTES,			/* maxsegsize */
2321		    0,				/* flags */
2322		    NULL, NULL,			/* lockfunc, lockarg */
2323		    &sc_if->msk_cdata.msk_jumbo_rx_tag);
2324	if (error != 0) {
2325		device_printf(sc_if->msk_if_dev,
2326		    "failed to create jumbo Rx DMA tag\n");
2327		goto jumbo_fail;
2328	}
2329
2330	/* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
2331	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2332	    (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
2333	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2334	    &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2335	if (error != 0) {
2336		device_printf(sc_if->msk_if_dev,
2337		    "failed to allocate DMA'able memory for jumbo Rx ring\n");
2338		goto jumbo_fail;
2339	}
2340
2341	ctx.msk_busaddr = 0;
2342	error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2343	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
2344	    sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
2345	    msk_dmamap_cb, &ctx, 0);
2346	if (error != 0) {
2347		device_printf(sc_if->msk_if_dev,
2348		    "failed to load DMA'able memory for jumbo Rx ring\n");
2349		goto jumbo_fail;
2350	}
2351	sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
2352
2353	/* Create DMA maps for jumbo Rx buffers. */
2354	if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2355	    &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
2356		device_printf(sc_if->msk_if_dev,
2357		    "failed to create spare jumbo Rx dmamap\n");
2358		goto jumbo_fail;
2359	}
2360	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2361		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2362		jrxd->rx_m = NULL;
2363		jrxd->rx_dmamap = NULL;
2364		error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2365		    &jrxd->rx_dmamap);
2366		if (error != 0) {
2367			device_printf(sc_if->msk_if_dev,
2368			    "failed to create jumbo Rx dmamap\n");
2369			goto jumbo_fail;
2370		}
2371	}
2372
2373	return (0);
2374
2375jumbo_fail:
2376	msk_rx_dma_jfree(sc_if);
2377	device_printf(sc_if->msk_if_dev, "disabling jumbo frame support "
2378	    "due to resource shortage\n");
2379	sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
2380	return (error);
2381}
2382
2383static void
2384msk_txrx_dma_free(struct msk_if_softc *sc_if)
2385{
2386	struct msk_txdesc *txd;
2387	struct msk_rxdesc *rxd;
2388	int i;
2389
2390	/* Tx ring. */
2391	if (sc_if->msk_cdata.msk_tx_ring_tag) {
2392		if (sc_if->msk_cdata.msk_tx_ring_map)
2393			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag,
2394			    sc_if->msk_cdata.msk_tx_ring_map);
2395		if (sc_if->msk_cdata.msk_tx_ring_map &&
2396		    sc_if->msk_rdata.msk_tx_ring)
2397			bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag,
2398			    sc_if->msk_rdata.msk_tx_ring,
2399			    sc_if->msk_cdata.msk_tx_ring_map);
2400		sc_if->msk_rdata.msk_tx_ring = NULL;
2401		sc_if->msk_cdata.msk_tx_ring_map = NULL;
2402		bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag);
2403		sc_if->msk_cdata.msk_tx_ring_tag = NULL;
2404	}
2405	/* Rx ring. */
2406	if (sc_if->msk_cdata.msk_rx_ring_tag) {
2407		if (sc_if->msk_cdata.msk_rx_ring_map)
2408			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag,
2409			    sc_if->msk_cdata.msk_rx_ring_map);
2410		if (sc_if->msk_cdata.msk_rx_ring_map &&
2411		    sc_if->msk_rdata.msk_rx_ring)
2412			bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag,
2413			    sc_if->msk_rdata.msk_rx_ring,
2414			    sc_if->msk_cdata.msk_rx_ring_map);
2415		sc_if->msk_rdata.msk_rx_ring = NULL;
2416		sc_if->msk_cdata.msk_rx_ring_map = NULL;
2417		bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag);
2418		sc_if->msk_cdata.msk_rx_ring_tag = NULL;
2419	}
2420	/* Tx buffers. */
2421	if (sc_if->msk_cdata.msk_tx_tag) {
2422		for (i = 0; i < MSK_TX_RING_CNT; i++) {
2423			txd = &sc_if->msk_cdata.msk_txdesc[i];
2424			if (txd->tx_dmamap) {
2425				bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
2426				    txd->tx_dmamap);
2427				txd->tx_dmamap = NULL;
2428			}
2429		}
2430		bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
2431		sc_if->msk_cdata.msk_tx_tag = NULL;
2432	}
2433	/* Rx buffers. */
2434	if (sc_if->msk_cdata.msk_rx_tag) {
2435		for (i = 0; i < MSK_RX_RING_CNT; i++) {
2436			rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2437			if (rxd->rx_dmamap) {
2438				bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2439				    rxd->rx_dmamap);
2440				rxd->rx_dmamap = NULL;
2441			}
2442		}
2443		if (sc_if->msk_cdata.msk_rx_sparemap) {
2444			bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2445			    sc_if->msk_cdata.msk_rx_sparemap);
2446			sc_if->msk_cdata.msk_rx_sparemap = 0;
2447		}
2448		bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2449		sc_if->msk_cdata.msk_rx_tag = NULL;
2450	}
2451	if (sc_if->msk_cdata.msk_parent_tag) {
2452		bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
2453		sc_if->msk_cdata.msk_parent_tag = NULL;
2454	}
2455}
2456
2457static void
2458msk_rx_dma_jfree(struct msk_if_softc *sc_if)
2459{
2460	struct msk_rxdesc *jrxd;
2461	int i;
2462
2463	/* Jumbo Rx ring. */
2464	if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
2465		if (sc_if->msk_cdata.msk_jumbo_rx_ring_map)
2466			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2467			    sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2468		if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
2469		    sc_if->msk_rdata.msk_jumbo_rx_ring)
2470			bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2471			    sc_if->msk_rdata.msk_jumbo_rx_ring,
2472			    sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2473		sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
2474		sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
2475		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2476		sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
2477	}
2478	/* Jumbo Rx buffers. */
2479	if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
2480		for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2481			jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2482			if (jrxd->rx_dmamap) {
2483				bus_dmamap_destroy(
2484				    sc_if->msk_cdata.msk_jumbo_rx_tag,
2485				    jrxd->rx_dmamap);
2486				jrxd->rx_dmamap = NULL;
2487			}
2488		}
2489		if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
2490			bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
2491			    sc_if->msk_cdata.msk_jumbo_rx_sparemap);
2492			sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
2493		}
2494		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
2495		sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
2496	}
2497}
2498
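/*
 * Encapsulate an mbuf chain into the Tx ring.  Optional list elements
 * for the TSO MSS, VLAN tag and checksum offload parameters are emitted
 * first, followed by one OP_PACKET/OP_LARGESEND element and OP_BUFFER
 * elements for the remaining DMA segments.  EOP is set on the last
 * element and ownership of the first element is handed to the hardware
 * last, so the chip never sees a partially built chain.
 */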
2499static int
2500msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
2501{
2502	struct msk_txdesc *txd, *txd_last;
2503	struct msk_tx_desc *tx_le;
2504	struct mbuf *m;
2505	bus_dmamap_t map;
2506	bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
2507	uint32_t control, prod, si;
2508	uint16_t offset, tcp_offset, tso_mtu;
2509	int error, i, nseg, tso;
2510
2511	MSK_IF_LOCK_ASSERT(sc_if);
2512
2513	tcp_offset = offset = 0;
2514	m = *m_head;
2515	if (((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
2516	    (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) ||
2517	    ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
2518	    (m->m_pkthdr.csum_flags & CSUM_TSO) != 0)) {
2519		/*
2520		 * Since the mbuf carries no protocol-specific structure
2521		 * information, we have to inspect the protocol headers here
2522		 * to set up TSO and checksum offload.  I don't know why
2523		 * Marvell made such a decision in the chip design, because
2524		 * other GigE hardware normally takes care of all these chores
2525		 * itself.  However, the TSO performance of the Yukon II is
2526		 * good enough that it's worth implementing.
2527		 */
2528		struct ether_header *eh;
2529		struct ip *ip;
2530		struct tcphdr *tcp;
2531
2532		if (M_WRITABLE(m) == 0) {
2533			/* Get a writable copy. */
2534			m = m_dup(*m_head, M_DONTWAIT);
2535			m_freem(*m_head);
2536			if (m == NULL) {
2537				*m_head = NULL;
2538				return (ENOBUFS);
2539			}
2540			*m_head = m;
2541		}
2542
2543		offset = sizeof(struct ether_header);
2544		m = m_pullup(m, offset);
2545		if (m == NULL) {
2546			*m_head = NULL;
2547			return (ENOBUFS);
2548		}
2549		eh = mtod(m, struct ether_header *);
2550		/* Check if hardware VLAN insertion is off. */
2551		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2552			offset = sizeof(struct ether_vlan_header);
2553			m = m_pullup(m, offset);
2554			if (m == NULL) {
2555				*m_head = NULL;
2556				return (ENOBUFS);
2557			}
2558		}
2559		m = m_pullup(m, offset + sizeof(struct ip));
2560		if (m == NULL) {
2561			*m_head = NULL;
2562			return (ENOBUFS);
2563		}
2564		ip = (struct ip *)(mtod(m, char *) + offset);
2565		offset += (ip->ip_hl << 2);
2566		tcp_offset = offset;
2567		/*
2568		 * It seems that the Yukon II has a Tx checksum offload bug
2569		 * for small TCP packets that are less than 60 bytes in size
2570		 * (e.g. TCP window probe packets, pure ACK packets).
2571		 * Common workarounds like padding with zeros to reach the
2572		 * minimum Ethernet frame size didn't work at all.
2573		 * Instead of disabling checksum offload completely we
2574		 * resort to a software checksum routine when we encounter
2575		 * short TCP frames.
2576		 * Short UDP packets appear to be handled correctly by the
2577		 * Yukon II.  I also assume this bug does not happen on
2578		 * controllers that use the newer descriptor format or
2579		 * automatic Tx checksum calculation.
2580		 */
2581		if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
2582		    (m->m_pkthdr.len < MSK_MIN_FRAMELEN) &&
2583		    (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
2584			m = m_pullup(m, offset + sizeof(struct tcphdr));
2585			if (m == NULL) {
2586				*m_head = NULL;
2587				return (ENOBUFS);
2588			}
2589			*(uint16_t *)(m->m_data + offset +
2590			    m->m_pkthdr.csum_data) = in_cksum_skip(m,
2591			    m->m_pkthdr.len, offset);
2592			m->m_pkthdr.csum_flags &= ~CSUM_TCP;
2593		}
2594		if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2595			m = m_pullup(m, offset + sizeof(struct tcphdr));
2596			if (m == NULL) {
2597				*m_head = NULL;
2598				return (ENOBUFS);
2599			}
2600			tcp = (struct tcphdr *)(mtod(m, char *) + offset);
2601			offset += (tcp->th_off << 2);
2602		}
2603		*m_head = m;
2604	}
2605
2606	prod = sc_if->msk_cdata.msk_tx_prod;
2607	txd = &sc_if->msk_cdata.msk_txdesc[prod];
2608	txd_last = txd;
2609	map = txd->tx_dmamap;
2610	error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map,
2611	    *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2612	if (error == EFBIG) {
2613		m = m_collapse(*m_head, M_DONTWAIT, MSK_MAXTXSEGS);
2614		if (m == NULL) {
2615			m_freem(*m_head);
2616			*m_head = NULL;
2617			return (ENOBUFS);
2618		}
2619		*m_head = m;
2620		error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag,
2621		    map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2622		if (error != 0) {
2623			m_freem(*m_head);
2624			*m_head = NULL;
2625			return (error);
2626		}
2627	} else if (error != 0)
2628		return (error);
2629	if (nseg == 0) {
2630		m_freem(*m_head);
2631		*m_head = NULL;
2632		return (EIO);
2633	}
2634
2635	/* Check number of available descriptors. */
2636	if (sc_if->msk_cdata.msk_tx_cnt + nseg >=
2637	    (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) {
2638		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
2639		return (ENOBUFS);
2640	}
2641
2642	control = 0;
2643	tso = 0;
2644	tx_le = NULL;
2645
2646	/* Check TSO support. */
2647	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2648		if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
2649			tso_mtu = m->m_pkthdr.tso_segsz;
2650		else
2651			tso_mtu = offset + m->m_pkthdr.tso_segsz;
2652		if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) {
2653			tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2654			tx_le->msk_addr = htole32(tso_mtu);
2655			if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
2656				tx_le->msk_control = htole32(OP_MSS | HW_OWNER);
2657			else
2658				tx_le->msk_control =
2659				    htole32(OP_LRGLEN | HW_OWNER);
2660			sc_if->msk_cdata.msk_tx_cnt++;
2661			MSK_INC(prod, MSK_TX_RING_CNT);
2662			sc_if->msk_cdata.msk_tso_mtu = tso_mtu;
2663		}
2664		tso++;
2665	}
2666	/* Check if we have a VLAN tag to insert. */
2667	if ((m->m_flags & M_VLANTAG) != 0) {
2668		if (tso == 0) {
2669			tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2670			tx_le->msk_addr = htole32(0);
2671			tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
2672			    htons(m->m_pkthdr.ether_vtag));
2673			sc_if->msk_cdata.msk_tx_cnt++;
2674			MSK_INC(prod, MSK_TX_RING_CNT);
2675		} else {
2676			tx_le->msk_control |= htole32(OP_VLAN |
2677			    htons(m->m_pkthdr.ether_vtag));
2678		}
2679		control |= INS_VLAN;
2680	}
2681	/* Check if we have to handle checksum offload. */
2682	if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) {
2683		if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) != 0)
2684			control |= CALSUM;
2685		else {
2686			tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2687			tx_le->msk_addr = htole32(((tcp_offset +
2688			    m->m_pkthdr.csum_data) & 0xffff) |
2689			    ((uint32_t)tcp_offset << 16));
2690			tx_le->msk_control = htole32(1 << 16 |
2691			    (OP_TCPLISW | HW_OWNER));
2692			control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
2693			if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2694				control |= UDPTCP;
2695			sc_if->msk_cdata.msk_tx_cnt++;
2696			MSK_INC(prod, MSK_TX_RING_CNT);
2697		}
2698	}
2699
2700	si = prod;
2701	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2702	tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
2703	if (tso == 0)
2704		tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2705		    OP_PACKET);
2706	else
2707		tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2708		    OP_LARGESEND);
2709	sc_if->msk_cdata.msk_tx_cnt++;
2710	MSK_INC(prod, MSK_TX_RING_CNT);
2711
2712	for (i = 1; i < nseg; i++) {
2713		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2714		tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
2715		tx_le->msk_control = htole32(txsegs[i].ds_len | control |
2716		    OP_BUFFER | HW_OWNER);
2717		sc_if->msk_cdata.msk_tx_cnt++;
2718		MSK_INC(prod, MSK_TX_RING_CNT);
2719	}
2720	/* Update producer index. */
2721	sc_if->msk_cdata.msk_tx_prod = prod;
2722
2723	/* Set EOP on the last descriptor. */
2724	prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
2725	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2726	tx_le->msk_control |= htole32(EOP);
2727
2728	/* Turn the first descriptor ownership to hardware. */
2729	tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
2730	tx_le->msk_control |= htole32(HW_OWNER);
2731
2732	txd = &sc_if->msk_cdata.msk_txdesc[prod];
2733	map = txd_last->tx_dmamap;
2734	txd_last->tx_dmamap = txd->tx_dmamap;
2735	txd->tx_dmamap = map;
2736	txd->tx_m = m;
2737
2738	/* Sync descriptors. */
2739	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
2740	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
2741	    sc_if->msk_cdata.msk_tx_ring_map,
2742	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2743
2744	return (0);
2745}
2746
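/* Deferred transmit task: just invoke the if_start handler. */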
2747static void
2748msk_tx_task(void *arg, int pending)
2749{
2750	struct ifnet *ifp;
2751
2752	ifp = arg;
2753	msk_start(ifp);
2754}
2755
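/*
 * if_start handler: drain the interface send queue into the Tx ring and
 * notify the prefetch unit of the new producer index.
 */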
2756static void
2757msk_start(struct ifnet *ifp)
2758{
2759	struct msk_if_softc *sc_if;
2760	struct mbuf *m_head;
2761	int enq;
2762
2763	sc_if = ifp->if_softc;
2764
2765	MSK_IF_LOCK(sc_if);
2766
2767	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2768	    IFF_DRV_RUNNING || (sc_if->msk_flags & MSK_FLAG_LINK) == 0) {
2769		MSK_IF_UNLOCK(sc_if);
2770		return;
2771	}
2772
2773	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2774	    sc_if->msk_cdata.msk_tx_cnt <
2775	    (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
2776		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2777		if (m_head == NULL)
2778			break;
2779		/*
2780		 * Pack the data into the transmit ring. If we
2781		 * don't have room, set the OACTIVE flag and wait
2782		 * for the NIC to drain the ring.
2783		 */
2784		if (msk_encap(sc_if, &m_head) != 0) {
2785			if (m_head == NULL)
2786				break;
2787			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2788			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2789			break;
2790		}
2791
2792		enq++;
2793		/*
2794		 * If there's a BPF listener, bounce a copy of this frame
2795		 * to him.
2796		 */
2797		ETHER_BPF_MTAP(ifp, m_head);
2798	}
2799
2800	if (enq > 0) {
2801		/* Transmit */
2802		CSR_WRITE_2(sc_if->msk_softc,
2803		    Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
2804		    sc_if->msk_cdata.msk_tx_prod);
2805
2806		/* Set a timeout in case the chip goes out to lunch. */
2807		sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT;
2808	}
2809
2810	MSK_IF_UNLOCK(sc_if);
2811}
2812
2813static void
2814msk_watchdog(struct msk_if_softc *sc_if)
2815{
2816	struct ifnet *ifp;
2817	uint32_t ridx;
2818	int idx;
2819
2820	MSK_IF_LOCK_ASSERT(sc_if);
2821
2822	if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer)
2823		return;
2824	ifp = sc_if->msk_ifp;
2825	if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) {
2826		if (bootverbose)
2827			if_printf(sc_if->msk_ifp, "watchdog timeout "
2828			   "(missed link)\n");
2829		ifp->if_oerrors++;
2830		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2831		msk_init_locked(sc_if);
2832		return;
2833	}
2834
2835	/*
2836	 * Reclaim first as there is a possibility of losing Tx completion
2837	 * interrupts.
2838	 */
2839	ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX;
2840	idx = CSR_READ_2(sc_if->msk_softc, ridx);
2841	if (sc_if->msk_cdata.msk_tx_cons != idx) {
2842		msk_txeof(sc_if, idx);
2843		if (sc_if->msk_cdata.msk_tx_cnt == 0) {
2844			if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2845			    "-- recovering\n");
2846			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2847				taskqueue_enqueue(taskqueue_fast,
2848				    &sc_if->msk_tx_task);
2849			return;
2850		}
2851	}
2852
2853	if_printf(ifp, "watchdog timeout\n");
2854	ifp->if_oerrors++;
2855	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2856	msk_init_locked(sc_if);
2857	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2858		taskqueue_enqueue(taskqueue_fast, &sc_if->msk_tx_task);
2859}
2860
2861static int
2862mskc_shutdown(device_t dev)
2863{
2864	struct msk_softc *sc;
2865	int i;
2866
2867	sc = device_get_softc(dev);
2868	MSK_LOCK(sc);
2869	for (i = 0; i < sc->msk_num_port; i++) {
2870		if (sc->msk_if[i] != NULL)
2871			msk_stop(sc->msk_if[i]);
2872	}
2873
2874	/* Disable all interrupts. */
2875	CSR_WRITE_4(sc, B0_IMSK, 0);
2876	CSR_READ_4(sc, B0_IMSK);
2877	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2878	CSR_READ_4(sc, B0_HWE_IMSK);
2879
2880	/* Put hardware reset. */
2881	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2882
2883	MSK_UNLOCK(sc);
2884	return (0);
2885}
2886
2887static int
2888mskc_suspend(device_t dev)
2889{
2890	struct msk_softc *sc;
2891	int i;
2892
2893	sc = device_get_softc(dev);
2894
2895	MSK_LOCK(sc);
2896
2897	for (i = 0; i < sc->msk_num_port; i++) {
2898		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2899		    ((sc->msk_if[i]->msk_ifp->if_drv_flags &
2900		    IFF_DRV_RUNNING) != 0))
2901			msk_stop(sc->msk_if[i]);
2902	}
2903
2904	/* Disable all interrupts. */
2905	CSR_WRITE_4(sc, B0_IMSK, 0);
2906	CSR_READ_4(sc, B0_IMSK);
2907	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2908	CSR_READ_4(sc, B0_HWE_IMSK);
2909
2910	msk_phy_power(sc, MSK_PHY_POWERDOWN);
2911
2912	/* Put hardware reset. */
2913	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2914	sc->msk_pflags |= MSK_FLAG_SUSPEND;
2915
2916	MSK_UNLOCK(sc);
2917
2918	return (0);
2919}
2920
2921static int
2922mskc_resume(device_t dev)
2923{
2924	struct msk_softc *sc;
2925	int i;
2926
2927	sc = device_get_softc(dev);
2928
2929	MSK_LOCK(sc);
2930
2931	mskc_reset(sc);
2932	for (i = 0; i < sc->msk_num_port; i++) {
2933		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2934		    ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) {
2935			sc->msk_if[i]->msk_ifp->if_drv_flags &=
2936			    ~IFF_DRV_RUNNING;
2937			msk_init_locked(sc->msk_if[i]);
2938		}
2939	}
2940	sc->msk_pflags &= ~MSK_FLAG_SUSPEND;
2941
2942	MSK_UNLOCK(sc);
2943
2944	return (0);
2945}
2946
2947#ifndef __NO_STRICT_ALIGNMENT
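/*
 * For controllers using the RAM buffer, Rx buffers are aligned on an
 * 8-byte (MSK_RX_BUF_ALIGN) boundary (see the hang workaround note in
 * msk_txrx_dma_alloc()), which leaves the Ethernet header misaligned
 * for strict-alignment architectures.  Shift the received data down by
 * MSK_RX_BUF_ALIGN - ETHER_ALIGN bytes so the IP header ends up
 * 32-bit aligned.
 */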
2948static __inline void
2949msk_fixup_rx(struct mbuf *m)
2950{
2951	int i;
2952	uint16_t *src, *dst;
2953
2954	src = mtod(m, uint16_t *);
2955	dst = src - 3;
2956
2957	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
2958		*dst++ = *src++;
2959
2960	m->m_data -= (MSK_RX_BUF_ALIGN - ETHER_ALIGN);
2961}
2962#endif
2963
2964static void
2965msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
2966    int len)
2967{
2968	struct mbuf *m;
2969	struct ifnet *ifp;
2970	struct msk_rxdesc *rxd;
2971	int cons, rxlen;
2972
2973	ifp = sc_if->msk_ifp;
2974
2975	MSK_IF_LOCK_ASSERT(sc_if);
2976
2977	cons = sc_if->msk_cdata.msk_rx_cons;
2978	do {
2979		rxlen = status >> 16;
2980		if ((status & GMR_FS_VLAN) != 0 &&
2981		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2982			rxlen -= ETHER_VLAN_ENCAP_LEN;
2983		if ((sc_if->msk_flags & MSK_FLAG_NORXCHK) != 0) {
2984			/*
2985			 * For controllers that return a bogus status code,
2986			 * just do a minimal check and let the upper stack
2987			 * handle this frame.
2988			 */
2989			if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) {
2990				ifp->if_ierrors++;
2991				msk_discard_rxbuf(sc_if, cons);
2992				break;
2993			}
2994		} else if (len > sc_if->msk_framesize ||
2995		    ((status & GMR_FS_ANY_ERR) != 0) ||
2996		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
2997			/* Don't count flow-control packet as errors. */
2998			if ((status & GMR_FS_GOOD_FC) == 0)
2999				ifp->if_ierrors++;
3000			msk_discard_rxbuf(sc_if, cons);
3001			break;
3002		}
3003		rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
3004		m = rxd->rx_m;
3005		if (msk_newbuf(sc_if, cons) != 0) {
3006			ifp->if_iqdrops++;
3007			/* Reuse old buffer. */
3008			msk_discard_rxbuf(sc_if, cons);
3009			break;
3010		}
3011		m->m_pkthdr.rcvif = ifp;
3012		m->m_pkthdr.len = m->m_len = len;
3013#ifndef __NO_STRICT_ALIGNMENT
3014		if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
3015			msk_fixup_rx(m);
3016#endif
3017		ifp->if_ipackets++;
3018		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
3019		    (control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) {
3020			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3021			if ((control & CSS_IPV4_CSUM_OK) != 0)
3022				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3023			if ((control & (CSS_TCP | CSS_UDP)) != 0 &&
3024			    (control & (CSS_TCPUDP_CSUM_OK)) != 0) {
3025				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3026				    CSUM_PSEUDO_HDR;
3027				m->m_pkthdr.csum_data = 0xffff;
3028			}
3029		}
3030		/* Check for VLAN tagged packets. */
3031		if ((status & GMR_FS_VLAN) != 0 &&
3032		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3033			m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
3034			m->m_flags |= M_VLANTAG;
3035		}
3036		MSK_IF_UNLOCK(sc_if);
3037		(*ifp->if_input)(ifp, m);
3038		MSK_IF_LOCK(sc_if);
3039	} while (0);
3040
3041	MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
3042	MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
3043}
3044
3045static void
3046msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
3047    int len)
3048{
3049	struct mbuf *m;
3050	struct ifnet *ifp;
3051	struct msk_rxdesc *jrxd;
3052	int cons, rxlen;
3053
3054	ifp = sc_if->msk_ifp;
3055
3056	MSK_IF_LOCK_ASSERT(sc_if);
3057
3058	cons = sc_if->msk_cdata.msk_rx_cons;
3059	do {
3060		rxlen = status >> 16;
3061		if ((status & GMR_FS_VLAN) != 0 &&
3062		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3063			rxlen -= ETHER_VLAN_ENCAP_LEN;
3064		if (len > sc_if->msk_framesize ||
3065		    ((status & GMR_FS_ANY_ERR) != 0) ||
3066		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
3067			/* Don't count flow-control packet as errors. */
3068			if ((status & GMR_FS_GOOD_FC) == 0)
3069				ifp->if_ierrors++;
3070			msk_discard_jumbo_rxbuf(sc_if, cons);
3071			break;
3072		}
3073		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
3074		m = jrxd->rx_m;
3075		if (msk_jumbo_newbuf(sc_if, cons) != 0) {
3076			ifp->if_iqdrops++;
3077			/* Reuse old buffer. */
3078			msk_discard_jumbo_rxbuf(sc_if, cons);
3079			break;
3080		}
3081		m->m_pkthdr.rcvif = ifp;
3082		m->m_pkthdr.len = m->m_len = len;
3083#ifndef __NO_STRICT_ALIGNMENT
3084		if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
3085			msk_fixup_rx(m);
3086#endif
3087		ifp->if_ipackets++;
3088		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
3089		    (control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) {
3090			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3091			if ((control & CSS_IPV4_CSUM_OK) != 0)
3092				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3093			if ((control & (CSS_TCP | CSS_UDP)) != 0 &&
3094			    (control & (CSS_TCPUDP_CSUM_OK)) != 0) {
3095				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3096				    CSUM_PSEUDO_HDR;
3097				m->m_pkthdr.csum_data = 0xffff;
3098			}
3099		}
3100		/* Check for VLAN tagged packets. */
3101		if ((status & GMR_FS_VLAN) != 0 &&
3102		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3103			m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
3104			m->m_flags |= M_VLANTAG;
3105		}
3106		MSK_IF_UNLOCK(sc_if);
3107		(*ifp->if_input)(ifp, m);
3108		MSK_IF_LOCK(sc_if);
3109	} while (0);
3110
3111	MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
3112	MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
3113}
3114
3115static void
3116msk_txeof(struct msk_if_softc *sc_if, int idx)
3117{
3118	struct msk_txdesc *txd;
3119	struct msk_tx_desc *cur_tx;
3120	struct ifnet *ifp;
3121	uint32_t control;
3122	int cons, prog;
3123
3124	MSK_IF_LOCK_ASSERT(sc_if);
3125
3126	ifp = sc_if->msk_ifp;
3127
3128	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
3129	    sc_if->msk_cdata.msk_tx_ring_map,
3130	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3131	/*
3132	 * Go through our tx ring and free mbufs for those
3133	 * frames that have been sent.
3134	 */
3135	cons = sc_if->msk_cdata.msk_tx_cons;
3136	prog = 0;
3137	for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
3138		if (sc_if->msk_cdata.msk_tx_cnt <= 0)
3139			break;
3140		prog++;
3141		cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
3142		control = le32toh(cur_tx->msk_control);
3143		sc_if->msk_cdata.msk_tx_cnt--;
3144		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3145		if ((control & EOP) == 0)
3146			continue;
3147		txd = &sc_if->msk_cdata.msk_txdesc[cons];
3148		bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap,
3149		    BUS_DMASYNC_POSTWRITE);
3150		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);
3151
3152		ifp->if_opackets++;
3153		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
3154		    __func__));
3155		m_freem(txd->tx_m);
3156		txd->tx_m = NULL;
3157	}
3158
3159	if (prog > 0) {
3160		sc_if->msk_cdata.msk_tx_cons = cons;
3161		if (sc_if->msk_cdata.msk_tx_cnt == 0)
3162			sc_if->msk_watchdog_timer = 0;
3163		/* No need to sync LEs as we didn't update LEs. */
3164	}
3165}
3166
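/*
 * Per-interface 1Hz callout: drive the MII state machine and the Tx
 * watchdog, then reschedule ourselves.
 */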
3167static void
3168msk_tick(void *xsc_if)
3169{
3170	struct msk_if_softc *sc_if;
3171	struct mii_data *mii;
3172
3173	sc_if = xsc_if;
3174
3175	MSK_IF_LOCK_ASSERT(sc_if);
3176
3177	mii = device_get_softc(sc_if->msk_miibus);
3178
3179	mii_tick(mii);
3180	msk_watchdog(sc_if);
3181	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3182}
3183
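/*
 * Service a PHY interrupt.  The interrupt status register is read
 * twice; the first read is discarded, apparently to flush any stale
 * latched status.
 */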
3184static void
3185msk_intr_phy(struct msk_if_softc *sc_if)
3186{
3187	uint16_t status;
3188
3189	msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3190	status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3191	/* Handle FIFO Underrun/Overflow? */
3192	if ((status & PHY_M_IS_FIFO_ERROR))
3193		device_printf(sc_if->msk_if_dev,
3194		    "PHY FIFO underrun/overflow.\n");
3195}
3196
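/* Service GMAC interrupts: Rx FIFO overruns and Tx FIFO underruns. */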
3197static void
3198msk_intr_gmac(struct msk_if_softc *sc_if)
3199{
3200	struct msk_softc *sc;
3201	uint8_t status;
3202
3203	sc = sc_if->msk_softc;
3204	status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3205
3206	/* GMAC Rx FIFO overrun. */
3207	if ((status & GM_IS_RX_FF_OR) != 0) {
3208		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3209		    GMF_CLI_RX_FO);
3210		device_printf(sc_if->msk_if_dev, "Rx FIFO overrun!\n");
3211	}
3212	/* GMAC Tx FIFO underrun. */
3213	if ((status & GM_IS_TX_FF_UR) != 0) {
3214		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3215		    GMF_CLI_TX_FU);
3216		device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
3217		/*
3218		 * XXX
3219		 * In case of a Tx underrun we may need to flush/reset the
3220		 * Tx MAC, but that would also require resynchronization
3221		 * with the status LEs.  Reinitializing the status LEs would
3222		 * affect the other port in a dual MAC configuration, so it
3223		 * should be avoided as much as possible.
3224		 * Due to the lack of documentation this is all a vague
3225		 * guess, but it needs more investigation.
3226		 */
3227	}
3228}
3229
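/* Decode and clear the per-port hardware error interrupt sources. */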
3230static void
3231msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
3232{
3233	struct msk_softc *sc;
3234
3235	sc = sc_if->msk_softc;
3236	if ((status & Y2_IS_PAR_RD1) != 0) {
3237		device_printf(sc_if->msk_if_dev,
3238		    "RAM buffer read parity error\n");
3239		/* Clear IRQ. */
3240		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3241		    RI_CLR_RD_PERR);
3242	}
3243	if ((status & Y2_IS_PAR_WR1) != 0) {
3244		device_printf(sc_if->msk_if_dev,
3245		    "RAM buffer write parity error\n");
3246		/* Clear IRQ. */
3247		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3248		    RI_CLR_WR_PERR);
3249	}
3250	if ((status & Y2_IS_PAR_MAC1) != 0) {
3251		device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
3252		/* Clear IRQ. */
3253		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3254		    GMF_CLI_TX_PE);
3255	}
3256	if ((status & Y2_IS_PAR_RX1) != 0) {
3257		device_printf(sc_if->msk_if_dev, "Rx parity error\n");
3258		/* Clear IRQ. */
3259		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
3260	}
3261	if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
3262		device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
3263		/* Clear IRQ. */
3264		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
3265	}
3266}
3267
3268static void
3269msk_intr_hwerr(struct msk_softc *sc)
3270{
3271	uint32_t status;
3272	uint32_t tlphead[4];
3273
3274	status = CSR_READ_4(sc, B0_HWE_ISRC);
3275	/* Time Stamp timer overflow. */
3276	if ((status & Y2_IS_TIST_OV) != 0)
3277		CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
3278	if ((status & Y2_IS_PCI_NEXP) != 0) {
3279		/*
3280		 * A PCI Express error occurred which is not described in
3281		 * the PEX spec.
3282		 * This error is also mapped to either the Master Abort
3283		 * (Y2_IS_MST_ERR) or the Target Abort (Y2_IS_IRQ_STAT) bit
3284		 * and can only be cleared there.
3285		 */
3286		device_printf(sc->msk_dev,
3287		    "PCI Express protocol violation error\n");
3288	}
3289
3290	if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
3291		uint16_t v16;
3292
3293		if ((status & Y2_IS_MST_ERR) != 0)
3294			device_printf(sc->msk_dev,
3295			    "unexpected IRQ Status error\n");
3296		else
3297			device_printf(sc->msk_dev,
3298			    "unexpected IRQ Master error\n");
3299		/* Reset all bits in the PCI status register. */
3300		v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
3301		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3302		pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
3303		    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
3304		    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
3305		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3306	}
3307
3308	/* Check for PCI Express Uncorrectable Error. */
3309	if ((status & Y2_IS_PCI_EXP) != 0) {
3310		uint32_t v32;
3311
3312		/*
3313		 * On the PCI Express bus, bridges are called root complexes
3314		 * (RC).  PCI Express errors are also recognized by the root
3315		 * complex, which requests the system to handle the problem.
3316		 * After an error occurrence it may be that no further access
3317		 * to the adapter can be performed.
3318		 */
3319
3320		v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
3321		if ((v32 & PEX_UNSUP_REQ) != 0) {
3322			/* Ignore unsupported request error. */
3323			device_printf(sc->msk_dev,
3324			    "Uncorrectable PCI Express error\n");
3325		}
3326		if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
3327			int i;
3328
3329			/* Get the TLP header from the Log Registers. */
3330			for (i = 0; i < 4; i++)
3331				tlphead[i] = CSR_PCI_READ_4(sc,
3332				    PEX_HEADER_LOG + i * 4);
3333			/* Check for vendor defined broadcast message. */
3334			if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
3335				sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
3336				CSR_WRITE_4(sc, B0_HWE_IMSK,
3337				    sc->msk_intrhwemask);
3338				CSR_READ_4(sc, B0_HWE_IMSK);
3339			}
3340		}
3341		/* Clear the interrupt. */
3342		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3343		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
3344		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3345	}
3346
3347	if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
3348		msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
3349	if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
3350		msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
3351}
3352
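/*
 * Flush pending Rx LEs to the chip: sync the active (standard or jumbo)
 * Rx ring and hand the new producer index to the prefetch unit.
 */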
3353static __inline void
3354msk_rxput(struct msk_if_softc *sc_if)
3355{
3356	struct msk_softc *sc;
3357
3358	sc = sc_if->msk_softc;
3359	if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN))
3360		bus_dmamap_sync(
3361		    sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
3362		    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
3363		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3364	else
3365		bus_dmamap_sync(
3366		    sc_if->msk_cdata.msk_rx_ring_tag,
3367		    sc_if->msk_cdata.msk_rx_ring_map,
3368		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3369	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
3370	    PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
3371}
3372
3373static int
3374msk_handle_events(struct msk_softc *sc)
3375{
3376	struct msk_if_softc *sc_if;
3377	int rxput[2];
3378	struct msk_stat_desc *sd;
3379	uint32_t control, status;
3380	int cons, idx, len, port, rxprog;
3381
3382	idx = CSR_READ_2(sc, STAT_PUT_IDX);
3383	if (idx == sc->msk_stat_cons)
3384		return (0);
3385
3386	/* Sync status LEs. */
3387	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
3388	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3389	/* XXX Sync Rx LEs here. */
3390
3391	rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
3392
3393	rxprog = 0;
3394	for (cons = sc->msk_stat_cons; cons != idx;) {
3395		sd = &sc->msk_stat_ring[cons];
3396		control = le32toh(sd->msk_control);
3397		if ((control & HW_OWNER) == 0)
3398			break;
3399		/*
3400		 * Marvell's FreeBSD driver updates the status LE after
3401		 * clearing HW_OWNER.  However, we don't have a way to sync
3402		 * a single LE with the bus_dma(9) API; it only provides a
3403		 * way to sync an entire DMA map.  So don't sync the LE
3404		 * until we have a better way to sync LEs.
3405		 */
3406		control &= ~HW_OWNER;
3407		sd->msk_control = htole32(control);
3408		status = le32toh(sd->msk_status);
3409		len = control & STLE_LEN_MASK;
3410		port = (control >> 16) & 0x01;
3411		sc_if = sc->msk_if[port];
3412		if (sc_if == NULL) {
3413			device_printf(sc->msk_dev, "invalid port opcode "
3414			    "0x%08x\n", control & STLE_OP_MASK);
3415			continue;
3416		}
3417
3418		switch (control & STLE_OP_MASK) {
3419		case OP_RXVLAN:
3420			sc_if->msk_vtag = ntohs(len);
3421			break;
3422		case OP_RXCHKSVLAN:
3423			sc_if->msk_vtag = ntohs(len);
3424			break;
3425		case OP_RXSTAT:
3426			if (sc_if->msk_framesize >
3427			    (MCLBYTES - MSK_RX_BUF_ALIGN))
3428				msk_jumbo_rxeof(sc_if, status, control, len);
3429			else
3430				msk_rxeof(sc_if, status, control, len);
3431			rxprog++;
3432			/*
3433			 * Because there is no way to sync a single Rx LE,
3434			 * defer the DMA sync operation until the end of
3435			 * event processing.
3436			 */
3437			rxput[port]++;
3438			/* Update the prefetch unit if we've passed the watermark. */
3439			if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
3440				msk_rxput(sc_if);
3441				rxput[port] = 0;
3442			}
3443			break;
3444		case OP_TXINDEXLE:
3445			if (sc->msk_if[MSK_PORT_A] != NULL)
3446				msk_txeof(sc->msk_if[MSK_PORT_A],
3447				    status & STLE_TXA1_MSKL);
3448			if (sc->msk_if[MSK_PORT_B] != NULL)
3449				msk_txeof(sc->msk_if[MSK_PORT_B],
3450				    ((status & STLE_TXA2_MSKL) >>
3451				    STLE_TXA2_SHIFTL) |
3452				    ((len & STLE_TXA2_MSKH) <<
3453				    STLE_TXA2_SHIFTH));
3454			break;
3455		default:
3456			device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
3457			    control & STLE_OP_MASK);
3458			break;
3459		}
3460		MSK_INC(cons, MSK_STAT_RING_CNT);
3461		if (rxprog > sc->msk_process_limit)
3462			break;
3463	}
3464
3465	sc->msk_stat_cons = cons;
3466	/* XXX We should sync status LEs here. See above notes. */
3467
3468	if (rxput[MSK_PORT_A] > 0)
3469		msk_rxput(sc->msk_if[MSK_PORT_A]);
3470	if (rxput[MSK_PORT_B] > 0)
3471		msk_rxput(sc->msk_if[MSK_PORT_B]);
3472
3473	return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
3474}
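
/*
 * Illustrative sketch (an assumption, not taken from this file): the
 * MSK_INC() macro used in msk_handle_events() is expected to advance a
 * ring index with wrap-around, conceptually
 *
 *	cons = (cons + 1) % MSK_STAT_RING_CNT;
 *
 * so the loop walks the status ring from msk_stat_cons up to the put
 * index the hardware reported in STAT_PUT_IDX.
 */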
3475
3476/* Legacy interrupt handler for shared interrupt. */
3477static void
3478msk_legacy_intr(void *xsc)
3479{
3480	struct msk_softc *sc;
3481	struct msk_if_softc *sc_if0, *sc_if1;
3482	struct ifnet *ifp0, *ifp1;
3483	uint32_t status;
3484
3485	sc = xsc;
3486	MSK_LOCK(sc);
3487
3488	/* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
3489	status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3490	if (status == 0 || status == 0xffffffff ||
3491	    (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 ||
3492	    (status & sc->msk_intrmask) == 0) {
3493		CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3494		return;
3495	}
3496
3497	sc_if0 = sc->msk_if[MSK_PORT_A];
3498	sc_if1 = sc->msk_if[MSK_PORT_B];
3499	ifp0 = ifp1 = NULL;
3500	if (sc_if0 != NULL)
3501		ifp0 = sc_if0->msk_ifp;
3502	if (sc_if1 != NULL)
3503		ifp1 = sc_if1->msk_ifp;
3504
3505	if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3506		msk_intr_phy(sc_if0);
3507	if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3508		msk_intr_phy(sc_if1);
3509	if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3510		msk_intr_gmac(sc_if0);
3511	if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3512		msk_intr_gmac(sc_if1);
3513	if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3514		device_printf(sc->msk_dev, "Rx descriptor error\n");
3515		sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3516		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3517		CSR_READ_4(sc, B0_IMSK);
3518	}
3519	if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3520		device_printf(sc->msk_dev, "Tx descriptor error\n");
3521		sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3522		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3523		CSR_READ_4(sc, B0_IMSK);
3524	}
3525	if ((status & Y2_IS_HW_ERR) != 0)
3526		msk_intr_hwerr(sc);
3527
3528	while (msk_handle_events(sc) != 0)
3529		;
3530	if ((status & Y2_IS_STAT_BMU) != 0)
3531		CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3532
3533	/* Reenable interrupts. */
3534	CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3535
3536	if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3537	    !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3538		taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task);
3539	if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3540	    !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3541		taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task);
3542
3543	MSK_UNLOCK(sc);
3544}
3545
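/*
 * Interrupt filter handler.  Reading B0_Y2_SP_ISRC2 masks further
 * interrupts; the real work is deferred to msk_int_task() on the
 * driver's taskqueue, which re-enables interrupts when it is done.
 */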
3546static int
3547msk_intr(void *xsc)
3548{
3549	struct msk_softc *sc;
3550	uint32_t status;
3551
3552	sc = xsc;
3553	status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3554	/* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
3555	if (status == 0 || status == 0xffffffff) {
3556		CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3557		return (FILTER_STRAY);
3558	}
3559
3560	taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task);
3561	return (FILTER_HANDLED);
3562}
3563
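/*
 * Taskqueue handler that does the actual interrupt processing for
 * msk_intr(): PHY/GMAC events, descriptor errors, hardware errors and
 * the status LE ring.  If msk_handle_events() reports more work, the
 * task is re-queued instead of re-enabling interrupts.
 */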
3564static void
3565msk_int_task(void *arg, int pending)
3566{
3567	struct msk_softc *sc;
3568	struct msk_if_softc *sc_if0, *sc_if1;
3569	struct ifnet *ifp0, *ifp1;
3570	uint32_t status;
3571	int domore;
3572
3573	sc = arg;
3574	MSK_LOCK(sc);
3575
3576	/* Get interrupt source. */
3577	status = CSR_READ_4(sc, B0_ISRC);
3578	if (status == 0 || status == 0xffffffff ||
3579	    (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 ||
3580	    (status & sc->msk_intrmask) == 0)
3581		goto done;
3582
3583	sc_if0 = sc->msk_if[MSK_PORT_A];
3584	sc_if1 = sc->msk_if[MSK_PORT_B];
3585	ifp0 = ifp1 = NULL;
3586	if (sc_if0 != NULL)
3587		ifp0 = sc_if0->msk_ifp;
3588	if (sc_if1 != NULL)
3589		ifp1 = sc_if1->msk_ifp;
3590
3591	if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3592		msk_intr_phy(sc_if0);
3593	if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3594		msk_intr_phy(sc_if1);
3595	if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3596		msk_intr_gmac(sc_if0);
3597	if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3598		msk_intr_gmac(sc_if1);
3599	if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3600		device_printf(sc->msk_dev, "Rx descriptor error\n");
3601		sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3602		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3603		CSR_READ_4(sc, B0_IMSK);
3604	}
3605	if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3606		device_printf(sc->msk_dev, "Tx descriptor error\n");
3607		sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3608		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3609		CSR_READ_4(sc, B0_IMSK);
3610	}
3611	if ((status & Y2_IS_HW_ERR) != 0)
3612		msk_intr_hwerr(sc);
3613
3614	domore = msk_handle_events(sc);
3615	if ((status & Y2_IS_STAT_BMU) != 0)
3616		CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3617
3618	if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3619	    !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3620		taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task);
3621	if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3622	    !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3623		taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task);
3624
3625	if (domore > 0) {
3626		taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task);
3627		MSK_UNLOCK(sc);
3628		return;
3629	}
3630done:
3631	MSK_UNLOCK(sc);
3632
3633	/* Reenable interrupts. */
3634	CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3635}
3636
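/*
 * Configure Tx GMAC FIFO store-and-forward and jumbo frame handling for
 * the current MTU.  Yukon Extreme (other than revision A0) can keep
 * store-and-forward enabled for jumbo frames; all other chips disable
 * it and lower the FIFO almost-empty threshold instead.
 */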
3637static void
3638msk_set_tx_stfwd(struct msk_if_softc *sc_if)
3639{
3640	struct msk_softc *sc;
3641	struct ifnet *ifp;
3642
3643	ifp = sc_if->msk_ifp;
3644	sc = sc_if->msk_softc;
3645	switch (sc->msk_hw_id) {
3646	case CHIP_ID_YUKON_EX:
3647		if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0)
3648			goto yukon_ex_workaround;
3649		if (ifp->if_mtu > ETHERMTU)
3650			CSR_WRITE_4(sc,
3651			    MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3652			    TX_JUMBO_ENA | TX_STFW_ENA);
3653		else
3654			CSR_WRITE_4(sc,
3655			    MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3656			    TX_JUMBO_DIS | TX_STFW_ENA);
3657		break;
3658	default:
3659yukon_ex_workaround:
3660		if (ifp->if_mtu > ETHERMTU) {
3661			/* Set Tx GMAC FIFO Almost Empty Threshold. */
3662			CSR_WRITE_4(sc,
3663			    MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
3664			    MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
3665			/* Disable Store & Forward mode for Tx. */
3666			CSR_WRITE_4(sc,
3667			    MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3668			    TX_JUMBO_ENA | TX_STFW_DIS);
3669		} else {
3670			/* Enable Store & Forward mode for Tx. */
3671			CSR_WRITE_4(sc,
3672			    MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3673			    TX_JUMBO_DIS | TX_STFW_ENA);
3674		}
3675		break;
3676	}
3677}
3678
3679static void
3680msk_init(void *xsc)
3681{
3682	struct msk_if_softc *sc_if = xsc;
3683
3684	MSK_IF_LOCK(sc_if);
3685	msk_init_locked(sc_if);
3686	MSK_IF_UNLOCK(sc_if);
3687}
3688
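/*
 * Bring the interface up: stop any pending activity, reprogram the GMAC,
 * the Rx/Tx MAC FIFOs, the RAM buffers, the BMUs and the prefetch units,
 * reload the Rx/Tx rings, and finally unmask the port's interrupts and
 * restart media negotiation.  Called with the per-interface lock held.
 */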
3689static void
3690msk_init_locked(struct msk_if_softc *sc_if)
3691{
3692	struct msk_softc *sc;
3693	struct ifnet *ifp;
3694	struct mii_data	 *mii;
3695	uint16_t eaddr[ETHER_ADDR_LEN / 2];
3696	uint16_t gmac;
3697	uint32_t reg;
3698	int error, i;
3699
3700	MSK_IF_LOCK_ASSERT(sc_if);
3701
3702	ifp = sc_if->msk_ifp;
3703	sc = sc_if->msk_softc;
3704	mii = device_get_softc(sc_if->msk_miibus);
3705
3706	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3707		return;
3708
3709	error = 0;
3710	/* Cancel pending I/O and free all Rx/Tx buffers. */
3711	msk_stop(sc_if);
3712
3713	if (ifp->if_mtu < ETHERMTU)
3714		sc_if->msk_framesize = ETHERMTU;
3715	else
3716		sc_if->msk_framesize = ifp->if_mtu;
3717	sc_if->msk_framesize += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3718	if (ifp->if_mtu > ETHERMTU &&
3719	    (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
3720		ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
3721		ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
3722	}
3723
3724	/* GMAC Control reset. */
3725	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET);
3726	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR);
3727	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF);
3728	if (sc->msk_hw_id == CHIP_ID_YUKON_EX)
3729		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL),
3730		    GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
3731		    GMC_BYP_RETR_ON);
3732
3733	/*
3734	 * Initialize GMAC first such that speed/duplex/flow-control
3735	 * parameters are renegotiated when the interface is brought up.
3736	 */
3737	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0);
3738
3739	/* Dummy read the Interrupt Source Register. */
3740	CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3741
3742	/* Clear MIB stats. */
3743	msk_stats_clear(sc_if);
3744
3745	/* Disable FCS. */
3746	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);
3747
3748	/* Setup Transmit Control Register. */
3749	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
3750
3751	/* Setup Transmit Flow Control Register. */
3752	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
3753
3754	/* Setup Transmit Parameter Register. */
3755	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
3756	    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
3757	    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
3758
3759	gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
3760	    GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
3761
3762	if (ifp->if_mtu > ETHERMTU)
3763		gmac |= GM_SMOD_JUMBO_ENA;
3764	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
3765
3766	/* Set station address. */
3767	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
3768	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3769		GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4,
3770		    eaddr[i]);
3771	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3772		GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4,
3773		    eaddr[i]);
3774
3775	/* Disable interrupts for counter overflows. */
3776	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
3777	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
3778	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
3779
3780	/* Configure Rx MAC FIFO. */
3781	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3782	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
3783	reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
3784	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P ||
3785	    sc->msk_hw_id == CHIP_ID_YUKON_EX)
3786		reg |= GMF_RX_OVER_ON;
3787	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg);
3788
3789	/* Set receive filter. */
3790	msk_rxfilter(sc_if);
3791
3792	/* Flush Rx MAC FIFO on any flow control or error. */
3793	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
3794	    GMR_FS_ANY_ERR);
3795
3796	/*
3797	 * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
3798	 * due to hardware hang on receipt of pause frames.
3799	 */
3800	reg = RX_GMF_FL_THR_DEF + 1;
3801	/* Another magic value for Yukon FE+, from Linux. */
3802	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
3803	    sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0)
3804		reg = 0x178;
3805	CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg);
3806
3807	/* Configure Tx MAC FIFO. */
3808	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3809	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
3810	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
3811
3812	/* Configure hardware VLAN tag insertion/stripping. */
3813	msk_setvlan(sc_if, ifp);
3814
3815	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
3816		/* Set Rx pause thresholds. */
3817		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
3818		    MSK_ECU_LLPP);
3819		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
3820		    MSK_ECU_ULPP);
3821		/* Configure store-and-forward for Tx. */
3822		msk_set_tx_stfwd(sc_if);
3823	}
3824
3825	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
3826	    sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
3827		/* Disable dynamic watermark - from Linux. */
3828		reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA));
3829		reg &= ~0x03;
3830		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg);
3831	}
3832
3833	/*
3834	 * Disable Force Sync bit and Alloc bit in Tx RAM interface
3835	 * arbiter as we don't use Sync Tx queue.
3836	 */
3837	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
3838	    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
3839	/* Enable the RAM Interface Arbiter. */
3840	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
3841
3842	/* Setup RAM buffer. */
3843	msk_set_rambuffer(sc_if);
3844
3845	/* Disable Tx sync Queue. */
3846	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
3847
3848	/* Setup Tx Queue Bus Memory Interface. */
3849	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
3850	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
3851	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
3852	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
3853	switch (sc->msk_hw_id) {
3854	case CHIP_ID_YUKON_EC_U:
3855		if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
3856			/* Fix for Yukon-EC Ultra: set BMU FIFO level */
3857			CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL),
3858			    MSK_ECU_TXFF_LEV);
3859		}
3860		break;
3861	case CHIP_ID_YUKON_EX:
3862		/*
3863		 * Yukon Extreme seems to have a silicon bug in its
3864		 * automatic Tx checksum calculation capability.
3865		 */
3866		if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
3867			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F),
3868			    F_TX_CHK_AUTO_OFF);
3869		break;
3870	}
3871
3872	/* Setup Rx Queue Bus Memory Interface. */
3873	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
3874	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
3875	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
3876	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
3877	if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3878	    sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
3879		/* MAC Rx RAM Read is controlled by hardware. */
3880		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
3881	}
3882
3883	msk_set_prefetch(sc, sc_if->msk_txq,
3884	    sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
3885	msk_init_tx_ring(sc_if);
3886
3887	/* Disable Rx checksum offload and RSS hash. */
3888	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3889	    BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);
3890	if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) {
3891		msk_set_prefetch(sc, sc_if->msk_rxq,
3892		    sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
3893		    MSK_JUMBO_RX_RING_CNT - 1);
3894		error = msk_init_jumbo_rx_ring(sc_if);
3895	} else {
3896		msk_set_prefetch(sc, sc_if->msk_rxq,
3897		    sc_if->msk_rdata.msk_rx_ring_paddr,
3898		    MSK_RX_RING_CNT - 1);
3899		error = msk_init_rx_ring(sc_if);
3900	}
3901	if (error != 0) {
3902		device_printf(sc_if->msk_if_dev,
3903		    "initialization failed: no memory for Rx buffers\n");
3904		msk_stop(sc_if);
3905		return;
3906	}
3907
3908	/* Configure interrupt handling. */
3909	if (sc_if->msk_port == MSK_PORT_A) {
3910		sc->msk_intrmask |= Y2_IS_PORT_A;
3911		sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
3912	} else {
3913		sc->msk_intrmask |= Y2_IS_PORT_B;
3914		sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
3915	}
3916	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3917	CSR_READ_4(sc, B0_HWE_IMSK);
3918	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3919	CSR_READ_4(sc, B0_IMSK);
3920
3921	sc_if->msk_flags &= ~MSK_FLAG_LINK;
3922	mii_mediachg(mii);
3923
3924	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3925	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3926
3927	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3928}
3929
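/*
 * Program the RAM buffer queue boundaries and Rx pause thresholds for
 * this port.  This is a no-op on chips without a RAM buffer
 * (MSK_FLAG_RAMBUF not set).
 */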
3930static void
3931msk_set_rambuffer(struct msk_if_softc *sc_if)
3932{
3933	struct msk_softc *sc;
3934	int ltpp, utpp;
3935
3936	sc = sc_if->msk_softc;
3937	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
3938		return;
3939
3940	/* Setup Rx Queue. */
3941	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
3942	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
3943	    sc->msk_rxqstart[sc_if->msk_port] / 8);
3944	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
3945	    sc->msk_rxqend[sc_if->msk_port] / 8);
3946	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
3947	    sc->msk_rxqstart[sc_if->msk_port] / 8);
3948	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
3949	    sc->msk_rxqstart[sc_if->msk_port] / 8);
3950
3951	utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3952	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
3953	ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3954	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
3955	if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
3956		ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
3957	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
3958	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
3959	/* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */
3960
3961	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
3962	CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));
3963
3964	/* Setup Tx Queue. */
3965	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
3966	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
3967	    sc->msk_txqstart[sc_if->msk_port] / 8);
3968	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
3969	    sc->msk_txqend[sc_if->msk_port] / 8);
3970	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
3971	    sc->msk_txqstart[sc_if->msk_port] / 8);
3972	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
3973	    sc->msk_txqstart[sc_if->msk_port] / 8);
3974	/* Enable Store & Forward for Tx side. */
3975	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
3976	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
3977	CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
3978}
3979
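/*
 * Point a queue's prefetch unit at a list-element ring in host memory:
 * reset the unit, load the 64-bit base address and the last ring index,
 * then turn the unit back on.
 */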
3980static void
3981msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
3982    uint32_t count)
3983{
3984
3985	/* Reset the prefetch unit. */
3986	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3987	    PREF_UNIT_RST_SET);
3988	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3989	    PREF_UNIT_RST_CLR);
3990	/* Set LE base address. */
3991	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
3992	    MSK_ADDR_LO(addr));
3993	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
3994	    MSK_ADDR_HI(addr));
3995	/* Set the list last index. */
3996	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
3997	    count);
3998	/* Turn on prefetch unit. */
3999	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
4000	    PREF_UNIT_OP_ON);
4001	/* Dummy read to ensure write. */
4002	CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
4003}
4004
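/*
 * Stop the interface: mask its interrupts, disable the MAC, stop and
 * reset the Tx/Rx BMUs, prefetch units, RAM buffers and MAC FIFOs, and
 * release any mbufs still held by the rings.  Called with the
 * per-interface lock held.
 */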
4005static void
4006msk_stop(struct msk_if_softc *sc_if)
4007{
4008	struct msk_softc *sc;
4009	struct msk_txdesc *txd;
4010	struct msk_rxdesc *rxd;
4011	struct msk_rxdesc *jrxd;
4012	struct ifnet *ifp;
4013	uint32_t val;
4014	int i;
4015
4016	MSK_IF_LOCK_ASSERT(sc_if);
4017	sc = sc_if->msk_softc;
4018	ifp = sc_if->msk_ifp;
4019
4020	callout_stop(&sc_if->msk_tick_ch);
4021	sc_if->msk_watchdog_timer = 0;
4022
4023	/* Disable interrupts. */
4024	if (sc_if->msk_port == MSK_PORT_A) {
4025		sc->msk_intrmask &= ~Y2_IS_PORT_A;
4026		sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
4027	} else {
4028		sc->msk_intrmask &= ~Y2_IS_PORT_B;
4029		sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
4030	}
4031	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
4032	CSR_READ_4(sc, B0_HWE_IMSK);
4033	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
4034	CSR_READ_4(sc, B0_IMSK);
4035
4036	/* Disable Tx/Rx MAC. */
4037	val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
4038	val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
4039	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
4040	/* Read back to ensure the write completed. */
4041	GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
4042	/* Update stats and clear counters. */
4043	msk_stats_update(sc_if);
4044
4045	/* Stop Tx BMU. */
4046	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
4047	val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
4048	for (i = 0; i < MSK_TIMEOUT; i++) {
4049		if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
4050			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
4051			    BMU_STOP);
4052			val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
4053		} else
4054			break;
4055		DELAY(1);
4056	}
4057	if (i == MSK_TIMEOUT)
4058		device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
4059	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
4060	    RB_RST_SET | RB_DIS_OP_MD);
4061
4062	/* Disable all GMAC interrupts. */
4063	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
4064	/* Disable PHY interrupt. */
4065	msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
4066
4067	/* Disable the RAM Interface Arbiter. */
4068	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);
4069
4070	/* Reset the PCI FIFO of the async Tx queue. */
4071	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
4072	    BMU_RST_SET | BMU_FIFO_RST);
4073
4074	/* Reset the Tx prefetch units. */
4075	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
4076	    PREF_UNIT_RST_SET);
4077
4078	/* Reset the RAM Buffer async Tx queue. */
4079	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);
4080
4081	/* Reset Tx MAC FIFO. */
4082	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
4083	/* Set Pause Off. */
4084	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
4085
4086	/*
4087	 * The Rx Stop command will not work for Yukon-2 if the BMU does not
4088	 * reach the end of a packet, and since we cannot be sure that no
4089	 * data is incoming, we must reset the BMU while it is not in the
4090	 * middle of a DMA transfer. Since it is possible that the Rx path
4091	 * is still active, the Rx RAM buffer is stopped first so that any
4092	 * incoming data does not trigger a DMA. After the RAM buffer is
4093	 * stopped, the BMU is polled until any DMA in progress has ended,
4094	 * and only then is it reset.
4095	 */
4096
4097	/* Disable the RAM Buffer receive queue. */
4098	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
4099	for (i = 0; i < MSK_TIMEOUT; i++) {
4100		if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
4101		    CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
4102			break;
4103		DELAY(1);
4104	}
4105	if (i == MSK_TIMEOUT)
4106		device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
4107	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
4108	    BMU_RST_SET | BMU_FIFO_RST);
4109	/* Reset the Rx prefetch unit. */
4110	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
4111	    PREF_UNIT_RST_SET);
4112	/* Reset the RAM Buffer receive queue. */
4113	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
4114	/* Reset Rx MAC FIFO. */
4115	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
4116
4117	/* Free Rx and Tx mbufs still in the queues. */
4118	for (i = 0; i < MSK_RX_RING_CNT; i++) {
4119		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
4120		if (rxd->rx_m != NULL) {
4121			bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
4122			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
4123			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
4124			    rxd->rx_dmamap);
4125			m_freem(rxd->rx_m);
4126			rxd->rx_m = NULL;
4127		}
4128	}
4129	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
4130		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
4131		if (jrxd->rx_m != NULL) {
4132			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
4133			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
4134			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
4135			    jrxd->rx_dmamap);
4136			m_freem(jrxd->rx_m);
4137			jrxd->rx_m = NULL;
4138		}
4139	}
4140	for (i = 0; i < MSK_TX_RING_CNT; i++) {
4141		txd = &sc_if->msk_cdata.msk_txdesc[i];
4142		if (txd->tx_m != NULL) {
4143			bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
4144			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
4145			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
4146			    txd->tx_dmamap);
4147			m_freem(txd->tx_m);
4148			txd->tx_m = NULL;
4149		}
4150	}
4151
4152	/*
4153	 * Mark the interface down.
4154	 */
4155	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4156	sc_if->msk_flags &= ~MSK_FLAG_LINK;
4157}
4158
4159/*
4160 * When the GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading the
4161 * lower 16 bits of a counter clears its high 16 bits, so the lower
4162 * 16 bits must be read last.
4163 */
4164#define	MSK_READ_MIB32(x, y)					\
4165	(((uint32_t)GMAC_READ_2(sc, x, (y) + 4)) << 16) +	\
4166	(uint32_t)GMAC_READ_2(sc, x, y)
4167#define	MSK_READ_MIB64(x, y)					\
4168	(((uint64_t)MSK_READ_MIB32(x, (y) + 8)) << 32) +	\
4169	(uint64_t)MSK_READ_MIB32(x, y)
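
/*
 * Example usage (illustrative only, mirroring msk_stats_update() below):
 * with MIB clear mode enabled, a 32-bit counter such as the CRC error
 * count and a 64-bit octet counter are read as
 *
 *	crc    = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_FCS_ERR);
 *	octets = MSK_READ_MIB64(sc_if->msk_port, GM_RXO_OK_LO);
 */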
4170
4171static void
4172msk_stats_clear(struct msk_if_softc *sc_if)
4173{
4174	struct msk_softc *sc;
4175	uint32_t reg;
4176	uint16_t gmac;
4177	int i;
4178
4179	MSK_IF_LOCK_ASSERT(sc_if);
4180
4181	sc = sc_if->msk_softc;
4182	/* Set MIB Clear Counter Mode. */
4183	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
4184	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
4185	/* Read all MIB Counters with Clear Mode set. */
4186	for (i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i++)
4187		reg = MSK_READ_MIB32(sc_if->msk_port, i);
4188	/* Clear MIB Clear Counter Mode. */
4189	gmac &= ~GM_PAR_MIB_CLR;
4190	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
4191}
4192
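/*
 * Accumulate the hardware MIB counters into the softc's msk_hw_stats.
 * MIB clear mode is enabled around the reads so each counter is cleared
 * as it is read; the spare counter registers are read and discarded.
 */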
4193static void
4194msk_stats_update(struct msk_if_softc *sc_if)
4195{
4196	struct msk_softc *sc;
4197	struct ifnet *ifp;
4198	struct msk_hw_stats *stats;
4199	uint16_t gmac;
4200	uint32_t reg;
4201
4202	MSK_IF_LOCK_ASSERT(sc_if);
4203
4204	ifp = sc_if->msk_ifp;
4205	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
4206		return;
4207	sc = sc_if->msk_softc;
4208	stats = &sc_if->msk_stats;
4209	/* Set MIB Clear Counter Mode. */
4210	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
4211	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
4212
4213	/* Rx stats. */
4214	stats->rx_ucast_frames +=
4215	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_UC_OK);
4216	stats->rx_bcast_frames +=
4217	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_BC_OK);
4218	stats->rx_pause_frames +=
4219	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MPAUSE);
4220	stats->rx_mcast_frames +=
4221	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MC_OK);
4222	stats->rx_crc_errs +=
4223	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_FCS_ERR);
4224	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE1);
4225	stats->rx_good_octets +=
4226	    MSK_READ_MIB64(sc_if->msk_port, GM_RXO_OK_LO);
4227	stats->rx_bad_octets +=
4228	    MSK_READ_MIB64(sc_if->msk_port, GM_RXO_ERR_LO);
4229	stats->rx_runts +=
4230	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SHT);
4231	stats->rx_runt_errs +=
4232	    MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FRAG);
4233	stats->rx_pkts_64 +=
4234	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_64B);
4235	stats->rx_pkts_65_127 +=
4236	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_127B);
4237	stats->rx_pkts_128_255 +=
4238	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_255B);
4239	stats->rx_pkts_256_511 +=
4240	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_511B);
4241	stats->rx_pkts_512_1023 +=
4242	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1023B);
4243	stats->rx_pkts_1024_1518 +=
4244	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1518B);
4245	stats->rx_pkts_1519_max +=
4246	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MAX_SZ);
4247	stats->rx_pkts_too_long +=
4248	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_LNG_ERR);
4249	stats->rx_pkts_jabbers +=
4250	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_JAB_PKT);
4251	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE2);
4252	stats->rx_fifo_oflows +=
4253	    MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FIFO_OV);
4254	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE3);
4255
4256	/* Tx stats. */
4257	stats->tx_ucast_frames +=
4258	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_UC_OK);
4259	stats->tx_bcast_frames +=
4260	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_BC_OK);
4261	stats->tx_pause_frames +=
4262	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MPAUSE);
4263	stats->tx_mcast_frames +=
4264	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MC_OK);
4265	stats->tx_octets +=
4266	    MSK_READ_MIB64(sc_if->msk_port, GM_TXO_OK_LO);
4267	stats->tx_pkts_64 +=
4268	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_64B);
4269	stats->tx_pkts_65_127 +=
4270	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_127B);
4271	stats->tx_pkts_128_255 +=
4272	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_255B);
4273	stats->tx_pkts_256_511 +=
4274	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_511B);
4275	stats->tx_pkts_512_1023 +=
4276	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1023B);
4277	stats->tx_pkts_1024_1518 +=
4278	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1518B);
4279	stats->tx_pkts_1519_max +=
4280	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MAX_SZ);
4281	reg = MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SPARE1);
4282	stats->tx_colls +=
4283	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_COL);
4284	stats->tx_late_colls +=
4285	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_LAT_COL);
4286	stats->tx_excess_colls +=
4287	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_ABO_COL);
4288	stats->tx_multi_colls +=
4289	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MUL_COL);
4290	stats->tx_single_colls +=
4291	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SNG_COL);
4292	stats->tx_underflows +=
4293	    MSK_READ_MIB32(sc_if->msk_port, GM_TXE_FIFO_UR);
4294	/* Clear MIB Clear Counter Mode. */
4295	gmac &= ~GM_PAR_MIB_CLR;
4296	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
4297}
4298
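/*
 * Sysctl handlers for the statistics nodes: arg2 is the byte offset of
 * the counter within struct msk_hw_stats.  The reported value is the
 * software total accumulated by msk_stats_update() plus the counter
 * currently held in the hardware MIB.
 */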
4299static int
4300msk_sysctl_stat32(SYSCTL_HANDLER_ARGS)
4301{
4302	struct msk_softc *sc;
4303	struct msk_if_softc *sc_if;
4304	uint32_t result, *stat;
4305	int off;
4306
4307	sc_if = (struct msk_if_softc *)arg1;
4308	sc = sc_if->msk_softc;
4309	off = arg2;
4310	stat = (uint32_t *)((uint8_t *)&sc_if->msk_stats + off);
4311
4312	MSK_IF_LOCK(sc_if);
4313	result = MSK_READ_MIB32(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
4314	result += *stat;
4315	MSK_IF_UNLOCK(sc_if);
4316
4317	return (sysctl_handle_int(oidp, &result, 0, req));
4318}
4319
4320static int
4321msk_sysctl_stat64(SYSCTL_HANDLER_ARGS)
4322{
4323	struct msk_softc *sc;
4324	struct msk_if_softc *sc_if;
4325	uint64_t result, *stat;
4326	int off;
4327
4328	sc_if = (struct msk_if_softc *)arg1;
4329	sc = sc_if->msk_softc;
4330	off = arg2;
4331	stat = (uint64_t *)((uint8_t *)&sc_if->msk_stats + off);
4332
4333	MSK_IF_LOCK(sc_if);
4334	result = MSK_READ_MIB64(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
4335	result += *stat;
4336	MSK_IF_UNLOCK(sc_if);
4337
4338	return (sysctl_handle_quad(oidp, &result, 0, req));
4339}
4340
4341#undef MSK_READ_MIB32
4342#undef MSK_READ_MIB64
4343
4344#define MSK_SYSCTL_STAT32(sc, c, o, p, n, d) 				\
4345	SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_UINT | CTLFLAG_RD, 	\
4346	    sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat32,	\
4347	    "IU", d)
4348#define MSK_SYSCTL_STAT64(sc, c, o, p, n, d) 				\
4349	SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_QUAD | CTLFLAG_RD, 	\
4350	    sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat64,	\
4351	    "Q", d)
4352
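/*
 * Create the per-interface "stats" sysctl tree with "rx" and "tx"
 * subtrees, one node per MIB counter, backed by msk_sysctl_stat32()
 * and msk_sysctl_stat64().
 */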
4353static void
4354msk_sysctl_node(struct msk_if_softc *sc_if)
4355{
4356	struct sysctl_ctx_list *ctx;
4357	struct sysctl_oid_list *child, *schild;
4358	struct sysctl_oid *tree;
4359
4360	ctx = device_get_sysctl_ctx(sc_if->msk_if_dev);
4361	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc_if->msk_if_dev));
4362
4363	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
4364	    NULL, "MSK Statistics");
4365	schild = child = SYSCTL_CHILDREN(tree);
4366	tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
4367	    NULL, "MSK RX Statistics");
4368	child = SYSCTL_CHILDREN(tree);
4369	MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
4370	    child, rx_ucast_frames, "Good unicast frames");
4371	MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
4372	    child, rx_bcast_frames, "Good broadcast frames");
4373	MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
4374	    child, rx_pause_frames, "Pause frames");
4375	MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
4376	    child, rx_mcast_frames, "Multicast frames");
4377	MSK_SYSCTL_STAT32(sc_if, ctx, "crc_errs",
4378	    child, rx_crc_errs, "CRC errors");
4379	MSK_SYSCTL_STAT64(sc_if, ctx, "good_octets",
4380	    child, rx_good_octets, "Good octets");
4381	MSK_SYSCTL_STAT64(sc_if, ctx, "bad_octets",
4382	    child, rx_bad_octets, "Bad octets");
4383	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
4384	    child, rx_pkts_64, "64 bytes frames");
4385	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
4386	    child, rx_pkts_65_127, "65 to 127 bytes frames");
4387	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
4388	    child, rx_pkts_128_255, "128 to 255 bytes frames");
4389	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
4390	    child, rx_pkts_256_511, "256 to 511 bytes frames");
4391	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
4392	    child, rx_pkts_512_1023, "512 to 1023 bytes frames");
4393	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
4394	    child, rx_pkts_1024_1518, "1024 to 1518 bytes frames");
4395	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
4396	    child, rx_pkts_1519_max, "1519 to max frames");
4397	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_too_long",
4398	    child, rx_pkts_too_long, "frames too long");
4399	MSK_SYSCTL_STAT32(sc_if, ctx, "jabbers",
4400	    child, rx_pkts_jabbers, "Jabber errors");
4401	MSK_SYSCTL_STAT32(sc_if, ctx, "overflows",
4402	    child, rx_fifo_oflows, "FIFO overflows");
4403
4404	tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
4405	    NULL, "MSK TX Statistics");
4406	child = SYSCTL_CHILDREN(tree);
4407	MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
4408	    child, tx_ucast_frames, "Unicast frames");
4409	MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
4410	    child, tx_bcast_frames, "Broadcast frames");
4411	MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
4412	    child, tx_pause_frames, "Pause frames");
4413	MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
4414	    child, tx_mcast_frames, "Multicast frames");
4415	MSK_SYSCTL_STAT64(sc_if, ctx, "octets",
4416	    child, tx_octets, "Octets");
4417	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
4418	    child, tx_pkts_64, "64 bytes frames");
4419	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
4420	    child, tx_pkts_65_127, "65 to 127 bytes frames");
4421	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
4422	    child, tx_pkts_128_255, "128 to 255 bytes frames");
4423	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
4424	    child, tx_pkts_256_511, "256 to 511 bytes frames");
4425	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
4426	    child, tx_pkts_512_1023, "512 to 1023 bytes frames");
4427	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
4428	    child, tx_pkts_1024_1518, "1024 to 1518 bytes frames");
4429	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
4430	    child, tx_pkts_1519_max, "1519 to max frames");
4431	MSK_SYSCTL_STAT32(sc_if, ctx, "colls",
4432	    child, tx_colls, "Collisions");
4433	MSK_SYSCTL_STAT32(sc_if, ctx, "late_colls",
4434	    child, tx_late_colls, "Late collisions");
4435	MSK_SYSCTL_STAT32(sc_if, ctx, "excess_colls",
4436	    child, tx_excess_colls, "Excessive collisions");
4437	MSK_SYSCTL_STAT32(sc_if, ctx, "multi_colls",
4438	    child, tx_multi_colls, "Multiple collisions");
4439	MSK_SYSCTL_STAT32(sc_if, ctx, "single_colls",
4440	    child, tx_single_colls, "Single collisions");
4441	MSK_SYSCTL_STAT32(sc_if, ctx, "underflows",
4442	    child, tx_underflows, "FIFO underflows");
4443}
4444
4445#undef MSK_SYSCTL_STAT32
4446#undef MSK_SYSCTL_STAT64
4447
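/*
 * Generic sysctl helper that accepts a new integer value only if it
 * falls within [low, high]; sysctl_hw_msk_proc_limit() below uses it to
 * bound the event-processing limit.
 */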
4448static int
4449sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
4450{
4451	int error, value;
4452
4453	if (!arg1)
4454		return (EINVAL);
4455	value = *(int *)arg1;
4456	error = sysctl_handle_int(oidp, &value, 0, req);
4457	if (error || !req->newptr)
4458		return (error);
4459	if (value < low || value > high)
4460		return (EINVAL);
4461	*(int *)arg1 = value;
4462
4463	return (0);
4464}
4465
4466static int
4467sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
4468{
4469
4470	return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
4471	    MSK_PROC_MAX));
4472}
4473