if_msk.c revision 312362
1/******************************************************************************
2 *
3 * Name   : sky2.c
4 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
5 * Version: $Revision: 1.23 $
6 * Date   : $Date: 2005/12/22 09:04:11 $
7 * Purpose: Main driver source file
8 *
9 *****************************************************************************/
10
11/******************************************************************************
12 *
13 *	LICENSE:
14 *	Copyright (C) Marvell International Ltd. and/or its affiliates
15 *
16 *	The computer program files contained in this folder ("Files")
17 *	are provided to you under the BSD-type license terms provided
18 *	below, and any use of such Files and any derivative works
19 *	thereof created by you shall be governed by the following terms
20 *	and conditions:
21 *
22 *	- Redistributions of source code must retain the above copyright
23 *	  notice, this list of conditions and the following disclaimer.
24 *	- Redistributions in binary form must reproduce the above
25 *	  copyright notice, this list of conditions and the following
26 *	  disclaimer in the documentation and/or other materials provided
27 *	  with the distribution.
28 *	- Neither the name of Marvell nor the names of its contributors
29 *	  may be used to endorse or promote products derived from this
30 *	  software without specific prior written permission.
31 *
32 *	THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
33 *	"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
34 *	LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 *	FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
36 *	COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
37 *	INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
38 *	BUT NOT LIMITED TO, PROCUREMENT OF  SUBSTITUTE GOODS OR SERVICES;
39 *	LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40 *	HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
41 *	STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42 *	ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
43 *	OF THE POSSIBILITY OF SUCH DAMAGE.
44 *	/LICENSE
45 *
46 *****************************************************************************/
47
48/*-
49 * Copyright (c) 1997, 1998, 1999, 2000
50 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
51 *
52 * Redistribution and use in source and binary forms, with or without
53 * modification, are permitted provided that the following conditions
54 * are met:
55 * 1. Redistributions of source code must retain the above copyright
56 *    notice, this list of conditions and the following disclaimer.
57 * 2. Redistributions in binary form must reproduce the above copyright
58 *    notice, this list of conditions and the following disclaimer in the
59 *    documentation and/or other materials provided with the distribution.
60 * 3. All advertising materials mentioning features or use of this software
61 *    must display the following acknowledgement:
62 *	This product includes software developed by Bill Paul.
63 * 4. Neither the name of the author nor the names of any co-contributors
64 *    may be used to endorse or promote products derived from this software
65 *    without specific prior written permission.
66 *
67 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
68 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
69 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
70 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
71 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
72 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
73 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
74 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
75 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
76 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
77 * THE POSSIBILITY OF SUCH DAMAGE.
78 */
79/*-
80 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
81 *
82 * Permission to use, copy, modify, and distribute this software for any
83 * purpose with or without fee is hereby granted, provided that the above
84 * copyright notice and this permission notice appear in all copies.
85 *
86 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
87 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
88 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
89 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
90 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
91 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
92 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
93 */
94
95/*
96 * Device driver for the Marvell Yukon II Ethernet controller.
97 * Due to lack of documentation, this driver is based on the code from
98 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
99 */
100
101#include <sys/cdefs.h>
102__FBSDID("$FreeBSD: stable/10/sys/dev/msk/if_msk.c 312362 2017-01-18 02:16:17Z yongari $");
103
104#include <sys/param.h>
105#include <sys/systm.h>
106#include <sys/bus.h>
107#include <sys/endian.h>
108#include <sys/mbuf.h>
109#include <sys/malloc.h>
110#include <sys/kernel.h>
111#include <sys/module.h>
112#include <sys/socket.h>
113#include <sys/sockio.h>
114#include <sys/queue.h>
115#include <sys/sysctl.h>
116
117#include <net/bpf.h>
118#include <net/ethernet.h>
119#include <net/if.h>
120#include <net/if_arp.h>
121#include <net/if_dl.h>
122#include <net/if_media.h>
123#include <net/if_types.h>
124#include <net/if_vlan_var.h>
125
126#include <netinet/in.h>
127#include <netinet/in_systm.h>
128#include <netinet/ip.h>
129#include <netinet/tcp.h>
130#include <netinet/udp.h>
131
132#include <machine/bus.h>
133#include <machine/in_cksum.h>
134#include <machine/resource.h>
135#include <sys/rman.h>
136
137#include <dev/mii/mii.h>
138#include <dev/mii/miivar.h>
139
140#include <dev/pci/pcireg.h>
141#include <dev/pci/pcivar.h>
142
143#include <dev/msk/if_mskreg.h>
144
145MODULE_DEPEND(msk, pci, 1, 1, 1);
146MODULE_DEPEND(msk, ether, 1, 1, 1);
147MODULE_DEPEND(msk, miibus, 1, 1, 1);
148
149/* "device miibus" required.  See GENERIC if you get errors here. */
150#include "miibus_if.h"
151
152/* Tunables. */
153static int msi_disable = 0;
154TUNABLE_INT("hw.msk.msi_disable", &msi_disable);
155static int legacy_intr = 0;
156TUNABLE_INT("hw.msk.legacy_intr", &legacy_intr);
157static int jumbo_disable = 0;
158TUNABLE_INT("hw.msk.jumbo_disable", &jumbo_disable);
159
160#define MSK_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
161
162/*
163 * Devices supported by this driver.
164 */
165static const struct msk_product {
166	uint16_t	msk_vendorid;
167	uint16_t	msk_deviceid;
168	const char	*msk_name;
169} msk_products[] = {
170	{ VENDORID_SK, DEVICEID_SK_YUKON2,
171	    "SK-9Sxx Gigabit Ethernet" },
172	{ VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
173	    "SK-9Exx Gigabit Ethernet"},
174	{ VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
175	    "Marvell Yukon 88E8021CU Gigabit Ethernet" },
176	{ VENDORID_MARVELL, DEVICEID_MRVL_8021X,
177	    "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
178	{ VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
179	    "Marvell Yukon 88E8022CU Gigabit Ethernet" },
180	{ VENDORID_MARVELL, DEVICEID_MRVL_8022X,
181	    "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
182	{ VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
183	    "Marvell Yukon 88E8061CU Gigabit Ethernet" },
184	{ VENDORID_MARVELL, DEVICEID_MRVL_8061X,
185	    "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
186	{ VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
187	    "Marvell Yukon 88E8062CU Gigabit Ethernet" },
188	{ VENDORID_MARVELL, DEVICEID_MRVL_8062X,
189	    "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
190	{ VENDORID_MARVELL, DEVICEID_MRVL_8035,
191	    "Marvell Yukon 88E8035 Fast Ethernet" },
192	{ VENDORID_MARVELL, DEVICEID_MRVL_8036,
193	    "Marvell Yukon 88E8036 Fast Ethernet" },
194	{ VENDORID_MARVELL, DEVICEID_MRVL_8038,
195	    "Marvell Yukon 88E8038 Fast Ethernet" },
196	{ VENDORID_MARVELL, DEVICEID_MRVL_8039,
197	    "Marvell Yukon 88E8039 Fast Ethernet" },
198	{ VENDORID_MARVELL, DEVICEID_MRVL_8040,
199	    "Marvell Yukon 88E8040 Fast Ethernet" },
200	{ VENDORID_MARVELL, DEVICEID_MRVL_8040T,
201	    "Marvell Yukon 88E8040T Fast Ethernet" },
202	{ VENDORID_MARVELL, DEVICEID_MRVL_8042,
203	    "Marvell Yukon 88E8042 Fast Ethernet" },
204	{ VENDORID_MARVELL, DEVICEID_MRVL_8048,
205	    "Marvell Yukon 88E8048 Fast Ethernet" },
206	{ VENDORID_MARVELL, DEVICEID_MRVL_4361,
207	    "Marvell Yukon 88E8050 Gigabit Ethernet" },
208	{ VENDORID_MARVELL, DEVICEID_MRVL_4360,
209	    "Marvell Yukon 88E8052 Gigabit Ethernet" },
210	{ VENDORID_MARVELL, DEVICEID_MRVL_4362,
211	    "Marvell Yukon 88E8053 Gigabit Ethernet" },
212	{ VENDORID_MARVELL, DEVICEID_MRVL_4363,
213	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
214	{ VENDORID_MARVELL, DEVICEID_MRVL_4364,
215	    "Marvell Yukon 88E8056 Gigabit Ethernet" },
216	{ VENDORID_MARVELL, DEVICEID_MRVL_4365,
217	    "Marvell Yukon 88E8070 Gigabit Ethernet" },
218	{ VENDORID_MARVELL, DEVICEID_MRVL_436A,
219	    "Marvell Yukon 88E8058 Gigabit Ethernet" },
220	{ VENDORID_MARVELL, DEVICEID_MRVL_436B,
221	    "Marvell Yukon 88E8071 Gigabit Ethernet" },
222	{ VENDORID_MARVELL, DEVICEID_MRVL_436C,
223	    "Marvell Yukon 88E8072 Gigabit Ethernet" },
224	{ VENDORID_MARVELL, DEVICEID_MRVL_436D,
225	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
226	{ VENDORID_MARVELL, DEVICEID_MRVL_4370,
227	    "Marvell Yukon 88E8075 Gigabit Ethernet" },
228	{ VENDORID_MARVELL, DEVICEID_MRVL_4380,
229	    "Marvell Yukon 88E8057 Gigabit Ethernet" },
230	{ VENDORID_MARVELL, DEVICEID_MRVL_4381,
231	    "Marvell Yukon 88E8059 Gigabit Ethernet" },
232	{ VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
233	    "D-Link 550SX Gigabit Ethernet" },
234	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560SX,
235	    "D-Link 560SX Gigabit Ethernet" },
236	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
237	    "D-Link 560T Gigabit Ethernet" }
238};
239
240static const char *model_name[] = {
241	"Yukon XL",
242        "Yukon EC Ultra",
243        "Yukon EX",
244        "Yukon EC",
245        "Yukon FE",
246        "Yukon FE+",
247        "Yukon Supreme",
248        "Yukon Ultra 2",
249        "Yukon Unknown",
250        "Yukon Optima",
251};
252
253static int mskc_probe(device_t);
254static int mskc_attach(device_t);
255static int mskc_detach(device_t);
256static int mskc_shutdown(device_t);
257static int mskc_setup_rambuffer(struct msk_softc *);
258static int mskc_suspend(device_t);
259static int mskc_resume(device_t);
260static bus_dma_tag_t mskc_get_dma_tag(device_t, device_t);
261static void mskc_reset(struct msk_softc *);
262
263static int msk_probe(device_t);
264static int msk_attach(device_t);
265static int msk_detach(device_t);
266
267static void msk_tick(void *);
268static void msk_intr(void *);
269static void msk_intr_phy(struct msk_if_softc *);
270static void msk_intr_gmac(struct msk_if_softc *);
271static __inline void msk_rxput(struct msk_if_softc *);
272static int msk_handle_events(struct msk_softc *);
273static void msk_handle_hwerr(struct msk_if_softc *, uint32_t);
274static void msk_intr_hwerr(struct msk_softc *);
275#ifndef __NO_STRICT_ALIGNMENT
276static __inline void msk_fixup_rx(struct mbuf *);
277#endif
278static __inline void msk_rxcsum(struct msk_if_softc *, uint32_t, struct mbuf *);
279static void msk_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
280static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
281static void msk_txeof(struct msk_if_softc *, int);
282static int msk_encap(struct msk_if_softc *, struct mbuf **);
283static void msk_start(struct ifnet *);
284static void msk_start_locked(struct ifnet *);
285static int msk_ioctl(struct ifnet *, u_long, caddr_t);
286static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
287static void msk_set_rambuffer(struct msk_if_softc *);
288static void msk_set_tx_stfwd(struct msk_if_softc *);
289static void msk_init(void *);
290static void msk_init_locked(struct msk_if_softc *);
291static void msk_stop(struct msk_if_softc *);
292static void msk_watchdog(struct msk_if_softc *);
293static int msk_mediachange(struct ifnet *);
294static void msk_mediastatus(struct ifnet *, struct ifmediareq *);
295static void msk_phy_power(struct msk_softc *, int);
296static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
297static int msk_status_dma_alloc(struct msk_softc *);
298static void msk_status_dma_free(struct msk_softc *);
299static int msk_txrx_dma_alloc(struct msk_if_softc *);
300static int msk_rx_dma_jalloc(struct msk_if_softc *);
301static void msk_txrx_dma_free(struct msk_if_softc *);
302static void msk_rx_dma_jfree(struct msk_if_softc *);
303static int msk_rx_fill(struct msk_if_softc *, int);
304static int msk_init_rx_ring(struct msk_if_softc *);
305static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
306static void msk_init_tx_ring(struct msk_if_softc *);
307static __inline void msk_discard_rxbuf(struct msk_if_softc *, int);
308static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
309static int msk_newbuf(struct msk_if_softc *, int);
310static int msk_jumbo_newbuf(struct msk_if_softc *, int);
311
312static int msk_phy_readreg(struct msk_if_softc *, int, int);
313static int msk_phy_writereg(struct msk_if_softc *, int, int, int);
314static int msk_miibus_readreg(device_t, int, int);
315static int msk_miibus_writereg(device_t, int, int, int);
316static void msk_miibus_statchg(device_t);
317
318static void msk_rxfilter(struct msk_if_softc *);
319static void msk_setvlan(struct msk_if_softc *, struct ifnet *);
320
321static void msk_stats_clear(struct msk_if_softc *);
322static void msk_stats_update(struct msk_if_softc *);
323static int msk_sysctl_stat32(SYSCTL_HANDLER_ARGS);
324static int msk_sysctl_stat64(SYSCTL_HANDLER_ARGS);
325static void msk_sysctl_node(struct msk_if_softc *);
326static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
327static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS);
328
329static device_method_t mskc_methods[] = {
330	/* Device interface */
331	DEVMETHOD(device_probe,		mskc_probe),
332	DEVMETHOD(device_attach,	mskc_attach),
333	DEVMETHOD(device_detach,	mskc_detach),
334	DEVMETHOD(device_suspend,	mskc_suspend),
335	DEVMETHOD(device_resume,	mskc_resume),
336	DEVMETHOD(device_shutdown,	mskc_shutdown),
337
338	DEVMETHOD(bus_get_dma_tag,	mskc_get_dma_tag),
339
340	DEVMETHOD_END
341};
342
343static driver_t mskc_driver = {
344	"mskc",
345	mskc_methods,
346	sizeof(struct msk_softc)
347};
348
349static devclass_t mskc_devclass;
350
351static device_method_t msk_methods[] = {
352	/* Device interface */
353	DEVMETHOD(device_probe,		msk_probe),
354	DEVMETHOD(device_attach,	msk_attach),
355	DEVMETHOD(device_detach,	msk_detach),
356	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
357
358	/* MII interface */
359	DEVMETHOD(miibus_readreg,	msk_miibus_readreg),
360	DEVMETHOD(miibus_writereg,	msk_miibus_writereg),
361	DEVMETHOD(miibus_statchg,	msk_miibus_statchg),
362
363	DEVMETHOD_END
364};
365
366static driver_t msk_driver = {
367	"msk",
368	msk_methods,
369	sizeof(struct msk_if_softc)
370};
371
372static devclass_t msk_devclass;
373
374DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, NULL, NULL);
375DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, NULL, NULL);
376DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, NULL, NULL);
377
378static struct resource_spec msk_res_spec_io[] = {
379	{ SYS_RES_IOPORT,	PCIR_BAR(1),	RF_ACTIVE },
380	{ -1,			0,		0 }
381};
382
383static struct resource_spec msk_res_spec_mem[] = {
384	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
385	{ -1,			0,		0 }
386};
387
388static struct resource_spec msk_irq_spec_legacy[] = {
389	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
390	{ -1,			0,		0 }
391};
392
393static struct resource_spec msk_irq_spec_msi[] = {
394	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
395	{ -1,			0,		0 }
396};
397
398static int
399msk_miibus_readreg(device_t dev, int phy, int reg)
400{
401	struct msk_if_softc *sc_if;
402
403	sc_if = device_get_softc(dev);
404
405	return (msk_phy_readreg(sc_if, phy, reg));
406}
407
408static int
409msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
410{
411	struct msk_softc *sc;
412	int i, val;
413
414	sc = sc_if->msk_softc;
415
416        GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
417	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
418
419	for (i = 0; i < MSK_TIMEOUT; i++) {
420		DELAY(1);
421		val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
422		if ((val & GM_SMI_CT_RD_VAL) != 0) {
423			val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
424			break;
425		}
426	}
427
428	if (i == MSK_TIMEOUT) {
429		if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
430		val = 0;
431	}
432
433	return (val);
434}
435
436static int
437msk_miibus_writereg(device_t dev, int phy, int reg, int val)
438{
439	struct msk_if_softc *sc_if;
440
441	sc_if = device_get_softc(dev);
442
443	return (msk_phy_writereg(sc_if, phy, reg, val));
444}
445
446static int
447msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
448{
449	struct msk_softc *sc;
450	int i;
451
452	sc = sc_if->msk_softc;
453
454	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
455        GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
456	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
457	for (i = 0; i < MSK_TIMEOUT; i++) {
458		DELAY(1);
459		if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
460		    GM_SMI_CT_BUSY) == 0)
461			break;
462	}
463	if (i == MSK_TIMEOUT)
464		if_printf(sc_if->msk_ifp, "phy write timeout\n");
465
466	return (0);
467}
468
469static void
470msk_miibus_statchg(device_t dev)
471{
472	struct msk_softc *sc;
473	struct msk_if_softc *sc_if;
474	struct mii_data *mii;
475	struct ifnet *ifp;
476	uint32_t gmac;
477
478	sc_if = device_get_softc(dev);
479	sc = sc_if->msk_softc;
480
481	MSK_IF_LOCK_ASSERT(sc_if);
482
483	mii = device_get_softc(sc_if->msk_miibus);
484	ifp = sc_if->msk_ifp;
485	if (mii == NULL || ifp == NULL ||
486	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
487		return;
488
489	sc_if->msk_flags &= ~MSK_FLAG_LINK;
490	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
491	    (IFM_AVALID | IFM_ACTIVE)) {
492		switch (IFM_SUBTYPE(mii->mii_media_active)) {
493		case IFM_10_T:
494		case IFM_100_TX:
495			sc_if->msk_flags |= MSK_FLAG_LINK;
496			break;
497		case IFM_1000_T:
498		case IFM_1000_SX:
499		case IFM_1000_LX:
500		case IFM_1000_CX:
501			if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0)
502				sc_if->msk_flags |= MSK_FLAG_LINK;
503			break;
504		default:
505			break;
506		}
507	}
508
509	if ((sc_if->msk_flags & MSK_FLAG_LINK) != 0) {
510		/* Enable Tx FIFO Underrun. */
511		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
512		    GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
513		/*
514		 * Because mii(4) notify msk(4) that it detected link status
515		 * change, there is no need to enable automatic
516		 * speed/flow-control/duplex updates.
517		 */
518		gmac = GM_GPCR_AU_ALL_DIS;
519		switch (IFM_SUBTYPE(mii->mii_media_active)) {
520		case IFM_1000_SX:
521		case IFM_1000_T:
522			gmac |= GM_GPCR_SPEED_1000;
523			break;
524		case IFM_100_TX:
525			gmac |= GM_GPCR_SPEED_100;
526			break;
527		case IFM_10_T:
528			break;
529		}
530
531		if ((IFM_OPTIONS(mii->mii_media_active) &
532		    IFM_ETH_RXPAUSE) == 0)
533			gmac |= GM_GPCR_FC_RX_DIS;
534		if ((IFM_OPTIONS(mii->mii_media_active) &
535		     IFM_ETH_TXPAUSE) == 0)
536			gmac |= GM_GPCR_FC_TX_DIS;
537		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
538			gmac |= GM_GPCR_DUP_FULL;
539		else
540			gmac |= GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS;
541		gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
542		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
543		/* Read again to ensure writing. */
544		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
545		gmac = GMC_PAUSE_OFF;
546		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
547			if ((IFM_OPTIONS(mii->mii_media_active) &
548			    IFM_ETH_RXPAUSE) != 0)
549				gmac = GMC_PAUSE_ON;
550		}
551		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);
552
553		/* Enable PHY interrupt for FIFO underrun/overflow. */
554		msk_phy_writereg(sc_if, PHY_ADDR_MARV,
555		    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
556	} else {
557		/*
558		 * Link state changed to down.
559		 * Disable PHY interrupts.
560		 */
561		msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
562		/* Disable Rx/Tx MAC. */
563		gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
564		if ((gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) != 0) {
565			gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
566			GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
567			/* Read again to ensure writing. */
568			GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
569		}
570	}
571}
572
573static void
574msk_rxfilter(struct msk_if_softc *sc_if)
575{
576	struct msk_softc *sc;
577	struct ifnet *ifp;
578	struct ifmultiaddr *ifma;
579	uint32_t mchash[2];
580	uint32_t crc;
581	uint16_t mode;
582
583	sc = sc_if->msk_softc;
584
585	MSK_IF_LOCK_ASSERT(sc_if);
586
587	ifp = sc_if->msk_ifp;
588
589	bzero(mchash, sizeof(mchash));
590	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
591	if ((ifp->if_flags & IFF_PROMISC) != 0)
592		mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
593	else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
594		mode |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
595		mchash[0] = 0xffff;
596		mchash[1] = 0xffff;
597	} else {
598		mode |= GM_RXCR_UCF_ENA;
599		if_maddr_rlock(ifp);
600		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
601			if (ifma->ifma_addr->sa_family != AF_LINK)
602				continue;
603			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
604			    ifma->ifma_addr), ETHER_ADDR_LEN);
605			/* Just want the 6 least significant bits. */
606			crc &= 0x3f;
607			/* Set the corresponding bit in the hash table. */
608			mchash[crc >> 5] |= 1 << (crc & 0x1f);
609		}
610		if_maddr_runlock(ifp);
611		if (mchash[0] != 0 || mchash[1] != 0)
612			mode |= GM_RXCR_MCF_ENA;
613	}
614
615	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
616	    mchash[0] & 0xffff);
617	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
618	    (mchash[0] >> 16) & 0xffff);
619	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
620	    mchash[1] & 0xffff);
621	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
622	    (mchash[1] >> 16) & 0xffff);
623	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
624}
625
626static void
627msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
628{
629	struct msk_softc *sc;
630
631	sc = sc_if->msk_softc;
632	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
633		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
634		    RX_VLAN_STRIP_ON);
635		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
636		    TX_VLAN_TAG_ON);
637	} else {
638		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
639		    RX_VLAN_STRIP_OFF);
640		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
641		    TX_VLAN_TAG_OFF);
642	}
643}
644
645static int
646msk_rx_fill(struct msk_if_softc *sc_if, int jumbo)
647{
648	uint16_t idx;
649	int i;
650
651	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
652	    (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
653		/* Wait until controller executes OP_TCPSTART command. */
654		for (i = 100; i > 0; i--) {
655			DELAY(100);
656			idx = CSR_READ_2(sc_if->msk_softc,
657			    Y2_PREF_Q_ADDR(sc_if->msk_rxq,
658			    PREF_UNIT_GET_IDX_REG));
659			if (idx != 0)
660				break;
661		}
662		if (i == 0) {
663			device_printf(sc_if->msk_if_dev,
664			    "prefetch unit stuck?\n");
665			return (ETIMEDOUT);
666		}
667		/*
668		 * Fill consumed LE with free buffer. This can be done
669		 * in Rx handler but we don't want to add special code
670		 * in fast handler.
671		 */
672		if (jumbo > 0) {
673			if (msk_jumbo_newbuf(sc_if, 0) != 0)
674				return (ENOBUFS);
675			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
676			    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
677			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
678		} else {
679			if (msk_newbuf(sc_if, 0) != 0)
680				return (ENOBUFS);
681			bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
682			    sc_if->msk_cdata.msk_rx_ring_map,
683			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
684		}
685		sc_if->msk_cdata.msk_rx_prod = 0;
686		CSR_WRITE_2(sc_if->msk_softc,
687		    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
688		    sc_if->msk_cdata.msk_rx_prod);
689	}
690	return (0);
691}
692
693static int
694msk_init_rx_ring(struct msk_if_softc *sc_if)
695{
696	struct msk_ring_data *rd;
697	struct msk_rxdesc *rxd;
698	int i, nbuf, prod;
699
700	MSK_IF_LOCK_ASSERT(sc_if);
701
702	sc_if->msk_cdata.msk_rx_cons = 0;
703	sc_if->msk_cdata.msk_rx_prod = 0;
704	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
705
706	rd = &sc_if->msk_rdata;
707	bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
708	for (i = prod = 0; i < MSK_RX_RING_CNT; i++) {
709		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
710		rxd->rx_m = NULL;
711		rxd->rx_le = &rd->msk_rx_ring[prod];
712		MSK_INC(prod, MSK_RX_RING_CNT);
713	}
714	nbuf = MSK_RX_BUF_CNT;
715	prod = 0;
716	/* Have controller know how to compute Rx checksum. */
717	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
718	    (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
719#ifdef MSK_64BIT_DMA
720		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
721		rxd->rx_m = NULL;
722		rxd->rx_le = &rd->msk_rx_ring[prod];
723		rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
724		    ETHER_HDR_LEN);
725		rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
726		MSK_INC(prod, MSK_RX_RING_CNT);
727		MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
728#endif
729		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
730		rxd->rx_m = NULL;
731		rxd->rx_le = &rd->msk_rx_ring[prod];
732		rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
733		    ETHER_HDR_LEN);
734		rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
735		MSK_INC(prod, MSK_RX_RING_CNT);
736		MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
737		nbuf--;
738	}
739	for (i = 0; i < nbuf; i++) {
740		if (msk_newbuf(sc_if, prod) != 0)
741			return (ENOBUFS);
742		MSK_RX_INC(prod, MSK_RX_RING_CNT);
743	}
744
745	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
746	    sc_if->msk_cdata.msk_rx_ring_map,
747	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
748
749	/* Update prefetch unit. */
750	sc_if->msk_cdata.msk_rx_prod = prod;
751	CSR_WRITE_2(sc_if->msk_softc,
752	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
753	    (sc_if->msk_cdata.msk_rx_prod + MSK_RX_RING_CNT - 1) %
754	    MSK_RX_RING_CNT);
755	if (msk_rx_fill(sc_if, 0) != 0)
756		return (ENOBUFS);
757	return (0);
758}
759
760static int
761msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
762{
763	struct msk_ring_data *rd;
764	struct msk_rxdesc *rxd;
765	int i, nbuf, prod;
766
767	MSK_IF_LOCK_ASSERT(sc_if);
768
769	sc_if->msk_cdata.msk_rx_cons = 0;
770	sc_if->msk_cdata.msk_rx_prod = 0;
771	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
772
773	rd = &sc_if->msk_rdata;
774	bzero(rd->msk_jumbo_rx_ring,
775	    sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
776	for (i = prod = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
777		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
778		rxd->rx_m = NULL;
779		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
780		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
781	}
782	nbuf = MSK_RX_BUF_CNT;
783	prod = 0;
784	/* Have controller know how to compute Rx checksum. */
785	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
786	    (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
787#ifdef MSK_64BIT_DMA
788		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
789		rxd->rx_m = NULL;
790		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
791		rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
792		    ETHER_HDR_LEN);
793		rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
794		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
795		MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
796#endif
797		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
798		rxd->rx_m = NULL;
799		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
800		rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
801		    ETHER_HDR_LEN);
802		rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
803		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
804		MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
805		nbuf--;
806	}
807	for (i = 0; i < nbuf; i++) {
808		if (msk_jumbo_newbuf(sc_if, prod) != 0)
809			return (ENOBUFS);
810		MSK_RX_INC(prod, MSK_JUMBO_RX_RING_CNT);
811	}
812
813	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
814	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
815	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
816
817	/* Update prefetch unit. */
818	sc_if->msk_cdata.msk_rx_prod = prod;
819	CSR_WRITE_2(sc_if->msk_softc,
820	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
821	    (sc_if->msk_cdata.msk_rx_prod + MSK_JUMBO_RX_RING_CNT - 1) %
822	    MSK_JUMBO_RX_RING_CNT);
823	if (msk_rx_fill(sc_if, 1) != 0)
824		return (ENOBUFS);
825	return (0);
826}
827
828static void
829msk_init_tx_ring(struct msk_if_softc *sc_if)
830{
831	struct msk_ring_data *rd;
832	struct msk_txdesc *txd;
833	int i;
834
835	sc_if->msk_cdata.msk_tso_mtu = 0;
836	sc_if->msk_cdata.msk_last_csum = 0;
837	sc_if->msk_cdata.msk_tx_prod = 0;
838	sc_if->msk_cdata.msk_tx_cons = 0;
839	sc_if->msk_cdata.msk_tx_cnt = 0;
840	sc_if->msk_cdata.msk_tx_high_addr = 0;
841
842	rd = &sc_if->msk_rdata;
843	bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
844	for (i = 0; i < MSK_TX_RING_CNT; i++) {
845		txd = &sc_if->msk_cdata.msk_txdesc[i];
846		txd->tx_m = NULL;
847		txd->tx_le = &rd->msk_tx_ring[i];
848	}
849
850	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
851	    sc_if->msk_cdata.msk_tx_ring_map,
852	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
853}
854
855static __inline void
856msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
857{
858	struct msk_rx_desc *rx_le;
859	struct msk_rxdesc *rxd;
860	struct mbuf *m;
861
862#ifdef MSK_64BIT_DMA
863	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
864	rx_le = rxd->rx_le;
865	rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
866	MSK_INC(idx, MSK_RX_RING_CNT);
867#endif
868	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
869	m = rxd->rx_m;
870	rx_le = rxd->rx_le;
871	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
872}
873
874static __inline void
875msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int	idx)
876{
877	struct msk_rx_desc *rx_le;
878	struct msk_rxdesc *rxd;
879	struct mbuf *m;
880
881#ifdef MSK_64BIT_DMA
882	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
883	rx_le = rxd->rx_le;
884	rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
885	MSK_INC(idx, MSK_JUMBO_RX_RING_CNT);
886#endif
887	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
888	m = rxd->rx_m;
889	rx_le = rxd->rx_le;
890	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
891}
892
893static int
894msk_newbuf(struct msk_if_softc *sc_if, int idx)
895{
896	struct msk_rx_desc *rx_le;
897	struct msk_rxdesc *rxd;
898	struct mbuf *m;
899	bus_dma_segment_t segs[1];
900	bus_dmamap_t map;
901	int nsegs;
902
903	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
904	if (m == NULL)
905		return (ENOBUFS);
906
907	m->m_len = m->m_pkthdr.len = MCLBYTES;
908	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
909		m_adj(m, ETHER_ALIGN);
910#ifndef __NO_STRICT_ALIGNMENT
911	else
912		m_adj(m, MSK_RX_BUF_ALIGN);
913#endif
914
915	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag,
916	    sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs,
917	    BUS_DMA_NOWAIT) != 0) {
918		m_freem(m);
919		return (ENOBUFS);
920	}
921	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
922
923	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
924#ifdef MSK_64BIT_DMA
925	rx_le = rxd->rx_le;
926	rx_le->msk_addr = htole32(MSK_ADDR_HI(segs[0].ds_addr));
927	rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
928	MSK_INC(idx, MSK_RX_RING_CNT);
929	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
930#endif
931	if (rxd->rx_m != NULL) {
932		bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
933		    BUS_DMASYNC_POSTREAD);
934		bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
935		rxd->rx_m = NULL;
936	}
937	map = rxd->rx_dmamap;
938	rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
939	sc_if->msk_cdata.msk_rx_sparemap = map;
940	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
941	    BUS_DMASYNC_PREREAD);
942	rxd->rx_m = m;
943	rx_le = rxd->rx_le;
944	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
945	rx_le->msk_control =
946	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
947
948	return (0);
949}
950
951static int
952msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
953{
954	struct msk_rx_desc *rx_le;
955	struct msk_rxdesc *rxd;
956	struct mbuf *m;
957	bus_dma_segment_t segs[1];
958	bus_dmamap_t map;
959	int nsegs;
960
961	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
962	if (m == NULL)
963		return (ENOBUFS);
964	if ((m->m_flags & M_EXT) == 0) {
965		m_freem(m);
966		return (ENOBUFS);
967	}
968	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
969	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
970		m_adj(m, ETHER_ALIGN);
971#ifndef __NO_STRICT_ALIGNMENT
972	else
973		m_adj(m, MSK_RX_BUF_ALIGN);
974#endif
975
976	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
977	    sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
978	    BUS_DMA_NOWAIT) != 0) {
979		m_freem(m);
980		return (ENOBUFS);
981	}
982	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
983
984	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
985#ifdef MSK_64BIT_DMA
986	rx_le = rxd->rx_le;
987	rx_le->msk_addr = htole32(MSK_ADDR_HI(segs[0].ds_addr));
988	rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
989	MSK_INC(idx, MSK_JUMBO_RX_RING_CNT);
990	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
991#endif
992	if (rxd->rx_m != NULL) {
993		bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
994		    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
995		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
996		    rxd->rx_dmamap);
997		rxd->rx_m = NULL;
998	}
999	map = rxd->rx_dmamap;
1000	rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
1001	sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
1002	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
1003	    BUS_DMASYNC_PREREAD);
1004	rxd->rx_m = m;
1005	rx_le = rxd->rx_le;
1006	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
1007	rx_le->msk_control =
1008	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
1009
1010	return (0);
1011}
1012
1013/*
1014 * Set media options.
1015 */
1016static int
1017msk_mediachange(struct ifnet *ifp)
1018{
1019	struct msk_if_softc *sc_if;
1020	struct mii_data	*mii;
1021	int error;
1022
1023	sc_if = ifp->if_softc;
1024
1025	MSK_IF_LOCK(sc_if);
1026	mii = device_get_softc(sc_if->msk_miibus);
1027	error = mii_mediachg(mii);
1028	MSK_IF_UNLOCK(sc_if);
1029
1030	return (error);
1031}
1032
1033/*
1034 * Report current media status.
1035 */
1036static void
1037msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1038{
1039	struct msk_if_softc *sc_if;
1040	struct mii_data	*mii;
1041
1042	sc_if = ifp->if_softc;
1043	MSK_IF_LOCK(sc_if);
1044	if ((ifp->if_flags & IFF_UP) == 0) {
1045		MSK_IF_UNLOCK(sc_if);
1046		return;
1047	}
1048	mii = device_get_softc(sc_if->msk_miibus);
1049
1050	mii_pollstat(mii);
1051	ifmr->ifm_active = mii->mii_media_active;
1052	ifmr->ifm_status = mii->mii_media_status;
1053	MSK_IF_UNLOCK(sc_if);
1054}
1055
1056static int
1057msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1058{
1059	struct msk_if_softc *sc_if;
1060	struct ifreq *ifr;
1061	struct mii_data	*mii;
1062	int error, mask, reinit;
1063
1064	sc_if = ifp->if_softc;
1065	ifr = (struct ifreq *)data;
1066	error = 0;
1067
1068	switch(command) {
1069	case SIOCSIFMTU:
1070		MSK_IF_LOCK(sc_if);
1071		if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN)
1072			error = EINVAL;
1073		else if (ifp->if_mtu != ifr->ifr_mtu) {
1074			if (ifr->ifr_mtu > ETHERMTU) {
1075				if ((sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
1076					error = EINVAL;
1077					MSK_IF_UNLOCK(sc_if);
1078					break;
1079				}
1080				if ((sc_if->msk_flags &
1081				    MSK_FLAG_JUMBO_NOCSUM) != 0) {
1082					ifp->if_hwassist &=
1083					    ~(MSK_CSUM_FEATURES | CSUM_TSO);
1084					ifp->if_capenable &=
1085					    ~(IFCAP_TSO4 | IFCAP_TXCSUM);
1086					VLAN_CAPABILITIES(ifp);
1087				}
1088			}
1089			ifp->if_mtu = ifr->ifr_mtu;
1090			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1091				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1092				msk_init_locked(sc_if);
1093			}
1094		}
1095		MSK_IF_UNLOCK(sc_if);
1096		break;
1097	case SIOCSIFFLAGS:
1098		MSK_IF_LOCK(sc_if);
1099		if ((ifp->if_flags & IFF_UP) != 0) {
1100			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
1101			    ((ifp->if_flags ^ sc_if->msk_if_flags) &
1102			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
1103				msk_rxfilter(sc_if);
1104			else if ((sc_if->msk_flags & MSK_FLAG_DETACH) == 0)
1105				msk_init_locked(sc_if);
1106		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1107			msk_stop(sc_if);
1108		sc_if->msk_if_flags = ifp->if_flags;
1109		MSK_IF_UNLOCK(sc_if);
1110		break;
1111	case SIOCADDMULTI:
1112	case SIOCDELMULTI:
1113		MSK_IF_LOCK(sc_if);
1114		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1115			msk_rxfilter(sc_if);
1116		MSK_IF_UNLOCK(sc_if);
1117		break;
1118	case SIOCGIFMEDIA:
1119	case SIOCSIFMEDIA:
1120		mii = device_get_softc(sc_if->msk_miibus);
1121		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1122		break;
1123	case SIOCSIFCAP:
1124		reinit = 0;
1125		MSK_IF_LOCK(sc_if);
1126		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1127		if ((mask & IFCAP_TXCSUM) != 0 &&
1128		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
1129			ifp->if_capenable ^= IFCAP_TXCSUM;
1130			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
1131				ifp->if_hwassist |= MSK_CSUM_FEATURES;
1132			else
1133				ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
1134		}
1135		if ((mask & IFCAP_RXCSUM) != 0 &&
1136		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
1137			ifp->if_capenable ^= IFCAP_RXCSUM;
1138			if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0)
1139				reinit = 1;
1140		}
1141		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
1142		    (IFCAP_VLAN_HWCSUM & ifp->if_capabilities) != 0)
1143			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1144		if ((mask & IFCAP_TSO4) != 0 &&
1145		    (IFCAP_TSO4 & ifp->if_capabilities) != 0) {
1146			ifp->if_capenable ^= IFCAP_TSO4;
1147			if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
1148				ifp->if_hwassist |= CSUM_TSO;
1149			else
1150				ifp->if_hwassist &= ~CSUM_TSO;
1151		}
1152		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
1153		    (IFCAP_VLAN_HWTSO & ifp->if_capabilities) != 0)
1154			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1155		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
1156		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
1157			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1158			if ((IFCAP_VLAN_HWTAGGING & ifp->if_capenable) == 0)
1159				ifp->if_capenable &=
1160				    ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
1161			msk_setvlan(sc_if, ifp);
1162		}
1163		if (ifp->if_mtu > ETHERMTU &&
1164		    (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
1165			ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
1166			ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
1167		}
1168		VLAN_CAPABILITIES(ifp);
1169		if (reinit > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1170			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1171			msk_init_locked(sc_if);
1172		}
1173		MSK_IF_UNLOCK(sc_if);
1174		break;
1175	default:
1176		error = ether_ioctl(ifp, command, data);
1177		break;
1178	}
1179
1180	return (error);
1181}
1182
1183static int
1184mskc_probe(device_t dev)
1185{
1186	const struct msk_product *mp;
1187	uint16_t vendor, devid;
1188	int i;
1189
1190	vendor = pci_get_vendor(dev);
1191	devid = pci_get_device(dev);
1192	mp = msk_products;
1193	for (i = 0; i < nitems(msk_products); i++, mp++) {
1194		if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
1195			device_set_desc(dev, mp->msk_name);
1196			return (BUS_PROBE_DEFAULT);
1197		}
1198	}
1199
1200	return (ENXIO);
1201}
1202
1203static int
1204mskc_setup_rambuffer(struct msk_softc *sc)
1205{
1206	int next;
1207	int i;
1208
1209	/* Get adapter SRAM size. */
1210	sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
1211	if (bootverbose)
1212		device_printf(sc->msk_dev,
1213		    "RAM buffer size : %dKB\n", sc->msk_ramsize);
1214	if (sc->msk_ramsize == 0)
1215		return (0);
1216
1217	sc->msk_pflags |= MSK_FLAG_RAMBUF;
1218	/*
1219	 * Give receiver 2/3 of memory and round down to the multiple
1220	 * of 1024. Tx/Rx RAM buffer size of Yukon II should be multiple
1221	 * of 1024.
1222	 */
1223	sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
1224	sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
1225	for (i = 0, next = 0; i < sc->msk_num_port; i++) {
1226		sc->msk_rxqstart[i] = next;
1227		sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
1228		next = sc->msk_rxqend[i] + 1;
1229		sc->msk_txqstart[i] = next;
1230		sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
1231		next = sc->msk_txqend[i] + 1;
1232		if (bootverbose) {
1233			device_printf(sc->msk_dev,
1234			    "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
1235			    sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
1236			    sc->msk_rxqend[i]);
1237			device_printf(sc->msk_dev,
1238			    "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
1239			    sc->msk_txqsize / 1024, sc->msk_txqstart[i],
1240			    sc->msk_txqend[i]);
1241		}
1242	}
1243
1244	return (0);
1245}
1246
1247static void
1248msk_phy_power(struct msk_softc *sc, int mode)
1249{
1250	uint32_t our, val;
1251	int i;
1252
1253	switch (mode) {
1254	case MSK_PHY_POWERUP:
1255		/* Switch power to VCC (WA for VAUX problem). */
1256		CSR_WRITE_1(sc, B0_POWER_CTRL,
1257		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
1258		/* Disable Core Clock Division, set Clock Select to 0. */
1259		CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
1260
1261		val = 0;
1262		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1263		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1264			/* Enable bits are inverted. */
1265			val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1266			      Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1267			      Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1268		}
1269		/*
1270		 * Enable PCI & Core Clock, enable clock gating for both Links.
1271		 */
1272		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1273
1274		our = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
1275		our &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
1276		if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
1277			if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1278				/* Deassert Low Power for 1st PHY. */
1279				our |= PCI_Y2_PHY1_COMA;
1280				if (sc->msk_num_port > 1)
1281					our |= PCI_Y2_PHY2_COMA;
1282			}
1283		}
1284		if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U ||
1285		    sc->msk_hw_id == CHIP_ID_YUKON_EX ||
1286		    sc->msk_hw_id >= CHIP_ID_YUKON_FE_P) {
1287			val = CSR_PCI_READ_4(sc, PCI_OUR_REG_4);
1288			val &= (PCI_FORCE_ASPM_REQUEST |
1289			    PCI_ASPM_GPHY_LINK_DOWN | PCI_ASPM_INT_FIFO_EMPTY |
1290			    PCI_ASPM_CLKRUN_REQUEST);
1291			/* Set all bits to 0 except bits 15..12. */
1292			CSR_PCI_WRITE_4(sc, PCI_OUR_REG_4, val);
1293			val = CSR_PCI_READ_4(sc, PCI_OUR_REG_5);
1294			val &= PCI_CTL_TIM_VMAIN_AV_MSK;
1295			CSR_PCI_WRITE_4(sc, PCI_OUR_REG_5, val);
1296			CSR_PCI_WRITE_4(sc, PCI_CFG_REG_1, 0);
1297			CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
1298			/*
1299			 * Disable status race, workaround for
1300			 * Yukon EC Ultra & Yukon EX.
1301			 */
1302			val = CSR_READ_4(sc, B2_GP_IO);
1303			val |= GLB_GPIO_STAT_RACE_DIS;
1304			CSR_WRITE_4(sc, B2_GP_IO, val);
1305			CSR_READ_4(sc, B2_GP_IO);
1306		}
1307		/* Release PHY from PowerDown/COMA mode. */
1308		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, our);
1309
1310		for (i = 0; i < sc->msk_num_port; i++) {
1311			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1312			    GMLC_RST_SET);
1313			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1314			    GMLC_RST_CLR);
1315		}
1316		break;
1317	case MSK_PHY_POWERDOWN:
1318		val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
1319		val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
1320		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1321		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1322			val &= ~PCI_Y2_PHY1_COMA;
1323			if (sc->msk_num_port > 1)
1324				val &= ~PCI_Y2_PHY2_COMA;
1325		}
1326		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);
1327
1328		val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1329		      Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1330		      Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1331		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1332		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1333			/* Enable bits are inverted. */
1334			val = 0;
1335		}
1336		/*
1337		 * Disable PCI & Core Clock, disable clock gating for
1338		 * both Links.
1339		 */
1340		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1341		CSR_WRITE_1(sc, B0_POWER_CTRL,
1342		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
1343		break;
1344	default:
1345		break;
1346	}
1347}
1348
1349static void
1350mskc_reset(struct msk_softc *sc)
1351{
1352	bus_addr_t addr;
1353	uint16_t status;
1354	uint32_t val;
1355	int i, initram;
1356
1357	/* Disable ASF. */
1358	if (sc->msk_hw_id >= CHIP_ID_YUKON_XL &&
1359	    sc->msk_hw_id <= CHIP_ID_YUKON_SUPR) {
1360		if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
1361		    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
1362			CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
1363			status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR);
1364			/* Clear AHB bridge & microcontroller reset. */
1365			status &= ~(Y2_ASF_HCU_CCSR_AHB_RST |
1366			    Y2_ASF_HCU_CCSR_CPU_RST_MODE);
1367			/* Clear ASF microcontroller state. */
1368			status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK;
1369			status &= ~Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_MSK;
1370			CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status);
1371			CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
1372		} else
1373			CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
1374		CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
1375		/*
1376		 * Since we disabled ASF, S/W reset is required for
1377		 * Power Management.
1378		 */
1379		CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1380		CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1381	}
1382
1383	/* Clear all error bits in the PCI status register. */
1384	status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
1385	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1386
1387	pci_write_config(sc->msk_dev, PCIR_STATUS, status |
1388	    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
1389	    PCIM_STATUS_RTABORT | PCIM_STATUS_MDPERR, 2);
1390	CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);
1391
1392	switch (sc->msk_bustype) {
1393	case MSK_PEX_BUS:
1394		/* Clear all PEX errors. */
1395		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
1396		val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
1397		if ((val & PEX_RX_OV) != 0) {
1398			sc->msk_intrmask &= ~Y2_IS_HW_ERR;
1399			sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
1400		}
1401		break;
1402	case MSK_PCI_BUS:
1403	case MSK_PCIX_BUS:
1404		/* Set Cache Line Size to 2(8bytes) if configured to 0. */
1405		val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
1406		if (val == 0)
1407			pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
1408		if (sc->msk_bustype == MSK_PCIX_BUS) {
1409			/* Set Cache Line Size opt. */
1410			val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1411			val |= PCI_CLS_OPT;
1412			pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1413		}
1414		break;
1415	}
1416	/* Set PHY power state. */
1417	msk_phy_power(sc, MSK_PHY_POWERUP);
1418
1419	/* Reset GPHY/GMAC Control */
1420	for (i = 0; i < sc->msk_num_port; i++) {
1421		/* GPHY Control reset. */
1422		CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
1423		CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
1424		/* GMAC Control reset. */
1425		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
1426		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
1427		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
1428		if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
1429		    sc->msk_hw_id == CHIP_ID_YUKON_SUPR)
1430			CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL),
1431			    GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
1432			    GMC_BYP_RETR_ON);
1433	}
1434
1435	if (sc->msk_hw_id == CHIP_ID_YUKON_SUPR &&
1436	    sc->msk_hw_rev > CHIP_REV_YU_SU_B0)
1437		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, PCI_CLK_MACSEC_DIS);
1438	if (sc->msk_hw_id == CHIP_ID_YUKON_OPT && sc->msk_hw_rev == 0) {
1439		/* Disable PCIe PHY powerdown(reg 0x80, bit7). */
1440		CSR_WRITE_4(sc, Y2_PEX_PHY_DATA, (0x0080 << 16) | 0x0080);
1441	}
1442	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1443
1444	/* LED On. */
1445	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);
1446
1447	/* Clear TWSI IRQ. */
1448	CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);
1449
1450	/* Turn off hardware timer. */
1451	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
1452	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);
1453
1454	/* Turn off descriptor polling. */
1455	CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);
1456
1457	/* Turn off time stamps. */
1458	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
1459	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
1460
1461	initram = 0;
1462	if (sc->msk_hw_id == CHIP_ID_YUKON_XL ||
1463	    sc->msk_hw_id == CHIP_ID_YUKON_EC ||
1464	    sc->msk_hw_id == CHIP_ID_YUKON_FE)
1465		initram++;
1466
1467	/* Configure timeout values. */
1468	for (i = 0; initram > 0 && i < sc->msk_num_port; i++) {
1469		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
1470		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
1471		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
1472		    MSK_RI_TO_53);
1473		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
1474		    MSK_RI_TO_53);
1475		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
1476		    MSK_RI_TO_53);
1477		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
1478		    MSK_RI_TO_53);
1479		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
1480		    MSK_RI_TO_53);
1481		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
1482		    MSK_RI_TO_53);
1483		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
1484		    MSK_RI_TO_53);
1485		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
1486		    MSK_RI_TO_53);
1487		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
1488		    MSK_RI_TO_53);
1489		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
1490		    MSK_RI_TO_53);
1491		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
1492		    MSK_RI_TO_53);
1493		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
1494		    MSK_RI_TO_53);
1495	}
1496
1497	/* Disable all interrupts. */
1498	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1499	CSR_READ_4(sc, B0_HWE_IMSK);
1500	CSR_WRITE_4(sc, B0_IMSK, 0);
1501	CSR_READ_4(sc, B0_IMSK);
1502
1503        /*
1504         * On dual port PCI-X card, there is an problem where status
1505         * can be received out of order due to split transactions.
1506         */
1507	if (sc->msk_pcixcap != 0 && sc->msk_num_port > 1) {
1508		uint16_t pcix_cmd;
1509
1510		pcix_cmd = pci_read_config(sc->msk_dev,
1511		    sc->msk_pcixcap + PCIXR_COMMAND, 2);
1512		/* Clear Max Outstanding Split Transactions. */
1513		pcix_cmd &= ~PCIXM_COMMAND_MAX_SPLITS;
1514		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1515		pci_write_config(sc->msk_dev,
1516		    sc->msk_pcixcap + PCIXR_COMMAND, pcix_cmd, 2);
1517		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1518        }
1519	if (sc->msk_expcap != 0) {
1520		/* Change Max. Read Request Size to 2048 bytes. */
1521		if (pci_get_max_read_req(sc->msk_dev) == 512)
1522			pci_set_max_read_req(sc->msk_dev, 2048);
1523	}
1524
1525	/* Clear status list. */
1526	bzero(sc->msk_stat_ring,
1527	    sizeof(struct msk_stat_desc) * sc->msk_stat_count);
1528	sc->msk_stat_cons = 0;
1529	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
1530	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1531	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
1532	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
1533	/* Set the status list base address. */
1534	addr = sc->msk_stat_ring_paddr;
1535	CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
1536	CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
1537	/* Set the status list last index. */
1538	CSR_WRITE_2(sc, STAT_LAST_IDX, sc->msk_stat_count - 1);
1539	if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
1540	    sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
1541		/* WA for dev. #4.3 */
1542		CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
1543		/* WA for dev. #4.18 */
1544		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
1545		CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
1546	} else {
1547		CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
1548		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
1549		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1550		    sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
1551			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
1552		else
1553			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
1554		CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
1555	}
1556	/*
1557	 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
1558	 */
1559	CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));
1560
1561	/* Enable status unit. */
1562	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);
1563
1564	CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
1565	CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
1566	CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
1567}
1568
1569static int
1570msk_probe(device_t dev)
1571{
1572	struct msk_softc *sc;
1573	char desc[100];
1574
1575	sc = device_get_softc(device_get_parent(dev));
1576	/*
1577	 * Not much to do here. We always know there will be
1578	 * at least one GMAC present, and if there are two,
1579	 * mskc_attach() will create a second device instance
1580	 * for us.
1581	 */
1582	snprintf(desc, sizeof(desc),
1583	    "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
1584	    model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
1585	    sc->msk_hw_rev);
1586	device_set_desc_copy(dev, desc);
1587
1588	return (BUS_PROBE_DEFAULT);
1589}
1590
1591static int
1592msk_attach(device_t dev)
1593{
1594	struct msk_softc *sc;
1595	struct msk_if_softc *sc_if;
1596	struct ifnet *ifp;
1597	struct msk_mii_data *mmd;
1598	int i, port, error;
1599	uint8_t eaddr[6];
1600
1601	if (dev == NULL)
1602		return (EINVAL);
1603
1604	error = 0;
1605	sc_if = device_get_softc(dev);
1606	sc = device_get_softc(device_get_parent(dev));
1607	mmd = device_get_ivars(dev);
1608	port = mmd->port;
1609
1610	sc_if->msk_if_dev = dev;
1611	sc_if->msk_port = port;
1612	sc_if->msk_softc = sc;
1613	sc_if->msk_flags = sc->msk_pflags;
1614	sc->msk_if[port] = sc_if;
1615	/* Setup Tx/Rx queue register offsets. */
1616	if (port == MSK_PORT_A) {
1617		sc_if->msk_txq = Q_XA1;
1618		sc_if->msk_txsq = Q_XS1;
1619		sc_if->msk_rxq = Q_R1;
1620	} else {
1621		sc_if->msk_txq = Q_XA2;
1622		sc_if->msk_txsq = Q_XS2;
1623		sc_if->msk_rxq = Q_R2;
1624	}
1625
1626	callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
1627	msk_sysctl_node(sc_if);
1628
1629	if ((error = msk_txrx_dma_alloc(sc_if)) != 0)
1630		goto fail;
1631	msk_rx_dma_jalloc(sc_if);
1632
1633	ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
1634	if (ifp == NULL) {
1635		device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
1636		error = ENOSPC;
1637		goto fail;
1638	}
1639	ifp->if_softc = sc_if;
1640	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1641	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1642	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
1643	/*
1644	 * Enable Rx checksum offloading if controller supports
1645	 * new descriptor formant and controller is not Yukon XL.
1646	 */
1647	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
1648	    sc->msk_hw_id != CHIP_ID_YUKON_XL)
1649		ifp->if_capabilities |= IFCAP_RXCSUM;
1650	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
1651	    (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
1652		ifp->if_capabilities |= IFCAP_RXCSUM;
1653	ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO;
1654	ifp->if_capenable = ifp->if_capabilities;
1655	ifp->if_ioctl = msk_ioctl;
1656	ifp->if_start = msk_start;
1657	ifp->if_init = msk_init;
1658	IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
1659	ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1;
1660	IFQ_SET_READY(&ifp->if_snd);
1661	/*
1662	 * Get station address for this interface. Note that
1663	 * dual port cards actually come with three station
1664	 * addresses: one for each port, plus an extra. The
1665	 * extra one is used by the SysKonnect driver software
1666	 * as a 'virtual' station address for when both ports
1667	 * are operating in failover mode. Currently we don't
1668	 * use this extra address.
1669	 */
1670	MSK_IF_LOCK(sc_if);
1671	for (i = 0; i < ETHER_ADDR_LEN; i++)
1672		eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);
1673
1674	/*
1675	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
1676	 */
1677	MSK_IF_UNLOCK(sc_if);
1678	ether_ifattach(ifp, eaddr);
1679	MSK_IF_LOCK(sc_if);
1680
1681	/* VLAN capability setup */
1682	ifp->if_capabilities |= IFCAP_VLAN_MTU;
1683	if ((sc_if->msk_flags & MSK_FLAG_NOHWVLAN) == 0) {
1684		/*
1685		 * Due to Tx checksum offload hardware bugs, msk(4) manually
1686		 * computes checksum for short frames. For VLAN tagged frames
1687		 * this workaround does not work so disable checksum offload
1688		 * for VLAN interface.
1689		 */
1690		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO;
1691		/*
1692		 * Enable Rx checksum offloading for VLAN tagged frames
1693		 * if controller support new descriptor format.
1694		 */
1695		if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
1696		    (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
1697			ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
1698	}
1699	ifp->if_capenable = ifp->if_capabilities;
1700	/*
1701	 * Disable RX checksum offloading on controllers that don't use
1702	 * new descriptor format but give chance to enable it.
1703	 */
1704	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0)
1705		ifp->if_capenable &= ~IFCAP_RXCSUM;
1706
1707	/*
1708	 * Tell the upper layer(s) we support long frames.
1709	 * Must appear after the call to ether_ifattach() because
1710	 * ether_ifattach() sets ifi_hdrlen to the default value.
1711	 */
1712        ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1713
1714	/*
1715	 * Do miibus setup.
1716	 */
1717	MSK_IF_UNLOCK(sc_if);
1718	error = mii_attach(dev, &sc_if->msk_miibus, ifp, msk_mediachange,
1719	    msk_mediastatus, BMSR_DEFCAPMASK, PHY_ADDR_MARV, MII_OFFSET_ANY,
1720	    mmd->mii_flags);
1721	if (error != 0) {
1722		device_printf(sc_if->msk_if_dev, "attaching PHYs failed\n");
1723		ether_ifdetach(ifp);
1724		error = ENXIO;
1725		goto fail;
1726	}
1727
1728fail:
1729	if (error != 0) {
1730		/* Access should be ok even though lock has been dropped */
1731		sc->msk_if[port] = NULL;
1732		msk_detach(dev);
1733	}
1734
1735	return (error);
1736}
1737
1738/*
1739 * Attach the interface. Allocate softc structures, do ifmedia
1740 * setup and ethernet/BPF attach.
1741 */
1742static int
1743mskc_attach(device_t dev)
1744{
1745	struct msk_softc *sc;
1746	struct msk_mii_data *mmd;
1747	int error, msic, msir, reg;
1748
1749	sc = device_get_softc(dev);
1750	sc->msk_dev = dev;
1751	mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1752	    MTX_DEF);
1753
1754	/*
1755	 * Map control/status registers.
1756	 */
1757	pci_enable_busmaster(dev);
1758
1759	/* Allocate I/O resource */
1760#ifdef MSK_USEIOSPACE
1761	sc->msk_res_spec = msk_res_spec_io;
1762#else
1763	sc->msk_res_spec = msk_res_spec_mem;
1764#endif
1765	sc->msk_irq_spec = msk_irq_spec_legacy;
1766	error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
1767	if (error) {
1768		if (sc->msk_res_spec == msk_res_spec_mem)
1769			sc->msk_res_spec = msk_res_spec_io;
1770		else
1771			sc->msk_res_spec = msk_res_spec_mem;
1772		error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
1773		if (error) {
1774			device_printf(dev, "couldn't allocate %s resources\n",
1775			    sc->msk_res_spec == msk_res_spec_mem ? "memory" :
1776			    "I/O");
1777			mtx_destroy(&sc->msk_mtx);
1778			return (ENXIO);
1779		}
1780	}
1781
1782	/* Enable all clocks before accessing any registers. */
1783	CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
1784
1785	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1786	sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
1787	sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
1788	/* Bail out if chip is not recognized. */
1789	if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
1790	    sc->msk_hw_id > CHIP_ID_YUKON_OPT ||
1791	    sc->msk_hw_id == CHIP_ID_YUKON_UNKNOWN) {
1792		device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
1793		    sc->msk_hw_id, sc->msk_hw_rev);
1794		mtx_destroy(&sc->msk_mtx);
1795		return (ENXIO);
1796	}
1797
1798	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1799	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1800	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
1801	    &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
1802	    "max number of Rx events to process");
1803
1804	sc->msk_process_limit = MSK_PROC_DEFAULT;
1805	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
1806	    "process_limit", &sc->msk_process_limit);
1807	if (error == 0) {
1808		if (sc->msk_process_limit < MSK_PROC_MIN ||
1809		    sc->msk_process_limit > MSK_PROC_MAX) {
1810			device_printf(dev, "process_limit value out of range; "
1811			    "using default: %d\n", MSK_PROC_DEFAULT);
1812			sc->msk_process_limit = MSK_PROC_DEFAULT;
1813		}
1814	}
1815
1816	sc->msk_int_holdoff = MSK_INT_HOLDOFF_DEFAULT;
1817	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
1818	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
1819	    "int_holdoff", CTLFLAG_RW, &sc->msk_int_holdoff, 0,
1820	    "Maximum number of time to delay interrupts");
1821	resource_int_value(device_get_name(dev), device_get_unit(dev),
1822	    "int_holdoff", &sc->msk_int_holdoff);
1823
1824	sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
1825	/* Check number of MACs. */
1826	sc->msk_num_port = 1;
1827	if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
1828	    CFG_DUAL_MAC_MSK) {
1829		if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
1830			sc->msk_num_port++;
1831	}
1832
1833	/* Check bus type. */
1834	if (pci_find_cap(sc->msk_dev, PCIY_EXPRESS, &reg) == 0) {
1835		sc->msk_bustype = MSK_PEX_BUS;
1836		sc->msk_expcap = reg;
1837	} else if (pci_find_cap(sc->msk_dev, PCIY_PCIX, &reg) == 0) {
1838		sc->msk_bustype = MSK_PCIX_BUS;
1839		sc->msk_pcixcap = reg;
1840	} else
1841		sc->msk_bustype = MSK_PCI_BUS;
1842
1843	switch (sc->msk_hw_id) {
1844	case CHIP_ID_YUKON_EC:
1845		sc->msk_clock = 125;	/* 125 MHz */
1846		sc->msk_pflags |= MSK_FLAG_JUMBO;
1847		break;
1848	case CHIP_ID_YUKON_EC_U:
1849		sc->msk_clock = 125;	/* 125 MHz */
1850		sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_JUMBO_NOCSUM;
1851		break;
1852	case CHIP_ID_YUKON_EX:
1853		sc->msk_clock = 125;	/* 125 MHz */
1854		sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 |
1855		    MSK_FLAG_AUTOTX_CSUM;
1856		/*
1857		 * Yukon Extreme seems to have silicon bug for
1858		 * automatic Tx checksum calculation capability.
1859		 */
1860		if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
1861			sc->msk_pflags &= ~MSK_FLAG_AUTOTX_CSUM;
1862		/*
1863		 * Yukon Extreme A0 could not use store-and-forward
1864		 * for jumbo frames, so disable Tx checksum
1865		 * offloading for jumbo frames.
1866		 */
1867		if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0)
1868			sc->msk_pflags |= MSK_FLAG_JUMBO_NOCSUM;
1869		break;
1870	case CHIP_ID_YUKON_FE:
1871		sc->msk_clock = 100;	/* 100 MHz */
1872		sc->msk_pflags |= MSK_FLAG_FASTETHER;
1873		break;
1874	case CHIP_ID_YUKON_FE_P:
1875		sc->msk_clock = 50;	/* 50 MHz */
1876		sc->msk_pflags |= MSK_FLAG_FASTETHER | MSK_FLAG_DESCV2 |
1877		    MSK_FLAG_AUTOTX_CSUM;
1878		if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
1879			/*
1880			 * XXX
1881			 * FE+ A0 has status LE writeback bug so msk(4)
1882			 * does not rely on status word of received frame
1883			 * in msk_rxeof() which in turn disables all
1884			 * hardware assistance bits reported by the status
1885			 * word as well as validity of the received frame.
1886			 * Just pass received frames to upper stack with
1887			 * minimal test and let upper stack handle them.
1888			 */
1889			sc->msk_pflags |= MSK_FLAG_NOHWVLAN |
1890			    MSK_FLAG_NORXCHK | MSK_FLAG_NORX_CSUM;
1891		}
1892		break;
1893	case CHIP_ID_YUKON_XL:
1894		sc->msk_clock = 156;	/* 156 MHz */
1895		sc->msk_pflags |= MSK_FLAG_JUMBO;
1896		break;
1897	case CHIP_ID_YUKON_SUPR:
1898		sc->msk_clock = 125;	/* 125 MHz */
1899		sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 |
1900		    MSK_FLAG_AUTOTX_CSUM;
1901		break;
1902	case CHIP_ID_YUKON_UL_2:
1903		sc->msk_clock = 125;	/* 125 MHz */
1904		sc->msk_pflags |= MSK_FLAG_JUMBO;
1905		break;
1906	case CHIP_ID_YUKON_OPT:
1907		sc->msk_clock = 125;	/* 125 MHz */
1908		sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2;
1909		break;
1910	default:
1911		sc->msk_clock = 156;	/* 156 MHz */
1912		break;
1913	}
1914
1915	/* Allocate IRQ resources. */
1916	msic = pci_msi_count(dev);
1917	if (bootverbose)
1918		device_printf(dev, "MSI count : %d\n", msic);
1919	if (legacy_intr != 0)
1920		msi_disable = 1;
1921	if (msi_disable == 0 && msic > 0) {
1922		msir = 1;
1923		if (pci_alloc_msi(dev, &msir) == 0) {
1924			if (msir == 1) {
1925				sc->msk_pflags |= MSK_FLAG_MSI;
1926				sc->msk_irq_spec = msk_irq_spec_msi;
1927			} else
1928				pci_release_msi(dev);
1929		}
1930	}
1931
1932	error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq);
1933	if (error) {
1934		device_printf(dev, "couldn't allocate IRQ resources\n");
1935		goto fail;
1936	}
1937
1938	if ((error = msk_status_dma_alloc(sc)) != 0)
1939		goto fail;
1940
1941	/* Set base interrupt mask. */
1942	sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
1943	sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
1944	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;
1945
1946	/* Reset the adapter. */
1947	mskc_reset(sc);
1948
1949	if ((error = mskc_setup_rambuffer(sc)) != 0)
1950		goto fail;
1951
1952	sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
1953	if (sc->msk_devs[MSK_PORT_A] == NULL) {
1954		device_printf(dev, "failed to add child for PORT_A\n");
1955		error = ENXIO;
1956		goto fail;
1957	}
1958	mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK | M_ZERO);
1959	if (mmd == NULL) {
1960		device_printf(dev, "failed to allocate memory for "
1961		    "ivars of PORT_A\n");
1962		error = ENXIO;
1963		goto fail;
1964	}
1965	mmd->port = MSK_PORT_A;
1966	mmd->pmd = sc->msk_pmd;
1967	mmd->mii_flags |= MIIF_DOPAUSE;
1968	if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
1969		mmd->mii_flags |= MIIF_HAVEFIBER;
1970	if (sc->msk_pmd == 'P')
1971		mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0;
1972	device_set_ivars(sc->msk_devs[MSK_PORT_A], mmd);
1973
1974	if (sc->msk_num_port > 1) {
1975		sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
1976		if (sc->msk_devs[MSK_PORT_B] == NULL) {
1977			device_printf(dev, "failed to add child for PORT_B\n");
1978			error = ENXIO;
1979			goto fail;
1980		}
1981		mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK |
1982		    M_ZERO);
1983		if (mmd == NULL) {
1984			device_printf(dev, "failed to allocate memory for "
1985			    "ivars of PORT_B\n");
1986			error = ENXIO;
1987			goto fail;
1988		}
1989		mmd->port = MSK_PORT_B;
1990		mmd->pmd = sc->msk_pmd;
1991		if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
1992			mmd->mii_flags |= MIIF_HAVEFIBER;
1993		if (sc->msk_pmd == 'P')
1994			mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0;
1995		device_set_ivars(sc->msk_devs[MSK_PORT_B], mmd);
1996	}
1997
1998	error = bus_generic_attach(dev);
1999	if (error) {
2000		device_printf(dev, "failed to attach port(s)\n");
2001		goto fail;
2002	}
2003
2004	/* Hook interrupt last to avoid having to lock softc. */
2005	error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
2006	    INTR_MPSAFE, NULL, msk_intr, sc, &sc->msk_intrhand);
2007	if (error != 0) {
2008		device_printf(dev, "couldn't set up interrupt handler\n");
2009		goto fail;
2010	}
2011fail:
2012	if (error != 0)
2013		mskc_detach(dev);
2014
2015	return (error);
2016}
2017
2018/*
2019 * Shutdown hardware and free up resources. This can be called any
2020 * time after the mutex has been initialized. It is called in both
2021 * the error case in attach and the normal detach case so it needs
2022 * to be careful about only freeing resources that have actually been
2023 * allocated.
2024 */
2025static int
2026msk_detach(device_t dev)
2027{
2028	struct msk_softc *sc;
2029	struct msk_if_softc *sc_if;
2030	struct ifnet *ifp;
2031
2032	sc_if = device_get_softc(dev);
2033	KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx),
2034	    ("msk mutex not initialized in msk_detach"));
2035	MSK_IF_LOCK(sc_if);
2036
2037	ifp = sc_if->msk_ifp;
2038	if (device_is_attached(dev)) {
2039		/* XXX */
2040		sc_if->msk_flags |= MSK_FLAG_DETACH;
2041		msk_stop(sc_if);
2042		/* Can't hold locks while calling detach. */
2043		MSK_IF_UNLOCK(sc_if);
2044		callout_drain(&sc_if->msk_tick_ch);
2045		if (ifp)
2046			ether_ifdetach(ifp);
2047		MSK_IF_LOCK(sc_if);
2048	}
2049
2050	/*
2051	 * We're generally called from mskc_detach() which is using
2052	 * device_delete_child() to get to here. It's already trashed
2053	 * miibus for us, so don't do it here or we'll panic.
2054	 *
2055	 * if (sc_if->msk_miibus != NULL) {
2056	 * 	device_delete_child(dev, sc_if->msk_miibus);
2057	 * 	sc_if->msk_miibus = NULL;
2058	 * }
2059	 */
2060
2061	msk_rx_dma_jfree(sc_if);
2062	msk_txrx_dma_free(sc_if);
2063	bus_generic_detach(dev);
2064
2065	if (ifp)
2066		if_free(ifp);
2067	sc = sc_if->msk_softc;
2068	sc->msk_if[sc_if->msk_port] = NULL;
2069	MSK_IF_UNLOCK(sc_if);
2070
2071	return (0);
2072}
2073
2074static int
2075mskc_detach(device_t dev)
2076{
2077	struct msk_softc *sc;
2078
2079	sc = device_get_softc(dev);
2080	KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized"));
2081
2082	if (device_is_alive(dev)) {
2083		if (sc->msk_devs[MSK_PORT_A] != NULL) {
2084			free(device_get_ivars(sc->msk_devs[MSK_PORT_A]),
2085			    M_DEVBUF);
2086			device_delete_child(dev, sc->msk_devs[MSK_PORT_A]);
2087		}
2088		if (sc->msk_devs[MSK_PORT_B] != NULL) {
2089			free(device_get_ivars(sc->msk_devs[MSK_PORT_B]),
2090			    M_DEVBUF);
2091			device_delete_child(dev, sc->msk_devs[MSK_PORT_B]);
2092		}
2093		bus_generic_detach(dev);
2094	}
2095
2096	/* Disable all interrupts. */
2097	CSR_WRITE_4(sc, B0_IMSK, 0);
2098	CSR_READ_4(sc, B0_IMSK);
2099	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2100	CSR_READ_4(sc, B0_HWE_IMSK);
2101
2102	/* LED Off. */
2103	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);
2104
2105	/* Put hardware reset. */
2106	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2107
2108	msk_status_dma_free(sc);
2109
2110	if (sc->msk_intrhand) {
2111		bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand);
2112		sc->msk_intrhand = NULL;
2113	}
2114	bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq);
2115	if ((sc->msk_pflags & MSK_FLAG_MSI) != 0)
2116		pci_release_msi(dev);
2117	bus_release_resources(dev, sc->msk_res_spec, sc->msk_res);
2118	mtx_destroy(&sc->msk_mtx);
2119
2120	return (0);
2121}
2122
2123static bus_dma_tag_t
2124mskc_get_dma_tag(device_t bus, device_t child __unused)
2125{
2126
2127	return (bus_get_dma_tag(bus));
2128}
2129
2130struct msk_dmamap_arg {
2131	bus_addr_t	msk_busaddr;
2132};
2133
2134static void
2135msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2136{
2137	struct msk_dmamap_arg *ctx;
2138
2139	if (error != 0)
2140		return;
2141	ctx = arg;
2142	ctx->msk_busaddr = segs[0].ds_addr;
2143}
2144
2145/* Create status DMA region. */
2146static int
2147msk_status_dma_alloc(struct msk_softc *sc)
2148{
2149	struct msk_dmamap_arg ctx;
2150	bus_size_t stat_sz;
2151	int count, error;
2152
2153	/*
2154	 * It seems controller requires number of status LE entries
2155	 * is power of 2 and the maximum number of status LE entries
2156	 * is 4096.  For dual-port controllers, the number of status
2157	 * LE entries should be large enough to hold both port's
2158	 * status updates.
2159	 */
2160	count = 3 * MSK_RX_RING_CNT + MSK_TX_RING_CNT;
2161	count = imin(4096, roundup2(count, 1024));
2162	sc->msk_stat_count = count;
2163	stat_sz = count * sizeof(struct msk_stat_desc);
2164	error = bus_dma_tag_create(
2165		    bus_get_dma_tag(sc->msk_dev),	/* parent */
2166		    MSK_STAT_ALIGN, 0,		/* alignment, boundary */
2167		    BUS_SPACE_MAXADDR,		/* lowaddr */
2168		    BUS_SPACE_MAXADDR,		/* highaddr */
2169		    NULL, NULL,			/* filter, filterarg */
2170		    stat_sz,			/* maxsize */
2171		    1,				/* nsegments */
2172		    stat_sz,			/* maxsegsize */
2173		    0,				/* flags */
2174		    NULL, NULL,			/* lockfunc, lockarg */
2175		    &sc->msk_stat_tag);
2176	if (error != 0) {
2177		device_printf(sc->msk_dev,
2178		    "failed to create status DMA tag\n");
2179		return (error);
2180	}
2181
2182	/* Allocate DMA'able memory and load the DMA map for status ring. */
2183	error = bus_dmamem_alloc(sc->msk_stat_tag,
2184	    (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
2185	    BUS_DMA_ZERO, &sc->msk_stat_map);
2186	if (error != 0) {
2187		device_printf(sc->msk_dev,
2188		    "failed to allocate DMA'able memory for status ring\n");
2189		return (error);
2190	}
2191
2192	ctx.msk_busaddr = 0;
2193	error = bus_dmamap_load(sc->msk_stat_tag, sc->msk_stat_map,
2194	    sc->msk_stat_ring, stat_sz, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2195	if (error != 0) {
2196		device_printf(sc->msk_dev,
2197		    "failed to load DMA'able memory for status ring\n");
2198		return (error);
2199	}
2200	sc->msk_stat_ring_paddr = ctx.msk_busaddr;
2201
2202	return (0);
2203}
2204
2205static void
2206msk_status_dma_free(struct msk_softc *sc)
2207{
2208
2209	/* Destroy status block. */
2210	if (sc->msk_stat_tag) {
2211		if (sc->msk_stat_map) {
2212			bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
2213			if (sc->msk_stat_ring) {
2214				bus_dmamem_free(sc->msk_stat_tag,
2215				    sc->msk_stat_ring, sc->msk_stat_map);
2216				sc->msk_stat_ring = NULL;
2217			}
2218			sc->msk_stat_map = NULL;
2219		}
2220		bus_dma_tag_destroy(sc->msk_stat_tag);
2221		sc->msk_stat_tag = NULL;
2222	}
2223}
2224
2225static int
2226msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
2227{
2228	struct msk_dmamap_arg ctx;
2229	struct msk_txdesc *txd;
2230	struct msk_rxdesc *rxd;
2231	bus_size_t rxalign;
2232	int error, i;
2233
2234	/* Create parent DMA tag. */
2235	error = bus_dma_tag_create(
2236		    bus_get_dma_tag(sc_if->msk_if_dev),	/* parent */
2237		    1, 0,			/* alignment, boundary */
2238		    BUS_SPACE_MAXADDR,		/* lowaddr */
2239		    BUS_SPACE_MAXADDR,		/* highaddr */
2240		    NULL, NULL,			/* filter, filterarg */
2241		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
2242		    0,				/* nsegments */
2243		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
2244		    0,				/* flags */
2245		    NULL, NULL,			/* lockfunc, lockarg */
2246		    &sc_if->msk_cdata.msk_parent_tag);
2247	if (error != 0) {
2248		device_printf(sc_if->msk_if_dev,
2249		    "failed to create parent DMA tag\n");
2250		goto fail;
2251	}
2252	/* Create tag for Tx ring. */
2253	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2254		    MSK_RING_ALIGN, 0,		/* alignment, boundary */
2255		    BUS_SPACE_MAXADDR,		/* lowaddr */
2256		    BUS_SPACE_MAXADDR,		/* highaddr */
2257		    NULL, NULL,			/* filter, filterarg */
2258		    MSK_TX_RING_SZ,		/* maxsize */
2259		    1,				/* nsegments */
2260		    MSK_TX_RING_SZ,		/* maxsegsize */
2261		    0,				/* flags */
2262		    NULL, NULL,			/* lockfunc, lockarg */
2263		    &sc_if->msk_cdata.msk_tx_ring_tag);
2264	if (error != 0) {
2265		device_printf(sc_if->msk_if_dev,
2266		    "failed to create Tx ring DMA tag\n");
2267		goto fail;
2268	}
2269
2270	/* Create tag for Rx ring. */
2271	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2272		    MSK_RING_ALIGN, 0,		/* alignment, boundary */
2273		    BUS_SPACE_MAXADDR,		/* lowaddr */
2274		    BUS_SPACE_MAXADDR,		/* highaddr */
2275		    NULL, NULL,			/* filter, filterarg */
2276		    MSK_RX_RING_SZ,		/* maxsize */
2277		    1,				/* nsegments */
2278		    MSK_RX_RING_SZ,		/* maxsegsize */
2279		    0,				/* flags */
2280		    NULL, NULL,			/* lockfunc, lockarg */
2281		    &sc_if->msk_cdata.msk_rx_ring_tag);
2282	if (error != 0) {
2283		device_printf(sc_if->msk_if_dev,
2284		    "failed to create Rx ring DMA tag\n");
2285		goto fail;
2286	}
2287
2288	/* Create tag for Tx buffers. */
2289	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2290		    1, 0,			/* alignment, boundary */
2291		    BUS_SPACE_MAXADDR,		/* lowaddr */
2292		    BUS_SPACE_MAXADDR,		/* highaddr */
2293		    NULL, NULL,			/* filter, filterarg */
2294		    MSK_TSO_MAXSIZE,		/* maxsize */
2295		    MSK_MAXTXSEGS,		/* nsegments */
2296		    MSK_TSO_MAXSGSIZE,		/* maxsegsize */
2297		    0,				/* flags */
2298		    NULL, NULL,			/* lockfunc, lockarg */
2299		    &sc_if->msk_cdata.msk_tx_tag);
2300	if (error != 0) {
2301		device_printf(sc_if->msk_if_dev,
2302		    "failed to create Tx DMA tag\n");
2303		goto fail;
2304	}
2305
2306	rxalign = 1;
2307	/*
2308	 * Workaround hardware hang which seems to happen when Rx buffer
2309	 * is not aligned on multiple of FIFO word(8 bytes).
2310	 */
2311	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
2312		rxalign = MSK_RX_BUF_ALIGN;
2313	/* Create tag for Rx buffers. */
2314	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2315		    rxalign, 0,			/* alignment, boundary */
2316		    BUS_SPACE_MAXADDR,		/* lowaddr */
2317		    BUS_SPACE_MAXADDR,		/* highaddr */
2318		    NULL, NULL,			/* filter, filterarg */
2319		    MCLBYTES,			/* maxsize */
2320		    1,				/* nsegments */
2321		    MCLBYTES,			/* maxsegsize */
2322		    0,				/* flags */
2323		    NULL, NULL,			/* lockfunc, lockarg */
2324		    &sc_if->msk_cdata.msk_rx_tag);
2325	if (error != 0) {
2326		device_printf(sc_if->msk_if_dev,
2327		    "failed to create Rx DMA tag\n");
2328		goto fail;
2329	}
2330
2331	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
2332	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag,
2333	    (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK |
2334	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map);
2335	if (error != 0) {
2336		device_printf(sc_if->msk_if_dev,
2337		    "failed to allocate DMA'able memory for Tx ring\n");
2338		goto fail;
2339	}
2340
2341	ctx.msk_busaddr = 0;
2342	error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag,
2343	    sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring,
2344	    MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2345	if (error != 0) {
2346		device_printf(sc_if->msk_if_dev,
2347		    "failed to load DMA'able memory for Tx ring\n");
2348		goto fail;
2349	}
2350	sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr;
2351
2352	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
2353	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag,
2354	    (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK |
2355	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map);
2356	if (error != 0) {
2357		device_printf(sc_if->msk_if_dev,
2358		    "failed to allocate DMA'able memory for Rx ring\n");
2359		goto fail;
2360	}
2361
2362	ctx.msk_busaddr = 0;
2363	error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag,
2364	    sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring,
2365	    MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2366	if (error != 0) {
2367		device_printf(sc_if->msk_if_dev,
2368		    "failed to load DMA'able memory for Rx ring\n");
2369		goto fail;
2370	}
2371	sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr;
2372
2373	/* Create DMA maps for Tx buffers. */
2374	for (i = 0; i < MSK_TX_RING_CNT; i++) {
2375		txd = &sc_if->msk_cdata.msk_txdesc[i];
2376		txd->tx_m = NULL;
2377		txd->tx_dmamap = NULL;
2378		error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0,
2379		    &txd->tx_dmamap);
2380		if (error != 0) {
2381			device_printf(sc_if->msk_if_dev,
2382			    "failed to create Tx dmamap\n");
2383			goto fail;
2384		}
2385	}
2386	/* Create DMA maps for Rx buffers. */
2387	if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2388	    &sc_if->msk_cdata.msk_rx_sparemap)) != 0) {
2389		device_printf(sc_if->msk_if_dev,
2390		    "failed to create spare Rx dmamap\n");
2391		goto fail;
2392	}
2393	for (i = 0; i < MSK_RX_RING_CNT; i++) {
2394		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2395		rxd->rx_m = NULL;
2396		rxd->rx_dmamap = NULL;
2397		error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2398		    &rxd->rx_dmamap);
2399		if (error != 0) {
2400			device_printf(sc_if->msk_if_dev,
2401			    "failed to create Rx dmamap\n");
2402			goto fail;
2403		}
2404	}
2405
2406fail:
2407	return (error);
2408}
2409
2410static int
2411msk_rx_dma_jalloc(struct msk_if_softc *sc_if)
2412{
2413	struct msk_dmamap_arg ctx;
2414	struct msk_rxdesc *jrxd;
2415	bus_size_t rxalign;
2416	int error, i;
2417
2418	if (jumbo_disable != 0 || (sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
2419		sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
2420		device_printf(sc_if->msk_if_dev,
2421		    "disabling jumbo frame support\n");
2422		return (0);
2423	}
2424	/* Create tag for jumbo Rx ring. */
2425	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2426		    MSK_RING_ALIGN, 0,		/* alignment, boundary */
2427		    BUS_SPACE_MAXADDR,		/* lowaddr */
2428		    BUS_SPACE_MAXADDR,		/* highaddr */
2429		    NULL, NULL,			/* filter, filterarg */
2430		    MSK_JUMBO_RX_RING_SZ,	/* maxsize */
2431		    1,				/* nsegments */
2432		    MSK_JUMBO_RX_RING_SZ,	/* maxsegsize */
2433		    0,				/* flags */
2434		    NULL, NULL,			/* lockfunc, lockarg */
2435		    &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2436	if (error != 0) {
2437		device_printf(sc_if->msk_if_dev,
2438		    "failed to create jumbo Rx ring DMA tag\n");
2439		goto jumbo_fail;
2440	}
2441
2442	rxalign = 1;
2443	/*
2444	 * Workaround hardware hang which seems to happen when Rx buffer
2445	 * is not aligned on multiple of FIFO word(8 bytes).
2446	 */
2447	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
2448		rxalign = MSK_RX_BUF_ALIGN;
2449	/* Create tag for jumbo Rx buffers. */
2450	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2451		    rxalign, 0,			/* alignment, boundary */
2452		    BUS_SPACE_MAXADDR,		/* lowaddr */
2453		    BUS_SPACE_MAXADDR,		/* highaddr */
2454		    NULL, NULL,			/* filter, filterarg */
2455		    MJUM9BYTES,			/* maxsize */
2456		    1,				/* nsegments */
2457		    MJUM9BYTES,			/* maxsegsize */
2458		    0,				/* flags */
2459		    NULL, NULL,			/* lockfunc, lockarg */
2460		    &sc_if->msk_cdata.msk_jumbo_rx_tag);
2461	if (error != 0) {
2462		device_printf(sc_if->msk_if_dev,
2463		    "failed to create jumbo Rx DMA tag\n");
2464		goto jumbo_fail;
2465	}
2466
2467	/* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
2468	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2469	    (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
2470	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2471	    &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2472	if (error != 0) {
2473		device_printf(sc_if->msk_if_dev,
2474		    "failed to allocate DMA'able memory for jumbo Rx ring\n");
2475		goto jumbo_fail;
2476	}
2477
2478	ctx.msk_busaddr = 0;
2479	error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2480	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
2481	    sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
2482	    msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2483	if (error != 0) {
2484		device_printf(sc_if->msk_if_dev,
2485		    "failed to load DMA'able memory for jumbo Rx ring\n");
2486		goto jumbo_fail;
2487	}
2488	sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
2489
2490	/* Create DMA maps for jumbo Rx buffers. */
2491	if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2492	    &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
2493		device_printf(sc_if->msk_if_dev,
2494		    "failed to create spare jumbo Rx dmamap\n");
2495		goto jumbo_fail;
2496	}
2497	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2498		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2499		jrxd->rx_m = NULL;
2500		jrxd->rx_dmamap = NULL;
2501		error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2502		    &jrxd->rx_dmamap);
2503		if (error != 0) {
2504			device_printf(sc_if->msk_if_dev,
2505			    "failed to create jumbo Rx dmamap\n");
2506			goto jumbo_fail;
2507		}
2508	}
2509
2510	return (0);
2511
2512jumbo_fail:
2513	msk_rx_dma_jfree(sc_if);
2514	device_printf(sc_if->msk_if_dev, "disabling jumbo frame support "
2515	    "due to resource shortage\n");
2516	sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
2517	return (error);
2518}
2519
2520static void
2521msk_txrx_dma_free(struct msk_if_softc *sc_if)
2522{
2523	struct msk_txdesc *txd;
2524	struct msk_rxdesc *rxd;
2525	int i;
2526
2527	/* Tx ring. */
2528	if (sc_if->msk_cdata.msk_tx_ring_tag) {
2529		if (sc_if->msk_cdata.msk_tx_ring_map)
2530			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag,
2531			    sc_if->msk_cdata.msk_tx_ring_map);
2532		if (sc_if->msk_cdata.msk_tx_ring_map &&
2533		    sc_if->msk_rdata.msk_tx_ring)
2534			bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag,
2535			    sc_if->msk_rdata.msk_tx_ring,
2536			    sc_if->msk_cdata.msk_tx_ring_map);
2537		sc_if->msk_rdata.msk_tx_ring = NULL;
2538		sc_if->msk_cdata.msk_tx_ring_map = NULL;
2539		bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag);
2540		sc_if->msk_cdata.msk_tx_ring_tag = NULL;
2541	}
2542	/* Rx ring. */
2543	if (sc_if->msk_cdata.msk_rx_ring_tag) {
2544		if (sc_if->msk_cdata.msk_rx_ring_map)
2545			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag,
2546			    sc_if->msk_cdata.msk_rx_ring_map);
2547		if (sc_if->msk_cdata.msk_rx_ring_map &&
2548		    sc_if->msk_rdata.msk_rx_ring)
2549			bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag,
2550			    sc_if->msk_rdata.msk_rx_ring,
2551			    sc_if->msk_cdata.msk_rx_ring_map);
2552		sc_if->msk_rdata.msk_rx_ring = NULL;
2553		sc_if->msk_cdata.msk_rx_ring_map = NULL;
2554		bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag);
2555		sc_if->msk_cdata.msk_rx_ring_tag = NULL;
2556	}
2557	/* Tx buffers. */
2558	if (sc_if->msk_cdata.msk_tx_tag) {
2559		for (i = 0; i < MSK_TX_RING_CNT; i++) {
2560			txd = &sc_if->msk_cdata.msk_txdesc[i];
2561			if (txd->tx_dmamap) {
2562				bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
2563				    txd->tx_dmamap);
2564				txd->tx_dmamap = NULL;
2565			}
2566		}
2567		bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
2568		sc_if->msk_cdata.msk_tx_tag = NULL;
2569	}
2570	/* Rx buffers. */
2571	if (sc_if->msk_cdata.msk_rx_tag) {
2572		for (i = 0; i < MSK_RX_RING_CNT; i++) {
2573			rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2574			if (rxd->rx_dmamap) {
2575				bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2576				    rxd->rx_dmamap);
2577				rxd->rx_dmamap = NULL;
2578			}
2579		}
2580		if (sc_if->msk_cdata.msk_rx_sparemap) {
2581			bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2582			    sc_if->msk_cdata.msk_rx_sparemap);
2583			sc_if->msk_cdata.msk_rx_sparemap = 0;
2584		}
2585		bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2586		sc_if->msk_cdata.msk_rx_tag = NULL;
2587	}
2588	if (sc_if->msk_cdata.msk_parent_tag) {
2589		bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
2590		sc_if->msk_cdata.msk_parent_tag = NULL;
2591	}
2592}
2593
2594static void
2595msk_rx_dma_jfree(struct msk_if_softc *sc_if)
2596{
2597	struct msk_rxdesc *jrxd;
2598	int i;
2599
2600	/* Jumbo Rx ring. */
2601	if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
2602		if (sc_if->msk_cdata.msk_jumbo_rx_ring_map)
2603			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2604			    sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2605		if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
2606		    sc_if->msk_rdata.msk_jumbo_rx_ring)
2607			bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2608			    sc_if->msk_rdata.msk_jumbo_rx_ring,
2609			    sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2610		sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
2611		sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
2612		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2613		sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
2614	}
2615	/* Jumbo Rx buffers. */
2616	if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
2617		for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2618			jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2619			if (jrxd->rx_dmamap) {
2620				bus_dmamap_destroy(
2621				    sc_if->msk_cdata.msk_jumbo_rx_tag,
2622				    jrxd->rx_dmamap);
2623				jrxd->rx_dmamap = NULL;
2624			}
2625		}
2626		if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
2627			bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
2628			    sc_if->msk_cdata.msk_jumbo_rx_sparemap);
2629			sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
2630		}
2631		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
2632		sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
2633	}
2634}
2635
2636static int
2637msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
2638{
2639	struct msk_txdesc *txd, *txd_last;
2640	struct msk_tx_desc *tx_le;
2641	struct mbuf *m;
2642	bus_dmamap_t map;
2643	bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
2644	uint32_t control, csum, prod, si;
2645	uint16_t offset, tcp_offset, tso_mtu;
2646	int error, i, nseg, tso;
2647
2648	MSK_IF_LOCK_ASSERT(sc_if);
2649
2650	tcp_offset = offset = 0;
2651	m = *m_head;
2652	if (((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
2653	    (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) ||
2654	    ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
2655	    (m->m_pkthdr.csum_flags & CSUM_TSO) != 0)) {
2656		/*
2657		 * Since mbuf has no protocol specific structure information
2658		 * in it we have to inspect protocol information here to
2659		 * setup TSO and checksum offload. I don't know why Marvell
2660		 * made a such decision in chip design because other GigE
2661		 * hardwares normally takes care of all these chores in
2662		 * hardware. However, TSO performance of Yukon II is very
2663		 * good such that it's worth to implement it.
2664		 */
2665		struct ether_header *eh;
2666		struct ip *ip;
2667		struct tcphdr *tcp;
2668
2669		if (M_WRITABLE(m) == 0) {
2670			/* Get a writable copy. */
2671			m = m_dup(*m_head, M_NOWAIT);
2672			m_freem(*m_head);
2673			if (m == NULL) {
2674				*m_head = NULL;
2675				return (ENOBUFS);
2676			}
2677			*m_head = m;
2678		}
2679
2680		offset = sizeof(struct ether_header);
2681		m = m_pullup(m, offset);
2682		if (m == NULL) {
2683			*m_head = NULL;
2684			return (ENOBUFS);
2685		}
2686		eh = mtod(m, struct ether_header *);
2687		/* Check if hardware VLAN insertion is off. */
2688		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2689			offset = sizeof(struct ether_vlan_header);
2690			m = m_pullup(m, offset);
2691			if (m == NULL) {
2692				*m_head = NULL;
2693				return (ENOBUFS);
2694			}
2695		}
2696		m = m_pullup(m, offset + sizeof(struct ip));
2697		if (m == NULL) {
2698			*m_head = NULL;
2699			return (ENOBUFS);
2700		}
2701		ip = (struct ip *)(mtod(m, char *) + offset);
2702		offset += (ip->ip_hl << 2);
2703		tcp_offset = offset;
2704		if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2705			m = m_pullup(m, offset + sizeof(struct tcphdr));
2706			if (m == NULL) {
2707				*m_head = NULL;
2708				return (ENOBUFS);
2709			}
2710			tcp = (struct tcphdr *)(mtod(m, char *) + offset);
2711			offset += (tcp->th_off << 2);
2712		} else if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
2713		    (m->m_pkthdr.len < MSK_MIN_FRAMELEN) &&
2714		    (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
2715			/*
2716			 * It seems that Yukon II has Tx checksum offload bug
2717			 * for small TCP packets that's less than 60 bytes in
2718			 * size (e.g. TCP window probe packet, pure ACK packet).
2719			 * Common work around like padding with zeros to make
2720			 * the frame minimum ethernet frame size didn't work at
2721			 * all.
2722			 * Instead of disabling checksum offload completely we
2723			 * resort to S/W checksum routine when we encounter
2724			 * short TCP frames.
2725			 * Short UDP packets appear to be handled correctly by
2726			 * Yukon II. Also I assume this bug does not happen on
2727			 * controllers that use newer descriptor format or
2728			 * automatic Tx checksum calculation.
2729			 */
2730			m = m_pullup(m, offset + sizeof(struct tcphdr));
2731			if (m == NULL) {
2732				*m_head = NULL;
2733				return (ENOBUFS);
2734			}
2735			*(uint16_t *)(m->m_data + offset +
2736			    m->m_pkthdr.csum_data) = in_cksum_skip(m,
2737			    m->m_pkthdr.len, offset);
2738			m->m_pkthdr.csum_flags &= ~CSUM_TCP;
2739		}
2740		*m_head = m;
2741	}
2742
2743	prod = sc_if->msk_cdata.msk_tx_prod;
2744	txd = &sc_if->msk_cdata.msk_txdesc[prod];
2745	txd_last = txd;
2746	map = txd->tx_dmamap;
2747	error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map,
2748	    *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2749	if (error == EFBIG) {
2750		m = m_collapse(*m_head, M_NOWAIT, MSK_MAXTXSEGS);
2751		if (m == NULL) {
2752			m_freem(*m_head);
2753			*m_head = NULL;
2754			return (ENOBUFS);
2755		}
2756		*m_head = m;
2757		error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag,
2758		    map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2759		if (error != 0) {
2760			m_freem(*m_head);
2761			*m_head = NULL;
2762			return (error);
2763		}
2764	} else if (error != 0)
2765		return (error);
2766	if (nseg == 0) {
2767		m_freem(*m_head);
2768		*m_head = NULL;
2769		return (EIO);
2770	}
2771
2772	/* Check number of available descriptors. */
2773	if (sc_if->msk_cdata.msk_tx_cnt + nseg >=
2774	    (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) {
2775		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
2776		return (ENOBUFS);
2777	}
2778
2779	control = 0;
2780	tso = 0;
2781	tx_le = NULL;
2782
2783	/* Check TSO support. */
2784	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2785		if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
2786			tso_mtu = m->m_pkthdr.tso_segsz;
2787		else
2788			tso_mtu = offset + m->m_pkthdr.tso_segsz;
2789		if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) {
2790			tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2791			tx_le->msk_addr = htole32(tso_mtu);
2792			if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
2793				tx_le->msk_control = htole32(OP_MSS | HW_OWNER);
2794			else
2795				tx_le->msk_control =
2796				    htole32(OP_LRGLEN | HW_OWNER);
2797			sc_if->msk_cdata.msk_tx_cnt++;
2798			MSK_INC(prod, MSK_TX_RING_CNT);
2799			sc_if->msk_cdata.msk_tso_mtu = tso_mtu;
2800		}
2801		tso++;
2802	}
2803	/* Check if we have a VLAN tag to insert. */
2804	if ((m->m_flags & M_VLANTAG) != 0) {
2805		if (tx_le == NULL) {
2806			tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2807			tx_le->msk_addr = htole32(0);
2808			tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
2809			    htons(m->m_pkthdr.ether_vtag));
2810			sc_if->msk_cdata.msk_tx_cnt++;
2811			MSK_INC(prod, MSK_TX_RING_CNT);
2812		} else {
2813			tx_le->msk_control |= htole32(OP_VLAN |
2814			    htons(m->m_pkthdr.ether_vtag));
2815		}
2816		control |= INS_VLAN;
2817	}
2818	/* Check if we have to handle checksum offload. */
2819	if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) {
2820		if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) != 0)
2821			control |= CALSUM;
2822		else {
2823			control |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
2824			if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2825				control |= UDPTCP;
2826			/* Checksum write position. */
2827			csum = (tcp_offset + m->m_pkthdr.csum_data) & 0xffff;
2828			/* Checksum start position. */
2829			csum |= (uint32_t)tcp_offset << 16;
2830			if (csum != sc_if->msk_cdata.msk_last_csum) {
2831				tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2832				tx_le->msk_addr = htole32(csum);
2833				tx_le->msk_control = htole32(1 << 16 |
2834				    (OP_TCPLISW | HW_OWNER));
2835				sc_if->msk_cdata.msk_tx_cnt++;
2836				MSK_INC(prod, MSK_TX_RING_CNT);
2837				sc_if->msk_cdata.msk_last_csum = csum;
2838			}
2839		}
2840	}
2841
2842#ifdef MSK_64BIT_DMA
2843	if (MSK_ADDR_HI(txsegs[0].ds_addr) !=
2844	    sc_if->msk_cdata.msk_tx_high_addr) {
2845		sc_if->msk_cdata.msk_tx_high_addr =
2846		    MSK_ADDR_HI(txsegs[0].ds_addr);
2847		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2848		tx_le->msk_addr = htole32(MSK_ADDR_HI(txsegs[0].ds_addr));
2849		tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
2850		sc_if->msk_cdata.msk_tx_cnt++;
2851		MSK_INC(prod, MSK_TX_RING_CNT);
2852	}
2853#endif
2854	si = prod;
2855	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2856	tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
2857	if (tso == 0)
2858		tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2859		    OP_PACKET);
2860	else
2861		tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2862		    OP_LARGESEND);
2863	sc_if->msk_cdata.msk_tx_cnt++;
2864	MSK_INC(prod, MSK_TX_RING_CNT);
2865
2866	for (i = 1; i < nseg; i++) {
2867		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2868#ifdef MSK_64BIT_DMA
2869		if (MSK_ADDR_HI(txsegs[i].ds_addr) !=
2870		    sc_if->msk_cdata.msk_tx_high_addr) {
2871			sc_if->msk_cdata.msk_tx_high_addr =
2872			    MSK_ADDR_HI(txsegs[i].ds_addr);
2873			tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2874			tx_le->msk_addr =
2875			    htole32(MSK_ADDR_HI(txsegs[i].ds_addr));
2876			tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
2877			sc_if->msk_cdata.msk_tx_cnt++;
2878			MSK_INC(prod, MSK_TX_RING_CNT);
2879			tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2880		}
2881#endif
2882		tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
2883		tx_le->msk_control = htole32(txsegs[i].ds_len | control |
2884		    OP_BUFFER | HW_OWNER);
2885		sc_if->msk_cdata.msk_tx_cnt++;
2886		MSK_INC(prod, MSK_TX_RING_CNT);
2887	}
2888	/* Update producer index. */
2889	sc_if->msk_cdata.msk_tx_prod = prod;
2890
2891	/* Set EOP on the last descriptor. */
2892	prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
2893	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2894	tx_le->msk_control |= htole32(EOP);
2895
2896	/* Turn the first descriptor ownership to hardware. */
2897	tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
2898	tx_le->msk_control |= htole32(HW_OWNER);
2899
2900	txd = &sc_if->msk_cdata.msk_txdesc[prod];
2901	map = txd_last->tx_dmamap;
2902	txd_last->tx_dmamap = txd->tx_dmamap;
2903	txd->tx_dmamap = map;
2904	txd->tx_m = m;
2905
2906	/* Sync descriptors. */
2907	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
2908	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
2909	    sc_if->msk_cdata.msk_tx_ring_map,
2910	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2911
2912	return (0);
2913}
2914
2915static void
2916msk_start(struct ifnet *ifp)
2917{
2918	struct msk_if_softc *sc_if;
2919
2920	sc_if = ifp->if_softc;
2921	MSK_IF_LOCK(sc_if);
2922	msk_start_locked(ifp);
2923	MSK_IF_UNLOCK(sc_if);
2924}
2925
2926static void
2927msk_start_locked(struct ifnet *ifp)
2928{
2929	struct msk_if_softc *sc_if;
2930	struct mbuf *m_head;
2931	int enq;
2932
2933	sc_if = ifp->if_softc;
2934	MSK_IF_LOCK_ASSERT(sc_if);
2935
2936	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2937	    IFF_DRV_RUNNING || (sc_if->msk_flags & MSK_FLAG_LINK) == 0)
2938		return;
2939
2940	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2941	    sc_if->msk_cdata.msk_tx_cnt <
2942	    (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
2943		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2944		if (m_head == NULL)
2945			break;
2946		/*
2947		 * Pack the data into the transmit ring. If we
2948		 * don't have room, set the OACTIVE flag and wait
2949		 * for the NIC to drain the ring.
2950		 */
2951		if (msk_encap(sc_if, &m_head) != 0) {
2952			if (m_head == NULL)
2953				break;
2954			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2955			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2956			break;
2957		}
2958
2959		enq++;
2960		/*
2961		 * If there's a BPF listener, bounce a copy of this frame
2962		 * to him.
2963		 */
2964		ETHER_BPF_MTAP(ifp, m_head);
2965	}
2966
2967	if (enq > 0) {
2968		/* Transmit */
2969		CSR_WRITE_2(sc_if->msk_softc,
2970		    Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
2971		    sc_if->msk_cdata.msk_tx_prod);
2972
2973		/* Set a timeout in case the chip goes out to lunch. */
2974		sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT;
2975	}
2976}
2977
2978static void
2979msk_watchdog(struct msk_if_softc *sc_if)
2980{
2981	struct ifnet *ifp;
2982
2983	MSK_IF_LOCK_ASSERT(sc_if);
2984
2985	if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer)
2986		return;
2987	ifp = sc_if->msk_ifp;
2988	if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) {
2989		if (bootverbose)
2990			if_printf(sc_if->msk_ifp, "watchdog timeout "
2991			   "(missed link)\n");
2992		ifp->if_oerrors++;
2993		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2994		msk_init_locked(sc_if);
2995		return;
2996	}
2997
2998	if_printf(ifp, "watchdog timeout\n");
2999	ifp->if_oerrors++;
3000	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3001	msk_init_locked(sc_if);
3002	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3003		msk_start_locked(ifp);
3004}
3005
3006static int
3007mskc_shutdown(device_t dev)
3008{
3009	struct msk_softc *sc;
3010	int i;
3011
3012	sc = device_get_softc(dev);
3013	MSK_LOCK(sc);
3014	for (i = 0; i < sc->msk_num_port; i++) {
3015		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
3016		    ((sc->msk_if[i]->msk_ifp->if_drv_flags &
3017		    IFF_DRV_RUNNING) != 0))
3018			msk_stop(sc->msk_if[i]);
3019	}
3020	MSK_UNLOCK(sc);
3021
3022	/* Put hardware reset. */
3023	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
3024	return (0);
3025}
3026
3027static int
3028mskc_suspend(device_t dev)
3029{
3030	struct msk_softc *sc;
3031	int i;
3032
3033	sc = device_get_softc(dev);
3034
3035	MSK_LOCK(sc);
3036
3037	for (i = 0; i < sc->msk_num_port; i++) {
3038		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
3039		    ((sc->msk_if[i]->msk_ifp->if_drv_flags &
3040		    IFF_DRV_RUNNING) != 0))
3041			msk_stop(sc->msk_if[i]);
3042	}
3043
3044	/* Disable all interrupts. */
3045	CSR_WRITE_4(sc, B0_IMSK, 0);
3046	CSR_READ_4(sc, B0_IMSK);
3047	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
3048	CSR_READ_4(sc, B0_HWE_IMSK);
3049
3050	msk_phy_power(sc, MSK_PHY_POWERDOWN);
3051
3052	/* Put hardware reset. */
3053	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
3054	sc->msk_pflags |= MSK_FLAG_SUSPEND;
3055
3056	MSK_UNLOCK(sc);
3057
3058	return (0);
3059}
3060
3061static int
3062mskc_resume(device_t dev)
3063{
3064	struct msk_softc *sc;
3065	int i;
3066
3067	sc = device_get_softc(dev);
3068
3069	MSK_LOCK(sc);
3070
3071	CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
3072	mskc_reset(sc);
3073	for (i = 0; i < sc->msk_num_port; i++) {
3074		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
3075		    ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) {
3076			sc->msk_if[i]->msk_ifp->if_drv_flags &=
3077			    ~IFF_DRV_RUNNING;
3078			msk_init_locked(sc->msk_if[i]);
3079		}
3080	}
3081	sc->msk_pflags &= ~MSK_FLAG_SUSPEND;
3082
3083	MSK_UNLOCK(sc);
3084
3085	return (0);
3086}
3087
3088#ifndef __NO_STRICT_ALIGNMENT
3089static __inline void
3090msk_fixup_rx(struct mbuf *m)
3091{
3092        int i;
3093        uint16_t *src, *dst;
3094
3095	src = mtod(m, uint16_t *);
3096	dst = src - 3;
3097
3098	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
3099		*dst++ = *src++;
3100
3101	m->m_data -= (MSK_RX_BUF_ALIGN - ETHER_ALIGN);
3102}
3103#endif
3104
3105static __inline void
3106msk_rxcsum(struct msk_if_softc *sc_if, uint32_t control, struct mbuf *m)
3107{
3108	struct ether_header *eh;
3109	struct ip *ip;
3110	struct udphdr *uh;
3111	int32_t hlen, len, pktlen, temp32;
3112	uint16_t csum, *opts;
3113
3114	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0) {
3115		if ((control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) {
3116			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3117			if ((control & CSS_IPV4_CSUM_OK) != 0)
3118				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3119			if ((control & (CSS_TCP | CSS_UDP)) != 0 &&
3120			    (control & (CSS_TCPUDP_CSUM_OK)) != 0) {
3121				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3122				    CSUM_PSEUDO_HDR;
3123				m->m_pkthdr.csum_data = 0xffff;
3124			}
3125		}
3126		return;
3127	}
3128	/*
3129	 * Marvell Yukon controllers that support OP_RXCHKS has known
3130	 * to have various Rx checksum offloading bugs. These
3131	 * controllers can be configured to compute simple checksum
3132	 * at two different positions. So we can compute IP and TCP/UDP
3133	 * checksum at the same time. We intentionally have controller
3134	 * compute TCP/UDP checksum twice by specifying the same
3135	 * checksum start position and compare the result. If the value
3136	 * is different it would indicate the hardware logic was wrong.
3137	 */
3138	if ((sc_if->msk_csum & 0xFFFF) != (sc_if->msk_csum >> 16)) {
3139		if (bootverbose)
3140			device_printf(sc_if->msk_if_dev,
3141			    "Rx checksum value mismatch!\n");
3142		return;
3143	}
3144	pktlen = m->m_pkthdr.len;
3145	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
3146		return;
3147	eh = mtod(m, struct ether_header *);
3148	if (eh->ether_type != htons(ETHERTYPE_IP))
3149		return;
3150	ip = (struct ip *)(eh + 1);
3151	if (ip->ip_v != IPVERSION)
3152		return;
3153
3154	hlen = ip->ip_hl << 2;
3155	pktlen -= sizeof(struct ether_header);
3156	if (hlen < sizeof(struct ip))
3157		return;
3158	if (ntohs(ip->ip_len) < hlen)
3159		return;
3160	if (ntohs(ip->ip_len) != pktlen)
3161		return;
3162	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
3163		return;	/* can't handle fragmented packet. */
3164
3165	switch (ip->ip_p) {
3166	case IPPROTO_TCP:
3167		if (pktlen < (hlen + sizeof(struct tcphdr)))
3168			return;
3169		break;
3170	case IPPROTO_UDP:
3171		if (pktlen < (hlen + sizeof(struct udphdr)))
3172			return;
3173		uh = (struct udphdr *)((caddr_t)ip + hlen);
3174		if (uh->uh_sum == 0)
3175			return; /* no checksum */
3176		break;
3177	default:
3178		return;
3179	}
3180	csum = bswap16(sc_if->msk_csum & 0xFFFF);
3181	/* Checksum fixup for IP options. */
3182	len = hlen - sizeof(struct ip);
3183	if (len > 0) {
3184		opts = (uint16_t *)(ip + 1);
3185		for (; len > 0; len -= sizeof(uint16_t), opts++) {
3186			temp32 = csum - *opts;
3187			temp32 = (temp32 >> 16) + (temp32 & 65535);
3188			csum = temp32 & 65535;
3189		}
3190	}
3191	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
3192	m->m_pkthdr.csum_data = csum;
3193}
3194
3195static void
3196msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
3197    int len)
3198{
3199	struct mbuf *m;
3200	struct ifnet *ifp;
3201	struct msk_rxdesc *rxd;
3202	int cons, rxlen;
3203
3204	ifp = sc_if->msk_ifp;
3205
3206	MSK_IF_LOCK_ASSERT(sc_if);
3207
3208	cons = sc_if->msk_cdata.msk_rx_cons;
3209	do {
3210		rxlen = status >> 16;
3211		if ((status & GMR_FS_VLAN) != 0 &&
3212		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3213			rxlen -= ETHER_VLAN_ENCAP_LEN;
3214		if ((sc_if->msk_flags & MSK_FLAG_NORXCHK) != 0) {
3215			/*
3216			 * For controllers that returns bogus status code
3217			 * just do minimal check and let upper stack
3218			 * handle this frame.
3219			 */
3220			if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) {
3221				ifp->if_ierrors++;
3222				msk_discard_rxbuf(sc_if, cons);
3223				break;
3224			}
3225		} else if (len > sc_if->msk_framesize ||
3226		    ((status & GMR_FS_ANY_ERR) != 0) ||
3227		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
3228			/* Don't count flow-control packet as errors. */
3229			if ((status & GMR_FS_GOOD_FC) == 0)
3230				ifp->if_ierrors++;
3231			msk_discard_rxbuf(sc_if, cons);
3232			break;
3233		}
3234#ifdef MSK_64BIT_DMA
3235		rxd = &sc_if->msk_cdata.msk_rxdesc[(cons + 1) %
3236		    MSK_RX_RING_CNT];
3237#else
3238		rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
3239#endif
3240		m = rxd->rx_m;
3241		if (msk_newbuf(sc_if, cons) != 0) {
3242			ifp->if_iqdrops++;
3243			/* Reuse old buffer. */
3244			msk_discard_rxbuf(sc_if, cons);
3245			break;
3246		}
3247		m->m_pkthdr.rcvif = ifp;
3248		m->m_pkthdr.len = m->m_len = len;
3249#ifndef __NO_STRICT_ALIGNMENT
3250		if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
3251			msk_fixup_rx(m);
3252#endif
3253		ifp->if_ipackets++;
3254		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
3255			msk_rxcsum(sc_if, control, m);
3256		/* Check for VLAN tagged packets. */
3257		if ((status & GMR_FS_VLAN) != 0 &&
3258		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3259			m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
3260			m->m_flags |= M_VLANTAG;
3261		}
3262		MSK_IF_UNLOCK(sc_if);
3263		(*ifp->if_input)(ifp, m);
3264		MSK_IF_LOCK(sc_if);
3265	} while (0);
3266
3267	MSK_RX_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
3268	MSK_RX_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
3269}
3270
3271static void
3272msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
3273    int len)
3274{
3275	struct mbuf *m;
3276	struct ifnet *ifp;
3277	struct msk_rxdesc *jrxd;
3278	int cons, rxlen;
3279
3280	ifp = sc_if->msk_ifp;
3281
3282	MSK_IF_LOCK_ASSERT(sc_if);
3283
3284	cons = sc_if->msk_cdata.msk_rx_cons;
3285	do {
3286		rxlen = status >> 16;
3287		if ((status & GMR_FS_VLAN) != 0 &&
3288		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3289			rxlen -= ETHER_VLAN_ENCAP_LEN;
3290		if (len > sc_if->msk_framesize ||
3291		    ((status & GMR_FS_ANY_ERR) != 0) ||
3292		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
3293			/* Don't count flow-control packet as errors. */
3294			if ((status & GMR_FS_GOOD_FC) == 0)
3295				ifp->if_ierrors++;
3296			msk_discard_jumbo_rxbuf(sc_if, cons);
3297			break;
3298		}
3299#ifdef MSK_64BIT_DMA
3300		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[(cons + 1) %
3301		    MSK_JUMBO_RX_RING_CNT];
3302#else
3303		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
3304#endif
3305		m = jrxd->rx_m;
3306		if (msk_jumbo_newbuf(sc_if, cons) != 0) {
3307			ifp->if_iqdrops++;
3308			/* Reuse old buffer. */
3309			msk_discard_jumbo_rxbuf(sc_if, cons);
3310			break;
3311		}
3312		m->m_pkthdr.rcvif = ifp;
3313		m->m_pkthdr.len = m->m_len = len;
3314#ifndef __NO_STRICT_ALIGNMENT
3315		if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
3316			msk_fixup_rx(m);
3317#endif
3318		ifp->if_ipackets++;
3319		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
3320			msk_rxcsum(sc_if, control, m);
3321		/* Check for VLAN tagged packets. */
3322		if ((status & GMR_FS_VLAN) != 0 &&
3323		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3324			m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
3325			m->m_flags |= M_VLANTAG;
3326		}
3327		MSK_IF_UNLOCK(sc_if);
3328		(*ifp->if_input)(ifp, m);
3329		MSK_IF_LOCK(sc_if);
3330	} while (0);
3331
3332	MSK_RX_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
3333	MSK_RX_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
3334}
3335
3336static void
3337msk_txeof(struct msk_if_softc *sc_if, int idx)
3338{
3339	struct msk_txdesc *txd;
3340	struct msk_tx_desc *cur_tx;
3341	struct ifnet *ifp;
3342	uint32_t control;
3343	int cons, prog;
3344
3345	MSK_IF_LOCK_ASSERT(sc_if);
3346
3347	ifp = sc_if->msk_ifp;
3348
3349	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
3350	    sc_if->msk_cdata.msk_tx_ring_map,
3351	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3352	/*
3353	 * Go through our tx ring and free mbufs for those
3354	 * frames that have been sent.
3355	 */
3356	cons = sc_if->msk_cdata.msk_tx_cons;
3357	prog = 0;
3358	for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
3359		if (sc_if->msk_cdata.msk_tx_cnt <= 0)
3360			break;
3361		prog++;
3362		cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
3363		control = le32toh(cur_tx->msk_control);
3364		sc_if->msk_cdata.msk_tx_cnt--;
3365		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3366		if ((control & EOP) == 0)
3367			continue;
3368		txd = &sc_if->msk_cdata.msk_txdesc[cons];
3369		bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap,
3370		    BUS_DMASYNC_POSTWRITE);
3371		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);
3372
3373		ifp->if_opackets++;
3374		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
3375		    __func__));
3376		m_freem(txd->tx_m);
3377		txd->tx_m = NULL;
3378	}
3379
3380	if (prog > 0) {
3381		sc_if->msk_cdata.msk_tx_cons = cons;
3382		if (sc_if->msk_cdata.msk_tx_cnt == 0)
3383			sc_if->msk_watchdog_timer = 0;
3384		/* No need to sync LEs as we didn't update LEs. */
3385	}
3386}
3387
3388static void
3389msk_tick(void *xsc_if)
3390{
3391	struct msk_if_softc *sc_if;
3392	struct mii_data *mii;
3393
3394	sc_if = xsc_if;
3395
3396	MSK_IF_LOCK_ASSERT(sc_if);
3397
3398	mii = device_get_softc(sc_if->msk_miibus);
3399
3400	mii_tick(mii);
3401	if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0)
3402		msk_miibus_statchg(sc_if->msk_if_dev);
3403	msk_handle_events(sc_if->msk_softc);
3404	msk_watchdog(sc_if);
3405	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3406}
3407
3408static void
3409msk_intr_phy(struct msk_if_softc *sc_if)
3410{
3411	uint16_t status;
3412
3413	msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3414	status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3415	/* Handle FIFO Underrun/Overflow? */
3416	if ((status & PHY_M_IS_FIFO_ERROR))
3417		device_printf(sc_if->msk_if_dev,
3418		    "PHY FIFO underrun/overflow.\n");
3419}
3420
3421static void
3422msk_intr_gmac(struct msk_if_softc *sc_if)
3423{
3424	struct msk_softc *sc;
3425	uint8_t status;
3426
3427	sc = sc_if->msk_softc;
3428	status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3429
3430	/* GMAC Rx FIFO overrun. */
3431	if ((status & GM_IS_RX_FF_OR) != 0)
3432		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3433		    GMF_CLI_RX_FO);
3434	/* GMAC Tx FIFO underrun. */
3435	if ((status & GM_IS_TX_FF_UR) != 0) {
3436		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3437		    GMF_CLI_TX_FU);
3438		device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
3439		/*
3440		 * XXX
3441		 * In case of Tx underrun, we may need to flush/reset
3442		 * Tx MAC but that would also require resynchronization
3443		 * with status LEs. Reinitializing status LEs would
3444		 * affect other port in dual MAC configuration so it
3445		 * should be avoided as possible as we can.
3446		 * Due to lack of documentation it's all vague guess but
3447		 * it needs more investigation.
3448		 */
3449	}
3450}
3451
3452static void
3453msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
3454{
3455	struct msk_softc *sc;
3456
3457	sc = sc_if->msk_softc;
3458	if ((status & Y2_IS_PAR_RD1) != 0) {
3459		device_printf(sc_if->msk_if_dev,
3460		    "RAM buffer read parity error\n");
3461		/* Clear IRQ. */
3462		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3463		    RI_CLR_RD_PERR);
3464	}
3465	if ((status & Y2_IS_PAR_WR1) != 0) {
3466		device_printf(sc_if->msk_if_dev,
3467		    "RAM buffer write parity error\n");
3468		/* Clear IRQ. */
3469		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3470		    RI_CLR_WR_PERR);
3471	}
3472	if ((status & Y2_IS_PAR_MAC1) != 0) {
3473		device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
3474		/* Clear IRQ. */
3475		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3476		    GMF_CLI_TX_PE);
3477	}
3478	if ((status & Y2_IS_PAR_RX1) != 0) {
3479		device_printf(sc_if->msk_if_dev, "Rx parity error\n");
3480		/* Clear IRQ. */
3481		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
3482	}
3483	if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
3484		device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
3485		/* Clear IRQ. */
3486		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
3487	}
3488}
3489
3490static void
3491msk_intr_hwerr(struct msk_softc *sc)
3492{
3493	uint32_t status;
3494	uint32_t tlphead[4];
3495
3496	status = CSR_READ_4(sc, B0_HWE_ISRC);
3497	/* Time Stamp timer overflow. */
3498	if ((status & Y2_IS_TIST_OV) != 0)
3499		CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
3500	if ((status & Y2_IS_PCI_NEXP) != 0) {
3501		/*
3502		 * PCI Express Error occured which is not described in PEX
3503		 * spec.
3504		 * This error is also mapped either to Master Abort(
3505		 * Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
3506		 * can only be cleared there.
3507                 */
3508		device_printf(sc->msk_dev,
3509		    "PCI Express protocol violation error\n");
3510	}
3511
3512	if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
3513		uint16_t v16;
3514
3515		if ((status & Y2_IS_MST_ERR) != 0)
3516			device_printf(sc->msk_dev,
3517			    "unexpected IRQ Status error\n");
3518		else
3519			device_printf(sc->msk_dev,
3520			    "unexpected IRQ Master error\n");
3521		/* Reset all bits in the PCI status register. */
3522		v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
3523		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3524		pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
3525		    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
3526		    PCIM_STATUS_RTABORT | PCIM_STATUS_MDPERR, 2);
3527		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3528	}
3529
3530	/* Check for PCI Express Uncorrectable Error. */
3531	if ((status & Y2_IS_PCI_EXP) != 0) {
3532		uint32_t v32;
3533
3534		/*
3535		 * On PCI Express bus bridges are called root complexes (RC).
3536		 * PCI Express errors are recognized by the root complex too,
3537		 * which requests the system to handle the problem. After
3538		 * error occurrence it may be that no access to the adapter
3539		 * may be performed any longer.
3540		 */
3541
3542		v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
3543		if ((v32 & PEX_UNSUP_REQ) != 0) {
3544			/* Ignore unsupported request error. */
3545			device_printf(sc->msk_dev,
3546			    "Uncorrectable PCI Express error\n");
3547		}
3548		if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
3549			int i;
3550
3551			/* Get TLP header form Log Registers. */
3552			for (i = 0; i < 4; i++)
3553				tlphead[i] = CSR_PCI_READ_4(sc,
3554				    PEX_HEADER_LOG + i * 4);
3555			/* Check for vendor defined broadcast message. */
3556			if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
3557				sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
3558				CSR_WRITE_4(sc, B0_HWE_IMSK,
3559				    sc->msk_intrhwemask);
3560				CSR_READ_4(sc, B0_HWE_IMSK);
3561			}
3562		}
3563		/* Clear the interrupt. */
3564		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3565		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
3566		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3567	}
3568
3569	if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
3570		msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
3571	if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
3572		msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
3573}
3574
3575static __inline void
3576msk_rxput(struct msk_if_softc *sc_if)
3577{
3578	struct msk_softc *sc;
3579
3580	sc = sc_if->msk_softc;
3581	if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN))
3582		bus_dmamap_sync(
3583		    sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
3584		    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
3585		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3586	else
3587		bus_dmamap_sync(
3588		    sc_if->msk_cdata.msk_rx_ring_tag,
3589		    sc_if->msk_cdata.msk_rx_ring_map,
3590		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3591	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
3592	    PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
3593}
3594
3595static int
3596msk_handle_events(struct msk_softc *sc)
3597{
3598	struct msk_if_softc *sc_if;
3599	int rxput[2];
3600	struct msk_stat_desc *sd;
3601	uint32_t control, status;
3602	int cons, len, port, rxprog;
3603
3604	if (sc->msk_stat_cons == CSR_READ_2(sc, STAT_PUT_IDX))
3605		return (0);
3606
3607	/* Sync status LEs. */
3608	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
3609	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3610
3611	rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
3612	rxprog = 0;
3613	cons = sc->msk_stat_cons;
3614	for (;;) {
3615		sd = &sc->msk_stat_ring[cons];
3616		control = le32toh(sd->msk_control);
3617		if ((control & HW_OWNER) == 0)
3618			break;
3619		control &= ~HW_OWNER;
3620		sd->msk_control = htole32(control);
3621		status = le32toh(sd->msk_status);
3622		len = control & STLE_LEN_MASK;
3623		port = (control >> 16) & 0x01;
3624		sc_if = sc->msk_if[port];
3625		if (sc_if == NULL) {
3626			device_printf(sc->msk_dev, "invalid port opcode "
3627			    "0x%08x\n", control & STLE_OP_MASK);
3628			continue;
3629		}
3630
3631		switch (control & STLE_OP_MASK) {
3632		case OP_RXVLAN:
3633			sc_if->msk_vtag = ntohs(len);
3634			break;
3635		case OP_RXCHKSVLAN:
3636			sc_if->msk_vtag = ntohs(len);
3637			/* FALLTHROUGH */
3638		case OP_RXCHKS:
3639			sc_if->msk_csum = status;
3640			break;
3641		case OP_RXSTAT:
3642			if (!(sc_if->msk_ifp->if_drv_flags & IFF_DRV_RUNNING))
3643				break;
3644			if (sc_if->msk_framesize >
3645			    (MCLBYTES - MSK_RX_BUF_ALIGN))
3646				msk_jumbo_rxeof(sc_if, status, control, len);
3647			else
3648				msk_rxeof(sc_if, status, control, len);
3649			rxprog++;
3650			/*
3651			 * Because there is no way to sync single Rx LE
3652			 * put the DMA sync operation off until the end of
3653			 * event processing.
3654			 */
3655			rxput[port]++;
3656			/* Update prefetch unit if we've passed water mark. */
3657			if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
3658				msk_rxput(sc_if);
3659				rxput[port] = 0;
3660			}
3661			break;
3662		case OP_TXINDEXLE:
3663			if (sc->msk_if[MSK_PORT_A] != NULL)
3664				msk_txeof(sc->msk_if[MSK_PORT_A],
3665				    status & STLE_TXA1_MSKL);
3666			if (sc->msk_if[MSK_PORT_B] != NULL)
3667				msk_txeof(sc->msk_if[MSK_PORT_B],
3668				    ((status & STLE_TXA2_MSKL) >>
3669				    STLE_TXA2_SHIFTL) |
3670				    ((len & STLE_TXA2_MSKH) <<
3671				    STLE_TXA2_SHIFTH));
3672			break;
3673		default:
3674			device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
3675			    control & STLE_OP_MASK);
3676			break;
3677		}
3678		MSK_INC(cons, sc->msk_stat_count);
3679		if (rxprog > sc->msk_process_limit)
3680			break;
3681	}
3682
3683	sc->msk_stat_cons = cons;
3684	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
3685	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3686
3687	if (rxput[MSK_PORT_A] > 0)
3688		msk_rxput(sc->msk_if[MSK_PORT_A]);
3689	if (rxput[MSK_PORT_B] > 0)
3690		msk_rxput(sc->msk_if[MSK_PORT_B]);
3691
3692	return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
3693}
3694
3695static void
3696msk_intr(void *xsc)
3697{
3698	struct msk_softc *sc;
3699	struct msk_if_softc *sc_if0, *sc_if1;
3700	struct ifnet *ifp0, *ifp1;
3701	uint32_t status;
3702	int domore;
3703
3704	sc = xsc;
3705	MSK_LOCK(sc);
3706
3707	/* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
3708	status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3709	if (status == 0 || status == 0xffffffff ||
3710	    (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 ||
3711	    (status & sc->msk_intrmask) == 0) {
3712		CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3713		MSK_UNLOCK(sc);
3714		return;
3715	}
3716
3717	sc_if0 = sc->msk_if[MSK_PORT_A];
3718	sc_if1 = sc->msk_if[MSK_PORT_B];
3719	ifp0 = ifp1 = NULL;
3720	if (sc_if0 != NULL)
3721		ifp0 = sc_if0->msk_ifp;
3722	if (sc_if1 != NULL)
3723		ifp1 = sc_if1->msk_ifp;
3724
3725	if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3726		msk_intr_phy(sc_if0);
3727	if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3728		msk_intr_phy(sc_if1);
3729	if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3730		msk_intr_gmac(sc_if0);
3731	if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3732		msk_intr_gmac(sc_if1);
3733	if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3734		device_printf(sc->msk_dev, "Rx descriptor error\n");
3735		sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3736		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3737		CSR_READ_4(sc, B0_IMSK);
3738	}
3739        if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3740		device_printf(sc->msk_dev, "Tx descriptor error\n");
3741		sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3742		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3743		CSR_READ_4(sc, B0_IMSK);
3744	}
3745	if ((status & Y2_IS_HW_ERR) != 0)
3746		msk_intr_hwerr(sc);
3747
3748	domore = msk_handle_events(sc);
3749	if ((status & Y2_IS_STAT_BMU) != 0 && domore == 0)
3750		CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3751
3752	/* Reenable interrupts. */
3753	CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3754
3755	if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3756	    !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3757		msk_start_locked(ifp0);
3758	if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3759	    !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3760		msk_start_locked(ifp1);
3761
3762	MSK_UNLOCK(sc);
3763}
3764
3765static void
3766msk_set_tx_stfwd(struct msk_if_softc *sc_if)
3767{
3768	struct msk_softc *sc;
3769	struct ifnet *ifp;
3770
3771	ifp = sc_if->msk_ifp;
3772	sc = sc_if->msk_softc;
3773	if ((sc->msk_hw_id == CHIP_ID_YUKON_EX &&
3774	    sc->msk_hw_rev != CHIP_REV_YU_EX_A0) ||
3775	    sc->msk_hw_id >= CHIP_ID_YUKON_SUPR) {
3776		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3777		    TX_STFW_ENA);
3778	} else {
3779		if (ifp->if_mtu > ETHERMTU) {
3780			/* Set Tx GMAC FIFO Almost Empty Threshold. */
3781			CSR_WRITE_4(sc,
3782			    MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
3783			    MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
3784			/* Disable Store & Forward mode for Tx. */
3785			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3786			    TX_STFW_DIS);
3787		} else {
3788			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3789			    TX_STFW_ENA);
3790		}
3791	}
3792}
3793
3794static void
3795msk_init(void *xsc)
3796{
3797	struct msk_if_softc *sc_if = xsc;
3798
3799	MSK_IF_LOCK(sc_if);
3800	msk_init_locked(sc_if);
3801	MSK_IF_UNLOCK(sc_if);
3802}
3803
3804static void
3805msk_init_locked(struct msk_if_softc *sc_if)
3806{
3807	struct msk_softc *sc;
3808	struct ifnet *ifp;
3809	struct mii_data	 *mii;
3810	uint8_t *eaddr;
3811	uint16_t gmac;
3812	uint32_t reg;
3813	int error;
3814
3815	MSK_IF_LOCK_ASSERT(sc_if);
3816
3817	ifp = sc_if->msk_ifp;
3818	sc = sc_if->msk_softc;
3819	mii = device_get_softc(sc_if->msk_miibus);
3820
3821	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3822		return;
3823
3824	error = 0;
3825	/* Cancel pending I/O and free all Rx/Tx buffers. */
3826	msk_stop(sc_if);
3827
3828	if (ifp->if_mtu < ETHERMTU)
3829		sc_if->msk_framesize = ETHERMTU;
3830	else
3831		sc_if->msk_framesize = ifp->if_mtu;
3832	sc_if->msk_framesize += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3833	if (ifp->if_mtu > ETHERMTU &&
3834	    (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
3835		ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
3836		ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
3837	}
3838
3839	/* GMAC Control reset. */
3840	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET);
3841	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR);
3842	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF);
3843	if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
3844	    sc->msk_hw_id == CHIP_ID_YUKON_SUPR)
3845		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL),
3846		    GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
3847		    GMC_BYP_RETR_ON);
3848
3849	/*
3850	 * Initialize GMAC first such that speed/duplex/flow-control
3851	 * parameters are renegotiated when interface is brought up.
3852	 */
3853	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0);
3854
3855	/* Dummy read the Interrupt Source Register. */
3856	CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3857
3858	/* Clear MIB stats. */
3859	msk_stats_clear(sc_if);
3860
3861	/* Disable FCS. */
3862	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);
3863
3864	/* Setup Transmit Control Register. */
3865	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
3866
3867	/* Setup Transmit Flow Control Register. */
3868	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
3869
3870	/* Setup Transmit Parameter Register. */
3871	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
3872	    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
3873	    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
3874
3875	gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
3876	    GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
3877
3878	if (ifp->if_mtu > ETHERMTU)
3879		gmac |= GM_SMOD_JUMBO_ENA;
3880	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
3881
3882	/* Set station address. */
3883	eaddr = IF_LLADDR(ifp);
3884	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L,
3885	    eaddr[0] | (eaddr[1] << 8));
3886	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1M,
3887	    eaddr[2] | (eaddr[3] << 8));
3888	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1H,
3889	    eaddr[4] | (eaddr[5] << 8));
3890	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L,
3891	    eaddr[0] | (eaddr[1] << 8));
3892	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2M,
3893	    eaddr[2] | (eaddr[3] << 8));
3894	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2H,
3895	    eaddr[4] | (eaddr[5] << 8));
3896
3897	/* Disable interrupts for counter overflows. */
3898	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
3899	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
3900	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
3901
3902	/* Configure Rx MAC FIFO. */
3903	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3904	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
3905	reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
3906	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P ||
3907	    sc->msk_hw_id == CHIP_ID_YUKON_EX)
3908		reg |= GMF_RX_OVER_ON;
3909	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg);
3910
3911	/* Set receive filter. */
3912	msk_rxfilter(sc_if);
3913
3914	if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
3915		/* Clear flush mask - HW bug. */
3916		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 0);
3917	} else {
3918		/* Flush Rx MAC FIFO on any flow control or error. */
3919		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
3920		    GMR_FS_ANY_ERR);
3921	}
3922
3923	/*
3924	 * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
3925	 * due to hardware hang on receipt of pause frames.
3926	 */
3927	reg = RX_GMF_FL_THR_DEF + 1;
3928	/* Another magic for Yukon FE+ - From Linux. */
3929	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
3930	    sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0)
3931		reg = 0x178;
3932	CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg);
3933
3934	/* Configure Tx MAC FIFO. */
3935	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3936	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
3937	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
3938
3939	/* Configure hardware VLAN tag insertion/stripping. */
3940	msk_setvlan(sc_if, ifp);
3941
3942	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
3943		/* Set Rx Pause threshold. */
3944		CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
3945		    MSK_ECU_LLPP);
3946		CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
3947		    MSK_ECU_ULPP);
3948		/* Configure store-and-forward for Tx. */
3949		msk_set_tx_stfwd(sc_if);
3950	}
3951
3952	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
3953	    sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
3954		/* Disable dynamic watermark - from Linux. */
3955		reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA));
3956		reg &= ~0x03;
3957		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg);
3958	}
3959
3960	/*
3961	 * Disable Force Sync bit and Alloc bit in Tx RAM interface
3962	 * arbiter as we don't use Sync Tx queue.
3963	 */
3964	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
3965	    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
3966	/* Enable the RAM Interface Arbiter. */
3967	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
3968
3969	/* Setup RAM buffer. */
3970	msk_set_rambuffer(sc_if);
3971
3972	/* Disable Tx sync Queue. */
3973	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
3974
3975	/* Setup Tx Queue Bus Memory Interface. */
3976	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
3977	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
3978	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
3979	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
3980	switch (sc->msk_hw_id) {
3981	case CHIP_ID_YUKON_EC_U:
3982		if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
3983			/* Fix for Yukon-EC Ultra: set BMU FIFO level */
3984			CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL),
3985			    MSK_ECU_TXFF_LEV);
3986		}
3987		break;
3988	case CHIP_ID_YUKON_EX:
3989		/*
3990		 * Yukon Extreme seems to have silicon bug for
3991		 * automatic Tx checksum calculation capability.
3992		 */
3993		if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
3994			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F),
3995			    F_TX_CHK_AUTO_OFF);
3996		break;
3997	}
3998
3999	/* Setup Rx Queue Bus Memory Interface. */
4000	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
4001	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
4002	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
4003	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
4004        if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
4005	    sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
4006		/* MAC Rx RAM Read is controlled by hardware. */
4007                CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
4008	}
4009
4010	msk_set_prefetch(sc, sc_if->msk_txq,
4011	    sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
4012	msk_init_tx_ring(sc_if);
4013
4014	/* Disable Rx checksum offload and RSS hash. */
4015	reg = BMU_DIS_RX_RSS_HASH;
4016	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
4017	    (ifp->if_capenable & IFCAP_RXCSUM) != 0)
4018		reg |= BMU_ENA_RX_CHKSUM;
4019	else
4020		reg |= BMU_DIS_RX_CHKSUM;
4021	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), reg);
4022	if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) {
4023		msk_set_prefetch(sc, sc_if->msk_rxq,
4024		    sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
4025		    MSK_JUMBO_RX_RING_CNT - 1);
4026		error = msk_init_jumbo_rx_ring(sc_if);
4027	 } else {
4028		msk_set_prefetch(sc, sc_if->msk_rxq,
4029		    sc_if->msk_rdata.msk_rx_ring_paddr,
4030		    MSK_RX_RING_CNT - 1);
4031		error = msk_init_rx_ring(sc_if);
4032	}
4033	if (error != 0) {
4034		device_printf(sc_if->msk_if_dev,
4035		    "initialization failed: no memory for Rx buffers\n");
4036		msk_stop(sc_if);
4037		return;
4038	}
4039	if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
4040	    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
4041		/* Disable flushing of non-ASF packets. */
4042		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
4043		    GMF_RX_MACSEC_FLUSH_OFF);
4044	}
4045
4046	/* Configure interrupt handling. */
4047	if (sc_if->msk_port == MSK_PORT_A) {
4048		sc->msk_intrmask |= Y2_IS_PORT_A;
4049		sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
4050	} else {
4051		sc->msk_intrmask |= Y2_IS_PORT_B;
4052		sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
4053	}
4054	/* Configure IRQ moderation mask. */
4055	CSR_WRITE_4(sc, B2_IRQM_MSK, sc->msk_intrmask);
4056	if (sc->msk_int_holdoff > 0) {
4057		/* Configure initial IRQ moderation timer value. */
4058		CSR_WRITE_4(sc, B2_IRQM_INI,
4059		    MSK_USECS(sc, sc->msk_int_holdoff));
4060		CSR_WRITE_4(sc, B2_IRQM_VAL,
4061		    MSK_USECS(sc, sc->msk_int_holdoff));
4062		/* Start IRQ moderation. */
4063		CSR_WRITE_1(sc, B2_IRQM_CTRL, TIM_START);
4064	}
4065	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
4066	CSR_READ_4(sc, B0_HWE_IMSK);
4067	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
4068	CSR_READ_4(sc, B0_IMSK);
4069
4070	ifp->if_drv_flags |= IFF_DRV_RUNNING;
4071	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4072
4073	sc_if->msk_flags &= ~MSK_FLAG_LINK;
4074	mii_mediachg(mii);
4075
4076	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
4077}
4078
4079static void
4080msk_set_rambuffer(struct msk_if_softc *sc_if)
4081{
4082	struct msk_softc *sc;
4083	int ltpp, utpp;
4084
4085	sc = sc_if->msk_softc;
4086	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
4087		return;
4088
4089	/* Setup Rx Queue. */
4090	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
4091	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
4092	    sc->msk_rxqstart[sc_if->msk_port] / 8);
4093	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
4094	    sc->msk_rxqend[sc_if->msk_port] / 8);
4095	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
4096	    sc->msk_rxqstart[sc_if->msk_port] / 8);
4097	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
4098	    sc->msk_rxqstart[sc_if->msk_port] / 8);
4099
4100	utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
4101	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
4102	ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
4103	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
4104	if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
4105		ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
4106	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
4107	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
4108	/* Set Rx priority(RB_RX_UTHP/RB_RX_LTHP) thresholds? */
4109
4110	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
4111	CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));
4112
4113	/* Setup Tx Queue. */
4114	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
4115	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
4116	    sc->msk_txqstart[sc_if->msk_port] / 8);
4117	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
4118	    sc->msk_txqend[sc_if->msk_port] / 8);
4119	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
4120	    sc->msk_txqstart[sc_if->msk_port] / 8);
4121	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
4122	    sc->msk_txqstart[sc_if->msk_port] / 8);
4123	/* Enable Store & Forward for Tx side. */
4124	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
4125	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
4126	CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
4127}
4128
4129static void
4130msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
4131    uint32_t count)
4132{
4133
4134	/* Reset the prefetch unit. */
4135	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
4136	    PREF_UNIT_RST_SET);
4137	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
4138	    PREF_UNIT_RST_CLR);
4139	/* Set LE base address. */
4140	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
4141	    MSK_ADDR_LO(addr));
4142	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
4143	    MSK_ADDR_HI(addr));
4144	/* Set the list last index. */
4145	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
4146	    count);
4147	/* Turn on prefetch unit. */
4148	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
4149	    PREF_UNIT_OP_ON);
4150	/* Dummy read to ensure write. */
4151	CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
4152}
4153
4154static void
4155msk_stop(struct msk_if_softc *sc_if)
4156{
4157	struct msk_softc *sc;
4158	struct msk_txdesc *txd;
4159	struct msk_rxdesc *rxd;
4160	struct msk_rxdesc *jrxd;
4161	struct ifnet *ifp;
4162	uint32_t val;
4163	int i;
4164
4165	MSK_IF_LOCK_ASSERT(sc_if);
4166	sc = sc_if->msk_softc;
4167	ifp = sc_if->msk_ifp;
4168
4169	callout_stop(&sc_if->msk_tick_ch);
4170	sc_if->msk_watchdog_timer = 0;
4171
4172	/* Disable interrupts. */
4173	if (sc_if->msk_port == MSK_PORT_A) {
4174		sc->msk_intrmask &= ~Y2_IS_PORT_A;
4175		sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
4176	} else {
4177		sc->msk_intrmask &= ~Y2_IS_PORT_B;
4178		sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
4179	}
4180	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
4181	CSR_READ_4(sc, B0_HWE_IMSK);
4182	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
4183	CSR_READ_4(sc, B0_IMSK);
4184
4185	/* Disable Tx/Rx MAC. */
4186	val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
4187	val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
4188	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
4189	/* Read again to ensure writing. */
4190	GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
4191	/* Update stats and clear counters. */
4192	msk_stats_update(sc_if);
4193
4194	/* Stop Tx BMU. */
4195	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
4196	val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
4197	for (i = 0; i < MSK_TIMEOUT; i++) {
4198		if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
4199			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
4200			    BMU_STOP);
4201			val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
4202		} else
4203			break;
4204		DELAY(1);
4205	}
4206	if (i == MSK_TIMEOUT)
4207		device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
4208	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
4209	    RB_RST_SET | RB_DIS_OP_MD);
4210
4211	/* Disable all GMAC interrupt. */
4212	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
4213	/* Disable PHY interrupt. */
4214	msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
4215
4216	/* Disable the RAM Interface Arbiter. */
4217	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);
4218
4219	/* Reset the PCI FIFO of the async Tx queue */
4220	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
4221	    BMU_RST_SET | BMU_FIFO_RST);
4222
4223	/* Reset the Tx prefetch units. */
4224	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
4225	    PREF_UNIT_RST_SET);
4226
4227	/* Reset the RAM Buffer async Tx queue. */
4228	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);
4229
4230	/* Reset Tx MAC FIFO. */
4231	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
4232	/* Set Pause Off. */
4233	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
4234
4235	/*
4236	 * The Rx Stop command will not work for Yukon-2 if the BMU does not
4237	 * reach the end of packet and since we can't make sure that we have
4238	 * incoming data, we must reset the BMU while it is not during a DMA
4239	 * transfer. Since it is possible that the Rx path is still active,
4240	 * the Rx RAM buffer will be stopped first, so any possible incoming
4241	 * data will not trigger a DMA. After the RAM buffer is stopped, the
4242	 * BMU is polled until any DMA in progress is ended and only then it
4243	 * will be reset.
4244	 */
4245
4246	/* Disable the RAM Buffer receive queue. */
4247	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
4248	for (i = 0; i < MSK_TIMEOUT; i++) {
4249		if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
4250		    CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
4251			break;
4252		DELAY(1);
4253	}
4254	if (i == MSK_TIMEOUT)
4255		device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
4256	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
4257	    BMU_RST_SET | BMU_FIFO_RST);
4258	/* Reset the Rx prefetch unit. */
4259	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
4260	    PREF_UNIT_RST_SET);
4261	/* Reset the RAM Buffer receive queue. */
4262	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
4263	/* Reset Rx MAC FIFO. */
4264	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
4265
4266	/* Free Rx and Tx mbufs still in the queues. */
4267	for (i = 0; i < MSK_RX_RING_CNT; i++) {
4268		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
4269		if (rxd->rx_m != NULL) {
4270			bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
4271			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
4272			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
4273			    rxd->rx_dmamap);
4274			m_freem(rxd->rx_m);
4275			rxd->rx_m = NULL;
4276		}
4277	}
4278	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
4279		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
4280		if (jrxd->rx_m != NULL) {
4281			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
4282			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
4283			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
4284			    jrxd->rx_dmamap);
4285			m_freem(jrxd->rx_m);
4286			jrxd->rx_m = NULL;
4287		}
4288	}
4289	for (i = 0; i < MSK_TX_RING_CNT; i++) {
4290		txd = &sc_if->msk_cdata.msk_txdesc[i];
4291		if (txd->tx_m != NULL) {
4292			bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
4293			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
4294			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
4295			    txd->tx_dmamap);
4296			m_freem(txd->tx_m);
4297			txd->tx_m = NULL;
4298		}
4299	}
4300
4301	/*
4302	 * Mark the interface down.
4303	 */
4304	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4305	sc_if->msk_flags &= ~MSK_FLAG_LINK;
4306}
4307
4308/*
4309 * When GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading lower
4310 * counter clears high 16 bits of the counter such that accessing
4311 * lower 16 bits should be the last operation.
4312 */
4313#define	MSK_READ_MIB32(x, y)					\
4314	(((uint32_t)GMAC_READ_2(sc, x, (y) + 4)) << 16) +	\
4315	(uint32_t)GMAC_READ_2(sc, x, y)
4316#define	MSK_READ_MIB64(x, y)					\
4317	(((uint64_t)MSK_READ_MIB32(x, (y) + 8)) << 32) +	\
4318	(uint64_t)MSK_READ_MIB32(x, y)
4319
4320static void
4321msk_stats_clear(struct msk_if_softc *sc_if)
4322{
4323	struct msk_softc *sc;
4324	uint32_t reg;
4325	uint16_t gmac;
4326	int i;
4327
4328	MSK_IF_LOCK_ASSERT(sc_if);
4329
4330	sc = sc_if->msk_softc;
4331	/* Set MIB Clear Counter Mode. */
4332	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
4333	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
4334	/* Read all MIB Counters with Clear Mode set. */
4335	for (i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i += sizeof(uint32_t))
4336		reg = MSK_READ_MIB32(sc_if->msk_port, i);
4337	/* Clear MIB Clear Counter Mode. */
4338	gmac &= ~GM_PAR_MIB_CLR;
4339	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
4340}
4341
4342static void
4343msk_stats_update(struct msk_if_softc *sc_if)
4344{
4345	struct msk_softc *sc;
4346	struct ifnet *ifp;
4347	struct msk_hw_stats *stats;
4348	uint16_t gmac;
4349	uint32_t reg;
4350
4351	MSK_IF_LOCK_ASSERT(sc_if);
4352
4353	ifp = sc_if->msk_ifp;
4354	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
4355		return;
4356	sc = sc_if->msk_softc;
4357	stats = &sc_if->msk_stats;
4358	/* Set MIB Clear Counter Mode. */
4359	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
4360	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
4361
4362	/* Rx stats. */
4363	stats->rx_ucast_frames +=
4364	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_UC_OK);
4365	stats->rx_bcast_frames +=
4366	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_BC_OK);
4367	stats->rx_pause_frames +=
4368	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MPAUSE);
4369	stats->rx_mcast_frames +=
4370	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MC_OK);
4371	stats->rx_crc_errs +=
4372	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_FCS_ERR);
4373	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE1);
4374	stats->rx_good_octets +=
4375	    MSK_READ_MIB64(sc_if->msk_port, GM_RXO_OK_LO);
4376	stats->rx_bad_octets +=
4377	    MSK_READ_MIB64(sc_if->msk_port, GM_RXO_ERR_LO);
4378	stats->rx_runts +=
4379	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SHT);
4380	stats->rx_runt_errs +=
4381	    MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FRAG);
4382	stats->rx_pkts_64 +=
4383	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_64B);
4384	stats->rx_pkts_65_127 +=
4385	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_127B);
4386	stats->rx_pkts_128_255 +=
4387	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_255B);
4388	stats->rx_pkts_256_511 +=
4389	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_511B);
4390	stats->rx_pkts_512_1023 +=
4391	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1023B);
4392	stats->rx_pkts_1024_1518 +=
4393	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1518B);
4394	stats->rx_pkts_1519_max +=
4395	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MAX_SZ);
4396	stats->rx_pkts_too_long +=
4397	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_LNG_ERR);
4398	stats->rx_pkts_jabbers +=
4399	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_JAB_PKT);
4400	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE2);
4401	stats->rx_fifo_oflows +=
4402	    MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FIFO_OV);
4403	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE3);
4404
4405	/* Tx stats. */
4406	stats->tx_ucast_frames +=
4407	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_UC_OK);
4408	stats->tx_bcast_frames +=
4409	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_BC_OK);
4410	stats->tx_pause_frames +=
4411	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MPAUSE);
4412	stats->tx_mcast_frames +=
4413	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MC_OK);
4414	stats->tx_octets +=
4415	    MSK_READ_MIB64(sc_if->msk_port, GM_TXO_OK_LO);
4416	stats->tx_pkts_64 +=
4417	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_64B);
4418	stats->tx_pkts_65_127 +=
4419	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_127B);
4420	stats->tx_pkts_128_255 +=
4421	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_255B);
4422	stats->tx_pkts_256_511 +=
4423	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_511B);
4424	stats->tx_pkts_512_1023 +=
4425	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1023B);
4426	stats->tx_pkts_1024_1518 +=
4427	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1518B);
4428	stats->tx_pkts_1519_max +=
4429	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MAX_SZ);
4430	reg = MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SPARE1);
4431	stats->tx_colls +=
4432	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_COL);
4433	stats->tx_late_colls +=
4434	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_LAT_COL);
4435	stats->tx_excess_colls +=
4436	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_ABO_COL);
4437	stats->tx_multi_colls +=
4438	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MUL_COL);
4439	stats->tx_single_colls +=
4440	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SNG_COL);
4441	stats->tx_underflows +=
4442	    MSK_READ_MIB32(sc_if->msk_port, GM_TXE_FIFO_UR);
4443	/* Clear MIB Clear Counter Mode. */
4444	gmac &= ~GM_PAR_MIB_CLR;
4445	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
4446}
4447
4448static int
4449msk_sysctl_stat32(SYSCTL_HANDLER_ARGS)
4450{
4451	struct msk_softc *sc;
4452	struct msk_if_softc *sc_if;
4453	uint32_t result, *stat;
4454	int off;
4455
4456	sc_if = (struct msk_if_softc *)arg1;
4457	sc = sc_if->msk_softc;
4458	off = arg2;
4459	stat = (uint32_t *)((uint8_t *)&sc_if->msk_stats + off);
4460
4461	MSK_IF_LOCK(sc_if);
4462	result = MSK_READ_MIB32(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
4463	result += *stat;
4464	MSK_IF_UNLOCK(sc_if);
4465
4466	return (sysctl_handle_int(oidp, &result, 0, req));
4467}
4468
4469static int
4470msk_sysctl_stat64(SYSCTL_HANDLER_ARGS)
4471{
4472	struct msk_softc *sc;
4473	struct msk_if_softc *sc_if;
4474	uint64_t result, *stat;
4475	int off;
4476
4477	sc_if = (struct msk_if_softc *)arg1;
4478	sc = sc_if->msk_softc;
4479	off = arg2;
4480	stat = (uint64_t *)((uint8_t *)&sc_if->msk_stats + off);
4481
4482	MSK_IF_LOCK(sc_if);
4483	result = MSK_READ_MIB64(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
4484	result += *stat;
4485	MSK_IF_UNLOCK(sc_if);
4486
4487	return (sysctl_handle_64(oidp, &result, 0, req));
4488}
4489
4490#undef MSK_READ_MIB32
4491#undef MSK_READ_MIB64
4492
4493#define MSK_SYSCTL_STAT32(sc, c, o, p, n, d) 				\
4494	SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_UINT | CTLFLAG_RD, 	\
4495	    sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat32,	\
4496	    "IU", d)
4497#define MSK_SYSCTL_STAT64(sc, c, o, p, n, d) 				\
4498	SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_U64 | CTLFLAG_RD, 	\
4499	    sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat64,	\
4500	    "QU", d)
4501
4502static void
4503msk_sysctl_node(struct msk_if_softc *sc_if)
4504{
4505	struct sysctl_ctx_list *ctx;
4506	struct sysctl_oid_list *child, *schild;
4507	struct sysctl_oid *tree;
4508
4509	ctx = device_get_sysctl_ctx(sc_if->msk_if_dev);
4510	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc_if->msk_if_dev));
4511
4512	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
4513	    NULL, "MSK Statistics");
4514	schild = child = SYSCTL_CHILDREN(tree);
4515	tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
4516	    NULL, "MSK RX Statistics");
4517	child = SYSCTL_CHILDREN(tree);
4518	MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
4519	    child, rx_ucast_frames, "Good unicast frames");
4520	MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
4521	    child, rx_bcast_frames, "Good broadcast frames");
4522	MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
4523	    child, rx_pause_frames, "Pause frames");
4524	MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
4525	    child, rx_mcast_frames, "Multicast frames");
4526	MSK_SYSCTL_STAT32(sc_if, ctx, "crc_errs",
4527	    child, rx_crc_errs, "CRC errors");
4528	MSK_SYSCTL_STAT64(sc_if, ctx, "good_octets",
4529	    child, rx_good_octets, "Good octets");
4530	MSK_SYSCTL_STAT64(sc_if, ctx, "bad_octets",
4531	    child, rx_bad_octets, "Bad octets");
4532	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
4533	    child, rx_pkts_64, "64 bytes frames");
4534	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
4535	    child, rx_pkts_65_127, "65 to 127 bytes frames");
4536	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
4537	    child, rx_pkts_128_255, "128 to 255 bytes frames");
4538	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
4539	    child, rx_pkts_256_511, "256 to 511 bytes frames");
4540	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
4541	    child, rx_pkts_512_1023, "512 to 1023 bytes frames");
4542	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
4543	    child, rx_pkts_1024_1518, "1024 to 1518 bytes frames");
4544	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
4545	    child, rx_pkts_1519_max, "1519 to max frames");
4546	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_too_long",
4547	    child, rx_pkts_too_long, "frames too long");
4548	MSK_SYSCTL_STAT32(sc_if, ctx, "jabbers",
4549	    child, rx_pkts_jabbers, "Jabber errors");
4550	MSK_SYSCTL_STAT32(sc_if, ctx, "overflows",
4551	    child, rx_fifo_oflows, "FIFO overflows");
4552
4553	tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
4554	    NULL, "MSK TX Statistics");
4555	child = SYSCTL_CHILDREN(tree);
4556	MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
4557	    child, tx_ucast_frames, "Unicast frames");
4558	MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
4559	    child, tx_bcast_frames, "Broadcast frames");
4560	MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
4561	    child, tx_pause_frames, "Pause frames");
4562	MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
4563	    child, tx_mcast_frames, "Multicast frames");
4564	MSK_SYSCTL_STAT64(sc_if, ctx, "octets",
4565	    child, tx_octets, "Octets");
4566	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
4567	    child, tx_pkts_64, "64 bytes frames");
4568	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
4569	    child, tx_pkts_65_127, "65 to 127 bytes frames");
4570	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
4571	    child, tx_pkts_128_255, "128 to 255 bytes frames");
4572	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
4573	    child, tx_pkts_256_511, "256 to 511 bytes frames");
4574	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
4575	    child, tx_pkts_512_1023, "512 to 1023 bytes frames");
4576	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
4577	    child, tx_pkts_1024_1518, "1024 to 1518 bytes frames");
4578	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
4579	    child, tx_pkts_1519_max, "1519 to max frames");
4580	MSK_SYSCTL_STAT32(sc_if, ctx, "colls",
4581	    child, tx_colls, "Collisions");
4582	MSK_SYSCTL_STAT32(sc_if, ctx, "late_colls",
4583	    child, tx_late_colls, "Late collisions");
4584	MSK_SYSCTL_STAT32(sc_if, ctx, "excess_colls",
4585	    child, tx_excess_colls, "Excessive collisions");
4586	MSK_SYSCTL_STAT32(sc_if, ctx, "multi_colls",
4587	    child, tx_multi_colls, "Multiple collisions");
4588	MSK_SYSCTL_STAT32(sc_if, ctx, "single_colls",
4589	    child, tx_single_colls, "Single collisions");
4590	MSK_SYSCTL_STAT32(sc_if, ctx, "underflows",
4591	    child, tx_underflows, "FIFO underflows");
4592}
4593
4594#undef MSK_SYSCTL_STAT32
4595#undef MSK_SYSCTL_STAT64
4596
4597static int
4598sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
4599{
4600	int error, value;
4601
4602	if (!arg1)
4603		return (EINVAL);
4604	value = *(int *)arg1;
4605	error = sysctl_handle_int(oidp, &value, 0, req);
4606	if (error || !req->newptr)
4607		return (error);
4608	if (value < low || value > high)
4609		return (EINVAL);
4610	*(int *)arg1 = value;
4611
4612	return (0);
4613}
4614
4615static int
4616sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
4617{
4618
4619	return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
4620	    MSK_PROC_MAX));
4621}
4622