1/******************************************************************************
2 *
3 * Name   : sky2.c
4 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
5 * Version: $Revision: 1.23 $
6 * Date   : $Date: 2005/12/22 09:04:11 $
7 * Purpose: Main driver source file
8 *
9 *****************************************************************************/
10
11/******************************************************************************
12 *
13 *	LICENSE:
14 *	Copyright (C) Marvell International Ltd. and/or its affiliates
15 *
16 *	The computer program files contained in this folder ("Files")
17 *	are provided to you under the BSD-type license terms provided
18 *	below, and any use of such Files and any derivative works
19 *	thereof created by you shall be governed by the following terms
20 *	and conditions:
21 *
22 *	- Redistributions of source code must retain the above copyright
23 *	  notice, this list of conditions and the following disclaimer.
24 *	- Redistributions in binary form must reproduce the above
25 *	  copyright notice, this list of conditions and the following
26 *	  disclaimer in the documentation and/or other materials provided
27 *	  with the distribution.
28 *	- Neither the name of Marvell nor the names of its contributors
29 *	  may be used to endorse or promote products derived from this
30 *	  software without specific prior written permission.
31 *
32 *	THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
33 *	"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
34 *	LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 *	FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
36 *	COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
37 *	INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
38 *	BUT NOT LIMITED TO, PROCUREMENT OF  SUBSTITUTE GOODS OR SERVICES;
39 *	LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40 *	HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
41 *	STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42 *	ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
43 *	OF THE POSSIBILITY OF SUCH DAMAGE.
44 *	/LICENSE
45 *
46 *****************************************************************************/
47
48/*-
49 * SPDX-License-Identifier: BSD-4-Clause AND BSD-3-Clause
50 *
51 * Copyright (c) 1997, 1998, 1999, 2000
52 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
53 *
54 * Redistribution and use in source and binary forms, with or without
55 * modification, are permitted provided that the following conditions
56 * are met:
57 * 1. Redistributions of source code must retain the above copyright
58 *    notice, this list of conditions and the following disclaimer.
59 * 2. Redistributions in binary form must reproduce the above copyright
60 *    notice, this list of conditions and the following disclaimer in the
61 *    documentation and/or other materials provided with the distribution.
62 * 3. All advertising materials mentioning features or use of this software
63 *    must display the following acknowledgement:
64 *	This product includes software developed by Bill Paul.
65 * 4. Neither the name of the author nor the names of any co-contributors
66 *    may be used to endorse or promote products derived from this software
67 *    without specific prior written permission.
68 *
69 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
70 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
71 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
72 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
73 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
74 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
75 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
76 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
77 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
78 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
79 * THE POSSIBILITY OF SUCH DAMAGE.
80 */
81/*-
82 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
83 *
84 * Permission to use, copy, modify, and distribute this software for any
85 * purpose with or without fee is hereby granted, provided that the above
86 * copyright notice and this permission notice appear in all copies.
87 *
88 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
89 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
90 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
91 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
92 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
93 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
94 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
95 */
96
97/*
98 * Device driver for the Marvell Yukon II Ethernet controller.
99 * Due to lack of documentation, this driver is based on the code from
100 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
101 */
102
103#include <sys/cdefs.h>
104__FBSDID("$FreeBSD: releng/12.0/sys/dev/msk/if_msk.c 333813 2018-05-18 20:13:34Z mmacy $");
105
106#include <sys/param.h>
107#include <sys/systm.h>
108#include <sys/bus.h>
109#include <sys/endian.h>
110#include <sys/mbuf.h>
111#include <sys/malloc.h>
112#include <sys/kernel.h>
113#include <sys/module.h>
114#include <sys/socket.h>
115#include <sys/sockio.h>
116#include <sys/queue.h>
117#include <sys/sysctl.h>
118
119#include <net/bpf.h>
120#include <net/ethernet.h>
121#include <net/if.h>
122#include <net/if_var.h>
123#include <net/if_arp.h>
124#include <net/if_dl.h>
125#include <net/if_media.h>
126#include <net/if_types.h>
127#include <net/if_vlan_var.h>
128
129#include <netinet/in.h>
130#include <netinet/in_systm.h>
131#include <netinet/ip.h>
132#include <netinet/tcp.h>
133#include <netinet/udp.h>
134
135#include <machine/bus.h>
136#include <machine/in_cksum.h>
137#include <machine/resource.h>
138#include <sys/rman.h>
139
140#include <dev/mii/mii.h>
141#include <dev/mii/miivar.h>
142
143#include <dev/pci/pcireg.h>
144#include <dev/pci/pcivar.h>
145
146#include <dev/msk/if_mskreg.h>
147
148MODULE_DEPEND(msk, pci, 1, 1, 1);
149MODULE_DEPEND(msk, ether, 1, 1, 1);
150MODULE_DEPEND(msk, miibus, 1, 1, 1);
151
152/* "device miibus" required.  See GENERIC if you get errors here. */
153#include "miibus_if.h"
154
155/* Tunables. */
156static int msi_disable = 0;
157TUNABLE_INT("hw.msk.msi_disable", &msi_disable);
158static int legacy_intr = 0;
159TUNABLE_INT("hw.msk.legacy_intr", &legacy_intr);
160static int jumbo_disable = 0;
161TUNABLE_INT("hw.msk.jumbo_disable", &jumbo_disable);
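
/*
 * These are boot-time loader tunables; a typical (hypothetical)
 * /boot/loader.conf entry to force legacy INTx interrupts and to
 * disable jumbo frame support might look like:
 *
 *	hw.msk.msi_disable="1"
 *	hw.msk.jumbo_disable="1"
 */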
162
163#define MSK_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
164
165/*
166 * Devices supported by this driver.
167 */
168static const struct msk_product {
169	uint16_t	msk_vendorid;
170	uint16_t	msk_deviceid;
171	const char	*msk_name;
172} msk_products[] = {
173	{ VENDORID_SK, DEVICEID_SK_YUKON2,
174	    "SK-9Sxx Gigabit Ethernet" },
175	{ VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
176	    "SK-9Exx Gigabit Ethernet"},
177	{ VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
178	    "Marvell Yukon 88E8021CU Gigabit Ethernet" },
179	{ VENDORID_MARVELL, DEVICEID_MRVL_8021X,
180	    "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
181	{ VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
182	    "Marvell Yukon 88E8022CU Gigabit Ethernet" },
183	{ VENDORID_MARVELL, DEVICEID_MRVL_8022X,
184	    "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
185	{ VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
186	    "Marvell Yukon 88E8061CU Gigabit Ethernet" },
187	{ VENDORID_MARVELL, DEVICEID_MRVL_8061X,
188	    "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
189	{ VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
190	    "Marvell Yukon 88E8062CU Gigabit Ethernet" },
191	{ VENDORID_MARVELL, DEVICEID_MRVL_8062X,
192	    "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
193	{ VENDORID_MARVELL, DEVICEID_MRVL_8035,
194	    "Marvell Yukon 88E8035 Fast Ethernet" },
195	{ VENDORID_MARVELL, DEVICEID_MRVL_8036,
196	    "Marvell Yukon 88E8036 Fast Ethernet" },
197	{ VENDORID_MARVELL, DEVICEID_MRVL_8038,
198	    "Marvell Yukon 88E8038 Fast Ethernet" },
199	{ VENDORID_MARVELL, DEVICEID_MRVL_8039,
200	    "Marvell Yukon 88E8039 Fast Ethernet" },
201	{ VENDORID_MARVELL, DEVICEID_MRVL_8040,
202	    "Marvell Yukon 88E8040 Fast Ethernet" },
203	{ VENDORID_MARVELL, DEVICEID_MRVL_8040T,
204	    "Marvell Yukon 88E8040T Fast Ethernet" },
205	{ VENDORID_MARVELL, DEVICEID_MRVL_8042,
206	    "Marvell Yukon 88E8042 Fast Ethernet" },
207	{ VENDORID_MARVELL, DEVICEID_MRVL_8048,
208	    "Marvell Yukon 88E8048 Fast Ethernet" },
209	{ VENDORID_MARVELL, DEVICEID_MRVL_4361,
210	    "Marvell Yukon 88E8050 Gigabit Ethernet" },
211	{ VENDORID_MARVELL, DEVICEID_MRVL_4360,
212	    "Marvell Yukon 88E8052 Gigabit Ethernet" },
213	{ VENDORID_MARVELL, DEVICEID_MRVL_4362,
214	    "Marvell Yukon 88E8053 Gigabit Ethernet" },
215	{ VENDORID_MARVELL, DEVICEID_MRVL_4363,
216	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
217	{ VENDORID_MARVELL, DEVICEID_MRVL_4364,
218	    "Marvell Yukon 88E8056 Gigabit Ethernet" },
219	{ VENDORID_MARVELL, DEVICEID_MRVL_4365,
220	    "Marvell Yukon 88E8070 Gigabit Ethernet" },
221	{ VENDORID_MARVELL, DEVICEID_MRVL_436A,
222	    "Marvell Yukon 88E8058 Gigabit Ethernet" },
223	{ VENDORID_MARVELL, DEVICEID_MRVL_436B,
224	    "Marvell Yukon 88E8071 Gigabit Ethernet" },
225	{ VENDORID_MARVELL, DEVICEID_MRVL_436C,
226	    "Marvell Yukon 88E8072 Gigabit Ethernet" },
227	{ VENDORID_MARVELL, DEVICEID_MRVL_436D,
228	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
229	{ VENDORID_MARVELL, DEVICEID_MRVL_4370,
230	    "Marvell Yukon 88E8075 Gigabit Ethernet" },
231	{ VENDORID_MARVELL, DEVICEID_MRVL_4380,
232	    "Marvell Yukon 88E8057 Gigabit Ethernet" },
233	{ VENDORID_MARVELL, DEVICEID_MRVL_4381,
234	    "Marvell Yukon 88E8059 Gigabit Ethernet" },
235	{ VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
236	    "D-Link 550SX Gigabit Ethernet" },
237	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560SX,
238	    "D-Link 560SX Gigabit Ethernet" },
239	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
240	    "D-Link 560T Gigabit Ethernet" }
241};
242
243static const char *model_name[] = {
244	"Yukon XL",
	"Yukon EC Ultra",
	"Yukon EX",
	"Yukon EC",
	"Yukon FE",
	"Yukon FE+",
	"Yukon Supreme",
	"Yukon Ultra 2",
	"Yukon Unknown",
	"Yukon Optima",
254};
255
256static int mskc_probe(device_t);
257static int mskc_attach(device_t);
258static int mskc_detach(device_t);
259static int mskc_shutdown(device_t);
260static int mskc_setup_rambuffer(struct msk_softc *);
261static int mskc_suspend(device_t);
262static int mskc_resume(device_t);
263static bus_dma_tag_t mskc_get_dma_tag(device_t, device_t);
264static void mskc_reset(struct msk_softc *);
265
266static int msk_probe(device_t);
267static int msk_attach(device_t);
268static int msk_detach(device_t);
269
270static void msk_tick(void *);
271static void msk_intr(void *);
272static void msk_intr_phy(struct msk_if_softc *);
273static void msk_intr_gmac(struct msk_if_softc *);
274static __inline void msk_rxput(struct msk_if_softc *);
275static int msk_handle_events(struct msk_softc *);
276static void msk_handle_hwerr(struct msk_if_softc *, uint32_t);
277static void msk_intr_hwerr(struct msk_softc *);
278#ifndef __NO_STRICT_ALIGNMENT
279static __inline void msk_fixup_rx(struct mbuf *);
280#endif
281static __inline void msk_rxcsum(struct msk_if_softc *, uint32_t, struct mbuf *);
282static void msk_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
283static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
284static void msk_txeof(struct msk_if_softc *, int);
285static int msk_encap(struct msk_if_softc *, struct mbuf **);
286static void msk_start(struct ifnet *);
287static void msk_start_locked(struct ifnet *);
288static int msk_ioctl(struct ifnet *, u_long, caddr_t);
289static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
290static void msk_set_rambuffer(struct msk_if_softc *);
291static void msk_set_tx_stfwd(struct msk_if_softc *);
292static void msk_init(void *);
293static void msk_init_locked(struct msk_if_softc *);
294static void msk_stop(struct msk_if_softc *);
295static void msk_watchdog(struct msk_if_softc *);
296static int msk_mediachange(struct ifnet *);
297static void msk_mediastatus(struct ifnet *, struct ifmediareq *);
298static void msk_phy_power(struct msk_softc *, int);
299static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
300static int msk_status_dma_alloc(struct msk_softc *);
301static void msk_status_dma_free(struct msk_softc *);
302static int msk_txrx_dma_alloc(struct msk_if_softc *);
303static int msk_rx_dma_jalloc(struct msk_if_softc *);
304static void msk_txrx_dma_free(struct msk_if_softc *);
305static void msk_rx_dma_jfree(struct msk_if_softc *);
306static int msk_rx_fill(struct msk_if_softc *, int);
307static int msk_init_rx_ring(struct msk_if_softc *);
308static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
309static void msk_init_tx_ring(struct msk_if_softc *);
310static __inline void msk_discard_rxbuf(struct msk_if_softc *, int);
311static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
312static int msk_newbuf(struct msk_if_softc *, int);
313static int msk_jumbo_newbuf(struct msk_if_softc *, int);
314
315static int msk_phy_readreg(struct msk_if_softc *, int, int);
316static int msk_phy_writereg(struct msk_if_softc *, int, int, int);
317static int msk_miibus_readreg(device_t, int, int);
318static int msk_miibus_writereg(device_t, int, int, int);
319static void msk_miibus_statchg(device_t);
320
321static void msk_rxfilter(struct msk_if_softc *);
322static void msk_setvlan(struct msk_if_softc *, struct ifnet *);
323
324static void msk_stats_clear(struct msk_if_softc *);
325static void msk_stats_update(struct msk_if_softc *);
326static int msk_sysctl_stat32(SYSCTL_HANDLER_ARGS);
327static int msk_sysctl_stat64(SYSCTL_HANDLER_ARGS);
328static void msk_sysctl_node(struct msk_if_softc *);
329static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
330static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS);
331
332static device_method_t mskc_methods[] = {
333	/* Device interface */
334	DEVMETHOD(device_probe,		mskc_probe),
335	DEVMETHOD(device_attach,	mskc_attach),
336	DEVMETHOD(device_detach,	mskc_detach),
337	DEVMETHOD(device_suspend,	mskc_suspend),
338	DEVMETHOD(device_resume,	mskc_resume),
339	DEVMETHOD(device_shutdown,	mskc_shutdown),
340
341	DEVMETHOD(bus_get_dma_tag,	mskc_get_dma_tag),
342
343	DEVMETHOD_END
344};
345
346static driver_t mskc_driver = {
347	"mskc",
348	mskc_methods,
349	sizeof(struct msk_softc)
350};
351
352static devclass_t mskc_devclass;
353
354static device_method_t msk_methods[] = {
355	/* Device interface */
356	DEVMETHOD(device_probe,		msk_probe),
357	DEVMETHOD(device_attach,	msk_attach),
358	DEVMETHOD(device_detach,	msk_detach),
359	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
360
361	/* MII interface */
362	DEVMETHOD(miibus_readreg,	msk_miibus_readreg),
363	DEVMETHOD(miibus_writereg,	msk_miibus_writereg),
364	DEVMETHOD(miibus_statchg,	msk_miibus_statchg),
365
366	DEVMETHOD_END
367};
368
369static driver_t msk_driver = {
370	"msk",
371	msk_methods,
372	sizeof(struct msk_if_softc)
373};
374
375static devclass_t msk_devclass;
376
377DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, NULL, NULL);
378DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, NULL, NULL);
379DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, NULL, NULL);
380
381static struct resource_spec msk_res_spec_io[] = {
382	{ SYS_RES_IOPORT,	PCIR_BAR(1),	RF_ACTIVE },
383	{ -1,			0,		0 }
384};
385
386static struct resource_spec msk_res_spec_mem[] = {
387	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
388	{ -1,			0,		0 }
389};
390
391static struct resource_spec msk_irq_spec_legacy[] = {
392	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
393	{ -1,			0,		0 }
394};
395
396static struct resource_spec msk_irq_spec_msi[] = {
397	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
398	{ -1,			0,		0 }
399};
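
/*
 * Note that the interrupt resource IDs differ between the two modes:
 * the legacy INTx line is rid 0 and may be shared, while MSI vectors
 * are allocated by the PCI code starting at rid 1.
 */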
400
401static int
402msk_miibus_readreg(device_t dev, int phy, int reg)
403{
404	struct msk_if_softc *sc_if;
405
406	sc_if = device_get_softc(dev);
407
408	return (msk_phy_readreg(sc_if, phy, reg));
409}
410
411static int
412msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
413{
414	struct msk_softc *sc;
415	int i, val;
416
417	sc = sc_if->msk_softc;
418
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
420	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
421
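	/*
	 * Poll for completion of the SMI read cycle: the GMAC sets
	 * GM_SMI_CT_RD_VAL once the PHY register value has been latched
	 * into GM_SMI_DATA.  Give up after roughly MSK_TIMEOUT
	 * microseconds.
	 */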
422	for (i = 0; i < MSK_TIMEOUT; i++) {
423		DELAY(1);
424		val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
425		if ((val & GM_SMI_CT_RD_VAL) != 0) {
426			val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
427			break;
428		}
429	}
430
431	if (i == MSK_TIMEOUT) {
432		if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
433		val = 0;
434	}
435
436	return (val);
437}
438
439static int
440msk_miibus_writereg(device_t dev, int phy, int reg, int val)
441{
442	struct msk_if_softc *sc_if;
443
444	sc_if = device_get_softc(dev);
445
446	return (msk_phy_writereg(sc_if, phy, reg, val));
447}
448
449static int
450msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
451{
452	struct msk_softc *sc;
453	int i;
454
455	sc = sc_if->msk_softc;
456
457	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
459	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
460	for (i = 0; i < MSK_TIMEOUT; i++) {
461		DELAY(1);
462		if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
463		    GM_SMI_CT_BUSY) == 0)
464			break;
465	}
466	if (i == MSK_TIMEOUT)
467		if_printf(sc_if->msk_ifp, "phy write timeout\n");
468
469	return (0);
470}
471
472static void
473msk_miibus_statchg(device_t dev)
474{
475	struct msk_softc *sc;
476	struct msk_if_softc *sc_if;
477	struct mii_data *mii;
478	struct ifnet *ifp;
479	uint32_t gmac;
480
481	sc_if = device_get_softc(dev);
482	sc = sc_if->msk_softc;
483
484	MSK_IF_LOCK_ASSERT(sc_if);
485
486	mii = device_get_softc(sc_if->msk_miibus);
487	ifp = sc_if->msk_ifp;
488	if (mii == NULL || ifp == NULL ||
489	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
490		return;
491
492	sc_if->msk_flags &= ~MSK_FLAG_LINK;
493	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
494	    (IFM_AVALID | IFM_ACTIVE)) {
495		switch (IFM_SUBTYPE(mii->mii_media_active)) {
496		case IFM_10_T:
497		case IFM_100_TX:
498			sc_if->msk_flags |= MSK_FLAG_LINK;
499			break;
500		case IFM_1000_T:
501		case IFM_1000_SX:
502		case IFM_1000_LX:
503		case IFM_1000_CX:
504			if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0)
505				sc_if->msk_flags |= MSK_FLAG_LINK;
506			break;
507		default:
508			break;
509		}
510	}
511
512	if ((sc_if->msk_flags & MSK_FLAG_LINK) != 0) {
513		/* Enable Tx FIFO Underrun. */
514		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
515		    GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
		/*
		 * Because mii(4) notifies msk(4) when it detects a link
		 * state change, there is no need to enable automatic
		 * speed/flow-control/duplex updates.
		 */
521		gmac = GM_GPCR_AU_ALL_DIS;
522		switch (IFM_SUBTYPE(mii->mii_media_active)) {
523		case IFM_1000_SX:
524		case IFM_1000_T:
525			gmac |= GM_GPCR_SPEED_1000;
526			break;
527		case IFM_100_TX:
528			gmac |= GM_GPCR_SPEED_100;
529			break;
530		case IFM_10_T:
531			break;
532		}
533
534		if ((IFM_OPTIONS(mii->mii_media_active) &
535		    IFM_ETH_RXPAUSE) == 0)
536			gmac |= GM_GPCR_FC_RX_DIS;
537		if ((IFM_OPTIONS(mii->mii_media_active) &
538		     IFM_ETH_TXPAUSE) == 0)
539			gmac |= GM_GPCR_FC_TX_DIS;
540		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
541			gmac |= GM_GPCR_DUP_FULL;
542		else
543			gmac |= GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS;
544		gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
545		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
		/* Read back to ensure the write has taken effect. */
547		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
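		/*
		 * Hardware pause is enabled only on full-duplex links that
		 * negotiated Rx pause; otherwise pause frames stay disabled
		 * in the GMAC control block.
		 */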
548		gmac = GMC_PAUSE_OFF;
549		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
550			if ((IFM_OPTIONS(mii->mii_media_active) &
551			    IFM_ETH_RXPAUSE) != 0)
552				gmac = GMC_PAUSE_ON;
553		}
554		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);
555
556		/* Enable PHY interrupt for FIFO underrun/overflow. */
557		msk_phy_writereg(sc_if, PHY_ADDR_MARV,
558		    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
559	} else {
560		/*
561		 * Link state changed to down.
562		 * Disable PHY interrupts.
563		 */
564		msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
565		/* Disable Rx/Tx MAC. */
566		gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
567		if ((gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) != 0) {
568			gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
569			GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
			/* Read back to ensure the write has taken effect. */
571			GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
572		}
573	}
574}
575
576static void
577msk_rxfilter(struct msk_if_softc *sc_if)
578{
579	struct msk_softc *sc;
580	struct ifnet *ifp;
581	struct ifmultiaddr *ifma;
582	uint32_t mchash[2];
583	uint32_t crc;
584	uint16_t mode;
585
586	sc = sc_if->msk_softc;
587
588	MSK_IF_LOCK_ASSERT(sc_if);
589
590	ifp = sc_if->msk_ifp;
591
592	bzero(mchash, sizeof(mchash));
593	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
594	if ((ifp->if_flags & IFF_PROMISC) != 0)
595		mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
596	else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
597		mode |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
598		mchash[0] = 0xffff;
599		mchash[1] = 0xffff;
600	} else {
601		mode |= GM_RXCR_UCF_ENA;
602		if_maddr_rlock(ifp);
603		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
604			if (ifma->ifma_addr->sa_family != AF_LINK)
605				continue;
606			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
607			    ifma->ifma_addr), ETHER_ADDR_LEN);
608			/* Just want the 6 least significant bits. */
609			crc &= 0x3f;
610			/* Set the corresponding bit in the hash table. */
611			mchash[crc >> 5] |= 1 << (crc & 0x1f);
612		}
613		if_maddr_runlock(ifp);
614		if (mchash[0] != 0 || mchash[1] != 0)
615			mode |= GM_RXCR_MCF_ENA;
616	}
617
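	/*
	 * Program the 64-bit multicast hash filter, which is split across
	 * four 16-bit GMAC registers (GM_MC_ADDR_H1..H4), low word first.
	 */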
618	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
619	    mchash[0] & 0xffff);
620	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
621	    (mchash[0] >> 16) & 0xffff);
622	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
623	    mchash[1] & 0xffff);
624	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
625	    (mchash[1] >> 16) & 0xffff);
626	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
627}
628
629static void
630msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
631{
632	struct msk_softc *sc;
633
634	sc = sc_if->msk_softc;
635	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
636		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
637		    RX_VLAN_STRIP_ON);
638		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
639		    TX_VLAN_TAG_ON);
640	} else {
641		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
642		    RX_VLAN_STRIP_OFF);
643		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
644		    TX_VLAN_TAG_OFF);
645	}
646}
647
648static int
649msk_rx_fill(struct msk_if_softc *sc_if, int jumbo)
650{
651	uint16_t idx;
652	int i;
653
654	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
655	    (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
656		/* Wait until controller executes OP_TCPSTART command. */
657		for (i = 100; i > 0; i--) {
658			DELAY(100);
659			idx = CSR_READ_2(sc_if->msk_softc,
660			    Y2_PREF_Q_ADDR(sc_if->msk_rxq,
661			    PREF_UNIT_GET_IDX_REG));
662			if (idx != 0)
663				break;
664		}
665		if (i == 0) {
666			device_printf(sc_if->msk_if_dev,
667			    "prefetch unit stuck?\n");
668			return (ETIMEDOUT);
669		}
		/*
		 * Refill the list element consumed by OP_TCPSTART with a
		 * fresh buffer.  This could be done in the Rx handler, but
		 * we do not want to add a special case to the fast path.
		 */
675		if (jumbo > 0) {
676			if (msk_jumbo_newbuf(sc_if, 0) != 0)
677				return (ENOBUFS);
678			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
679			    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
680			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
681		} else {
682			if (msk_newbuf(sc_if, 0) != 0)
683				return (ENOBUFS);
684			bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
685			    sc_if->msk_cdata.msk_rx_ring_map,
686			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
687		}
688		sc_if->msk_cdata.msk_rx_prod = 0;
689		CSR_WRITE_2(sc_if->msk_softc,
690		    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
691		    sc_if->msk_cdata.msk_rx_prod);
692	}
693	return (0);
694}
695
696static int
697msk_init_rx_ring(struct msk_if_softc *sc_if)
698{
699	struct msk_ring_data *rd;
700	struct msk_rxdesc *rxd;
701	int i, nbuf, prod;
702
703	MSK_IF_LOCK_ASSERT(sc_if);
704
705	sc_if->msk_cdata.msk_rx_cons = 0;
706	sc_if->msk_cdata.msk_rx_prod = 0;
707	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
708
709	rd = &sc_if->msk_rdata;
710	bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
711	for (i = prod = 0; i < MSK_RX_RING_CNT; i++) {
712		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
713		rxd->rx_m = NULL;
714		rxd->rx_le = &rd->msk_rx_ring[prod];
715		MSK_INC(prod, MSK_RX_RING_CNT);
716	}
717	nbuf = MSK_RX_BUF_CNT;
718	prod = 0;
	/* Tell the controller where to start computing the Rx checksum. */
720	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
721	    (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
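		/*
		 * Post an OP_TCPSTART list element; its address field holds
		 * the Rx checksum start offset (here the end of the Ethernet
		 * header) in both 16-bit halves.
		 */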
722#ifdef MSK_64BIT_DMA
723		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
724		rxd->rx_m = NULL;
725		rxd->rx_le = &rd->msk_rx_ring[prod];
726		rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
727		    ETHER_HDR_LEN);
728		rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
729		MSK_INC(prod, MSK_RX_RING_CNT);
730		MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
731#endif
732		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
733		rxd->rx_m = NULL;
734		rxd->rx_le = &rd->msk_rx_ring[prod];
735		rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
736		    ETHER_HDR_LEN);
737		rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
738		MSK_INC(prod, MSK_RX_RING_CNT);
739		MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
740		nbuf--;
741	}
742	for (i = 0; i < nbuf; i++) {
743		if (msk_newbuf(sc_if, prod) != 0)
744			return (ENOBUFS);
745		MSK_RX_INC(prod, MSK_RX_RING_CNT);
746	}
747
748	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
749	    sc_if->msk_cdata.msk_rx_ring_map,
750	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
751
752	/* Update prefetch unit. */
753	sc_if->msk_cdata.msk_rx_prod = prod;
754	CSR_WRITE_2(sc_if->msk_softc,
755	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
756	    (sc_if->msk_cdata.msk_rx_prod + MSK_RX_RING_CNT - 1) %
757	    MSK_RX_RING_CNT);
758	if (msk_rx_fill(sc_if, 0) != 0)
759		return (ENOBUFS);
760	return (0);
761}
762
763static int
764msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
765{
766	struct msk_ring_data *rd;
767	struct msk_rxdesc *rxd;
768	int i, nbuf, prod;
769
770	MSK_IF_LOCK_ASSERT(sc_if);
771
772	sc_if->msk_cdata.msk_rx_cons = 0;
773	sc_if->msk_cdata.msk_rx_prod = 0;
774	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
775
776	rd = &sc_if->msk_rdata;
777	bzero(rd->msk_jumbo_rx_ring,
778	    sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
779	for (i = prod = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
780		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
781		rxd->rx_m = NULL;
782		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
783		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
784	}
785	nbuf = MSK_RX_BUF_CNT;
786	prod = 0;
	/* Tell the controller where to start computing the Rx checksum. */
788	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
789	    (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
790#ifdef MSK_64BIT_DMA
791		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
792		rxd->rx_m = NULL;
793		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
794		rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
795		    ETHER_HDR_LEN);
796		rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
797		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
798		MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
799#endif
800		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
801		rxd->rx_m = NULL;
802		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
803		rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
804		    ETHER_HDR_LEN);
805		rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
806		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
807		MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
808		nbuf--;
809	}
810	for (i = 0; i < nbuf; i++) {
811		if (msk_jumbo_newbuf(sc_if, prod) != 0)
812			return (ENOBUFS);
813		MSK_RX_INC(prod, MSK_JUMBO_RX_RING_CNT);
814	}
815
816	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
817	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
818	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
819
820	/* Update prefetch unit. */
821	sc_if->msk_cdata.msk_rx_prod = prod;
822	CSR_WRITE_2(sc_if->msk_softc,
823	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
824	    (sc_if->msk_cdata.msk_rx_prod + MSK_JUMBO_RX_RING_CNT - 1) %
825	    MSK_JUMBO_RX_RING_CNT);
826	if (msk_rx_fill(sc_if, 1) != 0)
827		return (ENOBUFS);
828	return (0);
829}
830
831static void
832msk_init_tx_ring(struct msk_if_softc *sc_if)
833{
834	struct msk_ring_data *rd;
835	struct msk_txdesc *txd;
836	int i;
837
838	sc_if->msk_cdata.msk_tso_mtu = 0;
839	sc_if->msk_cdata.msk_last_csum = 0;
840	sc_if->msk_cdata.msk_tx_prod = 0;
841	sc_if->msk_cdata.msk_tx_cons = 0;
842	sc_if->msk_cdata.msk_tx_cnt = 0;
843	sc_if->msk_cdata.msk_tx_high_addr = 0;
844
845	rd = &sc_if->msk_rdata;
846	bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
847	for (i = 0; i < MSK_TX_RING_CNT; i++) {
848		txd = &sc_if->msk_cdata.msk_txdesc[i];
849		txd->tx_m = NULL;
850		txd->tx_le = &rd->msk_tx_ring[i];
851	}
852
853	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
854	    sc_if->msk_cdata.msk_tx_ring_map,
855	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
856}
857
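/*
 * Hand an Rx buffer back to the hardware without replacing its mbuf;
 * typically used when the received frame has to be dropped (e.g. on
 * error or when a replacement mbuf cannot be allocated).
 */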
858static __inline void
859msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
860{
861	struct msk_rx_desc *rx_le;
862	struct msk_rxdesc *rxd;
863	struct mbuf *m;
864
865#ifdef MSK_64BIT_DMA
866	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
867	rx_le = rxd->rx_le;
868	rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
869	MSK_INC(idx, MSK_RX_RING_CNT);
870#endif
871	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
872	m = rxd->rx_m;
873	rx_le = rxd->rx_le;
874	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
875}
876
877static __inline void
msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
879{
880	struct msk_rx_desc *rx_le;
881	struct msk_rxdesc *rxd;
882	struct mbuf *m;
883
884#ifdef MSK_64BIT_DMA
885	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
886	rx_le = rxd->rx_le;
887	rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
888	MSK_INC(idx, MSK_JUMBO_RX_RING_CNT);
889#endif
890	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
891	m = rxd->rx_m;
892	rx_le = rxd->rx_le;
893	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
894}
895
896static int
897msk_newbuf(struct msk_if_softc *sc_if, int idx)
898{
899	struct msk_rx_desc *rx_le;
900	struct msk_rxdesc *rxd;
901	struct mbuf *m;
902	bus_dma_segment_t segs[1];
903	bus_dmamap_t map;
904	int nsegs;
905
906	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
907	if (m == NULL)
908		return (ENOBUFS);
909
910	m->m_len = m->m_pkthdr.len = MCLBYTES;
911	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
912		m_adj(m, ETHER_ALIGN);
913#ifndef __NO_STRICT_ALIGNMENT
914	else
915		m_adj(m, MSK_RX_BUF_ALIGN);
916#endif
917
918	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag,
919	    sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs,
920	    BUS_DMA_NOWAIT) != 0) {
921		m_freem(m);
922		return (ENOBUFS);
923	}
924	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
925
926	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
927#ifdef MSK_64BIT_DMA
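	/*
	 * With 64-bit DMA every Rx buffer occupies two list elements: an
	 * OP_ADDR64 element carrying the upper 32 address bits, followed
	 * by the OP_PACKET element filled in below.
	 */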
928	rx_le = rxd->rx_le;
929	rx_le->msk_addr = htole32(MSK_ADDR_HI(segs[0].ds_addr));
930	rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
931	MSK_INC(idx, MSK_RX_RING_CNT);
932	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
933#endif
934	if (rxd->rx_m != NULL) {
935		bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
936		    BUS_DMASYNC_POSTREAD);
937		bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
938		rxd->rx_m = NULL;
939	}
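	/*
	 * The new mbuf was loaded into the spare DMA map above; swap the
	 * spare with this slot's map so that a spare map is always
	 * available for the next replacement.
	 */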
940	map = rxd->rx_dmamap;
941	rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
942	sc_if->msk_cdata.msk_rx_sparemap = map;
943	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
944	    BUS_DMASYNC_PREREAD);
945	rxd->rx_m = m;
946	rx_le = rxd->rx_le;
947	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
948	rx_le->msk_control =
949	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
950
951	return (0);
952}
953
954static int
955msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
956{
957	struct msk_rx_desc *rx_le;
958	struct msk_rxdesc *rxd;
959	struct mbuf *m;
960	bus_dma_segment_t segs[1];
961	bus_dmamap_t map;
962	int nsegs;
963
964	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
965	if (m == NULL)
966		return (ENOBUFS);
967	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
968	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
969		m_adj(m, ETHER_ALIGN);
970#ifndef __NO_STRICT_ALIGNMENT
971	else
972		m_adj(m, MSK_RX_BUF_ALIGN);
973#endif
974
975	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
976	    sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
977	    BUS_DMA_NOWAIT) != 0) {
978		m_freem(m);
979		return (ENOBUFS);
980	}
981	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
982
983	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
984#ifdef MSK_64BIT_DMA
985	rx_le = rxd->rx_le;
986	rx_le->msk_addr = htole32(MSK_ADDR_HI(segs[0].ds_addr));
987	rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
988	MSK_INC(idx, MSK_JUMBO_RX_RING_CNT);
989	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
990#endif
991	if (rxd->rx_m != NULL) {
992		bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
993		    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
994		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
995		    rxd->rx_dmamap);
996		rxd->rx_m = NULL;
997	}
998	map = rxd->rx_dmamap;
999	rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
1000	sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
1001	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
1002	    BUS_DMASYNC_PREREAD);
1003	rxd->rx_m = m;
1004	rx_le = rxd->rx_le;
1005	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
1006	rx_le->msk_control =
1007	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
1008
1009	return (0);
1010}
1011
1012/*
1013 * Set media options.
1014 */
1015static int
1016msk_mediachange(struct ifnet *ifp)
1017{
1018	struct msk_if_softc *sc_if;
1019	struct mii_data	*mii;
1020	int error;
1021
1022	sc_if = ifp->if_softc;
1023
1024	MSK_IF_LOCK(sc_if);
1025	mii = device_get_softc(sc_if->msk_miibus);
1026	error = mii_mediachg(mii);
1027	MSK_IF_UNLOCK(sc_if);
1028
1029	return (error);
1030}
1031
1032/*
1033 * Report current media status.
1034 */
1035static void
1036msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1037{
1038	struct msk_if_softc *sc_if;
1039	struct mii_data	*mii;
1040
1041	sc_if = ifp->if_softc;
1042	MSK_IF_LOCK(sc_if);
1043	if ((ifp->if_flags & IFF_UP) == 0) {
1044		MSK_IF_UNLOCK(sc_if);
1045		return;
1046	}
1047	mii = device_get_softc(sc_if->msk_miibus);
1048
1049	mii_pollstat(mii);
1050	ifmr->ifm_active = mii->mii_media_active;
1051	ifmr->ifm_status = mii->mii_media_status;
1052	MSK_IF_UNLOCK(sc_if);
1053}
1054
1055static int
1056msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1057{
1058	struct msk_if_softc *sc_if;
1059	struct ifreq *ifr;
1060	struct mii_data	*mii;
1061	int error, mask, reinit;
1062
1063	sc_if = ifp->if_softc;
1064	ifr = (struct ifreq *)data;
1065	error = 0;
1066
	switch (command) {
1068	case SIOCSIFMTU:
1069		MSK_IF_LOCK(sc_if);
1070		if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN)
1071			error = EINVAL;
1072		else if (ifp->if_mtu != ifr->ifr_mtu) {
1073			if (ifr->ifr_mtu > ETHERMTU) {
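				/*
				 * Jumbo MTUs require jumbo buffer support;
				 * on chips that cannot checksum jumbo
				 * frames, Tx checksum offload and TSO are
				 * turned off as well.
				 */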
1074				if ((sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
1075					error = EINVAL;
1076					MSK_IF_UNLOCK(sc_if);
1077					break;
1078				}
1079				if ((sc_if->msk_flags &
1080				    MSK_FLAG_JUMBO_NOCSUM) != 0) {
1081					ifp->if_hwassist &=
1082					    ~(MSK_CSUM_FEATURES | CSUM_TSO);
1083					ifp->if_capenable &=
1084					    ~(IFCAP_TSO4 | IFCAP_TXCSUM);
1085					VLAN_CAPABILITIES(ifp);
1086				}
1087			}
1088			ifp->if_mtu = ifr->ifr_mtu;
1089			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1090				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1091				msk_init_locked(sc_if);
1092			}
1093		}
1094		MSK_IF_UNLOCK(sc_if);
1095		break;
1096	case SIOCSIFFLAGS:
1097		MSK_IF_LOCK(sc_if);
1098		if ((ifp->if_flags & IFF_UP) != 0) {
1099			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
1100			    ((ifp->if_flags ^ sc_if->msk_if_flags) &
1101			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
1102				msk_rxfilter(sc_if);
1103			else if ((sc_if->msk_flags & MSK_FLAG_DETACH) == 0)
1104				msk_init_locked(sc_if);
1105		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1106			msk_stop(sc_if);
1107		sc_if->msk_if_flags = ifp->if_flags;
1108		MSK_IF_UNLOCK(sc_if);
1109		break;
1110	case SIOCADDMULTI:
1111	case SIOCDELMULTI:
1112		MSK_IF_LOCK(sc_if);
1113		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1114			msk_rxfilter(sc_if);
1115		MSK_IF_UNLOCK(sc_if);
1116		break;
1117	case SIOCGIFMEDIA:
1118	case SIOCSIFMEDIA:
1119		mii = device_get_softc(sc_if->msk_miibus);
1120		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1121		break;
1122	case SIOCSIFCAP:
1123		reinit = 0;
1124		MSK_IF_LOCK(sc_if);
1125		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1126		if ((mask & IFCAP_TXCSUM) != 0 &&
1127		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
1128			ifp->if_capenable ^= IFCAP_TXCSUM;
1129			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
1130				ifp->if_hwassist |= MSK_CSUM_FEATURES;
1131			else
1132				ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
1133		}
1134		if ((mask & IFCAP_RXCSUM) != 0 &&
1135		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
1136			ifp->if_capenable ^= IFCAP_RXCSUM;
1137			if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0)
1138				reinit = 1;
1139		}
1140		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
1141		    (IFCAP_VLAN_HWCSUM & ifp->if_capabilities) != 0)
1142			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1143		if ((mask & IFCAP_TSO4) != 0 &&
1144		    (IFCAP_TSO4 & ifp->if_capabilities) != 0) {
1145			ifp->if_capenable ^= IFCAP_TSO4;
1146			if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
1147				ifp->if_hwassist |= CSUM_TSO;
1148			else
1149				ifp->if_hwassist &= ~CSUM_TSO;
1150		}
1151		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
1152		    (IFCAP_VLAN_HWTSO & ifp->if_capabilities) != 0)
1153			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1154		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
1155		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
1156			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1157			if ((IFCAP_VLAN_HWTAGGING & ifp->if_capenable) == 0)
1158				ifp->if_capenable &=
1159				    ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
1160			msk_setvlan(sc_if, ifp);
1161		}
1162		if (ifp->if_mtu > ETHERMTU &&
1163		    (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
1164			ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
1165			ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
1166		}
1167		VLAN_CAPABILITIES(ifp);
1168		if (reinit > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1169			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1170			msk_init_locked(sc_if);
1171		}
1172		MSK_IF_UNLOCK(sc_if);
1173		break;
1174	default:
1175		error = ether_ioctl(ifp, command, data);
1176		break;
1177	}
1178
1179	return (error);
1180}
1181
1182static int
1183mskc_probe(device_t dev)
1184{
1185	const struct msk_product *mp;
1186	uint16_t vendor, devid;
1187	int i;
1188
1189	vendor = pci_get_vendor(dev);
1190	devid = pci_get_device(dev);
1191	mp = msk_products;
1192	for (i = 0; i < nitems(msk_products); i++, mp++) {
1193		if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
1194			device_set_desc(dev, mp->msk_name);
1195			return (BUS_PROBE_DEFAULT);
1196		}
1197	}
1198
1199	return (ENXIO);
1200}
1201
1202static int
1203mskc_setup_rambuffer(struct msk_softc *sc)
1204{
1205	int next;
1206	int i;
1207
1208	/* Get adapter SRAM size. */
1209	sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
1210	if (bootverbose)
1211		device_printf(sc->msk_dev,
1212		    "RAM buffer size : %dKB\n", sc->msk_ramsize);
1213	if (sc->msk_ramsize == 0)
1214		return (0);
1215
1216	sc->msk_pflags |= MSK_FLAG_RAMBUF;
	/*
	 * Give the receiver 2/3 of the memory and round down to a
	 * multiple of 1024; the Tx/Rx RAM buffer sizes of the Yukon II
	 * must be multiples of 1024.
	 */
1222	sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
1223	sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
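	/*
	 * Lay the queues out back to back in adapter SRAM: port 0 Rx,
	 * port 0 Tx, then (for dual-port adapters) port 1 Rx, port 1 Tx.
	 */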
1224	for (i = 0, next = 0; i < sc->msk_num_port; i++) {
1225		sc->msk_rxqstart[i] = next;
1226		sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
1227		next = sc->msk_rxqend[i] + 1;
1228		sc->msk_txqstart[i] = next;
1229		sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
1230		next = sc->msk_txqend[i] + 1;
1231		if (bootverbose) {
1232			device_printf(sc->msk_dev,
1233			    "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
1234			    sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
1235			    sc->msk_rxqend[i]);
1236			device_printf(sc->msk_dev,
1237			    "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
1238			    sc->msk_txqsize / 1024, sc->msk_txqstart[i],
1239			    sc->msk_txqend[i]);
1240		}
1241	}
1242
1243	return (0);
1244}
1245
1246static void
1247msk_phy_power(struct msk_softc *sc, int mode)
1248{
1249	uint32_t our, val;
1250	int i;
1251
1252	switch (mode) {
1253	case MSK_PHY_POWERUP:
		/* Switch power to VCC (workaround for the VAUX problem). */
1255		CSR_WRITE_1(sc, B0_POWER_CTRL,
1256		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
1257		/* Disable Core Clock Division, set Clock Select to 0. */
1258		CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
1259
1260		val = 0;
1261		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1262		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1263			/* Enable bits are inverted. */
1264			val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1265			      Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1266			      Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1267		}
1268		/*
1269		 * Enable PCI & Core Clock, enable clock gating for both Links.
1270		 */
1271		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1272
1273		our = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
1274		our &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
1275		if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
1276			if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1277				/* Deassert Low Power for 1st PHY. */
1278				our |= PCI_Y2_PHY1_COMA;
1279				if (sc->msk_num_port > 1)
1280					our |= PCI_Y2_PHY2_COMA;
1281			}
1282		}
1283		if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U ||
1284		    sc->msk_hw_id == CHIP_ID_YUKON_EX ||
1285		    sc->msk_hw_id >= CHIP_ID_YUKON_FE_P) {
1286			val = CSR_PCI_READ_4(sc, PCI_OUR_REG_4);
1287			val &= (PCI_FORCE_ASPM_REQUEST |
1288			    PCI_ASPM_GPHY_LINK_DOWN | PCI_ASPM_INT_FIFO_EMPTY |
1289			    PCI_ASPM_CLKRUN_REQUEST);
1290			/* Set all bits to 0 except bits 15..12. */
1291			CSR_PCI_WRITE_4(sc, PCI_OUR_REG_4, val);
1292			val = CSR_PCI_READ_4(sc, PCI_OUR_REG_5);
1293			val &= PCI_CTL_TIM_VMAIN_AV_MSK;
1294			CSR_PCI_WRITE_4(sc, PCI_OUR_REG_5, val);
1295			CSR_PCI_WRITE_4(sc, PCI_CFG_REG_1, 0);
1296			CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
1297			/*
1298			 * Disable status race, workaround for
1299			 * Yukon EC Ultra & Yukon EX.
1300			 */
1301			val = CSR_READ_4(sc, B2_GP_IO);
1302			val |= GLB_GPIO_STAT_RACE_DIS;
1303			CSR_WRITE_4(sc, B2_GP_IO, val);
1304			CSR_READ_4(sc, B2_GP_IO);
1305		}
1306		/* Release PHY from PowerDown/COMA mode. */
1307		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, our);
1308
1309		for (i = 0; i < sc->msk_num_port; i++) {
1310			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1311			    GMLC_RST_SET);
1312			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1313			    GMLC_RST_CLR);
1314		}
1315		break;
1316	case MSK_PHY_POWERDOWN:
1317		val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
1318		val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
1319		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1320		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1321			val &= ~PCI_Y2_PHY1_COMA;
1322			if (sc->msk_num_port > 1)
1323				val &= ~PCI_Y2_PHY2_COMA;
1324		}
1325		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);
1326
1327		val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1328		      Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1329		      Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1330		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1331		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1332			/* Enable bits are inverted. */
1333			val = 0;
1334		}
1335		/*
1336		 * Disable PCI & Core Clock, disable clock gating for
1337		 * both Links.
1338		 */
1339		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1340		CSR_WRITE_1(sc, B0_POWER_CTRL,
1341		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
1342		break;
1343	default:
1344		break;
1345	}
1346}
1347
1348static void
1349mskc_reset(struct msk_softc *sc)
1350{
1351	bus_addr_t addr;
1352	uint16_t status;
1353	uint32_t val;
1354	int i, initram;
1355
1356	/* Disable ASF. */
1357	if (sc->msk_hw_id >= CHIP_ID_YUKON_XL &&
1358	    sc->msk_hw_id <= CHIP_ID_YUKON_SUPR) {
1359		if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
1360		    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
1361			CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
1362			status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR);
1363			/* Clear AHB bridge & microcontroller reset. */
1364			status &= ~(Y2_ASF_HCU_CCSR_AHB_RST |
1365			    Y2_ASF_HCU_CCSR_CPU_RST_MODE);
1366			/* Clear ASF microcontroller state. */
1367			status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK;
1368			status &= ~Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_MSK;
1369			CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status);
1370			CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
1371		} else
1372			CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
1373		CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
1374		/*
1375		 * Since we disabled ASF, S/W reset is required for
1376		 * Power Management.
1377		 */
1378		CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1379		CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1380	}
1381
1382	/* Clear all error bits in the PCI status register. */
1383	status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
1384	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1385
1386	pci_write_config(sc->msk_dev, PCIR_STATUS, status |
1387	    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
1388	    PCIM_STATUS_RTABORT | PCIM_STATUS_MDPERR, 2);
1389	CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);
1390
1391	switch (sc->msk_bustype) {
1392	case MSK_PEX_BUS:
1393		/* Clear all PEX errors. */
1394		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
1395		val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
1396		if ((val & PEX_RX_OV) != 0) {
1397			sc->msk_intrmask &= ~Y2_IS_HW_ERR;
1398			sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
1399		}
1400		break;
1401	case MSK_PCI_BUS:
1402	case MSK_PCIX_BUS:
		/* Set Cache Line Size to 2 (8 bytes) if it is configured to 0. */
1404		val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
1405		if (val == 0)
1406			pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
1407		if (sc->msk_bustype == MSK_PCIX_BUS) {
1408			/* Set Cache Line Size opt. */
1409			val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1410			val |= PCI_CLS_OPT;
1411			pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1412		}
1413		break;
1414	}
1415	/* Set PHY power state. */
1416	msk_phy_power(sc, MSK_PHY_POWERUP);
1417
1418	/* Reset GPHY/GMAC Control */
1419	for (i = 0; i < sc->msk_num_port; i++) {
1420		/* GPHY Control reset. */
1421		CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
1422		CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
1423		/* GMAC Control reset. */
1424		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
1425		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
1426		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
1427		if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
1428		    sc->msk_hw_id == CHIP_ID_YUKON_SUPR)
1429			CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL),
1430			    GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
1431			    GMC_BYP_RETR_ON);
1432	}
1433
1434	if (sc->msk_hw_id == CHIP_ID_YUKON_SUPR &&
1435	    sc->msk_hw_rev > CHIP_REV_YU_SU_B0)
1436		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, PCI_CLK_MACSEC_DIS);
1437	if (sc->msk_hw_id == CHIP_ID_YUKON_OPT && sc->msk_hw_rev == 0) {
		/* Disable PCIe PHY powerdown (reg 0x80, bit 7). */
1439		CSR_WRITE_4(sc, Y2_PEX_PHY_DATA, (0x0080 << 16) | 0x0080);
1440	}
1441	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1442
1443	/* LED On. */
1444	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);
1445
1446	/* Clear TWSI IRQ. */
1447	CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);
1448
1449	/* Turn off hardware timer. */
1450	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
1451	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);
1452
1453	/* Turn off descriptor polling. */
1454	CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);
1455
1456	/* Turn off time stamps. */
1457	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
1458	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
1459
1460	initram = 0;
1461	if (sc->msk_hw_id == CHIP_ID_YUKON_XL ||
1462	    sc->msk_hw_id == CHIP_ID_YUKON_EC ||
1463	    sc->msk_hw_id == CHIP_ID_YUKON_FE)
1464		initram++;
1465
	/* Configure RAM interface timeout values on Yukon XL, EC and FE. */
1467	for (i = 0; initram > 0 && i < sc->msk_num_port; i++) {
1468		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
1469		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
1470		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
1471		    MSK_RI_TO_53);
1472		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
1473		    MSK_RI_TO_53);
1474		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
1475		    MSK_RI_TO_53);
1476		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
1477		    MSK_RI_TO_53);
1478		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
1479		    MSK_RI_TO_53);
1480		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
1481		    MSK_RI_TO_53);
1482		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
1483		    MSK_RI_TO_53);
1484		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
1485		    MSK_RI_TO_53);
1486		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
1487		    MSK_RI_TO_53);
1488		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
1489		    MSK_RI_TO_53);
1490		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
1491		    MSK_RI_TO_53);
1492		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
1493		    MSK_RI_TO_53);
1494	}
1495
1496	/* Disable all interrupts. */
1497	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1498	CSR_READ_4(sc, B0_HWE_IMSK);
1499	CSR_WRITE_4(sc, B0_IMSK, 0);
1500	CSR_READ_4(sc, B0_IMSK);
1501
	/*
	 * On dual-port PCI-X cards there is a problem where status list
	 * entries can be received out of order due to split transactions.
	 */
1506	if (sc->msk_pcixcap != 0 && sc->msk_num_port > 1) {
1507		uint16_t pcix_cmd;
1508
1509		pcix_cmd = pci_read_config(sc->msk_dev,
1510		    sc->msk_pcixcap + PCIXR_COMMAND, 2);
1511		/* Clear Max Outstanding Split Transactions. */
1512		pcix_cmd &= ~PCIXM_COMMAND_MAX_SPLITS;
1513		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1514		pci_write_config(sc->msk_dev,
1515		    sc->msk_pcixcap + PCIXR_COMMAND, pcix_cmd, 2);
1516		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}
1518	if (sc->msk_expcap != 0) {
1519		/* Change Max. Read Request Size to 2048 bytes. */
1520		if (pci_get_max_read_req(sc->msk_dev) == 512)
1521			pci_set_max_read_req(sc->msk_dev, 2048);
1522	}
1523
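	/*
	 * (Re)initialize the status list: the status BMU writes completion
	 * events for both ports into this ring in host memory, so clear
	 * the ring, reset the unit and program its base address and last
	 * index before turning it back on.
	 */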
1524	/* Clear status list. */
1525	bzero(sc->msk_stat_ring,
1526	    sizeof(struct msk_stat_desc) * sc->msk_stat_count);
1527	sc->msk_stat_cons = 0;
1528	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
1529	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1530	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
1531	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
1532	/* Set the status list base address. */
1533	addr = sc->msk_stat_ring_paddr;
1534	CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
1535	CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
1536	/* Set the status list last index. */
1537	CSR_WRITE_2(sc, STAT_LAST_IDX, sc->msk_stat_count - 1);
1538	if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
1539	    sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
		/* Workaround for device issue #4.3. */
1541		CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
		/* Workaround for device issue #4.18. */
1543		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
1544		CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
1545	} else {
1546		CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
1547		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
1548		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1549		    sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
1550			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
1551		else
1552			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
1553		CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
1554	}
1555	/*
1556	 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
1557	 */
1558	CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));
1559
1560	/* Enable status unit. */
1561	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);
1562
1563	CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
1564	CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
1565	CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
1566}
1567
1568static int
1569msk_probe(device_t dev)
1570{
1571	struct msk_softc *sc;
1572	char desc[100];
1573
1574	sc = device_get_softc(device_get_parent(dev));
1575	/*
1576	 * Not much to do here. We always know there will be
1577	 * at least one GMAC present, and if there are two,
1578	 * mskc_attach() will create a second device instance
1579	 * for us.
1580	 */
1581	snprintf(desc, sizeof(desc),
1582	    "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
1583	    model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
1584	    sc->msk_hw_rev);
1585	device_set_desc_copy(dev, desc);
1586
1587	return (BUS_PROBE_DEFAULT);
1588}
1589
1590static int
1591msk_attach(device_t dev)
1592{
1593	struct msk_softc *sc;
1594	struct msk_if_softc *sc_if;
1595	struct ifnet *ifp;
1596	struct msk_mii_data *mmd;
1597	int i, port, error;
1598	uint8_t eaddr[6];
1599
1600	if (dev == NULL)
1601		return (EINVAL);
1602
1603	error = 0;
1604	sc_if = device_get_softc(dev);
1605	sc = device_get_softc(device_get_parent(dev));
1606	mmd = device_get_ivars(dev);
1607	port = mmd->port;
1608
1609	sc_if->msk_if_dev = dev;
1610	sc_if->msk_port = port;
1611	sc_if->msk_softc = sc;
1612	sc_if->msk_flags = sc->msk_pflags;
1613	sc->msk_if[port] = sc_if;
1614	/* Setup Tx/Rx queue register offsets. */
1615	if (port == MSK_PORT_A) {
1616		sc_if->msk_txq = Q_XA1;
1617		sc_if->msk_txsq = Q_XS1;
1618		sc_if->msk_rxq = Q_R1;
1619	} else {
1620		sc_if->msk_txq = Q_XA2;
1621		sc_if->msk_txsq = Q_XS2;
1622		sc_if->msk_rxq = Q_R2;
1623	}
1624
1625	callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
1626	msk_sysctl_node(sc_if);
1627
1628	if ((error = msk_txrx_dma_alloc(sc_if)) != 0)
1629		goto fail;
1630	msk_rx_dma_jalloc(sc_if);
1631
1632	ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
1633	if (ifp == NULL) {
1634		device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
1635		error = ENOSPC;
1636		goto fail;
1637	}
1638	ifp->if_softc = sc_if;
1639	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1640	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1641	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
	/*
	 * Enable Rx checksum offloading: for the old descriptor format on
	 * every controller except Yukon XL, and for the new descriptor
	 * format whenever the controller supports it.
	 */
1646	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
1647	    sc->msk_hw_id != CHIP_ID_YUKON_XL)
1648		ifp->if_capabilities |= IFCAP_RXCSUM;
1649	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
1650	    (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
1651		ifp->if_capabilities |= IFCAP_RXCSUM;
1652	ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO;
1653	ifp->if_capenable = ifp->if_capabilities;
1654	ifp->if_ioctl = msk_ioctl;
1655	ifp->if_start = msk_start;
1656	ifp->if_init = msk_init;
1657	IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
1658	ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1;
1659	IFQ_SET_READY(&ifp->if_snd);
1660	/*
1661	 * Get station address for this interface. Note that
1662	 * dual port cards actually come with three station
1663	 * addresses: one for each port, plus an extra. The
1664	 * extra one is used by the SysKonnect driver software
1665	 * as a 'virtual' station address for when both ports
1666	 * are operating in failover mode. Currently we don't
1667	 * use this extra address.
1668	 */
1669	MSK_IF_LOCK(sc_if);
1670	for (i = 0; i < ETHER_ADDR_LEN; i++)
1671		eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);
1672
1673	/*
1674	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
1675	 */
1676	MSK_IF_UNLOCK(sc_if);
1677	ether_ifattach(ifp, eaddr);
1678	MSK_IF_LOCK(sc_if);
1679
1680	/* VLAN capability setup */
1681	ifp->if_capabilities |= IFCAP_VLAN_MTU;
1682	if ((sc_if->msk_flags & MSK_FLAG_NOHWVLAN) == 0) {
1683		/*
1684		 * Due to Tx checksum offload hardware bugs, msk(4) manually
1685		 * computes checksum for short frames. For VLAN tagged frames
1686		 * this workaround does not work so disable checksum offload
1687		 * for VLAN interface.
1688		 */
1689		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO;
1690		/*
1691		 * Enable Rx checksum offloading for VLAN tagged frames
1692		 * if controller support new descriptor format.
1693		 */
1694		if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
1695		    (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
1696			ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
1697	}
1698	ifp->if_capenable = ifp->if_capabilities;
1699	/*
1700	 * Disable RX checksum offloading on controllers that don't use
1701	 * new descriptor format but give chance to enable it.
1702	 */
1703	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0)
1704		ifp->if_capenable &= ~IFCAP_RXCSUM;
1705
1706	/*
1707	 * Tell the upper layer(s) we support long frames.
1708	 * Must appear after the call to ether_ifattach() because
1709	 * ether_ifattach() sets ifi_hdrlen to the default value.
1710	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1712
1713	/*
1714	 * Do miibus setup.
1715	 */
1716	MSK_IF_UNLOCK(sc_if);
1717	error = mii_attach(dev, &sc_if->msk_miibus, ifp, msk_mediachange,
1718	    msk_mediastatus, BMSR_DEFCAPMASK, PHY_ADDR_MARV, MII_OFFSET_ANY,
1719	    mmd->mii_flags);
1720	if (error != 0) {
1721		device_printf(sc_if->msk_if_dev, "attaching PHYs failed\n");
1722		ether_ifdetach(ifp);
1723		error = ENXIO;
1724		goto fail;
1725	}
1726
1727fail:
1728	if (error != 0) {
1729		/* Access should be ok even though lock has been dropped */
1730		sc->msk_if[port] = NULL;
1731		msk_detach(dev);
1732	}
1733
1734	return (error);
1735}
1736
1737/*
1738 * Attach the interface. Allocate softc structures, do ifmedia
1739 * setup and ethernet/BPF attach.
1740 */
1741static int
1742mskc_attach(device_t dev)
1743{
1744	struct msk_softc *sc;
1745	struct msk_mii_data *mmd;
1746	int error, msic, msir, reg;
1747
1748	sc = device_get_softc(dev);
1749	sc->msk_dev = dev;
1750	mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1751	    MTX_DEF);
1752
1753	/*
1754	 * Map control/status registers.
1755	 */
1756	pci_enable_busmaster(dev);
1757
1758	/* Allocate I/O resource */
1759#ifdef MSK_USEIOSPACE
1760	sc->msk_res_spec = msk_res_spec_io;
1761#else
1762	sc->msk_res_spec = msk_res_spec_mem;
1763#endif
1764	sc->msk_irq_spec = msk_irq_spec_legacy;
1765	error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
1766	if (error) {
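		/*
		 * The preferred register space could not be mapped; retry
		 * with the other one (memory vs. I/O) before giving up.
		 */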
1767		if (sc->msk_res_spec == msk_res_spec_mem)
1768			sc->msk_res_spec = msk_res_spec_io;
1769		else
1770			sc->msk_res_spec = msk_res_spec_mem;
1771		error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
1772		if (error) {
1773			device_printf(dev, "couldn't allocate %s resources\n",
1774			    sc->msk_res_spec == msk_res_spec_mem ? "memory" :
1775			    "I/O");
1776			mtx_destroy(&sc->msk_mtx);
1777			return (ENXIO);
1778		}
1779	}
1780
1781	/* Enable all clocks before accessing any registers. */
1782	CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
1783
1784	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1785	sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
1786	sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
1787	/* Bail out if chip is not recognized. */
1788	if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
1789	    sc->msk_hw_id > CHIP_ID_YUKON_OPT ||
1790	    sc->msk_hw_id == CHIP_ID_YUKON_UNKNOWN) {
1791		device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
1792		    sc->msk_hw_id, sc->msk_hw_rev);
1793		mtx_destroy(&sc->msk_mtx);
1794		return (ENXIO);
1795	}
1796
1797	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1798	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1799	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
1800	    &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
1801	    "max number of Rx events to process");
1802
1803	sc->msk_process_limit = MSK_PROC_DEFAULT;
1804	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
1805	    "process_limit", &sc->msk_process_limit);
1806	if (error == 0) {
1807		if (sc->msk_process_limit < MSK_PROC_MIN ||
1808		    sc->msk_process_limit > MSK_PROC_MAX) {
1809			device_printf(dev, "process_limit value out of range; "
1810			    "using default: %d\n", MSK_PROC_DEFAULT);
1811			sc->msk_process_limit = MSK_PROC_DEFAULT;
1812		}
1813	}
1814
1815	sc->msk_int_holdoff = MSK_INT_HOLDOFF_DEFAULT;
1816	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
1817	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
1818	    "int_holdoff", CTLFLAG_RW, &sc->msk_int_holdoff, 0,
1819	    "Maximum number of time to delay interrupts");
1820	resource_int_value(device_get_name(dev), device_get_unit(dev),
1821	    "int_holdoff", &sc->msk_int_holdoff);
1822
1823	sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
1824	/* Check number of MACs. */
1825	sc->msk_num_port = 1;
1826	if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
1827	    CFG_DUAL_MAC_MSK) {
1828		if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
1829			sc->msk_num_port++;
1830	}
1831
1832	/* Check bus type. */
1833	if (pci_find_cap(sc->msk_dev, PCIY_EXPRESS, &reg) == 0) {
1834		sc->msk_bustype = MSK_PEX_BUS;
1835		sc->msk_expcap = reg;
1836	} else if (pci_find_cap(sc->msk_dev, PCIY_PCIX, &reg) == 0) {
1837		sc->msk_bustype = MSK_PCIX_BUS;
1838		sc->msk_pcixcap = reg;
1839	} else
1840		sc->msk_bustype = MSK_PCI_BUS;
1841
1842	switch (sc->msk_hw_id) {
1843	case CHIP_ID_YUKON_EC:
1844		sc->msk_clock = 125;	/* 125 MHz */
1845		sc->msk_pflags |= MSK_FLAG_JUMBO;
1846		break;
1847	case CHIP_ID_YUKON_EC_U:
1848		sc->msk_clock = 125;	/* 125 MHz */
1849		sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_JUMBO_NOCSUM;
1850		break;
1851	case CHIP_ID_YUKON_EX:
1852		sc->msk_clock = 125;	/* 125 MHz */
1853		sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 |
1854		    MSK_FLAG_AUTOTX_CSUM;
1855		/*
1856		 * Yukon Extreme seems to have silicon bug for
1857		 * automatic Tx checksum calculation capability.
1858		 */
1859		if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
1860			sc->msk_pflags &= ~MSK_FLAG_AUTOTX_CSUM;
1861		/*
1862		 * Yukon Extreme A0 could not use store-and-forward
1863		 * for jumbo frames, so disable Tx checksum
1864		 * offloading for jumbo frames.
1865		 */
1866		if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0)
1867			sc->msk_pflags |= MSK_FLAG_JUMBO_NOCSUM;
1868		break;
1869	case CHIP_ID_YUKON_FE:
1870		sc->msk_clock = 100;	/* 100 MHz */
1871		sc->msk_pflags |= MSK_FLAG_FASTETHER;
1872		break;
1873	case CHIP_ID_YUKON_FE_P:
1874		sc->msk_clock = 50;	/* 50 MHz */
1875		sc->msk_pflags |= MSK_FLAG_FASTETHER | MSK_FLAG_DESCV2 |
1876		    MSK_FLAG_AUTOTX_CSUM;
1877		if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
1878			/*
1879			 * XXX
1880			 * FE+ A0 has status LE writeback bug so msk(4)
1881			 * does not rely on status word of received frame
1882			 * in msk_rxeof() which in turn disables all
1883			 * hardware assistance bits reported by the status
1884			 * word as well as validity of the received frame.
1885			 * Just pass received frames to upper stack with
1886			 * minimal test and let upper stack handle them.
1887			 */
1888			sc->msk_pflags |= MSK_FLAG_NOHWVLAN |
1889			    MSK_FLAG_NORXCHK | MSK_FLAG_NORX_CSUM;
1890		}
1891		break;
1892	case CHIP_ID_YUKON_XL:
1893		sc->msk_clock = 156;	/* 156 MHz */
1894		sc->msk_pflags |= MSK_FLAG_JUMBO;
1895		break;
1896	case CHIP_ID_YUKON_SUPR:
1897		sc->msk_clock = 125;	/* 125 MHz */
1898		sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 |
1899		    MSK_FLAG_AUTOTX_CSUM;
1900		break;
1901	case CHIP_ID_YUKON_UL_2:
1902		sc->msk_clock = 125;	/* 125 MHz */
1903		sc->msk_pflags |= MSK_FLAG_JUMBO;
1904		break;
1905	case CHIP_ID_YUKON_OPT:
1906		sc->msk_clock = 125;	/* 125 MHz */
1907		sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2;
1908		break;
1909	default:
1910		sc->msk_clock = 156;	/* 156 MHz */
1911		break;
1912	}
1913
1914	/* Allocate IRQ resources. */
1915	msic = pci_msi_count(dev);
1916	if (bootverbose)
1917		device_printf(dev, "MSI count : %d\n", msic);
1918	if (legacy_intr != 0)
1919		msi_disable = 1;
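	/*
	 * Prefer a single MSI message when available and not disabled by
	 * tunables; otherwise keep the legacy INTx IRQ spec set above.
	 */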
1920	if (msi_disable == 0 && msic > 0) {
1921		msir = 1;
1922		if (pci_alloc_msi(dev, &msir) == 0) {
1923			if (msir == 1) {
1924				sc->msk_pflags |= MSK_FLAG_MSI;
1925				sc->msk_irq_spec = msk_irq_spec_msi;
1926			} else
1927				pci_release_msi(dev);
1928		}
1929	}
1930
1931	error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq);
1932	if (error) {
1933		device_printf(dev, "couldn't allocate IRQ resources\n");
1934		goto fail;
1935	}
1936
1937	if ((error = msk_status_dma_alloc(sc)) != 0)
1938		goto fail;
1939
1940	/* Set base interrupt mask. */
1941	sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
1942	sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
1943	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;
1944
1945	/* Reset the adapter. */
1946	mskc_reset(sc);
1947
1948	if ((error = mskc_setup_rambuffer(sc)) != 0)
1949		goto fail;
1950
1951	sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
1952	if (sc->msk_devs[MSK_PORT_A] == NULL) {
1953		device_printf(dev, "failed to add child for PORT_A\n");
1954		error = ENXIO;
1955		goto fail;
1956	}
1957	mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK | M_ZERO);
1958	if (mmd == NULL) {
1959		device_printf(dev, "failed to allocate memory for "
1960		    "ivars of PORT_A\n");
1961		error = ENXIO;
1962		goto fail;
1963	}
1964	mmd->port = MSK_PORT_A;
1965	mmd->pmd = sc->msk_pmd;
1966	mmd->mii_flags |= MIIF_DOPAUSE;
1967	if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
1968		mmd->mii_flags |= MIIF_HAVEFIBER;
1969	if (sc->msk_pmd == 'P')
1970		mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0;
1971	device_set_ivars(sc->msk_devs[MSK_PORT_A], mmd);
1972
1973	if (sc->msk_num_port > 1) {
1974		sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
1975		if (sc->msk_devs[MSK_PORT_B] == NULL) {
1976			device_printf(dev, "failed to add child for PORT_B\n");
1977			error = ENXIO;
1978			goto fail;
1979		}
1980		mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK |
1981		    M_ZERO);
1982		if (mmd == NULL) {
1983			device_printf(dev, "failed to allocate memory for "
1984			    "ivars of PORT_B\n");
1985			error = ENXIO;
1986			goto fail;
1987		}
1988		mmd->port = MSK_PORT_B;
1989		mmd->pmd = sc->msk_pmd;
1990		if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
1991			mmd->mii_flags |= MIIF_HAVEFIBER;
1992		if (sc->msk_pmd == 'P')
1993			mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0;
1994		device_set_ivars(sc->msk_devs[MSK_PORT_B], mmd);
1995	}
1996
1997	error = bus_generic_attach(dev);
1998	if (error) {
1999		device_printf(dev, "failed to attach port(s)\n");
2000		goto fail;
2001	}
2002
2003	/* Hook interrupt last to avoid having to lock softc. */
2004	error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
2005	    INTR_MPSAFE, NULL, msk_intr, sc, &sc->msk_intrhand);
2006	if (error != 0) {
2007		device_printf(dev, "couldn't set up interrupt handler\n");
2008		goto fail;
2009	}
2010fail:
2011	if (error != 0)
2012		mskc_detach(dev);
2013
2014	return (error);
2015}
2016
2017/*
2018 * Shutdown hardware and free up resources. This can be called any
2019 * time after the mutex has been initialized. It is called in both
2020 * the error case in attach and the normal detach case so it needs
2021 * to be careful about only freeing resources that have actually been
2022 * allocated.
2023 */
2024static int
2025msk_detach(device_t dev)
2026{
2027	struct msk_softc *sc;
2028	struct msk_if_softc *sc_if;
2029	struct ifnet *ifp;
2030
2031	sc_if = device_get_softc(dev);
2032	KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx),
2033	    ("msk mutex not initialized in msk_detach"));
2034	MSK_IF_LOCK(sc_if);
2035
2036	ifp = sc_if->msk_ifp;
2037	if (device_is_attached(dev)) {
2038		/* XXX */
2039		sc_if->msk_flags |= MSK_FLAG_DETACH;
2040		msk_stop(sc_if);
2041		/* Can't hold locks while calling detach. */
2042		MSK_IF_UNLOCK(sc_if);
2043		callout_drain(&sc_if->msk_tick_ch);
2044		if (ifp)
2045			ether_ifdetach(ifp);
2046		MSK_IF_LOCK(sc_if);
2047	}
2048
2049	/*
2050	 * We're generally called from mskc_detach() which is using
2051	 * device_delete_child() to get to here. It's already trashed
2052	 * miibus for us, so don't do it here or we'll panic.
2053	 *
2054	 * if (sc_if->msk_miibus != NULL) {
2055	 * 	device_delete_child(dev, sc_if->msk_miibus);
2056	 * 	sc_if->msk_miibus = NULL;
2057	 * }
2058	 */
2059
2060	msk_rx_dma_jfree(sc_if);
2061	msk_txrx_dma_free(sc_if);
2062	bus_generic_detach(dev);
2063
2064	sc = sc_if->msk_softc;
2065	sc->msk_if[sc_if->msk_port] = NULL;
2066	MSK_IF_UNLOCK(sc_if);
2067	if (ifp)
2068		if_free(ifp);
2069
2070	return (0);
2071}
2072
2073static int
2074mskc_detach(device_t dev)
2075{
2076	struct msk_softc *sc;
2077
2078	sc = device_get_softc(dev);
2079	KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized"));
2080
2081	if (device_is_alive(dev)) {
2082		if (sc->msk_devs[MSK_PORT_A] != NULL) {
2083			free(device_get_ivars(sc->msk_devs[MSK_PORT_A]),
2084			    M_DEVBUF);
2085			device_delete_child(dev, sc->msk_devs[MSK_PORT_A]);
2086		}
2087		if (sc->msk_devs[MSK_PORT_B] != NULL) {
2088			free(device_get_ivars(sc->msk_devs[MSK_PORT_B]),
2089			    M_DEVBUF);
2090			device_delete_child(dev, sc->msk_devs[MSK_PORT_B]);
2091		}
2092		bus_generic_detach(dev);
2093	}
2094
2095	/* Disable all interrupts. */
2096	CSR_WRITE_4(sc, B0_IMSK, 0);
2097	CSR_READ_4(sc, B0_IMSK);
2098	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2099	CSR_READ_4(sc, B0_HWE_IMSK);
2100
2101	/* LED Off. */
2102	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);
2103
2104	/* Put hardware reset. */
2105	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2106
2107	msk_status_dma_free(sc);
2108
2109	if (sc->msk_intrhand) {
2110		bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand);
2111		sc->msk_intrhand = NULL;
2112	}
2113	bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq);
2114	if ((sc->msk_pflags & MSK_FLAG_MSI) != 0)
2115		pci_release_msi(dev);
2116	bus_release_resources(dev, sc->msk_res_spec, sc->msk_res);
2117	mtx_destroy(&sc->msk_mtx);
2118
2119	return (0);
2120}
2121
2122static bus_dma_tag_t
2123mskc_get_dma_tag(device_t bus, device_t child __unused)
2124{
2125
2126	return (bus_get_dma_tag(bus));
2127}
2128
2129struct msk_dmamap_arg {
2130	bus_addr_t	msk_busaddr;
2131};
2132
2133static void
2134msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2135{
2136	struct msk_dmamap_arg *ctx;
2137
2138	if (error != 0)
2139		return;
2140	ctx = arg;
2141	ctx->msk_busaddr = segs[0].ds_addr;
2142}
2143
2144/* Create status DMA region. */
2145static int
2146msk_status_dma_alloc(struct msk_softc *sc)
2147{
2148	struct msk_dmamap_arg ctx;
2149	bus_size_t stat_sz;
2150	int count, error;
2151
2152	/*
2153	 * It seems controller requires number of status LE entries
2154	 * is power of 2 and the maximum number of status LE entries
2155	 * is 4096.  For dual-port controllers, the number of status
2156	 * LE entries should be large enough to hold both port's
2157	 * status updates.
2158	 */
2159	count = 3 * MSK_RX_RING_CNT + MSK_TX_RING_CNT;
2160	count = imin(4096, roundup2(count, 1024));
2161	sc->msk_stat_count = count;
2162	stat_sz = count * sizeof(struct msk_stat_desc);
2163	error = bus_dma_tag_create(
2164		    bus_get_dma_tag(sc->msk_dev),	/* parent */
2165		    MSK_STAT_ALIGN, 0,		/* alignment, boundary */
2166		    BUS_SPACE_MAXADDR,		/* lowaddr */
2167		    BUS_SPACE_MAXADDR,		/* highaddr */
2168		    NULL, NULL,			/* filter, filterarg */
2169		    stat_sz,			/* maxsize */
2170		    1,				/* nsegments */
2171		    stat_sz,			/* maxsegsize */
2172		    0,				/* flags */
2173		    NULL, NULL,			/* lockfunc, lockarg */
2174		    &sc->msk_stat_tag);
2175	if (error != 0) {
2176		device_printf(sc->msk_dev,
2177		    "failed to create status DMA tag\n");
2178		return (error);
2179	}
2180
2181	/* Allocate DMA'able memory and load the DMA map for status ring. */
2182	error = bus_dmamem_alloc(sc->msk_stat_tag,
2183	    (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
2184	    BUS_DMA_ZERO, &sc->msk_stat_map);
2185	if (error != 0) {
2186		device_printf(sc->msk_dev,
2187		    "failed to allocate DMA'able memory for status ring\n");
2188		return (error);
2189	}
2190
2191	ctx.msk_busaddr = 0;
2192	error = bus_dmamap_load(sc->msk_stat_tag, sc->msk_stat_map,
2193	    sc->msk_stat_ring, stat_sz, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2194	if (error != 0) {
2195		device_printf(sc->msk_dev,
2196		    "failed to load DMA'able memory for status ring\n");
2197		return (error);
2198	}
2199	sc->msk_stat_ring_paddr = ctx.msk_busaddr;
2200
2201	return (0);
2202}
2203
2204static void
2205msk_status_dma_free(struct msk_softc *sc)
2206{
2207
2208	/* Destroy status block. */
2209	if (sc->msk_stat_tag) {
2210		if (sc->msk_stat_ring_paddr) {
2211			bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
2212			sc->msk_stat_ring_paddr = 0;
2213		}
2214		if (sc->msk_stat_ring) {
2215			bus_dmamem_free(sc->msk_stat_tag,
2216			    sc->msk_stat_ring, sc->msk_stat_map);
2217			sc->msk_stat_ring = NULL;
2218		}
2219		bus_dma_tag_destroy(sc->msk_stat_tag);
2220		sc->msk_stat_tag = NULL;
2221	}
2222}
2223
2224static int
2225msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
2226{
2227	struct msk_dmamap_arg ctx;
2228	struct msk_txdesc *txd;
2229	struct msk_rxdesc *rxd;
2230	bus_size_t rxalign;
2231	int error, i;
2232
2233	/* Create parent DMA tag. */
2234	error = bus_dma_tag_create(
2235		    bus_get_dma_tag(sc_if->msk_if_dev),	/* parent */
2236		    1, 0,			/* alignment, boundary */
2237		    BUS_SPACE_MAXADDR,		/* lowaddr */
2238		    BUS_SPACE_MAXADDR,		/* highaddr */
2239		    NULL, NULL,			/* filter, filterarg */
2240		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
2241		    0,				/* nsegments */
2242		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
2243		    0,				/* flags */
2244		    NULL, NULL,			/* lockfunc, lockarg */
2245		    &sc_if->msk_cdata.msk_parent_tag);
2246	if (error != 0) {
2247		device_printf(sc_if->msk_if_dev,
2248		    "failed to create parent DMA tag\n");
2249		goto fail;
2250	}
2251	/* Create tag for Tx ring. */
2252	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2253		    MSK_RING_ALIGN, 0,		/* alignment, boundary */
2254		    BUS_SPACE_MAXADDR,		/* lowaddr */
2255		    BUS_SPACE_MAXADDR,		/* highaddr */
2256		    NULL, NULL,			/* filter, filterarg */
2257		    MSK_TX_RING_SZ,		/* maxsize */
2258		    1,				/* nsegments */
2259		    MSK_TX_RING_SZ,		/* maxsegsize */
2260		    0,				/* flags */
2261		    NULL, NULL,			/* lockfunc, lockarg */
2262		    &sc_if->msk_cdata.msk_tx_ring_tag);
2263	if (error != 0) {
2264		device_printf(sc_if->msk_if_dev,
2265		    "failed to create Tx ring DMA tag\n");
2266		goto fail;
2267	}
2268
2269	/* Create tag for Rx ring. */
2270	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2271		    MSK_RING_ALIGN, 0,		/* alignment, boundary */
2272		    BUS_SPACE_MAXADDR,		/* lowaddr */
2273		    BUS_SPACE_MAXADDR,		/* highaddr */
2274		    NULL, NULL,			/* filter, filterarg */
2275		    MSK_RX_RING_SZ,		/* maxsize */
2276		    1,				/* nsegments */
2277		    MSK_RX_RING_SZ,		/* maxsegsize */
2278		    0,				/* flags */
2279		    NULL, NULL,			/* lockfunc, lockarg */
2280		    &sc_if->msk_cdata.msk_rx_ring_tag);
2281	if (error != 0) {
2282		device_printf(sc_if->msk_if_dev,
2283		    "failed to create Rx ring DMA tag\n");
2284		goto fail;
2285	}
2286
2287	/* Create tag for Tx buffers. */
2288	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2289		    1, 0,			/* alignment, boundary */
2290		    BUS_SPACE_MAXADDR,		/* lowaddr */
2291		    BUS_SPACE_MAXADDR,		/* highaddr */
2292		    NULL, NULL,			/* filter, filterarg */
2293		    MSK_TSO_MAXSIZE,		/* maxsize */
2294		    MSK_MAXTXSEGS,		/* nsegments */
2295		    MSK_TSO_MAXSGSIZE,		/* maxsegsize */
2296		    0,				/* flags */
2297		    NULL, NULL,			/* lockfunc, lockarg */
2298		    &sc_if->msk_cdata.msk_tx_tag);
2299	if (error != 0) {
2300		device_printf(sc_if->msk_if_dev,
2301		    "failed to create Tx DMA tag\n");
2302		goto fail;
2303	}
2304
2305	rxalign = 1;
2306	/*
2307	 * Workaround hardware hang which seems to happen when Rx buffer
2308	 * is not aligned on multiple of FIFO word(8 bytes).
2309	 */
2310	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
2311		rxalign = MSK_RX_BUF_ALIGN;
2312	/* Create tag for Rx buffers. */
2313	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2314		    rxalign, 0,			/* alignment, boundary */
2315		    BUS_SPACE_MAXADDR,		/* lowaddr */
2316		    BUS_SPACE_MAXADDR,		/* highaddr */
2317		    NULL, NULL,			/* filter, filterarg */
2318		    MCLBYTES,			/* maxsize */
2319		    1,				/* nsegments */
2320		    MCLBYTES,			/* maxsegsize */
2321		    0,				/* flags */
2322		    NULL, NULL,			/* lockfunc, lockarg */
2323		    &sc_if->msk_cdata.msk_rx_tag);
2324	if (error != 0) {
2325		device_printf(sc_if->msk_if_dev,
2326		    "failed to create Rx DMA tag\n");
2327		goto fail;
2328	}
2329
2330	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
2331	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag,
2332	    (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK |
2333	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map);
2334	if (error != 0) {
2335		device_printf(sc_if->msk_if_dev,
2336		    "failed to allocate DMA'able memory for Tx ring\n");
2337		goto fail;
2338	}
2339
2340	ctx.msk_busaddr = 0;
2341	error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag,
2342	    sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring,
2343	    MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2344	if (error != 0) {
2345		device_printf(sc_if->msk_if_dev,
2346		    "failed to load DMA'able memory for Tx ring\n");
2347		goto fail;
2348	}
2349	sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr;
2350
2351	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
2352	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag,
2353	    (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK |
2354	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map);
2355	if (error != 0) {
2356		device_printf(sc_if->msk_if_dev,
2357		    "failed to allocate DMA'able memory for Rx ring\n");
2358		goto fail;
2359	}
2360
2361	ctx.msk_busaddr = 0;
2362	error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag,
2363	    sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring,
2364	    MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2365	if (error != 0) {
2366		device_printf(sc_if->msk_if_dev,
2367		    "failed to load DMA'able memory for Rx ring\n");
2368		goto fail;
2369	}
2370	sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr;
2371
2372	/* Create DMA maps for Tx buffers. */
2373	for (i = 0; i < MSK_TX_RING_CNT; i++) {
2374		txd = &sc_if->msk_cdata.msk_txdesc[i];
2375		txd->tx_m = NULL;
2376		txd->tx_dmamap = NULL;
2377		error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0,
2378		    &txd->tx_dmamap);
2379		if (error != 0) {
2380			device_printf(sc_if->msk_if_dev,
2381			    "failed to create Tx dmamap\n");
2382			goto fail;
2383		}
2384	}
2385	/* Create DMA maps for Rx buffers. */
2386	if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2387	    &sc_if->msk_cdata.msk_rx_sparemap)) != 0) {
2388		device_printf(sc_if->msk_if_dev,
2389		    "failed to create spare Rx dmamap\n");
2390		goto fail;
2391	}
2392	for (i = 0; i < MSK_RX_RING_CNT; i++) {
2393		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2394		rxd->rx_m = NULL;
2395		rxd->rx_dmamap = NULL;
2396		error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2397		    &rxd->rx_dmamap);
2398		if (error != 0) {
2399			device_printf(sc_if->msk_if_dev,
2400			    "failed to create Rx dmamap\n");
2401			goto fail;
2402		}
2403	}
2404
2405fail:
2406	return (error);
2407}
2408
2409static int
2410msk_rx_dma_jalloc(struct msk_if_softc *sc_if)
2411{
2412	struct msk_dmamap_arg ctx;
2413	struct msk_rxdesc *jrxd;
2414	bus_size_t rxalign;
2415	int error, i;
2416
2417	if (jumbo_disable != 0 || (sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
2418		sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
2419		device_printf(sc_if->msk_if_dev,
2420		    "disabling jumbo frame support\n");
2421		return (0);
2422	}
2423	/* Create tag for jumbo Rx ring. */
2424	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2425		    MSK_RING_ALIGN, 0,		/* alignment, boundary */
2426		    BUS_SPACE_MAXADDR,		/* lowaddr */
2427		    BUS_SPACE_MAXADDR,		/* highaddr */
2428		    NULL, NULL,			/* filter, filterarg */
2429		    MSK_JUMBO_RX_RING_SZ,	/* maxsize */
2430		    1,				/* nsegments */
2431		    MSK_JUMBO_RX_RING_SZ,	/* maxsegsize */
2432		    0,				/* flags */
2433		    NULL, NULL,			/* lockfunc, lockarg */
2434		    &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2435	if (error != 0) {
2436		device_printf(sc_if->msk_if_dev,
2437		    "failed to create jumbo Rx ring DMA tag\n");
2438		goto jumbo_fail;
2439	}
2440
2441	rxalign = 1;
2442	/*
2443	 * Workaround hardware hang which seems to happen when Rx buffer
2444	 * is not aligned on multiple of FIFO word(8 bytes).
2445	 */
2446	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
2447		rxalign = MSK_RX_BUF_ALIGN;
2448	/* Create tag for jumbo Rx buffers. */
2449	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2450		    rxalign, 0,			/* alignment, boundary */
2451		    BUS_SPACE_MAXADDR,		/* lowaddr */
2452		    BUS_SPACE_MAXADDR,		/* highaddr */
2453		    NULL, NULL,			/* filter, filterarg */
2454		    MJUM9BYTES,			/* maxsize */
2455		    1,				/* nsegments */
2456		    MJUM9BYTES,			/* maxsegsize */
2457		    0,				/* flags */
2458		    NULL, NULL,			/* lockfunc, lockarg */
2459		    &sc_if->msk_cdata.msk_jumbo_rx_tag);
2460	if (error != 0) {
2461		device_printf(sc_if->msk_if_dev,
2462		    "failed to create jumbo Rx DMA tag\n");
2463		goto jumbo_fail;
2464	}
2465
2466	/* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
2467	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2468	    (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
2469	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2470	    &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2471	if (error != 0) {
2472		device_printf(sc_if->msk_if_dev,
2473		    "failed to allocate DMA'able memory for jumbo Rx ring\n");
2474		goto jumbo_fail;
2475	}
2476
2477	ctx.msk_busaddr = 0;
2478	error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2479	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
2480	    sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
2481	    msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2482	if (error != 0) {
2483		device_printf(sc_if->msk_if_dev,
2484		    "failed to load DMA'able memory for jumbo Rx ring\n");
2485		goto jumbo_fail;
2486	}
2487	sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
2488
2489	/* Create DMA maps for jumbo Rx buffers. */
2490	if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2491	    &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
2492		device_printf(sc_if->msk_if_dev,
2493		    "failed to create spare jumbo Rx dmamap\n");
2494		goto jumbo_fail;
2495	}
2496	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2497		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2498		jrxd->rx_m = NULL;
2499		jrxd->rx_dmamap = NULL;
2500		error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2501		    &jrxd->rx_dmamap);
2502		if (error != 0) {
2503			device_printf(sc_if->msk_if_dev,
2504			    "failed to create jumbo Rx dmamap\n");
2505			goto jumbo_fail;
2506		}
2507	}
2508
2509	return (0);
2510
2511jumbo_fail:
2512	msk_rx_dma_jfree(sc_if);
2513	device_printf(sc_if->msk_if_dev, "disabling jumbo frame support "
2514	    "due to resource shortage\n");
2515	sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
2516	return (error);
2517}
2518
2519static void
2520msk_txrx_dma_free(struct msk_if_softc *sc_if)
2521{
2522	struct msk_txdesc *txd;
2523	struct msk_rxdesc *rxd;
2524	int i;
2525
2526	/* Tx ring. */
2527	if (sc_if->msk_cdata.msk_tx_ring_tag) {
2528		if (sc_if->msk_rdata.msk_tx_ring_paddr)
2529			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag,
2530			    sc_if->msk_cdata.msk_tx_ring_map);
2531		if (sc_if->msk_rdata.msk_tx_ring)
2532			bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag,
2533			    sc_if->msk_rdata.msk_tx_ring,
2534			    sc_if->msk_cdata.msk_tx_ring_map);
2535		sc_if->msk_rdata.msk_tx_ring = NULL;
2536		sc_if->msk_rdata.msk_tx_ring_paddr = 0;
2537		bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag);
2538		sc_if->msk_cdata.msk_tx_ring_tag = NULL;
2539	}
2540	/* Rx ring. */
2541	if (sc_if->msk_cdata.msk_rx_ring_tag) {
2542		if (sc_if->msk_rdata.msk_rx_ring_paddr)
2543			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag,
2544			    sc_if->msk_cdata.msk_rx_ring_map);
2545		if (sc_if->msk_rdata.msk_rx_ring)
2546			bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag,
2547			    sc_if->msk_rdata.msk_rx_ring,
2548			    sc_if->msk_cdata.msk_rx_ring_map);
2549		sc_if->msk_rdata.msk_rx_ring = NULL;
2550		sc_if->msk_rdata.msk_rx_ring_paddr = 0;
2551		bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag);
2552		sc_if->msk_cdata.msk_rx_ring_tag = NULL;
2553	}
2554	/* Tx buffers. */
2555	if (sc_if->msk_cdata.msk_tx_tag) {
2556		for (i = 0; i < MSK_TX_RING_CNT; i++) {
2557			txd = &sc_if->msk_cdata.msk_txdesc[i];
2558			if (txd->tx_dmamap) {
2559				bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
2560				    txd->tx_dmamap);
2561				txd->tx_dmamap = NULL;
2562			}
2563		}
2564		bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
2565		sc_if->msk_cdata.msk_tx_tag = NULL;
2566	}
2567	/* Rx buffers. */
2568	if (sc_if->msk_cdata.msk_rx_tag) {
2569		for (i = 0; i < MSK_RX_RING_CNT; i++) {
2570			rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2571			if (rxd->rx_dmamap) {
2572				bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2573				    rxd->rx_dmamap);
2574				rxd->rx_dmamap = NULL;
2575			}
2576		}
2577		if (sc_if->msk_cdata.msk_rx_sparemap) {
2578			bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2579			    sc_if->msk_cdata.msk_rx_sparemap);
2580			sc_if->msk_cdata.msk_rx_sparemap = 0;
2581		}
2582		bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2583		sc_if->msk_cdata.msk_rx_tag = NULL;
2584	}
2585	if (sc_if->msk_cdata.msk_parent_tag) {
2586		bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
2587		sc_if->msk_cdata.msk_parent_tag = NULL;
2588	}
2589}
2590
2591static void
2592msk_rx_dma_jfree(struct msk_if_softc *sc_if)
2593{
2594	struct msk_rxdesc *jrxd;
2595	int i;
2596
2597	/* Jumbo Rx ring. */
2598	if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
2599		if (sc_if->msk_rdata.msk_jumbo_rx_ring_paddr)
2600			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2601			    sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2602		if (sc_if->msk_rdata.msk_jumbo_rx_ring)
2603			bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2604			    sc_if->msk_rdata.msk_jumbo_rx_ring,
2605			    sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2606		sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
2607		sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = 0;
2608		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2609		sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
2610	}
2611	/* Jumbo Rx buffers. */
2612	if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
2613		for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2614			jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2615			if (jrxd->rx_dmamap) {
2616				bus_dmamap_destroy(
2617				    sc_if->msk_cdata.msk_jumbo_rx_tag,
2618				    jrxd->rx_dmamap);
2619				jrxd->rx_dmamap = NULL;
2620			}
2621		}
2622		if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
2623			bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
2624			    sc_if->msk_cdata.msk_jumbo_rx_sparemap);
2625			sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
2626		}
2627		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
2628		sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
2629	}
2630}
2631
2632static int
2633msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
2634{
2635	struct msk_txdesc *txd, *txd_last;
2636	struct msk_tx_desc *tx_le;
2637	struct mbuf *m;
2638	bus_dmamap_t map;
2639	bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
2640	uint32_t control, csum, prod, si;
2641	uint16_t offset, tcp_offset, tso_mtu;
2642	int error, i, nseg, tso;
2643
2644	MSK_IF_LOCK_ASSERT(sc_if);
2645
2646	tcp_offset = offset = 0;
2647	m = *m_head;
2648	if (((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
2649	    (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) ||
2650	    ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
2651	    (m->m_pkthdr.csum_flags & CSUM_TSO) != 0)) {
2652		/*
2653		 * Since mbuf has no protocol specific structure information
2654		 * in it we have to inspect protocol information here to
2655		 * setup TSO and checksum offload. I don't know why Marvell
2656		 * made a such decision in chip design because other GigE
2657		 * hardwares normally takes care of all these chores in
2658		 * hardware. However, TSO performance of Yukon II is very
2659		 * good such that it's worth to implement it.
2660		 */
2661		struct ether_header *eh;
2662		struct ip *ip;
2663		struct tcphdr *tcp;
2664
2665		if (M_WRITABLE(m) == 0) {
2666			/* Get a writable copy. */
2667			m = m_dup(*m_head, M_NOWAIT);
2668			m_freem(*m_head);
2669			if (m == NULL) {
2670				*m_head = NULL;
2671				return (ENOBUFS);
2672			}
2673			*m_head = m;
2674		}
2675
2676		offset = sizeof(struct ether_header);
2677		m = m_pullup(m, offset);
2678		if (m == NULL) {
2679			*m_head = NULL;
2680			return (ENOBUFS);
2681		}
2682		eh = mtod(m, struct ether_header *);
2683		/* Check if hardware VLAN insertion is off. */
2684		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2685			offset = sizeof(struct ether_vlan_header);
2686			m = m_pullup(m, offset);
2687			if (m == NULL) {
2688				*m_head = NULL;
2689				return (ENOBUFS);
2690			}
2691		}
2692		m = m_pullup(m, offset + sizeof(struct ip));
2693		if (m == NULL) {
2694			*m_head = NULL;
2695			return (ENOBUFS);
2696		}
2697		ip = (struct ip *)(mtod(m, char *) + offset);
2698		offset += (ip->ip_hl << 2);
2699		tcp_offset = offset;
2700		if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2701			m = m_pullup(m, offset + sizeof(struct tcphdr));
2702			if (m == NULL) {
2703				*m_head = NULL;
2704				return (ENOBUFS);
2705			}
2706			tcp = (struct tcphdr *)(mtod(m, char *) + offset);
2707			offset += (tcp->th_off << 2);
2708		} else if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
2709		    (m->m_pkthdr.len < MSK_MIN_FRAMELEN) &&
2710		    (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
2711			/*
2712			 * It seems that Yukon II has Tx checksum offload bug
2713			 * for small TCP packets that's less than 60 bytes in
2714			 * size (e.g. TCP window probe packet, pure ACK packet).
2715			 * Common work around like padding with zeros to make
2716			 * the frame minimum ethernet frame size didn't work at
2717			 * all.
2718			 * Instead of disabling checksum offload completely we
2719			 * resort to S/W checksum routine when we encounter
2720			 * short TCP frames.
2721			 * Short UDP packets appear to be handled correctly by
2722			 * Yukon II. Also I assume this bug does not happen on
2723			 * controllers that use newer descriptor format or
2724			 * automatic Tx checksum calculation.
2725			 */
2726			m = m_pullup(m, offset + sizeof(struct tcphdr));
2727			if (m == NULL) {
2728				*m_head = NULL;
2729				return (ENOBUFS);
2730			}
2731			*(uint16_t *)(m->m_data + offset +
2732			    m->m_pkthdr.csum_data) = in_cksum_skip(m,
2733			    m->m_pkthdr.len, offset);
2734			m->m_pkthdr.csum_flags &= ~CSUM_TCP;
2735		}
2736		*m_head = m;
2737	}
2738
2739	prod = sc_if->msk_cdata.msk_tx_prod;
2740	txd = &sc_if->msk_cdata.msk_txdesc[prod];
2741	txd_last = txd;
2742	map = txd->tx_dmamap;
2743	error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map,
2744	    *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2745	if (error == EFBIG) {
2746		m = m_collapse(*m_head, M_NOWAIT, MSK_MAXTXSEGS);
2747		if (m == NULL) {
2748			m_freem(*m_head);
2749			*m_head = NULL;
2750			return (ENOBUFS);
2751		}
2752		*m_head = m;
2753		error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag,
2754		    map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2755		if (error != 0) {
2756			m_freem(*m_head);
2757			*m_head = NULL;
2758			return (error);
2759		}
2760	} else if (error != 0)
2761		return (error);
2762	if (nseg == 0) {
2763		m_freem(*m_head);
2764		*m_head = NULL;
2765		return (EIO);
2766	}
2767
2768	/* Check number of available descriptors. */
2769	if (sc_if->msk_cdata.msk_tx_cnt + nseg >=
2770	    (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) {
2771		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
2772		return (ENOBUFS);
2773	}
2774
2775	control = 0;
2776	tso = 0;
2777	tx_le = NULL;
2778
2779	/* Check TSO support. */
2780	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2781		if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
2782			tso_mtu = m->m_pkthdr.tso_segsz;
2783		else
2784			tso_mtu = offset + m->m_pkthdr.tso_segsz;
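		/*
		 * Emit a new MSS/LRGLEN list element only when the value
		 * differs from the one programmed by the previous TSO frame.
		 */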
2785		if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) {
2786			tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2787			tx_le->msk_addr = htole32(tso_mtu);
2788			if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
2789				tx_le->msk_control = htole32(OP_MSS | HW_OWNER);
2790			else
2791				tx_le->msk_control =
2792				    htole32(OP_LRGLEN | HW_OWNER);
2793			sc_if->msk_cdata.msk_tx_cnt++;
2794			MSK_INC(prod, MSK_TX_RING_CNT);
2795			sc_if->msk_cdata.msk_tso_mtu = tso_mtu;
2796		}
2797		tso++;
2798	}
2799	/* Check if we have a VLAN tag to insert. */
2800	if ((m->m_flags & M_VLANTAG) != 0) {
2801		if (tx_le == NULL) {
2802			tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2803			tx_le->msk_addr = htole32(0);
2804			tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
2805			    htons(m->m_pkthdr.ether_vtag));
2806			sc_if->msk_cdata.msk_tx_cnt++;
2807			MSK_INC(prod, MSK_TX_RING_CNT);
2808		} else {
2809			tx_le->msk_control |= htole32(OP_VLAN |
2810			    htons(m->m_pkthdr.ether_vtag));
2811		}
2812		control |= INS_VLAN;
2813	}
2814	/* Check if we have to handle checksum offload. */
2815	if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) {
2816		if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) != 0)
2817			control |= CALSUM;
2818		else {
2819			control |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
2820			if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2821				control |= UDPTCP;
2822			/* Checksum write position. */
2823			csum = (tcp_offset + m->m_pkthdr.csum_data) & 0xffff;
2824			/* Checksum start position. */
2825			csum |= (uint32_t)tcp_offset << 16;
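			/*
			 * Emit a new checksum control list element only when
			 * the start/write positions changed since the last
			 * offloaded frame.
			 */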
2826			if (csum != sc_if->msk_cdata.msk_last_csum) {
2827				tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2828				tx_le->msk_addr = htole32(csum);
2829				tx_le->msk_control = htole32(1 << 16 |
2830				    (OP_TCPLISW | HW_OWNER));
2831				sc_if->msk_cdata.msk_tx_cnt++;
2832				MSK_INC(prod, MSK_TX_RING_CNT);
2833				sc_if->msk_cdata.msk_last_csum = csum;
2834			}
2835		}
2836	}
2837
2838#ifdef MSK_64BIT_DMA
2839	if (MSK_ADDR_HI(txsegs[0].ds_addr) !=
2840	    sc_if->msk_cdata.msk_tx_high_addr) {
2841		sc_if->msk_cdata.msk_tx_high_addr =
2842		    MSK_ADDR_HI(txsegs[0].ds_addr);
2843		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2844		tx_le->msk_addr = htole32(MSK_ADDR_HI(txsegs[0].ds_addr));
2845		tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
2846		sc_if->msk_cdata.msk_tx_cnt++;
2847		MSK_INC(prod, MSK_TX_RING_CNT);
2848	}
2849#endif
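	/*
	 * Remember the first data descriptor; its HW_OWNER bit is set last,
	 * after the whole chain has been built, so the chip never sees a
	 * partially constructed frame.
	 */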
2850	si = prod;
2851	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2852	tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
2853	if (tso == 0)
2854		tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2855		    OP_PACKET);
2856	else
2857		tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2858		    OP_LARGESEND);
2859	sc_if->msk_cdata.msk_tx_cnt++;
2860	MSK_INC(prod, MSK_TX_RING_CNT);
2861
2862	for (i = 1; i < nseg; i++) {
2863		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2864#ifdef MSK_64BIT_DMA
2865		if (MSK_ADDR_HI(txsegs[i].ds_addr) !=
2866		    sc_if->msk_cdata.msk_tx_high_addr) {
2867			sc_if->msk_cdata.msk_tx_high_addr =
2868			    MSK_ADDR_HI(txsegs[i].ds_addr);
2869			tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2870			tx_le->msk_addr =
2871			    htole32(MSK_ADDR_HI(txsegs[i].ds_addr));
2872			tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
2873			sc_if->msk_cdata.msk_tx_cnt++;
2874			MSK_INC(prod, MSK_TX_RING_CNT);
2875			tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2876		}
2877#endif
2878		tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
2879		tx_le->msk_control = htole32(txsegs[i].ds_len | control |
2880		    OP_BUFFER | HW_OWNER);
2881		sc_if->msk_cdata.msk_tx_cnt++;
2882		MSK_INC(prod, MSK_TX_RING_CNT);
2883	}
2884	/* Update producer index. */
2885	sc_if->msk_cdata.msk_tx_prod = prod;
2886
2887	/* Set EOP on the last descriptor. */
2888	prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
2889	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2890	tx_le->msk_control |= htole32(EOP);
2891
2892	/* Turn the first descriptor ownership to hardware. */
2893	tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
2894	tx_le->msk_control |= htole32(HW_OWNER);
2895
2896	txd = &sc_if->msk_cdata.msk_txdesc[prod];
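	/*
	 * The mbuf was loaded through the first slot's DMA map (txd_last);
	 * swap that map into the EOP slot so it stays with the mbuf, which
	 * msk_txeof() unloads and frees from the EOP descriptor.
	 */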
2897	map = txd_last->tx_dmamap;
2898	txd_last->tx_dmamap = txd->tx_dmamap;
2899	txd->tx_dmamap = map;
2900	txd->tx_m = m;
2901
2902	/* Sync descriptors. */
2903	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
2904	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
2905	    sc_if->msk_cdata.msk_tx_ring_map,
2906	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2907
2908	return (0);
2909}
2910
2911static void
2912msk_start(struct ifnet *ifp)
2913{
2914	struct msk_if_softc *sc_if;
2915
2916	sc_if = ifp->if_softc;
2917	MSK_IF_LOCK(sc_if);
2918	msk_start_locked(ifp);
2919	MSK_IF_UNLOCK(sc_if);
2920}
2921
2922static void
2923msk_start_locked(struct ifnet *ifp)
2924{
2925	struct msk_if_softc *sc_if;
2926	struct mbuf *m_head;
2927	int enq;
2928
2929	sc_if = ifp->if_softc;
2930	MSK_IF_LOCK_ASSERT(sc_if);
2931
2932	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2933	    IFF_DRV_RUNNING || (sc_if->msk_flags & MSK_FLAG_LINK) == 0)
2934		return;
2935
2936	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2937	    sc_if->msk_cdata.msk_tx_cnt <
2938	    (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
2939		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2940		if (m_head == NULL)
2941			break;
2942		/*
2943		 * Pack the data into the transmit ring. If we
2944		 * don't have room, set the OACTIVE flag and wait
2945		 * for the NIC to drain the ring.
2946		 */
2947		if (msk_encap(sc_if, &m_head) != 0) {
2948			if (m_head == NULL)
2949				break;
2950			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2951			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2952			break;
2953		}
2954
2955		enq++;
2956		/*
2957		 * If there's a BPF listener, bounce a copy of this frame
2958		 * to him.
2959		 */
2960		ETHER_BPF_MTAP(ifp, m_head);
2961	}
2962
2963	if (enq > 0) {
2964		/* Transmit */
2965		CSR_WRITE_2(sc_if->msk_softc,
2966		    Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
2967		    sc_if->msk_cdata.msk_tx_prod);
2968
2969		/* Set a timeout in case the chip goes out to lunch. */
2970		sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT;
2971	}
2972}
2973
2974static void
2975msk_watchdog(struct msk_if_softc *sc_if)
2976{
2977	struct ifnet *ifp;
2978
2979	MSK_IF_LOCK_ASSERT(sc_if);
2980
2981	if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer)
2982		return;
2983	ifp = sc_if->msk_ifp;
2984	if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) {
2985		if (bootverbose)
2986			if_printf(sc_if->msk_ifp, "watchdog timeout "
2987			   "(missed link)\n");
2988		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2989		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2990		msk_init_locked(sc_if);
2991		return;
2992	}
2993
2994	if_printf(ifp, "watchdog timeout\n");
2995	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2996	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2997	msk_init_locked(sc_if);
2998	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2999		msk_start_locked(ifp);
3000}
3001
3002static int
3003mskc_shutdown(device_t dev)
3004{
3005	struct msk_softc *sc;
3006	int i;
3007
3008	sc = device_get_softc(dev);
3009	MSK_LOCK(sc);
3010	for (i = 0; i < sc->msk_num_port; i++) {
3011		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
3012		    ((sc->msk_if[i]->msk_ifp->if_drv_flags &
3013		    IFF_DRV_RUNNING) != 0))
3014			msk_stop(sc->msk_if[i]);
3015	}
3016	MSK_UNLOCK(sc);
3017
3018	/* Put hardware reset. */
3019	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
3020	return (0);
3021}
3022
3023static int
3024mskc_suspend(device_t dev)
3025{
3026	struct msk_softc *sc;
3027	int i;
3028
3029	sc = device_get_softc(dev);
3030
3031	MSK_LOCK(sc);
3032
3033	for (i = 0; i < sc->msk_num_port; i++) {
3034		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
3035		    ((sc->msk_if[i]->msk_ifp->if_drv_flags &
3036		    IFF_DRV_RUNNING) != 0))
3037			msk_stop(sc->msk_if[i]);
3038	}
3039
3040	/* Disable all interrupts. */
3041	CSR_WRITE_4(sc, B0_IMSK, 0);
3042	CSR_READ_4(sc, B0_IMSK);
3043	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
3044	CSR_READ_4(sc, B0_HWE_IMSK);
3045
3046	msk_phy_power(sc, MSK_PHY_POWERDOWN);
3047
3048	/* Put hardware reset. */
3049	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
3050	sc->msk_pflags |= MSK_FLAG_SUSPEND;
3051
3052	MSK_UNLOCK(sc);
3053
3054	return (0);
3055}
3056
3057static int
3058mskc_resume(device_t dev)
3059{
3060	struct msk_softc *sc;
3061	int i;
3062
3063	sc = device_get_softc(dev);
3064
3065	MSK_LOCK(sc);
3066
3067	CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
3068	mskc_reset(sc);
3069	for (i = 0; i < sc->msk_num_port; i++) {
3070		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
3071		    ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) {
3072			sc->msk_if[i]->msk_ifp->if_drv_flags &=
3073			    ~IFF_DRV_RUNNING;
3074			msk_init_locked(sc->msk_if[i]);
3075		}
3076	}
3077	sc->msk_pflags &= ~MSK_FLAG_SUSPEND;
3078
3079	MSK_UNLOCK(sc);
3080
3081	return (0);
3082}
3083
3084#ifndef __NO_STRICT_ALIGNMENT
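/*
 * When Rx buffers must be aligned to the 8-byte FIFO word (the RAM buffer
 * workaround), the received frame starts 8-byte aligned and the IP header is
 * misaligned on strict-alignment architectures.  Copy the frame back by
 * (MSK_RX_BUF_ALIGN - ETHER_ALIGN) bytes so that the IP header following the
 * Ethernet header becomes 32-bit aligned.
 */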
3085static __inline void
3086msk_fixup_rx(struct mbuf *m)
3087{
	int i;
	uint16_t *src, *dst;
3090
3091	src = mtod(m, uint16_t *);
3092	dst = src - 3;
3093
3094	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
3095		*dst++ = *src++;
3096
3097	m->m_data -= (MSK_RX_BUF_ALIGN - ETHER_ALIGN);
3098}
3099#endif
3100
3101static __inline void
3102msk_rxcsum(struct msk_if_softc *sc_if, uint32_t control, struct mbuf *m)
3103{
3104	struct ether_header *eh;
3105	struct ip *ip;
3106	struct udphdr *uh;
3107	int32_t hlen, len, pktlen, temp32;
3108	uint16_t csum, *opts;
3109
3110	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0) {
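		/*
		 * Controllers using the new descriptor format report
		 * per-frame checksum status directly in the Rx control
		 * word; translate those bits into mbuf checksum flags.
		 */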
3111		if ((control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) {
3112			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3113			if ((control & CSS_IPV4_CSUM_OK) != 0)
3114				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3115			if ((control & (CSS_TCP | CSS_UDP)) != 0 &&
3116			    (control & (CSS_TCPUDP_CSUM_OK)) != 0) {
3117				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3118				    CSUM_PSEUDO_HDR;
3119				m->m_pkthdr.csum_data = 0xffff;
3120			}
3121		}
3122		return;
3123	}
3124	/*
3125	 * Marvell Yukon controllers that support OP_RXCHKS has known
3126	 * to have various Rx checksum offloading bugs. These
3127	 * controllers can be configured to compute simple checksum
3128	 * at two different positions. So we can compute IP and TCP/UDP
3129	 * checksum at the same time. We intentionally have controller
3130	 * compute TCP/UDP checksum twice by specifying the same
3131	 * checksum start position and compare the result. If the value
3132	 * is different it would indicate the hardware logic was wrong.
3133	 */
3134	if ((sc_if->msk_csum & 0xFFFF) != (sc_if->msk_csum >> 16)) {
3135		if (bootverbose)
3136			device_printf(sc_if->msk_if_dev,
3137			    "Rx checksum value mismatch!\n");
3138		return;
3139	}
3140	pktlen = m->m_pkthdr.len;
3141	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
3142		return;
3143	eh = mtod(m, struct ether_header *);
3144	if (eh->ether_type != htons(ETHERTYPE_IP))
3145		return;
3146	ip = (struct ip *)(eh + 1);
3147	if (ip->ip_v != IPVERSION)
3148		return;
3149
3150	hlen = ip->ip_hl << 2;
3151	pktlen -= sizeof(struct ether_header);
3152	if (hlen < sizeof(struct ip))
3153		return;
3154	if (ntohs(ip->ip_len) < hlen)
3155		return;
3156	if (ntohs(ip->ip_len) != pktlen)
3157		return;
3158	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
3159		return;	/* can't handle fragmented packet. */
3160
3161	switch (ip->ip_p) {
3162	case IPPROTO_TCP:
3163		if (pktlen < (hlen + sizeof(struct tcphdr)))
3164			return;
3165		break;
3166	case IPPROTO_UDP:
3167		if (pktlen < (hlen + sizeof(struct udphdr)))
3168			return;
3169		uh = (struct udphdr *)((caddr_t)ip + hlen);
3170		if (uh->uh_sum == 0)
3171			return; /* no checksum */
3172		break;
3173	default:
3174		return;
3175	}
3176	csum = bswap16(sc_if->msk_csum & 0xFFFF);
3177	/* Checksum fixup for IP options. */
3178	len = hlen - sizeof(struct ip);
3179	if (len > 0) {
3180		opts = (uint16_t *)(ip + 1);
3181		for (; len > 0; len -= sizeof(uint16_t), opts++) {
3182			temp32 = csum - *opts;
3183			temp32 = (temp32 >> 16) + (temp32 & 65535);
3184			csum = temp32 & 65535;
3185		}
3186	}
3187	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
3188	m->m_pkthdr.csum_data = csum;
3189}
3190
3191static void
3192msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
3193    int len)
3194{
3195	struct mbuf *m;
3196	struct ifnet *ifp;
3197	struct msk_rxdesc *rxd;
3198	int cons, rxlen;
3199
3200	ifp = sc_if->msk_ifp;
3201
3202	MSK_IF_LOCK_ASSERT(sc_if);
3203
3204	cons = sc_if->msk_cdata.msk_rx_cons;
3205	do {
3206		rxlen = status >> 16;
3207		if ((status & GMR_FS_VLAN) != 0 &&
3208		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3209			rxlen -= ETHER_VLAN_ENCAP_LEN;
3210		if ((sc_if->msk_flags & MSK_FLAG_NORXCHK) != 0) {
3211			/*
3212			 * For controllers that returns bogus status code
3213			 * just do minimal check and let upper stack
3214			 * handle this frame.
3215			 */
3216			if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) {
3217				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
3218				msk_discard_rxbuf(sc_if, cons);
3219				break;
3220			}
3221		} else if (len > sc_if->msk_framesize ||
3222		    ((status & GMR_FS_ANY_ERR) != 0) ||
3223		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
			/* Don't count flow-control packets as errors. */
3225			if ((status & GMR_FS_GOOD_FC) == 0)
3226				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
3227			msk_discard_rxbuf(sc_if, cons);
3228			break;
3229		}
3230#ifdef MSK_64BIT_DMA
3231		rxd = &sc_if->msk_cdata.msk_rxdesc[(cons + 1) %
3232		    MSK_RX_RING_CNT];
3233#else
3234		rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
3235#endif
3236		m = rxd->rx_m;
3237		if (msk_newbuf(sc_if, cons) != 0) {
3238			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
3239			/* Reuse old buffer. */
3240			msk_discard_rxbuf(sc_if, cons);
3241			break;
3242		}
3243		m->m_pkthdr.rcvif = ifp;
3244		m->m_pkthdr.len = m->m_len = len;
3245#ifndef __NO_STRICT_ALIGNMENT
3246		if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
3247			msk_fixup_rx(m);
3248#endif
3249		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3250		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
3251			msk_rxcsum(sc_if, control, m);
3252		/* Check for VLAN tagged packets. */
3253		if ((status & GMR_FS_VLAN) != 0 &&
3254		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3255			m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
3256			m->m_flags |= M_VLANTAG;
3257		}
3258		MSK_IF_UNLOCK(sc_if);
3259		(*ifp->if_input)(ifp, m);
3260		MSK_IF_LOCK(sc_if);
3261	} while (0);
3262
3263	MSK_RX_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
3264	MSK_RX_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
3265}
3266
3267static void
3268msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
3269    int len)
3270{
3271	struct mbuf *m;
3272	struct ifnet *ifp;
3273	struct msk_rxdesc *jrxd;
3274	int cons, rxlen;
3275
3276	ifp = sc_if->msk_ifp;
3277
3278	MSK_IF_LOCK_ASSERT(sc_if);
3279
3280	cons = sc_if->msk_cdata.msk_rx_cons;
3281	do {
3282		rxlen = status >> 16;
3283		if ((status & GMR_FS_VLAN) != 0 &&
3284		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3285			rxlen -= ETHER_VLAN_ENCAP_LEN;
3286		if (len > sc_if->msk_framesize ||
3287		    ((status & GMR_FS_ANY_ERR) != 0) ||
3288		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
			/* Don't count flow-control packets as errors. */
3290			if ((status & GMR_FS_GOOD_FC) == 0)
3291				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
3292			msk_discard_jumbo_rxbuf(sc_if, cons);
3293			break;
3294		}
3295#ifdef MSK_64BIT_DMA
3296		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[(cons + 1) %
3297		    MSK_JUMBO_RX_RING_CNT];
3298#else
3299		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
3300#endif
3301		m = jrxd->rx_m;
3302		if (msk_jumbo_newbuf(sc_if, cons) != 0) {
3303			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
3304			/* Reuse old buffer. */
3305			msk_discard_jumbo_rxbuf(sc_if, cons);
3306			break;
3307		}
3308		m->m_pkthdr.rcvif = ifp;
3309		m->m_pkthdr.len = m->m_len = len;
3310#ifndef __NO_STRICT_ALIGNMENT
3311		if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
3312			msk_fixup_rx(m);
3313#endif
3314		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3315		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
3316			msk_rxcsum(sc_if, control, m);
3317		/* Check for VLAN tagged packets. */
3318		if ((status & GMR_FS_VLAN) != 0 &&
3319		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3320			m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
3321			m->m_flags |= M_VLANTAG;
3322		}
3323		MSK_IF_UNLOCK(sc_if);
3324		(*ifp->if_input)(ifp, m);
3325		MSK_IF_LOCK(sc_if);
3326	} while (0);
3327
3328	MSK_RX_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
3329	MSK_RX_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
3330}
3331
3332static void
3333msk_txeof(struct msk_if_softc *sc_if, int idx)
3334{
3335	struct msk_txdesc *txd;
3336	struct msk_tx_desc *cur_tx;
3337	struct ifnet *ifp;
3338	uint32_t control;
3339	int cons, prog;
3340
3341	MSK_IF_LOCK_ASSERT(sc_if);
3342
3343	ifp = sc_if->msk_ifp;
3344
3345	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
3346	    sc_if->msk_cdata.msk_tx_ring_map,
3347	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3348	/*
3349	 * Go through our tx ring and free mbufs for those
3350	 * frames that have been sent.
3351	 */
3352	cons = sc_if->msk_cdata.msk_tx_cons;
3353	prog = 0;
3354	for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
3355		if (sc_if->msk_cdata.msk_tx_cnt <= 0)
3356			break;
3357		prog++;
3358		cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
3359		control = le32toh(cur_tx->msk_control);
3360		sc_if->msk_cdata.msk_tx_cnt--;
3361		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3362		if ((control & EOP) == 0)
3363			continue;
3364		txd = &sc_if->msk_cdata.msk_txdesc[cons];
3365		bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap,
3366		    BUS_DMASYNC_POSTWRITE);
3367		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);
3368
3369		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
3370		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
3371		    __func__));
3372		m_freem(txd->tx_m);
3373		txd->tx_m = NULL;
3374	}
3375
3376	if (prog > 0) {
3377		sc_if->msk_cdata.msk_tx_cons = cons;
3378		if (sc_if->msk_cdata.msk_tx_cnt == 0)
3379			sc_if->msk_watchdog_timer = 0;
3380		/* No need to sync LEs as we didn't update LEs. */
3381	}
3382}
3383
3384static void
3385msk_tick(void *xsc_if)
3386{
3387	struct msk_if_softc *sc_if;
3388	struct mii_data *mii;
3389
3390	sc_if = xsc_if;
3391
3392	MSK_IF_LOCK_ASSERT(sc_if);
3393
3394	mii = device_get_softc(sc_if->msk_miibus);
3395
3396	mii_tick(mii);
3397	if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0)
3398		msk_miibus_statchg(sc_if->msk_if_dev);
3399	msk_handle_events(sc_if->msk_softc);
3400	msk_watchdog(sc_if);
3401	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3402}
3403
3404static void
3405msk_intr_phy(struct msk_if_softc *sc_if)
3406{
3407	uint16_t status;
3408
3409	msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3410	status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3411	/* Handle FIFO Underrun/Overflow? */
3412	if ((status & PHY_M_IS_FIFO_ERROR) != 0)
3413		device_printf(sc_if->msk_if_dev,
3414		    "PHY FIFO underrun/overflow.\n");
3415}
3416
3417static void
3418msk_intr_gmac(struct msk_if_softc *sc_if)
3419{
3420	struct msk_softc *sc;
3421	uint8_t status;
3422
3423	sc = sc_if->msk_softc;
3424	status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3425
3426	/* GMAC Rx FIFO overrun. */
3427	if ((status & GM_IS_RX_FF_OR) != 0)
3428		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3429		    GMF_CLI_RX_FO);
3430	/* GMAC Tx FIFO underrun. */
3431	if ((status & GM_IS_TX_FF_UR) != 0) {
3432		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3433		    GMF_CLI_TX_FU);
3434		device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
3435		/*
3436		 * XXX
3437		 * In case of a Tx underrun we may need to flush/reset the
3438		 * Tx MAC, but that would also require resynchronization
3439		 * with the status LEs. Reinitializing the status LEs would
3440		 * affect the other port in a dual MAC configuration, so it
3441		 * should be avoided as much as possible.
3442		 * Due to the lack of documentation this is all guesswork
3443		 * and needs more investigation.
3444		 */
3445	}
3446}
3447
3448static void
3449msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
3450{
3451	struct msk_softc *sc;
3452
3453	sc = sc_if->msk_softc;
3454	if ((status & Y2_IS_PAR_RD1) != 0) {
3455		device_printf(sc_if->msk_if_dev,
3456		    "RAM buffer read parity error\n");
3457		/* Clear IRQ. */
3458		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3459		    RI_CLR_RD_PERR);
3460	}
3461	if ((status & Y2_IS_PAR_WR1) != 0) {
3462		device_printf(sc_if->msk_if_dev,
3463		    "RAM buffer write parity error\n");
3464		/* Clear IRQ. */
3465		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3466		    RI_CLR_WR_PERR);
3467	}
3468	if ((status & Y2_IS_PAR_MAC1) != 0) {
3469		device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
3470		/* Clear IRQ. */
3471		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3472		    GMF_CLI_TX_PE);
3473	}
3474	if ((status & Y2_IS_PAR_RX1) != 0) {
3475		device_printf(sc_if->msk_if_dev, "Rx parity error\n");
3476		/* Clear IRQ. */
3477		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
3478	}
3479	if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
3480		device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
3481		/* Clear IRQ. */
3482		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
3483	}
3484}
3485
3486static void
3487msk_intr_hwerr(struct msk_softc *sc)
3488{
3489	uint32_t status;
3490	uint32_t tlphead[4];
3491
3492	status = CSR_READ_4(sc, B0_HWE_ISRC);
3493	/* Time Stamp timer overflow. */
3494	if ((status & Y2_IS_TIST_OV) != 0)
3495		CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
3496	if ((status & Y2_IS_PCI_NEXP) != 0) {
3497		/*
3498		 * A PCI Express error occurred which is not described in
3499		 * the PEX spec.
3500		 * This error is also mapped to either the Master Abort
3501		 * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
3502		 * can only be cleared there.
3503		 */
3504		device_printf(sc->msk_dev,
3505		    "PCI Express protocol violation error\n");
3506	}
3507
3508	if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
3509		uint16_t v16;
3510
3511		if ((status & Y2_IS_MST_ERR) != 0)
3512			device_printf(sc->msk_dev,
3513			    "unexpected IRQ Master error\n");
3514		else
3515			device_printf(sc->msk_dev,
3516			    "unexpected IRQ Status error\n");
3517		/* Reset all bits in the PCI status register. */
3518		v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
3519		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3520		pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
3521		    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
3522		    PCIM_STATUS_RTABORT | PCIM_STATUS_MDPERR, 2);
3523		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3524	}
3525
3526	/* Check for PCI Express Uncorrectable Error. */
3527	if ((status & Y2_IS_PCI_EXP) != 0) {
3528		uint32_t v32;
3529
3530		/*
3531		 * On a PCI Express bus, bridges are called root complexes
3532		 * (RC). PCI Express errors are also recognized by the root
3533		 * complex, which requests the system to handle the problem.
3534		 * After such an error it may no longer be possible to access
3535		 * the adapter at all.
3536		 */
3537
3538		v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
3539		if ((v32 & PEX_UNSUP_REQ) != 0) {
3540			/* Ignore unsupported request error. */
3541			device_printf(sc->msk_dev,
3542			    "Uncorrectable PCI Express error\n");
3543		}
3544		if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
3545			int i;
3546
3547			/* Get the TLP header from the Log Registers. */
3548			for (i = 0; i < 4; i++)
3549				tlphead[i] = CSR_PCI_READ_4(sc,
3550				    PEX_HEADER_LOG + i * 4);
3551			/* Check for vendor defined broadcast message. */
3552			if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
3553				sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
3554				CSR_WRITE_4(sc, B0_HWE_IMSK,
3555				    sc->msk_intrhwemask);
3556				CSR_READ_4(sc, B0_HWE_IMSK);
3557			}
3558		}
3559		/* Clear the interrupt. */
3560		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3561		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
3562		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3563	}
3564
3565	if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
3566		msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
3567	if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
3568		msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
3569}
3570
3571static __inline void
3572msk_rxput(struct msk_if_softc *sc_if)
3573{
3574	struct msk_softc *sc;
3575
3576	sc = sc_if->msk_softc;
3577	if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN))
3578		bus_dmamap_sync(
3579		    sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
3580		    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
3581		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3582	else
3583		bus_dmamap_sync(
3584		    sc_if->msk_cdata.msk_rx_ring_tag,
3585		    sc_if->msk_cdata.msk_rx_ring_map,
3586		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3587	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
3588	    PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
3589}
3590
3591static int
3592msk_handle_events(struct msk_softc *sc)
3593{
3594	struct msk_if_softc *sc_if;
3595	int rxput[2];
3596	struct msk_stat_desc *sd;
3597	uint32_t control, status;
3598	int cons, len, port, rxprog;
3599
3600	if (sc->msk_stat_cons == CSR_READ_2(sc, STAT_PUT_IDX))
3601		return (0);
3602
3603	/* Sync status LEs. */
3604	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
3605	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3606
3607	rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
3608	rxprog = 0;
3609	cons = sc->msk_stat_cons;
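	/*
	 * Walk the status LE ring.  Each LE owned by the hardware describes
	 * one event: an Rx completion, a checksum/VLAN tag LE that precedes
	 * its Rx completion, or a Tx done index.  HW_OWNER is cleared on
	 * each processed LE so the walk stops at the first entry the
	 * hardware has not written yet.
	 */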
3610	for (;;) {
3611		sd = &sc->msk_stat_ring[cons];
3612		control = le32toh(sd->msk_control);
3613		if ((control & HW_OWNER) == 0)
3614			break;
3615		control &= ~HW_OWNER;
3616		sd->msk_control = htole32(control);
3617		status = le32toh(sd->msk_status);
3618		len = control & STLE_LEN_MASK;
3619		port = (control >> 16) & 0x01;
3620		sc_if = sc->msk_if[port];
3621		if (sc_if == NULL) {
3622			device_printf(sc->msk_dev, "invalid port opcode "
3623			    "0x%08x\n", control & STLE_OP_MASK);
3624			continue;
3625		}
3626
3627		switch (control & STLE_OP_MASK) {
3628		case OP_RXVLAN:
3629			sc_if->msk_vtag = ntohs(len);
3630			break;
3631		case OP_RXCHKSVLAN:
3632			sc_if->msk_vtag = ntohs(len);
3633			/* FALLTHROUGH */
3634		case OP_RXCHKS:
3635			sc_if->msk_csum = status;
3636			break;
3637		case OP_RXSTAT:
3638			if (!(sc_if->msk_ifp->if_drv_flags & IFF_DRV_RUNNING))
3639				break;
3640			if (sc_if->msk_framesize >
3641			    (MCLBYTES - MSK_RX_BUF_ALIGN))
3642				msk_jumbo_rxeof(sc_if, status, control, len);
3643			else
3644				msk_rxeof(sc_if, status, control, len);
3645			rxprog++;
3646			/*
3647			 * Because there is no way to sync a single Rx LE,
3648			 * put the DMA sync operation off until the end of
3649			 * event processing.
3650			 */
3651			rxput[port]++;
3652			/* Update the prefetch unit if we've passed the watermark. */
3653			if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
3654				msk_rxput(sc_if);
3655				rxput[port] = 0;
3656			}
3657			break;
3658		case OP_TXINDEXLE:
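			/*
			 * A single Tx index LE reports both ports: port A's
			 * consumer index is in the status word, while port
			 * B's index is split between the status word and the
			 * length field and is reassembled below.
			 */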
3659			if (sc->msk_if[MSK_PORT_A] != NULL)
3660				msk_txeof(sc->msk_if[MSK_PORT_A],
3661				    status & STLE_TXA1_MSKL);
3662			if (sc->msk_if[MSK_PORT_B] != NULL)
3663				msk_txeof(sc->msk_if[MSK_PORT_B],
3664				    ((status & STLE_TXA2_MSKL) >>
3665				    STLE_TXA2_SHIFTL) |
3666				    ((len & STLE_TXA2_MSKH) <<
3667				    STLE_TXA2_SHIFTH));
3668			break;
3669		default:
3670			device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
3671			    control & STLE_OP_MASK);
3672			break;
3673		}
3674		MSK_INC(cons, sc->msk_stat_count);
3675		if (rxprog > sc->msk_process_limit)
3676			break;
3677	}
3678
3679	sc->msk_stat_cons = cons;
3680	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
3681	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3682
3683	if (rxput[MSK_PORT_A] > 0)
3684		msk_rxput(sc->msk_if[MSK_PORT_A]);
3685	if (rxput[MSK_PORT_B] > 0)
3686		msk_rxput(sc->msk_if[MSK_PORT_B]);
3687
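	/*
	 * A non-zero return tells the caller that more status LEs arrived
	 * while we were processing; the interrupt handler then leaves the
	 * status BMU interrupt asserted instead of clearing it.
	 */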
3688	return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
3689}
3690
3691static void
3692msk_intr(void *xsc)
3693{
3694	struct msk_softc *sc;
3695	struct msk_if_softc *sc_if0, *sc_if1;
3696	struct ifnet *ifp0, *ifp1;
3697	uint32_t status;
3698	int domore;
3699
3700	sc = xsc;
3701	MSK_LOCK(sc);
3702
3703#ifndef __HAIKU__
3704	/* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
3705	status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3706	if (status == 0 || status == 0xffffffff ||
3707	    (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 ||
3708	    (status & sc->msk_intrmask) == 0) {
3709		CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3710		MSK_UNLOCK(sc);
3711		return;
3712	}
3713#else
3714	status = sc->haiku_interrupt_status;
3715#endif
3716
3717	sc_if0 = sc->msk_if[MSK_PORT_A];
3718	sc_if1 = sc->msk_if[MSK_PORT_B];
3719	ifp0 = ifp1 = NULL;
3720	if (sc_if0 != NULL)
3721		ifp0 = sc_if0->msk_ifp;
3722	if (sc_if1 != NULL)
3723		ifp1 = sc_if1->msk_ifp;
3724
3725	if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3726		msk_intr_phy(sc_if0);
3727	if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3728		msk_intr_phy(sc_if1);
3729	if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3730		msk_intr_gmac(sc_if0);
3731	if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3732		msk_intr_gmac(sc_if1);
3733	if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3734		device_printf(sc->msk_dev, "Rx descriptor error\n");
3735		sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3736		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3737		CSR_READ_4(sc, B0_IMSK);
3738	}
3739	if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3740		device_printf(sc->msk_dev, "Tx descriptor error\n");
3741		sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3742		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3743		CSR_READ_4(sc, B0_IMSK);
3744	}
3745	if ((status & Y2_IS_HW_ERR) != 0)
3746		msk_intr_hwerr(sc);
3747
3748	domore = msk_handle_events(sc);
3749	if ((status & Y2_IS_STAT_BMU) != 0 && domore == 0)
3750		CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3751
3752#ifndef __HAIKU__
3753	/* Reenable interrupts. */
3754	CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3755#endif
3756
3757	if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3758	    !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3759		msk_start_locked(ifp0);
3760	if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3761	    !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3762		msk_start_locked(ifp1);
3763
3764	MSK_UNLOCK(sc);
3765}
3766
3767static void
3768msk_set_tx_stfwd(struct msk_if_softc *sc_if)
3769{
3770	struct msk_softc *sc;
3771	struct ifnet *ifp;
3772
3773	ifp = sc_if->msk_ifp;
3774	sc = sc_if->msk_softc;
3775	if ((sc->msk_hw_id == CHIP_ID_YUKON_EX &&
3776	    sc->msk_hw_rev != CHIP_REV_YU_EX_A0) ||
3777	    sc->msk_hw_id >= CHIP_ID_YUKON_SUPR) {
3778		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3779		    TX_STFW_ENA);
3780	} else {
3781		if (ifp->if_mtu > ETHERMTU) {
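			/*
			 * On these controllers the Tx GMAC FIFO is too small
			 * to hold a full jumbo frame, so store-and-forward
			 * cannot be used; run cut-through with an
			 * almost-empty threshold instead.
			 */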
3782			/* Set Tx GMAC FIFO Almost Empty Threshold. */
3783			CSR_WRITE_4(sc,
3784			    MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
3785			    MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
3786			/* Disable Store & Forward mode for Tx. */
3787			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3788			    TX_STFW_DIS);
3789		} else {
3790			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3791			    TX_STFW_ENA);
3792		}
3793	}
3794}
3795
3796static void
3797msk_init(void *xsc)
3798{
3799	struct msk_if_softc *sc_if = xsc;
3800
3801	MSK_IF_LOCK(sc_if);
3802	msk_init_locked(sc_if);
3803	MSK_IF_UNLOCK(sc_if);
3804}
3805
3806static void
3807msk_init_locked(struct msk_if_softc *sc_if)
3808{
3809	struct msk_softc *sc;
3810	struct ifnet *ifp;
3811	struct mii_data	 *mii;
3812	uint8_t *eaddr;
3813	uint16_t gmac;
3814	uint32_t reg;
3815	int error;
3816
3817	MSK_IF_LOCK_ASSERT(sc_if);
3818
3819	ifp = sc_if->msk_ifp;
3820	sc = sc_if->msk_softc;
3821	mii = device_get_softc(sc_if->msk_miibus);
3822
3823	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3824		return;
3825
3826	error = 0;
3827	/* Cancel pending I/O and free all Rx/Tx buffers. */
3828	msk_stop(sc_if);
3829
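	/*
	 * msk_framesize is the largest frame the chip must receive: at least
	 * a standard Ethernet MTU plus the Ethernet and VLAN headers.  It
	 * also selects between the standard and jumbo Rx rings below.
	 */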
3830	if (ifp->if_mtu < ETHERMTU)
3831		sc_if->msk_framesize = ETHERMTU;
3832	else
3833		sc_if->msk_framesize = ifp->if_mtu;
3834	sc_if->msk_framesize += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3835	if (ifp->if_mtu > ETHERMTU &&
3836	    (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
3837		ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
3838		ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
3839	}
3840
3841	/* GMAC Control reset. */
3842	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET);
3843	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR);
3844	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF);
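	/*
	 * Yukon Extreme and Supreme carry embedded MACsec Rx/Tx blocks and a
	 * retry unit that this driver does not use; keep them bypassed so
	 * frames pass through unmodified.
	 */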
3845	if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
3846	    sc->msk_hw_id == CHIP_ID_YUKON_SUPR)
3847		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL),
3848		    GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
3849		    GMC_BYP_RETR_ON);
3850
3851	/*
3852	 * Initialize GMAC first such that speed/duplex/flow-control
3853	 * parameters are renegotiated when interface is brought up.
3854	 */
3855	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0);
3856
3857	/* Dummy read of the Interrupt Source Register. */
3858	CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3859
3860	/* Clear MIB stats. */
3861	msk_stats_clear(sc_if);
3862
3863	/* Disable FCS. */
3864	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);
3865
3866	/* Setup Transmit Control Register. */
3867	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
3868
3869	/* Setup Transmit Flow Control Register. */
3870	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
3871
3872	/* Setup Transmit Parameter Register. */
3873	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
3874	    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
3875	    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
3876
3877	gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
3878	    GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
3879
3880	if (ifp->if_mtu > ETHERMTU)
3881		gmac |= GM_SMOD_JUMBO_ENA;
3882	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
3883
3884	/* Set station address. */
3885	eaddr = IF_LLADDR(ifp);
3886	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L,
3887	    eaddr[0] | (eaddr[1] << 8));
3888	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1M,
3889	    eaddr[2] | (eaddr[3] << 8));
3890	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1H,
3891	    eaddr[4] | (eaddr[5] << 8));
3892	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L,
3893	    eaddr[0] | (eaddr[1] << 8));
3894	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2M,
3895	    eaddr[2] | (eaddr[3] << 8));
3896	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2H,
3897	    eaddr[4] | (eaddr[5] << 8));
3898
3899	/* Disable interrupts for counter overflows. */
3900	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
3901	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
3902	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
3903
3904	/* Configure Rx MAC FIFO. */
3905	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3906	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
3907	reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
3908	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P ||
3909	    sc->msk_hw_id == CHIP_ID_YUKON_EX)
3910		reg |= GMF_RX_OVER_ON;
3911	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg);
3912
3913	/* Set receive filter. */
3914	msk_rxfilter(sc_if);
3915
3916	if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
3917		/* Clear flush mask - HW bug. */
3918		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 0);
3919	} else {
3920		/* Flush Rx MAC FIFO on any flow control or error. */
3921		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
3922		    GMR_FS_ANY_ERR);
3923	}
3924
3925	/*
3926	 * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
3927	 * due to hardware hang on receipt of pause frames.
3928	 */
3929	reg = RX_GMF_FL_THR_DEF + 1;
3930	/* Another magic value for Yukon FE+, taken from Linux. */
3931	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
3932	    sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0)
3933		reg = 0x178;
3934	CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg);
3935
3936	/* Configure Tx MAC FIFO. */
3937	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3938	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
3939	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
3940
3941	/* Configure hardware VLAN tag insertion/stripping. */
3942	msk_setvlan(sc_if, ifp);
3943
3944	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
3945		/* Set Rx Pause threshold. */
3946		CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
3947		    MSK_ECU_LLPP);
3948		CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
3949		    MSK_ECU_ULPP);
3950		/* Configure store-and-forward for Tx. */
3951		msk_set_tx_stfwd(sc_if);
3952	}
3953
3954	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
3955	    sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
3956		/* Disable dynamic watermark - from Linux. */
3957		reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA));
3958		reg &= ~0x03;
3959		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg);
3960	}
3961
3962	/*
3963	 * Disable Force Sync bit and Alloc bit in Tx RAM interface
3964	 * arbiter as we don't use Sync Tx queue.
3965	 */
3966	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
3967	    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
3968	/* Enable the RAM Interface Arbiter. */
3969	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
3970
3971	/* Setup RAM buffer. */
3972	msk_set_rambuffer(sc_if);
3973
3974	/* Disable Tx sync Queue. */
3975	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
3976
3977	/* Setup Tx Queue Bus Memory Interface. */
3978	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
3979	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
3980	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
3981	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
3982	switch (sc->msk_hw_id) {
3983	case CHIP_ID_YUKON_EC_U:
3984		if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
3985			/* Fix for Yukon-EC Ultra: set BMU FIFO level */
3986			CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL),
3987			    MSK_ECU_TXFF_LEV);
3988		}
3989		break;
3990	case CHIP_ID_YUKON_EX:
3991		/*
3992		 * Yukon Extreme seems to have a silicon bug in its
3993		 * automatic Tx checksum calculation capability.
3994		 */
3995		if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
3996			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F),
3997			    F_TX_CHK_AUTO_OFF);
3998		break;
3999	}
4000
4001	/* Setup Rx Queue Bus Memory Interface. */
4002	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
4003	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
4004	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
4005	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
4006	if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
4007	    sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
4008		/* MAC Rx RAM Read is controlled by hardware. */
4009		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
4010	}
4011
4012	msk_set_prefetch(sc, sc_if->msk_txq,
4013	    sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
4014	msk_init_tx_ring(sc_if);
4015
4016	/* Disable Rx checksum offload and RSS hash. */
4017	reg = BMU_DIS_RX_RSS_HASH;
4018	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
4019	    (ifp->if_capenable & IFCAP_RXCSUM) != 0)
4020		reg |= BMU_ENA_RX_CHKSUM;
4021	else
4022		reg |= BMU_DIS_RX_CHKSUM;
4023	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), reg);
4024	if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) {
4025		msk_set_prefetch(sc, sc_if->msk_rxq,
4026		    sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
4027		    MSK_JUMBO_RX_RING_CNT - 1);
4028		error = msk_init_jumbo_rx_ring(sc_if);
4029	} else {
4030		msk_set_prefetch(sc, sc_if->msk_rxq,
4031		    sc_if->msk_rdata.msk_rx_ring_paddr,
4032		    MSK_RX_RING_CNT - 1);
4033		error = msk_init_rx_ring(sc_if);
4034	}
4035	if (error != 0) {
4036		device_printf(sc_if->msk_if_dev,
4037		    "initialization failed: no memory for Rx buffers\n");
4038		msk_stop(sc_if);
4039		return;
4040	}
4041	if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
4042	    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
4043		/* Disable flushing of non-ASF packets. */
4044		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
4045		    GMF_RX_MACSEC_FLUSH_OFF);
4046	}
4047
4048	/* Configure interrupt handling. */
4049	if (sc_if->msk_port == MSK_PORT_A) {
4050		sc->msk_intrmask |= Y2_IS_PORT_A;
4051		sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
4052	} else {
4053		sc->msk_intrmask |= Y2_IS_PORT_B;
4054		sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
4055	}
4056	/* Configure IRQ moderation mask. */
4057	CSR_WRITE_4(sc, B2_IRQM_MSK, sc->msk_intrmask);
4058	if (sc->msk_int_holdoff > 0) {
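		/*
		 * MSK_USECS() scales the holdoff value (in microseconds) by
		 * the chip's core clock frequency so the moderation timer
		 * counts in real time.
		 */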
4059		/* Configure initial IRQ moderation timer value. */
4060		CSR_WRITE_4(sc, B2_IRQM_INI,
4061		    MSK_USECS(sc, sc->msk_int_holdoff));
4062		CSR_WRITE_4(sc, B2_IRQM_VAL,
4063		    MSK_USECS(sc, sc->msk_int_holdoff));
4064		/* Start IRQ moderation. */
4065		CSR_WRITE_1(sc, B2_IRQM_CTRL, TIM_START);
4066	}
4067	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
4068	CSR_READ_4(sc, B0_HWE_IMSK);
4069	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
4070	CSR_READ_4(sc, B0_IMSK);
4071
4072	ifp->if_drv_flags |= IFF_DRV_RUNNING;
4073	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4074
4075	sc_if->msk_flags &= ~MSK_FLAG_LINK;
4076	mii_mediachg(mii);
4077
4078	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
4079}
4080
4081static void
4082msk_set_rambuffer(struct msk_if_softc *sc_if)
4083{
4084	struct msk_softc *sc;
4085	int ltpp, utpp;
4086
4087	sc = sc_if->msk_softc;
4088	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
4089		return;
4090
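	/*
	 * RAM buffer start/end and read/write pointer registers are in units
	 * of 64-bit words, hence the divisions by 8 below.
	 */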
4091	/* Setup Rx Queue. */
4092	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
4093	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
4094	    sc->msk_rxqstart[sc_if->msk_port] / 8);
4095	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
4096	    sc->msk_rxqend[sc_if->msk_port] / 8);
4097	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
4098	    sc->msk_rxqstart[sc_if->msk_port] / 8);
4099	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
4100	    sc->msk_rxqstart[sc_if->msk_port] / 8);
4101
4102	utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
4103	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
4104	ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
4105	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
4106	if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
4107		ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
4108	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
4109	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
4110	/* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */
4111
4112	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
4113	CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));
4114
4115	/* Setup Tx Queue. */
4116	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
4117	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
4118	    sc->msk_txqstart[sc_if->msk_port] / 8);
4119	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
4120	    sc->msk_txqend[sc_if->msk_port] / 8);
4121	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
4122	    sc->msk_txqstart[sc_if->msk_port] / 8);
4123	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
4124	    sc->msk_txqstart[sc_if->msk_port] / 8);
4125	/* Enable Store & Forward for Tx side. */
4126	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
4127	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
4128	CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
4129}
4130
4131static void
4132msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
4133    uint32_t count)
4134{
4135
4136	/* Reset the prefetch unit. */
4137	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
4138	    PREF_UNIT_RST_SET);
4139	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
4140	    PREF_UNIT_RST_CLR);
4141	/* Set LE base address. */
4142	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
4143	    MSK_ADDR_LO(addr));
4144	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
4145	    MSK_ADDR_HI(addr));
4146	/* Set the list last index. */
4147	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
4148	    count);
4149	/* Turn on prefetch unit. */
4150	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
4151	    PREF_UNIT_OP_ON);
4152	/* Dummy read to ensure write. */
4153	CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
4154}
4155
4156static void
4157msk_stop(struct msk_if_softc *sc_if)
4158{
4159	struct msk_softc *sc;
4160	struct msk_txdesc *txd;
4161	struct msk_rxdesc *rxd;
4162	struct msk_rxdesc *jrxd;
4163	struct ifnet *ifp;
4164	uint32_t val;
4165	int i;
4166
4167	MSK_IF_LOCK_ASSERT(sc_if);
4168	sc = sc_if->msk_softc;
4169	ifp = sc_if->msk_ifp;
4170
4171	callout_stop(&sc_if->msk_tick_ch);
4172	sc_if->msk_watchdog_timer = 0;
4173
4174	/* Disable interrupts. */
4175	if (sc_if->msk_port == MSK_PORT_A) {
4176		sc->msk_intrmask &= ~Y2_IS_PORT_A;
4177		sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
4178	} else {
4179		sc->msk_intrmask &= ~Y2_IS_PORT_B;
4180		sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
4181	}
4182	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
4183	CSR_READ_4(sc, B0_HWE_IMSK);
4184	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
4185	CSR_READ_4(sc, B0_IMSK);
4186
4187	/* Disable Tx/Rx MAC. */
4188	val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
4189	val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
4190	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
4191	/* Read again to ensure writing. */
4192	GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
4193	/* Update stats and clear counters. */
4194	msk_stats_update(sc_if);
4195
4196	/* Stop Tx BMU. */
4197	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
4198	val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
4199	for (i = 0; i < MSK_TIMEOUT; i++) {
4200		if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
4201			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
4202			    BMU_STOP);
4203			val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
4204		} else
4205			break;
4206		DELAY(1);
4207	}
4208	if (i == MSK_TIMEOUT)
4209		device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
4210	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
4211	    RB_RST_SET | RB_DIS_OP_MD);
4212
4213	/* Disable all GMAC interrupts. */
4214	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
4215	/* Disable PHY interrupt. */
4216	msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
4217
4218	/* Disable the RAM Interface Arbiter. */
4219	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);
4220
4221	/* Reset the PCI FIFO of the async Tx queue */
4222	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
4223	    BMU_RST_SET | BMU_FIFO_RST);
4224
4225	/* Reset the Tx prefetch units. */
4226	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
4227	    PREF_UNIT_RST_SET);
4228
4229	/* Reset the RAM Buffer async Tx queue. */
4230	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);
4231
4232	/* Reset Tx MAC FIFO. */
4233	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
4234	/* Set Pause Off. */
4235	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
4236
4237	/*
4238	 * The Rx Stop command will not work for Yukon-2 if the BMU does not
4239	 * reach the end of packet, and since we can't make sure whether we
4240	 * have incoming data, we must reset the BMU while it is not in the
4241	 * middle of a DMA transfer. Since it is possible that the Rx path is
4242	 * still active, the Rx RAM buffer is stopped first, so any possible
4243	 * incoming data will not trigger a DMA. After the RAM buffer is
4244	 * stopped, the BMU is polled until any DMA in progress has ended,
4245	 * and only then is it reset.
4246	 */
4247
4248	/* Disable the RAM Buffer receive queue. */
4249	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
4250	for (i = 0; i < MSK_TIMEOUT; i++) {
4251		if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
4252		    CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
4253			break;
4254		DELAY(1);
4255	}
4256	if (i == MSK_TIMEOUT)
4257		device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
4258	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
4259	    BMU_RST_SET | BMU_FIFO_RST);
4260	/* Reset the Rx prefetch unit. */
4261	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
4262	    PREF_UNIT_RST_SET);
4263	/* Reset the RAM Buffer receive queue. */
4264	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
4265	/* Reset Rx MAC FIFO. */
4266	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
4267
4268	/* Free Rx and Tx mbufs still in the queues. */
4269	for (i = 0; i < MSK_RX_RING_CNT; i++) {
4270		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
4271		if (rxd->rx_m != NULL) {
4272			bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
4273			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
4274			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
4275			    rxd->rx_dmamap);
4276			m_freem(rxd->rx_m);
4277			rxd->rx_m = NULL;
4278		}
4279	}
4280	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
4281		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
4282		if (jrxd->rx_m != NULL) {
4283			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
4284			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
4285			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
4286			    jrxd->rx_dmamap);
4287			m_freem(jrxd->rx_m);
4288			jrxd->rx_m = NULL;
4289		}
4290	}
4291	for (i = 0; i < MSK_TX_RING_CNT; i++) {
4292		txd = &sc_if->msk_cdata.msk_txdesc[i];
4293		if (txd->tx_m != NULL) {
4294			bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
4295			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
4296			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
4297			    txd->tx_dmamap);
4298			m_freem(txd->tx_m);
4299			txd->tx_m = NULL;
4300		}
4301	}
4302
4303	/*
4304	 * Mark the interface down.
4305	 */
4306	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4307	sc_if->msk_flags &= ~MSK_FLAG_LINK;
4308}
4309
4310/*
4311 * When GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading lower
4312 * counter clears high 16 bits of the counter such that accessing
4313 * lower 16 bits should be the last operation.
4314 */
4315#define	MSK_READ_MIB32(x, y)					\
4316	(((uint32_t)GMAC_READ_2(sc, x, (y) + 4)) << 16) +	\
4317	(uint32_t)GMAC_READ_2(sc, x, y)
4318#define	MSK_READ_MIB64(x, y)					\
4319	(((uint64_t)MSK_READ_MIB32(x, (y) + 8)) << 32) +	\
4320	(uint64_t)MSK_READ_MIB32(x, y)
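/*
 * XXX
 * Note that the order in which the two halves are read is only guaranteed
 * by convention: C does not specify the evaluation order of the '+'
 * operands, so these macros rely on the compiler reading the high half
 * (first operand) before the low half.
 */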
4321
4322static void
4323msk_stats_clear(struct msk_if_softc *sc_if)
4324{
4325	struct msk_softc *sc;
4326	uint32_t reg;
4327	uint16_t gmac;
4328	int i;
4329
4330	MSK_IF_LOCK_ASSERT(sc_if);
4331
4332	sc = sc_if->msk_softc;
4333	/* Set MIB Clear Counter Mode. */
4334	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
4335	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
4336	/* Read all MIB Counters with Clear Mode set. */
4337	for (i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i += sizeof(uint32_t))
4338		reg = MSK_READ_MIB32(sc_if->msk_port, i);
4339	/* Clear MIB Clear Counter Mode. */
4340	gmac &= ~GM_PAR_MIB_CLR;
4341	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
4342}
4343
4344static void
4345msk_stats_update(struct msk_if_softc *sc_if)
4346{
4347	struct msk_softc *sc;
4348	struct ifnet *ifp;
4349	struct msk_hw_stats *stats;
4350	uint16_t gmac;
4351	uint32_t reg;
4352
4353	MSK_IF_LOCK_ASSERT(sc_if);
4354
4355	ifp = sc_if->msk_ifp;
4356	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
4357		return;
4358	sc = sc_if->msk_softc;
4359	stats = &sc_if->msk_stats;
4360	/* Set MIB Clear Counter Mode. */
4361	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
4362	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
4363
4364	/* Rx stats. */
4365	stats->rx_ucast_frames +=
4366	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_UC_OK);
4367	stats->rx_bcast_frames +=
4368	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_BC_OK);
4369	stats->rx_pause_frames +=
4370	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MPAUSE);
4371	stats->rx_mcast_frames +=
4372	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MC_OK);
4373	stats->rx_crc_errs +=
4374	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_FCS_ERR);
4375	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE1);
4376	stats->rx_good_octets +=
4377	    MSK_READ_MIB64(sc_if->msk_port, GM_RXO_OK_LO);
4378	stats->rx_bad_octets +=
4379	    MSK_READ_MIB64(sc_if->msk_port, GM_RXO_ERR_LO);
4380	stats->rx_runts +=
4381	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SHT);
4382	stats->rx_runt_errs +=
4383	    MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FRAG);
4384	stats->rx_pkts_64 +=
4385	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_64B);
4386	stats->rx_pkts_65_127 +=
4387	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_127B);
4388	stats->rx_pkts_128_255 +=
4389	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_255B);
4390	stats->rx_pkts_256_511 +=
4391	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_511B);
4392	stats->rx_pkts_512_1023 +=
4393	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1023B);
4394	stats->rx_pkts_1024_1518 +=
4395	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1518B);
4396	stats->rx_pkts_1519_max +=
4397	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MAX_SZ);
4398	stats->rx_pkts_too_long +=
4399	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_LNG_ERR);
4400	stats->rx_pkts_jabbers +=
4401	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_JAB_PKT);
4402	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE2);
4403	stats->rx_fifo_oflows +=
4404	    MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FIFO_OV);
4405	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE3);
4406
4407	/* Tx stats. */
4408	stats->tx_ucast_frames +=
4409	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_UC_OK);
4410	stats->tx_bcast_frames +=
4411	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_BC_OK);
4412	stats->tx_pause_frames +=
4413	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MPAUSE);
4414	stats->tx_mcast_frames +=
4415	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MC_OK);
4416	stats->tx_octets +=
4417	    MSK_READ_MIB64(sc_if->msk_port, GM_TXO_OK_LO);
4418	stats->tx_pkts_64 +=
4419	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_64B);
4420	stats->tx_pkts_65_127 +=
4421	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_127B);
4422	stats->tx_pkts_128_255 +=
4423	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_255B);
4424	stats->tx_pkts_256_511 +=
4425	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_511B);
4426	stats->tx_pkts_512_1023 +=
4427	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1023B);
4428	stats->tx_pkts_1024_1518 +=
4429	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1518B);
4430	stats->tx_pkts_1519_max +=
4431	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MAX_SZ);
4432	reg = MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SPARE1);
4433	stats->tx_colls +=
4434	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_COL);
4435	stats->tx_late_colls +=
4436	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_LAT_COL);
4437	stats->tx_excess_colls +=
4438	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_ABO_COL);
4439	stats->tx_multi_colls +=
4440	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MUL_COL);
4441	stats->tx_single_colls +=
4442	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SNG_COL);
4443	stats->tx_underflows +=
4444	    MSK_READ_MIB32(sc_if->msk_port, GM_TXE_FIFO_UR);
4445	/* Clear MIB Clear Counter Mode. */
4446	gmac &= ~GM_PAR_MIB_CLR;
4447	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
4448}
4449
4450static int
4451msk_sysctl_stat32(SYSCTL_HANDLER_ARGS)
4452{
4453	struct msk_softc *sc;
4454	struct msk_if_softc *sc_if;
4455	uint32_t result, *stat;
4456	int off;
4457
4458	sc_if = (struct msk_if_softc *)arg1;
4459	sc = sc_if->msk_softc;
4460	off = arg2;
4461	stat = (uint32_t *)((uint8_t *)&sc_if->msk_stats + off);
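	/*
	 * 'off' is the byte offset of the counter inside struct msk_hw_stats
	 * (4 bytes per 32-bit counter), while the GMAC spaces its MIB
	 * counters 8 bytes apart (two 16-bit halves at a 4-byte stride),
	 * hence the 'off * 2' scaling below.
	 */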
4462
4463	MSK_IF_LOCK(sc_if);
4464	result = MSK_READ_MIB32(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
4465	result += *stat;
4466	MSK_IF_UNLOCK(sc_if);
4467
4468	return (sysctl_handle_int(oidp, &result, 0, req));
4469}
4470
4471static int
4472msk_sysctl_stat64(SYSCTL_HANDLER_ARGS)
4473{
4474	struct msk_softc *sc;
4475	struct msk_if_softc *sc_if;
4476	uint64_t result, *stat;
4477	int off;
4478
4479	sc_if = (struct msk_if_softc *)arg1;
4480	sc = sc_if->msk_softc;
4481	off = arg2;
4482	stat = (uint64_t *)((uint8_t *)&sc_if->msk_stats + off);
4483
4484	MSK_IF_LOCK(sc_if);
4485	result = MSK_READ_MIB64(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
4486	result += *stat;
4487	MSK_IF_UNLOCK(sc_if);
4488
4489	return (sysctl_handle_64(oidp, &result, 0, req));
4490}
4491
4492#undef MSK_READ_MIB32
4493#undef MSK_READ_MIB64
4494
4495#define MSK_SYSCTL_STAT32(sc, c, o, p, n, d) 				\
4496	SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_UINT | CTLFLAG_RD, 	\
4497	    sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat32,	\
4498	    "IU", d)
4499#define MSK_SYSCTL_STAT64(sc, c, o, p, n, d) 				\
4500	SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_U64 | CTLFLAG_RD, 	\
4501	    sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat64,	\
4502	    "QU", d)
4503
4504static void
4505msk_sysctl_node(struct msk_if_softc *sc_if)
4506{
4507	struct sysctl_ctx_list *ctx;
4508	struct sysctl_oid_list *child, *schild;
4509	struct sysctl_oid *tree;
4510
4511	ctx = device_get_sysctl_ctx(sc_if->msk_if_dev);
4512	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc_if->msk_if_dev));
4513
4514	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
4515	    NULL, "MSK Statistics");
4516	schild = SYSCTL_CHILDREN(tree);
4517	tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
4518	    NULL, "MSK RX Statistics");
4519	child = SYSCTL_CHILDREN(tree);
4520	MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
4521	    child, rx_ucast_frames, "Good unicast frames");
4522	MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
4523	    child, rx_bcast_frames, "Good broadcast frames");
4524	MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
4525	    child, rx_pause_frames, "Pause frames");
4526	MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
4527	    child, rx_mcast_frames, "Multicast frames");
4528	MSK_SYSCTL_STAT32(sc_if, ctx, "crc_errs",
4529	    child, rx_crc_errs, "CRC errors");
4530	MSK_SYSCTL_STAT64(sc_if, ctx, "good_octets",
4531	    child, rx_good_octets, "Good octets");
4532	MSK_SYSCTL_STAT64(sc_if, ctx, "bad_octets",
4533	    child, rx_bad_octets, "Bad octets");
4534	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
4535	    child, rx_pkts_64, "64 bytes frames");
4536	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
4537	    child, rx_pkts_65_127, "65 to 127 bytes frames");
4538	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
4539	    child, rx_pkts_128_255, "128 to 255 bytes frames");
4540	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
4541	    child, rx_pkts_256_511, "256 to 511 bytes frames");
4542	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
4543	    child, rx_pkts_512_1023, "512 to 1023 bytes frames");
4544	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
4545	    child, rx_pkts_1024_1518, "1024 to 1518 bytes frames");
4546	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
4547	    child, rx_pkts_1519_max, "1519 to max frames");
4548	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_too_long",
4549	    child, rx_pkts_too_long, "frames too long");
4550	MSK_SYSCTL_STAT32(sc_if, ctx, "jabbers",
4551	    child, rx_pkts_jabbers, "Jabber errors");
4552	MSK_SYSCTL_STAT32(sc_if, ctx, "overflows",
4553	    child, rx_fifo_oflows, "FIFO overflows");
4554
4555	tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
4556	    NULL, "MSK TX Statistics");
4557	child = SYSCTL_CHILDREN(tree);
4558	MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
4559	    child, tx_ucast_frames, "Unicast frames");
4560	MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
4561	    child, tx_bcast_frames, "Broadcast frames");
4562	MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
4563	    child, tx_pause_frames, "Pause frames");
4564	MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
4565	    child, tx_mcast_frames, "Multicast frames");
4566	MSK_SYSCTL_STAT64(sc_if, ctx, "octets",
4567	    child, tx_octets, "Octets");
4568	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
4569	    child, tx_pkts_64, "64 bytes frames");
4570	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
4571	    child, tx_pkts_65_127, "65 to 127 bytes frames");
4572	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
4573	    child, tx_pkts_128_255, "128 to 255 bytes frames");
4574	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
4575	    child, tx_pkts_256_511, "256 to 511 bytes frames");
4576	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
4577	    child, tx_pkts_512_1023, "512 to 1023 bytes frames");
4578	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
4579	    child, tx_pkts_1024_1518, "1024 to 1518 bytes frames");
4580	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
4581	    child, tx_pkts_1519_max, "1519 to max frames");
4582	MSK_SYSCTL_STAT32(sc_if, ctx, "colls",
4583	    child, tx_colls, "Collisions");
4584	MSK_SYSCTL_STAT32(sc_if, ctx, "late_colls",
4585	    child, tx_late_colls, "Late collisions");
4586	MSK_SYSCTL_STAT32(sc_if, ctx, "excess_colls",
4587	    child, tx_excess_colls, "Excessive collisions");
4588	MSK_SYSCTL_STAT32(sc_if, ctx, "multi_colls",
4589	    child, tx_multi_colls, "Multiple collisions");
4590	MSK_SYSCTL_STAT32(sc_if, ctx, "single_colls",
4591	    child, tx_single_colls, "Single collisions");
4592	MSK_SYSCTL_STAT32(sc_if, ctx, "underflows",
4593	    child, tx_underflows, "FIFO underflows");
4594}
4595
4596#undef MSK_SYSCTL_STAT32
4597#undef MSK_SYSCTL_STAT64
4598
4599static int
4600sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
4601{
4602	int error, value;
4603
4604	if (!arg1)
4605		return (EINVAL);
4606	value = *(int *)arg1;
4607	error = sysctl_handle_int(oidp, &value, 0, req);
4608	if (error || !req->newptr)
4609		return (error);
4610	if (value < low || value > high)
4611		return (EINVAL);
4612	*(int *)arg1 = value;
4613
4614	return (0);
4615}
4616
4617static int
4618sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
4619{
4620
4621	return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
4622	    MSK_PROC_MAX));
4623}
4624