if_msk.c: revision 192725 → revision 192726
1/******************************************************************************
2 *
3 * Name : sky2.c
4 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
5 * Version: $Revision: 1.23 $
6 * Date : $Date: 2005/12/22 09:04:11 $
7 * Purpose: Main driver source file
8 *
9 *****************************************************************************/
10
11/******************************************************************************
12 *
13 * LICENSE:
14 * Copyright (C) Marvell International Ltd. and/or its affiliates
15 *
16 * The computer program files contained in this folder ("Files")
17 * are provided to you under the BSD-type license terms provided
18 * below, and any use of such Files and any derivative works
19 * thereof created by you shall be governed by the following terms
20 * and conditions:
21 *
22 * - Redistributions of source code must retain the above copyright
23 * notice, this list of conditions and the following disclaimer.
24 * - Redistributions in binary form must reproduce the above
25 * copyright notice, this list of conditions and the following
26 * disclaimer in the documentation and/or other materials provided
27 * with the distribution.
28 * - Neither the name of Marvell nor the names of its contributors
29 * may be used to endorse or promote products derived from this
30 * software without specific prior written permission.
31 *
32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
33 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
34 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
36 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
37 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
38 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
39 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
41 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
43 * OF THE POSSIBILITY OF SUCH DAMAGE.
44 * /LICENSE
45 *
46 *****************************************************************************/
47
48/*-
49 * Copyright (c) 1997, 1998, 1999, 2000
50 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
51 *
52 * Redistribution and use in source and binary forms, with or without
53 * modification, are permitted provided that the following conditions
54 * are met:
55 * 1. Redistributions of source code must retain the above copyright
56 * notice, this list of conditions and the following disclaimer.
57 * 2. Redistributions in binary form must reproduce the above copyright
58 * notice, this list of conditions and the following disclaimer in the
59 * documentation and/or other materials provided with the distribution.
60 * 3. All advertising materials mentioning features or use of this software
61 * must display the following acknowledgement:
62 * This product includes software developed by Bill Paul.
63 * 4. Neither the name of the author nor the names of any co-contributors
64 * may be used to endorse or promote products derived from this software
65 * without specific prior written permission.
66 *
67 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
68 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
69 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
70 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
71 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
72 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
73 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
74 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
75 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
76 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
77 * THE POSSIBILITY OF SUCH DAMAGE.
78 */
79/*-
80 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
81 *
82 * Permission to use, copy, modify, and distribute this software for any
83 * purpose with or without fee is hereby granted, provided that the above
84 * copyright notice and this permission notice appear in all copies.
85 *
86 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
87 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
88 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
89 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
90 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
91 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
92 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
93 */
94
95/*
96 * Device driver for the Marvell Yukon II Ethernet controller.
97 * Due to lack of documentation, this driver is based on the code from
98 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
99 */
100
101#include <sys/cdefs.h>
102__FBSDID("$FreeBSD: head/sys/dev/msk/if_msk.c 192725 2009-05-25 04:27:12Z yongari $");
102__FBSDID("$FreeBSD: head/sys/dev/msk/if_msk.c 192726 2009-05-25 06:09:18Z yongari $");
103
104#include <sys/param.h>
105#include <sys/systm.h>
106#include <sys/bus.h>
107#include <sys/endian.h>
108#include <sys/mbuf.h>
109#include <sys/malloc.h>
110#include <sys/kernel.h>
111#include <sys/module.h>
112#include <sys/socket.h>
113#include <sys/sockio.h>
114#include <sys/queue.h>
115#include <sys/sysctl.h>
116#include <sys/taskqueue.h>
117
118#include <net/bpf.h>
119#include <net/ethernet.h>
120#include <net/if.h>
121#include <net/if_arp.h>
122#include <net/if_dl.h>
123#include <net/if_media.h>
124#include <net/if_types.h>
125#include <net/if_vlan_var.h>
126
127#include <netinet/in.h>
128#include <netinet/in_systm.h>
129#include <netinet/ip.h>
130#include <netinet/tcp.h>
131#include <netinet/udp.h>
132
133#include <machine/bus.h>
134#include <machine/in_cksum.h>
135#include <machine/resource.h>
136#include <sys/rman.h>
137
138#include <dev/mii/mii.h>
139#include <dev/mii/miivar.h>
140#include <dev/mii/brgphyreg.h>
141
142#include <dev/pci/pcireg.h>
143#include <dev/pci/pcivar.h>
144
145#include <dev/msk/if_mskreg.h>
146
147MODULE_DEPEND(msk, pci, 1, 1, 1);
148MODULE_DEPEND(msk, ether, 1, 1, 1);
149MODULE_DEPEND(msk, miibus, 1, 1, 1);
150
151/* "device miibus" required. See GENERIC if you get errors here. */
152#include "miibus_if.h"
153
154/* Tunables. */
155static int msi_disable = 0;
156TUNABLE_INT("hw.msk.msi_disable", &msi_disable);
157static int legacy_intr = 0;
158TUNABLE_INT("hw.msk.legacy_intr", &legacy_intr);
159static int jumbo_disable = 0;
160TUNABLE_INT("hw.msk.jumbo_disable", &jumbo_disable);
161
162#define MSK_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
163
164/*
165 * Devices supported by this driver.
166 */
167static struct msk_product {
168 uint16_t msk_vendorid;
169 uint16_t msk_deviceid;
170 const char *msk_name;
171} msk_products[] = {
172 { VENDORID_SK, DEVICEID_SK_YUKON2,
173 "SK-9Sxx Gigabit Ethernet" },
174 { VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
175 "SK-9Exx Gigabit Ethernet"},
176 { VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
177 "Marvell Yukon 88E8021CU Gigabit Ethernet" },
178 { VENDORID_MARVELL, DEVICEID_MRVL_8021X,
179 "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
180 { VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
181 "Marvell Yukon 88E8022CU Gigabit Ethernet" },
182 { VENDORID_MARVELL, DEVICEID_MRVL_8022X,
183 "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
184 { VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
185 "Marvell Yukon 88E8061CU Gigabit Ethernet" },
186 { VENDORID_MARVELL, DEVICEID_MRVL_8061X,
187 "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
188 { VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
189 "Marvell Yukon 88E8062CU Gigabit Ethernet" },
190 { VENDORID_MARVELL, DEVICEID_MRVL_8062X,
191 "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
192 { VENDORID_MARVELL, DEVICEID_MRVL_8035,
193 "Marvell Yukon 88E8035 Gigabit Ethernet" },
194 { VENDORID_MARVELL, DEVICEID_MRVL_8036,
195 "Marvell Yukon 88E8036 Gigabit Ethernet" },
196 { VENDORID_MARVELL, DEVICEID_MRVL_8038,
197 "Marvell Yukon 88E8038 Gigabit Ethernet" },
198 { VENDORID_MARVELL, DEVICEID_MRVL_8039,
199 "Marvell Yukon 88E8039 Gigabit Ethernet" },
200 { VENDORID_MARVELL, DEVICEID_MRVL_4361,
201 "Marvell Yukon 88E8050 Gigabit Ethernet" },
202 { VENDORID_MARVELL, DEVICEID_MRVL_4360,
203 "Marvell Yukon 88E8052 Gigabit Ethernet" },
204 { VENDORID_MARVELL, DEVICEID_MRVL_4362,
205 "Marvell Yukon 88E8053 Gigabit Ethernet" },
206 { VENDORID_MARVELL, DEVICEID_MRVL_4363,
207 "Marvell Yukon 88E8055 Gigabit Ethernet" },
208 { VENDORID_MARVELL, DEVICEID_MRVL_4364,
209 "Marvell Yukon 88E8056 Gigabit Ethernet" },
210 { VENDORID_MARVELL, DEVICEID_MRVL_436A,
211 "Marvell Yukon 88E8058 Gigabit Ethernet" },
212 { VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
213 "D-Link 550SX Gigabit Ethernet" },
214 { VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
215 "D-Link 560T Gigabit Ethernet" }
216};
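/*
 * Support for another adapter normally only requires adding its
 * vendor/device ID pair and a description string to this table,
 * assuming the corresponding DEVICEID_* constant is defined in
 * if_mskreg.h.
 */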
217
218static const char *model_name[] = {
219 "Yukon XL",
220 "Yukon EC Ultra",
221 "Yukon Unknown",
222 "Yukon EC",
223 "Yukon FE"
224};
225
226static int mskc_probe(device_t);
227static int mskc_attach(device_t);
228static int mskc_detach(device_t);
229static int mskc_shutdown(device_t);
230static int mskc_setup_rambuffer(struct msk_softc *);
231static int mskc_suspend(device_t);
232static int mskc_resume(device_t);
233static void mskc_reset(struct msk_softc *);
234
235static int msk_probe(device_t);
236static int msk_attach(device_t);
237static int msk_detach(device_t);
238
239static void msk_tick(void *);
240static void msk_legacy_intr(void *);
241static int msk_intr(void *);
242static void msk_int_task(void *, int);
243static void msk_intr_phy(struct msk_if_softc *);
244static void msk_intr_gmac(struct msk_if_softc *);
245static __inline void msk_rxput(struct msk_if_softc *);
246static int msk_handle_events(struct msk_softc *);
247static void msk_handle_hwerr(struct msk_if_softc *, uint32_t);
248static void msk_intr_hwerr(struct msk_softc *);
249#ifndef __NO_STRICT_ALIGNMENT
250static __inline void msk_fixup_rx(struct mbuf *);
251#endif
252static void msk_rxeof(struct msk_if_softc *, uint32_t, int);
253static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int);
254static void msk_txeof(struct msk_if_softc *, int);
255static int msk_encap(struct msk_if_softc *, struct mbuf **);
256static void msk_tx_task(void *, int);
257static void msk_start(struct ifnet *);
258static int msk_ioctl(struct ifnet *, u_long, caddr_t);
259static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
260static void msk_set_rambuffer(struct msk_if_softc *);
261static void msk_init(void *);
262static void msk_init_locked(struct msk_if_softc *);
263static void msk_stop(struct msk_if_softc *);
264static void msk_watchdog(struct msk_if_softc *);
265static int msk_mediachange(struct ifnet *);
266static void msk_mediastatus(struct ifnet *, struct ifmediareq *);
267static void msk_phy_power(struct msk_softc *, int);
268static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
269static int msk_status_dma_alloc(struct msk_softc *);
270static void msk_status_dma_free(struct msk_softc *);
271static int msk_txrx_dma_alloc(struct msk_if_softc *);
272static int msk_rx_dma_jalloc(struct msk_if_softc *);
273static void msk_txrx_dma_free(struct msk_if_softc *);
274static void msk_rx_dma_jfree(struct msk_if_softc *);
275static int msk_init_rx_ring(struct msk_if_softc *);
276static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
277static void msk_init_tx_ring(struct msk_if_softc *);
278static __inline void msk_discard_rxbuf(struct msk_if_softc *, int);
279static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
280static int msk_newbuf(struct msk_if_softc *, int);
281static int msk_jumbo_newbuf(struct msk_if_softc *, int);
282
283static int msk_phy_readreg(struct msk_if_softc *, int, int);
284static int msk_phy_writereg(struct msk_if_softc *, int, int, int);
285static int msk_miibus_readreg(device_t, int, int);
286static int msk_miibus_writereg(device_t, int, int, int);
287static void msk_miibus_statchg(device_t);
288
289static void msk_rxfilter(struct msk_if_softc *);
290static void msk_setvlan(struct msk_if_softc *, struct ifnet *);
291
292static void msk_stats_clear(struct msk_if_softc *);
293static void msk_stats_update(struct msk_if_softc *);
294static int msk_sysctl_stat32(SYSCTL_HANDLER_ARGS);
295static int msk_sysctl_stat64(SYSCTL_HANDLER_ARGS);
296static void msk_sysctl_node(struct msk_if_softc *);
297static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
298static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS);
299
300static device_method_t mskc_methods[] = {
301 /* Device interface */
302 DEVMETHOD(device_probe, mskc_probe),
303 DEVMETHOD(device_attach, mskc_attach),
304 DEVMETHOD(device_detach, mskc_detach),
305 DEVMETHOD(device_suspend, mskc_suspend),
306 DEVMETHOD(device_resume, mskc_resume),
307 DEVMETHOD(device_shutdown, mskc_shutdown),
308
309 /* bus interface */
310 DEVMETHOD(bus_print_child, bus_generic_print_child),
311 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
312
313 { NULL, NULL }
314};
315
316static driver_t mskc_driver = {
317 "mskc",
318 mskc_methods,
319 sizeof(struct msk_softc)
320};
321
322static devclass_t mskc_devclass;
323
324static device_method_t msk_methods[] = {
325 /* Device interface */
326 DEVMETHOD(device_probe, msk_probe),
327 DEVMETHOD(device_attach, msk_attach),
328 DEVMETHOD(device_detach, msk_detach),
329 DEVMETHOD(device_shutdown, bus_generic_shutdown),
330
331 /* bus interface */
332 DEVMETHOD(bus_print_child, bus_generic_print_child),
333 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
334
335 /* MII interface */
336 DEVMETHOD(miibus_readreg, msk_miibus_readreg),
337 DEVMETHOD(miibus_writereg, msk_miibus_writereg),
338 DEVMETHOD(miibus_statchg, msk_miibus_statchg),
339
340 { NULL, NULL }
341};
342
343static driver_t msk_driver = {
344 "msk",
345 msk_methods,
346 sizeof(struct msk_if_softc)
347};
348
349static devclass_t msk_devclass;
350
351DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, 0, 0);
352DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, 0, 0);
353DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, 0, 0);
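/*
 * When built as a module this driver becomes if_msk.ko and can be loaded
 * at boot by adding if_msk_load="YES" to loader.conf(5); the miibus
 * dependency is pulled in through the MODULE_DEPEND declarations above.
 */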
354
355static struct resource_spec msk_res_spec_io[] = {
356 { SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE },
357 { -1, 0, 0 }
358};
359
360static struct resource_spec msk_res_spec_mem[] = {
361 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
362 { -1, 0, 0 }
363};
364
365static struct resource_spec msk_irq_spec_legacy[] = {
366 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
367 { -1, 0, 0 }
368};
369
370static struct resource_spec msk_irq_spec_msi[] = {
371 { SYS_RES_IRQ, 1, RF_ACTIVE },
372 { -1, 0, 0 }
373};
374
375static struct resource_spec msk_irq_spec_msi2[] = {
376 { SYS_RES_IRQ, 1, RF_ACTIVE },
377 { SYS_RES_IRQ, 2, RF_ACTIVE },
378 { -1, 0, 0 }
379};
380
381static int
382msk_miibus_readreg(device_t dev, int phy, int reg)
383{
384 struct msk_if_softc *sc_if;
385
386 if (phy != PHY_ADDR_MARV)
387 return (0);
388
389 sc_if = device_get_softc(dev);
390
391 return (msk_phy_readreg(sc_if, phy, reg));
392}
393
394static int
395msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
396{
397 struct msk_softc *sc;
398 int i, val;
399
400 sc = sc_if->msk_softc;
401
402 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
403 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
404
405 for (i = 0; i < MSK_TIMEOUT; i++) {
406 DELAY(1);
407 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
408 if ((val & GM_SMI_CT_RD_VAL) != 0) {
409 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
410 break;
411 }
412 }
413
414 if (i == MSK_TIMEOUT) {
415 if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
416 val = 0;
417 }
418
419 return (val);
420}
421
422static int
423msk_miibus_writereg(device_t dev, int phy, int reg, int val)
424{
425 struct msk_if_softc *sc_if;
426
427 if (phy != PHY_ADDR_MARV)
428 return (0);
429
430 sc_if = device_get_softc(dev);
431
432 return (msk_phy_writereg(sc_if, phy, reg, val));
433}
434
435static int
436msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
437{
438 struct msk_softc *sc;
439 int i;
440
441 sc = sc_if->msk_softc;
442
443 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
444 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
445 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
446 for (i = 0; i < MSK_TIMEOUT; i++) {
447 DELAY(1);
448 if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
449 GM_SMI_CT_BUSY) == 0)
450 break;
451 }
452 if (i == MSK_TIMEOUT)
453 if_printf(sc_if->msk_ifp, "phy write timeout\n");
454
455 return (0);
456}
457
458static void
459msk_miibus_statchg(device_t dev)
460{
461 struct msk_softc *sc;
462 struct msk_if_softc *sc_if;
463 struct mii_data *mii;
464 struct ifnet *ifp;
465 uint32_t gmac;
466
467 sc_if = device_get_softc(dev);
468 sc = sc_if->msk_softc;
469
470 MSK_IF_LOCK_ASSERT(sc_if);
471
472 mii = device_get_softc(sc_if->msk_miibus);
473 ifp = sc_if->msk_ifp;
474 if (mii == NULL || ifp == NULL ||
475 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
476 return;
477
478 if (mii->mii_media_status & IFM_ACTIVE) {
479 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
480 sc_if->msk_flags |= MSK_FLAG_LINK;
481 } else
482 sc_if->msk_flags &= ~MSK_FLAG_LINK;
483
484 if ((sc_if->msk_flags & MSK_FLAG_LINK) != 0) {
485 /* Enable Tx FIFO Underrun. */
486 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
487 GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
488 /*
 489 * Because mii(4) notifies msk(4) when it detects a link status
490 * change, there is no need to enable automatic
491 * speed/flow-control/duplex updates.
492 */
493 gmac = GM_GPCR_AU_ALL_DIS;
494 switch (IFM_SUBTYPE(mii->mii_media_active)) {
495 case IFM_1000_SX:
496 case IFM_1000_T:
497 gmac |= GM_GPCR_SPEED_1000;
498 break;
499 case IFM_100_TX:
500 gmac |= GM_GPCR_SPEED_100;
501 break;
502 case IFM_10_T:
503 break;
504 }
505
506 if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
507 gmac |= GM_GPCR_DUP_FULL;
508 /* Disable Rx flow control. */
509 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0)
510 gmac |= GM_GPCR_FC_RX_DIS;
511 /* Disable Tx flow control. */
512 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0)
513 gmac |= GM_GPCR_FC_TX_DIS;
514 gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
515 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
 516 /* Read back to make sure the write completed. */
517 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
518
519 gmac = GMC_PAUSE_ON;
520 if (((mii->mii_media_active & IFM_GMASK) &
521 (IFM_FLAG0 | IFM_FLAG1)) == 0)
522 gmac = GMC_PAUSE_OFF;
 523 /* Disable pause for 10/100 Mbps in half-duplex mode. */
524 if ((((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) &&
525 (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX ||
526 IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T))
527 gmac = GMC_PAUSE_OFF;
528 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);
529
530 /* Enable PHY interrupt for FIFO underrun/overflow. */
531 msk_phy_writereg(sc_if, PHY_ADDR_MARV,
532 PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
533 } else {
534 /*
535 * Link state changed to down.
536 * Disable PHY interrupts.
537 */
538 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
539 /* Disable Rx/Tx MAC. */
540 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
541 gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
542 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
 543 /* Read back to make sure the write completed. */
544 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
545 }
546}
547
548static void
549msk_rxfilter(struct msk_if_softc *sc_if)
550{
551 struct msk_softc *sc;
552 struct ifnet *ifp;
553 struct ifmultiaddr *ifma;
554 uint32_t mchash[2];
555 uint32_t crc;
556 uint16_t mode;
557
558 sc = sc_if->msk_softc;
559
560 MSK_IF_LOCK_ASSERT(sc_if);
561
562 ifp = sc_if->msk_ifp;
563
564 bzero(mchash, sizeof(mchash));
565 mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
566 if ((ifp->if_flags & IFF_PROMISC) != 0)
567 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
568 else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
569 mode |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
570 mchash[0] = 0xffff;
571 mchash[1] = 0xffff;
572 } else {
573 mode |= GM_RXCR_UCF_ENA;
574 IF_ADDR_LOCK(ifp);
575 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
576 if (ifma->ifma_addr->sa_family != AF_LINK)
577 continue;
578 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
579 ifma->ifma_addr), ETHER_ADDR_LEN);
580 /* Just want the 6 least significant bits. */
581 crc &= 0x3f;
582 /* Set the corresponding bit in the hash table. */
583 mchash[crc >> 5] |= 1 << (crc & 0x1f);
584 }
585 IF_ADDR_UNLOCK(ifp);
586 if (mchash[0] != 0 || mchash[1] != 0)
587 mode |= GM_RXCR_MCF_ENA;
588 }
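/*
 * To illustrate the hash above: only the 6 least significant bits of the
 * big-endian CRC survive, so bit 5 of the CRC selects the 32-bit word in
 * mchash[] and bits 4..0 select the bit within it. An address whose CRC
 * ends in 0x2b (43) therefore sets bit 11 of mchash[1], which is then
 * programmed into the GM_MC_ADDR_H1..H4 registers below.
 */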
589
590 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
591 mchash[0] & 0xffff);
592 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
593 (mchash[0] >> 16) & 0xffff);
594 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
595 mchash[1] & 0xffff);
596 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
597 (mchash[1] >> 16) & 0xffff);
598 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
599}
600
601static void
602msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
603{
604 struct msk_softc *sc;
605
606 sc = sc_if->msk_softc;
607 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
608 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
609 RX_VLAN_STRIP_ON);
610 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
611 TX_VLAN_TAG_ON);
612 } else {
613 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
614 RX_VLAN_STRIP_OFF);
615 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
616 TX_VLAN_TAG_OFF);
617 }
618}
619
620static int
621msk_init_rx_ring(struct msk_if_softc *sc_if)
622{
623 struct msk_ring_data *rd;
624 struct msk_rxdesc *rxd;
625 int i, prod;
626
627 MSK_IF_LOCK_ASSERT(sc_if);
628
629 sc_if->msk_cdata.msk_rx_cons = 0;
630 sc_if->msk_cdata.msk_rx_prod = 0;
631 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
632
633 rd = &sc_if->msk_rdata;
634 bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
635 prod = sc_if->msk_cdata.msk_rx_prod;
636 for (i = 0; i < MSK_RX_RING_CNT; i++) {
637 rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
638 rxd->rx_m = NULL;
639 rxd->rx_le = &rd->msk_rx_ring[prod];
640 if (msk_newbuf(sc_if, prod) != 0)
641 return (ENOBUFS);
642 MSK_INC(prod, MSK_RX_RING_CNT);
643 }
644
645 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
646 sc_if->msk_cdata.msk_rx_ring_map,
647 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
648
649 /* Update prefetch unit. */
650 sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
651 CSR_WRITE_2(sc_if->msk_softc,
652 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
653 sc_if->msk_cdata.msk_rx_prod);
654
655 return (0);
656}
657
658static int
659msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
660{
661 struct msk_ring_data *rd;
662 struct msk_rxdesc *rxd;
663 int i, prod;
664
665 MSK_IF_LOCK_ASSERT(sc_if);
666
667 sc_if->msk_cdata.msk_rx_cons = 0;
668 sc_if->msk_cdata.msk_rx_prod = 0;
669 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
670
671 rd = &sc_if->msk_rdata;
672 bzero(rd->msk_jumbo_rx_ring,
673 sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
674 prod = sc_if->msk_cdata.msk_rx_prod;
675 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
676 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
677 rxd->rx_m = NULL;
678 rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
679 if (msk_jumbo_newbuf(sc_if, prod) != 0)
680 return (ENOBUFS);
681 MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
682 }
683
684 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
685 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
686 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
687
688 sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
689 CSR_WRITE_2(sc_if->msk_softc,
690 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
691 sc_if->msk_cdata.msk_rx_prod);
692
693 return (0);
694}
695
696static void
697msk_init_tx_ring(struct msk_if_softc *sc_if)
698{
699 struct msk_ring_data *rd;
700 struct msk_txdesc *txd;
701 int i;
702
703 sc_if->msk_cdata.msk_tso_mtu = 0;
704 sc_if->msk_cdata.msk_tx_prod = 0;
705 sc_if->msk_cdata.msk_tx_cons = 0;
706 sc_if->msk_cdata.msk_tx_cnt = 0;
707
708 rd = &sc_if->msk_rdata;
709 bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
710 for (i = 0; i < MSK_TX_RING_CNT; i++) {
711 txd = &sc_if->msk_cdata.msk_txdesc[i];
712 txd->tx_m = NULL;
713 txd->tx_le = &rd->msk_tx_ring[i];
714 }
715
716 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
717 sc_if->msk_cdata.msk_tx_ring_map,
718 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
719}
720
721static __inline void
722msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
723{
724 struct msk_rx_desc *rx_le;
725 struct msk_rxdesc *rxd;
726 struct mbuf *m;
727
728 rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
729 m = rxd->rx_m;
730 rx_le = rxd->rx_le;
731 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
732}
733
734static __inline void
735msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
736{
737 struct msk_rx_desc *rx_le;
738 struct msk_rxdesc *rxd;
739 struct mbuf *m;
740
741 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
742 m = rxd->rx_m;
743 rx_le = rxd->rx_le;
744 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
745}
746
747static int
748msk_newbuf(struct msk_if_softc *sc_if, int idx)
749{
750 struct msk_rx_desc *rx_le;
751 struct msk_rxdesc *rxd;
752 struct mbuf *m;
753 bus_dma_segment_t segs[1];
754 bus_dmamap_t map;
755 int nsegs;
756
757 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
758 if (m == NULL)
759 return (ENOBUFS);
760
761 m->m_len = m->m_pkthdr.len = MCLBYTES;
762 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
763 m_adj(m, ETHER_ALIGN);
764#ifndef __NO_STRICT_ALIGNMENT
765 else
766 m_adj(m, MSK_RX_BUF_ALIGN);
767#endif
768
769 if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag,
770 sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs,
771 BUS_DMA_NOWAIT) != 0) {
772 m_freem(m);
773 return (ENOBUFS);
774 }
775 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
776
777 rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
778 if (rxd->rx_m != NULL) {
779 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
780 BUS_DMASYNC_POSTREAD);
781 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
782 }
783 map = rxd->rx_dmamap;
784 rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
785 sc_if->msk_cdata.msk_rx_sparemap = map;
786 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
787 BUS_DMASYNC_PREREAD);
788 rxd->rx_m = m;
789 rx_le = rxd->rx_le;
790 rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
791 rx_le->msk_control =
792 htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
793
794 return (0);
795}
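/*
 * Note that the new mbuf is loaded into the spare DMA map first, so a
 * failed bus_dmamap_load_mbuf_sg() leaves the previous mbuf and its
 * mapping in the ring slot untouched; the two maps are swapped only
 * after the load succeeds.
 */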
796
797static int
798msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
799{
800 struct msk_rx_desc *rx_le;
801 struct msk_rxdesc *rxd;
802 struct mbuf *m;
803 bus_dma_segment_t segs[1];
804 bus_dmamap_t map;
805 int nsegs;
806
807 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
808 if (m == NULL)
809 return (ENOBUFS);
810 if ((m->m_flags & M_EXT) == 0) {
811 m_freem(m);
812 return (ENOBUFS);
813 }
814 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
815 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
816 m_adj(m, ETHER_ALIGN);
817#ifndef __NO_STRICT_ALIGNMENT
818 else
819 m_adj(m, MSK_RX_BUF_ALIGN);
820#endif
821
822 if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
823 sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
824 BUS_DMA_NOWAIT) != 0) {
825 m_freem(m);
826 return (ENOBUFS);
827 }
828 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
829
830 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
831 if (rxd->rx_m != NULL) {
832 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
833 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
834 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
835 rxd->rx_dmamap);
836 }
837 map = rxd->rx_dmamap;
838 rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
839 sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
840 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
841 BUS_DMASYNC_PREREAD);
842 rxd->rx_m = m;
843 rx_le = rxd->rx_le;
844 rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
845 rx_le->msk_control =
846 htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
847
848 return (0);
849}
850
851/*
852 * Set media options.
853 */
854static int
855msk_mediachange(struct ifnet *ifp)
856{
857 struct msk_if_softc *sc_if;
858 struct mii_data *mii;
859 int error;
860
861 sc_if = ifp->if_softc;
862
863 MSK_IF_LOCK(sc_if);
864 mii = device_get_softc(sc_if->msk_miibus);
865 error = mii_mediachg(mii);
866 MSK_IF_UNLOCK(sc_if);
867
868 return (error);
869}
870
871/*
872 * Report current media status.
873 */
874static void
875msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
876{
877 struct msk_if_softc *sc_if;
878 struct mii_data *mii;
879
880 sc_if = ifp->if_softc;
881 MSK_IF_LOCK(sc_if);
882 mii = device_get_softc(sc_if->msk_miibus);
883
884 mii_pollstat(mii);
885 MSK_IF_UNLOCK(sc_if);
886 ifmr->ifm_active = mii->mii_media_active;
887 ifmr->ifm_status = mii->mii_media_status;
888}
889
890static int
891msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
892{
893 struct msk_if_softc *sc_if;
894 struct ifreq *ifr;
895 struct mii_data *mii;
896 int error, mask;
897
898 sc_if = ifp->if_softc;
899 ifr = (struct ifreq *)data;
900 error = 0;
901
902 switch(command) {
903 case SIOCSIFMTU:
904 MSK_IF_LOCK(sc_if);
905 if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN)
906 error = EINVAL;
907 else if (ifp->if_mtu != ifr->ifr_mtu) {
908 if (ifr->ifr_mtu > ETHERMTU) {
909 if ((sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
910 error = EINVAL;
911 MSK_IF_UNLOCK(sc_if);
912 break;
913 }
914 if ((sc_if->msk_flags &
915 MSK_FLAG_JUMBO_NOCSUM) != 0) {
916 ifp->if_hwassist &=
917 ~(MSK_CSUM_FEATURES | CSUM_TSO);
918 ifp->if_capenable &=
919 ~(IFCAP_TSO4 | IFCAP_TXCSUM);
920 VLAN_CAPABILITIES(ifp);
921 }
922 }
923 ifp->if_mtu = ifr->ifr_mtu;
924 msk_init_locked(sc_if);
925 }
926 MSK_IF_UNLOCK(sc_if);
927 break;
928 case SIOCSIFFLAGS:
929 MSK_IF_LOCK(sc_if);
930 if ((ifp->if_flags & IFF_UP) != 0) {
931 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
932 if (((ifp->if_flags ^ sc_if->msk_if_flags)
933 & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
934 msk_rxfilter(sc_if);
935 } else {
936 if ((sc_if->msk_flags & MSK_FLAG_DETACH) == 0)
937 msk_init_locked(sc_if);
938 }
939 } else {
940 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
941 msk_stop(sc_if);
942 }
943 sc_if->msk_if_flags = ifp->if_flags;
944 MSK_IF_UNLOCK(sc_if);
945 break;
946 case SIOCADDMULTI:
947 case SIOCDELMULTI:
948 MSK_IF_LOCK(sc_if);
949 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
950 msk_rxfilter(sc_if);
951 MSK_IF_UNLOCK(sc_if);
952 break;
953 case SIOCGIFMEDIA:
954 case SIOCSIFMEDIA:
955 mii = device_get_softc(sc_if->msk_miibus);
956 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
957 break;
958 case SIOCSIFCAP:
959 MSK_IF_LOCK(sc_if);
960 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
961 if ((mask & IFCAP_TXCSUM) != 0) {
962 ifp->if_capenable ^= IFCAP_TXCSUM;
963 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
964 (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
965 ifp->if_hwassist |= MSK_CSUM_FEATURES;
966 else
967 ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
968 }
969 if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
970 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
971 msk_setvlan(sc_if, ifp);
972 }
973
974 if ((mask & IFCAP_TSO4) != 0) {
975 ifp->if_capenable ^= IFCAP_TSO4;
976 if ((IFCAP_TSO4 & ifp->if_capenable) != 0 &&
977 (IFCAP_TSO4 & ifp->if_capabilities) != 0)
978 ifp->if_hwassist |= CSUM_TSO;
979 else
980 ifp->if_hwassist &= ~CSUM_TSO;
981 }
982 if (ifp->if_mtu > ETHERMTU &&
983 (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
984 ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
985 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
986 }
987
988 VLAN_CAPABILITIES(ifp);
989 MSK_IF_UNLOCK(sc_if);
990 break;
991 default:
992 error = ether_ioctl(ifp, command, data);
993 break;
994 }
995
996 return (error);
997}
998
999static int
1000mskc_probe(device_t dev)
1001{
1002 struct msk_product *mp;
1003 uint16_t vendor, devid;
1004 int i;
1005
1006 vendor = pci_get_vendor(dev);
1007 devid = pci_get_device(dev);
1008 mp = msk_products;
1009 for (i = 0; i < sizeof(msk_products)/sizeof(msk_products[0]);
1010 i++, mp++) {
1011 if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
1012 device_set_desc(dev, mp->msk_name);
1013 return (BUS_PROBE_DEFAULT);
1014 }
1015 }
1016
1017 return (ENXIO);
1018}
1019
1020static int
1021mskc_setup_rambuffer(struct msk_softc *sc)
1022{
1023 int next;
1024 int i;
1025
1026 /* Get adapter SRAM size. */
1027 sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
1028 if (bootverbose)
1029 device_printf(sc->msk_dev,
1030 "RAM buffer size : %dKB\n", sc->msk_ramsize);
1031 if (sc->msk_ramsize == 0)
1032 return (0);
1033
1034 sc->msk_pflags |= MSK_FLAG_RAMBUF;
1035 /*
 1036 * Give the receiver 2/3 of the memory and round down to a multiple
 1037 * of 1024. The Tx/Rx RAM buffer size of the Yukon II should be a
 1038 * multiple of 1024.
1039 */
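/*
 * A worked example (hypothetical size, not read from real hardware):
 * for a chip reporting a 48KB RAM buffer, the split above gives
 * msk_rxqsize = rounddown(48 * 1024 * 2 / 3, 1024) = 32768 bytes and
 * msk_txqsize = 48 * 1024 - 32768 = 16384 bytes for each port.
 */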
1040 sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
1041 sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
1042 for (i = 0, next = 0; i < sc->msk_num_port; i++) {
1043 sc->msk_rxqstart[i] = next;
1044 sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
1045 next = sc->msk_rxqend[i] + 1;
1046 sc->msk_txqstart[i] = next;
1047 sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
1048 next = sc->msk_txqend[i] + 1;
1049 if (bootverbose) {
1050 device_printf(sc->msk_dev,
1051 "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
1052 sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
1053 sc->msk_rxqend[i]);
1054 device_printf(sc->msk_dev,
1055 "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
1056 sc->msk_txqsize / 1024, sc->msk_txqstart[i],
1057 sc->msk_txqend[i]);
1058 }
1059 }
1060
1061 return (0);
1062}
1063
1064static void
1065msk_phy_power(struct msk_softc *sc, int mode)
1066{
1067 uint32_t val;
1068 int i;
1069
1070 switch (mode) {
1071 case MSK_PHY_POWERUP:
1072 /* Switch power to VCC (WA for VAUX problem). */
1073 CSR_WRITE_1(sc, B0_POWER_CTRL,
1074 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
1075 /* Disable Core Clock Division, set Clock Select to 0. */
1076 CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
1077
1078 val = 0;
1079 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1080 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1081 /* Enable bits are inverted. */
1082 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1083 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1084 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1085 }
1086 /*
1087 * Enable PCI & Core Clock, enable clock gating for both Links.
1088 */
1089 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1090
1091 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1092 val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
1093 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1094 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1095 /* Deassert Low Power for 1st PHY. */
1096 val |= PCI_Y2_PHY1_COMA;
1097 if (sc->msk_num_port > 1)
1098 val |= PCI_Y2_PHY2_COMA;
1099 } else if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
1100 uint32_t our;
1101
1102 CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
1103
1104 /* Enable all clocks. */
1105 pci_write_config(sc->msk_dev, PCI_OUR_REG_3, 0, 4);
1106 our = pci_read_config(sc->msk_dev, PCI_OUR_REG_4, 4);
1107 our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN|
1108 PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST);
1109 /* Set all bits to 0 except bits 15..12. */
1110 pci_write_config(sc->msk_dev, PCI_OUR_REG_4, our, 4);
1111 /* Set to default value. */
1112 pci_write_config(sc->msk_dev, PCI_OUR_REG_5, 0, 4);
1113 }
1114 /* Release PHY from PowerDown/COMA mode. */
1115 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1116 for (i = 0; i < sc->msk_num_port; i++) {
1117 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1118 GMLC_RST_SET);
1119 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1120 GMLC_RST_CLR);
1121 }
1122 break;
1123 case MSK_PHY_POWERDOWN:
1124 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1125 val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
1126 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1127 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1128 val &= ~PCI_Y2_PHY1_COMA;
1129 if (sc->msk_num_port > 1)
1130 val &= ~PCI_Y2_PHY2_COMA;
1131 }
1132 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1133
1134 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1135 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1136 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1137 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1138 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1139 /* Enable bits are inverted. */
1140 val = 0;
1141 }
1142 /*
1143 * Disable PCI & Core Clock, disable clock gating for
1144 * both Links.
1145 */
1146 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1147 CSR_WRITE_1(sc, B0_POWER_CTRL,
1148 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
1149 break;
1150 default:
1151 break;
1152 }
1153}
1154
1155static void
1156mskc_reset(struct msk_softc *sc)
1157{
1158 bus_addr_t addr;
1159 uint16_t status;
1160 uint32_t val;
1161 int i;
1162
1163 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1164
1165 /* Disable ASF. */
1166 if (sc->msk_hw_id < CHIP_ID_YUKON_XL) {
1167 CSR_WRITE_4(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
1168 CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
1169 }
1170 /*
1171 * Since we disabled ASF, S/W reset is required for Power Management.
1172 */
1173 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1174 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1175
1176 /* Clear all error bits in the PCI status register. */
1177 status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
1178 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1179
1180 pci_write_config(sc->msk_dev, PCIR_STATUS, status |
1181 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
1182 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
1183 CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);
1184
1185 switch (sc->msk_bustype) {
1186 case MSK_PEX_BUS:
1187 /* Clear all PEX errors. */
1188 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
1189 val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
1190 if ((val & PEX_RX_OV) != 0) {
1191 sc->msk_intrmask &= ~Y2_IS_HW_ERR;
1192 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
1193 }
1194 break;
1195 case MSK_PCI_BUS:
1196 case MSK_PCIX_BUS:
 1197 /* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
1198 val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
1199 if (val == 0)
1200 pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
1201 if (sc->msk_bustype == MSK_PCIX_BUS) {
1202 /* Set Cache Line Size opt. */
1203 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1204 val |= PCI_CLS_OPT;
1205 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1206 }
1207 break;
1208 }
1209 /* Set PHY power state. */
1210 msk_phy_power(sc, MSK_PHY_POWERUP);
1211
1212 /* Reset GPHY/GMAC Control */
1213 for (i = 0; i < sc->msk_num_port; i++) {
1214 /* GPHY Control reset. */
1215 CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
1216 CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
1217 /* GMAC Control reset. */
1218 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
1219 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
1220 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
1221 }
1222 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1223
1224 /* LED On. */
1225 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);
1226
1227 /* Clear TWSI IRQ. */
1228 CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);
1229
1230 /* Turn off hardware timer. */
1231 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
1232 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);
1233
1234 /* Turn off descriptor polling. */
1235 CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);
1236
1237 /* Turn off time stamps. */
1238 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
1239 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
1240
1241 /* Configure timeout values. */
1242 for (i = 0; i < sc->msk_num_port; i++) {
1243 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
1244 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
1245 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
1246 MSK_RI_TO_53);
1247 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
1248 MSK_RI_TO_53);
1249 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
1250 MSK_RI_TO_53);
1251 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
1252 MSK_RI_TO_53);
1253 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
1254 MSK_RI_TO_53);
1255 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
1256 MSK_RI_TO_53);
1257 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
1258 MSK_RI_TO_53);
1259 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
1260 MSK_RI_TO_53);
1261 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
1262 MSK_RI_TO_53);
1263 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
1264 MSK_RI_TO_53);
1265 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
1266 MSK_RI_TO_53);
1267 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
1268 MSK_RI_TO_53);
1269 }
1270
1271 /* Disable all interrupts. */
1272 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1273 CSR_READ_4(sc, B0_HWE_IMSK);
1274 CSR_WRITE_4(sc, B0_IMSK, 0);
1275 CSR_READ_4(sc, B0_IMSK);
1276
1277 /*
 1278 * On dual port PCI-X cards, there is a problem where status updates
 1279 * can be received out of order due to split transactions.
1280 */
1281 if (sc->msk_bustype == MSK_PCIX_BUS && sc->msk_num_port > 1) {
1282 int pcix;
1283 uint16_t pcix_cmd;
1284
1285 if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &pcix) == 0) {
1286 pcix_cmd = pci_read_config(sc->msk_dev, pcix + 2, 2);
1287 /* Clear Max Outstanding Split Transactions. */
1288 pcix_cmd &= ~0x70;
1289 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1290 pci_write_config(sc->msk_dev, pcix + 2, pcix_cmd, 2);
1291 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1292 }
1293 }
1294 if (sc->msk_bustype == MSK_PEX_BUS) {
1295 uint16_t v, width;
1296
1297 v = pci_read_config(sc->msk_dev, PEX_DEV_CTRL, 2);
1298 /* Change Max. Read Request Size to 4096 bytes. */
1299 v &= ~PEX_DC_MAX_RRS_MSK;
1300 v |= PEX_DC_MAX_RD_RQ_SIZE(5);
1301 pci_write_config(sc->msk_dev, PEX_DEV_CTRL, v, 2);
1302 width = pci_read_config(sc->msk_dev, PEX_LNK_STAT, 2);
1303 width = (width & PEX_LS_LINK_WI_MSK) >> 4;
1304 v = pci_read_config(sc->msk_dev, PEX_LNK_CAP, 2);
1305 v = (v & PEX_LS_LINK_WI_MSK) >> 4;
1306 if (v != width)
1307 device_printf(sc->msk_dev,
1308 "negotiated width of link(x%d) != "
1309 "max. width of link(x%d)\n", width, v);
1310 }
1311
1312 /* Clear status list. */
1313 bzero(sc->msk_stat_ring,
1314 sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
1315 sc->msk_stat_cons = 0;
1316 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
1317 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1318 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
1319 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
1320 /* Set the status list base address. */
1321 addr = sc->msk_stat_ring_paddr;
1322 CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
1323 CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
1324 /* Set the status list last index. */
1325 CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
1326 if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
1327 sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
1328 /* WA for dev. #4.3 */
1329 CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
1330 /* WA for dev. #4.18 */
1331 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
1332 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
1333 } else {
1334 CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
1335 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
1336 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1337 sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
1338 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
1339 else
1340 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
1341 CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
1342 }
1343 /*
1344 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
1345 */
1346 CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));
1347
1348 /* Enable status unit. */
1349 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);
1350
1351 CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
1352 CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
1353 CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
1354}
1355
1356static int
1357msk_probe(device_t dev)
1358{
1359 struct msk_softc *sc;
1360 char desc[100];
1361
1362 sc = device_get_softc(device_get_parent(dev));
1363 /*
1364 * Not much to do here. We always know there will be
1365 * at least one GMAC present, and if there are two,
1366 * mskc_attach() will create a second device instance
1367 * for us.
1368 */
1369 snprintf(desc, sizeof(desc),
1370 "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
1371 model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
1372 sc->msk_hw_rev);
1373 device_set_desc_copy(dev, desc);
1374
1375 return (BUS_PROBE_DEFAULT);
1376}
1377
1378static int
1379msk_attach(device_t dev)
1380{
1381 struct msk_softc *sc;
1382 struct msk_if_softc *sc_if;
1383 struct ifnet *ifp;
1384 int i, port, error;
1385 uint8_t eaddr[6];
1386
1387 if (dev == NULL)
1388 return (EINVAL);
1389
1390 error = 0;
1391 sc_if = device_get_softc(dev);
1392 sc = device_get_softc(device_get_parent(dev));
1393 port = *(int *)device_get_ivars(dev);
1394
1395 sc_if->msk_if_dev = dev;
1396 sc_if->msk_port = port;
1397 sc_if->msk_softc = sc;
1398 sc_if->msk_flags = sc->msk_pflags;
1399 sc->msk_if[port] = sc_if;
1400 /* Setup Tx/Rx queue register offsets. */
1401 if (port == MSK_PORT_A) {
1402 sc_if->msk_txq = Q_XA1;
1403 sc_if->msk_txsq = Q_XS1;
1404 sc_if->msk_rxq = Q_R1;
1405 } else {
1406 sc_if->msk_txq = Q_XA2;
1407 sc_if->msk_txsq = Q_XS2;
1408 sc_if->msk_rxq = Q_R2;
1409 }
1410
1411 callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
1412 msk_sysctl_node(sc_if);
1413
1414 if ((error = msk_txrx_dma_alloc(sc_if) != 0))
1415 goto fail;
1416 msk_rx_dma_jalloc(sc_if);
1417
1418 ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
1419 if (ifp == NULL) {
1420 device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
1421 error = ENOSPC;
1422 goto fail;
1423 }
1424 ifp->if_softc = sc_if;
1425 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1426 ifp->if_mtu = ETHERMTU;
1427 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1428 /*
 1429 * The IFCAP_RXCSUM capability is intentionally disabled because the
 1430 * Rx checksum offload logic has a serious bug on all Yukon II family
 1431 * hardware. It seems there is a workaround that makes it work sometimes,
 1432 * but the workaround also has to check OP code sequences to verify
 1433 * whether the OP code is correct. Sometimes the driver would have to
 1434 * compute the IP/TCP/UDP checksum itself in order to verify the
 1435 * correctness of the checksum computed by the hardware. If you have to
 1436 * compute the checksum in software to verify the hardware's checksum,
 1437 * why have the hardware compute it at all? There is no reason to spend
 1438 * time making Rx checksum offload work on Yukon II hardware.
1439 */
1440 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
1441 ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO;
1442 ifp->if_capenable = ifp->if_capabilities;
1443 ifp->if_ioctl = msk_ioctl;
1444 ifp->if_start = msk_start;
1445 ifp->if_timer = 0;
1446 ifp->if_watchdog = NULL;
1447 ifp->if_init = msk_init;
1448 IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
1449 ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1;
1450 IFQ_SET_READY(&ifp->if_snd);
1451
1452 TASK_INIT(&sc_if->msk_tx_task, 1, msk_tx_task, ifp);
1453
1454 /*
1455 * Get station address for this interface. Note that
1456 * dual port cards actually come with three station
1457 * addresses: one for each port, plus an extra. The
1458 * extra one is used by the SysKonnect driver software
1459 * as a 'virtual' station address for when both ports
1460 * are operating in failover mode. Currently we don't
1461 * use this extra address.
1462 */
1463 MSK_IF_LOCK(sc_if);
1464 for (i = 0; i < ETHER_ADDR_LEN; i++)
1465 eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);
1466
1467 /*
1468 * Call MI attach routine. Can't hold locks when calling into ether_*.
1469 */
1470 MSK_IF_UNLOCK(sc_if);
1471 ether_ifattach(ifp, eaddr);
1472 MSK_IF_LOCK(sc_if);
1473
1474 /*
1475 * VLAN capability setup
1476 * Due to Tx checksum offload hardware bugs, msk(4) manually
 1477 * computes the checksum for short frames. For VLAN tagged frames
 1478 * this workaround does not work, so checksum offload is disabled
 1479 * for the VLAN interface.
1480 */
1481 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
1482 ifp->if_capenable = ifp->if_capabilities;
1483
1484 /*
1485 * Tell the upper layer(s) we support long frames.
1486 * Must appear after the call to ether_ifattach() because
1487 * ether_ifattach() sets ifi_hdrlen to the default value.
1488 */
1489 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1490
1491 /*
1492 * Do miibus setup.
1493 */
1494 MSK_IF_UNLOCK(sc_if);
1495 error = mii_phy_probe(dev, &sc_if->msk_miibus, msk_mediachange,
1496 msk_mediastatus);
1497 if (error != 0) {
1498 device_printf(sc_if->msk_if_dev, "no PHY found!\n");
1499 ether_ifdetach(ifp);
1500 error = ENXIO;
1501 goto fail;
1502 }
1503
1504fail:
1505 if (error != 0) {
1506 /* Access should be ok even though lock has been dropped */
1507 sc->msk_if[port] = NULL;
1508 msk_detach(dev);
1509 }
1510
1511 return (error);
1512}
1513
1514/*
1515 * Attach the interface. Allocate softc structures, do ifmedia
1516 * setup and ethernet/BPF attach.
1517 */
1518static int
1519mskc_attach(device_t dev)
1520{
1521 struct msk_softc *sc;
1522 int error, msic, msir, *port, reg;
1523
1524 sc = device_get_softc(dev);
1525 sc->msk_dev = dev;
1526 mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1527 MTX_DEF);
1528
1529 /*
1530 * Map control/status registers.
1531 */
1532 pci_enable_busmaster(dev);
1533
1534 /* Allocate I/O resource */
1535#ifdef MSK_USEIOSPACE
1536 sc->msk_res_spec = msk_res_spec_io;
1537#else
1538 sc->msk_res_spec = msk_res_spec_mem;
1539#endif
1540 sc->msk_irq_spec = msk_irq_spec_legacy;
1541 error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
1542 if (error) {
1543 if (sc->msk_res_spec == msk_res_spec_mem)
1544 sc->msk_res_spec = msk_res_spec_io;
1545 else
1546 sc->msk_res_spec = msk_res_spec_mem;
1547 error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
1548 if (error) {
1549 device_printf(dev, "couldn't allocate %s resources\n",
1550 sc->msk_res_spec == msk_res_spec_mem ? "memory" :
1551 "I/O");
1552 mtx_destroy(&sc->msk_mtx);
1553 return (ENXIO);
1554 }
1555 }
1556
1557 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1558 sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
1559 sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
1560 /* Bail out if chip is not recognized. */
1561 if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
1562 sc->msk_hw_id > CHIP_ID_YUKON_FE) {
1563 device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
1564 sc->msk_hw_id, sc->msk_hw_rev);
1565 mtx_destroy(&sc->msk_mtx);
1566 return (ENXIO);
1567 }
1568
1569 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1570 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1571 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
1572 &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
1573 "max number of Rx events to process");
1574
1575 sc->msk_process_limit = MSK_PROC_DEFAULT;
1576 error = resource_int_value(device_get_name(dev), device_get_unit(dev),
1577 "process_limit", &sc->msk_process_limit);
1578 if (error == 0) {
1579 if (sc->msk_process_limit < MSK_PROC_MIN ||
1580 sc->msk_process_limit > MSK_PROC_MAX) {
1581 device_printf(dev, "process_limit value out of range; "
1582 "using default: %d\n", MSK_PROC_DEFAULT);
1583 sc->msk_process_limit = MSK_PROC_DEFAULT;
1584 }
1585 }
1586
1587 /* Soft reset. */
1588 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1589 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1590 sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
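	/*
	 * PMD types 'L' and 'S' indicate fiber media (LX/SX);
	 * anything else is treated as copper.
	 */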
1591 if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
1592 sc->msk_coppertype = 0;
1593 else
1594 sc->msk_coppertype = 1;
1595 /* Check number of MACs. */
1596 sc->msk_num_port = 1;
1597 if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
1598 CFG_DUAL_MAC_MSK) {
1599 if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
1600 sc->msk_num_port++;
1601 }
1602
1603 /* Check bus type. */
1604 if (pci_find_extcap(sc->msk_dev, PCIY_EXPRESS, &reg) == 0)
1605 sc->msk_bustype = MSK_PEX_BUS;
1606 else if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &reg) == 0)
1607 sc->msk_bustype = MSK_PCIX_BUS;
1608 else
1609 sc->msk_bustype = MSK_PCI_BUS;
1610
1611 switch (sc->msk_hw_id) {
1612 case CHIP_ID_YUKON_EC:
1613		sc->msk_clock = 125;	/* 125 MHz */
1614 sc->msk_pflags |= MSK_FLAG_JUMBO;
1615 break;
1616 case CHIP_ID_YUKON_EC_U:
1617		sc->msk_clock = 125;	/* 125 MHz */
1618 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_JUMBO_NOCSUM;
1619 break;
1620 case CHIP_ID_YUKON_FE:
1621		sc->msk_clock = 100;	/* 100 MHz */
1622 sc->msk_pflags |= MSK_FLAG_FASTETHER;
1623 break;
1624 case CHIP_ID_YUKON_XL:
1625		sc->msk_clock = 156;	/* 156 MHz */
1626 sc->msk_pflags |= MSK_FLAG_JUMBO;
1627 break;
1628 default:
1629		sc->msk_clock = 156;	/* 156 MHz */
1630 break;
1631 }
1632
1633 /* Allocate IRQ resources. */
1634 msic = pci_msi_count(dev);
1635 if (bootverbose)
1636 device_printf(dev, "MSI count : %d\n", msic);
1637 /*
1638 * The Yukon II reports it can handle two messages, one for each
1639 * possible port. We go ahead and allocate two messages and only
1640	 * set up a handler for both if we have a dual port card.
1641 *
1642 * XXX: I haven't untangled the interrupt handler to handle dual
1643 * port cards with separate MSI messages, so for now I disable MSI
1644 * on dual port cards.
1645 */
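	/* The legacy interrupt tunable forces INTx, so treat MSI as disabled. */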
1646 if (legacy_intr != 0)
1647 msi_disable = 1;
1648 if (msi_disable == 0) {
1649 switch (msic) {
1650 case 2:
1651 case 1: /* 88E8058 reports 1 MSI message */
1652 msir = msic;
1653 if (sc->msk_num_port == 1 &&
1654 pci_alloc_msi(dev, &msir) == 0) {
1655 if (msic == msir) {
1656 sc->msk_pflags |= MSK_FLAG_MSI;
1657 sc->msk_irq_spec = msic == 2 ?
1658 msk_irq_spec_msi2 :
1659 msk_irq_spec_msi;
1660 } else
1661 pci_release_msi(dev);
1662 }
1663 break;
1664 default:
1665 device_printf(dev,
1666 "Unexpected number of MSI messages : %d\n", msic);
1667 break;
1668 }
1669 }
1670
1671 error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq);
1672 if (error) {
1673 device_printf(dev, "couldn't allocate IRQ resources\n");
1674 goto fail;
1675 }
1676
1677 if ((error = msk_status_dma_alloc(sc)) != 0)
1678 goto fail;
1679
1680 /* Set base interrupt mask. */
1681 sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
1682 sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
1683 Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;
1684
1685 /* Reset the adapter. */
1686 mskc_reset(sc);
1687
1688 if ((error = mskc_setup_rambuffer(sc)) != 0)
1689 goto fail;
1690
1691 sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
1692 if (sc->msk_devs[MSK_PORT_A] == NULL) {
1693 device_printf(dev, "failed to add child for PORT_A\n");
1694 error = ENXIO;
1695 goto fail;
1696 }
1697 port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
1698 if (port == NULL) {
1699 device_printf(dev, "failed to allocate memory for "
1700 "ivars of PORT_A\n");
1701 error = ENXIO;
1702 goto fail;
1703 }
1704 *port = MSK_PORT_A;
1705 device_set_ivars(sc->msk_devs[MSK_PORT_A], port);
1706
1707 if (sc->msk_num_port > 1) {
1708 sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
1709 if (sc->msk_devs[MSK_PORT_B] == NULL) {
1710 device_printf(dev, "failed to add child for PORT_B\n");
1711 error = ENXIO;
1712 goto fail;
1713 }
1714 port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
1715 if (port == NULL) {
1716 device_printf(dev, "failed to allocate memory for "
1717 "ivars of PORT_B\n");
1718 error = ENXIO;
1719 goto fail;
1720 }
1721 *port = MSK_PORT_B;
1722 device_set_ivars(sc->msk_devs[MSK_PORT_B], port);
1723 }
1724
1725 error = bus_generic_attach(dev);
1726 if (error) {
1727 device_printf(dev, "failed to attach port(s)\n");
1728 goto fail;
1729 }
1730
1731 /* Hook interrupt last to avoid having to lock softc. */
1732 if (legacy_intr)
1733 error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
1734 INTR_MPSAFE, NULL, msk_legacy_intr, sc,
1735 &sc->msk_intrhand[0]);
1736 else {
1737 TASK_INIT(&sc->msk_int_task, 0, msk_int_task, sc);
1738 sc->msk_tq = taskqueue_create_fast("msk_taskq", M_WAITOK,
1739 taskqueue_thread_enqueue, &sc->msk_tq);
1740 taskqueue_start_threads(&sc->msk_tq, 1, PI_NET, "%s taskq",
1741 device_get_nameunit(sc->msk_dev));
1742 error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
1743 INTR_MPSAFE, msk_intr, NULL, sc, &sc->msk_intrhand[0]);
1744 }
1745
1746 if (error != 0) {
1747 device_printf(dev, "couldn't set up interrupt handler\n");
1748 if (legacy_intr == 0)
1749 taskqueue_free(sc->msk_tq);
1750 sc->msk_tq = NULL;
1751 goto fail;
1752 }
1753fail:
1754 if (error != 0)
1755 mskc_detach(dev);
1756
1757 return (error);
1758}
1759
1760/*
1761 * Shutdown hardware and free up resources. This can be called any
1762 * time after the mutex has been initialized. It is called in both
1763 * the error case in attach and the normal detach case so it needs
1764 * to be careful about only freeing resources that have actually been
1765 * allocated.
1766 */
1767static int
1768msk_detach(device_t dev)
1769{
1770 struct msk_softc *sc;
1771 struct msk_if_softc *sc_if;
1772 struct ifnet *ifp;
1773
1774 sc_if = device_get_softc(dev);
1775 KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx),
1776 ("msk mutex not initialized in msk_detach"));
1777 MSK_IF_LOCK(sc_if);
1778
1779 ifp = sc_if->msk_ifp;
1780 if (device_is_attached(dev)) {
1781 /* XXX */
1782 sc_if->msk_flags |= MSK_FLAG_DETACH;
1783 msk_stop(sc_if);
1784 /* Can't hold locks while calling detach. */
1785 MSK_IF_UNLOCK(sc_if);
1786 callout_drain(&sc_if->msk_tick_ch);
1787 taskqueue_drain(taskqueue_fast, &sc_if->msk_tx_task);
1788 ether_ifdetach(ifp);
1789 MSK_IF_LOCK(sc_if);
1790 }
1791
1792 /*
1793 * We're generally called from mskc_detach() which is using
1794 * device_delete_child() to get to here. It's already trashed
1795 * miibus for us, so don't do it here or we'll panic.
1796 *
1797 * if (sc_if->msk_miibus != NULL) {
1798 * device_delete_child(dev, sc_if->msk_miibus);
1799 * sc_if->msk_miibus = NULL;
1800 * }
1801 */
1802
1803 msk_rx_dma_jfree(sc_if);
1804 msk_txrx_dma_free(sc_if);
1805 bus_generic_detach(dev);
1806
1807 if (ifp)
1808 if_free(ifp);
1809 sc = sc_if->msk_softc;
1810 sc->msk_if[sc_if->msk_port] = NULL;
1811 MSK_IF_UNLOCK(sc_if);
1812
1813 return (0);
1814}
1815
1816static int
1817mskc_detach(device_t dev)
1818{
1819 struct msk_softc *sc;
1820
1821 sc = device_get_softc(dev);
1822 KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized"));
1823
1824 if (device_is_alive(dev)) {
1825 if (sc->msk_devs[MSK_PORT_A] != NULL) {
1826 free(device_get_ivars(sc->msk_devs[MSK_PORT_A]),
1827 M_DEVBUF);
1828 device_delete_child(dev, sc->msk_devs[MSK_PORT_A]);
1829 }
1830 if (sc->msk_devs[MSK_PORT_B] != NULL) {
1831 free(device_get_ivars(sc->msk_devs[MSK_PORT_B]),
1832 M_DEVBUF);
1833 device_delete_child(dev, sc->msk_devs[MSK_PORT_B]);
1834 }
1835 bus_generic_detach(dev);
1836 }
1837
1838 /* Disable all interrupts. */
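	/* The reads below flush the posted writes so the masks take effect. */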
1839 CSR_WRITE_4(sc, B0_IMSK, 0);
1840 CSR_READ_4(sc, B0_IMSK);
1841 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1842 CSR_READ_4(sc, B0_HWE_IMSK);
1843
1844 /* LED Off. */
1845 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);
1846
1845	/* Put the hardware into reset. */
1848 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1849
1850 msk_status_dma_free(sc);
1851
1852 if (legacy_intr == 0 && sc->msk_tq != NULL) {
1853 taskqueue_drain(sc->msk_tq, &sc->msk_int_task);
1854 taskqueue_free(sc->msk_tq);
1855 sc->msk_tq = NULL;
1856 }
1857 if (sc->msk_intrhand[0]) {
1858 bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand[0]);
1859 sc->msk_intrhand[0] = NULL;
1860 }
1861 if (sc->msk_intrhand[1]) {
1862		bus_teardown_intr(dev, sc->msk_irq[1], sc->msk_intrhand[1]);
1863 sc->msk_intrhand[1] = NULL;
1864 }
1865 bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq);
1866 if ((sc->msk_pflags & MSK_FLAG_MSI) != 0)
1867 pci_release_msi(dev);
1868 bus_release_resources(dev, sc->msk_res_spec, sc->msk_res);
1869 mtx_destroy(&sc->msk_mtx);
1870
1871 return (0);
1872}
1873
1874struct msk_dmamap_arg {
1875 bus_addr_t msk_busaddr;
1876};
1877
1878static void
1879msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1880{
1881 struct msk_dmamap_arg *ctx;
1882
1883 if (error != 0)
1884 return;
1885 ctx = arg;
1886 ctx->msk_busaddr = segs[0].ds_addr;
1887}
1888
1889/* Create status DMA region. */
1890static int
1891msk_status_dma_alloc(struct msk_softc *sc)
1892{
1893 struct msk_dmamap_arg ctx;
1894 int error;
1895
1896 error = bus_dma_tag_create(
1897 bus_get_dma_tag(sc->msk_dev), /* parent */
1898 MSK_STAT_ALIGN, 0, /* alignment, boundary */
1899 BUS_SPACE_MAXADDR, /* lowaddr */
1900 BUS_SPACE_MAXADDR, /* highaddr */
1901 NULL, NULL, /* filter, filterarg */
1902 MSK_STAT_RING_SZ, /* maxsize */
1903 1, /* nsegments */
1904 MSK_STAT_RING_SZ, /* maxsegsize */
1905 0, /* flags */
1906 NULL, NULL, /* lockfunc, lockarg */
1907 &sc->msk_stat_tag);
1908 if (error != 0) {
1909 device_printf(sc->msk_dev,
1910 "failed to create status DMA tag\n");
1911 return (error);
1912 }
1913
1914 /* Allocate DMA'able memory and load the DMA map for status ring. */
1915 error = bus_dmamem_alloc(sc->msk_stat_tag,
1916 (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
1917 BUS_DMA_ZERO, &sc->msk_stat_map);
1918 if (error != 0) {
1919 device_printf(sc->msk_dev,
1920 "failed to allocate DMA'able memory for status ring\n");
1921 return (error);
1922 }
1923
1924 ctx.msk_busaddr = 0;
1925 error = bus_dmamap_load(sc->msk_stat_tag,
1926 sc->msk_stat_map, sc->msk_stat_ring, MSK_STAT_RING_SZ,
1927 msk_dmamap_cb, &ctx, 0);
1928 if (error != 0) {
1929 device_printf(sc->msk_dev,
1930 "failed to load DMA'able memory for status ring\n");
1931 return (error);
1932 }
1933 sc->msk_stat_ring_paddr = ctx.msk_busaddr;
1934
1935 return (0);
1936}
1937
1938static void
1939msk_status_dma_free(struct msk_softc *sc)
1940{
1941
1942 /* Destroy status block. */
1943 if (sc->msk_stat_tag) {
1944 if (sc->msk_stat_map) {
1945 bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
1946 if (sc->msk_stat_ring) {
1947 bus_dmamem_free(sc->msk_stat_tag,
1948 sc->msk_stat_ring, sc->msk_stat_map);
1949 sc->msk_stat_ring = NULL;
1950 }
1951 sc->msk_stat_map = NULL;
1952 }
1953 bus_dma_tag_destroy(sc->msk_stat_tag);
1954 sc->msk_stat_tag = NULL;
1955 }
1956}
1957
1958static int
1959msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
1960{
1961 struct msk_dmamap_arg ctx;
1962 struct msk_txdesc *txd;
1963 struct msk_rxdesc *rxd;
1964 bus_size_t rxalign;
1965 int error, i;
1966
1967 /* Create parent DMA tag. */
1968 /*
1969 * XXX
1970	 * The Yukon II appears to support full 64-bit DMA operations, but
1971	 * it needs two descriptors (list elements) per 64-bit DMA operation.
1972	 * Since we don't know in advance whether a given mbuf will map to
1973	 * a 32-bit or a 64-bit DMA address, we restrict DMA to the 32-bit
1974	 * address space. Otherwise we would have to check which DMA address
1975	 * was actually used and chain another descriptor for the 64-bit
1976	 * operation, which would also make the descriptor ring size
1977	 * variable. Limiting DMA addresses to the 32-bit address space
1978	 * greatly simplifies descriptor handling and may even improve
1979	 * performance a bit due to more efficient handling of descriptors.
1980	 * Apart from complicating the checksum offload mechanisms, using a
1981	 * separate descriptor for 64-bit DMA just to save a little
1982	 * descriptor memory seems like a bad idea; I have never seen such
1983	 * an exotic scheme on other Ethernet hardware.
1984 */
1985 error = bus_dma_tag_create(
1986 bus_get_dma_tag(sc_if->msk_if_dev), /* parent */
1987 1, 0, /* alignment, boundary */
1988 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1989 BUS_SPACE_MAXADDR, /* highaddr */
1990 NULL, NULL, /* filter, filterarg */
1991 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1992 0, /* nsegments */
1993 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1994 0, /* flags */
1995 NULL, NULL, /* lockfunc, lockarg */
1996 &sc_if->msk_cdata.msk_parent_tag);
1997 if (error != 0) {
1998 device_printf(sc_if->msk_if_dev,
1999 "failed to create parent DMA tag\n");
2000 goto fail;
2001 }
2002 /* Create tag for Tx ring. */
2003 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2004 MSK_RING_ALIGN, 0, /* alignment, boundary */
2005 BUS_SPACE_MAXADDR, /* lowaddr */
2006 BUS_SPACE_MAXADDR, /* highaddr */
2007 NULL, NULL, /* filter, filterarg */
2008 MSK_TX_RING_SZ, /* maxsize */
2009 1, /* nsegments */
2010 MSK_TX_RING_SZ, /* maxsegsize */
2011 0, /* flags */
2012 NULL, NULL, /* lockfunc, lockarg */
2013 &sc_if->msk_cdata.msk_tx_ring_tag);
2014 if (error != 0) {
2015 device_printf(sc_if->msk_if_dev,
2016 "failed to create Tx ring DMA tag\n");
2017 goto fail;
2018 }
2019
2020 /* Create tag for Rx ring. */
2021 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2022 MSK_RING_ALIGN, 0, /* alignment, boundary */
2023 BUS_SPACE_MAXADDR, /* lowaddr */
2024 BUS_SPACE_MAXADDR, /* highaddr */
2025 NULL, NULL, /* filter, filterarg */
2026 MSK_RX_RING_SZ, /* maxsize */
2027 1, /* nsegments */
2028 MSK_RX_RING_SZ, /* maxsegsize */
2029 0, /* flags */
2030 NULL, NULL, /* lockfunc, lockarg */
2031 &sc_if->msk_cdata.msk_rx_ring_tag);
2032 if (error != 0) {
2033 device_printf(sc_if->msk_if_dev,
2034 "failed to create Rx ring DMA tag\n");
2035 goto fail;
2036 }
2037
2038 /* Create tag for Tx buffers. */
2039 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2040 1, 0, /* alignment, boundary */
2041 BUS_SPACE_MAXADDR, /* lowaddr */
2042 BUS_SPACE_MAXADDR, /* highaddr */
2043 NULL, NULL, /* filter, filterarg */
2044 MSK_TSO_MAXSIZE, /* maxsize */
2045 MSK_MAXTXSEGS, /* nsegments */
2046 MSK_TSO_MAXSGSIZE, /* maxsegsize */
2047 0, /* flags */
2048 NULL, NULL, /* lockfunc, lockarg */
2049 &sc_if->msk_cdata.msk_tx_tag);
2050 if (error != 0) {
2051 device_printf(sc_if->msk_if_dev,
2052 "failed to create Tx DMA tag\n");
2053 goto fail;
2054 }
2055
2056 rxalign = 1;
2057 /*
2058	 * Work around a hardware hang which seems to happen when the Rx
2059	 * buffer is not aligned on a multiple of the FIFO word size (8 bytes).
2060 */
2061 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
2062 rxalign = MSK_RX_BUF_ALIGN;
2063 /* Create tag for Rx buffers. */
2064 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2065 rxalign, 0, /* alignment, boundary */
2066 BUS_SPACE_MAXADDR, /* lowaddr */
2067 BUS_SPACE_MAXADDR, /* highaddr */
2068 NULL, NULL, /* filter, filterarg */
2069 MCLBYTES, /* maxsize */
2070 1, /* nsegments */
2071 MCLBYTES, /* maxsegsize */
2072 0, /* flags */
2073 NULL, NULL, /* lockfunc, lockarg */
2074 &sc_if->msk_cdata.msk_rx_tag);
2075 if (error != 0) {
2076 device_printf(sc_if->msk_if_dev,
2077 "failed to create Rx DMA tag\n");
2078 goto fail;
2079 }
2080
2081 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
2082 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag,
2083 (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK |
2084 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map);
2085 if (error != 0) {
2086 device_printf(sc_if->msk_if_dev,
2087 "failed to allocate DMA'able memory for Tx ring\n");
2088 goto fail;
2089 }
2090
2091 ctx.msk_busaddr = 0;
2092 error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag,
2093 sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring,
2094 MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, 0);
2095 if (error != 0) {
2096 device_printf(sc_if->msk_if_dev,
2097 "failed to load DMA'able memory for Tx ring\n");
2098 goto fail;
2099 }
2100 sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr;
2101
2102 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
2103 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag,
2104 (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK |
2105 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map);
2106 if (error != 0) {
2107 device_printf(sc_if->msk_if_dev,
2108 "failed to allocate DMA'able memory for Rx ring\n");
2109 goto fail;
2110 }
2111
2112 ctx.msk_busaddr = 0;
2113 error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag,
2114 sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring,
2115 MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, 0);
2116 if (error != 0) {
2117 device_printf(sc_if->msk_if_dev,
2118 "failed to load DMA'able memory for Rx ring\n");
2119 goto fail;
2120 }
2121 sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr;
2122
2123 /* Create DMA maps for Tx buffers. */
2124 for (i = 0; i < MSK_TX_RING_CNT; i++) {
2125 txd = &sc_if->msk_cdata.msk_txdesc[i];
2126 txd->tx_m = NULL;
2127 txd->tx_dmamap = NULL;
2128 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0,
2129 &txd->tx_dmamap);
2130 if (error != 0) {
2131 device_printf(sc_if->msk_if_dev,
2132 "failed to create Tx dmamap\n");
2133 goto fail;
2134 }
2135 }
2136 /* Create DMA maps for Rx buffers. */
2137 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2138 &sc_if->msk_cdata.msk_rx_sparemap)) != 0) {
2139 device_printf(sc_if->msk_if_dev,
2140 "failed to create spare Rx dmamap\n");
2141 goto fail;
2142 }
2143 for (i = 0; i < MSK_RX_RING_CNT; i++) {
2144 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2145 rxd->rx_m = NULL;
2146 rxd->rx_dmamap = NULL;
2147 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2148 &rxd->rx_dmamap);
2149 if (error != 0) {
2150 device_printf(sc_if->msk_if_dev,
2151 "failed to create Rx dmamap\n");
2152 goto fail;
2153 }
2154 }
2155
2156fail:
2157 return (error);
2158}
2159
2160static int
2161msk_rx_dma_jalloc(struct msk_if_softc *sc_if)
2162{
2163 struct msk_dmamap_arg ctx;
2164 struct msk_rxdesc *jrxd;
2165 bus_size_t rxalign;
2166 int error, i;
2167
2168 if (jumbo_disable != 0 || (sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
2169 sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
2170 device_printf(sc_if->msk_if_dev,
2171 "disabling jumbo frame support\n");
2172 return (0);
2173 }
2174 /* Create tag for jumbo Rx ring. */
2175 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2176 MSK_RING_ALIGN, 0, /* alignment, boundary */
2177 BUS_SPACE_MAXADDR, /* lowaddr */
2178 BUS_SPACE_MAXADDR, /* highaddr */
2179 NULL, NULL, /* filter, filterarg */
2180 MSK_JUMBO_RX_RING_SZ, /* maxsize */
2181 1, /* nsegments */
2182 MSK_JUMBO_RX_RING_SZ, /* maxsegsize */
2183 0, /* flags */
2184 NULL, NULL, /* lockfunc, lockarg */
2185 &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2186 if (error != 0) {
2187 device_printf(sc_if->msk_if_dev,
2188 "failed to create jumbo Rx ring DMA tag\n");
2189 goto jumbo_fail;
2190 }
2191
2192 rxalign = 1;
2193 /*
2194	 * Work around a hardware hang which seems to happen when the Rx
2195	 * buffer is not aligned on a multiple of the FIFO word size (8 bytes).
2196 */
2197 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
2198 rxalign = MSK_RX_BUF_ALIGN;
2199 /* Create tag for jumbo Rx buffers. */
2200 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2201 rxalign, 0, /* alignment, boundary */
2202 BUS_SPACE_MAXADDR, /* lowaddr */
2203 BUS_SPACE_MAXADDR, /* highaddr */
2204 NULL, NULL, /* filter, filterarg */
2205 MJUM9BYTES, /* maxsize */
2206 1, /* nsegments */
2207 MJUM9BYTES, /* maxsegsize */
2208 0, /* flags */
2209 NULL, NULL, /* lockfunc, lockarg */
2210 &sc_if->msk_cdata.msk_jumbo_rx_tag);
2211 if (error != 0) {
2212 device_printf(sc_if->msk_if_dev,
2213 "failed to create jumbo Rx DMA tag\n");
2214 goto jumbo_fail;
2215 }
2216
2217 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
2218 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2219 (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
2220 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2221 &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2222 if (error != 0) {
2223 device_printf(sc_if->msk_if_dev,
2224 "failed to allocate DMA'able memory for jumbo Rx ring\n");
2225 goto jumbo_fail;
2226 }
2227
2228 ctx.msk_busaddr = 0;
2229 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2230 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
2231 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
2232 msk_dmamap_cb, &ctx, 0);
2233 if (error != 0) {
2234 device_printf(sc_if->msk_if_dev,
2235 "failed to load DMA'able memory for jumbo Rx ring\n");
2236 goto jumbo_fail;
2237 }
2238 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
2239
2240 /* Create DMA maps for jumbo Rx buffers. */
2241 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2242 &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
2243 device_printf(sc_if->msk_if_dev,
2244 "failed to create spare jumbo Rx dmamap\n");
2245 goto jumbo_fail;
2246 }
2247 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2248 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2249 jrxd->rx_m = NULL;
2250 jrxd->rx_dmamap = NULL;
2251 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2252 &jrxd->rx_dmamap);
2253 if (error != 0) {
2254 device_printf(sc_if->msk_if_dev,
2255 "failed to create jumbo Rx dmamap\n");
2256 goto jumbo_fail;
2257 }
2258 }
2259
2260 return (0);
2261
2262jumbo_fail:
2263 msk_rx_dma_jfree(sc_if);
2264 device_printf(sc_if->msk_if_dev, "disabling jumbo frame support "
2265 "due to resource shortage\n");
2266 sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
2267 return (error);
2268}
2269
2270static void
2271msk_txrx_dma_free(struct msk_if_softc *sc_if)
2272{
2273 struct msk_txdesc *txd;
2274 struct msk_rxdesc *rxd;
2275 int i;
2276
2277 /* Tx ring. */
2278 if (sc_if->msk_cdata.msk_tx_ring_tag) {
2279 if (sc_if->msk_cdata.msk_tx_ring_map)
2280 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag,
2281 sc_if->msk_cdata.msk_tx_ring_map);
2282 if (sc_if->msk_cdata.msk_tx_ring_map &&
2283 sc_if->msk_rdata.msk_tx_ring)
2284 bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag,
2285 sc_if->msk_rdata.msk_tx_ring,
2286 sc_if->msk_cdata.msk_tx_ring_map);
2287 sc_if->msk_rdata.msk_tx_ring = NULL;
2288 sc_if->msk_cdata.msk_tx_ring_map = NULL;
2289 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag);
2290 sc_if->msk_cdata.msk_tx_ring_tag = NULL;
2291 }
2292 /* Rx ring. */
2293 if (sc_if->msk_cdata.msk_rx_ring_tag) {
2294 if (sc_if->msk_cdata.msk_rx_ring_map)
2295 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag,
2296 sc_if->msk_cdata.msk_rx_ring_map);
2297 if (sc_if->msk_cdata.msk_rx_ring_map &&
2298 sc_if->msk_rdata.msk_rx_ring)
2299 bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag,
2300 sc_if->msk_rdata.msk_rx_ring,
2301 sc_if->msk_cdata.msk_rx_ring_map);
2302 sc_if->msk_rdata.msk_rx_ring = NULL;
2303 sc_if->msk_cdata.msk_rx_ring_map = NULL;
2304 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag);
2305 sc_if->msk_cdata.msk_rx_ring_tag = NULL;
2306 }
2307 /* Tx buffers. */
2308 if (sc_if->msk_cdata.msk_tx_tag) {
2309 for (i = 0; i < MSK_TX_RING_CNT; i++) {
2310 txd = &sc_if->msk_cdata.msk_txdesc[i];
2311 if (txd->tx_dmamap) {
2312 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
2313 txd->tx_dmamap);
2314 txd->tx_dmamap = NULL;
2315 }
2316 }
2317 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
2318 sc_if->msk_cdata.msk_tx_tag = NULL;
2319 }
2320 /* Rx buffers. */
2321 if (sc_if->msk_cdata.msk_rx_tag) {
2322 for (i = 0; i < MSK_RX_RING_CNT; i++) {
2323 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2324 if (rxd->rx_dmamap) {
2325 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2326 rxd->rx_dmamap);
2327 rxd->rx_dmamap = NULL;
2328 }
2329 }
2330 if (sc_if->msk_cdata.msk_rx_sparemap) {
2331 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2332 sc_if->msk_cdata.msk_rx_sparemap);
2333 sc_if->msk_cdata.msk_rx_sparemap = 0;
2334 }
2335 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2336 sc_if->msk_cdata.msk_rx_tag = NULL;
2337 }
2338 if (sc_if->msk_cdata.msk_parent_tag) {
2339 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
2340 sc_if->msk_cdata.msk_parent_tag = NULL;
2341 }
2342}
2343
2344static void
2345msk_rx_dma_jfree(struct msk_if_softc *sc_if)
2346{
2347 struct msk_rxdesc *jrxd;
2348 int i;
2349
2350 /* Jumbo Rx ring. */
2351 if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
2352 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map)
2353 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2354 sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2355 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
2356 sc_if->msk_rdata.msk_jumbo_rx_ring)
2357 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2358 sc_if->msk_rdata.msk_jumbo_rx_ring,
2359 sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2360 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
2361 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
2362 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2363 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
2364 }
2365 /* Jumbo Rx buffers. */
2366 if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
2367 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2368 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2369 if (jrxd->rx_dmamap) {
2370 bus_dmamap_destroy(
2371 sc_if->msk_cdata.msk_jumbo_rx_tag,
2372 jrxd->rx_dmamap);
2373 jrxd->rx_dmamap = NULL;
2374 }
2375 }
2376 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
2377 bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
2378 sc_if->msk_cdata.msk_jumbo_rx_sparemap);
2379 sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
2380 }
2381 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
2382 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
2383 }
2384}
2385
2386static int
2387msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
2388{
2389 struct msk_txdesc *txd, *txd_last;
2390 struct msk_tx_desc *tx_le;
2391 struct mbuf *m;
2392 bus_dmamap_t map;
2393 bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
2394 uint32_t control, prod, si;
2395 uint16_t offset, tcp_offset, tso_mtu;
2396 int error, i, nseg, tso;
2397
2398 MSK_IF_LOCK_ASSERT(sc_if);
2399
2400 tcp_offset = offset = 0;
2401 m = *m_head;
103
104#include <sys/param.h>
105#include <sys/systm.h>
106#include <sys/bus.h>
107#include <sys/endian.h>
108#include <sys/mbuf.h>
109#include <sys/malloc.h>
110#include <sys/kernel.h>
111#include <sys/module.h>
112#include <sys/socket.h>
113#include <sys/sockio.h>
114#include <sys/queue.h>
115#include <sys/sysctl.h>
116#include <sys/taskqueue.h>
117
118#include <net/bpf.h>
119#include <net/ethernet.h>
120#include <net/if.h>
121#include <net/if_arp.h>
122#include <net/if_dl.h>
123#include <net/if_media.h>
124#include <net/if_types.h>
125#include <net/if_vlan_var.h>
126
127#include <netinet/in.h>
128#include <netinet/in_systm.h>
129#include <netinet/ip.h>
130#include <netinet/tcp.h>
131#include <netinet/udp.h>
132
133#include <machine/bus.h>
134#include <machine/in_cksum.h>
135#include <machine/resource.h>
136#include <sys/rman.h>
137
138#include <dev/mii/mii.h>
139#include <dev/mii/miivar.h>
140#include <dev/mii/brgphyreg.h>
141
142#include <dev/pci/pcireg.h>
143#include <dev/pci/pcivar.h>
144
145#include <dev/msk/if_mskreg.h>
146
147MODULE_DEPEND(msk, pci, 1, 1, 1);
148MODULE_DEPEND(msk, ether, 1, 1, 1);
149MODULE_DEPEND(msk, miibus, 1, 1, 1);
150
151/* "device miibus" required. See GENERIC if you get errors here. */
152#include "miibus_if.h"
153
154/* Tunables. */
155static int msi_disable = 0;
156TUNABLE_INT("hw.msk.msi_disable", &msi_disable);
157static int legacy_intr = 0;
158TUNABLE_INT("hw.msk.legacy_intr", &legacy_intr);
159static int jumbo_disable = 0;
160TUNABLE_INT("hw.msk.jumbo_disable", &jumbo_disable);
161
162#define MSK_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
163
164/*
165 * Devices supported by this driver.
166 */
167static struct msk_product {
168 uint16_t msk_vendorid;
169 uint16_t msk_deviceid;
170 const char *msk_name;
171} msk_products[] = {
172 { VENDORID_SK, DEVICEID_SK_YUKON2,
173 "SK-9Sxx Gigabit Ethernet" },
174 { VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
175 "SK-9Exx Gigabit Ethernet"},
176 { VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
177 "Marvell Yukon 88E8021CU Gigabit Ethernet" },
178 { VENDORID_MARVELL, DEVICEID_MRVL_8021X,
179 "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
180 { VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
181 "Marvell Yukon 88E8022CU Gigabit Ethernet" },
182 { VENDORID_MARVELL, DEVICEID_MRVL_8022X,
183 "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
184 { VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
185 "Marvell Yukon 88E8061CU Gigabit Ethernet" },
186 { VENDORID_MARVELL, DEVICEID_MRVL_8061X,
187 "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
188 { VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
189 "Marvell Yukon 88E8062CU Gigabit Ethernet" },
190 { VENDORID_MARVELL, DEVICEID_MRVL_8062X,
191 "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
192 { VENDORID_MARVELL, DEVICEID_MRVL_8035,
193 "Marvell Yukon 88E8035 Gigabit Ethernet" },
194 { VENDORID_MARVELL, DEVICEID_MRVL_8036,
195 "Marvell Yukon 88E8036 Gigabit Ethernet" },
196 { VENDORID_MARVELL, DEVICEID_MRVL_8038,
197 "Marvell Yukon 88E8038 Gigabit Ethernet" },
198 { VENDORID_MARVELL, DEVICEID_MRVL_8039,
199 "Marvell Yukon 88E8039 Gigabit Ethernet" },
200 { VENDORID_MARVELL, DEVICEID_MRVL_4361,
201 "Marvell Yukon 88E8050 Gigabit Ethernet" },
202 { VENDORID_MARVELL, DEVICEID_MRVL_4360,
203 "Marvell Yukon 88E8052 Gigabit Ethernet" },
204 { VENDORID_MARVELL, DEVICEID_MRVL_4362,
205 "Marvell Yukon 88E8053 Gigabit Ethernet" },
206 { VENDORID_MARVELL, DEVICEID_MRVL_4363,
207 "Marvell Yukon 88E8055 Gigabit Ethernet" },
208 { VENDORID_MARVELL, DEVICEID_MRVL_4364,
209 "Marvell Yukon 88E8056 Gigabit Ethernet" },
210 { VENDORID_MARVELL, DEVICEID_MRVL_436A,
211 "Marvell Yukon 88E8058 Gigabit Ethernet" },
212 { VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
213 "D-Link 550SX Gigabit Ethernet" },
214 { VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
215 "D-Link 560T Gigabit Ethernet" }
216};
217
218static const char *model_name[] = {
219 "Yukon XL",
220 "Yukon EC Ultra",
221 "Yukon Unknown",
222 "Yukon EC",
223 "Yukon FE"
224};
225
226static int mskc_probe(device_t);
227static int mskc_attach(device_t);
228static int mskc_detach(device_t);
229static int mskc_shutdown(device_t);
230static int mskc_setup_rambuffer(struct msk_softc *);
231static int mskc_suspend(device_t);
232static int mskc_resume(device_t);
233static void mskc_reset(struct msk_softc *);
234
235static int msk_probe(device_t);
236static int msk_attach(device_t);
237static int msk_detach(device_t);
238
239static void msk_tick(void *);
240static void msk_legacy_intr(void *);
241static int msk_intr(void *);
242static void msk_int_task(void *, int);
243static void msk_intr_phy(struct msk_if_softc *);
244static void msk_intr_gmac(struct msk_if_softc *);
245static __inline void msk_rxput(struct msk_if_softc *);
246static int msk_handle_events(struct msk_softc *);
247static void msk_handle_hwerr(struct msk_if_softc *, uint32_t);
248static void msk_intr_hwerr(struct msk_softc *);
249#ifndef __NO_STRICT_ALIGNMENT
250static __inline void msk_fixup_rx(struct mbuf *);
251#endif
252static void msk_rxeof(struct msk_if_softc *, uint32_t, int);
253static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int);
254static void msk_txeof(struct msk_if_softc *, int);
255static int msk_encap(struct msk_if_softc *, struct mbuf **);
256static void msk_tx_task(void *, int);
257static void msk_start(struct ifnet *);
258static int msk_ioctl(struct ifnet *, u_long, caddr_t);
259static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
260static void msk_set_rambuffer(struct msk_if_softc *);
261static void msk_init(void *);
262static void msk_init_locked(struct msk_if_softc *);
263static void msk_stop(struct msk_if_softc *);
264static void msk_watchdog(struct msk_if_softc *);
265static int msk_mediachange(struct ifnet *);
266static void msk_mediastatus(struct ifnet *, struct ifmediareq *);
267static void msk_phy_power(struct msk_softc *, int);
268static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
269static int msk_status_dma_alloc(struct msk_softc *);
270static void msk_status_dma_free(struct msk_softc *);
271static int msk_txrx_dma_alloc(struct msk_if_softc *);
272static int msk_rx_dma_jalloc(struct msk_if_softc *);
273static void msk_txrx_dma_free(struct msk_if_softc *);
274static void msk_rx_dma_jfree(struct msk_if_softc *);
275static int msk_init_rx_ring(struct msk_if_softc *);
276static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
277static void msk_init_tx_ring(struct msk_if_softc *);
278static __inline void msk_discard_rxbuf(struct msk_if_softc *, int);
279static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
280static int msk_newbuf(struct msk_if_softc *, int);
281static int msk_jumbo_newbuf(struct msk_if_softc *, int);
282
283static int msk_phy_readreg(struct msk_if_softc *, int, int);
284static int msk_phy_writereg(struct msk_if_softc *, int, int, int);
285static int msk_miibus_readreg(device_t, int, int);
286static int msk_miibus_writereg(device_t, int, int, int);
287static void msk_miibus_statchg(device_t);
288
289static void msk_rxfilter(struct msk_if_softc *);
290static void msk_setvlan(struct msk_if_softc *, struct ifnet *);
291
292static void msk_stats_clear(struct msk_if_softc *);
293static void msk_stats_update(struct msk_if_softc *);
294static int msk_sysctl_stat32(SYSCTL_HANDLER_ARGS);
295static int msk_sysctl_stat64(SYSCTL_HANDLER_ARGS);
296static void msk_sysctl_node(struct msk_if_softc *);
297static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
298static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS);
299
300static device_method_t mskc_methods[] = {
301 /* Device interface */
302 DEVMETHOD(device_probe, mskc_probe),
303 DEVMETHOD(device_attach, mskc_attach),
304 DEVMETHOD(device_detach, mskc_detach),
305 DEVMETHOD(device_suspend, mskc_suspend),
306 DEVMETHOD(device_resume, mskc_resume),
307 DEVMETHOD(device_shutdown, mskc_shutdown),
308
309 /* bus interface */
310 DEVMETHOD(bus_print_child, bus_generic_print_child),
311 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
312
313 { NULL, NULL }
314};
315
316static driver_t mskc_driver = {
317 "mskc",
318 mskc_methods,
319 sizeof(struct msk_softc)
320};
321
322static devclass_t mskc_devclass;
323
324static device_method_t msk_methods[] = {
325 /* Device interface */
326 DEVMETHOD(device_probe, msk_probe),
327 DEVMETHOD(device_attach, msk_attach),
328 DEVMETHOD(device_detach, msk_detach),
329 DEVMETHOD(device_shutdown, bus_generic_shutdown),
330
331 /* bus interface */
332 DEVMETHOD(bus_print_child, bus_generic_print_child),
333 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
334
335 /* MII interface */
336 DEVMETHOD(miibus_readreg, msk_miibus_readreg),
337 DEVMETHOD(miibus_writereg, msk_miibus_writereg),
338 DEVMETHOD(miibus_statchg, msk_miibus_statchg),
339
340 { NULL, NULL }
341};
342
343static driver_t msk_driver = {
344 "msk",
345 msk_methods,
346 sizeof(struct msk_if_softc)
347};
348
349static devclass_t msk_devclass;
350
351DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, 0, 0);
352DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, 0, 0);
353DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, 0, 0);
354
355static struct resource_spec msk_res_spec_io[] = {
356 { SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE },
357 { -1, 0, 0 }
358};
359
360static struct resource_spec msk_res_spec_mem[] = {
361 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
362 { -1, 0, 0 }
363};
364
365static struct resource_spec msk_irq_spec_legacy[] = {
366 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
367 { -1, 0, 0 }
368};
369
370static struct resource_spec msk_irq_spec_msi[] = {
371 { SYS_RES_IRQ, 1, RF_ACTIVE },
372 { -1, 0, 0 }
373};
374
375static struct resource_spec msk_irq_spec_msi2[] = {
376 { SYS_RES_IRQ, 1, RF_ACTIVE },
377 { SYS_RES_IRQ, 2, RF_ACTIVE },
378 { -1, 0, 0 }
379};
380
381static int
382msk_miibus_readreg(device_t dev, int phy, int reg)
383{
384 struct msk_if_softc *sc_if;
385
386 if (phy != PHY_ADDR_MARV)
387 return (0);
388
389 sc_if = device_get_softc(dev);
390
391 return (msk_phy_readreg(sc_if, phy, reg));
392}
393
394static int
395msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
396{
397 struct msk_softc *sc;
398 int i, val;
399
400 sc = sc_if->msk_softc;
401
402 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
403 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
404
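	/*
	 * Poll for the read-valid bit; each iteration waits 1us, so the
	 * total wait is bounded by MSK_TIMEOUT microseconds.
	 */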
405 for (i = 0; i < MSK_TIMEOUT; i++) {
406 DELAY(1);
407 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
408 if ((val & GM_SMI_CT_RD_VAL) != 0) {
409 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
410 break;
411 }
412 }
413
414 if (i == MSK_TIMEOUT) {
415 if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
416 val = 0;
417 }
418
419 return (val);
420}
421
422static int
423msk_miibus_writereg(device_t dev, int phy, int reg, int val)
424{
425 struct msk_if_softc *sc_if;
426
427 if (phy != PHY_ADDR_MARV)
428 return (0);
429
430 sc_if = device_get_softc(dev);
431
432 return (msk_phy_writereg(sc_if, phy, reg, val));
433}
434
435static int
436msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
437{
438 struct msk_softc *sc;
439 int i;
440
441 sc = sc_if->msk_softc;
442
443 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
444 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
445 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
446 for (i = 0; i < MSK_TIMEOUT; i++) {
447 DELAY(1);
448 if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
449 GM_SMI_CT_BUSY) == 0)
450 break;
451 }
452 if (i == MSK_TIMEOUT)
453 if_printf(sc_if->msk_ifp, "phy write timeout\n");
454
455 return (0);
456}
457
458static void
459msk_miibus_statchg(device_t dev)
460{
461 struct msk_softc *sc;
462 struct msk_if_softc *sc_if;
463 struct mii_data *mii;
464 struct ifnet *ifp;
465 uint32_t gmac;
466
467 sc_if = device_get_softc(dev);
468 sc = sc_if->msk_softc;
469
470 MSK_IF_LOCK_ASSERT(sc_if);
471
472 mii = device_get_softc(sc_if->msk_miibus);
473 ifp = sc_if->msk_ifp;
474 if (mii == NULL || ifp == NULL ||
475 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
476 return;
477
478 if (mii->mii_media_status & IFM_ACTIVE) {
479 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
480 sc_if->msk_flags |= MSK_FLAG_LINK;
481 } else
482 sc_if->msk_flags &= ~MSK_FLAG_LINK;
483
484 if ((sc_if->msk_flags & MSK_FLAG_LINK) != 0) {
485 /* Enable Tx FIFO Underrun. */
486 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
487 GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
488 /*
489		 * Because mii(4) notifies msk(4) when it detects a link state
490		 * change, there is no need to enable automatic
491 * speed/flow-control/duplex updates.
492 */
493 gmac = GM_GPCR_AU_ALL_DIS;
494 switch (IFM_SUBTYPE(mii->mii_media_active)) {
495 case IFM_1000_SX:
496 case IFM_1000_T:
497 gmac |= GM_GPCR_SPEED_1000;
498 break;
499 case IFM_100_TX:
500 gmac |= GM_GPCR_SPEED_100;
501 break;
502 case IFM_10_T:
503 break;
504 }
505
506 if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
507 gmac |= GM_GPCR_DUP_FULL;
508 /* Disable Rx flow control. */
509 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0)
510 gmac |= GM_GPCR_FC_RX_DIS;
511 /* Disable Tx flow control. */
512 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0)
513 gmac |= GM_GPCR_FC_TX_DIS;
514 gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
515 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
516 /* Read again to ensure writing. */
517 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
518
519 gmac = GMC_PAUSE_ON;
520 if (((mii->mii_media_active & IFM_GMASK) &
521 (IFM_FLAG0 | IFM_FLAG1)) == 0)
522 gmac = GMC_PAUSE_OFF;
523		/* Disable pause for 10/100 Mbps in half-duplex mode. */
524 if ((((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) &&
525 (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX ||
526 IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T))
527 gmac = GMC_PAUSE_OFF;
528 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);
529
530 /* Enable PHY interrupt for FIFO underrun/overflow. */
531 msk_phy_writereg(sc_if, PHY_ADDR_MARV,
532 PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
533 } else {
534 /*
535 * Link state changed to down.
536 * Disable PHY interrupts.
537 */
538 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
539 /* Disable Rx/Tx MAC. */
540 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
541 gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
542 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
543 /* Read again to ensure writing. */
544 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
545 }
546}
547
548static void
549msk_rxfilter(struct msk_if_softc *sc_if)
550{
551 struct msk_softc *sc;
552 struct ifnet *ifp;
553 struct ifmultiaddr *ifma;
554 uint32_t mchash[2];
555 uint32_t crc;
556 uint16_t mode;
557
558 sc = sc_if->msk_softc;
559
560 MSK_IF_LOCK_ASSERT(sc_if);
561
562 ifp = sc_if->msk_ifp;
563
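	/*
	 * The GMAC has a 64-bit multicast hash filter, written below as
	 * four 16-bit halves.
	 */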
564 bzero(mchash, sizeof(mchash));
565 mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
566 if ((ifp->if_flags & IFF_PROMISC) != 0)
567 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
568 else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
569 mode |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
570 mchash[0] = 0xffff;
571 mchash[1] = 0xffff;
572 } else {
573 mode |= GM_RXCR_UCF_ENA;
574 IF_ADDR_LOCK(ifp);
575 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
576 if (ifma->ifma_addr->sa_family != AF_LINK)
577 continue;
578 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
579 ifma->ifma_addr), ETHER_ADDR_LEN);
580 /* Just want the 6 least significant bits. */
581 crc &= 0x3f;
582 /* Set the corresponding bit in the hash table. */
583 mchash[crc >> 5] |= 1 << (crc & 0x1f);
584 }
585 IF_ADDR_UNLOCK(ifp);
586 if (mchash[0] != 0 || mchash[1] != 0)
587 mode |= GM_RXCR_MCF_ENA;
588 }
589
590 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
591 mchash[0] & 0xffff);
592 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
593 (mchash[0] >> 16) & 0xffff);
594 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
595 mchash[1] & 0xffff);
596 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
597 (mchash[1] >> 16) & 0xffff);
598 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
599}
600
601static void
602msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
603{
604 struct msk_softc *sc;
605
606 sc = sc_if->msk_softc;
607 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
608 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
609 RX_VLAN_STRIP_ON);
610 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
611 TX_VLAN_TAG_ON);
612 } else {
613 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
614 RX_VLAN_STRIP_OFF);
615 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
616 TX_VLAN_TAG_OFF);
617 }
618}
619
620static int
621msk_init_rx_ring(struct msk_if_softc *sc_if)
622{
623 struct msk_ring_data *rd;
624 struct msk_rxdesc *rxd;
625 int i, prod;
626
627 MSK_IF_LOCK_ASSERT(sc_if);
628
629 sc_if->msk_cdata.msk_rx_cons = 0;
630 sc_if->msk_cdata.msk_rx_prod = 0;
631 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
632
633 rd = &sc_if->msk_rdata;
634 bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
635 prod = sc_if->msk_cdata.msk_rx_prod;
636 for (i = 0; i < MSK_RX_RING_CNT; i++) {
637 rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
638 rxd->rx_m = NULL;
639 rxd->rx_le = &rd->msk_rx_ring[prod];
640 if (msk_newbuf(sc_if, prod) != 0)
641 return (ENOBUFS);
642 MSK_INC(prod, MSK_RX_RING_CNT);
643 }
644
645 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
646 sc_if->msk_cdata.msk_rx_ring_map,
647 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
648
649 /* Update prefetch unit. */
650 sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
651 CSR_WRITE_2(sc_if->msk_softc,
652 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
653 sc_if->msk_cdata.msk_rx_prod);
654
655 return (0);
656}
657
658static int
659msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
660{
661 struct msk_ring_data *rd;
662 struct msk_rxdesc *rxd;
663 int i, prod;
664
665 MSK_IF_LOCK_ASSERT(sc_if);
666
667 sc_if->msk_cdata.msk_rx_cons = 0;
668 sc_if->msk_cdata.msk_rx_prod = 0;
669 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
670
671 rd = &sc_if->msk_rdata;
672 bzero(rd->msk_jumbo_rx_ring,
673 sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
674 prod = sc_if->msk_cdata.msk_rx_prod;
675 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
676 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
677 rxd->rx_m = NULL;
678 rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
679 if (msk_jumbo_newbuf(sc_if, prod) != 0)
680 return (ENOBUFS);
681 MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
682 }
683
684 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
685 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
686 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
687
688 sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
689 CSR_WRITE_2(sc_if->msk_softc,
690 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
691 sc_if->msk_cdata.msk_rx_prod);
692
693 return (0);
694}
695
696static void
697msk_init_tx_ring(struct msk_if_softc *sc_if)
698{
699 struct msk_ring_data *rd;
700 struct msk_txdesc *txd;
701 int i;
702
703 sc_if->msk_cdata.msk_tso_mtu = 0;
704 sc_if->msk_cdata.msk_tx_prod = 0;
705 sc_if->msk_cdata.msk_tx_cons = 0;
706 sc_if->msk_cdata.msk_tx_cnt = 0;
707
708 rd = &sc_if->msk_rdata;
709 bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
710 for (i = 0; i < MSK_TX_RING_CNT; i++) {
711 txd = &sc_if->msk_cdata.msk_txdesc[i];
712 txd->tx_m = NULL;
713 txd->tx_le = &rd->msk_tx_ring[i];
714 }
715
716 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
717 sc_if->msk_cdata.msk_tx_ring_map,
718 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
719}
720
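/*
 * Re-arm the descriptor with the mbuf that is already attached to it and
 * return ownership to the hardware; typically used when a replacement
 * buffer cannot be allocated. The jumbo variant below does the same for
 * the jumbo Rx ring.
 */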
721static __inline void
722msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
723{
724 struct msk_rx_desc *rx_le;
725 struct msk_rxdesc *rxd;
726 struct mbuf *m;
727
728 rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
729 m = rxd->rx_m;
730 rx_le = rxd->rx_le;
731 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
732}
733
734static __inline void
735msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
736{
737 struct msk_rx_desc *rx_le;
738 struct msk_rxdesc *rxd;
739 struct mbuf *m;
740
741 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
742 m = rxd->rx_m;
743 rx_le = rxd->rx_le;
744 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
745}
746
747static int
748msk_newbuf(struct msk_if_softc *sc_if, int idx)
749{
750 struct msk_rx_desc *rx_le;
751 struct msk_rxdesc *rxd;
752 struct mbuf *m;
753 bus_dma_segment_t segs[1];
754 bus_dmamap_t map;
755 int nsegs;
756
757 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
758 if (m == NULL)
759 return (ENOBUFS);
760
761 m->m_len = m->m_pkthdr.len = MCLBYTES;
762 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
763 m_adj(m, ETHER_ALIGN);
764#ifndef __NO_STRICT_ALIGNMENT
765 else
766 m_adj(m, MSK_RX_BUF_ALIGN);
767#endif
768
769 if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag,
770 sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs,
771 BUS_DMA_NOWAIT) != 0) {
772 m_freem(m);
773 return (ENOBUFS);
774 }
775 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
776
777 rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
778 if (rxd->rx_m != NULL) {
779 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
780 BUS_DMASYNC_POSTREAD);
781 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
782 }
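	/*
	 * The new mbuf was loaded into the spare map above; swap that map
	 * into this descriptor and keep the old map as the next spare.
	 */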
783 map = rxd->rx_dmamap;
784 rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
785 sc_if->msk_cdata.msk_rx_sparemap = map;
786 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
787 BUS_DMASYNC_PREREAD);
788 rxd->rx_m = m;
789 rx_le = rxd->rx_le;
790 rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
791 rx_le->msk_control =
792 htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
793
794 return (0);
795}
796
797static int
798msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
799{
800 struct msk_rx_desc *rx_le;
801 struct msk_rxdesc *rxd;
802 struct mbuf *m;
803 bus_dma_segment_t segs[1];
804 bus_dmamap_t map;
805 int nsegs;
806
807 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
808 if (m == NULL)
809 return (ENOBUFS);
810 if ((m->m_flags & M_EXT) == 0) {
811 m_freem(m);
812 return (ENOBUFS);
813 }
814 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
815 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
816 m_adj(m, ETHER_ALIGN);
817#ifndef __NO_STRICT_ALIGNMENT
818 else
819 m_adj(m, MSK_RX_BUF_ALIGN);
820#endif
821
822 if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
823 sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
824 BUS_DMA_NOWAIT) != 0) {
825 m_freem(m);
826 return (ENOBUFS);
827 }
828 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
829
830 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
831 if (rxd->rx_m != NULL) {
832 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
833 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
834 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
835 rxd->rx_dmamap);
836 }
837 map = rxd->rx_dmamap;
838 rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
839 sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
840 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
841 BUS_DMASYNC_PREREAD);
842 rxd->rx_m = m;
843 rx_le = rxd->rx_le;
844 rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
845 rx_le->msk_control =
846 htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
847
848 return (0);
849}
850
851/*
852 * Set media options.
853 */
854static int
855msk_mediachange(struct ifnet *ifp)
856{
857 struct msk_if_softc *sc_if;
858 struct mii_data *mii;
859 int error;
860
861 sc_if = ifp->if_softc;
862
863 MSK_IF_LOCK(sc_if);
864 mii = device_get_softc(sc_if->msk_miibus);
865 error = mii_mediachg(mii);
866 MSK_IF_UNLOCK(sc_if);
867
868 return (error);
869}
870
871/*
872 * Report current media status.
873 */
874static void
875msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
876{
877 struct msk_if_softc *sc_if;
878 struct mii_data *mii;
879
880 sc_if = ifp->if_softc;
881 MSK_IF_LOCK(sc_if);
882 mii = device_get_softc(sc_if->msk_miibus);
883
884 mii_pollstat(mii);
885 MSK_IF_UNLOCK(sc_if);
886 ifmr->ifm_active = mii->mii_media_active;
887 ifmr->ifm_status = mii->mii_media_status;
888}
889
890static int
891msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
892{
893 struct msk_if_softc *sc_if;
894 struct ifreq *ifr;
895 struct mii_data *mii;
896 int error, mask;
897
898 sc_if = ifp->if_softc;
899 ifr = (struct ifreq *)data;
900 error = 0;
901
902 switch(command) {
903 case SIOCSIFMTU:
904 MSK_IF_LOCK(sc_if);
905 if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN)
906 error = EINVAL;
907 else if (ifp->if_mtu != ifr->ifr_mtu) {
908 if (ifr->ifr_mtu > ETHERMTU) {
909 if ((sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
910 error = EINVAL;
911 MSK_IF_UNLOCK(sc_if);
912 break;
913 }
914 if ((sc_if->msk_flags &
915 MSK_FLAG_JUMBO_NOCSUM) != 0) {
916 ifp->if_hwassist &=
917 ~(MSK_CSUM_FEATURES | CSUM_TSO);
918 ifp->if_capenable &=
919 ~(IFCAP_TSO4 | IFCAP_TXCSUM);
920 VLAN_CAPABILITIES(ifp);
921 }
922 }
923 ifp->if_mtu = ifr->ifr_mtu;
924 msk_init_locked(sc_if);
925 }
926 MSK_IF_UNLOCK(sc_if);
927 break;
928 case SIOCSIFFLAGS:
929 MSK_IF_LOCK(sc_if);
930 if ((ifp->if_flags & IFF_UP) != 0) {
931 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
932 if (((ifp->if_flags ^ sc_if->msk_if_flags)
933 & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
934 msk_rxfilter(sc_if);
935 } else {
936 if ((sc_if->msk_flags & MSK_FLAG_DETACH) == 0)
937 msk_init_locked(sc_if);
938 }
939 } else {
940 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
941 msk_stop(sc_if);
942 }
943 sc_if->msk_if_flags = ifp->if_flags;
944 MSK_IF_UNLOCK(sc_if);
945 break;
946 case SIOCADDMULTI:
947 case SIOCDELMULTI:
948 MSK_IF_LOCK(sc_if);
949 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
950 msk_rxfilter(sc_if);
951 MSK_IF_UNLOCK(sc_if);
952 break;
953 case SIOCGIFMEDIA:
954 case SIOCSIFMEDIA:
955 mii = device_get_softc(sc_if->msk_miibus);
956 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
957 break;
958 case SIOCSIFCAP:
959 MSK_IF_LOCK(sc_if);
960 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
961 if ((mask & IFCAP_TXCSUM) != 0) {
962 ifp->if_capenable ^= IFCAP_TXCSUM;
963 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
964 (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
965 ifp->if_hwassist |= MSK_CSUM_FEATURES;
966 else
967 ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
968 }
969 if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
970 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
971 msk_setvlan(sc_if, ifp);
972 }
973
974 if ((mask & IFCAP_TSO4) != 0) {
975 ifp->if_capenable ^= IFCAP_TSO4;
976 if ((IFCAP_TSO4 & ifp->if_capenable) != 0 &&
977 (IFCAP_TSO4 & ifp->if_capabilities) != 0)
978 ifp->if_hwassist |= CSUM_TSO;
979 else
980 ifp->if_hwassist &= ~CSUM_TSO;
981 }
982 if (ifp->if_mtu > ETHERMTU &&
983 (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
984 ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
985 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
986 }
987
988 VLAN_CAPABILITIES(ifp);
989 MSK_IF_UNLOCK(sc_if);
990 break;
991 default:
992 error = ether_ioctl(ifp, command, data);
993 break;
994 }
995
996 return (error);
997}
998
999static int
1000mskc_probe(device_t dev)
1001{
1002 struct msk_product *mp;
1003 uint16_t vendor, devid;
1004 int i;
1005
1006 vendor = pci_get_vendor(dev);
1007 devid = pci_get_device(dev);
1008 mp = msk_products;
1009 for (i = 0; i < sizeof(msk_products)/sizeof(msk_products[0]);
1010 i++, mp++) {
1011 if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
1012 device_set_desc(dev, mp->msk_name);
1013 return (BUS_PROBE_DEFAULT);
1014 }
1015 }
1016
1017 return (ENXIO);
1018}
1019
1020static int
1021mskc_setup_rambuffer(struct msk_softc *sc)
1022{
1023 int next;
1024 int i;
1025
1026 /* Get adapter SRAM size. */
1027 sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
1028 if (bootverbose)
1029 device_printf(sc->msk_dev,
1030 "RAM buffer size : %dKB\n", sc->msk_ramsize);
1031 if (sc->msk_ramsize == 0)
1032 return (0);
1033
1034 sc->msk_pflags |= MSK_FLAG_RAMBUF;
1035 /*
1036	 * Give the receiver 2/3 of the memory and round down to a multiple
1037	 * of 1024. The Tx/Rx RAM buffer sizes of the Yukon II should be
1038	 * multiples of 1024.
1039 */
1040 sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
1041 sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
1042 for (i = 0, next = 0; i < sc->msk_num_port; i++) {
1043 sc->msk_rxqstart[i] = next;
1044 sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
1045 next = sc->msk_rxqend[i] + 1;
1046 sc->msk_txqstart[i] = next;
1047 sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
1048 next = sc->msk_txqend[i] + 1;
1049 if (bootverbose) {
1050 device_printf(sc->msk_dev,
1051 "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
1052 sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
1053 sc->msk_rxqend[i]);
1054 device_printf(sc->msk_dev,
1055 "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
1056 sc->msk_txqsize / 1024, sc->msk_txqstart[i],
1057 sc->msk_txqend[i]);
1058 }
1059 }
1060
1061 return (0);
1062}
1063
1064static void
1065msk_phy_power(struct msk_softc *sc, int mode)
1066{
1067 uint32_t val;
1068 int i;
1069
1070 switch (mode) {
1071 case MSK_PHY_POWERUP:
1072 /* Switch power to VCC (WA for VAUX problem). */
1073 CSR_WRITE_1(sc, B0_POWER_CTRL,
1074 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
1075 /* Disable Core Clock Division, set Clock Select to 0. */
1076 CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
1077
1078 val = 0;
1079 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1080 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1081 /* Enable bits are inverted. */
1082 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1083 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1084 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1085 }
1086 /*
1087 * Enable PCI & Core Clock, enable clock gating for both Links.
1088 */
1089 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1090
1091 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1092 val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
1093 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1094 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1095 /* Deassert Low Power for 1st PHY. */
1096 val |= PCI_Y2_PHY1_COMA;
1097 if (sc->msk_num_port > 1)
1098 val |= PCI_Y2_PHY2_COMA;
1099 } else if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
1100 uint32_t our;
1101
1102 CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
1103
1104 /* Enable all clocks. */
1105 pci_write_config(sc->msk_dev, PCI_OUR_REG_3, 0, 4);
1106 our = pci_read_config(sc->msk_dev, PCI_OUR_REG_4, 4);
1107 our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN|
1108 PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST);
1109 /* Set all bits to 0 except bits 15..12. */
1110 pci_write_config(sc->msk_dev, PCI_OUR_REG_4, our, 4);
1111 /* Set to default value. */
1112 pci_write_config(sc->msk_dev, PCI_OUR_REG_5, 0, 4);
1113 }
1114 /* Release PHY from PowerDown/COMA mode. */
1115 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1116 for (i = 0; i < sc->msk_num_port; i++) {
1117 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1118 GMLC_RST_SET);
1119 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1120 GMLC_RST_CLR);
1121 }
1122 break;
1123 case MSK_PHY_POWERDOWN:
1124 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1125 val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
1126 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1127 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1128 val &= ~PCI_Y2_PHY1_COMA;
1129 if (sc->msk_num_port > 1)
1130 val &= ~PCI_Y2_PHY2_COMA;
1131 }
1132 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1133
1134 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1135 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1136 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1137 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1138 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1139 /* Enable bits are inverted. */
1140 val = 0;
1141 }
1142 /*
1143 * Disable PCI & Core Clock, disable clock gating for
1144 * both Links.
1145 */
1146 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1147 CSR_WRITE_1(sc, B0_POWER_CTRL,
1148 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
1149 break;
1150 default:
1151 break;
1152 }
1153}
1154
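/*
 * Reset the controller and reinitialize the common hardware blocks:
 * PCI error state, PHY power, RAM interface timeouts and the status
 * list unit.
 */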
1155static void
1156mskc_reset(struct msk_softc *sc)
1157{
1158 bus_addr_t addr;
1159 uint16_t status;
1160 uint32_t val;
1161 int i;
1162
1163 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1164
1165 /* Disable ASF. */
1166 if (sc->msk_hw_id < CHIP_ID_YUKON_XL) {
1167 CSR_WRITE_4(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
1168 CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
1169 }
1170 /*
1171 * Since we disabled ASF, S/W reset is required for Power Management.
1172 */
1173 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1174 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1175
1176 /* Clear all error bits in the PCI status register. */
1177 status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
1178 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1179
1180 pci_write_config(sc->msk_dev, PCIR_STATUS, status |
1181 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
1182 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
1183 CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);
1184
1185 switch (sc->msk_bustype) {
1186 case MSK_PEX_BUS:
1187 /* Clear all PEX errors. */
1188 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
1189 val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
1190 if ((val & PEX_RX_OV) != 0) {
1191 sc->msk_intrmask &= ~Y2_IS_HW_ERR;
1192 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
1193 }
1194 break;
1195 case MSK_PCI_BUS:
1196 case MSK_PCIX_BUS:
1197		/* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
1198 val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
1199 if (val == 0)
1200 pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
1201 if (sc->msk_bustype == MSK_PCIX_BUS) {
1202 /* Set Cache Line Size opt. */
1203 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1204 val |= PCI_CLS_OPT;
1205 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1206 }
1207 break;
1208 }
1209 /* Set PHY power state. */
1210 msk_phy_power(sc, MSK_PHY_POWERUP);
1211
1212 /* Reset GPHY/GMAC Control */
1213 for (i = 0; i < sc->msk_num_port; i++) {
1214 /* GPHY Control reset. */
1215 CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
1216 CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
1217 /* GMAC Control reset. */
1218 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
1219 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
1220 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
1221 }
1222 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1223
1224 /* LED On. */
1225 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);
1226
1227 /* Clear TWSI IRQ. */
1228 CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);
1229
1230 /* Turn off hardware timer. */
1231 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
1232 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);
1233
1234 /* Turn off descriptor polling. */
1235 CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);
1236
1237 /* Turn off time stamps. */
1238 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
1239 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
1240
1241 /* Configure timeout values. */
1242 for (i = 0; i < sc->msk_num_port; i++) {
1243 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
1244 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
1245 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
1246 MSK_RI_TO_53);
1247 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
1248 MSK_RI_TO_53);
1249 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
1250 MSK_RI_TO_53);
1251 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
1252 MSK_RI_TO_53);
1253 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
1254 MSK_RI_TO_53);
1255 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
1256 MSK_RI_TO_53);
1257 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
1258 MSK_RI_TO_53);
1259 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
1260 MSK_RI_TO_53);
1261 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
1262 MSK_RI_TO_53);
1263 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
1264 MSK_RI_TO_53);
1265 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
1266 MSK_RI_TO_53);
1267 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
1268 MSK_RI_TO_53);
1269 }
1270
1271 /* Disable all interrupts. */
1272 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1273 CSR_READ_4(sc, B0_HWE_IMSK);
1274 CSR_WRITE_4(sc, B0_IMSK, 0);
1275 CSR_READ_4(sc, B0_IMSK);
1276
1277 /*
1278	 * On dual port PCI-X cards, there is a problem where status
1279	 * updates can be received out of order due to split transactions.
1280 */
1281 if (sc->msk_bustype == MSK_PCIX_BUS && sc->msk_num_port > 1) {
1282 int pcix;
1283 uint16_t pcix_cmd;
1284
1285 if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &pcix) == 0) {
1286 pcix_cmd = pci_read_config(sc->msk_dev, pcix + 2, 2);
1287 /* Clear Max Outstanding Split Transactions. */
1288 pcix_cmd &= ~0x70;
1289 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1290 pci_write_config(sc->msk_dev, pcix + 2, pcix_cmd, 2);
1291 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1292 }
1293 }
1294 if (sc->msk_bustype == MSK_PEX_BUS) {
1295 uint16_t v, width;
1296
1297 v = pci_read_config(sc->msk_dev, PEX_DEV_CTRL, 2);
1298 /* Change Max. Read Request Size to 4096 bytes. */
1299 v &= ~PEX_DC_MAX_RRS_MSK;
1300 v |= PEX_DC_MAX_RD_RQ_SIZE(5);
1301 pci_write_config(sc->msk_dev, PEX_DEV_CTRL, v, 2);
1302 width = pci_read_config(sc->msk_dev, PEX_LNK_STAT, 2);
1303 width = (width & PEX_LS_LINK_WI_MSK) >> 4;
1304 v = pci_read_config(sc->msk_dev, PEX_LNK_CAP, 2);
1305 v = (v & PEX_LS_LINK_WI_MSK) >> 4;
1306 if (v != width)
1307 device_printf(sc->msk_dev,
1308 "negotiated width of link(x%d) != "
1309 "max. width of link(x%d)\n", width, v);
1310 }
1311
1312 /* Clear status list. */
1313 bzero(sc->msk_stat_ring,
1314 sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
1315 sc->msk_stat_cons = 0;
1316 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
1317 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1318 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
1319 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
1320 /* Set the status list base address. */
1321 addr = sc->msk_stat_ring_paddr;
1322 CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
1323 CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
1324 /* Set the status list last index. */
1325 CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
1326 if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
1327 sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
1328 /* WA for dev. #4.3 */
1329 CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
1330 /* WA for dev. #4.18 */
1331 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
1332 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
1333 } else {
1334 CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
1335 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
1336 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1337 sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
1338 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
1339 else
1340 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
1341 CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
1342 }
1343 /*
1344 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
1345 */
1346 CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));
1347
1348 /* Enable status unit. */
1349 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);
1350
1351 CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
1352 CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
1353 CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
1354}
1355
1356static int
1357msk_probe(device_t dev)
1358{
1359 struct msk_softc *sc;
1360 char desc[100];
1361
1362 sc = device_get_softc(device_get_parent(dev));
1363 /*
1364 * Not much to do here. We always know there will be
1365 * at least one GMAC present, and if there are two,
1366 * mskc_attach() will create a second device instance
1367 * for us.
1368 */
1369 snprintf(desc, sizeof(desc),
1370 "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
1371 model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
1372 sc->msk_hw_rev);
1373 device_set_desc_copy(dev, desc);
1374
1375 return (BUS_PROBE_DEFAULT);
1376}
1377
1378static int
1379msk_attach(device_t dev)
1380{
1381 struct msk_softc *sc;
1382 struct msk_if_softc *sc_if;
1383 struct ifnet *ifp;
1384 int i, port, error;
1385 uint8_t eaddr[6];
1386
1387 if (dev == NULL)
1388 return (EINVAL);
1389
1390 error = 0;
1391 sc_if = device_get_softc(dev);
1392 sc = device_get_softc(device_get_parent(dev));
1393 port = *(int *)device_get_ivars(dev);
1394
1395 sc_if->msk_if_dev = dev;
1396 sc_if->msk_port = port;
1397 sc_if->msk_softc = sc;
1398 sc_if->msk_flags = sc->msk_pflags;
1399 sc->msk_if[port] = sc_if;
1400 /* Setup Tx/Rx queue register offsets. */
1401 if (port == MSK_PORT_A) {
1402 sc_if->msk_txq = Q_XA1;
1403 sc_if->msk_txsq = Q_XS1;
1404 sc_if->msk_rxq = Q_R1;
1405 } else {
1406 sc_if->msk_txq = Q_XA2;
1407 sc_if->msk_txsq = Q_XS2;
1408 sc_if->msk_rxq = Q_R2;
1409 }
1410
1411 callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
1412 msk_sysctl_node(sc_if);
1413
1414	if ((error = msk_txrx_dma_alloc(sc_if)) != 0)
1415 goto fail;
1416 msk_rx_dma_jalloc(sc_if);
1417
1418 ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
1419 if (ifp == NULL) {
1420 device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
1421 error = ENOSPC;
1422 goto fail;
1423 }
1424 ifp->if_softc = sc_if;
1425 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1426 ifp->if_mtu = ETHERMTU;
1427 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1428 /*
1429 * IFCAP_RXCSUM capability is intentionally disabled as the hardware
1430	 * has a serious bug in Rx checksum offload for all Yukon II family
1431	 * hardware. It seems there is a workaround to make it work sometimes.
1432	 * However, the workaround also has to check OP code sequences to
1433	 * verify whether the OP code is correct. Sometimes it would have to
1434	 * compute the IP/TCP/UDP checksum in the driver in order to verify
1435	 * the checksum computed by the hardware. If the checksum has to be
1436	 * computed in software anyway to verify the hardware's result, there
1437	 * is no point in having the hardware compute it, so it is not worth
1438	 * spending time to make Rx checksum offload work on Yukon II hardware.
1439 */
1440 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
1441 ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO;
1442 ifp->if_capenable = ifp->if_capabilities;
1443 ifp->if_ioctl = msk_ioctl;
1444 ifp->if_start = msk_start;
1445 ifp->if_timer = 0;
1446 ifp->if_watchdog = NULL;
1447 ifp->if_init = msk_init;
1448 IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
1449 ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1;
1450 IFQ_SET_READY(&ifp->if_snd);
1451
1452 TASK_INIT(&sc_if->msk_tx_task, 1, msk_tx_task, ifp);
1453
1454 /*
1455 * Get station address for this interface. Note that
1456 * dual port cards actually come with three station
1457 * addresses: one for each port, plus an extra. The
1458 * extra one is used by the SysKonnect driver software
1459 * as a 'virtual' station address for when both ports
1460 * are operating in failover mode. Currently we don't
1461 * use this extra address.
1462 */
1463 MSK_IF_LOCK(sc_if);
1464 for (i = 0; i < ETHER_ADDR_LEN; i++)
1465 eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);
1466
1467 /*
1468 * Call MI attach routine. Can't hold locks when calling into ether_*.
1469 */
1470 MSK_IF_UNLOCK(sc_if);
1471 ether_ifattach(ifp, eaddr);
1472 MSK_IF_LOCK(sc_if);
1473
1474 /*
1475 * VLAN capability setup
1476	 * Due to Tx checksum offload hardware bugs, msk(4) manually
1477	 * computes the checksum for short frames. For VLAN tagged frames
1478	 * this workaround does not work, so checksum offload is disabled
1479	 * for VLAN interfaces.
1480 */
1481 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
1482 ifp->if_capenable = ifp->if_capabilities;
1483
1484 /*
1485 * Tell the upper layer(s) we support long frames.
1486 * Must appear after the call to ether_ifattach() because
1487 * ether_ifattach() sets ifi_hdrlen to the default value.
1488 */
1489 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1490
1491 /*
1492 * Do miibus setup.
1493 */
1494 MSK_IF_UNLOCK(sc_if);
1495 error = mii_phy_probe(dev, &sc_if->msk_miibus, msk_mediachange,
1496 msk_mediastatus);
1497 if (error != 0) {
1498 device_printf(sc_if->msk_if_dev, "no PHY found!\n");
1499 ether_ifdetach(ifp);
1500 error = ENXIO;
1501 goto fail;
1502 }
1503
1504fail:
1505 if (error != 0) {
1506 /* Access should be ok even though lock has been dropped */
1507 sc->msk_if[port] = NULL;
1508 msk_detach(dev);
1509 }
1510
1511 return (error);
1512}
1513
1514/*
1515 * Attach the interface. Allocate softc structures, do ifmedia
1516 * setup and ethernet/BPF attach.
1517 */
1518static int
1519mskc_attach(device_t dev)
1520{
1521 struct msk_softc *sc;
1522 int error, msic, msir, *port, reg;
1523
1524 sc = device_get_softc(dev);
1525 sc->msk_dev = dev;
1526 mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1527 MTX_DEF);
1528
1529 /*
1530 * Map control/status registers.
1531 */
1532 pci_enable_busmaster(dev);
1533
1534 /* Allocate I/O resource */
1535#ifdef MSK_USEIOSPACE
1536 sc->msk_res_spec = msk_res_spec_io;
1537#else
1538 sc->msk_res_spec = msk_res_spec_mem;
1539#endif
1540 sc->msk_irq_spec = msk_irq_spec_legacy;
1541 error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
1542 if (error) {
1543 if (sc->msk_res_spec == msk_res_spec_mem)
1544 sc->msk_res_spec = msk_res_spec_io;
1545 else
1546 sc->msk_res_spec = msk_res_spec_mem;
1547 error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
1548 if (error) {
1549 device_printf(dev, "couldn't allocate %s resources\n",
1550 sc->msk_res_spec == msk_res_spec_mem ? "memory" :
1551 "I/O");
1552 mtx_destroy(&sc->msk_mtx);
1553 return (ENXIO);
1554 }
1555 }
1556
1557 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1558 sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
1559 sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
1560 /* Bail out if chip is not recognized. */
1561 if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
1562 sc->msk_hw_id > CHIP_ID_YUKON_FE) {
1563 device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
1564 sc->msk_hw_id, sc->msk_hw_rev);
1565 mtx_destroy(&sc->msk_mtx);
1566 return (ENXIO);
1567 }
1568
1569 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1570 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1571 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
1572 &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
1573 "max number of Rx events to process");
1574
1575 sc->msk_process_limit = MSK_PROC_DEFAULT;
1576 error = resource_int_value(device_get_name(dev), device_get_unit(dev),
1577 "process_limit", &sc->msk_process_limit);
1578 if (error == 0) {
1579 if (sc->msk_process_limit < MSK_PROC_MIN ||
1580 sc->msk_process_limit > MSK_PROC_MAX) {
1581 device_printf(dev, "process_limit value out of range; "
1582 "using default: %d\n", MSK_PROC_DEFAULT);
1583 sc->msk_process_limit = MSK_PROC_DEFAULT;
1584 }
1585 }
1586
1587 /* Soft reset. */
1588 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1589 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1590 sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
1591 if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
1592 sc->msk_coppertype = 0;
1593 else
1594 sc->msk_coppertype = 1;
1595 /* Check number of MACs. */
1596 sc->msk_num_port = 1;
1597 if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
1598 CFG_DUAL_MAC_MSK) {
1599 if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
1600 sc->msk_num_port++;
1601 }
1602
1603 /* Check bus type. */
1604 if (pci_find_extcap(sc->msk_dev, PCIY_EXPRESS, &reg) == 0)
1605 sc->msk_bustype = MSK_PEX_BUS;
1606 else if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &reg) == 0)
1607 sc->msk_bustype = MSK_PCIX_BUS;
1608 else
1609 sc->msk_bustype = MSK_PCI_BUS;
1610
1611 switch (sc->msk_hw_id) {
1612 case CHIP_ID_YUKON_EC:
1613		sc->msk_clock = 125;	/* 125 MHz */
1614 sc->msk_pflags |= MSK_FLAG_JUMBO;
1615 break;
1616 case CHIP_ID_YUKON_EC_U:
1617		sc->msk_clock = 125;	/* 125 MHz */
1618 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_JUMBO_NOCSUM;
1619 break;
1620 case CHIP_ID_YUKON_FE:
1621		sc->msk_clock = 100;	/* 100 MHz */
1622 sc->msk_pflags |= MSK_FLAG_FASTETHER;
1623 break;
1624 case CHIP_ID_YUKON_XL:
1625		sc->msk_clock = 156;	/* 156 MHz */
1626 sc->msk_pflags |= MSK_FLAG_JUMBO;
1627 break;
1628 default:
1629		sc->msk_clock = 156;	/* 156 MHz */
1630 break;
1631 }
1632
1633 /* Allocate IRQ resources. */
1634 msic = pci_msi_count(dev);
1635 if (bootverbose)
1636 device_printf(dev, "MSI count : %d\n", msic);
1637 /*
1638 * The Yukon II reports it can handle two messages, one for each
1639 * possible port. We go ahead and allocate two messages and only
1640	 * set up a handler for both if we have a dual port card.
1641 *
1642 * XXX: I haven't untangled the interrupt handler to handle dual
1643 * port cards with separate MSI messages, so for now I disable MSI
1644 * on dual port cards.
1645 */
1646 if (legacy_intr != 0)
1647 msi_disable = 1;
1648 if (msi_disable == 0) {
1649 switch (msic) {
1650 case 2:
1651 case 1: /* 88E8058 reports 1 MSI message */
1652 msir = msic;
1653 if (sc->msk_num_port == 1 &&
1654 pci_alloc_msi(dev, &msir) == 0) {
1655 if (msic == msir) {
1656 sc->msk_pflags |= MSK_FLAG_MSI;
1657 sc->msk_irq_spec = msic == 2 ?
1658 msk_irq_spec_msi2 :
1659 msk_irq_spec_msi;
1660 } else
1661 pci_release_msi(dev);
1662 }
1663 break;
1664 default:
1665 device_printf(dev,
1666 "Unexpected number of MSI messages : %d\n", msic);
1667 break;
1668 }
1669 }
1670
1671 error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq);
1672 if (error) {
1673 device_printf(dev, "couldn't allocate IRQ resources\n");
1674 goto fail;
1675 }
1676
1677 if ((error = msk_status_dma_alloc(sc)) != 0)
1678 goto fail;
1679
1680 /* Set base interrupt mask. */
1681 sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
1682 sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
1683 Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;
1684
1685 /* Reset the adapter. */
1686 mskc_reset(sc);
1687
1688 if ((error = mskc_setup_rambuffer(sc)) != 0)
1689 goto fail;
1690
1691 sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
1692 if (sc->msk_devs[MSK_PORT_A] == NULL) {
1693 device_printf(dev, "failed to add child for PORT_A\n");
1694 error = ENXIO;
1695 goto fail;
1696 }
1697 port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
1698 if (port == NULL) {
1699 device_printf(dev, "failed to allocate memory for "
1700 "ivars of PORT_A\n");
1701 error = ENXIO;
1702 goto fail;
1703 }
1704 *port = MSK_PORT_A;
1705 device_set_ivars(sc->msk_devs[MSK_PORT_A], port);
1706
1707 if (sc->msk_num_port > 1) {
1708 sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
1709 if (sc->msk_devs[MSK_PORT_B] == NULL) {
1710 device_printf(dev, "failed to add child for PORT_B\n");
1711 error = ENXIO;
1712 goto fail;
1713 }
1714 port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
1715 if (port == NULL) {
1716 device_printf(dev, "failed to allocate memory for "
1717 "ivars of PORT_B\n");
1718 error = ENXIO;
1719 goto fail;
1720 }
1721 *port = MSK_PORT_B;
1722 device_set_ivars(sc->msk_devs[MSK_PORT_B], port);
1723 }
1724
1725 error = bus_generic_attach(dev);
1726 if (error) {
1727 device_printf(dev, "failed to attach port(s)\n");
1728 goto fail;
1729 }
1730
1731 /* Hook interrupt last to avoid having to lock softc. */
1732 if (legacy_intr)
1733 error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
1734 INTR_MPSAFE, NULL, msk_legacy_intr, sc,
1735 &sc->msk_intrhand[0]);
1736 else {
1737 TASK_INIT(&sc->msk_int_task, 0, msk_int_task, sc);
1738 sc->msk_tq = taskqueue_create_fast("msk_taskq", M_WAITOK,
1739 taskqueue_thread_enqueue, &sc->msk_tq);
1740 taskqueue_start_threads(&sc->msk_tq, 1, PI_NET, "%s taskq",
1741 device_get_nameunit(sc->msk_dev));
1742 error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
1743 INTR_MPSAFE, msk_intr, NULL, sc, &sc->msk_intrhand[0]);
1744 }
1745
1746 if (error != 0) {
1747 device_printf(dev, "couldn't set up interrupt handler\n");
1748 if (legacy_intr == 0)
1749 taskqueue_free(sc->msk_tq);
1750 sc->msk_tq = NULL;
1751 goto fail;
1752 }
1753fail:
1754 if (error != 0)
1755 mskc_detach(dev);
1756
1757 return (error);
1758}
1759
1760/*
1761 * Shutdown hardware and free up resources. This can be called any
1762 * time after the mutex has been initialized. It is called in both
1763 * the error case in attach and the normal detach case so it needs
1764 * to be careful about only freeing resources that have actually been
1765 * allocated.
1766 */
1767static int
1768msk_detach(device_t dev)
1769{
1770 struct msk_softc *sc;
1771 struct msk_if_softc *sc_if;
1772 struct ifnet *ifp;
1773
1774 sc_if = device_get_softc(dev);
1775 KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx),
1776 ("msk mutex not initialized in msk_detach"));
1777 MSK_IF_LOCK(sc_if);
1778
1779 ifp = sc_if->msk_ifp;
1780 if (device_is_attached(dev)) {
1781 /* XXX */
1782 sc_if->msk_flags |= MSK_FLAG_DETACH;
1783 msk_stop(sc_if);
1784 /* Can't hold locks while calling detach. */
1785 MSK_IF_UNLOCK(sc_if);
1786 callout_drain(&sc_if->msk_tick_ch);
1787 taskqueue_drain(taskqueue_fast, &sc_if->msk_tx_task);
1788 ether_ifdetach(ifp);
1789 MSK_IF_LOCK(sc_if);
1790 }
1791
1792 /*
1793 * We're generally called from mskc_detach() which is using
1794 * device_delete_child() to get to here. It's already trashed
1795 * miibus for us, so don't do it here or we'll panic.
1796 *
1797 * if (sc_if->msk_miibus != NULL) {
1798 * device_delete_child(dev, sc_if->msk_miibus);
1799 * sc_if->msk_miibus = NULL;
1800 * }
1801 */
1802
1803 msk_rx_dma_jfree(sc_if);
1804 msk_txrx_dma_free(sc_if);
1805 bus_generic_detach(dev);
1806
1807 if (ifp)
1808 if_free(ifp);
1809 sc = sc_if->msk_softc;
1810 sc->msk_if[sc_if->msk_port] = NULL;
1811 MSK_IF_UNLOCK(sc_if);
1812
1813 return (0);
1814}
1815
1816static int
1817mskc_detach(device_t dev)
1818{
1819 struct msk_softc *sc;
1820
1821 sc = device_get_softc(dev);
1822 KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized"));
1823
1824 if (device_is_alive(dev)) {
1825 if (sc->msk_devs[MSK_PORT_A] != NULL) {
1826 free(device_get_ivars(sc->msk_devs[MSK_PORT_A]),
1827 M_DEVBUF);
1828 device_delete_child(dev, sc->msk_devs[MSK_PORT_A]);
1829 }
1830 if (sc->msk_devs[MSK_PORT_B] != NULL) {
1831 free(device_get_ivars(sc->msk_devs[MSK_PORT_B]),
1832 M_DEVBUF);
1833 device_delete_child(dev, sc->msk_devs[MSK_PORT_B]);
1834 }
1835 bus_generic_detach(dev);
1836 }
1837
1838 /* Disable all interrupts. */
1839 CSR_WRITE_4(sc, B0_IMSK, 0);
1840 CSR_READ_4(sc, B0_IMSK);
1841 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1842 CSR_READ_4(sc, B0_HWE_IMSK);
1843
1844 /* LED Off. */
1845 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);
1846
1847	/* Put the hardware into reset. */
1848 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1849
1850 msk_status_dma_free(sc);
1851
1852 if (legacy_intr == 0 && sc->msk_tq != NULL) {
1853 taskqueue_drain(sc->msk_tq, &sc->msk_int_task);
1854 taskqueue_free(sc->msk_tq);
1855 sc->msk_tq = NULL;
1856 }
1857 if (sc->msk_intrhand[0]) {
1858 bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand[0]);
1859 sc->msk_intrhand[0] = NULL;
1860 }
1861 if (sc->msk_intrhand[1]) {
1862		bus_teardown_intr(dev, sc->msk_irq[1], sc->msk_intrhand[1]);
1863 sc->msk_intrhand[1] = NULL;
1864 }
1865 bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq);
1866 if ((sc->msk_pflags & MSK_FLAG_MSI) != 0)
1867 pci_release_msi(dev);
1868 bus_release_resources(dev, sc->msk_res_spec, sc->msk_res);
1869 mtx_destroy(&sc->msk_mtx);
1870
1871 return (0);
1872}
1873
1874struct msk_dmamap_arg {
1875 bus_addr_t msk_busaddr;
1876};
1877
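/* bus_dmamap_load() callback: record the bus address of the mapped segment. */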
1878static void
1879msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1880{
1881 struct msk_dmamap_arg *ctx;
1882
1883 if (error != 0)
1884 return;
1885 ctx = arg;
1886 ctx->msk_busaddr = segs[0].ds_addr;
1887}
1888
1889/* Create status DMA region. */
1890static int
1891msk_status_dma_alloc(struct msk_softc *sc)
1892{
1893 struct msk_dmamap_arg ctx;
1894 int error;
1895
1896 error = bus_dma_tag_create(
1897 bus_get_dma_tag(sc->msk_dev), /* parent */
1898 MSK_STAT_ALIGN, 0, /* alignment, boundary */
1899 BUS_SPACE_MAXADDR, /* lowaddr */
1900 BUS_SPACE_MAXADDR, /* highaddr */
1901 NULL, NULL, /* filter, filterarg */
1902 MSK_STAT_RING_SZ, /* maxsize */
1903 1, /* nsegments */
1904 MSK_STAT_RING_SZ, /* maxsegsize */
1905 0, /* flags */
1906 NULL, NULL, /* lockfunc, lockarg */
1907 &sc->msk_stat_tag);
1908 if (error != 0) {
1909 device_printf(sc->msk_dev,
1910 "failed to create status DMA tag\n");
1911 return (error);
1912 }
1913
1914 /* Allocate DMA'able memory and load the DMA map for status ring. */
1915 error = bus_dmamem_alloc(sc->msk_stat_tag,
1916 (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
1917 BUS_DMA_ZERO, &sc->msk_stat_map);
1918 if (error != 0) {
1919 device_printf(sc->msk_dev,
1920 "failed to allocate DMA'able memory for status ring\n");
1921 return (error);
1922 }
1923
1924 ctx.msk_busaddr = 0;
1925 error = bus_dmamap_load(sc->msk_stat_tag,
1926 sc->msk_stat_map, sc->msk_stat_ring, MSK_STAT_RING_SZ,
1927 msk_dmamap_cb, &ctx, 0);
1928 if (error != 0) {
1929 device_printf(sc->msk_dev,
1930 "failed to load DMA'able memory for status ring\n");
1931 return (error);
1932 }
1933 sc->msk_stat_ring_paddr = ctx.msk_busaddr;
1934
1935 return (0);
1936}
1937
1938static void
1939msk_status_dma_free(struct msk_softc *sc)
1940{
1941
1942 /* Destroy status block. */
1943 if (sc->msk_stat_tag) {
1944 if (sc->msk_stat_map) {
1945 bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
1946 if (sc->msk_stat_ring) {
1947 bus_dmamem_free(sc->msk_stat_tag,
1948 sc->msk_stat_ring, sc->msk_stat_map);
1949 sc->msk_stat_ring = NULL;
1950 }
1951 sc->msk_stat_map = NULL;
1952 }
1953 bus_dma_tag_destroy(sc->msk_stat_tag);
1954 sc->msk_stat_tag = NULL;
1955 }
1956}
1957
1958static int
1959msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
1960{
1961 struct msk_dmamap_arg ctx;
1962 struct msk_txdesc *txd;
1963 struct msk_rxdesc *rxd;
1964 bus_size_t rxalign;
1965 int error, i;
1966
1967 /* Create parent DMA tag. */
1968 /*
1969 * XXX
1970	 * It seems that the Yukon II supports full 64bit DMA operations, but
1971	 * it needs two descriptors (list elements) for a 64bit DMA operation.
1972	 * Since we don't know in advance whether a 32bit or 64bit DMA
1973	 * address mapping would be used for each mbuf, we limit its DMA
1974	 * space to the 32bit address space. Otherwise, we would have to check
1975	 * which DMA address is used and chain another descriptor for the
1976	 * 64bit DMA operation, which also means the descriptor ring size is
1977	 * variable. Limiting DMA addresses to the 32bit address space greatly
1978	 * simplifies descriptor handling and may even increase
1979	 * performance a bit due to efficient handling of descriptors.
1980	 * Apart from complicating the checksum offloading logic, it seems
1981	 * it's a really bad idea to use a separate descriptor for a 64bit
1982	 * DMA operation just to save a little descriptor memory. Anyway, I've
1983	 * never seen such an exotic scheme on ethernet interface hardware.
1984 */
1985 error = bus_dma_tag_create(
1986 bus_get_dma_tag(sc_if->msk_if_dev), /* parent */
1987 1, 0, /* alignment, boundary */
1988 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1989 BUS_SPACE_MAXADDR, /* highaddr */
1990 NULL, NULL, /* filter, filterarg */
1991 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1992 0, /* nsegments */
1993 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1994 0, /* flags */
1995 NULL, NULL, /* lockfunc, lockarg */
1996 &sc_if->msk_cdata.msk_parent_tag);
1997 if (error != 0) {
1998 device_printf(sc_if->msk_if_dev,
1999 "failed to create parent DMA tag\n");
2000 goto fail;
2001 }
2002 /* Create tag for Tx ring. */
2003 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2004 MSK_RING_ALIGN, 0, /* alignment, boundary */
2005 BUS_SPACE_MAXADDR, /* lowaddr */
2006 BUS_SPACE_MAXADDR, /* highaddr */
2007 NULL, NULL, /* filter, filterarg */
2008 MSK_TX_RING_SZ, /* maxsize */
2009 1, /* nsegments */
2010 MSK_TX_RING_SZ, /* maxsegsize */
2011 0, /* flags */
2012 NULL, NULL, /* lockfunc, lockarg */
2013 &sc_if->msk_cdata.msk_tx_ring_tag);
2014 if (error != 0) {
2015 device_printf(sc_if->msk_if_dev,
2016 "failed to create Tx ring DMA tag\n");
2017 goto fail;
2018 }
2019
2020 /* Create tag for Rx ring. */
2021 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2022 MSK_RING_ALIGN, 0, /* alignment, boundary */
2023 BUS_SPACE_MAXADDR, /* lowaddr */
2024 BUS_SPACE_MAXADDR, /* highaddr */
2025 NULL, NULL, /* filter, filterarg */
2026 MSK_RX_RING_SZ, /* maxsize */
2027 1, /* nsegments */
2028 MSK_RX_RING_SZ, /* maxsegsize */
2029 0, /* flags */
2030 NULL, NULL, /* lockfunc, lockarg */
2031 &sc_if->msk_cdata.msk_rx_ring_tag);
2032 if (error != 0) {
2033 device_printf(sc_if->msk_if_dev,
2034 "failed to create Rx ring DMA tag\n");
2035 goto fail;
2036 }
2037
2038 /* Create tag for Tx buffers. */
2039 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2040 1, 0, /* alignment, boundary */
2041 BUS_SPACE_MAXADDR, /* lowaddr */
2042 BUS_SPACE_MAXADDR, /* highaddr */
2043 NULL, NULL, /* filter, filterarg */
2044 MSK_TSO_MAXSIZE, /* maxsize */
2045 MSK_MAXTXSEGS, /* nsegments */
2046 MSK_TSO_MAXSGSIZE, /* maxsegsize */
2047 0, /* flags */
2048 NULL, NULL, /* lockfunc, lockarg */
2049 &sc_if->msk_cdata.msk_tx_tag);
2050 if (error != 0) {
2051 device_printf(sc_if->msk_if_dev,
2052 "failed to create Tx DMA tag\n");
2053 goto fail;
2054 }
2055
2056 rxalign = 1;
2057 /*
2058	 * Work around a hardware hang which seems to happen when the Rx
2059	 * buffer is not aligned on a multiple of the FIFO word (8 bytes).
2060 */
2061 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
2062 rxalign = MSK_RX_BUF_ALIGN;
2063 /* Create tag for Rx buffers. */
2064 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2065 rxalign, 0, /* alignment, boundary */
2066 BUS_SPACE_MAXADDR, /* lowaddr */
2067 BUS_SPACE_MAXADDR, /* highaddr */
2068 NULL, NULL, /* filter, filterarg */
2069 MCLBYTES, /* maxsize */
2070 1, /* nsegments */
2071 MCLBYTES, /* maxsegsize */
2072 0, /* flags */
2073 NULL, NULL, /* lockfunc, lockarg */
2074 &sc_if->msk_cdata.msk_rx_tag);
2075 if (error != 0) {
2076 device_printf(sc_if->msk_if_dev,
2077 "failed to create Rx DMA tag\n");
2078 goto fail;
2079 }
2080
2081 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
2082 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag,
2083 (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK |
2084 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map);
2085 if (error != 0) {
2086 device_printf(sc_if->msk_if_dev,
2087 "failed to allocate DMA'able memory for Tx ring\n");
2088 goto fail;
2089 }
2090
2091 ctx.msk_busaddr = 0;
2092 error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag,
2093 sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring,
2094 MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, 0);
2095 if (error != 0) {
2096 device_printf(sc_if->msk_if_dev,
2097 "failed to load DMA'able memory for Tx ring\n");
2098 goto fail;
2099 }
2100 sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr;
2101
2102 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
2103 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag,
2104 (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK |
2105 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map);
2106 if (error != 0) {
2107 device_printf(sc_if->msk_if_dev,
2108 "failed to allocate DMA'able memory for Rx ring\n");
2109 goto fail;
2110 }
2111
2112 ctx.msk_busaddr = 0;
2113 error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag,
2114 sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring,
2115 MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, 0);
2116 if (error != 0) {
2117 device_printf(sc_if->msk_if_dev,
2118 "failed to load DMA'able memory for Rx ring\n");
2119 goto fail;
2120 }
2121 sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr;
2122
2123 /* Create DMA maps for Tx buffers. */
2124 for (i = 0; i < MSK_TX_RING_CNT; i++) {
2125 txd = &sc_if->msk_cdata.msk_txdesc[i];
2126 txd->tx_m = NULL;
2127 txd->tx_dmamap = NULL;
2128 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0,
2129 &txd->tx_dmamap);
2130 if (error != 0) {
2131 device_printf(sc_if->msk_if_dev,
2132 "failed to create Tx dmamap\n");
2133 goto fail;
2134 }
2135 }
2136 /* Create DMA maps for Rx buffers. */
2137 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2138 &sc_if->msk_cdata.msk_rx_sparemap)) != 0) {
2139 device_printf(sc_if->msk_if_dev,
2140 "failed to create spare Rx dmamap\n");
2141 goto fail;
2142 }
2143 for (i = 0; i < MSK_RX_RING_CNT; i++) {
2144 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2145 rxd->rx_m = NULL;
2146 rxd->rx_dmamap = NULL;
2147 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2148 &rxd->rx_dmamap);
2149 if (error != 0) {
2150 device_printf(sc_if->msk_if_dev,
2151 "failed to create Rx dmamap\n");
2152 goto fail;
2153 }
2154 }
2155
2156fail:
2157 return (error);
2158}
2159
2160static int
2161msk_rx_dma_jalloc(struct msk_if_softc *sc_if)
2162{
2163 struct msk_dmamap_arg ctx;
2164 struct msk_rxdesc *jrxd;
2165 bus_size_t rxalign;
2166 int error, i;
2167
2168 if (jumbo_disable != 0 || (sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
2169 sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
2170 device_printf(sc_if->msk_if_dev,
2171 "disabling jumbo frame support\n");
2172 return (0);
2173 }
2174 /* Create tag for jumbo Rx ring. */
2175 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2176 MSK_RING_ALIGN, 0, /* alignment, boundary */
2177 BUS_SPACE_MAXADDR, /* lowaddr */
2178 BUS_SPACE_MAXADDR, /* highaddr */
2179 NULL, NULL, /* filter, filterarg */
2180 MSK_JUMBO_RX_RING_SZ, /* maxsize */
2181 1, /* nsegments */
2182 MSK_JUMBO_RX_RING_SZ, /* maxsegsize */
2183 0, /* flags */
2184 NULL, NULL, /* lockfunc, lockarg */
2185 &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2186 if (error != 0) {
2187 device_printf(sc_if->msk_if_dev,
2188 "failed to create jumbo Rx ring DMA tag\n");
2189 goto jumbo_fail;
2190 }
2191
2192 rxalign = 1;
2193 /*
2194	 * Work around a hardware hang which seems to happen when the Rx
2195	 * buffer is not aligned on a multiple of the FIFO word (8 bytes).
2196 */
2197 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
2198 rxalign = MSK_RX_BUF_ALIGN;
2199 /* Create tag for jumbo Rx buffers. */
2200 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2201 rxalign, 0, /* alignment, boundary */
2202 BUS_SPACE_MAXADDR, /* lowaddr */
2203 BUS_SPACE_MAXADDR, /* highaddr */
2204 NULL, NULL, /* filter, filterarg */
2205 MJUM9BYTES, /* maxsize */
2206 1, /* nsegments */
2207 MJUM9BYTES, /* maxsegsize */
2208 0, /* flags */
2209 NULL, NULL, /* lockfunc, lockarg */
2210 &sc_if->msk_cdata.msk_jumbo_rx_tag);
2211 if (error != 0) {
2212 device_printf(sc_if->msk_if_dev,
2213 "failed to create jumbo Rx DMA tag\n");
2214 goto jumbo_fail;
2215 }
2216
2217 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
2218 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2219 (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
2220 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2221 &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2222 if (error != 0) {
2223 device_printf(sc_if->msk_if_dev,
2224 "failed to allocate DMA'able memory for jumbo Rx ring\n");
2225 goto jumbo_fail;
2226 }
2227
2228 ctx.msk_busaddr = 0;
2229 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2230 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
2231 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
2232 msk_dmamap_cb, &ctx, 0);
2233 if (error != 0) {
2234 device_printf(sc_if->msk_if_dev,
2235 "failed to load DMA'able memory for jumbo Rx ring\n");
2236 goto jumbo_fail;
2237 }
2238 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
2239
2240 /* Create DMA maps for jumbo Rx buffers. */
2241 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2242 &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
2243 device_printf(sc_if->msk_if_dev,
2244 "failed to create spare jumbo Rx dmamap\n");
2245 goto jumbo_fail;
2246 }
2247 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2248 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2249 jrxd->rx_m = NULL;
2250 jrxd->rx_dmamap = NULL;
2251 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2252 &jrxd->rx_dmamap);
2253 if (error != 0) {
2254 device_printf(sc_if->msk_if_dev,
2255 "failed to create jumbo Rx dmamap\n");
2256 goto jumbo_fail;
2257 }
2258 }
2259
2260 return (0);
2261
2262jumbo_fail:
2263 msk_rx_dma_jfree(sc_if);
2264 device_printf(sc_if->msk_if_dev, "disabling jumbo frame support "
2265 "due to resource shortage\n");
2266 sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
2267 return (error);
2268}
2269
2270static void
2271msk_txrx_dma_free(struct msk_if_softc *sc_if)
2272{
2273 struct msk_txdesc *txd;
2274 struct msk_rxdesc *rxd;
2275 int i;
2276
2277 /* Tx ring. */
2278 if (sc_if->msk_cdata.msk_tx_ring_tag) {
2279 if (sc_if->msk_cdata.msk_tx_ring_map)
2280 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag,
2281 sc_if->msk_cdata.msk_tx_ring_map);
2282 if (sc_if->msk_cdata.msk_tx_ring_map &&
2283 sc_if->msk_rdata.msk_tx_ring)
2284 bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag,
2285 sc_if->msk_rdata.msk_tx_ring,
2286 sc_if->msk_cdata.msk_tx_ring_map);
2287 sc_if->msk_rdata.msk_tx_ring = NULL;
2288 sc_if->msk_cdata.msk_tx_ring_map = NULL;
2289 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag);
2290 sc_if->msk_cdata.msk_tx_ring_tag = NULL;
2291 }
2292 /* Rx ring. */
2293 if (sc_if->msk_cdata.msk_rx_ring_tag) {
2294 if (sc_if->msk_cdata.msk_rx_ring_map)
2295 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag,
2296 sc_if->msk_cdata.msk_rx_ring_map);
2297 if (sc_if->msk_cdata.msk_rx_ring_map &&
2298 sc_if->msk_rdata.msk_rx_ring)
2299 bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag,
2300 sc_if->msk_rdata.msk_rx_ring,
2301 sc_if->msk_cdata.msk_rx_ring_map);
2302 sc_if->msk_rdata.msk_rx_ring = NULL;
2303 sc_if->msk_cdata.msk_rx_ring_map = NULL;
2304 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag);
2305 sc_if->msk_cdata.msk_rx_ring_tag = NULL;
2306 }
2307 /* Tx buffers. */
2308 if (sc_if->msk_cdata.msk_tx_tag) {
2309 for (i = 0; i < MSK_TX_RING_CNT; i++) {
2310 txd = &sc_if->msk_cdata.msk_txdesc[i];
2311 if (txd->tx_dmamap) {
2312 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
2313 txd->tx_dmamap);
2314 txd->tx_dmamap = NULL;
2315 }
2316 }
2317 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
2318 sc_if->msk_cdata.msk_tx_tag = NULL;
2319 }
2320 /* Rx buffers. */
2321 if (sc_if->msk_cdata.msk_rx_tag) {
2322 for (i = 0; i < MSK_RX_RING_CNT; i++) {
2323 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2324 if (rxd->rx_dmamap) {
2325 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2326 rxd->rx_dmamap);
2327 rxd->rx_dmamap = NULL;
2328 }
2329 }
2330 if (sc_if->msk_cdata.msk_rx_sparemap) {
2331 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2332 sc_if->msk_cdata.msk_rx_sparemap);
2333 sc_if->msk_cdata.msk_rx_sparemap = 0;
2334 }
2335 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2336 sc_if->msk_cdata.msk_rx_tag = NULL;
2337 }
2338 if (sc_if->msk_cdata.msk_parent_tag) {
2339 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
2340 sc_if->msk_cdata.msk_parent_tag = NULL;
2341 }
2342}
2343
2344static void
2345msk_rx_dma_jfree(struct msk_if_softc *sc_if)
2346{
2347 struct msk_rxdesc *jrxd;
2348 int i;
2349
2350 /* Jumbo Rx ring. */
2351 if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
2352 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map)
2353 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2354 sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2355 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
2356 sc_if->msk_rdata.msk_jumbo_rx_ring)
2357 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2358 sc_if->msk_rdata.msk_jumbo_rx_ring,
2359 sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2360 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
2361 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
2362 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2363 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
2364 }
2365 /* Jumbo Rx buffers. */
2366 if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
2367 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2368 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2369 if (jrxd->rx_dmamap) {
2370 bus_dmamap_destroy(
2371 sc_if->msk_cdata.msk_jumbo_rx_tag,
2372 jrxd->rx_dmamap);
2373 jrxd->rx_dmamap = NULL;
2374 }
2375 }
2376 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
2377 bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
2378 sc_if->msk_cdata.msk_jumbo_rx_sparemap);
2379 sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
2380 }
2381 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
2382 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
2383 }
2384}
2385
2386static int
2387msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
2388{
2389 struct msk_txdesc *txd, *txd_last;
2390 struct msk_tx_desc *tx_le;
2391 struct mbuf *m;
2392 bus_dmamap_t map;
2393 bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
2394 uint32_t control, prod, si;
2395 uint16_t offset, tcp_offset, tso_mtu;
2396 int error, i, nseg, tso;
2397
2398 MSK_IF_LOCK_ASSERT(sc_if);
2399
2400 tcp_offset = offset = 0;
2401 m = *m_head;
2402 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
2403 (m->m_pkthdr.csum_flags & (MSK_CSUM_FEATURES | CSUM_TSO)) != 0) {
2404 /*
2405	 * Since the mbuf has no protocol specific structure information
2406	 * in it, we have to inspect the protocol information here to
2407	 * set up TSO and checksum offload. I don't know why Marvell
2408	 * made such a decision in the chip design because other GigE
2409	 * hardware normally takes care of all these chores in
2410	 * hardware. However, the TSO performance of the Yukon II is
2411	 * good enough that it's worth implementing.
2412 */
2413 struct ether_header *eh;
2414 struct ip *ip;
2415 struct tcphdr *tcp;
2416
2417 if (M_WRITABLE(m) == 0) {
2418 /* Get a writable copy. */
2419 m = m_dup(*m_head, M_DONTWAIT);
2420 m_freem(*m_head);
2421 if (m == NULL) {
2422 *m_head = NULL;
2423 return (ENOBUFS);
2424 }
2425 *m_head = m;
2426 }
2427
2428 offset = sizeof(struct ether_header);
2429 m = m_pullup(m, offset);
2430 if (m == NULL) {
2431 *m_head = NULL;
2432 return (ENOBUFS);
2433 }
2434 eh = mtod(m, struct ether_header *);
2435 /* Check if hardware VLAN insertion is off. */
2436 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2437 offset = sizeof(struct ether_vlan_header);
2438 m = m_pullup(m, offset);
2439 if (m == NULL) {
2440 *m_head = NULL;
2441 return (ENOBUFS);
2442 }
2443 }
2444 m = m_pullup(m, offset + sizeof(struct ip));
2445 if (m == NULL) {
2446 *m_head = NULL;
2447 return (ENOBUFS);
2448 }
2449 ip = (struct ip *)(mtod(m, char *) + offset);
2450 offset += (ip->ip_hl << 2);
2451 tcp_offset = offset;
2452 /*
2453	 * It seems that the Yukon II has a Tx checksum offload bug for
2454	 * small TCP packets that are less than 60 bytes in size
2455	 * (e.g. TCP window probe packets, pure ACK packets).
2456	 * Common workarounds such as padding the frame with zeros to
2457	 * the minimum ethernet frame size did not work at all.
2458	 * Instead of disabling checksum offload completely we
2459	 * resort to a S/W checksum routine when we encounter short
2460	 * TCP frames.
2461 * Short UDP packets appear to be handled correctly by
2462 * Yukon II.
2463 */
2464 if (m->m_pkthdr.len < MSK_MIN_FRAMELEN &&
2465 (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
2466 m = m_pullup(m, offset + sizeof(struct tcphdr));
2467 if (m == NULL) {
2468 *m_head = NULL;
2469 return (ENOBUFS);
2470 }
2471 *(uint16_t *)(m->m_data + offset +
2472 m->m_pkthdr.csum_data) = in_cksum_skip(m,
2473 m->m_pkthdr.len, offset);
2474 m->m_pkthdr.csum_flags &= ~CSUM_TCP;
2475 }
2476 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2477 m = m_pullup(m, offset + sizeof(struct tcphdr));
2478 if (m == NULL) {
2479 *m_head = NULL;
2480 return (ENOBUFS);
2481 }
2482 tcp = (struct tcphdr *)(mtod(m, char *) + offset);
2483 offset += (tcp->th_off << 2);
2484 }
2485 *m_head = m;
2486 }
2487
2488 prod = sc_if->msk_cdata.msk_tx_prod;
2489 txd = &sc_if->msk_cdata.msk_txdesc[prod];
2490 txd_last = txd;
2491 map = txd->tx_dmamap;
2492 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map,
2493 *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2494 if (error == EFBIG) {
2495 m = m_collapse(*m_head, M_DONTWAIT, MSK_MAXTXSEGS);
2496 if (m == NULL) {
2497 m_freem(*m_head);
2498 *m_head = NULL;
2499 return (ENOBUFS);
2500 }
2501 *m_head = m;
2502 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag,
2503 map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2504 if (error != 0) {
2505 m_freem(*m_head);
2506 *m_head = NULL;
2507 return (error);
2508 }
2509 } else if (error != 0)
2510 return (error);
2511 if (nseg == 0) {
2512 m_freem(*m_head);
2513 *m_head = NULL;
2514 return (EIO);
2515 }
2516
2517 /* Check number of available descriptors. */
2518 if (sc_if->msk_cdata.msk_tx_cnt + nseg >=
2519 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) {
2520 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
2521 return (ENOBUFS);
2522 }
2523
2524 control = 0;
2525 tso = 0;
2526 tx_le = NULL;
2527
2528 /* Check TSO support. */
2529 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2530 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
2531 tso_mtu = m->m_pkthdr.tso_segsz;
2532 else
2533 tso_mtu = offset + m->m_pkthdr.tso_segsz;
2534 if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) {
2535 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2536 tx_le->msk_addr = htole32(tso_mtu);
2537 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
2538 tx_le->msk_control = htole32(OP_MSS | HW_OWNER);
2539 else
2540 tx_le->msk_control =
2541 htole32(OP_LRGLEN | HW_OWNER);
2542 sc_if->msk_cdata.msk_tx_cnt++;
2543 MSK_INC(prod, MSK_TX_RING_CNT);
2544 sc_if->msk_cdata.msk_tso_mtu = tso_mtu;
2545 }
2546 tso++;
2547 }
2548 /* Check if we have a VLAN tag to insert. */
2549 if ((m->m_flags & M_VLANTAG) != 0) {
2550 if (tso == 0) {
2551 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2552 tx_le->msk_addr = htole32(0);
2553 tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
2554 htons(m->m_pkthdr.ether_vtag));
2555 sc_if->msk_cdata.msk_tx_cnt++;
2556 MSK_INC(prod, MSK_TX_RING_CNT);
2557 } else {
2558 tx_le->msk_control |= htole32(OP_VLAN |
2559 htons(m->m_pkthdr.ether_vtag));
2560 }
2561 control |= INS_VLAN;
2562 }
2563 /* Check if we have to handle checksum offload. */
2564 if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) {
2565 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
2566 control |= CALSUM;
2567 else {
2568 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2569 tx_le->msk_addr = htole32(((tcp_offset +
2570 m->m_pkthdr.csum_data) & 0xffff) |
2571 ((uint32_t)tcp_offset << 16));
2572 tx_le->msk_control = htole32(1 << 16 |
2573 (OP_TCPLISW | HW_OWNER));
2574 control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
2575 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2576 control |= UDPTCP;
2577 sc_if->msk_cdata.msk_tx_cnt++;
2578 MSK_INC(prod, MSK_TX_RING_CNT);
2579 }
2566 }
2567
2568 si = prod;
2569 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2570 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
2571 if (tso == 0)
2572 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2573 OP_PACKET);
2574 else
2575 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2576 OP_LARGESEND);
2577 sc_if->msk_cdata.msk_tx_cnt++;
2578 MSK_INC(prod, MSK_TX_RING_CNT);
2579
2580 for (i = 1; i < nseg; i++) {
2581 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2582 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
2583 tx_le->msk_control = htole32(txsegs[i].ds_len | control |
2584 OP_BUFFER | HW_OWNER);
2585 sc_if->msk_cdata.msk_tx_cnt++;
2586 MSK_INC(prod, MSK_TX_RING_CNT);
2587 }
2588 /* Update producer index. */
2589 sc_if->msk_cdata.msk_tx_prod = prod;
2590
2591	/* Set EOP on the last descriptor. */
2592 prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
2593 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2594 tx_le->msk_control |= htole32(EOP);
2595
2596	/* Hand the first descriptor to the hardware last so it never sees a partial chain. */
2597 tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
2598 tx_le->msk_control |= htole32(HW_OWNER);
2599
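	/*
	 * Swap DMA maps so that the map actually loaded with this mbuf ends
	 * up on the last descriptor of the chain, together with the mbuf
	 * pointer, where the Tx completion handler expects to find them.
	 */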
2600 txd = &sc_if->msk_cdata.msk_txdesc[prod];
2601 map = txd_last->tx_dmamap;
2602 txd_last->tx_dmamap = txd->tx_dmamap;
2603 txd->tx_dmamap = map;
2604 txd->tx_m = m;
2605
2606 /* Sync descriptors. */
2607 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
2608 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
2609 sc_if->msk_cdata.msk_tx_ring_map,
2610 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2611
2612 return (0);
2613}
2614
2615static void
2616msk_tx_task(void *arg, int pending)
2617{
2618 struct ifnet *ifp;
2619
2620 ifp = arg;
2621 msk_start(ifp);
2622}
2623
2624static void
2625msk_start(struct ifnet *ifp)
2626{
2627 struct msk_if_softc *sc_if;
2628 struct mbuf *m_head;
2629 int enq;
2630
2631 sc_if = ifp->if_softc;
2632
2633 MSK_IF_LOCK(sc_if);
2634
2635 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2636 IFF_DRV_RUNNING || (sc_if->msk_flags & MSK_FLAG_LINK) == 0) {
2637 MSK_IF_UNLOCK(sc_if);
2638 return;
2639 }
2640
2641 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2642 sc_if->msk_cdata.msk_tx_cnt <
2643 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
2644 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2645 if (m_head == NULL)
2646 break;
2647 /*
2648 * Pack the data into the transmit ring. If we
2649 * don't have room, set the OACTIVE flag and wait
2650 * for the NIC to drain the ring.
2651 */
2652 if (msk_encap(sc_if, &m_head) != 0) {
2653 if (m_head == NULL)
2654 break;
2655 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2656 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2657 break;
2658 }
2659
2660 enq++;
2661 /*
2662 * If there's a BPF listener, bounce a copy of this frame
2663 * to him.
2664 */
2665 ETHER_BPF_MTAP(ifp, m_head);
2666 }
2667
2668 if (enq > 0) {
2669 /* Transmit */
2670 CSR_WRITE_2(sc_if->msk_softc,
2671 Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
2672 sc_if->msk_cdata.msk_tx_prod);
2673
2674 /* Set a timeout in case the chip goes out to lunch. */
2675 sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT;
2676 }
2677
2678 MSK_IF_UNLOCK(sc_if);
2679}
2680
2681static void
2682msk_watchdog(struct msk_if_softc *sc_if)
2683{
2684 struct ifnet *ifp;
2685 uint32_t ridx;
2686 int idx;
2687
2688 MSK_IF_LOCK_ASSERT(sc_if);
2689
2690 if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer)
2691 return;
2692 ifp = sc_if->msk_ifp;
2693 if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) {
2694 if (bootverbose)
2695 if_printf(sc_if->msk_ifp, "watchdog timeout "
2696 "(missed link)\n");
2697 ifp->if_oerrors++;
2698 msk_init_locked(sc_if);
2699 return;
2700 }
2701
2702 /*
2703 * Reclaim first as there is a possibility of losing Tx completion
2704 * interrupts.
2705 */
2706 ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX;
2707 idx = CSR_READ_2(sc_if->msk_softc, ridx);
2708 if (sc_if->msk_cdata.msk_tx_cons != idx) {
2709 msk_txeof(sc_if, idx);
2710 if (sc_if->msk_cdata.msk_tx_cnt == 0) {
2711 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2712 "-- recovering\n");
2713 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2714 taskqueue_enqueue(taskqueue_fast,
2715 &sc_if->msk_tx_task);
2716 return;
2717 }
2718 }
2719
2720 if_printf(ifp, "watchdog timeout\n");
2721 ifp->if_oerrors++;
2722 msk_init_locked(sc_if);
2723 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2724 taskqueue_enqueue(taskqueue_fast, &sc_if->msk_tx_task);
2725}
2726
2727static int
2728mskc_shutdown(device_t dev)
2729{
2730 struct msk_softc *sc;
2731 int i;
2732
2733 sc = device_get_softc(dev);
2734 MSK_LOCK(sc);
2735 for (i = 0; i < sc->msk_num_port; i++) {
2736 if (sc->msk_if[i] != NULL)
2737 msk_stop(sc->msk_if[i]);
2738 }
2739
2740 /* Disable all interrupts. */
2741 CSR_WRITE_4(sc, B0_IMSK, 0);
2742 CSR_READ_4(sc, B0_IMSK);
2743 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2744 CSR_READ_4(sc, B0_HWE_IMSK);
2745
2746	/* Put the hardware into reset. */
2747 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2748
2749 MSK_UNLOCK(sc);
2750 return (0);
2751}
2752
2753static int
2754mskc_suspend(device_t dev)
2755{
2756 struct msk_softc *sc;
2757 int i;
2758
2759 sc = device_get_softc(dev);
2760
2761 MSK_LOCK(sc);
2762
2763 for (i = 0; i < sc->msk_num_port; i++) {
2764 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2765 ((sc->msk_if[i]->msk_ifp->if_drv_flags &
2766 IFF_DRV_RUNNING) != 0))
2767 msk_stop(sc->msk_if[i]);
2768 }
2769
2770 /* Disable all interrupts. */
2771 CSR_WRITE_4(sc, B0_IMSK, 0);
2772 CSR_READ_4(sc, B0_IMSK);
2773 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2774 CSR_READ_4(sc, B0_HWE_IMSK);
2775
2776 msk_phy_power(sc, MSK_PHY_POWERDOWN);
2777
2778	/* Put the hardware into reset. */
2779 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2780 sc->msk_pflags |= MSK_FLAG_SUSPEND;
2781
2782 MSK_UNLOCK(sc);
2783
2784 return (0);
2785}
2786
2787static int
2788mskc_resume(device_t dev)
2789{
2790 struct msk_softc *sc;
2791 int i;
2792
2793 sc = device_get_softc(dev);
2794
2795 MSK_LOCK(sc);
2796
2797 mskc_reset(sc);
2798 for (i = 0; i < sc->msk_num_port; i++) {
2799 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2800 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0))
2801 msk_init_locked(sc->msk_if[i]);
2802 }
2803 sc->msk_pflags &= ~MSK_FLAG_SUSPEND;
2804
2805 MSK_UNLOCK(sc);
2806
2807 return (0);
2808}
2809
2810#ifndef __NO_STRICT_ALIGNMENT
2811static __inline void
2812msk_fixup_rx(struct mbuf *m)
2813{
2814 int i;
2815 uint16_t *src, *dst;
2816
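	/*
	 * Shift the frame back by MSK_RX_BUF_ALIGN - ETHER_ALIGN bytes,
	 * copying one 16-bit word at a time, so that the IP header ends up
	 * properly aligned on strict-alignment architectures.
	 */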
2817 src = mtod(m, uint16_t *);
2818 dst = src - 3;
2819
2820 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
2821 *dst++ = *src++;
2822
2823 m->m_data -= (MSK_RX_BUF_ALIGN - ETHER_ALIGN);
2824}
2825#endif
2826
2827static void
2828msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
2829{
2830 struct mbuf *m;
2831 struct ifnet *ifp;
2832 struct msk_rxdesc *rxd;
2833 int cons, rxlen;
2834
2835 ifp = sc_if->msk_ifp;
2836
2837 MSK_IF_LOCK_ASSERT(sc_if);
2838
2839 cons = sc_if->msk_cdata.msk_rx_cons;
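	/*
	 * The do {} while (0) construct lets the error paths break out
	 * early while still reaching the consumer/producer index update
	 * at the end.
	 */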
2840 do {
2841 rxlen = status >> 16;
2842 if ((status & GMR_FS_VLAN) != 0 &&
2843 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2844 rxlen -= ETHER_VLAN_ENCAP_LEN;
2845 if (len > sc_if->msk_framesize ||
2846 ((status & GMR_FS_ANY_ERR) != 0) ||
2847 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
2848			/* Don't count flow-control packets as errors. */
2849 if ((status & GMR_FS_GOOD_FC) == 0)
2850 ifp->if_ierrors++;
2851 msk_discard_rxbuf(sc_if, cons);
2852 break;
2853 }
2854 rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
2855 m = rxd->rx_m;
2856 if (msk_newbuf(sc_if, cons) != 0) {
2857 ifp->if_iqdrops++;
2858 /* Reuse old buffer. */
2859 msk_discard_rxbuf(sc_if, cons);
2860 break;
2861 }
2862 m->m_pkthdr.rcvif = ifp;
2863 m->m_pkthdr.len = m->m_len = len;
2864#ifndef __NO_STRICT_ALIGNMENT
2865 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
2866 msk_fixup_rx(m);
2867#endif
2868 ifp->if_ipackets++;
2869 /* Check for VLAN tagged packets. */
2870 if ((status & GMR_FS_VLAN) != 0 &&
2871 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2872 m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
2873 m->m_flags |= M_VLANTAG;
2874 }
2875 MSK_IF_UNLOCK(sc_if);
2876 (*ifp->if_input)(ifp, m);
2877 MSK_IF_LOCK(sc_if);
2878 } while (0);
2879
2880 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
2881 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
2882}
2883
2884static void
2885msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
2886{
2887 struct mbuf *m;
2888 struct ifnet *ifp;
2889 struct msk_rxdesc *jrxd;
2890 int cons, rxlen;
2891
2892 ifp = sc_if->msk_ifp;
2893
2894 MSK_IF_LOCK_ASSERT(sc_if);
2895
2896 cons = sc_if->msk_cdata.msk_rx_cons;
2897 do {
2898 rxlen = status >> 16;
2899 if ((status & GMR_FS_VLAN) != 0 &&
2900 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2901 rxlen -= ETHER_VLAN_ENCAP_LEN;
2902 if (len > sc_if->msk_framesize ||
2903 ((status & GMR_FS_ANY_ERR) != 0) ||
2904 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
2905			/* Don't count flow-control packets as errors. */
2906 if ((status & GMR_FS_GOOD_FC) == 0)
2907 ifp->if_ierrors++;
2908 msk_discard_jumbo_rxbuf(sc_if, cons);
2909 break;
2910 }
2911 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
2912 m = jrxd->rx_m;
2913 if (msk_jumbo_newbuf(sc_if, cons) != 0) {
2914 ifp->if_iqdrops++;
2915 /* Reuse old buffer. */
2916 msk_discard_jumbo_rxbuf(sc_if, cons);
2917 break;
2918 }
2919 m->m_pkthdr.rcvif = ifp;
2920 m->m_pkthdr.len = m->m_len = len;
2921#ifndef __NO_STRICT_ALIGNMENT
2922 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
2923 msk_fixup_rx(m);
2924#endif
2925 ifp->if_ipackets++;
2926 /* Check for VLAN tagged packets. */
2927 if ((status & GMR_FS_VLAN) != 0 &&
2928 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2929 m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
2930 m->m_flags |= M_VLANTAG;
2931 }
2932 MSK_IF_UNLOCK(sc_if);
2933 (*ifp->if_input)(ifp, m);
2934 MSK_IF_LOCK(sc_if);
2935 } while (0);
2936
2937 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
2938 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
2939}
2940
2941static void
2942msk_txeof(struct msk_if_softc *sc_if, int idx)
2943{
2944 struct msk_txdesc *txd;
2945 struct msk_tx_desc *cur_tx;
2946 struct ifnet *ifp;
2947 uint32_t control;
2948 int cons, prog;
2949
2950 MSK_IF_LOCK_ASSERT(sc_if);
2951
2952 ifp = sc_if->msk_ifp;
2953
2954 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
2955 sc_if->msk_cdata.msk_tx_ring_map,
2956 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2957 /*
2958 * Go through our tx ring and free mbufs for those
2959 * frames that have been sent.
2960 */
2961 cons = sc_if->msk_cdata.msk_tx_cons;
2962 prog = 0;
2963 for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
2964 if (sc_if->msk_cdata.msk_tx_cnt <= 0)
2965 break;
2966 prog++;
2967 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
2968 control = le32toh(cur_tx->msk_control);
2969 sc_if->msk_cdata.msk_tx_cnt--;
2970 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
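		/*
		 * Only the final (EOP) descriptor of a frame carries an mbuf
		 * and DMA map; intermediate descriptors are only accounted
		 * for and then skipped.
		 */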
2971 if ((control & EOP) == 0)
2972 continue;
2973 txd = &sc_if->msk_cdata.msk_txdesc[cons];
2974 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap,
2975 BUS_DMASYNC_POSTWRITE);
2976 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);
2977
2978 ifp->if_opackets++;
2979 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
2980 __func__));
2981 m_freem(txd->tx_m);
2982 txd->tx_m = NULL;
2983 }
2984
2985 if (prog > 0) {
2986 sc_if->msk_cdata.msk_tx_cons = cons;
2987 if (sc_if->msk_cdata.msk_tx_cnt == 0)
2988 sc_if->msk_watchdog_timer = 0;
2989 /* No need to sync LEs as we didn't update LEs. */
2990 }
2991}
2992
2993static void
2994msk_tick(void *xsc_if)
2995{
2996 struct msk_if_softc *sc_if;
2997 struct mii_data *mii;
2998
2999 sc_if = xsc_if;
3000
3001 MSK_IF_LOCK_ASSERT(sc_if);
3002
3003 mii = device_get_softc(sc_if->msk_miibus);
3004
3005 mii_tick(mii);
3006 msk_watchdog(sc_if);
3007 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3008}
3009
3010static void
3011msk_intr_phy(struct msk_if_softc *sc_if)
3012{
3013 uint16_t status;
3014
3015 msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3016 status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3017 /* Handle FIFO Underrun/Overflow? */
3018 if ((status & PHY_M_IS_FIFO_ERROR))
3019 device_printf(sc_if->msk_if_dev,
3020 "PHY FIFO underrun/overflow.\n");
3021}
3022
3023static void
3024msk_intr_gmac(struct msk_if_softc *sc_if)
3025{
3026 struct msk_softc *sc;
3027 uint8_t status;
3028
3029 sc = sc_if->msk_softc;
3030 status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3031
3032 /* GMAC Rx FIFO overrun. */
3033 if ((status & GM_IS_RX_FF_OR) != 0) {
3034 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3035 GMF_CLI_RX_FO);
3036 device_printf(sc_if->msk_if_dev, "Rx FIFO overrun!\n");
3037 }
3038 /* GMAC Tx FIFO underrun. */
3039 if ((status & GM_IS_TX_FF_UR) != 0) {
3040 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3041 GMF_CLI_TX_FU);
3042 device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
3043 /*
3044 * XXX
3045 * In case of Tx underrun, we may need to flush/reset
3046 * Tx MAC but that would also require resynchronization
3047		 * with status LEs. Reinitializing status LEs would affect the
3048		 * other port in dual MAC configurations, so it should be
3049		 * avoided as much as possible.
3050		 * Due to lack of documentation this is all guesswork, but it
3051		 * needs more investigation.
3052 */
3053 }
3054}
3055
3056static void
3057msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
3058{
3059 struct msk_softc *sc;
3060
3061 sc = sc_if->msk_softc;
3062 if ((status & Y2_IS_PAR_RD1) != 0) {
3063 device_printf(sc_if->msk_if_dev,
3064 "RAM buffer read parity error\n");
3065 /* Clear IRQ. */
3066 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3067 RI_CLR_RD_PERR);
3068 }
3069 if ((status & Y2_IS_PAR_WR1) != 0) {
3070 device_printf(sc_if->msk_if_dev,
3071 "RAM buffer write parity error\n");
3072 /* Clear IRQ. */
3073 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3074 RI_CLR_WR_PERR);
3075 }
3076 if ((status & Y2_IS_PAR_MAC1) != 0) {
3077 device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
3078 /* Clear IRQ. */
3079 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3080 GMF_CLI_TX_PE);
3081 }
3082 if ((status & Y2_IS_PAR_RX1) != 0) {
3083 device_printf(sc_if->msk_if_dev, "Rx parity error\n");
3084 /* Clear IRQ. */
3085 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
3086 }
3087 if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
3088 device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
3089 /* Clear IRQ. */
3090 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
3091 }
3092}
3093
3094static void
3095msk_intr_hwerr(struct msk_softc *sc)
3096{
3097 uint32_t status;
3098 uint32_t tlphead[4];
3099
3100 status = CSR_READ_4(sc, B0_HWE_ISRC);
3101 /* Time Stamp timer overflow. */
3102 if ((status & Y2_IS_TIST_OV) != 0)
3103 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
3104 if ((status & Y2_IS_PCI_NEXP) != 0) {
3105 /*
3106		 * A PCI Express error occurred which is not described in the
3107		 * PEX spec.
3108		 * This error is also mapped to either the Master Abort
3109		 * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
3110		 * can only be cleared there.
3111 */
3112 device_printf(sc->msk_dev,
3113 "PCI Express protocol violation error\n");
3114 }
3115
3116 if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
3117 uint16_t v16;
3118
3119		if ((status & Y2_IS_MST_ERR) != 0)
3120			device_printf(sc->msk_dev,
3121			    "unexpected IRQ Master error\n");
3122		else
3123			device_printf(sc->msk_dev,
3124			    "unexpected IRQ Status error\n");
3125 /* Reset all bits in the PCI status register. */
3126 v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
3127 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3128 pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
3129 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
3130 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
3131 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3132 }
3133
3134 /* Check for PCI Express Uncorrectable Error. */
3135 if ((status & Y2_IS_PCI_EXP) != 0) {
3136 uint32_t v32;
3137
3138 /*
3139 * On PCI Express bus bridges are called root complexes (RC).
3140 * PCI Express errors are recognized by the root complex too,
3141		 * which requests the system to handle the problem. After an
3142		 * error has occurred it may no longer be possible to access
3143		 * the adapter.
3144 */
3145
3146 v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
3147 if ((v32 & PEX_UNSUP_REQ) != 0) {
3148 /* Ignore unsupported request error. */
3149 device_printf(sc->msk_dev,
3150 "Uncorrectable PCI Express error\n");
3151 }
3152 if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
3153 int i;
3154
3155			/* Get the TLP header from the Log Registers. */
3156 for (i = 0; i < 4; i++)
3157 tlphead[i] = CSR_PCI_READ_4(sc,
3158 PEX_HEADER_LOG + i * 4);
3159 /* Check for vendor defined broadcast message. */
3160 if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
3161 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
3162 CSR_WRITE_4(sc, B0_HWE_IMSK,
3163 sc->msk_intrhwemask);
3164 CSR_READ_4(sc, B0_HWE_IMSK);
3165 }
3166 }
3167 /* Clear the interrupt. */
3168 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3169 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
3170 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3171 }
3172
3173 if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
3174 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
3175 if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
3176 msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
3177}
3178
3179static __inline void
3180msk_rxput(struct msk_if_softc *sc_if)
3181{
3182 struct msk_softc *sc;
3183
3184 sc = sc_if->msk_softc;
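	/*
	 * Sync whichever Rx ring is in use and hand the new producer index
	 * to the prefetch unit so it can fetch the freshly written LEs.
	 */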
3185 if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN))
3186 bus_dmamap_sync(
3187 sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
3188 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
3189 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3190 else
3191 bus_dmamap_sync(
3192 sc_if->msk_cdata.msk_rx_ring_tag,
3193 sc_if->msk_cdata.msk_rx_ring_map,
3194 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3195 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
3196 PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
3197}
3198
3199static int
3200msk_handle_events(struct msk_softc *sc)
3201{
3202 struct msk_if_softc *sc_if;
3203 int rxput[2];
3204 struct msk_stat_desc *sd;
3205 uint32_t control, status;
3206 int cons, idx, len, port, rxprog;
3207
3208 idx = CSR_READ_2(sc, STAT_PUT_IDX);
3209 if (idx == sc->msk_stat_cons)
3210 return (0);
3211
3212 /* Sync status LEs. */
3213 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
3214 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3215 /* XXX Sync Rx LEs here. */
3216
3217 rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
3218
3219 rxprog = 0;
3220 for (cons = sc->msk_stat_cons; cons != idx;) {
3221 sd = &sc->msk_stat_ring[cons];
3222 control = le32toh(sd->msk_control);
3223 if ((control & HW_OWNER) == 0)
3224 break;
3225 /*
3226 * Marvell's FreeBSD driver updates status LE after clearing
3227 * HW_OWNER. However we don't have a way to sync single LE
3228 * with bus_dma(9) API. bus_dma(9) provides a way to sync
3229 * an entire DMA map. So don't sync LE until we have a better
3230 * way to sync LEs.
3231 */
3232 control &= ~HW_OWNER;
3233 sd->msk_control = htole32(control);
3234 status = le32toh(sd->msk_status);
3235 len = control & STLE_LEN_MASK;
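		/* Bit 16 of the control word selects the MAC: 0 = port A, 1 = port B. */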
3236 port = (control >> 16) & 0x01;
3237 sc_if = sc->msk_if[port];
3238 if (sc_if == NULL) {
3239 device_printf(sc->msk_dev, "invalid port opcode "
3240 "0x%08x\n", control & STLE_OP_MASK);
3241 continue;
3242 }
3243
3244 switch (control & STLE_OP_MASK) {
3245 case OP_RXVLAN:
3246 sc_if->msk_vtag = ntohs(len);
3247 break;
3248 case OP_RXCHKSVLAN:
3249 sc_if->msk_vtag = ntohs(len);
3250 break;
3251 case OP_RXSTAT:
3252 if (sc_if->msk_framesize >
3253 (MCLBYTES - MSK_RX_BUF_ALIGN))
3254 msk_jumbo_rxeof(sc_if, status, len);
3255 else
3256 msk_rxeof(sc_if, status, len);
3257 rxprog++;
3258 /*
3259 * Because there is no way to sync single Rx LE
3260 * put the DMA sync operation off until the end of
3261 * event processing.
3262 */
3263 rxput[port]++;
3264			/* Update the prefetch unit if we've passed the watermark. */
3265 if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
3266 msk_rxput(sc_if);
3267 rxput[port] = 0;
3268 }
3269 break;
3270 case OP_TXINDEXLE:
3271 if (sc->msk_if[MSK_PORT_A] != NULL)
3272 msk_txeof(sc->msk_if[MSK_PORT_A],
3273 status & STLE_TXA1_MSKL);
3274 if (sc->msk_if[MSK_PORT_B] != NULL)
3275 msk_txeof(sc->msk_if[MSK_PORT_B],
3276 ((status & STLE_TXA2_MSKL) >>
3277 STLE_TXA2_SHIFTL) |
3278 ((len & STLE_TXA2_MSKH) <<
3279 STLE_TXA2_SHIFTH));
3280 break;
3281 default:
3282 device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
3283 control & STLE_OP_MASK);
3284 break;
3285 }
3286 MSK_INC(cons, MSK_STAT_RING_CNT);
3287 if (rxprog > sc->msk_process_limit)
3288 break;
3289 }
3290
3291 sc->msk_stat_cons = cons;
3292 /* XXX We should sync status LEs here. See above notes. */
3293
3294 if (rxput[MSK_PORT_A] > 0)
3295 msk_rxput(sc->msk_if[MSK_PORT_A]);
3296 if (rxput[MSK_PORT_B] > 0)
3297 msk_rxput(sc->msk_if[MSK_PORT_B]);
3298
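	/*
	 * Report whether the hardware queued more status LEs while we were
	 * processing so the caller can make another pass.
	 */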
3299 return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
3300}
3301
3302/* Legacy interrupt handler for shared interrupt. */
3303static void
3304msk_legacy_intr(void *xsc)
3305{
3306 struct msk_softc *sc;
3307 struct msk_if_softc *sc_if0, *sc_if1;
3308 struct ifnet *ifp0, *ifp1;
3309 uint32_t status;
3310
3311 sc = xsc;
3312 MSK_LOCK(sc);
3313
3314 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
3315 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3316 if (status == 0 || status == 0xffffffff ||
3317 (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 ||
3318 (status & sc->msk_intrmask) == 0) {
3319 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3320 return;
3321 }
3322
3323 sc_if0 = sc->msk_if[MSK_PORT_A];
3324 sc_if1 = sc->msk_if[MSK_PORT_B];
3325 ifp0 = ifp1 = NULL;
3326 if (sc_if0 != NULL)
3327 ifp0 = sc_if0->msk_ifp;
3328 if (sc_if1 != NULL)
3329 ifp1 = sc_if1->msk_ifp;
3330
3331 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3332 msk_intr_phy(sc_if0);
3333 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3334 msk_intr_phy(sc_if1);
3335 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3336 msk_intr_gmac(sc_if0);
3337 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3338 msk_intr_gmac(sc_if1);
3339 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3340 device_printf(sc->msk_dev, "Rx descriptor error\n");
3341 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3342 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3343 CSR_READ_4(sc, B0_IMSK);
3344 }
3345 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3346 device_printf(sc->msk_dev, "Tx descriptor error\n");
3347 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3348 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3349 CSR_READ_4(sc, B0_IMSK);
3350 }
3351 if ((status & Y2_IS_HW_ERR) != 0)
3352 msk_intr_hwerr(sc);
3353
3354 while (msk_handle_events(sc) != 0)
3355 ;
3356 if ((status & Y2_IS_STAT_BMU) != 0)
3357 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3358
3359 /* Reenable interrupts. */
3360 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3361
3362 if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3363 !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3364 taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task);
3365 if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3366 !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3367 taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task);
3368
3369 MSK_UNLOCK(sc);
3370}
3371
3372static int
3373msk_intr(void *xsc)
3374{
3375 struct msk_softc *sc;
3376 uint32_t status;
3377
3378 sc = xsc;
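	/*
	 * Fast interrupt filter: only check whether the interrupt belongs
	 * to this controller and defer the real work to msk_int_task() on
	 * the taskqueue.
	 */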
3379 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3380 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
3381 if (status == 0 || status == 0xffffffff) {
3382 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3383 return (FILTER_STRAY);
3384 }
3385
3386 taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task);
3387 return (FILTER_HANDLED);
3388}
3389
3390static void
3391msk_int_task(void *arg, int pending)
3392{
3393 struct msk_softc *sc;
3394 struct msk_if_softc *sc_if0, *sc_if1;
3395 struct ifnet *ifp0, *ifp1;
3396 uint32_t status;
3397 int domore;
3398
3399 sc = arg;
3400 MSK_LOCK(sc);
3401
3402 /* Get interrupt source. */
3403 status = CSR_READ_4(sc, B0_ISRC);
3404 if (status == 0 || status == 0xffffffff ||
3405 (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 ||
3406 (status & sc->msk_intrmask) == 0)
3407 goto done;
3408
3409 sc_if0 = sc->msk_if[MSK_PORT_A];
3410 sc_if1 = sc->msk_if[MSK_PORT_B];
3411 ifp0 = ifp1 = NULL;
3412 if (sc_if0 != NULL)
3413 ifp0 = sc_if0->msk_ifp;
3414 if (sc_if1 != NULL)
3415 ifp1 = sc_if1->msk_ifp;
3416
3417 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3418 msk_intr_phy(sc_if0);
3419 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3420 msk_intr_phy(sc_if1);
3421 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3422 msk_intr_gmac(sc_if0);
3423 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3424 msk_intr_gmac(sc_if1);
3425 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3426 device_printf(sc->msk_dev, "Rx descriptor error\n");
3427 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3428 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3429 CSR_READ_4(sc, B0_IMSK);
3430 }
3431 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3432 device_printf(sc->msk_dev, "Tx descriptor error\n");
3433 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3434 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3435 CSR_READ_4(sc, B0_IMSK);
3436 }
3437 if ((status & Y2_IS_HW_ERR) != 0)
3438 msk_intr_hwerr(sc);
3439
3440 domore = msk_handle_events(sc);
3441 if ((status & Y2_IS_STAT_BMU) != 0)
3442 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3443
3444 if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3445 !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3446 taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task);
3447 if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3448 !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3449 taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task);
3450
3451 if (domore > 0) {
3452 taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task);
3453 MSK_UNLOCK(sc);
3454 return;
3455 }
3456done:
3457 MSK_UNLOCK(sc);
3458
3459 /* Reenable interrupts. */
3460 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3461}
3462
3463static void
3464msk_init(void *xsc)
3465{
3466 struct msk_if_softc *sc_if = xsc;
3467
3468 MSK_IF_LOCK(sc_if);
3469 msk_init_locked(sc_if);
3470 MSK_IF_UNLOCK(sc_if);
3471}
3472
3473static void
3474msk_init_locked(struct msk_if_softc *sc_if)
3475{
3476 struct msk_softc *sc;
3477 struct ifnet *ifp;
3478 struct mii_data *mii;
3479 uint16_t eaddr[ETHER_ADDR_LEN / 2];
3480 uint16_t gmac;
3481 int error, i;
3482
3483 MSK_IF_LOCK_ASSERT(sc_if);
3484
3485 ifp = sc_if->msk_ifp;
3486 sc = sc_if->msk_softc;
3487 mii = device_get_softc(sc_if->msk_miibus);
3488
3489 error = 0;
3490 /* Cancel pending I/O and free all Rx/Tx buffers. */
3491 msk_stop(sc_if);
3492
3493 if (ifp->if_mtu < ETHERMTU)
3494 sc_if->msk_framesize = ETHERMTU;
3495 else
3496 sc_if->msk_framesize = ifp->if_mtu;
3497 sc_if->msk_framesize += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3498 if (ifp->if_mtu > ETHERMTU &&
3499 (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
3500 ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
3501 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
3502 }
3503
3504 /*
3505 * Initialize GMAC first.
3506	 * Without this initialization the Rx MAC did not work as expected;
3507	 * it garbled status LEs, which resulted in out-of-order or
3508	 * duplicated frame delivery and, in turn, very poor Rx performance.
3509	 * (I had to write packet analysis code that could be embedded in
3510	 * the driver to diagnose this issue.)
3511	 * I spent almost two months fixing this. If I had had a datasheet
3512	 * for the Yukon II I wouldn't have encountered it. :-(
3513 */
3514 gmac = GM_GPCR_SPEED_100 | GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL;
3515 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
3516
3517	/* Dummy read of the Interrupt Source Register. */
3518 CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3519
3520 /* Clear MIB stats. */
3521 msk_stats_clear(sc_if);
3522
3523 /* Disable FCS. */
3524 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);
3525
3526 /* Setup Transmit Control Register. */
3527 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
3528
3529 /* Setup Transmit Flow Control Register. */
3530 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
3531
3532 /* Setup Transmit Parameter Register. */
3533 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
3534 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
3535 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
3536
3537 gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
3538 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
3539
3540 if (ifp->if_mtu > ETHERMTU)
3541 gmac |= GM_SMOD_JUMBO_ENA;
3542 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
3543
3544 /* Set station address. */
3545 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
3546 for (i = 0; i < ETHER_ADDR_LEN /2; i++)
3547 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4,
3548 eaddr[i]);
3549 for (i = 0; i < ETHER_ADDR_LEN /2; i++)
3550 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4,
3551 eaddr[i]);
3552
3553 /* Disable interrupts for counter overflows. */
3554 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
3555 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
3556 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
3557
3558 /* Configure Rx MAC FIFO. */
3559 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3560 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
3561 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3562 GMF_OPER_ON | GMF_RX_F_FL_ON);
3563
3564 /* Set receive filter. */
3565 msk_rxfilter(sc_if);
3566
3567 /* Flush Rx MAC FIFO on any flow control or error. */
3568 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
3569 GMR_FS_ANY_ERR);
3570
3571 /*
3572 * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
3573 * due to hardware hang on receipt of pause frames.
3574 */
3575 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR),
3576 RX_GMF_FL_THR_DEF + 1);
3577
3578 /* Configure Tx MAC FIFO. */
3579 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3580 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
3581 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
3582
3583 /* Configure hardware VLAN tag insertion/stripping. */
3584 msk_setvlan(sc_if, ifp);
3585
3586 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
3587		/* Set Rx Pause thresholds. */
3588 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
3589 MSK_ECU_LLPP);
3590 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
3591 MSK_ECU_ULPP);
3592 if (ifp->if_mtu > ETHERMTU) {
3593 /*
3594 * Set Tx GMAC FIFO Almost Empty Threshold.
3595 */
3596 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
3597 MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
3598 /* Disable Store & Forward mode for Tx. */
3599 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3600 TX_JUMBO_ENA | TX_STFW_DIS);
3601 } else {
3602 /* Enable Store & Forward mode for Tx. */
3603 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3604 TX_JUMBO_DIS | TX_STFW_ENA);
3605 }
3606 }
3607
3608 /*
3609 * Disable Force Sync bit and Alloc bit in Tx RAM interface
3610 * arbiter as we don't use Sync Tx queue.
3611 */
3612 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
3613 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
3614 /* Enable the RAM Interface Arbiter. */
3615 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
3616
3617 /* Setup RAM buffer. */
3618 msk_set_rambuffer(sc_if);
3619
3620 /* Disable Tx sync Queue. */
3621 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
3622
3623 /* Setup Tx Queue Bus Memory Interface. */
3624 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
3625 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
3626 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
3627 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
3628 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3629 sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
3630 /* Fix for Yukon-EC Ultra: set BMU FIFO level */
3631 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), MSK_ECU_TXFF_LEV);
3632 }
3633
3634 /* Setup Rx Queue Bus Memory Interface. */
3635 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
3636 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
3637 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
3638 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
3639 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3640 sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
3641 /* MAC Rx RAM Read is controlled by hardware. */
3642 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
3643 }
3644
3645 msk_set_prefetch(sc, sc_if->msk_txq,
3646 sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
3647 msk_init_tx_ring(sc_if);
3648
3649 /* Disable Rx checksum offload and RSS hash. */
3650 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3651 BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);
3652 if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) {
3653 msk_set_prefetch(sc, sc_if->msk_rxq,
3654 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
3655 MSK_JUMBO_RX_RING_CNT - 1);
3656 error = msk_init_jumbo_rx_ring(sc_if);
3657 } else {
3658 msk_set_prefetch(sc, sc_if->msk_rxq,
3659 sc_if->msk_rdata.msk_rx_ring_paddr,
3660 MSK_RX_RING_CNT - 1);
3661 error = msk_init_rx_ring(sc_if);
3662 }
3663 if (error != 0) {
3664 device_printf(sc_if->msk_if_dev,
3665 "initialization failed: no memory for Rx buffers\n");
3666 msk_stop(sc_if);
3667 return;
3668 }
3669
3670 /* Configure interrupt handling. */
3671 if (sc_if->msk_port == MSK_PORT_A) {
3672 sc->msk_intrmask |= Y2_IS_PORT_A;
3673 sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
3674 } else {
3675 sc->msk_intrmask |= Y2_IS_PORT_B;
3676 sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
3677 }
3678 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3679 CSR_READ_4(sc, B0_HWE_IMSK);
3680 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3681 CSR_READ_4(sc, B0_IMSK);
3682
3683 sc_if->msk_flags &= ~MSK_FLAG_LINK;
3684 mii_mediachg(mii);
3685
3686 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3687 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3688
3689 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3690}
3691
3692static void
3693msk_set_rambuffer(struct msk_if_softc *sc_if)
3694{
3695 struct msk_softc *sc;
3696 int ltpp, utpp;
3697
3698 sc = sc_if->msk_softc;
3699 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
3700 return;
3701
3702 /* Setup Rx Queue. */
3703 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
3704 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
3705 sc->msk_rxqstart[sc_if->msk_port] / 8);
3706 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
3707 sc->msk_rxqend[sc_if->msk_port] / 8);
3708 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
3709 sc->msk_rxqstart[sc_if->msk_port] / 8);
3710 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
3711 sc->msk_rxqstart[sc_if->msk_port] / 8);
3712
3713 utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3714 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
3715 ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3716 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
3717 if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
3718 ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
3719 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
3720 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
3721 /* Set Rx priority(RB_RX_UTHP/RB_RX_LTHP) thresholds? */
3722
3723 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
3724 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));
3725
3726 /* Setup Tx Queue. */
3727 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
3728 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
3729 sc->msk_txqstart[sc_if->msk_port] / 8);
3730 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
3731 sc->msk_txqend[sc_if->msk_port] / 8);
3732 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
3733 sc->msk_txqstart[sc_if->msk_port] / 8);
3734 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
3735 sc->msk_txqstart[sc_if->msk_port] / 8);
3736 /* Enable Store & Forward for Tx side. */
3737 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
3738 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
3739 CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
3740}
3741
3742static void
3743msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
3744 uint32_t count)
3745{
3746
3747 /* Reset the prefetch unit. */
3748 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3749 PREF_UNIT_RST_SET);
3750 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3751 PREF_UNIT_RST_CLR);
3752 /* Set LE base address. */
3753 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
3754 MSK_ADDR_LO(addr));
3755 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
3756 MSK_ADDR_HI(addr));
3757 /* Set the list last index. */
3758 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
3759 count);
3760 /* Turn on prefetch unit. */
3761 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3762 PREF_UNIT_OP_ON);
3763 /* Dummy read to ensure write. */
3764 CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
3765}
3766
3767static void
3768msk_stop(struct msk_if_softc *sc_if)
3769{
3770 struct msk_softc *sc;
3771 struct msk_txdesc *txd;
3772 struct msk_rxdesc *rxd;
3773 struct msk_rxdesc *jrxd;
3774 struct ifnet *ifp;
3775 uint32_t val;
3776 int i;
3777
3778 MSK_IF_LOCK_ASSERT(sc_if);
3779 sc = sc_if->msk_softc;
3780 ifp = sc_if->msk_ifp;
3781
3782 callout_stop(&sc_if->msk_tick_ch);
3783 sc_if->msk_watchdog_timer = 0;
3784
3785 /* Disable interrupts. */
3786 if (sc_if->msk_port == MSK_PORT_A) {
3787 sc->msk_intrmask &= ~Y2_IS_PORT_A;
3788 sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
3789 } else {
3790 sc->msk_intrmask &= ~Y2_IS_PORT_B;
3791 sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
3792 }
3793 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3794 CSR_READ_4(sc, B0_HWE_IMSK);
3795 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3796 CSR_READ_4(sc, B0_IMSK);
3797
3798 /* Disable Tx/Rx MAC. */
3799 val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3800 val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
3801 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
3802 /* Read again to ensure writing. */
3803 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3804 /* Update stats and clear counters. */
3805 msk_stats_update(sc_if);
3806
3807 /* Stop Tx BMU. */
3808 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
3809 val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
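	/*
	 * Re-issue the stop command until the BMU reports stopped or idle,
	 * or MSK_TIMEOUT iterations elapse.
	 */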
3810 for (i = 0; i < MSK_TIMEOUT; i++) {
3811 if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
3812 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3813 BMU_STOP);
3814 val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
3815 } else
3816 break;
3817 DELAY(1);
3818 }
3819 if (i == MSK_TIMEOUT)
3820 device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
3821 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
3822 RB_RST_SET | RB_DIS_OP_MD);
3823
3824	/* Disable all GMAC interrupts. */
3825 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
3826 /* Disable PHY interrupt. */
3827 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
3828
3829 /* Disable the RAM Interface Arbiter. */
3830 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);
3831
3832 /* Reset the PCI FIFO of the async Tx queue */
3833 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3834 BMU_RST_SET | BMU_FIFO_RST);
3835
3836 /* Reset the Tx prefetch units. */
3837 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
3838 PREF_UNIT_RST_SET);
3839
3840 /* Reset the RAM Buffer async Tx queue. */
3841 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);
3842
3843 /* Reset Tx MAC FIFO. */
3844 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3845 /* Set Pause Off. */
3846 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
3847
3848 /*
3849 * The Rx Stop command will not work for Yukon-2 if the BMU does not
3850	 * reach the end of a packet, and since we cannot be sure that there
3851	 * is no incoming data, we must reset the BMU while it is not in the
3852	 * middle of a DMA transfer. Since the Rx path may still be active,
3853 * the Rx RAM buffer will be stopped first, so any possible incoming
3854 * data will not trigger a DMA. After the RAM buffer is stopped, the
3855 * BMU is polled until any DMA in progress is ended and only then it
3856 * will be reset.
3857 */
3858
3859 /* Disable the RAM Buffer receive queue. */
3860 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
3861 for (i = 0; i < MSK_TIMEOUT; i++) {
3862 if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
3863 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
3864 break;
3865 DELAY(1);
3866 }
3867 if (i == MSK_TIMEOUT)
3868 device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
3869 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3870 BMU_RST_SET | BMU_FIFO_RST);
3871 /* Reset the Rx prefetch unit. */
3872 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
3873 PREF_UNIT_RST_SET);
3874 /* Reset the RAM Buffer receive queue. */
3875 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
3876 /* Reset Rx MAC FIFO. */
3877 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3878
3879 /* Free Rx and Tx mbufs still in the queues. */
3880 for (i = 0; i < MSK_RX_RING_CNT; i++) {
3881 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
3882 if (rxd->rx_m != NULL) {
3883 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
3884 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3885 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
3886 rxd->rx_dmamap);
3887 m_freem(rxd->rx_m);
3888 rxd->rx_m = NULL;
3889 }
3890 }
3891 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
3892 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
3893 if (jrxd->rx_m != NULL) {
3894 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
3895 jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3896 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
3897 jrxd->rx_dmamap);
3898 m_freem(jrxd->rx_m);
3899 jrxd->rx_m = NULL;
3900 }
3901 }
3902 for (i = 0; i < MSK_TX_RING_CNT; i++) {
3903 txd = &sc_if->msk_cdata.msk_txdesc[i];
3904 if (txd->tx_m != NULL) {
3905 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
3906 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3907 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
3908 txd->tx_dmamap);
3909 m_freem(txd->tx_m);
3910 txd->tx_m = NULL;
3911 }
3912 }
3913
3914 /*
3915 * Mark the interface down.
3916 */
3917 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3918 sc_if->msk_flags &= ~MSK_FLAG_LINK;
3919}
3920
3921/*
3922 * When the GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading the
3923 * lower half of a counter clears its high 16 bits, so the lower
3924 * 16 bits must be read last.
3925 */
3926#define MSK_READ_MIB32(x, y) \
3927 (((uint32_t)GMAC_READ_2(sc, x, (y) + 4)) << 16) + \
3928 (uint32_t)GMAC_READ_2(sc, x, y)
3929#define MSK_READ_MIB64(x, y) \
3930 (((uint64_t)MSK_READ_MIB32(x, (y) + 8)) << 32) + \
3931 (uint64_t)MSK_READ_MIB32(x, y)
3932
3933static void
3934msk_stats_clear(struct msk_if_softc *sc_if)
3935{
3936 struct msk_softc *sc;
3937 uint32_t reg;
3938 uint16_t gmac;
3939 int i;
3940
3941 MSK_IF_LOCK_ASSERT(sc_if);
3942
3943 sc = sc_if->msk_softc;
3944 /* Set MIB Clear Counter Mode. */
3945 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
3946 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
3947 /* Read all MIB Counters with Clear Mode set. */
3948 for (i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i++)
3949 reg = MSK_READ_MIB32(sc_if->msk_port, i);
3950 /* Clear MIB Clear Counter Mode. */
3951 gmac &= ~GM_PAR_MIB_CLR;
3952 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
3953}
3954
3955static void
3956msk_stats_update(struct msk_if_softc *sc_if)
3957{
3958 struct msk_softc *sc;
3959 struct ifnet *ifp;
3960 struct msk_hw_stats *stats;
3961 uint16_t gmac;
3962 uint32_t reg;
3963
3964 MSK_IF_LOCK_ASSERT(sc_if);
3965
3966 ifp = sc_if->msk_ifp;
3967 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
3968 return;
3969 sc = sc_if->msk_softc;
3970 stats = &sc_if->msk_stats;
3971 /* Set MIB Clear Counter Mode. */
3972 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
3973 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
3974
3975 /* Rx stats. */
3976 stats->rx_ucast_frames +=
3977 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_UC_OK);
3978 stats->rx_bcast_frames +=
3979 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_BC_OK);
3980 stats->rx_pause_frames +=
3981 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MPAUSE);
3982 stats->rx_mcast_frames +=
3983 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MC_OK);
3984 stats->rx_crc_errs +=
3985 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_FCS_ERR);
3986 reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE1);
3987 stats->rx_good_octets +=
3988 MSK_READ_MIB64(sc_if->msk_port, GM_RXO_OK_LO);
3989 stats->rx_bad_octets +=
3990 MSK_READ_MIB64(sc_if->msk_port, GM_RXO_ERR_LO);
3991 stats->rx_runts +=
3992 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SHT);
3993 stats->rx_runt_errs +=
3994 MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FRAG);
3995 stats->rx_pkts_64 +=
3996 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_64B);
3997 stats->rx_pkts_65_127 +=
3998 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_127B);
3999 stats->rx_pkts_128_255 +=
4000 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_255B);
4001 stats->rx_pkts_256_511 +=
4002 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_511B);
4003 stats->rx_pkts_512_1023 +=
4004 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1023B);
4005 stats->rx_pkts_1024_1518 +=
4006 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1518B);
4007 stats->rx_pkts_1519_max +=
4008 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MAX_SZ);
4009 stats->rx_pkts_too_long +=
4010 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_LNG_ERR);
4011 stats->rx_pkts_jabbers +=
4012 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_JAB_PKT);
4013 reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE2);
4014 stats->rx_fifo_oflows +=
4015 MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FIFO_OV);
4016 reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE3);
4017
4018 /* Tx stats. */
4019 stats->tx_ucast_frames +=
4020 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_UC_OK);
4021 stats->tx_bcast_frames +=
4022 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_BC_OK);
4023 stats->tx_pause_frames +=
4024 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MPAUSE);
4025 stats->tx_mcast_frames +=
4026 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MC_OK);
4027 stats->tx_octets +=
4028 MSK_READ_MIB64(sc_if->msk_port, GM_TXO_OK_LO);
4029 stats->tx_pkts_64 +=
4030 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_64B);
4031 stats->tx_pkts_65_127 +=
4032 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_127B);
4033 stats->tx_pkts_128_255 +=
4034 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_255B);
4035 stats->tx_pkts_256_511 +=
4036 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_511B);
4037 stats->tx_pkts_512_1023 +=
4038 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1023B);
4039 stats->tx_pkts_1024_1518 +=
4040 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1518B);
4041 stats->tx_pkts_1519_max +=
4042 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MAX_SZ);
4043 reg = MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SPARE1);
4044 stats->tx_colls +=
4045 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_COL);
4046 stats->tx_late_colls +=
4047 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_LAT_COL);
4048 stats->tx_excess_colls +=
4049 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_ABO_COL);
4050 stats->tx_multi_colls +=
4051 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MUL_COL);
4052 stats->tx_single_colls +=
4053 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SNG_COL);
4054 stats->tx_underflows +=
4055 MSK_READ_MIB32(sc_if->msk_port, GM_TXE_FIFO_UR);
4056 /* Clear MIB Clear Counter Mode. */
4057 gmac &= ~GM_PAR_MIB_CLR;
4058 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
4059}
4060
4061static int
4062msk_sysctl_stat32(SYSCTL_HANDLER_ARGS)
4063{
4064 struct msk_softc *sc;
4065 struct msk_if_softc *sc_if;
4066 uint32_t result, *stat;
4067 int off;
4068
4069 sc_if = (struct msk_if_softc *)arg1;
4070 sc = sc_if->msk_softc;
4071 off = arg2;
4072 stat = (uint32_t *)((uint8_t *)&sc_if->msk_stats + off);
4073
4074 MSK_IF_LOCK(sc_if);
4075 result = MSK_READ_MIB32(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
4076 result += *stat;
4077 MSK_IF_UNLOCK(sc_if);
4078
4079 return (sysctl_handle_int(oidp, &result, 0, req));
4080}
4081
4082static int
4083msk_sysctl_stat64(SYSCTL_HANDLER_ARGS)
4084{
4085 struct msk_softc *sc;
4086 struct msk_if_softc *sc_if;
4087 uint64_t result, *stat;
4088 int off;
4089
4090 sc_if = (struct msk_if_softc *)arg1;
4091 sc = sc_if->msk_softc;
4092 off = arg2;
4093 stat = (uint64_t *)((uint8_t *)&sc_if->msk_stats + off);
4094
4095 MSK_IF_LOCK(sc_if);
4096 result = MSK_READ_MIB64(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
4097 result += *stat;
4098 MSK_IF_UNLOCK(sc_if);
4099
4100 return (sysctl_handle_quad(oidp, &result, 0, req));
4101}
4102
4103#undef MSK_READ_MIB32
4104#undef MSK_READ_MIB64
4105
4106#define MSK_SYSCTL_STAT32(sc, c, o, p, n, d) \
4107 SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_UINT | CTLFLAG_RD, \
4108 sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat32, \
4109 "IU", d)
4110#define MSK_SYSCTL_STAT64(sc, c, o, p, n, d) \
4111 SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_UINT | CTLFLAG_RD, \
4112 sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat64, \
4113 "Q", d)
4114
4115static void
4116msk_sysctl_node(struct msk_if_softc *sc_if)
4117{
4118 struct sysctl_ctx_list *ctx;
4119 struct sysctl_oid_list *child, *schild;
4120 struct sysctl_oid *tree;
4121
4122 ctx = device_get_sysctl_ctx(sc_if->msk_if_dev);
4123 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc_if->msk_if_dev));
4124
4125 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
4126 NULL, "MSK Statistics");
4127 schild = child = SYSCTL_CHILDREN(tree);
4128 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
4129 NULL, "MSK RX Statistics");
4130 child = SYSCTL_CHILDREN(tree);
4131 MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
4132 child, rx_ucast_frames, "Good unicast frames");
4133 MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
4134 child, rx_bcast_frames, "Good broadcast frames");
4135 MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
4136 child, rx_pause_frames, "Pause frames");
4137 MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
4138 child, rx_mcast_frames, "Multicast frames");
4139 MSK_SYSCTL_STAT32(sc_if, ctx, "crc_errs",
4140 child, rx_crc_errs, "CRC errors");
4141 MSK_SYSCTL_STAT64(sc_if, ctx, "good_octets",
4142 child, rx_good_octets, "Good octets");
4143 MSK_SYSCTL_STAT64(sc_if, ctx, "bad_octets",
4144 child, rx_bad_octets, "Bad octets");
4145 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
4146 child, rx_pkts_64, "64 bytes frames");
4147 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
4148 child, rx_pkts_65_127, "65 to 127 bytes frames");
4149 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
4150 child, rx_pkts_128_255, "128 to 255 bytes frames");
4151 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
4152 child, rx_pkts_256_511, "256 to 511 bytes frames");
4153 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
4154 child, rx_pkts_512_1023, "512 to 1023 bytes frames");
4155 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
4156 child, rx_pkts_1024_1518, "1024 to 1518 bytes frames");
4157 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
4158 child, rx_pkts_1519_max, "1519 to max frames");
4159 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_too_long",
4160 child, rx_pkts_too_long, "frames too long");
4161 MSK_SYSCTL_STAT32(sc_if, ctx, "jabbers",
4162 child, rx_pkts_jabbers, "Jabber errors");
4163 MSK_SYSCTL_STAT32(sc_if, ctx, "overflows",
4164 child, rx_fifo_oflows, "FIFO overflows");
4165
4166 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
4167 NULL, "MSK TX Statistics");
4168 child = SYSCTL_CHILDREN(tree);
4169 MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
4170 child, tx_ucast_frames, "Unicast frames");
4171 MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
4172 child, tx_bcast_frames, "Broadcast frames");
4173 MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
4174 child, tx_pause_frames, "Pause frames");
4175 MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
4176 child, tx_mcast_frames, "Multicast frames");
4177 MSK_SYSCTL_STAT64(sc_if, ctx, "octets",
4178 child, tx_octets, "Octets");
4179 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
4180 child, tx_pkts_64, "64 bytes frames");
4181 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
4182 child, tx_pkts_65_127, "65 to 127 bytes frames");
4183 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
4184 child, tx_pkts_128_255, "128 to 255 bytes frames");
4185 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
4186 child, tx_pkts_256_511, "256 to 511 bytes frames");
4187 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
4188 child, tx_pkts_512_1023, "512 to 1023 bytes frames");
4189 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
4190 child, tx_pkts_1024_1518, "1024 to 1518 bytes frames");
4191 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
4192 child, tx_pkts_1519_max, "1519 to max frames");
4193 MSK_SYSCTL_STAT32(sc_if, ctx, "colls",
4194 child, tx_colls, "Collisions");
4195 MSK_SYSCTL_STAT32(sc_if, ctx, "late_colls",
4196 child, tx_late_colls, "Late collisions");
4197 MSK_SYSCTL_STAT32(sc_if, ctx, "excess_colls",
4198 child, tx_excess_colls, "Excessive collisions");
4199 MSK_SYSCTL_STAT32(sc_if, ctx, "multi_colls",
4200 child, tx_multi_colls, "Multiple collisions");
4201 MSK_SYSCTL_STAT32(sc_if, ctx, "single_colls",
4202 child, tx_single_colls, "Single collisions");
4203 MSK_SYSCTL_STAT32(sc_if, ctx, "underflows",
4204 child, tx_underflows, "FIFO underflows");
4205}
4206
4207#undef MSK_SYSCTL_STAT32
4208#undef MSK_SYSCTL_STAT64
4209
4210static int
4211sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
4212{
4213 int error, value;
4214
4215 if (!arg1)
4216 return (EINVAL);
4217 value = *(int *)arg1;
4218 error = sysctl_handle_int(oidp, &value, 0, req);
4219 if (error || !req->newptr)
4220 return (error);
4221 if (value < low || value > high)
4222 return (EINVAL);
4223 *(int *)arg1 = value;
4224
4225 return (0);
4226}
4227
4228static int
4229sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
4230{
4231
4232 return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
4233 MSK_PROC_MAX));
4234}
2580 }
2581
2582 si = prod;
2583 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2584 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
2585 if (tso == 0)
2586 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2587 OP_PACKET);
2588 else
2589 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2590 OP_LARGESEND);
2591 sc_if->msk_cdata.msk_tx_cnt++;
2592 MSK_INC(prod, MSK_TX_RING_CNT);
2593
2594 for (i = 1; i < nseg; i++) {
2595 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2596 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
2597 tx_le->msk_control = htole32(txsegs[i].ds_len | control |
2598 OP_BUFFER | HW_OWNER);
2599 sc_if->msk_cdata.msk_tx_cnt++;
2600 MSK_INC(prod, MSK_TX_RING_CNT);
2601 }
2602 /* Update producer index. */
2603 sc_if->msk_cdata.msk_tx_prod = prod;
2604
2605 /* Set EOP on the last desciptor. */
2606 prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
2607 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2608 tx_le->msk_control |= htole32(EOP);
2609
2610 /* Turn the first descriptor ownership to hardware. */
2611 tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
2612 tx_le->msk_control |= htole32(HW_OWNER);
2613
2614 txd = &sc_if->msk_cdata.msk_txdesc[prod];
2615 map = txd_last->tx_dmamap;
2616 txd_last->tx_dmamap = txd->tx_dmamap;
2617 txd->tx_dmamap = map;
2618 txd->tx_m = m;
2619
2620 /* Sync descriptors. */
2621 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
2622 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
2623 sc_if->msk_cdata.msk_tx_ring_map,
2624 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2625
2626 return (0);
2627}
2628
2629static void
2630msk_tx_task(void *arg, int pending)
2631{
2632 struct ifnet *ifp;
2633
2634 ifp = arg;
2635 msk_start(ifp);
2636}
2637
2638static void
2639msk_start(struct ifnet *ifp)
2640{
2641 struct msk_if_softc *sc_if;
2642 struct mbuf *m_head;
2643 int enq;
2644
2645 sc_if = ifp->if_softc;
2646
2647 MSK_IF_LOCK(sc_if);
2648
2649 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2650 IFF_DRV_RUNNING || (sc_if->msk_flags & MSK_FLAG_LINK) == 0) {
2651 MSK_IF_UNLOCK(sc_if);
2652 return;
2653 }
2654
2655 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2656 sc_if->msk_cdata.msk_tx_cnt <
2657 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
2658 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2659 if (m_head == NULL)
2660 break;
2661 /*
2662 * Pack the data into the transmit ring. If we
2663 * don't have room, set the OACTIVE flag and wait
2664 * for the NIC to drain the ring.
2665 */
2666 if (msk_encap(sc_if, &m_head) != 0) {
2667 if (m_head == NULL)
2668 break;
2669 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2670 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2671 break;
2672 }
2673
2674 enq++;
2675 /*
2676 * If there's a BPF listener, bounce a copy of this frame
2677 * to him.
2678 */
2679 ETHER_BPF_MTAP(ifp, m_head);
2680 }
2681
2682 if (enq > 0) {
2683 /* Transmit */
2684 CSR_WRITE_2(sc_if->msk_softc,
2685 Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
2686 sc_if->msk_cdata.msk_tx_prod);
2687
2688 /* Set a timeout in case the chip goes out to lunch. */
2689 sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT;
2690 }
2691
2692 MSK_IF_UNLOCK(sc_if);
2693}
2694
2695static void
2696msk_watchdog(struct msk_if_softc *sc_if)
2697{
2698 struct ifnet *ifp;
2699 uint32_t ridx;
2700 int idx;
2701
2702 MSK_IF_LOCK_ASSERT(sc_if);
2703
2704 if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer)
2705 return;
2706 ifp = sc_if->msk_ifp;
2707 if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) {
2708 if (bootverbose)
2709 if_printf(sc_if->msk_ifp, "watchdog timeout "
2710 "(missed link)\n");
2711 ifp->if_oerrors++;
2712 msk_init_locked(sc_if);
2713 return;
2714 }
2715
2716 /*
2717 * Reclaim first as there is a possibility of losing Tx completion
2718 * interrupts.
2719 */
2720 ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX;
2721 idx = CSR_READ_2(sc_if->msk_softc, ridx);
2722 if (sc_if->msk_cdata.msk_tx_cons != idx) {
2723 msk_txeof(sc_if, idx);
2724 if (sc_if->msk_cdata.msk_tx_cnt == 0) {
2725 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2726 "-- recovering\n");
2727 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2728 taskqueue_enqueue(taskqueue_fast,
2729 &sc_if->msk_tx_task);
2730 return;
2731 }
2732 }
2733
2734 if_printf(ifp, "watchdog timeout\n");
2735 ifp->if_oerrors++;
2736 msk_init_locked(sc_if);
2737 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2738 taskqueue_enqueue(taskqueue_fast, &sc_if->msk_tx_task);
2739}
2740
2741static int
2742mskc_shutdown(device_t dev)
2743{
2744 struct msk_softc *sc;
2745 int i;
2746
2747 sc = device_get_softc(dev);
2748 MSK_LOCK(sc);
2749 for (i = 0; i < sc->msk_num_port; i++) {
2750 if (sc->msk_if[i] != NULL)
2751 msk_stop(sc->msk_if[i]);
2752 }
2753
2754 /* Disable all interrupts. */
2755 CSR_WRITE_4(sc, B0_IMSK, 0);
2756 CSR_READ_4(sc, B0_IMSK);
2757 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2758 CSR_READ_4(sc, B0_HWE_IMSK);
2759
2760 /* Put hardware reset. */
2761 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2762
2763 MSK_UNLOCK(sc);
2764 return (0);
2765}
2766
2767static int
2768mskc_suspend(device_t dev)
2769{
2770 struct msk_softc *sc;
2771 int i;
2772
2773 sc = device_get_softc(dev);
2774
2775 MSK_LOCK(sc);
2776
2777 for (i = 0; i < sc->msk_num_port; i++) {
2778 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2779 ((sc->msk_if[i]->msk_ifp->if_drv_flags &
2780 IFF_DRV_RUNNING) != 0))
2781 msk_stop(sc->msk_if[i]);
2782 }
2783
2784 /* Disable all interrupts. */
2785 CSR_WRITE_4(sc, B0_IMSK, 0);
2786 CSR_READ_4(sc, B0_IMSK);
2787 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2788 CSR_READ_4(sc, B0_HWE_IMSK);
2789
2790 msk_phy_power(sc, MSK_PHY_POWERDOWN);
2791
2792 /* Put hardware reset. */
2793 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2794 sc->msk_pflags |= MSK_FLAG_SUSPEND;
2795
2796 MSK_UNLOCK(sc);
2797
2798 return (0);
2799}
2800
2801static int
2802mskc_resume(device_t dev)
2803{
2804 struct msk_softc *sc;
2805 int i;
2806
2807 sc = device_get_softc(dev);
2808
2809 MSK_LOCK(sc);
2810
2811 mskc_reset(sc);
2812 for (i = 0; i < sc->msk_num_port; i++) {
2813 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2814 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0))
2815 msk_init_locked(sc->msk_if[i]);
2816 }
2817 sc->msk_pflags &= ~MSK_FLAG_SUSPEND;
2818
2819 MSK_UNLOCK(sc);
2820
2821 return (0);
2822}
2823
2824#ifndef __NO_STRICT_ALIGNMENT
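/*
 * Copy a received frame back by (MSK_RX_BUF_ALIGN - ETHER_ALIGN) bytes,
 * one 16-bit word at a time, so that the payload behind the Ethernet
 * header ends up properly aligned on strict-alignment architectures.
 * Callers only invoke this for controllers flagged MSK_FLAG_RAMBUF,
 * presumably because their Rx buffers start on MSK_RX_BUF_ALIGN
 * boundaries.
 */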
2825static __inline void
2826msk_fixup_rx(struct mbuf *m)
2827{
2828 int i;
2829 uint16_t *src, *dst;
2830
2831 src = mtod(m, uint16_t *);
2832 dst = src - 3;
2833
2834 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
2835 *dst++ = *src++;
2836
2837 m->m_data -= (MSK_RX_BUF_ALIGN - ETHER_ALIGN);
2838}
2839#endif
2840
2841static void
2842msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
2843{
2844 struct mbuf *m;
2845 struct ifnet *ifp;
2846 struct msk_rxdesc *rxd;
2847 int cons, rxlen;
2848
2849 ifp = sc_if->msk_ifp;
2850
2851 MSK_IF_LOCK_ASSERT(sc_if);
2852
2853 cons = sc_if->msk_cdata.msk_rx_cons;
2854 do {
2855 rxlen = status >> 16;
2856 if ((status & GMR_FS_VLAN) != 0 &&
2857 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2858 rxlen -= ETHER_VLAN_ENCAP_LEN;
2859 if (len > sc_if->msk_framesize ||
2860 ((status & GMR_FS_ANY_ERR) != 0) ||
2861 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
 2862			/* Don't count flow-control packets as errors. */
2863 if ((status & GMR_FS_GOOD_FC) == 0)
2864 ifp->if_ierrors++;
2865 msk_discard_rxbuf(sc_if, cons);
2866 break;
2867 }
2868 rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
2869 m = rxd->rx_m;
2870 if (msk_newbuf(sc_if, cons) != 0) {
2871 ifp->if_iqdrops++;
2872 /* Reuse old buffer. */
2873 msk_discard_rxbuf(sc_if, cons);
2874 break;
2875 }
2876 m->m_pkthdr.rcvif = ifp;
2877 m->m_pkthdr.len = m->m_len = len;
2878#ifndef __NO_STRICT_ALIGNMENT
2879 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
2880 msk_fixup_rx(m);
2881#endif
2882 ifp->if_ipackets++;
2883 /* Check for VLAN tagged packets. */
2884 if ((status & GMR_FS_VLAN) != 0 &&
2885 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2886 m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
2887 m->m_flags |= M_VLANTAG;
2888 }
2889 MSK_IF_UNLOCK(sc_if);
2890 (*ifp->if_input)(ifp, m);
2891 MSK_IF_LOCK(sc_if);
2892 } while (0);
2893
2894 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
2895 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
2896}
2897
2898static void
2899msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
2900{
2901 struct mbuf *m;
2902 struct ifnet *ifp;
2903 struct msk_rxdesc *jrxd;
2904 int cons, rxlen;
2905
2906 ifp = sc_if->msk_ifp;
2907
2908 MSK_IF_LOCK_ASSERT(sc_if);
2909
2910 cons = sc_if->msk_cdata.msk_rx_cons;
2911 do {
2912 rxlen = status >> 16;
2913 if ((status & GMR_FS_VLAN) != 0 &&
2914 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2915 rxlen -= ETHER_VLAN_ENCAP_LEN;
2916 if (len > sc_if->msk_framesize ||
2917 ((status & GMR_FS_ANY_ERR) != 0) ||
2918 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
 2919			/* Don't count flow-control packets as errors. */
2920 if ((status & GMR_FS_GOOD_FC) == 0)
2921 ifp->if_ierrors++;
2922 msk_discard_jumbo_rxbuf(sc_if, cons);
2923 break;
2924 }
2925 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
2926 m = jrxd->rx_m;
2927 if (msk_jumbo_newbuf(sc_if, cons) != 0) {
2928 ifp->if_iqdrops++;
2929 /* Reuse old buffer. */
2930 msk_discard_jumbo_rxbuf(sc_if, cons);
2931 break;
2932 }
2933 m->m_pkthdr.rcvif = ifp;
2934 m->m_pkthdr.len = m->m_len = len;
2935#ifndef __NO_STRICT_ALIGNMENT
2936 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
2937 msk_fixup_rx(m);
2938#endif
2939 ifp->if_ipackets++;
2940 /* Check for VLAN tagged packets. */
2941 if ((status & GMR_FS_VLAN) != 0 &&
2942 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2943 m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
2944 m->m_flags |= M_VLANTAG;
2945 }
2946 MSK_IF_UNLOCK(sc_if);
2947 (*ifp->if_input)(ifp, m);
2948 MSK_IF_LOCK(sc_if);
2949 } while (0);
2950
2951 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
2952 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
2953}
2954
2955static void
2956msk_txeof(struct msk_if_softc *sc_if, int idx)
2957{
2958 struct msk_txdesc *txd;
2959 struct msk_tx_desc *cur_tx;
2960 struct ifnet *ifp;
2961 uint32_t control;
2962 int cons, prog;
2963
2964 MSK_IF_LOCK_ASSERT(sc_if);
2965
2966 ifp = sc_if->msk_ifp;
2967
2968 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
2969 sc_if->msk_cdata.msk_tx_ring_map,
2970 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2971 /*
2972 * Go through our tx ring and free mbufs for those
2973 * frames that have been sent.
2974 */
2975 cons = sc_if->msk_cdata.msk_tx_cons;
2976 prog = 0;
2977 for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
2978 if (sc_if->msk_cdata.msk_tx_cnt <= 0)
2979 break;
2980 prog++;
2981 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
2982 control = le32toh(cur_tx->msk_control);
2983 sc_if->msk_cdata.msk_tx_cnt--;
2984 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2985 if ((control & EOP) == 0)
2986 continue;
2987 txd = &sc_if->msk_cdata.msk_txdesc[cons];
2988 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap,
2989 BUS_DMASYNC_POSTWRITE);
2990 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);
2991
2992 ifp->if_opackets++;
2993 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
2994 __func__));
2995 m_freem(txd->tx_m);
2996 txd->tx_m = NULL;
2997 }
2998
2999 if (prog > 0) {
3000 sc_if->msk_cdata.msk_tx_cons = cons;
3001 if (sc_if->msk_cdata.msk_tx_cnt == 0)
3002 sc_if->msk_watchdog_timer = 0;
3003 /* No need to sync LEs as we didn't update LEs. */
3004 }
3005}
3006
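/*
 * Per-interface 1 Hz callout: drive the MII state machine, run the Tx
 * watchdog and reschedule itself.  Runs with the interface lock held.
 */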
3007static void
3008msk_tick(void *xsc_if)
3009{
3010 struct msk_if_softc *sc_if;
3011 struct mii_data *mii;
3012
3013 sc_if = xsc_if;
3014
3015 MSK_IF_LOCK_ASSERT(sc_if);
3016
3017 mii = device_get_softc(sc_if->msk_miibus);
3018
3019 mii_tick(mii);
3020 msk_watchdog(sc_if);
3021 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3022}
3023
3024static void
3025msk_intr_phy(struct msk_if_softc *sc_if)
3026{
3027 uint16_t status;
3028
3029 msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3030 status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3031 /* Handle FIFO Underrun/Overflow? */
3032 if ((status & PHY_M_IS_FIFO_ERROR))
3033 device_printf(sc_if->msk_if_dev,
3034 "PHY FIFO underrun/overflow.\n");
3035}
3036
3037static void
3038msk_intr_gmac(struct msk_if_softc *sc_if)
3039{
3040 struct msk_softc *sc;
3041 uint8_t status;
3042
3043 sc = sc_if->msk_softc;
3044 status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3045
3046 /* GMAC Rx FIFO overrun. */
3047 if ((status & GM_IS_RX_FF_OR) != 0) {
3048 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3049 GMF_CLI_RX_FO);
3050 device_printf(sc_if->msk_if_dev, "Rx FIFO overrun!\n");
3051 }
3052 /* GMAC Tx FIFO underrun. */
3053 if ((status & GM_IS_TX_FF_UR) != 0) {
3054 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3055 GMF_CLI_TX_FU);
3056 device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
3057 /*
3058 * XXX
3059 * In case of Tx underrun, we may need to flush/reset
3060 * Tx MAC but that would also require resynchronization
 3061		 * with status LEs. Reinitializing status LEs would
 3062		 * affect the other port in a dual MAC configuration, so
 3063		 * it should be avoided as much as possible.
 3064		 * Due to the lack of documentation this is all a vague
 3065		 * guess, but it needs more investigation.
3066 */
3067 }
3068}
3069
3070static void
3071msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
3072{
3073 struct msk_softc *sc;
3074
3075 sc = sc_if->msk_softc;
3076 if ((status & Y2_IS_PAR_RD1) != 0) {
3077 device_printf(sc_if->msk_if_dev,
3078 "RAM buffer read parity error\n");
3079 /* Clear IRQ. */
3080 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3081 RI_CLR_RD_PERR);
3082 }
3083 if ((status & Y2_IS_PAR_WR1) != 0) {
3084 device_printf(sc_if->msk_if_dev,
3085 "RAM buffer write parity error\n");
3086 /* Clear IRQ. */
3087 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3088 RI_CLR_WR_PERR);
3089 }
3090 if ((status & Y2_IS_PAR_MAC1) != 0) {
3091 device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
3092 /* Clear IRQ. */
3093 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3094 GMF_CLI_TX_PE);
3095 }
3096 if ((status & Y2_IS_PAR_RX1) != 0) {
3097 device_printf(sc_if->msk_if_dev, "Rx parity error\n");
3098 /* Clear IRQ. */
3099 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
3100 }
3101 if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
3102 device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
3103 /* Clear IRQ. */
3104 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
3105 }
3106}
3107
3108static void
3109msk_intr_hwerr(struct msk_softc *sc)
3110{
3111 uint32_t status;
3112 uint32_t tlphead[4];
3113
3114 status = CSR_READ_4(sc, B0_HWE_ISRC);
3115 /* Time Stamp timer overflow. */
3116 if ((status & Y2_IS_TIST_OV) != 0)
3117 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
3118 if ((status & Y2_IS_PCI_NEXP) != 0) {
3119 /*
3120 * PCI Express Error occured which is not described in PEX
3121 * spec.
3122 * This error is also mapped either to Master Abort(
3123 * Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
3124 * can only be cleared there.
3125 */
3126 device_printf(sc->msk_dev,
3127 "PCI Express protocol violation error\n");
3128 }
3129
3130 if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
3131 uint16_t v16;
3132
3133 if ((status & Y2_IS_MST_ERR) != 0)
3134 device_printf(sc->msk_dev,
3135 "unexpected IRQ Status error\n");
3136 else
3137 device_printf(sc->msk_dev,
3138 "unexpected IRQ Master error\n");
3139 /* Reset all bits in the PCI status register. */
3140 v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
3141 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3142 pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
3143 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
3144 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
3145 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3146 }
3147
3148 /* Check for PCI Express Uncorrectable Error. */
3149 if ((status & Y2_IS_PCI_EXP) != 0) {
3150 uint32_t v32;
3151
3152 /*
 3153		 * On PCI Express, bus bridges are called root complexes (RC).
 3154		 * PCI Express errors are recognized by the root complex too,
 3155		 * which requests the system to handle the problem. After an
 3156		 * error occurs it may be that the adapter can no longer be
 3157		 * accessed.
3158 */
3159
3160 v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
3161 if ((v32 & PEX_UNSUP_REQ) != 0) {
3162 /* Ignore unsupported request error. */
3163 device_printf(sc->msk_dev,
3164 "Uncorrectable PCI Express error\n");
3165 }
3166 if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
3167 int i;
3168
 3169			/* Get TLP header from the Log Registers. */
3170 for (i = 0; i < 4; i++)
3171 tlphead[i] = CSR_PCI_READ_4(sc,
3172 PEX_HEADER_LOG + i * 4);
3173 /* Check for vendor defined broadcast message. */
3174 if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
3175 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
3176 CSR_WRITE_4(sc, B0_HWE_IMSK,
3177 sc->msk_intrhwemask);
3178 CSR_READ_4(sc, B0_HWE_IMSK);
3179 }
3180 }
3181 /* Clear the interrupt. */
3182 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3183 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
3184 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3185 }
3186
3187 if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
3188 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
3189 if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
3190 msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
3191}
3192
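/*
 * Sync whichever Rx descriptor ring is in use (standard or jumbo) and
 * hand the updated producer index to the Rx queue's prefetch unit.
 */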
3193static __inline void
3194msk_rxput(struct msk_if_softc *sc_if)
3195{
3196 struct msk_softc *sc;
3197
3198 sc = sc_if->msk_softc;
3199 if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN))
3200 bus_dmamap_sync(
3201 sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
3202 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
3203 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3204 else
3205 bus_dmamap_sync(
3206 sc_if->msk_cdata.msk_rx_ring_tag,
3207 sc_if->msk_cdata.msk_rx_ring_map,
3208 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3209 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
3210 PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
3211}
3212
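/*
 * Drain the status LE ring and dispatch each entry to the owning port.
 * Returns non-zero when the hardware advanced its put index while we
 * were processing, in which case the caller should call us again.
 */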
3213static int
3214msk_handle_events(struct msk_softc *sc)
3215{
3216 struct msk_if_softc *sc_if;
3217 int rxput[2];
3218 struct msk_stat_desc *sd;
3219 uint32_t control, status;
3220 int cons, idx, len, port, rxprog;
3221
3222 idx = CSR_READ_2(sc, STAT_PUT_IDX);
3223 if (idx == sc->msk_stat_cons)
3224 return (0);
3225
3226 /* Sync status LEs. */
3227 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
3228 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3229 /* XXX Sync Rx LEs here. */
3230
3231 rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
3232
3233 rxprog = 0;
3234 for (cons = sc->msk_stat_cons; cons != idx;) {
3235 sd = &sc->msk_stat_ring[cons];
3236 control = le32toh(sd->msk_control);
3237 if ((control & HW_OWNER) == 0)
3238 break;
3239 /*
 3240		 * Marvell's FreeBSD driver updates the status LE after
 3241		 * clearing HW_OWNER. However, we don't have a way to sync a
 3242		 * single LE with the bus_dma(9) API; it only provides a way
 3243		 * to sync an entire DMA map. So don't sync the LE until we
 3244		 * have a better way to sync LEs.
3245 */
3246 control &= ~HW_OWNER;
3247 sd->msk_control = htole32(control);
3248 status = le32toh(sd->msk_status);
3249 len = control & STLE_LEN_MASK;
3250 port = (control >> 16) & 0x01;
3251 sc_if = sc->msk_if[port];
3252 if (sc_if == NULL) {
3253 device_printf(sc->msk_dev, "invalid port opcode "
3254 "0x%08x\n", control & STLE_OP_MASK);
3255 continue;
3256 }
3257
3258 switch (control & STLE_OP_MASK) {
3259 case OP_RXVLAN:
3260 sc_if->msk_vtag = ntohs(len);
3261 break;
3262 case OP_RXCHKSVLAN:
3263 sc_if->msk_vtag = ntohs(len);
3264 break;
3265 case OP_RXSTAT:
3266 if (sc_if->msk_framesize >
3267 (MCLBYTES - MSK_RX_BUF_ALIGN))
3268 msk_jumbo_rxeof(sc_if, status, len);
3269 else
3270 msk_rxeof(sc_if, status, len);
3271 rxprog++;
3272 /*
 3273			 * Because there is no way to sync a single Rx LE,
 3274			 * put the DMA sync operation off until the end of
3275 * event processing.
3276 */
3277 rxput[port]++;
 3278			/* Update the prefetch unit if we've passed the watermark. */
3279 if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
3280 msk_rxput(sc_if);
3281 rxput[port] = 0;
3282 }
3283 break;
3284 case OP_TXINDEXLE:
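			/*
			 * A single Tx index LE reports the Tx consumer
			 * index of both ports: port A's index is in the
			 * low bits of the status word while port B's is
			 * split across the upper status bits and the
			 * length field.
			 */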
3285 if (sc->msk_if[MSK_PORT_A] != NULL)
3286 msk_txeof(sc->msk_if[MSK_PORT_A],
3287 status & STLE_TXA1_MSKL);
3288 if (sc->msk_if[MSK_PORT_B] != NULL)
3289 msk_txeof(sc->msk_if[MSK_PORT_B],
3290 ((status & STLE_TXA2_MSKL) >>
3291 STLE_TXA2_SHIFTL) |
3292 ((len & STLE_TXA2_MSKH) <<
3293 STLE_TXA2_SHIFTH));
3294 break;
3295 default:
3296 device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
3297 control & STLE_OP_MASK);
3298 break;
3299 }
3300 MSK_INC(cons, MSK_STAT_RING_CNT);
3301 if (rxprog > sc->msk_process_limit)
3302 break;
3303 }
3304
3305 sc->msk_stat_cons = cons;
3306 /* XXX We should sync status LEs here. See above notes. */
3307
3308 if (rxput[MSK_PORT_A] > 0)
3309 msk_rxput(sc->msk_if[MSK_PORT_A]);
3310 if (rxput[MSK_PORT_B] > 0)
3311 msk_rxput(sc->msk_if[MSK_PORT_B]);
3312
3313 return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
3314}
3315
3316/* Legacy interrupt handler for shared interrupt. */
3317static void
3318msk_legacy_intr(void *xsc)
3319{
3320 struct msk_softc *sc;
3321 struct msk_if_softc *sc_if0, *sc_if1;
3322 struct ifnet *ifp0, *ifp1;
3323 uint32_t status;
3324
3325 sc = xsc;
3326 MSK_LOCK(sc);
3327
3328 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
3329 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3330 if (status == 0 || status == 0xffffffff ||
3331 (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 ||
3332 (status & sc->msk_intrmask) == 0) {
 3333		CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
 		MSK_UNLOCK(sc);
 3334		return;
3335 }
3336
3337 sc_if0 = sc->msk_if[MSK_PORT_A];
3338 sc_if1 = sc->msk_if[MSK_PORT_B];
3339 ifp0 = ifp1 = NULL;
3340 if (sc_if0 != NULL)
3341 ifp0 = sc_if0->msk_ifp;
3342 if (sc_if1 != NULL)
3343 ifp1 = sc_if1->msk_ifp;
3344
3345 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3346 msk_intr_phy(sc_if0);
3347 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3348 msk_intr_phy(sc_if1);
3349 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3350 msk_intr_gmac(sc_if0);
3351 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3352 msk_intr_gmac(sc_if1);
3353 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3354 device_printf(sc->msk_dev, "Rx descriptor error\n");
3355 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3356 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3357 CSR_READ_4(sc, B0_IMSK);
3358 }
3359 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3360 device_printf(sc->msk_dev, "Tx descriptor error\n");
3361 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3362 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3363 CSR_READ_4(sc, B0_IMSK);
3364 }
3365 if ((status & Y2_IS_HW_ERR) != 0)
3366 msk_intr_hwerr(sc);
3367
3368 while (msk_handle_events(sc) != 0)
3369 ;
3370 if ((status & Y2_IS_STAT_BMU) != 0)
3371 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3372
3373 /* Reenable interrupts. */
3374 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3375
3376 if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3377 !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3378 taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task);
3379 if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3380 !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3381 taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task);
3382
3383 MSK_UNLOCK(sc);
3384}
3385
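/*
 * Interrupt filter: just decide whether the interrupt is ours and, if
 * so, defer all processing to msk_int_task() on the driver taskqueue.
 */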
3386static int
3387msk_intr(void *xsc)
3388{
3389 struct msk_softc *sc;
3390 uint32_t status;
3391
3392 sc = xsc;
3393 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3394 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
3395 if (status == 0 || status == 0xffffffff) {
3396 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3397 return (FILTER_STRAY);
3398 }
3399
3400 taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task);
3401 return (FILTER_HANDLED);
3402}
3403
3404static void
3405msk_int_task(void *arg, int pending)
3406{
3407 struct msk_softc *sc;
3408 struct msk_if_softc *sc_if0, *sc_if1;
3409 struct ifnet *ifp0, *ifp1;
3410 uint32_t status;
3411 int domore;
3412
3413 sc = arg;
3414 MSK_LOCK(sc);
3415
3416 /* Get interrupt source. */
3417 status = CSR_READ_4(sc, B0_ISRC);
3418 if (status == 0 || status == 0xffffffff ||
3419 (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 ||
3420 (status & sc->msk_intrmask) == 0)
3421 goto done;
3422
3423 sc_if0 = sc->msk_if[MSK_PORT_A];
3424 sc_if1 = sc->msk_if[MSK_PORT_B];
3425 ifp0 = ifp1 = NULL;
3426 if (sc_if0 != NULL)
3427 ifp0 = sc_if0->msk_ifp;
3428 if (sc_if1 != NULL)
3429 ifp1 = sc_if1->msk_ifp;
3430
3431 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3432 msk_intr_phy(sc_if0);
3433 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3434 msk_intr_phy(sc_if1);
3435 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3436 msk_intr_gmac(sc_if0);
3437 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3438 msk_intr_gmac(sc_if1);
3439 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3440 device_printf(sc->msk_dev, "Rx descriptor error\n");
3441 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3442 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3443 CSR_READ_4(sc, B0_IMSK);
3444 }
3445 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3446 device_printf(sc->msk_dev, "Tx descriptor error\n");
3447 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3448 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3449 CSR_READ_4(sc, B0_IMSK);
3450 }
3451 if ((status & Y2_IS_HW_ERR) != 0)
3452 msk_intr_hwerr(sc);
3453
3454 domore = msk_handle_events(sc);
3455 if ((status & Y2_IS_STAT_BMU) != 0)
3456 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3457
3458 if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3459 !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3460 taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task);
3461 if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3462 !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3463 taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task);
3464
3465 if (domore > 0) {
3466 taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task);
3467 MSK_UNLOCK(sc);
3468 return;
3469 }
3470done:
3471 MSK_UNLOCK(sc);
3472
3473 /* Reenable interrupts. */
3474 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3475}
3476
3477static void
3478msk_init(void *xsc)
3479{
3480 struct msk_if_softc *sc_if = xsc;
3481
3482 MSK_IF_LOCK(sc_if);
3483 msk_init_locked(sc_if);
3484 MSK_IF_UNLOCK(sc_if);
3485}
3486
3487static void
3488msk_init_locked(struct msk_if_softc *sc_if)
3489{
3490 struct msk_softc *sc;
3491 struct ifnet *ifp;
3492 struct mii_data *mii;
3493 uint16_t eaddr[ETHER_ADDR_LEN / 2];
3494 uint16_t gmac;
3495 int error, i;
3496
3497 MSK_IF_LOCK_ASSERT(sc_if);
3498
3499 ifp = sc_if->msk_ifp;
3500 sc = sc_if->msk_softc;
3501 mii = device_get_softc(sc_if->msk_miibus);
3502
3503 error = 0;
3504 /* Cancel pending I/O and free all Rx/Tx buffers. */
3505 msk_stop(sc_if);
3506
3507 if (ifp->if_mtu < ETHERMTU)
3508 sc_if->msk_framesize = ETHERMTU;
3509 else
3510 sc_if->msk_framesize = ifp->if_mtu;
3511 sc_if->msk_framesize += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3512 if (ifp->if_mtu > ETHERMTU &&
3513 (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
3514 ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
3515 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
3516 }
3517
3518 /*
3519 * Initialize GMAC first.
 3520	 * Without this initialization, the Rx MAC did not work as
 3521	 * expected; it garbled status LEs and caused out-of-order or
 3522	 * duplicated frame delivery, which in turn resulted in very poor
 3523	 * Rx performance. (I had to write packet analysis code that
 3524	 * could be embedded in the driver to diagnose this issue.)
 3525	 * I spent almost 2 months fixing this issue. If I had had a
 3526	 * datasheet for the Yukon II I wouldn't have encountered this. :-(
3527 */
3528 gmac = GM_GPCR_SPEED_100 | GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL;
3529 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
3530
3531 /* Dummy read the Interrupt Source Register. */
3532 CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3533
3534 /* Clear MIB stats. */
3535 msk_stats_clear(sc_if);
3536
3537 /* Disable FCS. */
3538 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);
3539
3540 /* Setup Transmit Control Register. */
3541 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
3542
3543 /* Setup Transmit Flow Control Register. */
3544 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
3545
3546 /* Setup Transmit Parameter Register. */
3547 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
3548 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
3549 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
3550
3551 gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
3552 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
3553
3554 if (ifp->if_mtu > ETHERMTU)
3555 gmac |= GM_SMOD_JUMBO_ENA;
3556 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
3557
3558 /* Set station address. */
3559 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
 3560	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3561 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4,
3562 eaddr[i]);
 3563	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3564 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4,
3565 eaddr[i]);
3566
3567 /* Disable interrupts for counter overflows. */
3568 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
3569 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
3570 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
3571
3572 /* Configure Rx MAC FIFO. */
3573 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3574 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
3575 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3576 GMF_OPER_ON | GMF_RX_F_FL_ON);
3577
3578 /* Set receive filter. */
3579 msk_rxfilter(sc_if);
3580
3581 /* Flush Rx MAC FIFO on any flow control or error. */
3582 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
3583 GMR_FS_ANY_ERR);
3584
3585 /*
3586 * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
3587 * due to hardware hang on receipt of pause frames.
3588 */
3589 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR),
3590 RX_GMF_FL_THR_DEF + 1);
3591
3592 /* Configure Tx MAC FIFO. */
3593 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3594 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
3595 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
3596
3597 /* Configure hardware VLAN tag insertion/stripping. */
3598 msk_setvlan(sc_if, ifp);
3599
3600 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
 3601		/* Set Rx Pause thresholds. */
3602 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
3603 MSK_ECU_LLPP);
3604 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
3605 MSK_ECU_ULPP);
3606 if (ifp->if_mtu > ETHERMTU) {
3607 /*
3608 * Set Tx GMAC FIFO Almost Empty Threshold.
3609 */
3610 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
3611 MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
3612 /* Disable Store & Forward mode for Tx. */
3613 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3614 TX_JUMBO_ENA | TX_STFW_DIS);
3615 } else {
3616 /* Enable Store & Forward mode for Tx. */
3617 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3618 TX_JUMBO_DIS | TX_STFW_ENA);
3619 }
3620 }
3621
3622 /*
3623 * Disable Force Sync bit and Alloc bit in Tx RAM interface
3624 * arbiter as we don't use Sync Tx queue.
3625 */
3626 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
3627 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
3628 /* Enable the RAM Interface Arbiter. */
3629 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
3630
3631 /* Setup RAM buffer. */
3632 msk_set_rambuffer(sc_if);
3633
3634 /* Disable Tx sync Queue. */
3635 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
3636
3637 /* Setup Tx Queue Bus Memory Interface. */
3638 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
3639 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
3640 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
3641 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
3642 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3643 sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
3644 /* Fix for Yukon-EC Ultra: set BMU FIFO level */
3645 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), MSK_ECU_TXFF_LEV);
3646 }
3647
3648 /* Setup Rx Queue Bus Memory Interface. */
3649 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
3650 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
3651 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
3652 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
3653 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3654 sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
3655 /* MAC Rx RAM Read is controlled by hardware. */
3656 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
3657 }
3658
3659 msk_set_prefetch(sc, sc_if->msk_txq,
3660 sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
3661 msk_init_tx_ring(sc_if);
3662
3663 /* Disable Rx checksum offload and RSS hash. */
3664 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3665 BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);
3666 if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) {
3667 msk_set_prefetch(sc, sc_if->msk_rxq,
3668 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
3669 MSK_JUMBO_RX_RING_CNT - 1);
3670 error = msk_init_jumbo_rx_ring(sc_if);
3671 } else {
3672 msk_set_prefetch(sc, sc_if->msk_rxq,
3673 sc_if->msk_rdata.msk_rx_ring_paddr,
3674 MSK_RX_RING_CNT - 1);
3675 error = msk_init_rx_ring(sc_if);
3676 }
3677 if (error != 0) {
3678 device_printf(sc_if->msk_if_dev,
3679 "initialization failed: no memory for Rx buffers\n");
3680 msk_stop(sc_if);
3681 return;
3682 }
3683
3684 /* Configure interrupt handling. */
3685 if (sc_if->msk_port == MSK_PORT_A) {
3686 sc->msk_intrmask |= Y2_IS_PORT_A;
3687 sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
3688 } else {
3689 sc->msk_intrmask |= Y2_IS_PORT_B;
3690 sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
3691 }
3692 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3693 CSR_READ_4(sc, B0_HWE_IMSK);
3694 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3695 CSR_READ_4(sc, B0_IMSK);
3696
3697 sc_if->msk_flags &= ~MSK_FLAG_LINK;
3698 mii_mediachg(mii);
3699
3700 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3701 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3702
3703 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3704}
3705
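/*
 * Program the per-port Rx and Tx RAM buffer queues (the registers take
 * addresses in 8-byte units, hence the divisions by 8) along with the
 * Rx upper/lower pause thresholds.  This is a no-op for controllers
 * without a RAM buffer.
 */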
3706static void
3707msk_set_rambuffer(struct msk_if_softc *sc_if)
3708{
3709 struct msk_softc *sc;
3710 int ltpp, utpp;
3711
3712 sc = sc_if->msk_softc;
3713 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
3714 return;
3715
3716 /* Setup Rx Queue. */
3717 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
3718 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
3719 sc->msk_rxqstart[sc_if->msk_port] / 8);
3720 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
3721 sc->msk_rxqend[sc_if->msk_port] / 8);
3722 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
3723 sc->msk_rxqstart[sc_if->msk_port] / 8);
3724 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
3725 sc->msk_rxqstart[sc_if->msk_port] / 8);
3726
3727 utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3728 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
3729 ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3730 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
3731 if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
3732 ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
3733 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
3734 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
 3735	/* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */
3736
3737 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
3738 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));
3739
3740 /* Setup Tx Queue. */
3741 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
3742 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
3743 sc->msk_txqstart[sc_if->msk_port] / 8);
3744 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
3745 sc->msk_txqend[sc_if->msk_port] / 8);
3746 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
3747 sc->msk_txqstart[sc_if->msk_port] / 8);
3748 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
3749 sc->msk_txqstart[sc_if->msk_port] / 8);
3750 /* Enable Store & Forward for Tx side. */
3751 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
3752 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
3753 CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
3754}
3755
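/*
 * Set up a queue's list element prefetch unit: reset it, point it at
 * the descriptor ring's bus address, program the last ring index and
 * turn it on.
 */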
3756static void
3757msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
3758 uint32_t count)
3759{
3760
3761 /* Reset the prefetch unit. */
3762 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3763 PREF_UNIT_RST_SET);
3764 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3765 PREF_UNIT_RST_CLR);
3766 /* Set LE base address. */
3767 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
3768 MSK_ADDR_LO(addr));
3769 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
3770 MSK_ADDR_HI(addr));
3771 /* Set the list last index. */
3772 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
3773 count);
3774 /* Turn on prefetch unit. */
3775 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3776 PREF_UNIT_OP_ON);
3777 /* Dummy read to ensure write. */
3778 CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
3779}
3780
3781static void
3782msk_stop(struct msk_if_softc *sc_if)
3783{
3784 struct msk_softc *sc;
3785 struct msk_txdesc *txd;
3786 struct msk_rxdesc *rxd;
3787 struct msk_rxdesc *jrxd;
3788 struct ifnet *ifp;
3789 uint32_t val;
3790 int i;
3791
3792 MSK_IF_LOCK_ASSERT(sc_if);
3793 sc = sc_if->msk_softc;
3794 ifp = sc_if->msk_ifp;
3795
3796 callout_stop(&sc_if->msk_tick_ch);
3797 sc_if->msk_watchdog_timer = 0;
3798
3799 /* Disable interrupts. */
3800 if (sc_if->msk_port == MSK_PORT_A) {
3801 sc->msk_intrmask &= ~Y2_IS_PORT_A;
3802 sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
3803 } else {
3804 sc->msk_intrmask &= ~Y2_IS_PORT_B;
3805 sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
3806 }
3807 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3808 CSR_READ_4(sc, B0_HWE_IMSK);
3809 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3810 CSR_READ_4(sc, B0_IMSK);
3811
3812 /* Disable Tx/Rx MAC. */
3813 val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3814 val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
3815 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
 3816	/* Read back to ensure the write completes. */
3817 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3818 /* Update stats and clear counters. */
3819 msk_stats_update(sc_if);
3820
3821 /* Stop Tx BMU. */
3822 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
3823 val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
3824 for (i = 0; i < MSK_TIMEOUT; i++) {
3825 if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
3826 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3827 BMU_STOP);
3828 val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
3829 } else
3830 break;
3831 DELAY(1);
3832 }
3833 if (i == MSK_TIMEOUT)
3834 device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
3835 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
3836 RB_RST_SET | RB_DIS_OP_MD);
3837
 3838	/* Disable all GMAC interrupts. */
3839 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
3840 /* Disable PHY interrupt. */
3841 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
3842
3843 /* Disable the RAM Interface Arbiter. */
3844 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);
3845
3846 /* Reset the PCI FIFO of the async Tx queue */
3847 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3848 BMU_RST_SET | BMU_FIFO_RST);
3849
3850 /* Reset the Tx prefetch units. */
3851 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
3852 PREF_UNIT_RST_SET);
3853
3854 /* Reset the RAM Buffer async Tx queue. */
3855 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);
3856
3857 /* Reset Tx MAC FIFO. */
3858 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3859 /* Set Pause Off. */
3860 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
3861
3862 /*
3863 * The Rx Stop command will not work for Yukon-2 if the BMU does not
 3864	 * reach the end of a packet, and since we cannot be sure that no
 3865	 * data is incoming, we must reset the BMU while it is not in the
 3866	 * middle of a DMA transfer. Since it is possible that the Rx path
 3867	 * is still active, the Rx RAM buffer is stopped first, so any
 3868	 * incoming data will not trigger a DMA. After the RAM buffer is
 3869	 * stopped, the BMU is polled until any DMA in progress has ended,
 3870	 * and only then is it reset.
3871 */
3872
3873 /* Disable the RAM Buffer receive queue. */
3874 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
3875 for (i = 0; i < MSK_TIMEOUT; i++) {
3876 if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
3877 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
3878 break;
3879 DELAY(1);
3880 }
3881 if (i == MSK_TIMEOUT)
3882 device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
3883 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3884 BMU_RST_SET | BMU_FIFO_RST);
3885 /* Reset the Rx prefetch unit. */
3886 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
3887 PREF_UNIT_RST_SET);
3888 /* Reset the RAM Buffer receive queue. */
3889 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
3890 /* Reset Rx MAC FIFO. */
3891 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3892
3893 /* Free Rx and Tx mbufs still in the queues. */
3894 for (i = 0; i < MSK_RX_RING_CNT; i++) {
3895 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
3896 if (rxd->rx_m != NULL) {
3897 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
3898 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3899 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
3900 rxd->rx_dmamap);
3901 m_freem(rxd->rx_m);
3902 rxd->rx_m = NULL;
3903 }
3904 }
3905 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
3906 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
3907 if (jrxd->rx_m != NULL) {
3908 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
3909 jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3910 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
3911 jrxd->rx_dmamap);
3912 m_freem(jrxd->rx_m);
3913 jrxd->rx_m = NULL;
3914 }
3915 }
3916 for (i = 0; i < MSK_TX_RING_CNT; i++) {
3917 txd = &sc_if->msk_cdata.msk_txdesc[i];
3918 if (txd->tx_m != NULL) {
3919 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
3920 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3921 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
3922 txd->tx_dmamap);
3923 m_freem(txd->tx_m);
3924 txd->tx_m = NULL;
3925 }
3926 }
3927
3928 /*
3929 * Mark the interface down.
3930 */
3931 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3932 sc_if->msk_flags &= ~MSK_FLAG_LINK;
3933}
3934
3935/*
 3936 * When the GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading the
 3937 * lower counter clears the high 16 bits of the counter, so
 3938 * accessing the lower 16 bits should be the last operation.
3939 */
3940#define MSK_READ_MIB32(x, y) \
3941 (((uint32_t)GMAC_READ_2(sc, x, (y) + 4)) << 16) + \
3942 (uint32_t)GMAC_READ_2(sc, x, y)
3943#define MSK_READ_MIB64(x, y) \
3944 (((uint64_t)MSK_READ_MIB32(x, (y) + 8)) << 32) + \
3945 (uint64_t)MSK_READ_MIB32(x, y)
3946
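/*
 * Zero the hardware MIB counters by reading every counter once while
 * clear-on-read (GM_PAR_MIB_CLR) mode is enabled.
 */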
3947static void
3948msk_stats_clear(struct msk_if_softc *sc_if)
3949{
3950 struct msk_softc *sc;
3951 uint32_t reg;
3952 uint16_t gmac;
3953 int i;
3954
3955 MSK_IF_LOCK_ASSERT(sc_if);
3956
3957 sc = sc_if->msk_softc;
3958 /* Set MIB Clear Counter Mode. */
3959 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
3960 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
3961 /* Read all MIB Counters with Clear Mode set. */
3962 for (i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i++)
3963 reg = MSK_READ_MIB32(sc_if->msk_port, i);
3964 /* Clear MIB Clear Counter Mode. */
3965 gmac &= ~GM_PAR_MIB_CLR;
3966 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
3967}
3968
3969static void
3970msk_stats_update(struct msk_if_softc *sc_if)
3971{
3972 struct msk_softc *sc;
3973 struct ifnet *ifp;
3974 struct msk_hw_stats *stats;
3975 uint16_t gmac;
3976 uint32_t reg;
3977
3978 MSK_IF_LOCK_ASSERT(sc_if);
3979
3980 ifp = sc_if->msk_ifp;
3981 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
3982 return;
3983 sc = sc_if->msk_softc;
3984 stats = &sc_if->msk_stats;
3985 /* Set MIB Clear Counter Mode. */
3986 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
3987 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
3988
3989 /* Rx stats. */
3990 stats->rx_ucast_frames +=
3991 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_UC_OK);
3992 stats->rx_bcast_frames +=
3993 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_BC_OK);
3994 stats->rx_pause_frames +=
3995 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MPAUSE);
3996 stats->rx_mcast_frames +=
3997 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MC_OK);
3998 stats->rx_crc_errs +=
3999 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_FCS_ERR);
4000 reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE1);
4001 stats->rx_good_octets +=
4002 MSK_READ_MIB64(sc_if->msk_port, GM_RXO_OK_LO);
4003 stats->rx_bad_octets +=
4004 MSK_READ_MIB64(sc_if->msk_port, GM_RXO_ERR_LO);
4005 stats->rx_runts +=
4006 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SHT);
4007 stats->rx_runt_errs +=
4008 MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FRAG);
4009 stats->rx_pkts_64 +=
4010 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_64B);
4011 stats->rx_pkts_65_127 +=
4012 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_127B);
4013 stats->rx_pkts_128_255 +=
4014 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_255B);
4015 stats->rx_pkts_256_511 +=
4016 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_511B);
4017 stats->rx_pkts_512_1023 +=
4018 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1023B);
4019 stats->rx_pkts_1024_1518 +=
4020 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1518B);
4021 stats->rx_pkts_1519_max +=
4022 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MAX_SZ);
4023 stats->rx_pkts_too_long +=
4024 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_LNG_ERR);
4025 stats->rx_pkts_jabbers +=
4026 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_JAB_PKT);
4027 reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE2);
4028 stats->rx_fifo_oflows +=
4029 MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FIFO_OV);
4030 reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE3);
4031
4032 /* Tx stats. */
4033 stats->tx_ucast_frames +=
4034 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_UC_OK);
4035 stats->tx_bcast_frames +=
4036 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_BC_OK);
4037 stats->tx_pause_frames +=
4038 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MPAUSE);
4039 stats->tx_mcast_frames +=
4040 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MC_OK);
4041 stats->tx_octets +=
4042 MSK_READ_MIB64(sc_if->msk_port, GM_TXO_OK_LO);
4043 stats->tx_pkts_64 +=
4044 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_64B);
4045 stats->tx_pkts_65_127 +=
4046 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_127B);
4047 stats->tx_pkts_128_255 +=
4048 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_255B);
4049 stats->tx_pkts_256_511 +=
4050 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_511B);
4051 stats->tx_pkts_512_1023 +=
4052 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1023B);
4053 stats->tx_pkts_1024_1518 +=
4054 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1518B);
4055 stats->tx_pkts_1519_max +=
4056 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MAX_SZ);
4057 reg = MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SPARE1);
4058 stats->tx_colls +=
4059 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_COL);
4060 stats->tx_late_colls +=
4061 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_LAT_COL);
4062 stats->tx_excess_colls +=
4063 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_ABO_COL);
4064 stats->tx_multi_colls +=
4065 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MUL_COL);
4066 stats->tx_single_colls +=
4067 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SNG_COL);
4068 stats->tx_underflows +=
4069 MSK_READ_MIB32(sc_if->msk_port, GM_TXE_FIFO_UR);
4070 /* Clear MIB Clear Counter Mode. */
4071 gmac &= ~GM_PAR_MIB_CLR;
4072 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
4073}
4074
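/*
 * Sysctl handlers for the MIB statistics: report the sum of the value
 * accumulated in software by msk_stats_update() and the counter
 * currently held in hardware.  The msk_hw_stats layout mirrors the
 * hardware counter layout, so the field offset times two gives the
 * offset of the matching MIB register from GM_MIB_CNT_BASE.
 */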
4075static int
4076msk_sysctl_stat32(SYSCTL_HANDLER_ARGS)
4077{
4078 struct msk_softc *sc;
4079 struct msk_if_softc *sc_if;
4080 uint32_t result, *stat;
4081 int off;
4082
4083 sc_if = (struct msk_if_softc *)arg1;
4084 sc = sc_if->msk_softc;
4085 off = arg2;
4086 stat = (uint32_t *)((uint8_t *)&sc_if->msk_stats + off);
4087
4088 MSK_IF_LOCK(sc_if);
4089 result = MSK_READ_MIB32(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
4090 result += *stat;
4091 MSK_IF_UNLOCK(sc_if);
4092
4093 return (sysctl_handle_int(oidp, &result, 0, req));
4094}
4095
4096static int
4097msk_sysctl_stat64(SYSCTL_HANDLER_ARGS)
4098{
4099 struct msk_softc *sc;
4100 struct msk_if_softc *sc_if;
4101 uint64_t result, *stat;
4102 int off;
4103
4104 sc_if = (struct msk_if_softc *)arg1;
4105 sc = sc_if->msk_softc;
4106 off = arg2;
4107 stat = (uint64_t *)((uint8_t *)&sc_if->msk_stats + off);
4108
4109 MSK_IF_LOCK(sc_if);
4110 result = MSK_READ_MIB64(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
4111 result += *stat;
4112 MSK_IF_UNLOCK(sc_if);
4113
4114 return (sysctl_handle_quad(oidp, &result, 0, req));
4115}
4116
4117#undef MSK_READ_MIB32
4118#undef MSK_READ_MIB64
4119
4120#define MSK_SYSCTL_STAT32(sc, c, o, p, n, d) \
4121 SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_UINT | CTLFLAG_RD, \
4122 sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat32, \
4123 "IU", d)
4124#define MSK_SYSCTL_STAT64(sc, c, o, p, n, d) \
4125 SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_UINT | CTLFLAG_RD, \
4126 sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat64, \
4127 "Q", d)
4128
4129static void
4130msk_sysctl_node(struct msk_if_softc *sc_if)
4131{
4132 struct sysctl_ctx_list *ctx;
4133 struct sysctl_oid_list *child, *schild;
4134 struct sysctl_oid *tree;
4135
4136 ctx = device_get_sysctl_ctx(sc_if->msk_if_dev);
4137 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc_if->msk_if_dev));
4138
4139 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
4140 NULL, "MSK Statistics");
4141 schild = child = SYSCTL_CHILDREN(tree);
4142 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
4143 NULL, "MSK RX Statistics");
4144 child = SYSCTL_CHILDREN(tree);
4145 MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
4146 child, rx_ucast_frames, "Good unicast frames");
4147 MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
4148 child, rx_bcast_frames, "Good broadcast frames");
4149 MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
4150 child, rx_pause_frames, "Pause frames");
4151 MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
4152 child, rx_mcast_frames, "Multicast frames");
4153 MSK_SYSCTL_STAT32(sc_if, ctx, "crc_errs",
4154 child, rx_crc_errs, "CRC errors");
4155 MSK_SYSCTL_STAT64(sc_if, ctx, "good_octets",
4156 child, rx_good_octets, "Good octets");
4157 MSK_SYSCTL_STAT64(sc_if, ctx, "bad_octets",
4158 child, rx_bad_octets, "Bad octets");
4159 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
4160 child, rx_pkts_64, "64 bytes frames");
4161 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
4162 child, rx_pkts_65_127, "65 to 127 bytes frames");
4163 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
4164 child, rx_pkts_128_255, "128 to 255 bytes frames");
4165 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
4166 child, rx_pkts_256_511, "256 to 511 bytes frames");
4167 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
4168 child, rx_pkts_512_1023, "512 to 1023 bytes frames");
4169 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
4170 child, rx_pkts_1024_1518, "1024 to 1518 bytes frames");
4171 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
4172 child, rx_pkts_1519_max, "1519 to max frames");
4173 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_too_long",
4174 child, rx_pkts_too_long, "frames too long");
4175 MSK_SYSCTL_STAT32(sc_if, ctx, "jabbers",
4176 child, rx_pkts_jabbers, "Jabber errors");
4177 MSK_SYSCTL_STAT32(sc_if, ctx, "overflows",
4178 child, rx_fifo_oflows, "FIFO overflows");
4179
4180 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
4181 NULL, "MSK TX Statistics");
4182 child = SYSCTL_CHILDREN(tree);
4183 MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
4184 child, tx_ucast_frames, "Unicast frames");
4185 MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
4186 child, tx_bcast_frames, "Broadcast frames");
4187 MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
4188 child, tx_pause_frames, "Pause frames");
4189 MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
4190 child, tx_mcast_frames, "Multicast frames");
4191 MSK_SYSCTL_STAT64(sc_if, ctx, "octets",
4192 child, tx_octets, "Octets");
4193 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
4194 child, tx_pkts_64, "64 bytes frames");
4195 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
4196 child, tx_pkts_65_127, "65 to 127 bytes frames");
4197 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
4198 child, tx_pkts_128_255, "128 to 255 bytes frames");
4199 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
4200 child, tx_pkts_256_511, "256 to 511 bytes frames");
4201 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
4202 child, tx_pkts_512_1023, "512 to 1023 bytes frames");
4203 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
4204 child, tx_pkts_1024_1518, "1024 to 1518 bytes frames");
4205 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
4206 child, tx_pkts_1519_max, "1519 to max frames");
4207 MSK_SYSCTL_STAT32(sc_if, ctx, "colls",
4208 child, tx_colls, "Collisions");
4209 MSK_SYSCTL_STAT32(sc_if, ctx, "late_colls",
4210 child, tx_late_colls, "Late collisions");
4211 MSK_SYSCTL_STAT32(sc_if, ctx, "excess_colls",
4212 child, tx_excess_colls, "Excessive collisions");
4213 MSK_SYSCTL_STAT32(sc_if, ctx, "multi_colls",
4214 child, tx_multi_colls, "Multiple collisions");
4215 MSK_SYSCTL_STAT32(sc_if, ctx, "single_colls",
4216 child, tx_single_colls, "Single collisions");
4217 MSK_SYSCTL_STAT32(sc_if, ctx, "underflows",
4218 child, tx_underflows, "FIFO underflows");
4219}
4220
4221#undef MSK_SYSCTL_STAT32
4222#undef MSK_SYSCTL_STAT64
4223
4224static int
4225sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
4226{
4227 int error, value;
4228
4229 if (!arg1)
4230 return (EINVAL);
4231 value = *(int *)arg1;
4232 error = sysctl_handle_int(oidp, &value, 0, req);
4233 if (error || !req->newptr)
4234 return (error);
4235 if (value < low || value > high)
4236 return (EINVAL);
4237 *(int *)arg1 = value;
4238
4239 return (0);
4240}
4241
4242static int
4243sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
4244{
4245
4246 return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
4247 MSK_PROC_MAX));
4248}