if_msk.c (165138) if_msk.c (165611)
1/******************************************************************************
2 *
3 * Name : sky2.c
4 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
5 * Version: $Revision: 1.23 $
6 * Date : $Date: 2005/12/22 09:04:11 $
7 * Purpose: Main driver source file
8 *
9 *****************************************************************************/
10
11/******************************************************************************
12 *
13 * LICENSE:
14 * Copyright (C) Marvell International Ltd. and/or its affiliates
15 *
16 * The computer program files contained in this folder ("Files")
17 * are provided to you under the BSD-type license terms provided
18 * below, and any use of such Files and any derivative works
19 * thereof created by you shall be governed by the following terms
20 * and conditions:
21 *
22 * - Redistributions of source code must retain the above copyright
23 * notice, this list of conditions and the following disclaimer.
24 * - Redistributions in binary form must reproduce the above
25 * copyright notice, this list of conditions and the following
26 * disclaimer in the documentation and/or other materials provided
27 * with the distribution.
28 * - Neither the name of Marvell nor the names of its contributors
29 * may be used to endorse or promote products derived from this
30 * software without specific prior written permission.
31 *
32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
33 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
34 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
36 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
37 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
38 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
39 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
41 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
43 * OF THE POSSIBILITY OF SUCH DAMAGE.
44 * /LICENSE
45 *
46 *****************************************************************************/
47
48/*-
49 * Copyright (c) 1997, 1998, 1999, 2000
50 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
51 *
52 * Redistribution and use in source and binary forms, with or without
53 * modification, are permitted provided that the following conditions
54 * are met:
55 * 1. Redistributions of source code must retain the above copyright
56 * notice, this list of conditions and the following disclaimer.
57 * 2. Redistributions in binary form must reproduce the above copyright
58 * notice, this list of conditions and the following disclaimer in the
59 * documentation and/or other materials provided with the distribution.
60 * 3. All advertising materials mentioning features or use of this software
61 * must display the following acknowledgement:
62 * This product includes software developed by Bill Paul.
63 * 4. Neither the name of the author nor the names of any co-contributors
64 * may be used to endorse or promote products derived from this software
65 * without specific prior written permission.
66 *
67 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
68 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
69 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
70 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
71 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
72 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
73 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
74 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
75 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
76 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
77 * THE POSSIBILITY OF SUCH DAMAGE.
78 */
79/*-
80 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
81 *
82 * Permission to use, copy, modify, and distribute this software for any
83 * purpose with or without fee is hereby granted, provided that the above
84 * copyright notice and this permission notice appear in all copies.
85 *
86 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
87 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
88 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
89 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
90 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
91 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
92 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
93 */
94
95/*
96 * Device driver for the Marvell Yukon II Ethernet controller.
97 * Due to lack of documentation, this driver is based on the code from
98 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
99 */
100
101#include <sys/cdefs.h>
102__FBSDID("$FreeBSD: head/sys/dev/msk/if_msk.c 165138 2006-12-13 02:30:11Z yongari $");
102__FBSDID("$FreeBSD: head/sys/dev/msk/if_msk.c 165611 2006-12-29 03:33:33Z yongari $");
103
104#include <sys/param.h>
105#include <sys/systm.h>
106#include <sys/bus.h>
107#include <sys/endian.h>
108#include <sys/mbuf.h>
109#include <sys/malloc.h>
110#include <sys/kernel.h>
111#include <sys/module.h>
112#include <sys/socket.h>
113#include <sys/sockio.h>
114#include <sys/queue.h>
115#include <sys/sysctl.h>
116#include <sys/taskqueue.h>
117
118#include <net/bpf.h>
119#include <net/ethernet.h>
120#include <net/if.h>
121#include <net/if_arp.h>
122#include <net/if_dl.h>
123#include <net/if_media.h>
124#include <net/if_types.h>
125#include <net/if_vlan_var.h>
126
127#include <netinet/in.h>
128#include <netinet/in_systm.h>
129#include <netinet/ip.h>
130#include <netinet/tcp.h>
131#include <netinet/udp.h>
132
133#include <machine/bus.h>
134#include <machine/resource.h>
135#include <sys/rman.h>
136
137#include <dev/mii/mii.h>
138#include <dev/mii/miivar.h>
139#include <dev/mii/brgphyreg.h>
140
141#include <dev/pci/pcireg.h>
142#include <dev/pci/pcivar.h>
143
144#include <dev/msk/if_mskreg.h>
145
146MODULE_DEPEND(msk, pci, 1, 1, 1);
147MODULE_DEPEND(msk, ether, 1, 1, 1);
148MODULE_DEPEND(msk, miibus, 1, 1, 1);
149
150/* "device miibus" required. See GENERIC if you get errors here. */
151#include "miibus_if.h"
152
153/* Tunables. */
154static int msi_disable = 0;
155TUNABLE_INT("hw.msk.msi_disable", &msi_disable);
156
157#define MSK_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
158
159/*
160 * Devices supported by this driver.
161 */
162static struct msk_product {
163 uint16_t msk_vendorid;
164 uint16_t msk_deviceid;
165 const char *msk_name;
166} msk_products[] = {
167 { VENDORID_SK, DEVICEID_SK_YUKON2,
168 "SK-9Sxx Gigabit Ethernet" },
169 { VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
170 "SK-9Exx Gigabit Ethernet"},
171 { VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
172 "Marvell Yukon 88E8021CU Gigabit Ethernet" },
173 { VENDORID_MARVELL, DEVICEID_MRVL_8021X,
174 "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
175 { VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
176 "Marvell Yukon 88E8022CU Gigabit Ethernet" },
177 { VENDORID_MARVELL, DEVICEID_MRVL_8022X,
178 "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
179 { VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
180 "Marvell Yukon 88E8061CU Gigabit Ethernet" },
181 { VENDORID_MARVELL, DEVICEID_MRVL_8061X,
182 "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
183 { VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
184 "Marvell Yukon 88E8062CU Gigabit Ethernet" },
185 { VENDORID_MARVELL, DEVICEID_MRVL_8062X,
186 "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
187 { VENDORID_MARVELL, DEVICEID_MRVL_8035,
188 "Marvell Yukon 88E8035 Gigabit Ethernet" },
189 { VENDORID_MARVELL, DEVICEID_MRVL_8036,
190 "Marvell Yukon 88E8036 Gigabit Ethernet" },
191 { VENDORID_MARVELL, DEVICEID_MRVL_8038,
192 "Marvell Yukon 88E8038 Gigabit Ethernet" },
193 { VENDORID_MARVELL, DEVICEID_MRVL_4361,
194 "Marvell Yukon 88E8050 Gigabit Ethernet" },
195 { VENDORID_MARVELL, DEVICEID_MRVL_4360,
196 "Marvell Yukon 88E8052 Gigabit Ethernet" },
197 { VENDORID_MARVELL, DEVICEID_MRVL_4362,
198 "Marvell Yukon 88E8053 Gigabit Ethernet" },
199 { VENDORID_MARVELL, DEVICEID_MRVL_4363,
200 "Marvell Yukon 88E8055 Gigabit Ethernet" },
201 { VENDORID_MARVELL, DEVICEID_MRVL_4364,
202 "Marvell Yukon 88E8056 Gigabit Ethernet" },
203 { VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
204 "D-Link 550SX Gigabit Ethernet" },
205 { VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
206 "D-Link 560T Gigabit Ethernet" }
207};
208
209static const char *model_name[] = {
210 "Yukon XL",
211 "Yukon EC Ultra",
212 "Yukon Unknown",
213 "Yukon EC",
214 "Yukon FE"
215};
216
217static int mskc_probe(device_t);
218static int mskc_attach(device_t);
219static int mskc_detach(device_t);
220static void mskc_shutdown(device_t);
221static int mskc_setup_rambuffer(struct msk_softc *);
222static int mskc_suspend(device_t);
223static int mskc_resume(device_t);
224static void mskc_reset(struct msk_softc *);
225
226static int msk_probe(device_t);
227static int msk_attach(device_t);
228static int msk_detach(device_t);
229
230static void msk_tick(void *);
231static void msk_intr(void *);
232static void msk_int_task(void *, int);
233static void msk_intr_phy(struct msk_if_softc *);
234static void msk_intr_gmac(struct msk_if_softc *);
235static __inline void msk_rxput(struct msk_if_softc *);
236static int msk_handle_events(struct msk_softc *);
237static void msk_handle_hwerr(struct msk_if_softc *, uint32_t);
238static void msk_intr_hwerr(struct msk_softc *);
239static void msk_rxeof(struct msk_if_softc *, uint32_t, int);
240static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int);
241static void msk_txeof(struct msk_if_softc *, int);
242static struct mbuf *msk_defrag(struct mbuf *, int, int);
243static int msk_encap(struct msk_if_softc *, struct mbuf **);
244static void msk_tx_task(void *, int);
245static void msk_start(struct ifnet *);
246static int msk_ioctl(struct ifnet *, u_long, caddr_t);
247static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
248static void msk_set_rambuffer(struct msk_if_softc *);
249static void msk_init(void *);
250static void msk_init_locked(struct msk_if_softc *);
251static void msk_stop(struct msk_if_softc *);
252static void msk_watchdog(void *);
253static int msk_mediachange(struct ifnet *);
254static void msk_mediastatus(struct ifnet *, struct ifmediareq *);
255static void msk_phy_power(struct msk_softc *, int);
256static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
257static int msk_status_dma_alloc(struct msk_softc *);
258static void msk_status_dma_free(struct msk_softc *);
259static int msk_txrx_dma_alloc(struct msk_if_softc *);
260static void msk_txrx_dma_free(struct msk_if_softc *);
261static void *msk_jalloc(struct msk_if_softc *);
262static void msk_jfree(void *, void *);
263static int msk_init_rx_ring(struct msk_if_softc *);
264static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
265static void msk_init_tx_ring(struct msk_if_softc *);
266static __inline void msk_discard_rxbuf(struct msk_if_softc *, int);
267static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
268static int msk_newbuf(struct msk_if_softc *, int);
269static int msk_jumbo_newbuf(struct msk_if_softc *, int);
270
271static int msk_phy_readreg(struct msk_if_softc *, int, int);
272static int msk_phy_writereg(struct msk_if_softc *, int, int, int);
273static int msk_miibus_readreg(device_t, int, int);
274static int msk_miibus_writereg(device_t, int, int, int);
275static void msk_miibus_statchg(device_t);
276static void msk_link_task(void *, int);
277
278static void msk_setmulti(struct msk_if_softc *);
279static void msk_setvlan(struct msk_if_softc *, struct ifnet *);
280static void msk_setpromisc(struct msk_if_softc *);
281
282static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
283static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS);
284
285static device_method_t mskc_methods[] = {
286 /* Device interface */
287 DEVMETHOD(device_probe, mskc_probe),
288 DEVMETHOD(device_attach, mskc_attach),
289 DEVMETHOD(device_detach, mskc_detach),
290 DEVMETHOD(device_suspend, mskc_suspend),
291 DEVMETHOD(device_resume, mskc_resume),
292 DEVMETHOD(device_shutdown, mskc_shutdown),
293
294 /* bus interface */
295 DEVMETHOD(bus_print_child, bus_generic_print_child),
296 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
297
298 { NULL, NULL }
299};
300
301static driver_t mskc_driver = {
302 "mskc",
303 mskc_methods,
304 sizeof(struct msk_softc)
305};
306
307static devclass_t mskc_devclass;
308
309static device_method_t msk_methods[] = {
310 /* Device interface */
311 DEVMETHOD(device_probe, msk_probe),
312 DEVMETHOD(device_attach, msk_attach),
313 DEVMETHOD(device_detach, msk_detach),
314 DEVMETHOD(device_shutdown, bus_generic_shutdown),
315
316 /* bus interface */
317 DEVMETHOD(bus_print_child, bus_generic_print_child),
318 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
319
320 /* MII interface */
321 DEVMETHOD(miibus_readreg, msk_miibus_readreg),
322 DEVMETHOD(miibus_writereg, msk_miibus_writereg),
323 DEVMETHOD(miibus_statchg, msk_miibus_statchg),
324
325 { NULL, NULL }
326};
327
328static driver_t msk_driver = {
329 "msk",
330 msk_methods,
331 sizeof(struct msk_if_softc)
332};
333
334static devclass_t msk_devclass;
335
336DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, 0, 0);
337DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, 0, 0);
338DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, 0, 0);
339
340static struct resource_spec msk_res_spec_io[] = {
341 { SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE },
if_msk.c (165138):
342	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
343	{ -1,			0,		0 }
344};
345
346static struct resource_spec msk_res_spec_mem[] = {
347	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
348	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
349	{ -1,			0,		0 }
350};
351

if_msk.c (165611):
342	{ -1,			0,		0 }
343};
344
345static struct resource_spec msk_res_spec_mem[] = {
346	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
347	{ -1,			0,		0 }
348};
349
350static struct resource_spec msk_irq_spec_legacy[] = {
351	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
352	{ -1,			0,		0 }
353};
354
355static struct resource_spec msk_irq_spec_msi[] = {
356	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
357	{ SYS_RES_IRQ,		2,		RF_ACTIVE },
358	{ -1,			0,		0 }
359};
360
352static int
353msk_miibus_readreg(device_t dev, int phy, int reg)
354{
355 struct msk_if_softc *sc_if;
356
357 sc_if = device_get_softc(dev);
358
359 return (msk_phy_readreg(sc_if, phy, reg));
360}
361
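/*
 * Read a PHY register through the GMAC SMI interface, polling for the
 * read-valid bit; returns 0 if the PHY does not respond in time.
 */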
362static int
363msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
364{
365 struct msk_softc *sc;
366 int i, val;
367
368 sc = sc_if->msk_softc;
369
370 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
371 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
372
373 for (i = 0; i < MSK_TIMEOUT; i++) {
374 DELAY(1);
375 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
376 if ((val & GM_SMI_CT_RD_VAL) != 0) {
377 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
378 break;
379 }
380 }
381
382 if (i == MSK_TIMEOUT) {
383 if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
384 val = 0;
385 }
386
387 return (val);
388}
389
390static int
391msk_miibus_writereg(device_t dev, int phy, int reg, int val)
392{
393 struct msk_if_softc *sc_if;
394
395 sc_if = device_get_softc(dev);
396
397 return (msk_phy_writereg(sc_if, phy, reg, val));
398}
399
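/*
 * Write a PHY register through the GMAC SMI interface and wait for the
 * busy bit to clear.
 */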
400static int
401msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
402{
403 struct msk_softc *sc;
404 int i;
405
406 sc = sc_if->msk_softc;
407
408 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
409 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
410 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
411 for (i = 0; i < MSK_TIMEOUT; i++) {
412 DELAY(1);
413 if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
414 GM_SMI_CT_BUSY) == 0)
415 break;
416 }
417 if (i == MSK_TIMEOUT)
418 if_printf(sc_if->msk_ifp, "phy write timeout\n");
419
420 return (0);
421}
422
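/*
 * MII status change callback; the real work is deferred to
 * msk_link_task() on the software interrupt taskqueue.
 */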
423static void
424msk_miibus_statchg(device_t dev)
425{
426 struct msk_if_softc *sc_if;
427
428 sc_if = device_get_softc(dev);
429 taskqueue_enqueue(taskqueue_swi, &sc_if->msk_link_task);
430}
431
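/*
 * Handle a link state change: track link up/down and program the GMAC
 * speed, duplex and flow control settings to match the negotiated media.
 */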
432static void
433msk_link_task(void *arg, int pending)
434{
435 struct msk_softc *sc;
436 struct msk_if_softc *sc_if;
437 struct mii_data *mii;
438 struct ifnet *ifp;
439 uint32_t gmac, ane;
440
441 sc_if = (struct msk_if_softc *)arg;
442 sc = sc_if->msk_softc;
443
444 MSK_IF_LOCK(sc_if);
445
446 mii = device_get_softc(sc_if->msk_miibus);
447 ifp = sc_if->msk_ifp;
448 if (mii == NULL || ifp == NULL) {
449 MSK_IF_UNLOCK(sc_if);
450 return;
451 }
452
453 if (mii->mii_media_status & IFM_ACTIVE) {
454 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
455 sc_if->msk_link = 1;
456 } else
457 sc_if->msk_link = 0;
458
459 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
460 ane = 0;
461 if (sc_if->msk_link != 0) {
462 /* Enable Tx FIFO Underrun. */
463 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
464 GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
465 switch (IFM_SUBTYPE(mii->mii_media_active)) {
466 case IFM_AUTO:
467 ane = 1;
468 break;
469 case IFM_1000_SX:
470 case IFM_1000_T:
471 gmac &= ~GM_GPCR_SPEED_100;
472 gmac |= GM_GPCR_SPEED_1000;
473 break;
474 case IFM_100_TX:
475 gmac |= GM_GPCR_SPEED_100;
476 gmac &= ~GM_GPCR_SPEED_1000;
477 break;
478 case IFM_10_T:
479 gmac &= ~(GM_GPCR_SPEED_100 | GM_GPCR_SPEED_1000);
480 break;
481 }
482
483 if (ane == 0)
484 gmac |= GM_GPCR_AU_ALL_DIS;
485 else
486 gmac &= ~GM_GPCR_AU_ALL_DIS;
487 if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
488 gmac |= GM_GPCR_DUP_FULL;
489 /* Enable Rx flow control. */
490 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) != 0)
491 gmac &= ~GM_GPCR_FC_RX_DIS;
492 /* Enable Tx flow control. */
493 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) != 0)
494 gmac &= ~GM_GPCR_FC_TX_DIS;
495 gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
496 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
497		/* Read back to make sure the write has taken effect. */
498 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
499
500 gmac = GMC_PAUSE_ON;
501 if (((mii->mii_media_active & IFM_GMASK) &
502 (IFM_FLAG0 | IFM_FLAG1)) == 0)
503 gmac = GMC_PAUSE_OFF;
504		/* Disable pause for 10/100 Mbps in half-duplex mode. */
505 if ((((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) &&
506 (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX ||
507 IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T))
508 gmac = GMC_PAUSE_OFF;
509 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);
510
511 /* Enable PHY interrupt for FIFO underrun/overflow. */
512 if (sc->msk_marvell_phy)
513 msk_phy_writereg(sc_if, PHY_ADDR_MARV,
514 PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
515 } else {
516 /*
517 * Link state changed to down.
518 * Disable PHY interrupts.
519 */
520 if (sc->msk_marvell_phy)
521 msk_phy_writereg(sc_if, PHY_ADDR_MARV,
522 PHY_MARV_INT_MASK, 0);
523 /* Disable Rx/Tx MAC. */
524 gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
525 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
526		/* Read back to make sure the write has taken effect. */
527 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
528 }
529
530 MSK_IF_UNLOCK(sc_if);
531}
532
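/*
 * Program the 64-bin multicast hash filter from the interface address
 * list; IFF_PROMISC and IFF_ALLMULTI open the filter instead.
 */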
533static void
534msk_setmulti(struct msk_if_softc *sc_if)
535{
536 struct msk_softc *sc;
537 struct ifnet *ifp;
538 struct ifmultiaddr *ifma;
539 uint32_t mchash[2];
540 uint32_t crc;
541 uint16_t mode;
542
543 sc = sc_if->msk_softc;
544
545 MSK_IF_LOCK_ASSERT(sc_if);
546
547 ifp = sc_if->msk_ifp;
548
549 bzero(mchash, sizeof(mchash));
550 mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
551 mode |= GM_RXCR_UCF_ENA;
552 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
553 if ((ifp->if_flags & IFF_PROMISC) != 0)
554 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
555 else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
556 mchash[0] = 0xffff;
557 mchash[1] = 0xffff;
558 }
559 } else {
560 IF_ADDR_LOCK(ifp);
561 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
562 if (ifma->ifma_addr->sa_family != AF_LINK)
563 continue;
564 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
565 ifma->ifma_addr), ETHER_ADDR_LEN);
566 /* Just want the 6 least significant bits. */
567 crc &= 0x3f;
568 /* Set the corresponding bit in the hash table. */
569 mchash[crc >> 5] |= 1 << (crc & 0x1f);
570 }
571 IF_ADDR_UNLOCK(ifp);
572 mode |= GM_RXCR_MCF_ENA;
573 }
574
575 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
576 mchash[0] & 0xffff);
577 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
578 (mchash[0] >> 16) & 0xffff);
579 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
580 mchash[1] & 0xffff);
581 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
582 (mchash[1] >> 16) & 0xffff);
583 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
584}
585
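/*
 * Enable or disable hardware VLAN tag stripping and insertion based on
 * the IFCAP_VLAN_HWTAGGING capability.
 */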
586static void
587msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
588{
589 struct msk_softc *sc;
590
591 sc = sc_if->msk_softc;
592 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
593 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
594 RX_VLAN_STRIP_ON);
595 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
596 TX_VLAN_TAG_ON);
597 } else {
598 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
599 RX_VLAN_STRIP_OFF);
600 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
601 TX_VLAN_TAG_OFF);
602 }
603}
604
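/*
 * Toggle unicast/multicast filtering according to IFF_PROMISC.
 */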
605static void
606msk_setpromisc(struct msk_if_softc *sc_if)
607{
608 struct msk_softc *sc;
609 struct ifnet *ifp;
610 uint16_t mode;
611
612 MSK_IF_LOCK_ASSERT(sc_if);
613
614 sc = sc_if->msk_softc;
615 ifp = sc_if->msk_ifp;
616
617 mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
618 if (ifp->if_flags & IFF_PROMISC)
619 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
620 else
621 mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
622 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
623}
624
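/*
 * Initialize the standard Rx ring: attach an mbuf cluster to every
 * descriptor and hand the producer index to the prefetch unit.
 */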
625static int
626msk_init_rx_ring(struct msk_if_softc *sc_if)
627{
628 struct msk_ring_data *rd;
629 struct msk_rxdesc *rxd;
630 int i, prod;
631
632 MSK_IF_LOCK_ASSERT(sc_if);
633
634 sc_if->msk_cdata.msk_rx_cons = 0;
635 sc_if->msk_cdata.msk_rx_prod = 0;
636 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
637
638 rd = &sc_if->msk_rdata;
639 bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
640 prod = sc_if->msk_cdata.msk_rx_prod;
641 for (i = 0; i < MSK_RX_RING_CNT; i++) {
642 rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
643 rxd->rx_m = NULL;
644 rxd->rx_le = &rd->msk_rx_ring[prod];
645 if (msk_newbuf(sc_if, prod) != 0)
646 return (ENOBUFS);
647 MSK_INC(prod, MSK_RX_RING_CNT);
648 }
649
650 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
651 sc_if->msk_cdata.msk_rx_ring_map,
652 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
653
654 /* Update prefetch unit. */
655 sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
656 CSR_WRITE_2(sc_if->msk_softc,
657 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
658 sc_if->msk_cdata.msk_rx_prod);
659
660 return (0);
661}
662
663static int
664msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
665{
666 struct msk_ring_data *rd;
667 struct msk_rxdesc *rxd;
668 int i, prod;
669
670 MSK_IF_LOCK_ASSERT(sc_if);
671
672 sc_if->msk_cdata.msk_rx_cons = 0;
673 sc_if->msk_cdata.msk_rx_prod = 0;
674 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
675
676 rd = &sc_if->msk_rdata;
677 bzero(rd->msk_jumbo_rx_ring,
678 sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
679 prod = sc_if->msk_cdata.msk_rx_prod;
680 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
681 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
682 rxd->rx_m = NULL;
683 rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
684 if (msk_jumbo_newbuf(sc_if, prod) != 0)
685 return (ENOBUFS);
686 MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
687 }
688
689 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
690 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
691 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
692
693 sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
694 CSR_WRITE_2(sc_if->msk_softc,
695 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
696 sc_if->msk_cdata.msk_rx_prod);
697
698 return (0);
699}
700
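/*
 * Reset Tx ring bookkeeping and clear all Tx descriptors.
 */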
701static void
702msk_init_tx_ring(struct msk_if_softc *sc_if)
703{
704 struct msk_ring_data *rd;
705 struct msk_txdesc *txd;
706 int i;
707
708 sc_if->msk_cdata.msk_tso_mtu = 0;
709 sc_if->msk_cdata.msk_tx_prod = 0;
710 sc_if->msk_cdata.msk_tx_cons = 0;
711 sc_if->msk_cdata.msk_tx_cnt = 0;
712
713 rd = &sc_if->msk_rdata;
714 bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
715 for (i = 0; i < MSK_TX_RING_CNT; i++) {
716 txd = &sc_if->msk_cdata.msk_txdesc[i];
717 txd->tx_m = NULL;
718 txd->tx_le = &rd->msk_tx_ring[i];
719 }
720
721 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
722 sc_if->msk_cdata.msk_tx_ring_map,
723 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
724}
725
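/*
 * Recycle an Rx buffer in place by handing its descriptor back to the
 * hardware without allocating a replacement mbuf.
 */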
726static __inline void
727msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
728{
729 struct msk_rx_desc *rx_le;
730 struct msk_rxdesc *rxd;
731 struct mbuf *m;
732
733 rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
734 m = rxd->rx_m;
735 rx_le = rxd->rx_le;
736 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
737}
738
739static __inline void
740msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
741{
742 struct msk_rx_desc *rx_le;
743 struct msk_rxdesc *rxd;
744 struct mbuf *m;
745
746 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
747 m = rxd->rx_m;
748 rx_le = rxd->rx_le;
749 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
750}
751
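/*
 * Attach a fresh mbuf cluster to an Rx descriptor. A spare DMA map is
 * used so the old buffer is only torn down once the new one maps
 * successfully.
 */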
752static int
753msk_newbuf(struct msk_if_softc *sc_if, int idx)
754{
755 struct msk_rx_desc *rx_le;
756 struct msk_rxdesc *rxd;
757 struct mbuf *m;
758 bus_dma_segment_t segs[1];
759 bus_dmamap_t map;
760 int nsegs;
761
762 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
763 if (m == NULL)
764 return (ENOBUFS);
765
766 m->m_len = m->m_pkthdr.len = MCLBYTES;
767 m_adj(m, ETHER_ALIGN);
768
769 if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag,
770 sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs,
771 BUS_DMA_NOWAIT) != 0) {
772 m_freem(m);
773 return (ENOBUFS);
774 }
775 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
776
777 rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
778 if (rxd->rx_m != NULL) {
779 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
780 BUS_DMASYNC_POSTREAD);
781 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
782 }
783 map = rxd->rx_dmamap;
784 rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
785 sc_if->msk_cdata.msk_rx_sparemap = map;
786 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
787 BUS_DMASYNC_PREREAD);
788 rxd->rx_m = m;
789 rx_le = rxd->rx_le;
790 rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
791 rx_le->msk_control =
792 htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
793
794 return (0);
795}
796
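/*
 * Jumbo counterpart of msk_newbuf(): back the descriptor with a buffer
 * from the driver's private jumbo allocator, msk_jalloc().
 */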
797static int
798msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
799{
800 struct msk_rx_desc *rx_le;
801 struct msk_rxdesc *rxd;
802 struct mbuf *m;
803 bus_dma_segment_t segs[1];
804 bus_dmamap_t map;
805 int nsegs;
806 void *buf;
807
808 MGETHDR(m, M_DONTWAIT, MT_DATA);
809 if (m == NULL)
810 return (ENOBUFS);
811 buf = msk_jalloc(sc_if);
812 if (buf == NULL) {
813 m_freem(m);
814 return (ENOBUFS);
815 }
816 /* Attach the buffer to the mbuf. */
817 MEXTADD(m, buf, MSK_JLEN, msk_jfree, (struct msk_if_softc *)sc_if, 0,
818 EXT_NET_DRV);
819 if ((m->m_flags & M_EXT) == 0) {
820 m_freem(m);
821 return (ENOBUFS);
822 }
823 m->m_pkthdr.len = m->m_len = MSK_JLEN;
824 m_adj(m, ETHER_ALIGN);
825
826 if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
827 sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
828 BUS_DMA_NOWAIT) != 0) {
829 m_freem(m);
830 return (ENOBUFS);
831 }
832 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
833
834 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
835 if (rxd->rx_m != NULL) {
836 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
837 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
838 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
839 rxd->rx_dmamap);
840 }
841 map = rxd->rx_dmamap;
842 rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
843 sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
844 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
845 BUS_DMASYNC_PREREAD);
846 rxd->rx_m = m;
847 rx_le = rxd->rx_le;
848 rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
849 rx_le->msk_control =
850 htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
851
852 return (0);
853}
854
855/*
856 * Set media options.
857 */
858static int
859msk_mediachange(struct ifnet *ifp)
860{
861 struct msk_if_softc *sc_if;
862 struct mii_data *mii;
863
864 sc_if = ifp->if_softc;
865
866 MSK_IF_LOCK(sc_if);
867 mii = device_get_softc(sc_if->msk_miibus);
868 mii_mediachg(mii);
869 MSK_IF_UNLOCK(sc_if);
870
871 return (0);
872}
873
874/*
875 * Report current media status.
876 */
877static void
878msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
879{
880 struct msk_if_softc *sc_if;
881 struct mii_data *mii;
882
883 sc_if = ifp->if_softc;
884 MSK_IF_LOCK(sc_if);
885 mii = device_get_softc(sc_if->msk_miibus);
886
887 mii_pollstat(mii);
888 MSK_IF_UNLOCK(sc_if);
889 ifmr->ifm_active = mii->mii_media_active;
890 ifmr->ifm_status = mii->mii_media_status;
891}
892
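/*
 * Handle interface ioctls: MTU, interface flags, multicast filter,
 * media and capability changes.
 */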
893static int
894msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
895{
896 struct msk_if_softc *sc_if;
897 struct ifreq *ifr;
898 struct mii_data *mii;
899 int error, mask;
900
901 sc_if = ifp->if_softc;
902 ifr = (struct ifreq *)data;
903 error = 0;
904
905 switch(command) {
906 case SIOCSIFMTU:
907 if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) {
908 error = EINVAL;
909 break;
910 }
911 if (sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
912 ifr->ifr_mtu > MSK_MAX_FRAMELEN) {
913 error = EINVAL;
914 break;
915 }
916 MSK_IF_LOCK(sc_if);
917 ifp->if_mtu = ifr->ifr_mtu;
918 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
919 msk_init_locked(sc_if);
920 MSK_IF_UNLOCK(sc_if);
921 break;
922 case SIOCSIFFLAGS:
923 MSK_IF_LOCK(sc_if);
924 if ((ifp->if_flags & IFF_UP) != 0) {
925 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
926 if (((ifp->if_flags ^ sc_if->msk_if_flags)
927 & IFF_PROMISC) != 0) {
928 msk_setpromisc(sc_if);
929 msk_setmulti(sc_if);
930 }
931 } else {
932 if (sc_if->msk_detach == 0)
933 msk_init_locked(sc_if);
934 }
935 } else {
936 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
937 msk_stop(sc_if);
938 }
939 sc_if->msk_if_flags = ifp->if_flags;
940 MSK_IF_UNLOCK(sc_if);
941 break;
942 case SIOCADDMULTI:
943 case SIOCDELMULTI:
944 MSK_IF_LOCK(sc_if);
945 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
946 msk_setmulti(sc_if);
947 MSK_IF_UNLOCK(sc_if);
948 break;
949 case SIOCGIFMEDIA:
950 case SIOCSIFMEDIA:
951 mii = device_get_softc(sc_if->msk_miibus);
952 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
953 break;
954 case SIOCSIFCAP:
955 MSK_IF_LOCK(sc_if);
956 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
957 if ((mask & IFCAP_TXCSUM) != 0) {
958 ifp->if_capenable ^= IFCAP_TXCSUM;
959 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
960 (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
961 ifp->if_hwassist |= MSK_CSUM_FEATURES;
962 else
963 ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
964 }
965 if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
966 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
967 msk_setvlan(sc_if, ifp);
968 }
969
970 if ((mask & IFCAP_TSO4) != 0) {
971 ifp->if_capenable ^= IFCAP_TSO4;
972 if ((IFCAP_TSO4 & ifp->if_capenable) != 0 &&
973 (IFCAP_TSO4 & ifp->if_capabilities) != 0)
974 ifp->if_hwassist |= CSUM_TSO;
975 else
976 ifp->if_hwassist &= ~CSUM_TSO;
977 }
978 VLAN_CAPABILITIES(ifp);
979 MSK_IF_UNLOCK(sc_if);
980 break;
981 default:
982 error = ether_ioctl(ifp, command, data);
983 break;
984 }
985
986 return (error);
987}
988
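/*
 * Match the PCI vendor/device ID pair against the table of supported
 * controllers.
 */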
989static int
990mskc_probe(device_t dev)
991{
992 struct msk_product *mp;
993 uint16_t vendor, devid;
994 int i;
995
996 vendor = pci_get_vendor(dev);
997 devid = pci_get_device(dev);
998 mp = msk_products;
999 for (i = 0; i < sizeof(msk_products)/sizeof(msk_products[0]);
1000 i++, mp++) {
1001 if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
1002 device_set_desc(dev, mp->msk_name);
1003 return (BUS_PROBE_DEFAULT);
1004 }
1005 }
1006
1007 return (ENXIO);
1008}
1009
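/*
 * Size the adapter SRAM and divide it into per-port Rx/Tx queue
 * regions; the Rx queue gets the minimum size plus 80% of what is left.
 */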
1010static int
1011mskc_setup_rambuffer(struct msk_softc *sc)
1012{
1013 int totqsize, minqsize;
1014 int avail, next;
1015 int i;
1016 uint8_t val;
1017
1018 /* Get adapter SRAM size. */
1019 val = CSR_READ_1(sc, B2_E_0);
1020 sc->msk_ramsize = (val == 0) ? 128 : val * 4;
1021 if (sc->msk_hw_id == CHIP_ID_YUKON_FE)
1022 sc->msk_ramsize = 4 * 4;
1023 if (bootverbose)
1024 device_printf(sc->msk_dev,
1025 "RAM buffer size : %dKB\n", sc->msk_ramsize);
1026
1027 totqsize = sc->msk_ramsize * sc->msk_num_port;
1028 minqsize = MSK_MIN_RXQ_SIZE + MSK_MIN_TXQ_SIZE;
1029 if (minqsize > sc->msk_ramsize)
1030 minqsize = sc->msk_ramsize;
1031
1032 if (minqsize * sc->msk_num_port > totqsize) {
1033 device_printf(sc->msk_dev,
1034 "not enough RAM buffer memory : %d/%dKB\n",
1035 minqsize * sc->msk_num_port, totqsize);
1036 return (ENOSPC);
1037 }
1038
1039 avail = totqsize;
1040 if (sc->msk_num_port > 1) {
1041 /*
1042 * Divide up the memory evenly so that everyone gets a
1043 * fair share for dual port adapters.
1044 */
1045 avail = sc->msk_ramsize;
1046 }
1047
1048 /* Take away the minimum memory for active queues. */
1049 avail -= minqsize;
1050 /* Rx queue gets the minimum + 80% of the rest. */
1051 sc->msk_rxqsize =
1052 (avail * MSK_RAM_QUOTA_RX) / 100 + MSK_MIN_RXQ_SIZE;
1053 avail -= (sc->msk_rxqsize - MSK_MIN_RXQ_SIZE);
1054 sc->msk_txqsize = avail + MSK_MIN_TXQ_SIZE;
1055
1056 for (i = 0, next = 0; i < sc->msk_num_port; i++) {
1057 sc->msk_rxqstart[i] = next;
1058 sc->msk_rxqend[i] = next + (sc->msk_rxqsize * 1024) - 1;
1059 next = sc->msk_rxqend[i] + 1;
1060 sc->msk_txqstart[i] = next;
1061 sc->msk_txqend[i] = next + (sc->msk_txqsize * 1024) - 1;
1062 next = sc->msk_txqend[i] + 1;
1063 if (bootverbose) {
1064 device_printf(sc->msk_dev,
1065 "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
1066 sc->msk_rxqsize, sc->msk_rxqstart[i],
1067 sc->msk_rxqend[i]);
1068 device_printf(sc->msk_dev,
1069 "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
1070 sc->msk_txqsize, sc->msk_txqstart[i],
1071 sc->msk_txqend[i]);
1072 }
1073 }
1074
1075 return (0);
1076}
1077
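/*
 * Power the PHY(s) up or down, adjusting clock gating and COMA mode as
 * required by the chip revision.
 */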
1078static void
1079msk_phy_power(struct msk_softc *sc, int mode)
1080{
1081 uint32_t val;
1082 int i;
1083
1084 switch (mode) {
1085 case MSK_PHY_POWERUP:
1086 /* Switch power to VCC (WA for VAUX problem). */
1087 CSR_WRITE_1(sc, B0_POWER_CTRL,
1088 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
1089 /* Disable Core Clock Division, set Clock Select to 0. */
1090 CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
1091
1092 val = 0;
1093 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1094 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1095 /* Enable bits are inverted. */
1096 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1097 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1098 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1099 }
1100 /*
1101 * Enable PCI & Core Clock, enable clock gating for both Links.
1102 */
1103 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1104
1105 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1106 val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
1107 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1108 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1109 /* Deassert Low Power for 1st PHY. */
1110 val |= PCI_Y2_PHY1_COMA;
1111 if (sc->msk_num_port > 1)
1112 val |= PCI_Y2_PHY2_COMA;
1113 } else if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
1114 uint32_t our;
1115
1116 CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
1117
1118 /* Enable all clocks. */
1119 pci_write_config(sc->msk_dev, PCI_OUR_REG_3, 0, 4);
1120 our = pci_read_config(sc->msk_dev, PCI_OUR_REG_4, 4);
1121 our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN|
1122 PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST);
1123 /* Set all bits to 0 except bits 15..12. */
1124 pci_write_config(sc->msk_dev, PCI_OUR_REG_4, our, 4);
1125 /* Set to default value. */
1126 pci_write_config(sc->msk_dev, PCI_OUR_REG_5, 0, 4);
1127 }
1128 /* Release PHY from PowerDown/COMA mode. */
1129 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1130 for (i = 0; i < sc->msk_num_port; i++) {
1131 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1132 GMLC_RST_SET);
1133 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1134 GMLC_RST_CLR);
1135 }
1136 break;
1137 case MSK_PHY_POWERDOWN:
1138 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1139 val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
1140 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1141 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1142 val &= ~PCI_Y2_PHY1_COMA;
1143 if (sc->msk_num_port > 1)
1144 val &= ~PCI_Y2_PHY2_COMA;
1145 }
1146 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1147
1148 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1149 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1150 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1151 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1152 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1153 /* Enable bits are inverted. */
1154 val = 0;
1155 }
1156 /*
1157 * Disable PCI & Core Clock, disable clock gating for
1158 * both Links.
1159 */
1160 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1161 CSR_WRITE_1(sc, B0_POWER_CTRL,
1162 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
1163 break;
1164 default:
1165 break;
1166 }
1167}
1168
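/*
 * Bring the controller into a known state: software reset, clear PCI
 * and PCI Express error bits, reset the GMACs, disable interrupts and
 * reinitialize the status list unit.
 */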
1169static void
1170mskc_reset(struct msk_softc *sc)
1171{
1172 bus_addr_t addr;
1173 uint16_t status;
1174 uint32_t val;
1175 int i;
1176
1177 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1178
1179 /* Disable ASF. */
1180 if (sc->msk_hw_id < CHIP_ID_YUKON_XL) {
1181 CSR_WRITE_4(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
1182 CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
1183 }
1184 /*
1185 * Since we disabled ASF, S/W reset is required for Power Management.
1186 */
1187 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1188 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1189
1190 /* Clear all error bits in the PCI status register. */
1191 status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
1192 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1193
1194 pci_write_config(sc->msk_dev, PCIR_STATUS, status |
1195 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
1196 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
1197 CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);
1198
1199 switch (sc->msk_bustype) {
1200 case MSK_PEX_BUS:
1201 /* Clear all PEX errors. */
1202 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
1203 val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
1204 if ((val & PEX_RX_OV) != 0) {
1205 sc->msk_intrmask &= ~Y2_IS_HW_ERR;
1206 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
1207 }
1208 break;
1209 case MSK_PCI_BUS:
1210 case MSK_PCIX_BUS:
1211 /* Set Cache Line Size to 2(8bytes) if configured to 0. */
1212 val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
1213 if (val == 0)
1214 pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
1215 if (sc->msk_bustype == MSK_PCIX_BUS) {
1216 /* Set Cache Line Size opt. */
1217 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1218 val |= PCI_CLS_OPT;
1219 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1220 }
1221 break;
1222 }
1223 /* Set PHY power state. */
1224 msk_phy_power(sc, MSK_PHY_POWERUP);
1225
1226 /* Reset GPHY/GMAC Control */
1227 for (i = 0; i < sc->msk_num_port; i++) {
1228 /* GPHY Control reset. */
1229 CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
1230 CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
1231 /* GMAC Control reset. */
1232 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
1233 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
1234 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
1235 }
1236 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1237
1238 /* LED On. */
1239 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);
1240
1241 /* Clear TWSI IRQ. */
1242 CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);
1243
1244 /* Turn off hardware timer. */
1245 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
1246 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);
1247
1248 /* Turn off descriptor polling. */
1249 CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);
1250
1251 /* Turn off time stamps. */
1252 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
1253 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
1254
1255 /* Configure timeout values. */
1256 for (i = 0; i < sc->msk_num_port; i++) {
1257 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
1258 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
1259 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
1260 MSK_RI_TO_53);
1261 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
1262 MSK_RI_TO_53);
1263 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
1264 MSK_RI_TO_53);
1265 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
1266 MSK_RI_TO_53);
1267 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
1268 MSK_RI_TO_53);
1269 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
1270 MSK_RI_TO_53);
1271 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
1272 MSK_RI_TO_53);
1273 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
1274 MSK_RI_TO_53);
1275 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
1276 MSK_RI_TO_53);
1277 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
1278 MSK_RI_TO_53);
1279 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
1280 MSK_RI_TO_53);
1281 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
1282 MSK_RI_TO_53);
1283 }
1284
1285 /* Disable all interrupts. */
1286 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1287 CSR_READ_4(sc, B0_HWE_IMSK);
1288 CSR_WRITE_4(sc, B0_IMSK, 0);
1289 CSR_READ_4(sc, B0_IMSK);
1290
1291 /*
1292	 * On dual-port PCI-X cards, there is a problem where status
1293 * can be received out of order due to split transactions.
1294 */
1295 if (sc->msk_bustype == MSK_PCIX_BUS && sc->msk_num_port > 1) {
1296 int pcix;
1297 uint16_t pcix_cmd;
1298
1299 if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &pcix) == 0) {
1300 pcix_cmd = pci_read_config(sc->msk_dev, pcix + 2, 2);
1301 /* Clear Max Outstanding Split Transactions. */
1302 pcix_cmd &= ~0x70;
1303 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1304 pci_write_config(sc->msk_dev, pcix + 2, pcix_cmd, 2);
1305 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1306 }
1307 }
1308 if (sc->msk_bustype == MSK_PEX_BUS) {
1309 uint16_t v, width;
1310
1311 v = pci_read_config(sc->msk_dev, PEX_DEV_CTRL, 2);
1312 /* Change Max. Read Request Size to 4096 bytes. */
1313 v &= ~PEX_DC_MAX_RRS_MSK;
1314 v |= PEX_DC_MAX_RD_RQ_SIZE(5);
1315 pci_write_config(sc->msk_dev, PEX_DEV_CTRL, v, 2);
1316 width = pci_read_config(sc->msk_dev, PEX_LNK_STAT, 2);
1317 width = (width & PEX_LS_LINK_WI_MSK) >> 4;
1318 v = pci_read_config(sc->msk_dev, PEX_LNK_CAP, 2);
1319 v = (v & PEX_LS_LINK_WI_MSK) >> 4;
1320 if (v != width)
1321 device_printf(sc->msk_dev,
1322 "negotiated width of link(x%d) != "
1323 "max. width of link(x%d)\n", width, v);
1324 }
1325
1326 /* Clear status list. */
1327 bzero(sc->msk_stat_ring,
1328 sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
1329 sc->msk_stat_cons = 0;
1330 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
1331 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1332 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
1333 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
1334 /* Set the status list base address. */
1335 addr = sc->msk_stat_ring_paddr;
1336 CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
1337 CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
1338 /* Set the status list last index. */
1339 CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
1340 if (HW_FEATURE(sc, HWF_WA_DEV_43_418)) {
1341 /* WA for dev. #4.3 */
1342 CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
1343 /* WA for dev. #4.18 */
1344 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
1345 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
1346 } else {
1347 CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
1348 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
1349 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM,
1350 HW_FEATURE(sc, HWF_WA_DEV_4109) ? 0x10 : 0x04);
1351 CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
1352 }
1353 /*
1354 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
1355 */
1356 CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));
1357
1358 /* Enable status unit. */
1359 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);
1360
1361 CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
1362 CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
1363 CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
1364}
1365
1366static int
1367msk_probe(device_t dev)
1368{
1369 struct msk_softc *sc;
1370 char desc[100];
1371
1372 sc = device_get_softc(device_get_parent(dev));
1373 /*
1374 * Not much to do here. We always know there will be
1375 * at least one GMAC present, and if there are two,
1376 * mskc_attach() will create a second device instance
1377 * for us.
1378 */
1379 snprintf(desc, sizeof(desc),
1380 "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
1381 model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
1382 sc->msk_hw_rev);
1383 device_set_desc_copy(dev, desc);
1384
1385 return (BUS_PROBE_DEFAULT);
1386}
1387
1388static int
1389msk_attach(device_t dev)
1390{
1391 struct msk_softc *sc;
1392 struct msk_if_softc *sc_if;
1393 struct ifnet *ifp;
1394 int i, port, error;
1395 uint8_t eaddr[6];
1396
1397 if (dev == NULL)
1398 return (EINVAL);
1399
1400 error = 0;
1401 sc_if = device_get_softc(dev);
1402 sc = device_get_softc(device_get_parent(dev));
1403 port = *(int *)device_get_ivars(dev);
1404
1405 sc_if->msk_if_dev = dev;
1406 sc_if->msk_port = port;
1407 sc_if->msk_softc = sc;
1408 sc->msk_if[port] = sc_if;
1409 /* Setup Tx/Rx queue register offsets. */
1410 if (port == MSK_PORT_A) {
1411 sc_if->msk_txq = Q_XA1;
1412 sc_if->msk_txsq = Q_XS1;
1413 sc_if->msk_rxq = Q_R1;
1414 } else {
1415 sc_if->msk_txq = Q_XA2;
1416 sc_if->msk_txsq = Q_XS2;
1417 sc_if->msk_rxq = Q_R2;
1418 }
1419
1420 callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
1421 callout_init_mtx(&sc_if->msk_watchdog_ch, &sc_if->msk_softc->msk_mtx,
1422 0);
1423 TASK_INIT(&sc_if->msk_link_task, 0, msk_link_task, sc_if);
1424
1425	if ((error = msk_txrx_dma_alloc(sc_if)) != 0)
1426 goto fail;
1427
1428 ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
1429 if (ifp == NULL) {
1430 device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
1431 error = ENOSPC;
1432 goto fail;
1433 }
1434 ifp->if_softc = sc_if;
1435 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1436 ifp->if_mtu = ETHERMTU;
1437 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1438 /*
1439	 * The IFCAP_RXCSUM capability is intentionally disabled because Rx
1440	 * checksum offload has a serious bug on all Yukon II family hardware.
1441	 * There appears to be a workaround that makes it work some of the
1442	 * time, but the workaround has to check OP code sequences to verify
1443	 * that the OP code is correct, and sometimes must compute the
1444	 * IP/TCP/UDP checksum in the driver to verify the checksum computed
1445	 * by the hardware. If software has to compute the checksum anyway to
1446	 * verify the hardware's result, there is little point in having the
1447	 * hardware compute it, so no effort is made to get Rx checksum
1448	 * offload working on Yukon II hardware.
1449 */
1450 ifp->if_capabilities = IFCAP_TXCSUM;
1451 ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO;
1452 if (sc->msk_hw_id != CHIP_ID_YUKON_EC_U) {
1453 /* It seems Yukon EC Ultra doesn't support TSO. */
1454 ifp->if_capabilities |= IFCAP_TSO4;
1455 ifp->if_hwassist |= CSUM_TSO;
1456 }
1457 ifp->if_capenable = ifp->if_capabilities;
1458 ifp->if_ioctl = msk_ioctl;
1459 ifp->if_start = msk_start;
1460 ifp->if_timer = 0;
1461 ifp->if_watchdog = NULL;
1462 ifp->if_init = msk_init;
1463 IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
1464 ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1;
1465 IFQ_SET_READY(&ifp->if_snd);
1466
1467 TASK_INIT(&sc_if->msk_tx_task, 1, msk_tx_task, ifp);
1468
1469 /*
1470 * Get station address for this interface. Note that
1471 * dual port cards actually come with three station
1472 * addresses: one for each port, plus an extra. The
1473 * extra one is used by the SysKonnect driver software
1474 * as a 'virtual' station address for when both ports
1475 * are operating in failover mode. Currently we don't
1476 * use this extra address.
1477 */
1478 MSK_IF_LOCK(sc_if);
1479 for (i = 0; i < ETHER_ADDR_LEN; i++)
1480 eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);
1481
1482 /*
1483 * Call MI attach routine. Can't hold locks when calling into ether_*.
1484 */
1485 MSK_IF_UNLOCK(sc_if);
1486 ether_ifattach(ifp, eaddr);
1487 MSK_IF_LOCK(sc_if);
1488
1489 /* VLAN capability setup */
1490 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
1491 if (ifp->if_capabilities & IFCAP_HWCSUM)
1492 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
1493 ifp->if_capenable = ifp->if_capabilities;
1494
1495 /*
1496 * Tell the upper layer(s) we support long frames.
1497 * Must appear after the call to ether_ifattach() because
1498 * ether_ifattach() sets ifi_hdrlen to the default value.
1499 */
1500 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1501
1502 /*
1503 * Do miibus setup.
1504 */
1505 MSK_IF_UNLOCK(sc_if);
1506 error = mii_phy_probe(dev, &sc_if->msk_miibus, msk_mediachange,
1507 msk_mediastatus);
1508 if (error != 0) {
1509 device_printf(sc_if->msk_if_dev, "no PHY found!\n");
1510 ether_ifdetach(ifp);
1511 error = ENXIO;
1512 goto fail;
1513 }
1514 /* Check whether PHY Id is MARVELL. */
1515 if (msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_ID0)
1516 == PHY_MARV_ID0_VAL)
1517 sc->msk_marvell_phy = 1;
1518
1519fail:
1520 if (error != 0) {
1521 /* Access should be ok even though lock has been dropped */
1522 sc->msk_if[port] = NULL;
1523 msk_detach(dev);
1524 }
1525
1526 return (error);
1527}
1528
1529/*
1530 * Attach the interface. Allocate softc structures, do ifmedia
1531 * setup and ethernet/BPF attach.
1532 */
1533static int
1534mskc_attach(device_t dev)
1535{
1536 struct msk_softc *sc;
1537 int error, msic, *port, reg;
1538
1539 sc = device_get_softc(dev);
1540 sc->msk_dev = dev;
1541 mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1542 MTX_DEF);
1543
1544 /*
1545 * Map control/status registers.
1546 */
1547 pci_enable_busmaster(dev);
1548
361static int
362msk_miibus_readreg(device_t dev, int phy, int reg)
363{
364 struct msk_if_softc *sc_if;
365
366 sc_if = device_get_softc(dev);
367
368 return (msk_phy_readreg(sc_if, phy, reg));
369}
370
371static int
372msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
373{
374 struct msk_softc *sc;
375 int i, val;
376
377 sc = sc_if->msk_softc;
378
379 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
380 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
381
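	/* Poll until the SMI unit flags the read data as valid, then fetch it. */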
382 for (i = 0; i < MSK_TIMEOUT; i++) {
383 DELAY(1);
384 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
385 if ((val & GM_SMI_CT_RD_VAL) != 0) {
386 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
387 break;
388 }
389 }
390
391 if (i == MSK_TIMEOUT) {
392 if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
393 val = 0;
394 }
395
396 return (val);
397}
398
399static int
400msk_miibus_writereg(device_t dev, int phy, int reg, int val)
401{
402 struct msk_if_softc *sc_if;
403
404 sc_if = device_get_softc(dev);
405
406 return (msk_phy_writereg(sc_if, phy, reg, val));
407}
408
409static int
410msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
411{
412 struct msk_softc *sc;
413 int i;
414
415 sc = sc_if->msk_softc;
416
417 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
418 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
419 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
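	/* Wait for the SMI unit to complete the write to the PHY. */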
420 for (i = 0; i < MSK_TIMEOUT; i++) {
421 DELAY(1);
422 if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
423 GM_SMI_CT_BUSY) == 0)
424 break;
425 }
426 if (i == MSK_TIMEOUT)
427 if_printf(sc_if->msk_ifp, "phy write timeout\n");
428
429 return (0);
430}
431
432static void
433msk_miibus_statchg(device_t dev)
434{
435 struct msk_if_softc *sc_if;
436
437 sc_if = device_get_softc(dev);
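	/* Defer link state processing to a task context; see msk_link_task(). */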
438 taskqueue_enqueue(taskqueue_swi, &sc_if->msk_link_task);
439}
440
441static void
442msk_link_task(void *arg, int pending)
443{
444 struct msk_softc *sc;
445 struct msk_if_softc *sc_if;
446 struct mii_data *mii;
447 struct ifnet *ifp;
448 uint32_t gmac, ane;
449
450 sc_if = (struct msk_if_softc *)arg;
451 sc = sc_if->msk_softc;
452
453 MSK_IF_LOCK(sc_if);
454
455 mii = device_get_softc(sc_if->msk_miibus);
456 ifp = sc_if->msk_ifp;
457 if (mii == NULL || ifp == NULL) {
458 MSK_IF_UNLOCK(sc_if);
459 return;
460 }
461
462 if (mii->mii_media_status & IFM_ACTIVE) {
463 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
464 sc_if->msk_link = 1;
465 } else
466 sc_if->msk_link = 0;
467
468 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
469 ane = 0;
470 if (sc_if->msk_link != 0) {
471 /* Enable Tx FIFO Underrun. */
472 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
473 GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
474 switch (IFM_SUBTYPE(mii->mii_media_active)) {
475 case IFM_AUTO:
476 ane = 1;
477 break;
478 case IFM_1000_SX:
479 case IFM_1000_T:
480 gmac &= ~GM_GPCR_SPEED_100;
481 gmac |= GM_GPCR_SPEED_1000;
482 break;
483 case IFM_100_TX:
484 gmac |= GM_GPCR_SPEED_100;
485 gmac &= ~GM_GPCR_SPEED_1000;
486 break;
487 case IFM_10_T:
488 gmac &= ~(GM_GPCR_SPEED_100 | GM_GPCR_SPEED_1000);
489 break;
490 }
491
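		/* For a fixed media selection, disable the GMAC's automatic parameter updates. */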
492 if (ane == 0)
493 gmac |= GM_GPCR_AU_ALL_DIS;
494 else
495 gmac &= ~GM_GPCR_AU_ALL_DIS;
496 if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
497 gmac |= GM_GPCR_DUP_FULL;
498 /* Enable Rx flow control. */
499 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) != 0)
500 gmac &= ~GM_GPCR_FC_RX_DIS;
501 /* Enable Tx flow control. */
502 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) != 0)
503 gmac &= ~GM_GPCR_FC_TX_DIS;
504 gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
505 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
506		/* Read back to make sure the write has taken effect. */
507 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
508
509 gmac = GMC_PAUSE_ON;
510 if (((mii->mii_media_active & IFM_GMASK) &
511 (IFM_FLAG0 | IFM_FLAG1)) == 0)
512 gmac = GMC_PAUSE_OFF;
513		/* Disable pause for 10/100 Mbps in half-duplex mode. */
514 if ((((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) &&
515 (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX ||
516 IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T))
517 gmac = GMC_PAUSE_OFF;
518 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);
519
520 /* Enable PHY interrupt for FIFO underrun/overflow. */
521 if (sc->msk_marvell_phy)
522 msk_phy_writereg(sc_if, PHY_ADDR_MARV,
523 PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
524 } else {
525 /*
526 * Link state changed to down.
527 * Disable PHY interrupts.
528 */
529 if (sc->msk_marvell_phy)
530 msk_phy_writereg(sc_if, PHY_ADDR_MARV,
531 PHY_MARV_INT_MASK, 0);
532 /* Disable Rx/Tx MAC. */
533 gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
534 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
535		/* Read back to make sure the write has taken effect. */
536 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
537 }
538
539 MSK_IF_UNLOCK(sc_if);
540}
541
542static void
543msk_setmulti(struct msk_if_softc *sc_if)
544{
545 struct msk_softc *sc;
546 struct ifnet *ifp;
547 struct ifmultiaddr *ifma;
548 uint32_t mchash[2];
549 uint32_t crc;
550 uint16_t mode;
551
552 sc = sc_if->msk_softc;
553
554 MSK_IF_LOCK_ASSERT(sc_if);
555
556 ifp = sc_if->msk_ifp;
557
558 bzero(mchash, sizeof(mchash));
559 mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
560 mode |= GM_RXCR_UCF_ENA;
561 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
562 if ((ifp->if_flags & IFF_PROMISC) != 0)
563 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
564 else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
565 mchash[0] = 0xffff;
566 mchash[1] = 0xffff;
567 }
568 } else {
569 IF_ADDR_LOCK(ifp);
570 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
571 if (ifma->ifma_addr->sa_family != AF_LINK)
572 continue;
573 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
574 ifma->ifma_addr), ETHER_ADDR_LEN);
575 /* Just want the 6 least significant bits. */
576 crc &= 0x3f;
577 /* Set the corresponding bit in the hash table. */
578 mchash[crc >> 5] |= 1 << (crc & 0x1f);
579 }
580 IF_ADDR_UNLOCK(ifp);
581 mode |= GM_RXCR_MCF_ENA;
582 }
583
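	/* Program the 64-bit multicast hash filter, 16 bits per register. */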
584 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
585 mchash[0] & 0xffff);
586 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
587 (mchash[0] >> 16) & 0xffff);
588 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
589 mchash[1] & 0xffff);
590 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
591 (mchash[1] >> 16) & 0xffff);
592 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
593}
594
595static void
596msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
597{
598 struct msk_softc *sc;
599
600 sc = sc_if->msk_softc;
601 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
602 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
603 RX_VLAN_STRIP_ON);
604 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
605 TX_VLAN_TAG_ON);
606 } else {
607 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
608 RX_VLAN_STRIP_OFF);
609 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
610 TX_VLAN_TAG_OFF);
611 }
612}
613
614static void
615msk_setpromisc(struct msk_if_softc *sc_if)
616{
617 struct msk_softc *sc;
618 struct ifnet *ifp;
619 uint16_t mode;
620
621 MSK_IF_LOCK_ASSERT(sc_if);
622
623 sc = sc_if->msk_softc;
624 ifp = sc_if->msk_ifp;
625
626 mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
627 if (ifp->if_flags & IFF_PROMISC)
628 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
629 else
630 mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
631 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
632}
633
634static int
635msk_init_rx_ring(struct msk_if_softc *sc_if)
636{
637 struct msk_ring_data *rd;
638 struct msk_rxdesc *rxd;
639 int i, prod;
640
641 MSK_IF_LOCK_ASSERT(sc_if);
642
643 sc_if->msk_cdata.msk_rx_cons = 0;
644 sc_if->msk_cdata.msk_rx_prod = 0;
645 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
646
647 rd = &sc_if->msk_rdata;
648 bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
649 prod = sc_if->msk_cdata.msk_rx_prod;
650 for (i = 0; i < MSK_RX_RING_CNT; i++) {
651 rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
652 rxd->rx_m = NULL;
653 rxd->rx_le = &rd->msk_rx_ring[prod];
654 if (msk_newbuf(sc_if, prod) != 0)
655 return (ENOBUFS);
656 MSK_INC(prod, MSK_RX_RING_CNT);
657 }
658
659 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
660 sc_if->msk_cdata.msk_rx_ring_map,
661 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
662
663 /* Update prefetch unit. */
664 sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
665 CSR_WRITE_2(sc_if->msk_softc,
666 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
667 sc_if->msk_cdata.msk_rx_prod);
668
669 return (0);
670}
671
672static int
673msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
674{
675 struct msk_ring_data *rd;
676 struct msk_rxdesc *rxd;
677 int i, prod;
678
679 MSK_IF_LOCK_ASSERT(sc_if);
680
681 sc_if->msk_cdata.msk_rx_cons = 0;
682 sc_if->msk_cdata.msk_rx_prod = 0;
683 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
684
685 rd = &sc_if->msk_rdata;
686 bzero(rd->msk_jumbo_rx_ring,
687 sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
688 prod = sc_if->msk_cdata.msk_rx_prod;
689 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
690 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
691 rxd->rx_m = NULL;
692 rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
693 if (msk_jumbo_newbuf(sc_if, prod) != 0)
694 return (ENOBUFS);
695 MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
696 }
697
698 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
699 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
700 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
701
702 sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
703 CSR_WRITE_2(sc_if->msk_softc,
704 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
705 sc_if->msk_cdata.msk_rx_prod);
706
707 return (0);
708}
709
710static void
711msk_init_tx_ring(struct msk_if_softc *sc_if)
712{
713 struct msk_ring_data *rd;
714 struct msk_txdesc *txd;
715 int i;
716
717 sc_if->msk_cdata.msk_tso_mtu = 0;
718 sc_if->msk_cdata.msk_tx_prod = 0;
719 sc_if->msk_cdata.msk_tx_cons = 0;
720 sc_if->msk_cdata.msk_tx_cnt = 0;
721
722 rd = &sc_if->msk_rdata;
723 bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
724 for (i = 0; i < MSK_TX_RING_CNT; i++) {
725 txd = &sc_if->msk_cdata.msk_txdesc[i];
726 txd->tx_m = NULL;
727 txd->tx_le = &rd->msk_tx_ring[i];
728 }
729
730 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
731 sc_if->msk_cdata.msk_tx_ring_map,
732 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
733}
734
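/*
 * Hand an Rx descriptor back to the hardware while reusing the mbuf that
 * is already attached to it; used when a received frame is dropped so no
 * replacement buffer needs to be allocated.
 */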
735static __inline void
736msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
737{
738 struct msk_rx_desc *rx_le;
739 struct msk_rxdesc *rxd;
740 struct mbuf *m;
741
742 rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
743 m = rxd->rx_m;
744 rx_le = rxd->rx_le;
745 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
746}
747
748static __inline void
749msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
750{
751 struct msk_rx_desc *rx_le;
752 struct msk_rxdesc *rxd;
753 struct mbuf *m;
754
755 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
756 m = rxd->rx_m;
757 rx_le = rxd->rx_le;
758 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
759}
760
761static int
762msk_newbuf(struct msk_if_softc *sc_if, int idx)
763{
764 struct msk_rx_desc *rx_le;
765 struct msk_rxdesc *rxd;
766 struct mbuf *m;
767 bus_dma_segment_t segs[1];
768 bus_dmamap_t map;
769 int nsegs;
770
771 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
772 if (m == NULL)
773 return (ENOBUFS);
774
775 m->m_len = m->m_pkthdr.len = MCLBYTES;
776 m_adj(m, ETHER_ALIGN);
777
778 if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag,
779 sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs,
780 BUS_DMA_NOWAIT) != 0) {
781 m_freem(m);
782 return (ENOBUFS);
783 }
784 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
785
786 rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
787 if (rxd->rx_m != NULL) {
788 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
789 BUS_DMASYNC_POSTREAD);
790 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
791 }
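	/* Swap the spare DMA map into this slot; the old map becomes the new spare. */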
792 map = rxd->rx_dmamap;
793 rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
794 sc_if->msk_cdata.msk_rx_sparemap = map;
795 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
796 BUS_DMASYNC_PREREAD);
797 rxd->rx_m = m;
798 rx_le = rxd->rx_le;
799 rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
800 rx_le->msk_control =
801 htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
802
803 return (0);
804}
805
806static int
807msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
808{
809 struct msk_rx_desc *rx_le;
810 struct msk_rxdesc *rxd;
811 struct mbuf *m;
812 bus_dma_segment_t segs[1];
813 bus_dmamap_t map;
814 int nsegs;
815 void *buf;
816
817 MGETHDR(m, M_DONTWAIT, MT_DATA);
818 if (m == NULL)
819 return (ENOBUFS);
820 buf = msk_jalloc(sc_if);
821 if (buf == NULL) {
822 m_freem(m);
823 return (ENOBUFS);
824 }
825 /* Attach the buffer to the mbuf. */
826 MEXTADD(m, buf, MSK_JLEN, msk_jfree, (struct msk_if_softc *)sc_if, 0,
827 EXT_NET_DRV);
828 if ((m->m_flags & M_EXT) == 0) {
829 m_freem(m);
830 return (ENOBUFS);
831 }
832 m->m_pkthdr.len = m->m_len = MSK_JLEN;
833 m_adj(m, ETHER_ALIGN);
834
835 if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
836 sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
837 BUS_DMA_NOWAIT) != 0) {
838 m_freem(m);
839 return (ENOBUFS);
840 }
841 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
842
843 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
844 if (rxd->rx_m != NULL) {
845 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
846 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
847 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
848 rxd->rx_dmamap);
849 }
850 map = rxd->rx_dmamap;
851 rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
852 sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
853 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
854 BUS_DMASYNC_PREREAD);
855 rxd->rx_m = m;
856 rx_le = rxd->rx_le;
857 rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
858 rx_le->msk_control =
859 htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
860
861 return (0);
862}
863
864/*
865 * Set media options.
866 */
867static int
868msk_mediachange(struct ifnet *ifp)
869{
870 struct msk_if_softc *sc_if;
871 struct mii_data *mii;
872
873 sc_if = ifp->if_softc;
874
875 MSK_IF_LOCK(sc_if);
876 mii = device_get_softc(sc_if->msk_miibus);
877 mii_mediachg(mii);
878 MSK_IF_UNLOCK(sc_if);
879
880 return (0);
881}
882
883/*
884 * Report current media status.
885 */
886static void
887msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
888{
889 struct msk_if_softc *sc_if;
890 struct mii_data *mii;
891
892 sc_if = ifp->if_softc;
893 MSK_IF_LOCK(sc_if);
894 mii = device_get_softc(sc_if->msk_miibus);
895
896 mii_pollstat(mii);
897 MSK_IF_UNLOCK(sc_if);
898 ifmr->ifm_active = mii->mii_media_active;
899 ifmr->ifm_status = mii->mii_media_status;
900}
901
902static int
903msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
904{
905 struct msk_if_softc *sc_if;
906 struct ifreq *ifr;
907 struct mii_data *mii;
908 int error, mask;
909
910 sc_if = ifp->if_softc;
911 ifr = (struct ifreq *)data;
912 error = 0;
913
914 switch(command) {
915 case SIOCSIFMTU:
916 if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) {
917 error = EINVAL;
918 break;
919 }
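		/* Yukon EC Ultra cannot handle frames larger than MSK_MAX_FRAMELEN. */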
920 if (sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
921 ifr->ifr_mtu > MSK_MAX_FRAMELEN) {
922 error = EINVAL;
923 break;
924 }
925 MSK_IF_LOCK(sc_if);
926 ifp->if_mtu = ifr->ifr_mtu;
927 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
928 msk_init_locked(sc_if);
929 MSK_IF_UNLOCK(sc_if);
930 break;
931 case SIOCSIFFLAGS:
932 MSK_IF_LOCK(sc_if);
933 if ((ifp->if_flags & IFF_UP) != 0) {
934 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
935 if (((ifp->if_flags ^ sc_if->msk_if_flags)
936 & IFF_PROMISC) != 0) {
937 msk_setpromisc(sc_if);
938 msk_setmulti(sc_if);
939 }
940 } else {
941 if (sc_if->msk_detach == 0)
942 msk_init_locked(sc_if);
943 }
944 } else {
945 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
946 msk_stop(sc_if);
947 }
948 sc_if->msk_if_flags = ifp->if_flags;
949 MSK_IF_UNLOCK(sc_if);
950 break;
951 case SIOCADDMULTI:
952 case SIOCDELMULTI:
953 MSK_IF_LOCK(sc_if);
954 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
955 msk_setmulti(sc_if);
956 MSK_IF_UNLOCK(sc_if);
957 break;
958 case SIOCGIFMEDIA:
959 case SIOCSIFMEDIA:
960 mii = device_get_softc(sc_if->msk_miibus);
961 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
962 break;
963 case SIOCSIFCAP:
964 MSK_IF_LOCK(sc_if);
965 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
966 if ((mask & IFCAP_TXCSUM) != 0) {
967 ifp->if_capenable ^= IFCAP_TXCSUM;
968 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
969 (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
970 ifp->if_hwassist |= MSK_CSUM_FEATURES;
971 else
972 ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
973 }
974 if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
975 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
976 msk_setvlan(sc_if, ifp);
977 }
978
979 if ((mask & IFCAP_TSO4) != 0) {
980 ifp->if_capenable ^= IFCAP_TSO4;
981 if ((IFCAP_TSO4 & ifp->if_capenable) != 0 &&
982 (IFCAP_TSO4 & ifp->if_capabilities) != 0)
983 ifp->if_hwassist |= CSUM_TSO;
984 else
985 ifp->if_hwassist &= ~CSUM_TSO;
986 }
987 VLAN_CAPABILITIES(ifp);
988 MSK_IF_UNLOCK(sc_if);
989 break;
990 default:
991 error = ether_ioctl(ifp, command, data);
992 break;
993 }
994
995 return (error);
996}
997
998static int
999mskc_probe(device_t dev)
1000{
1001 struct msk_product *mp;
1002 uint16_t vendor, devid;
1003 int i;
1004
1005 vendor = pci_get_vendor(dev);
1006 devid = pci_get_device(dev);
1007 mp = msk_products;
1008 for (i = 0; i < sizeof(msk_products)/sizeof(msk_products[0]);
1009 i++, mp++) {
1010 if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
1011 device_set_desc(dev, mp->msk_name);
1012 return (BUS_PROBE_DEFAULT);
1013 }
1014 }
1015
1016 return (ENXIO);
1017}
1018
1019static int
1020mskc_setup_rambuffer(struct msk_softc *sc)
1021{
1022 int totqsize, minqsize;
1023 int avail, next;
1024 int i;
1025 uint8_t val;
1026
1027 /* Get adapter SRAM size. */
1028 val = CSR_READ_1(sc, B2_E_0);
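	/* The register counts 4KB units; a value of 0 denotes the 128KB maximum. */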
1029 sc->msk_ramsize = (val == 0) ? 128 : val * 4;
1030 if (sc->msk_hw_id == CHIP_ID_YUKON_FE)
1031 sc->msk_ramsize = 4 * 4;
1032 if (bootverbose)
1033 device_printf(sc->msk_dev,
1034 "RAM buffer size : %dKB\n", sc->msk_ramsize);
1035
1036 totqsize = sc->msk_ramsize * sc->msk_num_port;
1037 minqsize = MSK_MIN_RXQ_SIZE + MSK_MIN_TXQ_SIZE;
1038 if (minqsize > sc->msk_ramsize)
1039 minqsize = sc->msk_ramsize;
1040
1041 if (minqsize * sc->msk_num_port > totqsize) {
1042 device_printf(sc->msk_dev,
1043 "not enough RAM buffer memory : %d/%dKB\n",
1044 minqsize * sc->msk_num_port, totqsize);
1045 return (ENOSPC);
1046 }
1047
1048 avail = totqsize;
1049 if (sc->msk_num_port > 1) {
1050 /*
1051		 * Divide the memory evenly so that each port of a dual
1052		 * port adapter gets a fair share.
1053 */
1054 avail = sc->msk_ramsize;
1055 }
1056
1057 /* Take away the minimum memory for active queues. */
1058 avail -= minqsize;
1059 /* Rx queue gets the minimum + 80% of the rest. */
1060 sc->msk_rxqsize =
1061 (avail * MSK_RAM_QUOTA_RX) / 100 + MSK_MIN_RXQ_SIZE;
1062 avail -= (sc->msk_rxqsize - MSK_MIN_RXQ_SIZE);
1063 sc->msk_txqsize = avail + MSK_MIN_TXQ_SIZE;
1064
1065 for (i = 0, next = 0; i < sc->msk_num_port; i++) {
1066 sc->msk_rxqstart[i] = next;
1067 sc->msk_rxqend[i] = next + (sc->msk_rxqsize * 1024) - 1;
1068 next = sc->msk_rxqend[i] + 1;
1069 sc->msk_txqstart[i] = next;
1070 sc->msk_txqend[i] = next + (sc->msk_txqsize * 1024) - 1;
1071 next = sc->msk_txqend[i] + 1;
1072 if (bootverbose) {
1073 device_printf(sc->msk_dev,
1074 "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
1075 sc->msk_rxqsize, sc->msk_rxqstart[i],
1076 sc->msk_rxqend[i]);
1077 device_printf(sc->msk_dev,
1078 "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
1079 sc->msk_txqsize, sc->msk_txqstart[i],
1080 sc->msk_txqend[i]);
1081 }
1082 }
1083
1084 return (0);
1085}
1086
1087static void
1088msk_phy_power(struct msk_softc *sc, int mode)
1089{
1090 uint32_t val;
1091 int i;
1092
1093 switch (mode) {
1094 case MSK_PHY_POWERUP:
1095 /* Switch power to VCC (WA for VAUX problem). */
1096 CSR_WRITE_1(sc, B0_POWER_CTRL,
1097 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
1098 /* Disable Core Clock Division, set Clock Select to 0. */
1099 CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
1100
1101 val = 0;
1102 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1103 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1104 /* Enable bits are inverted. */
1105 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1106 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1107 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1108 }
1109 /*
1110 * Enable PCI & Core Clock, enable clock gating for both Links.
1111 */
1112 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1113
1114 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1115 val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
1116 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1117 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1118 /* Deassert Low Power for 1st PHY. */
1119 val |= PCI_Y2_PHY1_COMA;
1120 if (sc->msk_num_port > 1)
1121 val |= PCI_Y2_PHY2_COMA;
1122 } else if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
1123 uint32_t our;
1124
1125 CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
1126
1127 /* Enable all clocks. */
1128 pci_write_config(sc->msk_dev, PCI_OUR_REG_3, 0, 4);
1129 our = pci_read_config(sc->msk_dev, PCI_OUR_REG_4, 4);
1130 our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN|
1131 PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST);
1132 /* Set all bits to 0 except bits 15..12. */
1133 pci_write_config(sc->msk_dev, PCI_OUR_REG_4, our, 4);
1134 /* Set to default value. */
1135 pci_write_config(sc->msk_dev, PCI_OUR_REG_5, 0, 4);
1136 }
1137 /* Release PHY from PowerDown/COMA mode. */
1138 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1139 for (i = 0; i < sc->msk_num_port; i++) {
1140 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1141 GMLC_RST_SET);
1142 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1143 GMLC_RST_CLR);
1144 }
1145 break;
1146 case MSK_PHY_POWERDOWN:
1147 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1148 val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
1149 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1150 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1151 val &= ~PCI_Y2_PHY1_COMA;
1152 if (sc->msk_num_port > 1)
1153 val &= ~PCI_Y2_PHY2_COMA;
1154 }
1155 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1156
1157 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1158 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1159 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1160 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1161 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1162 /* Enable bits are inverted. */
1163 val = 0;
1164 }
1165 /*
1166 * Disable PCI & Core Clock, disable clock gating for
1167 * both Links.
1168 */
1169 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1170 CSR_WRITE_1(sc, B0_POWER_CTRL,
1171 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
1172 break;
1173 default:
1174 break;
1175 }
1176}
1177
1178static void
1179mskc_reset(struct msk_softc *sc)
1180{
1181 bus_addr_t addr;
1182 uint16_t status;
1183 uint32_t val;
1184 int i;
1185
1186 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1187
1188 /* Disable ASF. */
1189 if (sc->msk_hw_id < CHIP_ID_YUKON_XL) {
1190 CSR_WRITE_4(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
1191 CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
1192 }
1193 /*
1194 * Since we disabled ASF, S/W reset is required for Power Management.
1195 */
1196 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1197 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1198
1199 /* Clear all error bits in the PCI status register. */
1200 status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
1201 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1202
1203 pci_write_config(sc->msk_dev, PCIR_STATUS, status |
1204 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
1205 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
1206 CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);
1207
1208 switch (sc->msk_bustype) {
1209 case MSK_PEX_BUS:
1210 /* Clear all PEX errors. */
1211 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
1212 val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
1213 if ((val & PEX_RX_OV) != 0) {
1214 sc->msk_intrmask &= ~Y2_IS_HW_ERR;
1215 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
1216 }
1217 break;
1218 case MSK_PCI_BUS:
1219 case MSK_PCIX_BUS:
1220 /* Set Cache Line Size to 2(8bytes) if configured to 0. */
1221 val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
1222 if (val == 0)
1223 pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
1224 if (sc->msk_bustype == MSK_PCIX_BUS) {
1225 /* Set Cache Line Size opt. */
1226 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1227 val |= PCI_CLS_OPT;
1228 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1229 }
1230 break;
1231 }
1232 /* Set PHY power state. */
1233 msk_phy_power(sc, MSK_PHY_POWERUP);
1234
1235 /* Reset GPHY/GMAC Control */
1236 for (i = 0; i < sc->msk_num_port; i++) {
1237 /* GPHY Control reset. */
1238 CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
1239 CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
1240 /* GMAC Control reset. */
1241 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
1242 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
1243 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
1244 }
1245 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1246
1247 /* LED On. */
1248 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);
1249
1250 /* Clear TWSI IRQ. */
1251 CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);
1252
1253 /* Turn off hardware timer. */
1254 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
1255 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);
1256
1257 /* Turn off descriptor polling. */
1258 CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);
1259
1260 /* Turn off time stamps. */
1261 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
1262 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
1263
1264 /* Configure timeout values. */
1265 for (i = 0; i < sc->msk_num_port; i++) {
1266 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
1267 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
1268 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
1269 MSK_RI_TO_53);
1270 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
1271 MSK_RI_TO_53);
1272 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
1273 MSK_RI_TO_53);
1274 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
1275 MSK_RI_TO_53);
1276 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
1277 MSK_RI_TO_53);
1278 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
1279 MSK_RI_TO_53);
1280 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
1281 MSK_RI_TO_53);
1282 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
1283 MSK_RI_TO_53);
1284 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
1285 MSK_RI_TO_53);
1286 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
1287 MSK_RI_TO_53);
1288 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
1289 MSK_RI_TO_53);
1290 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
1291 MSK_RI_TO_53);
1292 }
1293
1294 /* Disable all interrupts. */
1295 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1296 CSR_READ_4(sc, B0_HWE_IMSK);
1297 CSR_WRITE_4(sc, B0_IMSK, 0);
1298 CSR_READ_4(sc, B0_IMSK);
1299
1300 /*
1301	 * On dual port PCI-X cards, there is a problem where status
1302	 * can be received out of order due to split transactions.
1303 */
1304 if (sc->msk_bustype == MSK_PCIX_BUS && sc->msk_num_port > 1) {
1305 int pcix;
1306 uint16_t pcix_cmd;
1307
1308 if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &pcix) == 0) {
1309 pcix_cmd = pci_read_config(sc->msk_dev, pcix + 2, 2);
1310 /* Clear Max Outstanding Split Transactions. */
1311 pcix_cmd &= ~0x70;
1312 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1313 pci_write_config(sc->msk_dev, pcix + 2, pcix_cmd, 2);
1314 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1315 }
1316 }
1317 if (sc->msk_bustype == MSK_PEX_BUS) {
1318 uint16_t v, width;
1319
1320 v = pci_read_config(sc->msk_dev, PEX_DEV_CTRL, 2);
1321 /* Change Max. Read Request Size to 4096 bytes. */
1322 v &= ~PEX_DC_MAX_RRS_MSK;
1323 v |= PEX_DC_MAX_RD_RQ_SIZE(5);
1324 pci_write_config(sc->msk_dev, PEX_DEV_CTRL, v, 2);
1325 width = pci_read_config(sc->msk_dev, PEX_LNK_STAT, 2);
1326 width = (width & PEX_LS_LINK_WI_MSK) >> 4;
1327 v = pci_read_config(sc->msk_dev, PEX_LNK_CAP, 2);
1328 v = (v & PEX_LS_LINK_WI_MSK) >> 4;
1329 if (v != width)
1330 device_printf(sc->msk_dev,
1331 "negotiated width of link(x%d) != "
1332 "max. width of link(x%d)\n", width, v);
1333 }
1334
1335 /* Clear status list. */
1336 bzero(sc->msk_stat_ring,
1337 sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
1338 sc->msk_stat_cons = 0;
1339 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
1340 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1341 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
1342 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
1343 /* Set the status list base address. */
1344 addr = sc->msk_stat_ring_paddr;
1345 CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
1346 CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
1347 /* Set the status list last index. */
1348 CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
1349 if (HW_FEATURE(sc, HWF_WA_DEV_43_418)) {
1350 /* WA for dev. #4.3 */
1351 CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
1352 /* WA for dev. #4.18 */
1353 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
1354 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
1355 } else {
1356 CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
1357 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
1358 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM,
1359 HW_FEATURE(sc, HWF_WA_DEV_4109) ? 0x10 : 0x04);
1360 CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
1361 }
1362 /*
1363 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
1364 */
1365 CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));
1366
1367 /* Enable status unit. */
1368 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);
1369
1370 CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
1371 CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
1372 CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
1373}
1374
1375static int
1376msk_probe(device_t dev)
1377{
1378 struct msk_softc *sc;
1379 char desc[100];
1380
1381 sc = device_get_softc(device_get_parent(dev));
1382 /*
1383 * Not much to do here. We always know there will be
1384 * at least one GMAC present, and if there are two,
1385 * mskc_attach() will create a second device instance
1386 * for us.
1387 */
1388 snprintf(desc, sizeof(desc),
1389 "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
1390 model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
1391 sc->msk_hw_rev);
1392 device_set_desc_copy(dev, desc);
1393
1394 return (BUS_PROBE_DEFAULT);
1395}
1396
1397static int
1398msk_attach(device_t dev)
1399{
1400 struct msk_softc *sc;
1401 struct msk_if_softc *sc_if;
1402 struct ifnet *ifp;
1403 int i, port, error;
1404 uint8_t eaddr[6];
1405
1406 if (dev == NULL)
1407 return (EINVAL);
1408
1409 error = 0;
1410 sc_if = device_get_softc(dev);
1411 sc = device_get_softc(device_get_parent(dev));
1412 port = *(int *)device_get_ivars(dev);
1413
1414 sc_if->msk_if_dev = dev;
1415 sc_if->msk_port = port;
1416 sc_if->msk_softc = sc;
1417 sc->msk_if[port] = sc_if;
1418 /* Setup Tx/Rx queue register offsets. */
1419 if (port == MSK_PORT_A) {
1420 sc_if->msk_txq = Q_XA1;
1421 sc_if->msk_txsq = Q_XS1;
1422 sc_if->msk_rxq = Q_R1;
1423 } else {
1424 sc_if->msk_txq = Q_XA2;
1425 sc_if->msk_txsq = Q_XS2;
1426 sc_if->msk_rxq = Q_R2;
1427 }
1428
1429 callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
1430 callout_init_mtx(&sc_if->msk_watchdog_ch, &sc_if->msk_softc->msk_mtx,
1431 0);
1432 TASK_INIT(&sc_if->msk_link_task, 0, msk_link_task, sc_if);
1433
1434	if ((error = msk_txrx_dma_alloc(sc_if)) != 0)
1435 goto fail;
1436
1437 ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
1438 if (ifp == NULL) {
1439 device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
1440 error = ENOSPC;
1441 goto fail;
1442 }
1443 ifp->if_softc = sc_if;
1444 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1445 ifp->if_mtu = ETHERMTU;
1446 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1447 /*
1448	 * The IFCAP_RXCSUM capability is intentionally disabled because Rx
1449	 * checksum offload has a serious bug on all Yukon II family hardware.
1450	 * There appears to be a workaround that makes it work some of the
1451	 * time, but the workaround has to check OP code sequences to verify
1452	 * that the OP code is correct, and sometimes must compute the
1453	 * IP/TCP/UDP checksum in the driver to verify the checksum computed
1454	 * by the hardware. If software has to compute the checksum anyway to
1455	 * verify the hardware's result, there is little point in having the
1456	 * hardware compute it, so no effort is made to get Rx checksum
1457	 * offload working on Yukon II hardware.
1458 */
1459 ifp->if_capabilities = IFCAP_TXCSUM;
1460 ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO;
1461 if (sc->msk_hw_id != CHIP_ID_YUKON_EC_U) {
1462 /* It seems Yukon EC Ultra doesn't support TSO. */
1463 ifp->if_capabilities |= IFCAP_TSO4;
1464 ifp->if_hwassist |= CSUM_TSO;
1465 }
1466 ifp->if_capenable = ifp->if_capabilities;
1467 ifp->if_ioctl = msk_ioctl;
1468 ifp->if_start = msk_start;
1469 ifp->if_timer = 0;
1470 ifp->if_watchdog = NULL;
1471 ifp->if_init = msk_init;
1472 IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
1473 ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1;
1474 IFQ_SET_READY(&ifp->if_snd);
1475
1476 TASK_INIT(&sc_if->msk_tx_task, 1, msk_tx_task, ifp);
1477
1478 /*
1479 * Get station address for this interface. Note that
1480 * dual port cards actually come with three station
1481 * addresses: one for each port, plus an extra. The
1482 * extra one is used by the SysKonnect driver software
1483 * as a 'virtual' station address for when both ports
1484 * are operating in failover mode. Currently we don't
1485 * use this extra address.
1486 */
1487 MSK_IF_LOCK(sc_if);
1488 for (i = 0; i < ETHER_ADDR_LEN; i++)
1489 eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);
1490
1491 /*
1492 * Call MI attach routine. Can't hold locks when calling into ether_*.
1493 */
1494 MSK_IF_UNLOCK(sc_if);
1495 ether_ifattach(ifp, eaddr);
1496 MSK_IF_LOCK(sc_if);
1497
1498 /* VLAN capability setup */
1499 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
1500 if (ifp->if_capabilities & IFCAP_HWCSUM)
1501 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
1502 ifp->if_capenable = ifp->if_capabilities;
1503
1504 /*
1505 * Tell the upper layer(s) we support long frames.
1506 * Must appear after the call to ether_ifattach() because
1507 * ether_ifattach() sets ifi_hdrlen to the default value.
1508 */
1509 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1510
1511 /*
1512 * Do miibus setup.
1513 */
1514 MSK_IF_UNLOCK(sc_if);
1515 error = mii_phy_probe(dev, &sc_if->msk_miibus, msk_mediachange,
1516 msk_mediastatus);
1517 if (error != 0) {
1518 device_printf(sc_if->msk_if_dev, "no PHY found!\n");
1519 ether_ifdetach(ifp);
1520 error = ENXIO;
1521 goto fail;
1522 }
1523 /* Check whether PHY Id is MARVELL. */
1524 if (msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_ID0)
1525 == PHY_MARV_ID0_VAL)
1526 sc->msk_marvell_phy = 1;
1527
1528fail:
1529 if (error != 0) {
1530 /* Access should be ok even though lock has been dropped */
1531 sc->msk_if[port] = NULL;
1532 msk_detach(dev);
1533 }
1534
1535 return (error);
1536}
1537
1538/*
1539 * Attach the interface. Allocate softc structures, do ifmedia
1540 * setup and ethernet/BPF attach.
1541 */
1542static int
1543mskc_attach(device_t dev)
1544{
1545 struct msk_softc *sc;
1546 int error, msic, *port, reg;
1547
1548 sc = device_get_softc(dev);
1549 sc->msk_dev = dev;
1550 mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1551 MTX_DEF);
1552
1553 /*
1554 * Map control/status registers.
1555 */
1556 pci_enable_busmaster(dev);
1557
1549 /* Allocate resources */
1550 sc->msk_msi = 0;
1551 msic = pci_msi_count(dev);
1552 if (bootverbose)
1553 device_printf(dev, "MSI count : %d\n", msic);
1554 /*
1555	 * For an unknown reason the Yukon II reports that it can handle two
1556	 * messages even when it can only handle one. Forcing allocation of a
1557	 * single message seems to work, but then reloading the kernel module
1558	 * after unloading the driver fails. Only use MSI when the device
1559	 * reports a single message, until we have a better understanding of
1560	 * the hardware.
1561 */
1562 if (msic == 1 && msi_disable == 0 && pci_alloc_msi(dev, &msic) == 0) {
1563 sc->msk_msi = 1;
1564 /* Set rid to 1 for SYS_RES_IRQ to use MSI. */
1565 msk_res_spec_io[1].rid = 1;
1566 msk_res_spec_mem[1].rid = 1;
1567 }
1558 /* Allocate I/O resource */
1568#ifdef MSK_USEIOSPACE
1569 sc->msk_res_spec = msk_res_spec_io;
1570#else
1571 sc->msk_res_spec = msk_res_spec_mem;
1572#endif
1573 error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
1574 if (error) {
1575 if (sc->msk_res_spec == msk_res_spec_mem)
1576 sc->msk_res_spec = msk_res_spec_io;
1577 else
1578 sc->msk_res_spec = msk_res_spec_mem;
1579 error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
1580 if (error) {
1581 device_printf(dev, "couldn't allocate %s resources\n",
1582 sc->msk_res_spec == msk_res_spec_mem ? "memory" :
1583 "I/O");
1584 mtx_destroy(&sc->msk_mtx);
1585 return (ENXIO);
1586 }
1587 }
1588
1589 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1590 sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
1591 sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
1592 /* Bail out if chip is not recognized. */
1593 if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
1594 sc->msk_hw_id > CHIP_ID_YUKON_FE) {
1595 device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
1596 sc->msk_hw_id, sc->msk_hw_rev);
1597 error = ENXIO;
1598 goto fail;
1599 }
1600
1601 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1602 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1603 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
1604 &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
1605 "max number of Rx events to process");
1606
1607 sc->msk_process_limit = MSK_PROC_DEFAULT;
1608 error = resource_int_value(device_get_name(dev), device_get_unit(dev),
1609 "process_limit", &sc->msk_process_limit);
1610 if (error == 0) {
1611 if (sc->msk_process_limit < MSK_PROC_MIN ||
1612 sc->msk_process_limit > MSK_PROC_MAX) {
1613 device_printf(dev, "process_limit value out of range; "
1614 "using default: %d\n", MSK_PROC_DEFAULT);
1615 sc->msk_process_limit = MSK_PROC_DEFAULT;
1616 }
1617 }
1618
1619 /* Soft reset. */
1620 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1621 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1622 sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
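	/* PMD 'L' and 'S' (presumably LX/SX fiber) mean fiber media; others are treated as copper. */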
1623 if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
1624 sc->msk_coppertype = 0;
1625 else
1626 sc->msk_coppertype = 1;
1627 /* Check number of MACs. */
1628 sc->msk_num_port = 1;
1629 if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
1630 CFG_DUAL_MAC_MSK) {
1631 if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
1632 sc->msk_num_port++;
1633 }
1634
1635 /* Check bus type. */
1636 if (pci_find_extcap(sc->msk_dev, PCIY_EXPRESS, &reg) == 0)
1637 sc->msk_bustype = MSK_PEX_BUS;
1638 else if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &reg) == 0)
1639 sc->msk_bustype = MSK_PCIX_BUS;
1640 else
1641 sc->msk_bustype = MSK_PCI_BUS;
1642
1643 /* Get H/W features(bugs). */
1644 switch (sc->msk_hw_id) {
1645 case CHIP_ID_YUKON_EC:
1646 sc->msk_clock = 125; /* 125 Mhz */
1647 if (sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
1648 sc->msk_hw_feature =
1649 HWF_WA_DEV_42 | HWF_WA_DEV_46 | HWF_WA_DEV_43_418 |
1650 HWF_WA_DEV_420 | HWF_WA_DEV_423 |
1651 HWF_WA_DEV_424 | HWF_WA_DEV_425 | HWF_WA_DEV_427 |
1652 HWF_WA_DEV_428 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 |
1653 HWF_WA_DEV_4152 | HWF_WA_DEV_4167;
1654 } else {
1655 /* A2/A3 */
1656 sc->msk_hw_feature =
1657 HWF_WA_DEV_424 | HWF_WA_DEV_425 | HWF_WA_DEV_427 |
1658 HWF_WA_DEV_428 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 |
1659 HWF_WA_DEV_4152 | HWF_WA_DEV_4167;
1660 }
1661 break;
1662 case CHIP_ID_YUKON_EC_U:
1663 sc->msk_clock = 125; /* 125 Mhz */
1664 if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
1665 sc->msk_hw_feature = HWF_WA_DEV_427 | HWF_WA_DEV_483 |
1666 HWF_WA_DEV_4109;
1667 } else if (sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
1668 uint16_t v;
1669
1670 sc->msk_hw_feature = HWF_WA_DEV_427 | HWF_WA_DEV_4109 |
1671 HWF_WA_DEV_4185;
1672 v = CSR_READ_2(sc, Q_ADDR(Q_XA1, Q_WM));
1673 if (v == 0)
1674 sc->msk_hw_feature |= HWF_WA_DEV_4185CS |
1675 HWF_WA_DEV_4200;
1676 }
1677 break;
1678 case CHIP_ID_YUKON_FE:
1679 sc->msk_clock = 100; /* 100 Mhz */
1680 sc->msk_hw_feature = HWF_WA_DEV_427 | HWF_WA_DEV_4109 |
1681 HWF_WA_DEV_4152 | HWF_WA_DEV_4167;
1682 break;
1683 case CHIP_ID_YUKON_XL:
1684 sc->msk_clock = 156; /* 156 Mhz */
1685 switch (sc->msk_hw_rev) {
1686 case CHIP_REV_YU_XL_A0:
1687 sc->msk_hw_feature =
1688 HWF_WA_DEV_427 | HWF_WA_DEV_463 | HWF_WA_DEV_472 |
1689 HWF_WA_DEV_479 | HWF_WA_DEV_483 | HWF_WA_DEV_4115 |
1690 HWF_WA_DEV_4152 | HWF_WA_DEV_4167;
1691 break;
1692 case CHIP_REV_YU_XL_A1:
1693 sc->msk_hw_feature =
1694 HWF_WA_DEV_427 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 |
1695 HWF_WA_DEV_4115 | HWF_WA_DEV_4152 | HWF_WA_DEV_4167;
1696 break;
1697 case CHIP_REV_YU_XL_A2:
1698 sc->msk_hw_feature =
1699 HWF_WA_DEV_427 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 |
1700 HWF_WA_DEV_4115 | HWF_WA_DEV_4167;
1701 break;
1702 case CHIP_REV_YU_XL_A3:
1703 sc->msk_hw_feature =
1704 HWF_WA_DEV_427 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 |
1705 HWF_WA_DEV_4115;
1706 }
1707 break;
1708 default:
1709 sc->msk_clock = 156; /* 156 Mhz */
1710 sc->msk_hw_feature = 0;
1711 }
1712
1559#ifdef MSK_USEIOSPACE
1560 sc->msk_res_spec = msk_res_spec_io;
1561#else
1562 sc->msk_res_spec = msk_res_spec_mem;
1563#endif
1564 error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
1565 if (error) {
1566 if (sc->msk_res_spec == msk_res_spec_mem)
1567 sc->msk_res_spec = msk_res_spec_io;
1568 else
1569 sc->msk_res_spec = msk_res_spec_mem;
1570 error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
1571 if (error) {
1572 device_printf(dev, "couldn't allocate %s resources\n",
1573 sc->msk_res_spec == msk_res_spec_mem ? "memory" :
1574 "I/O");
1575 mtx_destroy(&sc->msk_mtx);
1576 return (ENXIO);
1577 }
1578 }
1579
1580 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1581 sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
1582 sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
1583 /* Bail out if chip is not recognized. */
1584 if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
1585 sc->msk_hw_id > CHIP_ID_YUKON_FE) {
1586 device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
1587 sc->msk_hw_id, sc->msk_hw_rev);
1588 error = ENXIO;
1589 goto fail;
1590 }
1591
1592 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1593 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1594 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
1595 &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
1596 "max number of Rx events to process");
1597
1598 sc->msk_process_limit = MSK_PROC_DEFAULT;
1599 error = resource_int_value(device_get_name(dev), device_get_unit(dev),
1600 "process_limit", &sc->msk_process_limit);
1601 if (error == 0) {
1602 if (sc->msk_process_limit < MSK_PROC_MIN ||
1603 sc->msk_process_limit > MSK_PROC_MAX) {
1604 device_printf(dev, "process_limit value out of range; "
1605 "using default: %d\n", MSK_PROC_DEFAULT);
1606 sc->msk_process_limit = MSK_PROC_DEFAULT;
1607 }
1608 }
1609
1610 /* Soft reset. */
1611 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1612 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1613 sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
1614 if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
1615 sc->msk_coppertype = 0;
1616 else
1617 sc->msk_coppertype = 1;
1618 /* Check number of MACs. */
1619 sc->msk_num_port = 1;
1620 if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
1621 CFG_DUAL_MAC_MSK) {
1622 if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
1623 sc->msk_num_port++;
1624 }
1625
1626 /* Check bus type. */
1627 if (pci_find_extcap(sc->msk_dev, PCIY_EXPRESS, &reg) == 0)
1628 sc->msk_bustype = MSK_PEX_BUS;
1629 else if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &reg) == 0)
1630 sc->msk_bustype = MSK_PCIX_BUS;
1631 else
1632 sc->msk_bustype = MSK_PCI_BUS;
1633
1634 /* Get H/W features(bugs). */
1635 switch (sc->msk_hw_id) {
1636 case CHIP_ID_YUKON_EC:
1637 sc->msk_clock = 125; /* 125 Mhz */
1638 if (sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
1639 sc->msk_hw_feature =
1640 HWF_WA_DEV_42 | HWF_WA_DEV_46 | HWF_WA_DEV_43_418 |
1641 HWF_WA_DEV_420 | HWF_WA_DEV_423 |
1642 HWF_WA_DEV_424 | HWF_WA_DEV_425 | HWF_WA_DEV_427 |
1643 HWF_WA_DEV_428 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 |
1644 HWF_WA_DEV_4152 | HWF_WA_DEV_4167;
1645 } else {
1646 /* A2/A3 */
1647 sc->msk_hw_feature =
1648 HWF_WA_DEV_424 | HWF_WA_DEV_425 | HWF_WA_DEV_427 |
1649 HWF_WA_DEV_428 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 |
1650 HWF_WA_DEV_4152 | HWF_WA_DEV_4167;
1651 }
1652 break;
1653 case CHIP_ID_YUKON_EC_U:
1654 sc->msk_clock = 125; /* 125 Mhz */
1655 if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
1656 sc->msk_hw_feature = HWF_WA_DEV_427 | HWF_WA_DEV_483 |
1657 HWF_WA_DEV_4109;
1658 } else if (sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
1659 uint16_t v;
1660
1661 sc->msk_hw_feature = HWF_WA_DEV_427 | HWF_WA_DEV_4109 |
1662 HWF_WA_DEV_4185;
1663 v = CSR_READ_2(sc, Q_ADDR(Q_XA1, Q_WM));
1664 if (v == 0)
1665 sc->msk_hw_feature |= HWF_WA_DEV_4185CS |
1666 HWF_WA_DEV_4200;
1667 }
1668 break;
1669 case CHIP_ID_YUKON_FE:
1670 sc->msk_clock = 100; /* 100 Mhz */
1671 sc->msk_hw_feature = HWF_WA_DEV_427 | HWF_WA_DEV_4109 |
1672 HWF_WA_DEV_4152 | HWF_WA_DEV_4167;
1673 break;
1674 case CHIP_ID_YUKON_XL:
1675 sc->msk_clock = 156; /* 156 Mhz */
1676 switch (sc->msk_hw_rev) {
1677 case CHIP_REV_YU_XL_A0:
1678 sc->msk_hw_feature =
1679 HWF_WA_DEV_427 | HWF_WA_DEV_463 | HWF_WA_DEV_472 |
1680 HWF_WA_DEV_479 | HWF_WA_DEV_483 | HWF_WA_DEV_4115 |
1681 HWF_WA_DEV_4152 | HWF_WA_DEV_4167;
1682 break;
1683 case CHIP_REV_YU_XL_A1:
1684 sc->msk_hw_feature =
1685 HWF_WA_DEV_427 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 |
1686 HWF_WA_DEV_4115 | HWF_WA_DEV_4152 | HWF_WA_DEV_4167;
1687 break;
1688 case CHIP_REV_YU_XL_A2:
1689 sc->msk_hw_feature =
1690 HWF_WA_DEV_427 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 |
1691 HWF_WA_DEV_4115 | HWF_WA_DEV_4167;
1692 break;
1693 case CHIP_REV_YU_XL_A3:
1694 sc->msk_hw_feature =
1695 HWF_WA_DEV_427 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 |
1696 HWF_WA_DEV_4115;
1697 }
1698 break;
1699 default:
1700 sc->msk_clock = 156; /* 156 Mhz */
1701 sc->msk_hw_feature = 0;
1702 }
1703
1704 /* Allocate IRQ resources. */
1705 msic = pci_msi_count(dev);
1706 if (bootverbose)
1707 device_printf(dev, "MSI count : %d\n", msic);
1708 /*
1709 * The Yukon II reports it can handle two messages, one for each
1710 * possible port. We go ahead and allocate two messages and only
1711	 * set up a handler for both if we have a dual port card.
1712 *
1713 * XXX: I haven't untangled the interrupt handler to handle dual
1714 * port cards with separate MSI messages, so for now I disable MSI
1715 * on dual port cards.
1716 */
1717 if (msic == 2 && msi_disable == 0 && sc->msk_num_port == 1 &&
1718 pci_alloc_msi(dev, &msic) == 0) {
1719 if (msic == 2) {
1720 sc->msk_msi = 1;
1721 sc->msk_irq_spec = msk_irq_spec_msi;
1722 } else {
1723 pci_release_msi(dev);
1724 sc->msk_irq_spec = msk_irq_spec_legacy;
1725 }
1726 }
1727
1728 error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq);
1729 if (error) {
1730 device_printf(dev, "couldn't allocate IRQ resources\n");
1731 goto fail;
1732 }
1733
1734 if ((error = msk_status_dma_alloc(sc)) != 0)
1735 goto fail;
1736
1737 /* Set base interrupt mask. */
1738 sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
1739 sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
1740 Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;
1741
1742 /* Reset the adapter. */
1743 mskc_reset(sc);
1744
1745 if ((error = mskc_setup_rambuffer(sc)) != 0)
1746 goto fail;
1747
1748 sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
1749 if (sc->msk_devs[MSK_PORT_A] == NULL) {
1750 device_printf(dev, "failed to add child for PORT_A\n");
1751 error = ENXIO;
1752 goto fail;
1753 }
1754 port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
1755 if (port == NULL) {
1756 device_printf(dev, "failed to allocate memory for "
1757 "ivars of PORT_A\n");
1758 error = ENXIO;
1759 goto fail;
1760 }
1761 *port = MSK_PORT_A;
1762 device_set_ivars(sc->msk_devs[MSK_PORT_A], port);
1763
1764 if (sc->msk_num_port > 1) {
1765 sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
1766 if (sc->msk_devs[MSK_PORT_B] == NULL) {
1767 device_printf(dev, "failed to add child for PORT_B\n");
1768 error = ENXIO;
1769 goto fail;
1770 }
1771 port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
1772 if (port == NULL) {
1773 device_printf(dev, "failed to allocate memory for "
1774 "ivars of PORT_B\n");
1775 error = ENXIO;
1776 goto fail;
1777 }
1778 *port = MSK_PORT_B;
1779 device_set_ivars(sc->msk_devs[MSK_PORT_B], port);
1780 }
1781
1782 error = bus_generic_attach(dev);
1783 if (error) {
1784 device_printf(dev, "failed to attach port(s)\n");
1785 goto fail;
1786 }
1787
1788 TASK_INIT(&sc->msk_int_task, 0, msk_int_task, sc);
1789 sc->msk_tq = taskqueue_create_fast("msk_taskq", M_WAITOK,
1790 taskqueue_thread_enqueue, &sc->msk_tq);
1791 taskqueue_start_threads(&sc->msk_tq, 1, PI_NET, "%s taskq",
1792 device_get_nameunit(sc->msk_dev));
1793 /* Hook interrupt last to avoid having to lock softc. */
1794 error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
1795 INTR_MPSAFE | INTR_FAST, msk_intr, sc, &sc->msk_intrhand[0]);
1796
1797 if (error != 0) {
1798 device_printf(dev, "couldn't set up interrupt handler\n");
1799 taskqueue_free(sc->msk_tq);
1800 sc->msk_tq = NULL;
1801 goto fail;
1802 }
1803fail:
1804 if (error != 0)
1805 mskc_detach(dev);
1806
1807 return (error);
1808}
1809
1810/*
1811 * Shutdown hardware and free up resources. This can be called any
1812 * time after the mutex has been initialized. It is called in both
1813 * the error case in attach and the normal detach case so it needs
1814 * to be careful about only freeing resources that have actually been
1815 * allocated.
1816 */
1817static int
1818msk_detach(device_t dev)
1819{
1820 struct msk_softc *sc;
1821 struct msk_if_softc *sc_if;
1822 struct ifnet *ifp;
1823
1824 sc_if = device_get_softc(dev);
1825 KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx),
1826 ("msk mutex not initialized in msk_detach"));
1827 MSK_IF_LOCK(sc_if);
1828
1829 ifp = sc_if->msk_ifp;
1830 if (device_is_attached(dev)) {
1831 /* XXX */
1832 sc_if->msk_detach = 1;
1833 msk_stop(sc_if);
1834 /* Can't hold locks while calling detach. */
1835 MSK_IF_UNLOCK(sc_if);
1836 callout_drain(&sc_if->msk_tick_ch);
1837 callout_drain(&sc_if->msk_watchdog_ch);
1838 taskqueue_drain(taskqueue_fast, &sc_if->msk_tx_task);
1839 taskqueue_drain(taskqueue_swi, &sc_if->msk_link_task);
1840 ether_ifdetach(ifp);
1841 MSK_IF_LOCK(sc_if);
1842 }
1843
1844 /*
1845 * We're generally called from mskc_detach() which is using
1846 * device_delete_child() to get to here. It's already trashed
1847 * miibus for us, so don't do it here or we'll panic.
1848 *
1849 * if (sc_if->msk_miibus != NULL) {
1850 * device_delete_child(dev, sc_if->msk_miibus);
1851 * sc_if->msk_miibus = NULL;
1852 * }
1853 */
1854
1855 msk_txrx_dma_free(sc_if);
1856 bus_generic_detach(dev);
1857
1858 if (ifp)
1859 if_free(ifp);
1860 sc = sc_if->msk_softc;
1861 sc->msk_if[sc_if->msk_port] = NULL;
1862 MSK_IF_UNLOCK(sc_if);
1863
1864 return (0);
1865}
1866
1867static int
1868mskc_detach(device_t dev)
1869{
1870 struct msk_softc *sc;
1871
1872 sc = device_get_softc(dev);
1873 KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized"));
1874
1875 if (device_is_alive(dev)) {
1876 if (sc->msk_devs[MSK_PORT_A] != NULL) {
1877 free(device_get_ivars(sc->msk_devs[MSK_PORT_A]),
1878 M_DEVBUF);
1879 device_delete_child(dev, sc->msk_devs[MSK_PORT_A]);
1880 }
1881 if (sc->msk_devs[MSK_PORT_B] != NULL) {
1882 free(device_get_ivars(sc->msk_devs[MSK_PORT_B]),
1883 M_DEVBUF);
1884 device_delete_child(dev, sc->msk_devs[MSK_PORT_B]);
1885 }
1886 bus_generic_detach(dev);
1887 }
1888
1889 /* Disable all interrupts. */
1890 CSR_WRITE_4(sc, B0_IMSK, 0);
1891 CSR_READ_4(sc, B0_IMSK);
1892 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1893 CSR_READ_4(sc, B0_HWE_IMSK);
1894
1895 /* LED Off. */
1896 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);
1897
1898 /* Put the hardware into reset state. */
1899 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1900
1901 msk_status_dma_free(sc);
1902
1903 if (sc->msk_tq != NULL) {
1904 taskqueue_drain(sc->msk_tq, &sc->msk_int_task);
1905 taskqueue_free(sc->msk_tq);
1906 sc->msk_tq = NULL;
1907 }
1908 if (sc->msk_intrhand[0]) {
1909 bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand[0]);
1910 sc->msk_intrhand[0] = NULL;
1911 }
1912 if (sc->msk_intrhand[1]) {
1913 bus_teardown_intr(dev, sc->msk_irq[1], sc->msk_intrhand[1]);
1914 sc->msk_intrhand[1] = NULL;
1915 }
1916 bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq);
1891 if (sc->msk_msi)
1892 pci_release_msi(dev);
1893 bus_release_resources(dev, sc->msk_res_spec, sc->msk_res);
1894 mtx_destroy(&sc->msk_mtx);
1895
1896 return (0);
1897}
1898
1899struct msk_dmamap_arg {
1900 bus_addr_t msk_busaddr;
1901};
1902
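/*
 * bus_dmamap_load(9) callback: record the bus address of the single
 * DMA segment so the caller can note the region's physical address.
 */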
1903static void
1904msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1905{
1906 struct msk_dmamap_arg *ctx;
1907
1908 if (error != 0)
1909 return;
1910 ctx = arg;
1911 ctx->msk_busaddr = segs[0].ds_addr;
1912}
1913
1914/* Create status DMA region. */
1915static int
1916msk_status_dma_alloc(struct msk_softc *sc)
1917{
1918 struct msk_dmamap_arg ctx;
1919 int error;
1920
1921 error = bus_dma_tag_create(
1922 bus_get_dma_tag(sc->msk_dev), /* parent */
1923 MSK_STAT_ALIGN, 0, /* alignment, boundary */
1924 BUS_SPACE_MAXADDR, /* lowaddr */
1925 BUS_SPACE_MAXADDR, /* highaddr */
1926 NULL, NULL, /* filter, filterarg */
1927 MSK_STAT_RING_SZ, /* maxsize */
1928 1, /* nsegments */
1929 MSK_STAT_RING_SZ, /* maxsegsize */
1930 0, /* flags */
1931 NULL, NULL, /* lockfunc, lockarg */
1932 &sc->msk_stat_tag);
1933 if (error != 0) {
1934 device_printf(sc->msk_dev,
1935 "failed to create status DMA tag\n");
1936 return (error);
1937 }
1938
1939 /* Allocate DMA'able memory and load the DMA map for status ring. */
1940 error = bus_dmamem_alloc(sc->msk_stat_tag,
1941 (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
1942 BUS_DMA_ZERO, &sc->msk_stat_map);
1943 if (error != 0) {
1944 device_printf(sc->msk_dev,
1945 "failed to allocate DMA'able memory for status ring\n");
1946 return (error);
1947 }
1948
1949 ctx.msk_busaddr = 0;
1950 error = bus_dmamap_load(sc->msk_stat_tag,
1951 sc->msk_stat_map, sc->msk_stat_ring, MSK_STAT_RING_SZ,
1952 msk_dmamap_cb, &ctx, 0);
1953 if (error != 0) {
1954 device_printf(sc->msk_dev,
1955 "failed to load DMA'able memory for status ring\n");
1956 return (error);
1957 }
1958 sc->msk_stat_ring_paddr = ctx.msk_busaddr;
1959
1960 return (0);
1961}
1962
1963static void
1964msk_status_dma_free(struct msk_softc *sc)
1965{
1966
1967 /* Destroy status block. */
1968 if (sc->msk_stat_tag) {
1969 if (sc->msk_stat_map) {
1970 bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
1971 if (sc->msk_stat_ring) {
1972 bus_dmamem_free(sc->msk_stat_tag,
1973 sc->msk_stat_ring, sc->msk_stat_map);
1974 sc->msk_stat_ring = NULL;
1975 }
1976 sc->msk_stat_map = NULL;
1977 }
1978 bus_dma_tag_destroy(sc->msk_stat_tag);
1979 sc->msk_stat_tag = NULL;
1980 }
1981}
1982
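/*
 * Create the per-port DMA resources: the parent tag, the Tx/Rx/jumbo
 * Rx ring tags and maps, per-buffer dmamaps, and the jumbo buffer
 * block that is later carved into MSK_JLEN-sized slots.
 */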
1983static int
1984msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
1985{
1986 struct msk_dmamap_arg ctx;
1987 struct msk_txdesc *txd;
1988 struct msk_rxdesc *rxd;
1989 struct msk_rxdesc *jrxd;
1990 struct msk_jpool_entry *entry;
1991 uint8_t *ptr;
1992 int error, i;
1993
1994 mtx_init(&sc_if->msk_jlist_mtx, "msk_jlist_mtx", NULL, MTX_DEF);
1995 SLIST_INIT(&sc_if->msk_jfree_listhead);
1996 SLIST_INIT(&sc_if->msk_jinuse_listhead);
1997
1998 /* Create parent DMA tag. */
1999 /*
2000 * XXX
2001 * It seems that the Yukon II supports full 64-bit DMA operations, but
2002 * it needs two descriptors (list elements) for a 64-bit DMA operation.
2003 * Since we don't know in advance which DMA address mapping (32-bit or
2004 * 64-bit) would be used for each mbuf, we limit its DMA space to the
2005 * 32-bit address range. Otherwise, we would have to check which DMA
2006 * address is used and chain another descriptor for the 64-bit DMA
2007 * operation, which also means the descriptor ring size would be
2008 * variable. Limiting DMA addresses to the 32-bit address space greatly
2009 * simplifies descriptor handling and may even increase
2010 * performance a bit due to more efficient handling of descriptors.
2011 * Apart from harassing checksum offloading mechanisms, it seems like
2012 * a really bad idea to use a separate descriptor for 64-bit
2013 * DMA operations just to save a little descriptor memory. Anyway, I've
2014 * never seen such an exotic scheme on Ethernet interface hardware.
2015 */
2016 error = bus_dma_tag_create(
2017 bus_get_dma_tag(sc_if->msk_if_dev), /* parent */
2018 1, 0, /* alignment, boundary */
2019 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2020 BUS_SPACE_MAXADDR, /* highaddr */
2021 NULL, NULL, /* filter, filterarg */
2022 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
2023 0, /* nsegments */
2024 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
2025 0, /* flags */
2026 NULL, NULL, /* lockfunc, lockarg */
2027 &sc_if->msk_cdata.msk_parent_tag);
2028 if (error != 0) {
2029 device_printf(sc_if->msk_if_dev,
2030 "failed to create parent DMA tag\n");
2031 goto fail;
2032 }
2033 /* Create tag for Tx ring. */
2034 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2035 MSK_RING_ALIGN, 0, /* alignment, boundary */
2036 BUS_SPACE_MAXADDR, /* lowaddr */
2037 BUS_SPACE_MAXADDR, /* highaddr */
2038 NULL, NULL, /* filter, filterarg */
2039 MSK_TX_RING_SZ, /* maxsize */
2040 1, /* nsegments */
2041 MSK_TX_RING_SZ, /* maxsegsize */
2042 0, /* flags */
2043 NULL, NULL, /* lockfunc, lockarg */
2044 &sc_if->msk_cdata.msk_tx_ring_tag);
2045 if (error != 0) {
2046 device_printf(sc_if->msk_if_dev,
2047 "failed to create Tx ring DMA tag\n");
2048 goto fail;
2049 }
2050
2051 /* Create tag for Rx ring. */
2052 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2053 MSK_RING_ALIGN, 0, /* alignment, boundary */
2054 BUS_SPACE_MAXADDR, /* lowaddr */
2055 BUS_SPACE_MAXADDR, /* highaddr */
2056 NULL, NULL, /* filter, filterarg */
2057 MSK_RX_RING_SZ, /* maxsize */
2058 1, /* nsegments */
2059 MSK_RX_RING_SZ, /* maxsegsize */
2060 0, /* flags */
2061 NULL, NULL, /* lockfunc, lockarg */
2062 &sc_if->msk_cdata.msk_rx_ring_tag);
2063 if (error != 0) {
2064 device_printf(sc_if->msk_if_dev,
2065 "failed to create Rx ring DMA tag\n");
2066 goto fail;
2067 }
2068
2069 /* Create tag for jumbo Rx ring. */
2070 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2071 MSK_RING_ALIGN, 0, /* alignment, boundary */
2072 BUS_SPACE_MAXADDR, /* lowaddr */
2073 BUS_SPACE_MAXADDR, /* highaddr */
2074 NULL, NULL, /* filter, filterarg */
2075 MSK_JUMBO_RX_RING_SZ, /* maxsize */
2076 1, /* nsegments */
2077 MSK_JUMBO_RX_RING_SZ, /* maxsegsize */
2078 0, /* flags */
2079 NULL, NULL, /* lockfunc, lockarg */
2080 &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2081 if (error != 0) {
2082 device_printf(sc_if->msk_if_dev,
2083 "failed to create jumbo Rx ring DMA tag\n");
2084 goto fail;
2085 }
2086
2087 /* Create tag for jumbo buffer blocks. */
2088 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2089 PAGE_SIZE, 0, /* alignment, boundary */
2090 BUS_SPACE_MAXADDR, /* lowaddr */
2091 BUS_SPACE_MAXADDR, /* highaddr */
2092 NULL, NULL, /* filter, filterarg */
2093 MSK_JMEM, /* maxsize */
2094 1, /* nsegments */
2095 MSK_JMEM, /* maxsegsize */
2096 0, /* flags */
2097 NULL, NULL, /* lockfunc, lockarg */
2098 &sc_if->msk_cdata.msk_jumbo_tag);
2099 if (error != 0) {
2100 device_printf(sc_if->msk_if_dev,
2101 "failed to create jumbo Rx buffer block DMA tag\n");
2102 goto fail;
2103 }
2104
2105 /* Create tag for Tx buffers. */
2106 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2107 1, 0, /* alignment, boundary */
2108 BUS_SPACE_MAXADDR, /* lowaddr */
2109 BUS_SPACE_MAXADDR, /* highaddr */
2110 NULL, NULL, /* filter, filterarg */
2111 MCLBYTES * MSK_MAXTXSEGS, /* maxsize */
2112 MSK_MAXTXSEGS, /* nsegments */
2113 MCLBYTES, /* maxsegsize */
2114 0, /* flags */
2115 NULL, NULL, /* lockfunc, lockarg */
2116 &sc_if->msk_cdata.msk_tx_tag);
2117 if (error != 0) {
2118 device_printf(sc_if->msk_if_dev,
2119 "failed to create Tx DMA tag\n");
2120 goto fail;
2121 }
2122
2123 /* Create tag for Rx buffers. */
2124 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2125 1, 0, /* alignment, boundary */
2126 BUS_SPACE_MAXADDR, /* lowaddr */
2127 BUS_SPACE_MAXADDR, /* highaddr */
2128 NULL, NULL, /* filter, filterarg */
2129 MCLBYTES, /* maxsize */
2130 1, /* nsegments */
2131 MCLBYTES, /* maxsegsize */
2132 0, /* flags */
2133 NULL, NULL, /* lockfunc, lockarg */
2134 &sc_if->msk_cdata.msk_rx_tag);
2135 if (error != 0) {
2136 device_printf(sc_if->msk_if_dev,
2137 "failed to create Rx DMA tag\n");
2138 goto fail;
2139 }
2140
2141 /* Create tag for jumbo Rx buffers. */
2142 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2143 PAGE_SIZE, 0, /* alignment, boundary */
2144 BUS_SPACE_MAXADDR, /* lowaddr */
2145 BUS_SPACE_MAXADDR, /* highaddr */
2146 NULL, NULL, /* filter, filterarg */
2147 MCLBYTES * MSK_MAXRXSEGS, /* maxsize */
2148 MSK_MAXRXSEGS, /* nsegments */
2149 MSK_JLEN, /* maxsegsize */
2150 0, /* flags */
2151 NULL, NULL, /* lockfunc, lockarg */
2152 &sc_if->msk_cdata.msk_jumbo_rx_tag);
2153 if (error != 0) {
2154 device_printf(sc_if->msk_if_dev,
2155 "failed to create jumbo Rx DMA tag\n");
2156 goto fail;
2157 }
2158
2159 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
2160 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag,
2161 (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK |
2162 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map);
2163 if (error != 0) {
2164 device_printf(sc_if->msk_if_dev,
2165 "failed to allocate DMA'able memory for Tx ring\n");
2166 goto fail;
2167 }
2168
2169 ctx.msk_busaddr = 0;
2170 error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag,
2171 sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring,
2172 MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, 0);
2173 if (error != 0) {
2174 device_printf(sc_if->msk_if_dev,
2175 "failed to load DMA'able memory for Tx ring\n");
2176 goto fail;
2177 }
2178 sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr;
2179
2180 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
2181 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag,
2182 (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK |
2183 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map);
2184 if (error != 0) {
2185 device_printf(sc_if->msk_if_dev,
2186 "failed to allocate DMA'able memory for Rx ring\n");
2187 goto fail;
2188 }
2189
2190 ctx.msk_busaddr = 0;
2191 error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag,
2192 sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring,
2193 MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, 0);
2194 if (error != 0) {
2195 device_printf(sc_if->msk_if_dev,
2196 "failed to load DMA'able memory for Rx ring\n");
2197 goto fail;
2198 }
2199 sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr;
2200
2201 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
2202 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2203 (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
2204 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2205 &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2206 if (error != 0) {
2207 device_printf(sc_if->msk_if_dev,
2208 "failed to allocate DMA'able memory for jumbo Rx ring\n");
2209 goto fail;
2210 }
2211
2212 ctx.msk_busaddr = 0;
2213 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2214 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
2215 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
2216 msk_dmamap_cb, &ctx, 0);
2217 if (error != 0) {
2218 device_printf(sc_if->msk_if_dev,
2219 "failed to load DMA'able memory for jumbo Rx ring\n");
2220 goto fail;
2221 }
2222 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
2223
2224 /* Create DMA maps for Tx buffers. */
2225 for (i = 0; i < MSK_TX_RING_CNT; i++) {
2226 txd = &sc_if->msk_cdata.msk_txdesc[i];
2227 txd->tx_m = NULL;
2228 txd->tx_dmamap = NULL;
2229 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0,
2230 &txd->tx_dmamap);
2231 if (error != 0) {
2232 device_printf(sc_if->msk_if_dev,
2233 "failed to create Tx dmamap\n");
2234 goto fail;
2235 }
2236 }
2237 /* Create DMA maps for Rx buffers. */
2238 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2239 &sc_if->msk_cdata.msk_rx_sparemap)) != 0) {
2240 device_printf(sc_if->msk_if_dev,
2241 "failed to create spare Rx dmamap\n");
2242 goto fail;
2243 }
2244 for (i = 0; i < MSK_RX_RING_CNT; i++) {
2245 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2246 rxd->rx_m = NULL;
2247 rxd->rx_dmamap = NULL;
2248 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2249 &rxd->rx_dmamap);
2250 if (error != 0) {
2251 device_printf(sc_if->msk_if_dev,
2252 "failed to create Rx dmamap\n");
2253 goto fail;
2254 }
2255 }
2256 /* Create DMA maps for jumbo Rx buffers. */
2257 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2258 &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
2259 device_printf(sc_if->msk_if_dev,
2260 "failed to create spare jumbo Rx dmamap\n");
2261 goto fail;
2262 }
2263 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2264 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2265 jrxd->rx_m = NULL;
2266 jrxd->rx_dmamap = NULL;
2267 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2268 &jrxd->rx_dmamap);
2269 if (error != 0) {
2270 device_printf(sc_if->msk_if_dev,
2271 "failed to create jumbo Rx dmamap\n");
2272 goto fail;
2273 }
2274 }
2275
2276 /* Allocate DMA'able memory and load the DMA map for jumbo buf. */
2277 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_tag,
2278 (void **)&sc_if->msk_rdata.msk_jumbo_buf,
2279 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2280 &sc_if->msk_cdata.msk_jumbo_map);
2281 if (error != 0) {
2282 device_printf(sc_if->msk_if_dev,
2283 "failed to allocate DMA'able memory for jumbo buf\n");
2284 goto fail;
2285 }
2286
2287 ctx.msk_busaddr = 0;
2288 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_tag,
2289 sc_if->msk_cdata.msk_jumbo_map, sc_if->msk_rdata.msk_jumbo_buf,
2290 MSK_JMEM, msk_dmamap_cb, &ctx, 0);
2291 if (error != 0) {
2292 device_printf(sc_if->msk_if_dev,
2293 "failed to load DMA'able memory for jumbobuf\n");
2294 goto fail;
2295 }
2296 sc_if->msk_rdata.msk_jumbo_buf_paddr = ctx.msk_busaddr;
2297
2298 /*
2299 * Now divide it up into 9K pieces and save the addresses
2300 * in an array.
2301 */
2302 ptr = sc_if->msk_rdata.msk_jumbo_buf;
2303 for (i = 0; i < MSK_JSLOTS; i++) {
2304 sc_if->msk_cdata.msk_jslots[i] = ptr;
2305 ptr += MSK_JLEN;
2306 entry = malloc(sizeof(struct msk_jpool_entry),
2307 M_DEVBUF, M_WAITOK);
2308 if (entry == NULL) {
2309 device_printf(sc_if->msk_if_dev,
2310 "no memory for jumbo buffers!\n");
2311 error = ENOMEM;
2312 goto fail;
2313 }
2314 entry->slot = i;
2315 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
2316 jpool_entries);
2317 }
2318
2319fail:
2320 return (error);
2321}
2322
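/*
 * Tear down everything msk_txrx_dma_alloc() created: the jumbo buffer
 * pool, the ring memory and maps, the per-buffer dmamaps, and finally
 * the parent tag and the jumbo list mutex.
 */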
2323static void
2324msk_txrx_dma_free(struct msk_if_softc *sc_if)
2325{
2326 struct msk_txdesc *txd;
2327 struct msk_rxdesc *rxd;
2328 struct msk_rxdesc *jrxd;
2329 struct msk_jpool_entry *entry;
2330 int i;
2331
2332 MSK_JLIST_LOCK(sc_if);
2333 while ((entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead))) {
2334 device_printf(sc_if->msk_if_dev,
2335 "asked to free buffer that is in use!\n");
2336 SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
2337 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
2338 jpool_entries);
2339 }
2340
2341 while (!SLIST_EMPTY(&sc_if->msk_jfree_listhead)) {
2342 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
2343 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
2344 free(entry, M_DEVBUF);
2345 }
2346 MSK_JLIST_UNLOCK(sc_if);
2347
2348 /* Destroy jumbo buffer block. */
2349 if (sc_if->msk_cdata.msk_jumbo_map)
2350 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_tag,
2351 sc_if->msk_cdata.msk_jumbo_map);
2352
2353 if (sc_if->msk_rdata.msk_jumbo_buf) {
2354 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_tag,
2355 sc_if->msk_rdata.msk_jumbo_buf,
2356 sc_if->msk_cdata.msk_jumbo_map);
2357 sc_if->msk_rdata.msk_jumbo_buf = NULL;
2358 sc_if->msk_cdata.msk_jumbo_map = NULL;
2359 }
2360
2361 /* Tx ring. */
2362 if (sc_if->msk_cdata.msk_tx_ring_tag) {
2363 if (sc_if->msk_cdata.msk_tx_ring_map)
2364 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag,
2365 sc_if->msk_cdata.msk_tx_ring_map);
2366 if (sc_if->msk_cdata.msk_tx_ring_map &&
2367 sc_if->msk_rdata.msk_tx_ring)
2368 bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag,
2369 sc_if->msk_rdata.msk_tx_ring,
2370 sc_if->msk_cdata.msk_tx_ring_map);
2371 sc_if->msk_rdata.msk_tx_ring = NULL;
2372 sc_if->msk_cdata.msk_tx_ring_map = NULL;
2373 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag);
2374 sc_if->msk_cdata.msk_tx_ring_tag = NULL;
2375 }
2376 /* Rx ring. */
2377 if (sc_if->msk_cdata.msk_rx_ring_tag) {
2378 if (sc_if->msk_cdata.msk_rx_ring_map)
2379 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag,
2380 sc_if->msk_cdata.msk_rx_ring_map);
2381 if (sc_if->msk_cdata.msk_rx_ring_map &&
2382 sc_if->msk_rdata.msk_rx_ring)
2383 bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag,
2384 sc_if->msk_rdata.msk_rx_ring,
2385 sc_if->msk_cdata.msk_rx_ring_map);
2386 sc_if->msk_rdata.msk_rx_ring = NULL;
2387 sc_if->msk_cdata.msk_rx_ring_map = NULL;
2388 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag);
2389 sc_if->msk_cdata.msk_rx_ring_tag = NULL;
2390 }
2391 /* Jumbo Rx ring. */
2392 if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
2393 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map)
2394 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2395 sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2396 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
2397 sc_if->msk_rdata.msk_jumbo_rx_ring)
2398 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2399 sc_if->msk_rdata.msk_jumbo_rx_ring,
2400 sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2401 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
2402 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
2403 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2404 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
2405 }
2406 /* Tx buffers. */
2407 if (sc_if->msk_cdata.msk_tx_tag) {
2408 for (i = 0; i < MSK_TX_RING_CNT; i++) {
2409 txd = &sc_if->msk_cdata.msk_txdesc[i];
2410 if (txd->tx_dmamap) {
2411 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
2412 txd->tx_dmamap);
2413 txd->tx_dmamap = NULL;
2414 }
2415 }
2416 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
2417 sc_if->msk_cdata.msk_tx_tag = NULL;
2418 }
2419 /* Rx buffers. */
2420 if (sc_if->msk_cdata.msk_rx_tag) {
2421 for (i = 0; i < MSK_RX_RING_CNT; i++) {
2422 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2423 if (rxd->rx_dmamap) {
2424 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2425 rxd->rx_dmamap);
2426 rxd->rx_dmamap = NULL;
2427 }
2428 }
2429 if (sc_if->msk_cdata.msk_rx_sparemap) {
2430 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2431 sc_if->msk_cdata.msk_rx_sparemap);
2432 sc_if->msk_cdata.msk_rx_sparemap = 0;
2433 }
2434 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2435 sc_if->msk_cdata.msk_rx_tag = NULL;
2436 }
2437 /* Jumbo Rx buffers. */
2438 if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
2439 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2440 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2441 if (jrxd->rx_dmamap) {
2442 bus_dmamap_destroy(
2443 sc_if->msk_cdata.msk_jumbo_rx_tag,
2444 jrxd->rx_dmamap);
2445 jrxd->rx_dmamap = NULL;
2446 }
2447 }
2448 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
2449 bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
2450 sc_if->msk_cdata.msk_jumbo_rx_sparemap);
2451 sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
2452 }
2453 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
2454 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
2455 }
2456
2457 if (sc_if->msk_cdata.msk_parent_tag) {
2458 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
2459 sc_if->msk_cdata.msk_parent_tag = NULL;
2460 }
2461 mtx_destroy(&sc_if->msk_jlist_mtx);
2462}
2463
2464/*
2465 * Allocate a jumbo buffer.
2466 */
2467static void *
2468msk_jalloc(struct msk_if_softc *sc_if)
2469{
2470 struct msk_jpool_entry *entry;
2471
2472 MSK_JLIST_LOCK(sc_if);
2473
2474 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
2475
2476 if (entry == NULL) {
2477 MSK_JLIST_UNLOCK(sc_if);
2478 return (NULL);
2479 }
2480
2481 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
2482 SLIST_INSERT_HEAD(&sc_if->msk_jinuse_listhead, entry, jpool_entries);
2483
2484 MSK_JLIST_UNLOCK(sc_if);
2485
2486 return (sc_if->msk_cdata.msk_jslots[entry->slot]);
2487}
2488
2489/*
2490 * Release a jumbo buffer.
2491 */
2492static void
2493msk_jfree(void *buf, void *args)
2494{
2495 struct msk_if_softc *sc_if;
2496 struct msk_jpool_entry *entry;
2497 int i;
2498
2499 /* Extract the softc struct pointer. */
2500 sc_if = (struct msk_if_softc *)args;
2501 KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__));
2502
2503 MSK_JLIST_LOCK(sc_if);
2504 /* Calculate the slot this buffer belongs to. */
2505 i = ((vm_offset_t)buf
2506 - (vm_offset_t)sc_if->msk_rdata.msk_jumbo_buf) / MSK_JLEN;
2507 KASSERT(i >= 0 && i < MSK_JSLOTS,
2508 ("%s: asked to free buffer that we don't manage!", __func__));
2509
2510 entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead);
2511 KASSERT(entry != NULL, ("%s: buffer not in use!", __func__));
2512 entry->slot = i;
2513 SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
2514 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, jpool_entries);
2515 if (SLIST_EMPTY(&sc_if->msk_jinuse_listhead))
2516 wakeup(sc_if);
2517
2518 MSK_JLIST_UNLOCK(sc_if);
2519}
2520
2521/*
2522 * This is a copy of ath_defrag() from ath(4).
2523 *
2524 * Defragment an mbuf chain, returning at most maxfrags separate
2525 * mbufs+clusters. If this is not possible, NULL is returned and
2526 * the original mbuf chain is left in its present (potentially
2527 * modified) state. We use two techniques: collapsing consecutive
2528 * mbufs and replacing consecutive mbufs by a cluster.
2529 */
2530static struct mbuf *
2531msk_defrag(struct mbuf *m0, int how, int maxfrags)
2532{
2533 struct mbuf *m, *n, *n2, **prev;
2534 u_int curfrags;
2535
2536 /*
2537 * Calculate the current number of frags.
2538 */
2539 curfrags = 0;
2540 for (m = m0; m != NULL; m = m->m_next)
2541 curfrags++;
2542 /*
2543 * First, try to collapse mbufs. Note that we always collapse
2544 * towards the front so we don't need to deal with moving the
2545 * pkthdr. This may be suboptimal if the first mbuf has much
2546 * less data than the following.
2547 */
2548 m = m0;
2549again:
2550 for (;;) {
2551 n = m->m_next;
2552 if (n == NULL)
2553 break;
2554 if ((m->m_flags & M_RDONLY) == 0 &&
2555 n->m_len < M_TRAILINGSPACE(m)) {
2556 bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
2557 n->m_len);
2558 m->m_len += n->m_len;
2559 m->m_next = n->m_next;
2560 m_free(n);
2561 if (--curfrags <= maxfrags)
2562 return (m0);
2563 } else
2564 m = n;
2565 }
2566 KASSERT(maxfrags > 1,
2567 ("maxfrags %u, but normal collapse failed", maxfrags));
2568 /*
2569 * Collapse consecutive mbufs to a cluster.
2570 */
2571 prev = &m0->m_next; /* NB: not the first mbuf */
2572 while ((n = *prev) != NULL) {
2573 if ((n2 = n->m_next) != NULL &&
2574 n->m_len + n2->m_len < MCLBYTES) {
2575 m = m_getcl(how, MT_DATA, 0);
2576 if (m == NULL)
2577 goto bad;
2578 bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
2579 bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
2580 n2->m_len);
2581 m->m_len = n->m_len + n2->m_len;
2582 m->m_next = n2->m_next;
2583 *prev = m;
2584 m_free(n);
2585 m_free(n2);
2586 if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */
2587 return (m0);
2588 /*
2589 * Still not there, try the normal collapse
2590 * again before we allocate another cluster.
2591 */
2592 goto again;
2593 }
2594 prev = &n->m_next;
2595 }
2596 /*
2597 * No place where we can collapse to a cluster; punt.
2598 * This can occur if, for example, you request 2 frags
2599 * but the packet requires that both be clusters (we
2600 * never reallocate the first mbuf to avoid moving the
2601 * packet header).
2602 */
2603bad:
2604 return (NULL);
2605}
2606
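/*
 * Encapsulate an mbuf chain into the Tx ring: parse headers for
 * TSO/checksum offload, load the chain with bus_dmamap_load_mbuf_sg(9)
 * (defragmenting on EFBIG), emit optional TSO/VLAN/checksum list
 * elements, an OP_PACKET (or OP_LARGESEND for TSO) element for the
 * first DMA segment and OP_BUFFER elements for the rest, and finally
 * hand the first element to hardware by setting HW_OWNER.
 */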
2607static int
2608msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
2609{
2610 struct msk_txdesc *txd, *txd_last;
2611 struct msk_tx_desc *tx_le;
2612 struct mbuf *m;
2613 bus_dmamap_t map;
2614 bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
2615 uint32_t control, prod, si;
2616 uint16_t offset, tcp_offset, tso_mtu;
2617 int error, i, nseg, tso;
2618
2619 MSK_IF_LOCK_ASSERT(sc_if);
2620
2621 tcp_offset = offset = 0;
2622 m = *m_head;
2623 if ((m->m_pkthdr.csum_flags & (MSK_CSUM_FEATURES | CSUM_TSO)) != 0) {
2624 /*
2625 * Since the mbuf carries no protocol-specific structure
2626 * information, we have to inspect the protocol headers here to
2627 * set up TSO and checksum offload. I don't know why Marvell
2628 * made such a decision in the chip design, because other GigE
2629 * hardware normally takes care of all these chores in
2630 * hardware. However, the TSO performance of the Yukon II is
2631 * good enough that it's worth implementing.
2632 */
2633 struct ether_vlan_header *evh;
2634 struct ether_header *eh;
2635 struct ip *ip;
2636 struct tcphdr *tcp;
2637
2638 /* TODO check for M_WRITABLE(m) */
2639
2640 offset = sizeof(struct ether_header);
2641 m = m_pullup(m, offset);
2642 if (m == NULL) {
2643 *m_head = NULL;
2644 return (ENOBUFS);
2645 }
2646 eh = mtod(m, struct ether_header *);
2647 /* Check if hardware VLAN insertion is off. */
2648 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2649 offset = sizeof(struct ether_vlan_header);
2650 m = m_pullup(m, offset);
2651 if (m == NULL) {
2652 *m_head = NULL;
2653 return (ENOBUFS);
2654 }
2655 evh = mtod(m, struct ether_vlan_header *);
2656 ip = (struct ip *)(evh + 1);
2657 } else
2658 ip = (struct ip *)(eh + 1);
2659 m = m_pullup(m, offset + sizeof(struct ip));
2660 if (m == NULL) {
2661 *m_head = NULL;
2662 return (ENOBUFS);
2663 }
2664 offset += (ip->ip_hl << 2);
2665 tcp_offset = offset;
2666 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2667 m = m_pullup(m, offset + sizeof(struct tcphdr));
2668 if (m == NULL) {
2669 *m_head = NULL;
2670 return (ENOBUFS);
2671 }
2672 tcp = mtod(m, struct tcphdr *);
2673 offset += (tcp->th_off << 2);
2674 }
2675 *m_head = m;
2676 }
2677
2678 prod = sc_if->msk_cdata.msk_tx_prod;
2679 txd = &sc_if->msk_cdata.msk_txdesc[prod];
2680 txd_last = txd;
2681 map = txd->tx_dmamap;
2682 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map,
2683 *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2684 if (error == EFBIG) {
2685 m = msk_defrag(*m_head, M_DONTWAIT, MSK_MAXTXSEGS);
2686 if (m == NULL) {
2687 m_freem(*m_head);
2688 *m_head = NULL;
2689 return (ENOBUFS);
2690 }
2691 *m_head = m;
2692 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag,
2693 map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2694 if (error != 0) {
2695 m_freem(*m_head);
2696 *m_head = NULL;
2697 return (error);
2698 }
2699 } else if (error != 0)
2700 return (error);
2701 if (nseg == 0) {
2702 m_freem(*m_head);
2703 *m_head = NULL;
2704 return (EIO);
2705 }
2706
2707 /* Check number of available descriptors. */
2708 if (sc_if->msk_cdata.msk_tx_cnt + nseg >=
2709 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) {
2710 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
2711 return (ENOBUFS);
2712 }
2713
2714 control = 0;
2715 tso = 0;
2716 tx_le = NULL;
2717
2718 /* Check TSO support. */
2719 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2720 tso_mtu = offset + m->m_pkthdr.tso_segsz;
2721 if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) {
2722 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2723 tx_le->msk_addr = htole32(tso_mtu);
2724 tx_le->msk_control = htole32(OP_LRGLEN | HW_OWNER);
2725 sc_if->msk_cdata.msk_tx_cnt++;
2726 MSK_INC(prod, MSK_TX_RING_CNT);
2727 sc_if->msk_cdata.msk_tso_mtu = tso_mtu;
2728 }
2729 tso++;
2730 }
2731 /* Check if we have a VLAN tag to insert. */
2732 if ((m->m_flags & M_VLANTAG) != 0) {
2733 if (tso == 0) {
2734 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2735 tx_le->msk_addr = htole32(0);
2736 tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
2737 htons(m->m_pkthdr.ether_vtag));
2738 sc_if->msk_cdata.msk_tx_cnt++;
2739 MSK_INC(prod, MSK_TX_RING_CNT);
2740 } else {
2741 tx_le->msk_control |= htole32(OP_VLAN |
2742 htons(m->m_pkthdr.ether_vtag));
2743 }
2744 control |= INS_VLAN;
2745 }
2746 /* Check if we have to handle checksum offload. */
2747 if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) {
2748 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2749 tx_le->msk_addr = htole32(((tcp_offset + m->m_pkthdr.csum_data)
2750 & 0xffff) | ((uint32_t)tcp_offset << 16));
2751 tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER));
2752 control |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
2753 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2754 control |= UDPTCP;
2755 sc_if->msk_cdata.msk_tx_cnt++;
2756 MSK_INC(prod, MSK_TX_RING_CNT);
2757 }
2758
2759 si = prod;
2760 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2761 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
2762 if (tso == 0)
2763 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2764 OP_PACKET);
2765 else
2766 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2767 OP_LARGESEND);
2768 sc_if->msk_cdata.msk_tx_cnt++;
2769 MSK_INC(prod, MSK_TX_RING_CNT);
2770
2771 for (i = 1; i < nseg; i++) {
2772 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2773 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
2774 tx_le->msk_control = htole32(txsegs[i].ds_len | control |
2775 OP_BUFFER | HW_OWNER);
2776 sc_if->msk_cdata.msk_tx_cnt++;
2777 MSK_INC(prod, MSK_TX_RING_CNT);
2778 }
2779 /* Update producer index. */
2780 sc_if->msk_cdata.msk_tx_prod = prod;
2781
2782 /* Set EOP on the last descriptor. */
2783 prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
2784 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2785 tx_le->msk_control |= htole32(EOP);
2786
2787 /* Turn the first descriptor ownership to hardware. */
2788 tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
2789 tx_le->msk_control |= htole32(HW_OWNER);
2790
2791 txd = &sc_if->msk_cdata.msk_txdesc[prod];
2792 map = txd_last->tx_dmamap;
2793 txd_last->tx_dmamap = txd->tx_dmamap;
2794 txd->tx_dmamap = map;
2795 txd->tx_m = m;
2796
2797 /* Sync descriptors. */
2798 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
2799 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
2800 sc_if->msk_cdata.msk_tx_ring_map,
2801 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2802
2803 return (0);
2804}
2805
2806static void
2807msk_tx_task(void *arg, int pending)
2808{
2809 struct ifnet *ifp;
2810
2811 ifp = arg;
2812 msk_start(ifp);
2813}
2814
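/*
 * Dequeue frames from the interface send queue and hand them to
 * msk_encap(); if anything was queued, update the prefetch unit's
 * put index and arm the watchdog.
 */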
2815static void
2816msk_start(struct ifnet *ifp)
2817{
2818 struct msk_if_softc *sc_if;
2819 struct mbuf *m_head;
2820 int enq;
2821
2822 sc_if = ifp->if_softc;
2823
2824 MSK_IF_LOCK(sc_if);
2825
2826 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2827 IFF_DRV_RUNNING || sc_if->msk_link == 0) {
2828 MSK_IF_UNLOCK(sc_if);
2829 return;
2830 }
2831
2832 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2833 sc_if->msk_cdata.msk_tx_cnt <
2834 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
2835 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2836 if (m_head == NULL)
2837 break;
2838 /*
2839 * Pack the data into the transmit ring. If we
2840 * don't have room, set the OACTIVE flag and wait
2841 * for the NIC to drain the ring.
2842 */
2843 if (msk_encap(sc_if, &m_head) != 0) {
2844 if (m_head == NULL)
2845 break;
2846 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2847 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2848 break;
2849 }
2850
2851 enq++;
2852 /*
2853 * If there's a BPF listener, bounce a copy of this frame
2854 * to him.
2855 */
2856 BPF_MTAP(ifp, m_head);
2857 }
2858
2859 if (enq > 0) {
2860 /* Transmit */
2861 CSR_WRITE_2(sc_if->msk_softc,
2862 Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
2863 sc_if->msk_cdata.msk_tx_prod);
2864
2865 /* Set a timeout in case the chip goes out to lunch. */
2866 callout_reset(&sc_if->msk_watchdog_ch, MSK_TX_TIMEOUT * hz,
2867 msk_watchdog, sc_if);
2868 }
2869
2870 MSK_IF_UNLOCK(sc_if);
2871}
2872
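/*
 * Watchdog timer: if the link is up, first try to reclaim completed
 * Tx descriptors in case a Tx completion interrupt was lost; the
 * interface is reinitialized only if that does not help.
 */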
2873static void
2874msk_watchdog(void *arg)
2875{
2876 struct msk_if_softc *sc_if;
2877 struct ifnet *ifp;
2878 uint32_t ridx;
2879 int idx;
2880
2881 sc_if = arg;
2882
2883 MSK_IF_LOCK_ASSERT(sc_if);
2884
2885 ifp = sc_if->msk_ifp;
2886 if (sc_if->msk_link == 0) {
2887 if (bootverbose)
2888 if_printf(sc_if->msk_ifp, "watchdog timeout "
2889 "(missed link)\n");
2890 ifp->if_oerrors++;
2891 msk_init_locked(sc_if);
2892 return;
2893 }
2894
2895 /*
2896 * Reclaim first as there is a possibility of losing Tx completion
2897 * interrupts.
2898 */
2899 ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX;
2900 idx = CSR_READ_2(sc_if->msk_softc, ridx);
2901 if (sc_if->msk_cdata.msk_tx_cons != idx) {
2902 msk_txeof(sc_if, idx);
2903 if (sc_if->msk_cdata.msk_tx_cnt == 0) {
2904 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2905 "-- recovering\n");
2906 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2907 taskqueue_enqueue(taskqueue_fast,
2908 &sc_if->msk_tx_task);
2909 return;
2910 }
2911 }
2912
2913 if_printf(ifp, "watchdog timeout\n");
2914 ifp->if_oerrors++;
2915 msk_init_locked(sc_if);
2916 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2917 taskqueue_enqueue(taskqueue_fast, &sc_if->msk_tx_task);
2918}
2919
2920static void
2921mskc_shutdown(device_t dev)
2922{
2923 struct msk_softc *sc;
2924 int i;
2925
2926 sc = device_get_softc(dev);
2927 MSK_LOCK(sc);
2928 for (i = 0; i < sc->msk_num_port; i++) {
2929 if (sc->msk_if[i] != NULL)
2930 msk_stop(sc->msk_if[i]);
2931 }
2932
2933 /* Disable all interrupts. */
2934 CSR_WRITE_4(sc, B0_IMSK, 0);
2935 CSR_READ_4(sc, B0_IMSK);
2936 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2937 CSR_READ_4(sc, B0_HWE_IMSK);
2938
2939 /* Put the hardware into reset state. */
2940 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2941
2942 MSK_UNLOCK(sc);
2943}
2944
2945static int
2946mskc_suspend(device_t dev)
2947{
2948 struct msk_softc *sc;
2949 int i;
2950
2951 sc = device_get_softc(dev);
2952
2953 MSK_LOCK(sc);
2954
2955 for (i = 0; i < sc->msk_num_port; i++) {
2956 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2957 ((sc->msk_if[i]->msk_ifp->if_drv_flags &
2958 IFF_DRV_RUNNING) != 0))
2959 msk_stop(sc->msk_if[i]);
2960 }
2961
2962 /* Disable all interrupts. */
2963 CSR_WRITE_4(sc, B0_IMSK, 0);
2964 CSR_READ_4(sc, B0_IMSK);
2965 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2966 CSR_READ_4(sc, B0_HWE_IMSK);
2967
2968 msk_phy_power(sc, MSK_PHY_POWERDOWN);
2969
2970 /* Put the hardware into reset state. */
2971 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2972 sc->msk_suspended = 1;
2973
2974 MSK_UNLOCK(sc);
2975
2976 return (0);
2977}
2978
2979static int
2980mskc_resume(device_t dev)
2981{
2982 struct msk_softc *sc;
2983 int i;
2984
2985 sc = device_get_softc(dev);
2986
2987 MSK_LOCK(sc);
2988
2989 mskc_reset(sc);
2990 for (i = 0; i < sc->msk_num_port; i++) {
2991 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2992 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0))
2993 msk_init_locked(sc->msk_if[i]);
2994 }
2995 sc->msk_suspended = 0;
2996
2997 MSK_UNLOCK(sc);
2998
2999 return (0);
3000}
3001
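/*
 * Process one received frame from the standard (non-jumbo) Rx ring:
 * validate the status and length, replace the ring buffer with a
 * fresh mbuf (reusing the old one on failure), and pass the frame up
 * the stack with the interface lock dropped.
 */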
3002static void
3003msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
3004{
3005 struct mbuf *m;
3006 struct ifnet *ifp;
3007 struct msk_rxdesc *rxd;
3008 int cons, rxlen;
3009
3010 ifp = sc_if->msk_ifp;
3011
3012 MSK_IF_LOCK_ASSERT(sc_if);
3013
3014 cons = sc_if->msk_cdata.msk_rx_cons;
3015 do {
3016 rxlen = status >> 16;
3017 if ((status & GMR_FS_VLAN) != 0)
3018 rxlen -= ETHER_VLAN_ENCAP_LEN;
3019 if (len > sc_if->msk_framesize ||
3020 ((status & GMR_FS_ANY_ERR) != 0) ||
3021 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
3022 /* Don't count flow-control packets as errors. */
3023 if ((status & GMR_FS_GOOD_FC) == 0)
3024 ifp->if_ierrors++;
3025 msk_discard_rxbuf(sc_if, cons);
3026 break;
3027 }
3028 rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
3029 m = rxd->rx_m;
3030 if (msk_newbuf(sc_if, cons) != 0) {
3031 ifp->if_iqdrops++;
3032 /* Reuse old buffer. */
3033 msk_discard_rxbuf(sc_if, cons);
3034 break;
3035 }
3036 m->m_pkthdr.rcvif = ifp;
3037 m->m_pkthdr.len = m->m_len = len;
3038 ifp->if_ipackets++;
3039 /* Check for VLAN tagged packets. */
3040 if ((status & GMR_FS_VLAN) != 0 &&
3041 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3042 m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
3043 m->m_flags |= M_VLANTAG;
3044 }
3045 MSK_IF_UNLOCK(sc_if);
3046 (*ifp->if_input)(ifp, m);
3047 MSK_IF_LOCK(sc_if);
3048 } while (0);
3049
3050 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
3051 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
3052}
3053
3054static void
3055msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
3056{
3057 struct mbuf *m;
3058 struct ifnet *ifp;
3059 struct msk_rxdesc *jrxd;
3060 int cons, rxlen;
3061
3062 ifp = sc_if->msk_ifp;
3063
3064 MSK_IF_LOCK_ASSERT(sc_if);
3065
3066 cons = sc_if->msk_cdata.msk_rx_cons;
3067 do {
3068 rxlen = status >> 16;
3069 if ((status & GMR_FS_VLAN) != 0)
3070 rxlen -= ETHER_VLAN_ENCAP_LEN;
3071 if (len > sc_if->msk_framesize ||
3072 ((status & GMR_FS_ANY_ERR) != 0) ||
3073 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
3074 /* Don't count flow-control packets as errors. */
3075 if ((status & GMR_FS_GOOD_FC) == 0)
3076 ifp->if_ierrors++;
3077 msk_discard_jumbo_rxbuf(sc_if, cons);
3078 break;
3079 }
3080 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
3081 m = jrxd->rx_m;
3082 if (msk_jumbo_newbuf(sc_if, cons) != 0) {
3083 ifp->if_iqdrops++;
3084 /* Reuse old buffer. */
3085 msk_discard_jumbo_rxbuf(sc_if, cons);
3086 break;
3087 }
3088 m->m_pkthdr.rcvif = ifp;
3089 m->m_pkthdr.len = m->m_len = len;
3090 ifp->if_ipackets++;
3091 /* Check for VLAN tagged packets. */
3092 if ((status & GMR_FS_VLAN) != 0 &&
3093 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3094 m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
3095 m->m_flags |= M_VLANTAG;
3096 }
3097 MSK_IF_UNLOCK(sc_if);
3098 (*ifp->if_input)(ifp, m);
3099 MSK_IF_LOCK(sc_if);
3100 } while (0);
3101
3102 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
3103 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
3104}
3105
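/*
 * Reclaim transmitted descriptors up to the given index, unloading
 * DMA maps and freeing mbufs on EOP descriptors, and stop the
 * watchdog once the ring is empty.
 */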
3106static void
3107msk_txeof(struct msk_if_softc *sc_if, int idx)
3108{
3109 struct msk_txdesc *txd;
3110 struct msk_tx_desc *cur_tx;
3111 struct ifnet *ifp;
3112 uint32_t control;
3113 int cons, prog;
3114
3115 MSK_IF_LOCK_ASSERT(sc_if);
3116
3117 ifp = sc_if->msk_ifp;
3118
3119 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
3120 sc_if->msk_cdata.msk_tx_ring_map,
3121 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3122 /*
3123 * Go through our tx ring and free mbufs for those
3124 * frames that have been sent.
3125 */
3126 cons = sc_if->msk_cdata.msk_tx_cons;
3127 prog = 0;
3128 for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
3129 if (sc_if->msk_cdata.msk_tx_cnt <= 0)
3130 break;
3131 prog++;
3132 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
3133 control = le32toh(cur_tx->msk_control);
3134 sc_if->msk_cdata.msk_tx_cnt--;
3135 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3136 if ((control & EOP) == 0)
3137 continue;
3138 txd = &sc_if->msk_cdata.msk_txdesc[cons];
3139 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap,
3140 BUS_DMASYNC_POSTWRITE);
3141 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);
3142
3143 ifp->if_opackets++;
3144 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
3145 __func__));
3146 m_freem(txd->tx_m);
3147 txd->tx_m = NULL;
3148 }
3149
3150 if (prog > 0) {
3151 sc_if->msk_cdata.msk_tx_cons = cons;
3152 if (sc_if->msk_cdata.msk_tx_cnt == 0)
3153 callout_stop(&sc_if->msk_watchdog_ch);
3154 /* No need to sync LEs as we didn't update LEs. */
3155 }
3156}
3157
3158static void
3159msk_tick(void *xsc_if)
3160{
3161 struct msk_if_softc *sc_if;
3162 struct mii_data *mii;
3163
3164 sc_if = xsc_if;
3165
3166 MSK_IF_LOCK_ASSERT(sc_if);
3167
3168 mii = device_get_softc(sc_if->msk_miibus);
3169
3170 mii_tick(mii);
3171 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3172}
3173
3174static void
3175msk_intr_phy(struct msk_if_softc *sc_if)
3176{
3177 uint16_t status;
3178
3179 if (sc_if->msk_softc->msk_marvell_phy) {
3180 msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3181 status = msk_phy_readreg(sc_if, PHY_ADDR_MARV,
3182 PHY_MARV_INT_STAT);
3183 /* Handle FIFO Underrun/Overflow? */
3184 if ((status & PHY_M_IS_FIFO_ERROR))
3185 device_printf(sc_if->msk_if_dev,
3186 "PHY FIFO underrun/overflow.\n");
3187 }
3188}
3189
3190static void
3191msk_intr_gmac(struct msk_if_softc *sc_if)
3192{
3193 struct msk_softc *sc;
3194 uint8_t status;
3195
3196 sc = sc_if->msk_softc;
3197 status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3198
3199 /* GMAC Rx FIFO overrun. */
3200 if ((status & GM_IS_RX_FF_OR) != 0) {
3201 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3202 GMF_CLI_RX_FO);
3203 device_printf(sc_if->msk_if_dev, "Rx FIFO overrun!\n");
3204 }
3205 /* GMAC Tx FIFO underrun. */
3206 if ((status & GM_IS_TX_FF_UR) != 0) {
3207 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3208 GMF_CLI_TX_FU);
3209 device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
3210 /*
3211 * XXX
3212 * In case of Tx underrun, we may need to flush/reset
3213 * Tx MAC but that would also require resynchronization
3214 * with status LEs. Reinitializing status LEs would
3215 * affect the other port in a dual-MAC configuration, so it
3216 * should be avoided as much as we can.
3217 * Due to lack of documentation it's all vague guesswork, but
3218 * it needs more investigation.
3219 */
3220 }
3221}
3222
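/*
 * Handle the per-port hardware error bits (RAM buffer, MAC and Rx
 * parity errors plus TCP segmentation errors), clearing the
 * corresponding IRQ sources.
 */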
3223static void
3224msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
3225{
3226 struct msk_softc *sc;
3227
3228 sc = sc_if->msk_softc;
3229 if ((status & Y2_IS_PAR_RD1) != 0) {
3230 device_printf(sc_if->msk_if_dev,
3231 "RAM buffer read parity error\n");
3232 /* Clear IRQ. */
3233 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3234 RI_CLR_RD_PERR);
3235 }
3236 if ((status & Y2_IS_PAR_WR1) != 0) {
3237 device_printf(sc_if->msk_if_dev,
3238 "RAM buffer write parity error\n");
3239 /* Clear IRQ. */
3240 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3241 RI_CLR_WR_PERR);
3242 }
3243 if ((status & Y2_IS_PAR_MAC1) != 0) {
3244 device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
3245 /* Clear IRQ. */
3246 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3247 GMF_CLI_TX_PE);
3248 }
3249 if ((status & Y2_IS_PAR_RX1) != 0) {
3250 device_printf(sc_if->msk_if_dev, "Rx parity error\n");
3251 /* Clear IRQ. */
3252 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
3253 }
3254 if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
3255 device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
3256 /* Clear IRQ. */
3257 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
3258 }
3259}
3260
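/*
 * Handle chip-level hardware error interrupts (timestamp overflow and
 * PCI/PCI Express errors) and dispatch the per-port error bits to
 * msk_handle_hwerr().
 */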
3261static void
3262msk_intr_hwerr(struct msk_softc *sc)
3263{
3264 uint32_t status;
3265 uint32_t tlphead[4];
3266
3267 status = CSR_READ_4(sc, B0_HWE_ISRC);
3268 /* Time Stamp timer overflow. */
3269 if ((status & Y2_IS_TIST_OV) != 0)
3270 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
3271 if ((status & Y2_IS_PCI_NEXP) != 0) {
3272 /*
3273 * A PCI Express error occurred which is not described in the
3274 * PEX spec.
3275 * This error is also mapped to either the Master Abort
3276 * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
3277 * can only be cleared there.
3278 */
3279 device_printf(sc->msk_dev,
3280 "PCI Express protocol violation error\n");
3281 }
3282
3283 if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
3284 uint16_t v16;
3285
3286 if ((status & Y2_IS_MST_ERR) != 0)
3287 device_printf(sc->msk_dev,
3288 "unexpected IRQ Master error\n");
3289 else
3290 device_printf(sc->msk_dev,
3291 "unexpected IRQ Status error\n");
3292 /* Reset all bits in the PCI status register. */
3293 v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
3294 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3295 pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
3296 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
3297 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
3298 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3299 }
3300
3301 /* Check for PCI Express Uncorrectable Error. */
3302 if ((status & Y2_IS_PCI_EXP) != 0) {
3303 uint32_t v32;
3304
3305 /*
3306 * On a PCI Express bus, bridges are called root complexes (RC).
3307 * PCI Express errors are also recognized by the root complex,
3308 * which requests the system to handle the problem. After an
3309 * error occurs it may be that no access to the adapter
3310 * can be performed any longer.
3311 */
3312
3313 v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
3314 if ((v32 & PEX_UNSUP_REQ) != 0) {
3315 /* Ignore unsupported request error. */
3316 device_printf(sc->msk_dev,
3317 "Uncorrectable PCI Express error\n");
3318 }
3319 if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
3320 int i;
3321
3322 /* Get TLP header from Log Registers. */
3323 for (i = 0; i < 4; i++)
3324 tlphead[i] = CSR_PCI_READ_4(sc,
3325 PEX_HEADER_LOG + i * 4);
3326 /* Check for vendor defined broadcast message. */
3327 if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
3328 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
3329 CSR_WRITE_4(sc, B0_HWE_IMSK,
3330 sc->msk_intrhwemask);
3331 CSR_READ_4(sc, B0_HWE_IMSK);
3332 }
3333 }
3334 /* Clear the interrupt. */
3335 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3336 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
3337 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3338 }
3339
3340 if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
3341 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
3342 if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
3343 msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
3344}
3345
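/*
 * Sync whichever Rx ring is active (standard or jumbo) and tell the
 * prefetch unit about the new Rx producer index.
 */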
3346static __inline void
3347msk_rxput(struct msk_if_softc *sc_if)
3348{
3349 struct msk_softc *sc;
3350
3351 sc = sc_if->msk_softc;
3352 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN))
3353 bus_dmamap_sync(
3354 sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
3355 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
3356 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3357 else
3358 bus_dmamap_sync(
3359 sc_if->msk_cdata.msk_rx_ring_tag,
3360 sc_if->msk_cdata.msk_rx_ring_map,
3361 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3362 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
3363 PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
3364}
3365
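/*
 * Walk the status list elements written back by the hardware and
 * dispatch them: VLAN tags, Rx completions (deferring the Rx put-index
 * update until the watermark is reached) and Tx completion indices
 * for both ports. Returns non-zero if more status LEs have arrived
 * in the meantime.
 */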
3366static int
3367msk_handle_events(struct msk_softc *sc)
3368{
3369 struct msk_if_softc *sc_if;
3370 int rxput[2];
3371 struct msk_stat_desc *sd;
3372 uint32_t control, status;
3373 int cons, idx, len, port, rxprog;
3374
3375 idx = CSR_READ_2(sc, STAT_PUT_IDX);
3376 if (idx == sc->msk_stat_cons)
3377 return (0);
3378
3379 /* Sync status LEs. */
3380 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
3381 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3382 /* XXX Sync Rx LEs here. */
3383
3384 rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
3385
3386 rxprog = 0;
3387 for (cons = sc->msk_stat_cons; cons != idx;) {
3388 sd = &sc->msk_stat_ring[cons];
3389 control = le32toh(sd->msk_control);
3390 if ((control & HW_OWNER) == 0)
3391 break;
3392 /*
3393 * Marvell's FreeBSD driver updates status LE after clearing
3394 * HW_OWNER. However, we don't have a way to sync a single LE
3395 * with the bus_dma(9) API; bus_dma(9) only provides a way to
3396 * sync an entire DMA map. So don't sync the LE until we have
3397 * a better way to sync LEs.
3398 */
3399 control &= ~HW_OWNER;
3400 sd->msk_control = htole32(control);
3401 status = le32toh(sd->msk_status);
3402 len = control & STLE_LEN_MASK;
3403 port = (control >> 16) & 0x01;
3404 sc_if = sc->msk_if[port];
3405 if (sc_if == NULL) {
3406 device_printf(sc->msk_dev, "invalid port opcode "
3407 "0x%08x\n", control & STLE_OP_MASK);
3408 continue;
3409 }
3410
3411 switch (control & STLE_OP_MASK) {
3412 case OP_RXVLAN:
3413 sc_if->msk_vtag = ntohs(len);
3414 break;
3415 case OP_RXCHKSVLAN:
3416 sc_if->msk_vtag = ntohs(len);
3417 break;
3418 case OP_RXSTAT:
3419 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN))
3420 msk_jumbo_rxeof(sc_if, status, len);
3421 else
3422 msk_rxeof(sc_if, status, len);
3423 rxprog++;
3424 /*
3425			 * Because there is no way to sync a single Rx LE,
3426 * put the DMA sync operation off until the end of
3427 * event processing.
3428 */
3429 rxput[port]++;
3430			/* Update prefetch unit if we've passed the watermark. */
3431 if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
3432 msk_rxput(sc_if);
3433 rxput[port] = 0;
3434 }
3435 break;
3436 case OP_TXINDEXLE:
3437 if (sc->msk_if[MSK_PORT_A] != NULL)
3438 msk_txeof(sc->msk_if[MSK_PORT_A],
3439 status & STLE_TXA1_MSKL);
3440 if (sc->msk_if[MSK_PORT_B] != NULL)
3441 msk_txeof(sc->msk_if[MSK_PORT_B],
3442 ((status & STLE_TXA2_MSKL) >>
3443 STLE_TXA2_SHIFTL) |
3444 ((len & STLE_TXA2_MSKH) <<
3445 STLE_TXA2_SHIFTH));
3446 break;
3447 default:
3448 device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
3449 control & STLE_OP_MASK);
3450 break;
3451 }
3452 MSK_INC(cons, MSK_STAT_RING_CNT);
3453 if (rxprog > sc->msk_process_limit)
3454 break;
3455 }
3456
3457 sc->msk_stat_cons = cons;
3458 /* XXX We should sync status LEs here. See above notes. */
3459
3460 if (rxput[MSK_PORT_A] > 0)
3461 msk_rxput(sc->msk_if[MSK_PORT_A]);
3462 if (rxput[MSK_PORT_B] > 0)
3463 msk_rxput(sc->msk_if[MSK_PORT_B]);
3464
3465 return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
3466}
3467
3468static void
3469msk_intr(void *xsc)
3470{
3471 struct msk_softc *sc;
3472 uint32_t status;
3473
3474 sc = xsc;
3475 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3476 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
3477 if (status == 0 || status == 0xffffffff) {
3478 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3479 return;
3480 }
3481
3482 taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task);
3483}
3484
3485static void
3486msk_int_task(void *arg, int pending)
3487{
3488 struct msk_softc *sc;
3489 struct msk_if_softc *sc_if0, *sc_if1;
3490 struct ifnet *ifp0, *ifp1;
3491 uint32_t status;
3492 int domore;
3493
3494 sc = arg;
3495 MSK_LOCK(sc);
3496
3497 /* Get interrupt source. */
3498 status = CSR_READ_4(sc, B0_ISRC);
3499 if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 ||
3500 (status & sc->msk_intrmask) == 0)
3501 goto done;
3502
3503 sc_if0 = sc->msk_if[MSK_PORT_A];
3504 sc_if1 = sc->msk_if[MSK_PORT_B];
3505 ifp0 = ifp1 = NULL;
3506 if (sc_if0 != NULL) {
3507 ifp0 = sc_if0->msk_ifp;
3508 if ((ifp0->if_drv_flags & IFF_DRV_RUNNING) == 0)
3509 goto done;
3510 }
3511 if (sc_if1 != NULL) {
3512 ifp1 = sc_if1->msk_ifp;
3513 if ((ifp1->if_drv_flags & IFF_DRV_RUNNING) == 0)
3514 goto done;
3515 }
3516
3517 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3518 msk_intr_phy(sc_if0);
3519 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3520 msk_intr_phy(sc_if1);
3521 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3522 msk_intr_gmac(sc_if0);
3523 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3524 msk_intr_gmac(sc_if1);
3525 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3526 device_printf(sc->msk_dev, "Rx descriptor error\n");
3527 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3528 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3529 CSR_READ_4(sc, B0_IMSK);
3530 }
3531 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3532 device_printf(sc->msk_dev, "Tx descriptor error\n");
3533 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3534 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3535 CSR_READ_4(sc, B0_IMSK);
3536 }
3537 if ((status & Y2_IS_HW_ERR) != 0)
3538 msk_intr_hwerr(sc);
3539
3540 domore = msk_handle_events(sc);
3541 if ((status & Y2_IS_STAT_BMU) != 0)
3542 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3543
3544 if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3545 taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task);
3546 if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3547 taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task);
3548
3549 if (domore > 0) {
3550 taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task);
3551 MSK_UNLOCK(sc);
3552 return;
3553 }
3554done:
3555 MSK_UNLOCK(sc);
3556
3557 /* Reenable interrupts. */
3558 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3559}
3560
3561static void
3562msk_init(void *xsc)
3563{
3564 struct msk_if_softc *sc_if = xsc;
3565
3566 MSK_IF_LOCK(sc_if);
3567 msk_init_locked(sc_if);
3568 MSK_IF_UNLOCK(sc_if);
3569}
3570
3571static void
3572msk_init_locked(struct msk_if_softc *sc_if)
3573{
3574 struct msk_softc *sc;
3575 struct ifnet *ifp;
3576 struct mii_data *mii;
3577 uint16_t eaddr[ETHER_ADDR_LEN / 2];
3578 uint16_t gmac;
3579 int error, i;
3580
3581 MSK_IF_LOCK_ASSERT(sc_if);
3582
3583 ifp = sc_if->msk_ifp;
3584 sc = sc_if->msk_softc;
3585 mii = device_get_softc(sc_if->msk_miibus);
3586
3587 error = 0;
3588 /* Cancel pending I/O and free all Rx/Tx buffers. */
3589 msk_stop(sc_if);
3590
3591 sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN +
3592 ETHER_VLAN_ENCAP_LEN;
3593
3594 /*
3595 * Initialize GMAC first.
3596	 * Without this initialization, the Rx MAC did not work as
3597	 * expected: it garbled status LEs, which resulted in out-of-order
3598	 * or duplicated frame delivery and in turn very poor Rx
3599	 * performance. (I had to write packet analysis code that could be
3600	 * embedded in the driver to diagnose this issue.) I spent almost
3601	 * two months fixing this; if I had had a datasheet for the Yukon
3602	 * II I wouldn't have encountered it. :-(
3603 */
3604 gmac = GM_GPCR_SPEED_100 | GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL;
3605 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
3606
3607 /* Dummy read the Interrupt Source Register. */
3608 CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3609
3610 /* Set MIB Clear Counter Mode. */
3611 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
3612 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
3613 /* Read all MIB Counters with Clear Mode set. */
3614 for (i = 0; i < GM_MIB_CNT_SIZE; i++)
3615 GMAC_READ_2(sc, sc_if->msk_port, GM_MIB_CNT_BASE + 8 * i);
3616 /* Clear MIB Clear Counter Mode. */
3617 gmac &= ~GM_PAR_MIB_CLR;
3618 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
3619
3620 /* Disable FCS. */
3621 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);
3622
3623 /* Setup Transmit Control Register. */
3624 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
3625
3626 /* Setup Transmit Flow Control Register. */
3627 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
3628
3629 /* Setup Transmit Parameter Register. */
3630 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
3631 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
3632 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
3633
3634 gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
3635 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
3636
3637 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN)
3638 gmac |= GM_SMOD_JUMBO_ENA;
3639 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
3640
3641 /* Set station address. */
3642 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
3643	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3644 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4,
3645 eaddr[i]);
3646	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3647 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4,
3648 eaddr[i]);
3649
3650 /* Disable interrupts for counter overflows. */
3651 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
3652 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
3653 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
3654
3655 /* Configure Rx MAC FIFO. */
3656 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3657 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
3658 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3659 GMF_OPER_ON | GMF_RX_F_FL_ON);
3660
3661 /* Set promiscuous mode. */
3662 msk_setpromisc(sc_if);
3663
3664 /* Set multicast filter. */
3665 msk_setmulti(sc_if);
3666
3667 /* Flush Rx MAC FIFO on any flow control or error. */
3668 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
3669 GMR_FS_ANY_ERR);
3670
3671 /* Set Rx FIFO flush threshold to 64 bytes. */
3672 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR),
3673 RX_GMF_FL_THR_DEF);
3674
3675 /* Configure Tx MAC FIFO. */
3676 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3677 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
3678 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
3679
3680 /* Configure hardware VLAN tag insertion/stripping. */
3681 msk_setvlan(sc_if, ifp);
3682
3683	/* XXX It seems STFW is required for all cases. */
3684 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), TX_STFW_ENA);
3685
3686 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
3687		/* Set Rx Pause thresholds. */
3688 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
3689 MSK_ECU_LLPP);
3690 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
3691 MSK_ECU_ULPP);
3692 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN) {
3693 /*
3694			 * Not sure the following code is needed, as the
3695			 * Yukon EC Ultra may not support jumbo frames.
3696 *
3697 * Set Tx GMAC FIFO Almost Empty Threshold.
3698 */
3699 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
3700 MSK_ECU_AE_THR);
3701 /* Disable Store & Forward mode for Tx. */
3702 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3703 TX_STFW_DIS);
3704 }
3705 }
3706
3707 /*
3708 * Disable Force Sync bit and Alloc bit in Tx RAM interface
3709 * arbiter as we don't use Sync Tx queue.
3710 */
3711 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
3712 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
3713 /* Enable the RAM Interface Arbiter. */
3714 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
3715
3716 /* Setup RAM buffer. */
3717 msk_set_rambuffer(sc_if);
3718
3719 /* Disable Tx sync Queue. */
3720 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
3721
3722 /* Setup Tx Queue Bus Memory Interface. */
3723 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
3724 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
3725 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
3726 /* Increase IPID when hardware generates IP packets in TSO. */
3727 if ((ifp->if_hwassist & CSUM_TSO) != 0)
3728 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3729 BMU_TX_IPIDINCR_ON);
3730 else
3731 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3732 BMU_TX_IPIDINCR_OFF);
3733 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
3734 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3735 sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
3736 /* Fix for Yukon-EC Ultra: set BMU FIFO level */
3737 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), MSK_ECU_TXFF_LEV);
3738 }
3739
3740 /* Setup Rx Queue Bus Memory Interface. */
3741 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
3742 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
3743 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
3744 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
3745 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3746 sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
3747 /* MAC Rx RAM Read is controlled by hardware. */
3748 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
3749 }
3750
3751 msk_set_prefetch(sc, sc_if->msk_txq,
3752 sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
3753 msk_init_tx_ring(sc_if);
3754
3755 /* Disable Rx checksum offload and RSS hash. */
3756 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3757 BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);
3758 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
3759 msk_set_prefetch(sc, sc_if->msk_rxq,
3760 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
3761 MSK_JUMBO_RX_RING_CNT - 1);
3762 error = msk_init_jumbo_rx_ring(sc_if);
3763 } else {
3764 msk_set_prefetch(sc, sc_if->msk_rxq,
3765 sc_if->msk_rdata.msk_rx_ring_paddr,
3766 MSK_RX_RING_CNT - 1);
3767 error = msk_init_rx_ring(sc_if);
3768 }
3769 if (error != 0) {
3770 device_printf(sc_if->msk_if_dev,
3771 "initialization failed: no memory for Rx buffers\n");
3772 msk_stop(sc_if);
3773 return;
3774 }
3775
3776 /* Configure interrupt handling. */
3777 if (sc_if->msk_port == MSK_PORT_A) {
3778 sc->msk_intrmask |= Y2_IS_PORT_A;
3779 sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
3780 } else {
3781 sc->msk_intrmask |= Y2_IS_PORT_B;
3782 sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
3783 }
3784 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3785 CSR_READ_4(sc, B0_HWE_IMSK);
3786 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3787 CSR_READ_4(sc, B0_IMSK);
3788
3789 sc_if->msk_link = 0;
3790 mii_mediachg(mii);
3791
3792 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3793 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3794
3795 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3796}
3797
3798static void
3799msk_set_rambuffer(struct msk_if_softc *sc_if)
3800{
3801 struct msk_softc *sc;
3802 int ltpp, utpp;
3803
3804 sc = sc_if->msk_softc;
3805
3806 /* Setup Rx Queue. */
3807 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
3808 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
3809 sc->msk_rxqstart[sc_if->msk_port] / 8);
3810 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
3811 sc->msk_rxqend[sc_if->msk_port] / 8);
3812 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
3813 sc->msk_rxqstart[sc_if->msk_port] / 8);
3814 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
3815 sc->msk_rxqstart[sc_if->msk_port] / 8);
3816
3817 utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3818 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
3819 ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3820 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
3821 if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
3822 ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
3823 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
3824 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
3825	/* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */
3826
3827 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
3828 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));
3829
3830 /* Setup Tx Queue. */
3831 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
3832 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
3833 sc->msk_txqstart[sc_if->msk_port] / 8);
3834 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
3835 sc->msk_txqend[sc_if->msk_port] / 8);
3836 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
3837 sc->msk_txqstart[sc_if->msk_port] / 8);
3838 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
3839 sc->msk_txqstart[sc_if->msk_port] / 8);
3840 /* Enable Store & Forward for Tx side. */
3841 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
3842 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
3843 CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
3844}
3845
3846static void
3847msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
3848 uint32_t count)
3849{
3850
3851 /* Reset the prefetch unit. */
3852 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3853 PREF_UNIT_RST_SET);
3854 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3855 PREF_UNIT_RST_CLR);
3856 /* Set LE base address. */
3857 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
3858 MSK_ADDR_LO(addr));
3859 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
3860 MSK_ADDR_HI(addr));
3861 /* Set the list last index. */
3862 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
3863 count);
3864 /* Turn on prefetch unit. */
3865 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3866 PREF_UNIT_OP_ON);
3867 /* Dummy read to ensure write. */
3868 CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
3869}
3870
3871static void
3872msk_stop(struct msk_if_softc *sc_if)
3873{
3874 struct msk_softc *sc;
3875 struct msk_txdesc *txd;
3876 struct msk_rxdesc *rxd;
3877 struct msk_rxdesc *jrxd;
3878 struct ifnet *ifp;
3879 uint32_t val;
3880 int i;
3881
3882 MSK_IF_LOCK_ASSERT(sc_if);
3883 sc = sc_if->msk_softc;
3884 ifp = sc_if->msk_ifp;
3885
3886 callout_stop(&sc_if->msk_tick_ch);
3887 callout_stop(&sc_if->msk_watchdog_ch);
3888
3889 /* Disable interrupts. */
3890 if (sc_if->msk_port == MSK_PORT_A) {
3891 sc->msk_intrmask &= ~Y2_IS_PORT_A;
3892 sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
3893 } else {
3894 sc->msk_intrmask &= ~Y2_IS_PORT_B;
3895 sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
3896 }
3897 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3898 CSR_READ_4(sc, B0_HWE_IMSK);
3899 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3900 CSR_READ_4(sc, B0_IMSK);
3901
3902 /* Disable Tx/Rx MAC. */
3903 val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3904 val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
3905 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
3906 /* Read again to ensure writing. */
3907 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3908
3909 /* Stop Tx BMU. */
3910 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
3911 val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
3912 for (i = 0; i < MSK_TIMEOUT; i++) {
3913 if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
3914 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3915 BMU_STOP);
3916 CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
3917 } else
3918 break;
3919 DELAY(1);
3920 }
3921 if (i == MSK_TIMEOUT)
3922 device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
3923 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
3924 RB_RST_SET | RB_DIS_OP_MD);
3925
3926 /* Disable all GMAC interrupt. */
3927 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
3928 /* Disable PHY interrupt. */
3929 if (sc->msk_marvell_phy)
3930 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
3931
3932 /* Disable the RAM Interface Arbiter. */
3933 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);
3934
3935	/* Reset the PCI FIFO of the async Tx queue. */
3936 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3937 BMU_RST_SET | BMU_FIFO_RST);
3938
3939 /* Reset the Tx prefetch units. */
3940 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
3941 PREF_UNIT_RST_SET);
3942
3943 /* Reset the RAM Buffer async Tx queue. */
3944 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);
3945
3946 /* Reset Tx MAC FIFO. */
3947 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3948 /* Set Pause Off. */
3949 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
3950
3951 /*
3952	 * The Rx Stop command will not work for Yukon-2 if the BMU has not
3953	 * reached the end of a packet, and since we can't be sure that no
3954	 * data is incoming, we must reset the BMU while it is not in the
3955	 * middle of a DMA transfer. Since it is possible that the Rx path
3956	 * is still active, the Rx RAM buffer is stopped first, so any
3957	 * possible incoming data will not trigger a DMA. After the RAM
3958	 * buffer is stopped, the BMU is polled until any DMA in progress
3959	 * has ended, and only then is it reset.
3960 */
3961
3962 /* Disable the RAM Buffer receive queue. */
3963 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
3964 for (i = 0; i < MSK_TIMEOUT; i++) {
3965 if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
3966 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
3967 break;
3968 DELAY(1);
3969 }
3970 if (i == MSK_TIMEOUT)
3971 device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
3972 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3973 BMU_RST_SET | BMU_FIFO_RST);
3974 /* Reset the Rx prefetch unit. */
3975 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
3976 PREF_UNIT_RST_SET);
3977 /* Reset the RAM Buffer receive queue. */
3978 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
3979 /* Reset Rx MAC FIFO. */
3980 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3981
3982 /* Free Rx and Tx mbufs still in the queues. */
3983 for (i = 0; i < MSK_RX_RING_CNT; i++) {
3984 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
3985 if (rxd->rx_m != NULL) {
3986 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
3987 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3988 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
3989 rxd->rx_dmamap);
3990 m_freem(rxd->rx_m);
3991 rxd->rx_m = NULL;
3992 }
3993 }
3994 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
3995 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
3996 if (jrxd->rx_m != NULL) {
3997 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
3998 jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3999 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
4000 jrxd->rx_dmamap);
4001 m_freem(jrxd->rx_m);
4002 jrxd->rx_m = NULL;
4003 }
4004 }
4005 for (i = 0; i < MSK_TX_RING_CNT; i++) {
4006 txd = &sc_if->msk_cdata.msk_txdesc[i];
4007 if (txd->tx_m != NULL) {
4008 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
4009 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
4010 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
4011 txd->tx_dmamap);
4012 m_freem(txd->tx_m);
4013 txd->tx_m = NULL;
4014 }
4015 }
4016
4017 /*
4018 * Mark the interface down.
4019 */
4020 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4021 sc_if->msk_link = 0;
4022}
4023
4024static int
4025sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
4026{
4027 int error, value;
4028
4029 if (!arg1)
4030 return (EINVAL);
4031 value = *(int *)arg1;
4032 error = sysctl_handle_int(oidp, &value, 0, req);
4033 if (error || !req->newptr)
4034 return (error);
4035 if (value < low || value > high)
4036 return (EINVAL);
4037 *(int *)arg1 = value;
4038
4039 return (0);
4040}
4041
4042static int
4043sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
4044{
4045
4046 return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
4047 MSK_PROC_MAX));
4048}
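
/*
 * A minimal hookup sketch, assuming the standard newbus sysctl context; the
 * OID name and description string below are illustrative, not taken from
 * this file.  sysctl_hw_msk_proc_limit() is meant to back a controller
 * sysctl so the Rx event processing limit can be tuned at run time:
 *
 *	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->msk_dev),
 *	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->msk_dev)), OID_AUTO,
 *	    "process_limit", CTLTYPE_INT | CTLFLAG_RW,
 *	    &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
 *	    "max number of Rx events to process");
 *
 * sysctl_int_range() then clamps any value written through that OID to
 * [MSK_PROC_MIN, MSK_PROC_MAX] and rejects anything outside the range with
 * EINVAL.
 */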
1917 if (sc->msk_msi)
1918 pci_release_msi(dev);
1919 bus_release_resources(dev, sc->msk_res_spec, sc->msk_res);
1920 mtx_destroy(&sc->msk_mtx);
1921
1922 return (0);
1923}
1924
1925struct msk_dmamap_arg {
1926 bus_addr_t msk_busaddr;
1927};
1928
1929static void
1930msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1931{
1932 struct msk_dmamap_arg *ctx;
1933
1934 if (error != 0)
1935 return;
1936 ctx = arg;
1937 ctx->msk_busaddr = segs[0].ds_addr;
1938}
1939
1940/* Create status DMA region. */
1941static int
1942msk_status_dma_alloc(struct msk_softc *sc)
1943{
1944 struct msk_dmamap_arg ctx;
1945 int error;
1946
1947 error = bus_dma_tag_create(
1948 bus_get_dma_tag(sc->msk_dev), /* parent */
1949 MSK_STAT_ALIGN, 0, /* alignment, boundary */
1950 BUS_SPACE_MAXADDR, /* lowaddr */
1951 BUS_SPACE_MAXADDR, /* highaddr */
1952 NULL, NULL, /* filter, filterarg */
1953 MSK_STAT_RING_SZ, /* maxsize */
1954 1, /* nsegments */
1955 MSK_STAT_RING_SZ, /* maxsegsize */
1956 0, /* flags */
1957 NULL, NULL, /* lockfunc, lockarg */
1958 &sc->msk_stat_tag);
1959 if (error != 0) {
1960 device_printf(sc->msk_dev,
1961 "failed to create status DMA tag\n");
1962 return (error);
1963 }
1964
1965 /* Allocate DMA'able memory and load the DMA map for status ring. */
1966 error = bus_dmamem_alloc(sc->msk_stat_tag,
1967 (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
1968 BUS_DMA_ZERO, &sc->msk_stat_map);
1969 if (error != 0) {
1970 device_printf(sc->msk_dev,
1971 "failed to allocate DMA'able memory for status ring\n");
1972 return (error);
1973 }
1974
1975 ctx.msk_busaddr = 0;
1976 error = bus_dmamap_load(sc->msk_stat_tag,
1977 sc->msk_stat_map, sc->msk_stat_ring, MSK_STAT_RING_SZ,
1978 msk_dmamap_cb, &ctx, 0);
1979 if (error != 0) {
1980 device_printf(sc->msk_dev,
1981 "failed to load DMA'able memory for status ring\n");
1982 return (error);
1983 }
1984 sc->msk_stat_ring_paddr = ctx.msk_busaddr;
1985
1986 return (0);
1987}
1988
1989static void
1990msk_status_dma_free(struct msk_softc *sc)
1991{
1992
1993 /* Destroy status block. */
1994 if (sc->msk_stat_tag) {
1995 if (sc->msk_stat_map) {
1996 bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
1997 if (sc->msk_stat_ring) {
1998 bus_dmamem_free(sc->msk_stat_tag,
1999 sc->msk_stat_ring, sc->msk_stat_map);
2000 sc->msk_stat_ring = NULL;
2001 }
2002 sc->msk_stat_map = NULL;
2003 }
2004 bus_dma_tag_destroy(sc->msk_stat_tag);
2005 sc->msk_stat_tag = NULL;
2006 }
2007}
2008
2009static int
2010msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
2011{
2012 struct msk_dmamap_arg ctx;
2013 struct msk_txdesc *txd;
2014 struct msk_rxdesc *rxd;
2015 struct msk_rxdesc *jrxd;
2016 struct msk_jpool_entry *entry;
2017 uint8_t *ptr;
2018 int error, i;
2019
2020 mtx_init(&sc_if->msk_jlist_mtx, "msk_jlist_mtx", NULL, MTX_DEF);
2021 SLIST_INIT(&sc_if->msk_jfree_listhead);
2022 SLIST_INIT(&sc_if->msk_jinuse_listhead);
2023
2024 /* Create parent DMA tag. */
2025 /*
2026 * XXX
2027	 * It seems that the Yukon II supports full 64-bit DMA operations,
2028	 * but it needs two descriptors (list elements) per 64-bit DMA
2029	 * operation. Since we don't know in advance whether a 32-bit or
2030	 * 64-bit DMA address mapping will be used for each mbuf, we limit
2031	 * DMA to the 32-bit address space. Otherwise we would have to check
2032	 * which DMA address is used and chain another descriptor for the
2033	 * 64-bit operation, which also means the descriptor ring size would
2034	 * be variable. Limiting DMA addresses to the 32-bit address space
2035	 * greatly simplifies descriptor handling and may even improve
2036	 * performance a bit due to more efficient handling of descriptors.
2037	 * Apart from complicating the checksum offloading mechanism, it
2038	 * seems a bad idea to use a separate descriptor for a 64-bit DMA
2039	 * operation just to save a little descriptor memory. Anyway, I've
2040	 * never seen such an exotic scheme on other Ethernet hardware.
2041 */
2042 error = bus_dma_tag_create(
2043 bus_get_dma_tag(sc_if->msk_if_dev), /* parent */
2044 1, 0, /* alignment, boundary */
2045 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2046 BUS_SPACE_MAXADDR, /* highaddr */
2047 NULL, NULL, /* filter, filterarg */
2048 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
2049 0, /* nsegments */
2050 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
2051 0, /* flags */
2052 NULL, NULL, /* lockfunc, lockarg */
2053 &sc_if->msk_cdata.msk_parent_tag);
2054 if (error != 0) {
2055 device_printf(sc_if->msk_if_dev,
2056 "failed to create parent DMA tag\n");
2057 goto fail;
2058 }
2059 /* Create tag for Tx ring. */
2060 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2061 MSK_RING_ALIGN, 0, /* alignment, boundary */
2062 BUS_SPACE_MAXADDR, /* lowaddr */
2063 BUS_SPACE_MAXADDR, /* highaddr */
2064 NULL, NULL, /* filter, filterarg */
2065 MSK_TX_RING_SZ, /* maxsize */
2066 1, /* nsegments */
2067 MSK_TX_RING_SZ, /* maxsegsize */
2068 0, /* flags */
2069 NULL, NULL, /* lockfunc, lockarg */
2070 &sc_if->msk_cdata.msk_tx_ring_tag);
2071 if (error != 0) {
2072 device_printf(sc_if->msk_if_dev,
2073 "failed to create Tx ring DMA tag\n");
2074 goto fail;
2075 }
2076
2077 /* Create tag for Rx ring. */
2078 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2079 MSK_RING_ALIGN, 0, /* alignment, boundary */
2080 BUS_SPACE_MAXADDR, /* lowaddr */
2081 BUS_SPACE_MAXADDR, /* highaddr */
2082 NULL, NULL, /* filter, filterarg */
2083 MSK_RX_RING_SZ, /* maxsize */
2084 1, /* nsegments */
2085 MSK_RX_RING_SZ, /* maxsegsize */
2086 0, /* flags */
2087 NULL, NULL, /* lockfunc, lockarg */
2088 &sc_if->msk_cdata.msk_rx_ring_tag);
2089 if (error != 0) {
2090 device_printf(sc_if->msk_if_dev,
2091 "failed to create Rx ring DMA tag\n");
2092 goto fail;
2093 }
2094
2095 /* Create tag for jumbo Rx ring. */
2096 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2097 MSK_RING_ALIGN, 0, /* alignment, boundary */
2098 BUS_SPACE_MAXADDR, /* lowaddr */
2099 BUS_SPACE_MAXADDR, /* highaddr */
2100 NULL, NULL, /* filter, filterarg */
2101 MSK_JUMBO_RX_RING_SZ, /* maxsize */
2102 1, /* nsegments */
2103 MSK_JUMBO_RX_RING_SZ, /* maxsegsize */
2104 0, /* flags */
2105 NULL, NULL, /* lockfunc, lockarg */
2106 &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2107 if (error != 0) {
2108 device_printf(sc_if->msk_if_dev,
2109 "failed to create jumbo Rx ring DMA tag\n");
2110 goto fail;
2111 }
2112
2113 /* Create tag for jumbo buffer blocks. */
2114 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2115 PAGE_SIZE, 0, /* alignment, boundary */
2116 BUS_SPACE_MAXADDR, /* lowaddr */
2117 BUS_SPACE_MAXADDR, /* highaddr */
2118 NULL, NULL, /* filter, filterarg */
2119 MSK_JMEM, /* maxsize */
2120 1, /* nsegments */
2121 MSK_JMEM, /* maxsegsize */
2122 0, /* flags */
2123 NULL, NULL, /* lockfunc, lockarg */
2124 &sc_if->msk_cdata.msk_jumbo_tag);
2125 if (error != 0) {
2126 device_printf(sc_if->msk_if_dev,
2127 "failed to create jumbo Rx buffer block DMA tag\n");
2128 goto fail;
2129 }
2130
2131 /* Create tag for Tx buffers. */
2132 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2133 1, 0, /* alignment, boundary */
2134 BUS_SPACE_MAXADDR, /* lowaddr */
2135 BUS_SPACE_MAXADDR, /* highaddr */
2136 NULL, NULL, /* filter, filterarg */
2137 MCLBYTES * MSK_MAXTXSEGS, /* maxsize */
2138 MSK_MAXTXSEGS, /* nsegments */
2139 MCLBYTES, /* maxsegsize */
2140 0, /* flags */
2141 NULL, NULL, /* lockfunc, lockarg */
2142 &sc_if->msk_cdata.msk_tx_tag);
2143 if (error != 0) {
2144 device_printf(sc_if->msk_if_dev,
2145 "failed to create Tx DMA tag\n");
2146 goto fail;
2147 }
2148
2149 /* Create tag for Rx buffers. */
2150 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2151 1, 0, /* alignment, boundary */
2152 BUS_SPACE_MAXADDR, /* lowaddr */
2153 BUS_SPACE_MAXADDR, /* highaddr */
2154 NULL, NULL, /* filter, filterarg */
2155 MCLBYTES, /* maxsize */
2156 1, /* nsegments */
2157 MCLBYTES, /* maxsegsize */
2158 0, /* flags */
2159 NULL, NULL, /* lockfunc, lockarg */
2160 &sc_if->msk_cdata.msk_rx_tag);
2161 if (error != 0) {
2162 device_printf(sc_if->msk_if_dev,
2163 "failed to create Rx DMA tag\n");
2164 goto fail;
2165 }
2166
2167 /* Create tag for jumbo Rx buffers. */
2168 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2169 PAGE_SIZE, 0, /* alignment, boundary */
2170 BUS_SPACE_MAXADDR, /* lowaddr */
2171 BUS_SPACE_MAXADDR, /* highaddr */
2172 NULL, NULL, /* filter, filterarg */
2173 MCLBYTES * MSK_MAXRXSEGS, /* maxsize */
2174 MSK_MAXRXSEGS, /* nsegments */
2175 MSK_JLEN, /* maxsegsize */
2176 0, /* flags */
2177 NULL, NULL, /* lockfunc, lockarg */
2178 &sc_if->msk_cdata.msk_jumbo_rx_tag);
2179 if (error != 0) {
2180 device_printf(sc_if->msk_if_dev,
2181 "failed to create jumbo Rx DMA tag\n");
2182 goto fail;
2183 }
2184
2185 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
2186 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag,
2187 (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK |
2188 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map);
2189 if (error != 0) {
2190 device_printf(sc_if->msk_if_dev,
2191 "failed to allocate DMA'able memory for Tx ring\n");
2192 goto fail;
2193 }
2194
2195 ctx.msk_busaddr = 0;
2196 error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag,
2197 sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring,
2198 MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, 0);
2199 if (error != 0) {
2200 device_printf(sc_if->msk_if_dev,
2201 "failed to load DMA'able memory for Tx ring\n");
2202 goto fail;
2203 }
2204 sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr;
2205
2206 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
2207 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag,
2208 (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK |
2209 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map);
2210 if (error != 0) {
2211 device_printf(sc_if->msk_if_dev,
2212 "failed to allocate DMA'able memory for Rx ring\n");
2213 goto fail;
2214 }
2215
2216 ctx.msk_busaddr = 0;
2217 error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag,
2218 sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring,
2219 MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, 0);
2220 if (error != 0) {
2221 device_printf(sc_if->msk_if_dev,
2222 "failed to load DMA'able memory for Rx ring\n");
2223 goto fail;
2224 }
2225 sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr;
2226
2227 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
2228 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2229 (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
2230 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2231 &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2232 if (error != 0) {
2233 device_printf(sc_if->msk_if_dev,
2234 "failed to allocate DMA'able memory for jumbo Rx ring\n");
2235 goto fail;
2236 }
2237
2238 ctx.msk_busaddr = 0;
2239 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2240 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
2241 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
2242 msk_dmamap_cb, &ctx, 0);
2243 if (error != 0) {
2244 device_printf(sc_if->msk_if_dev,
2245 "failed to load DMA'able memory for jumbo Rx ring\n");
2246 goto fail;
2247 }
2248 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
2249
2250 /* Create DMA maps for Tx buffers. */
2251 for (i = 0; i < MSK_TX_RING_CNT; i++) {
2252 txd = &sc_if->msk_cdata.msk_txdesc[i];
2253 txd->tx_m = NULL;
2254 txd->tx_dmamap = NULL;
2255 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0,
2256 &txd->tx_dmamap);
2257 if (error != 0) {
2258 device_printf(sc_if->msk_if_dev,
2259 "failed to create Tx dmamap\n");
2260 goto fail;
2261 }
2262 }
2263 /* Create DMA maps for Rx buffers. */
2264 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2265 &sc_if->msk_cdata.msk_rx_sparemap)) != 0) {
2266 device_printf(sc_if->msk_if_dev,
2267 "failed to create spare Rx dmamap\n");
2268 goto fail;
2269 }
2270 for (i = 0; i < MSK_RX_RING_CNT; i++) {
2271 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2272 rxd->rx_m = NULL;
2273 rxd->rx_dmamap = NULL;
2274 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2275 &rxd->rx_dmamap);
2276 if (error != 0) {
2277 device_printf(sc_if->msk_if_dev,
2278 "failed to create Rx dmamap\n");
2279 goto fail;
2280 }
2281 }
2282 /* Create DMA maps for jumbo Rx buffers. */
2283 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2284 &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
2285 device_printf(sc_if->msk_if_dev,
2286 "failed to create spare jumbo Rx dmamap\n");
2287 goto fail;
2288 }
2289 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2290 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2291 jrxd->rx_m = NULL;
2292 jrxd->rx_dmamap = NULL;
2293 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2294 &jrxd->rx_dmamap);
2295 if (error != 0) {
2296 device_printf(sc_if->msk_if_dev,
2297 "failed to create jumbo Rx dmamap\n");
2298 goto fail;
2299 }
2300 }
2301
2302 /* Allocate DMA'able memory and load the DMA map for jumbo buf. */
2303 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_tag,
2304 (void **)&sc_if->msk_rdata.msk_jumbo_buf,
2305 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2306 &sc_if->msk_cdata.msk_jumbo_map);
2307 if (error != 0) {
2308 device_printf(sc_if->msk_if_dev,
2309 "failed to allocate DMA'able memory for jumbo buf\n");
2310 goto fail;
2311 }
2312
2313 ctx.msk_busaddr = 0;
2314 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_tag,
2315 sc_if->msk_cdata.msk_jumbo_map, sc_if->msk_rdata.msk_jumbo_buf,
2316 MSK_JMEM, msk_dmamap_cb, &ctx, 0);
2317 if (error != 0) {
2318 device_printf(sc_if->msk_if_dev,
2319 "failed to load DMA'able memory for jumbobuf\n");
2320 goto fail;
2321 }
2322 sc_if->msk_rdata.msk_jumbo_buf_paddr = ctx.msk_busaddr;
2323
2324 /*
2325 * Now divide it up into 9K pieces and save the addresses
2326 * in an array.
2327 */
2328 ptr = sc_if->msk_rdata.msk_jumbo_buf;
2329 for (i = 0; i < MSK_JSLOTS; i++) {
2330 sc_if->msk_cdata.msk_jslots[i] = ptr;
2331 ptr += MSK_JLEN;
2332 entry = malloc(sizeof(struct msk_jpool_entry),
2333 M_DEVBUF, M_WAITOK);
2334 if (entry == NULL) {
2335 device_printf(sc_if->msk_if_dev,
2336 "no memory for jumbo buffers!\n");
2337 error = ENOMEM;
2338 goto fail;
2339 }
2340 entry->slot = i;
2341 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
2342 jpool_entries);
2343 }
2344
2345fail:
2346 return (error);
2347}
2348
2349static void
2350msk_txrx_dma_free(struct msk_if_softc *sc_if)
2351{
2352 struct msk_txdesc *txd;
2353 struct msk_rxdesc *rxd;
2354 struct msk_rxdesc *jrxd;
2355 struct msk_jpool_entry *entry;
2356 int i;
2357
2358 MSK_JLIST_LOCK(sc_if);
2359 while ((entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead))) {
2360 device_printf(sc_if->msk_if_dev,
2361 "asked to free buffer that is in use!\n");
2362 SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
2363 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
2364 jpool_entries);
2365 }
2366
2367 while (!SLIST_EMPTY(&sc_if->msk_jfree_listhead)) {
2368 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
2369 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
2370 free(entry, M_DEVBUF);
2371 }
2372 MSK_JLIST_UNLOCK(sc_if);
2373
2374 /* Destroy jumbo buffer block. */
2375 if (sc_if->msk_cdata.msk_jumbo_map)
2376 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_tag,
2377 sc_if->msk_cdata.msk_jumbo_map);
2378
2379 if (sc_if->msk_rdata.msk_jumbo_buf) {
2380 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_tag,
2381 sc_if->msk_rdata.msk_jumbo_buf,
2382 sc_if->msk_cdata.msk_jumbo_map);
2383 sc_if->msk_rdata.msk_jumbo_buf = NULL;
2384 sc_if->msk_cdata.msk_jumbo_map = NULL;
2385 }
2386
2387 /* Tx ring. */
2388 if (sc_if->msk_cdata.msk_tx_ring_tag) {
2389 if (sc_if->msk_cdata.msk_tx_ring_map)
2390 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag,
2391 sc_if->msk_cdata.msk_tx_ring_map);
2392 if (sc_if->msk_cdata.msk_tx_ring_map &&
2393 sc_if->msk_rdata.msk_tx_ring)
2394 bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag,
2395 sc_if->msk_rdata.msk_tx_ring,
2396 sc_if->msk_cdata.msk_tx_ring_map);
2397 sc_if->msk_rdata.msk_tx_ring = NULL;
2398 sc_if->msk_cdata.msk_tx_ring_map = NULL;
2399 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag);
2400 sc_if->msk_cdata.msk_tx_ring_tag = NULL;
2401 }
2402 /* Rx ring. */
2403 if (sc_if->msk_cdata.msk_rx_ring_tag) {
2404 if (sc_if->msk_cdata.msk_rx_ring_map)
2405 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag,
2406 sc_if->msk_cdata.msk_rx_ring_map);
2407 if (sc_if->msk_cdata.msk_rx_ring_map &&
2408 sc_if->msk_rdata.msk_rx_ring)
2409 bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag,
2410 sc_if->msk_rdata.msk_rx_ring,
2411 sc_if->msk_cdata.msk_rx_ring_map);
2412 sc_if->msk_rdata.msk_rx_ring = NULL;
2413 sc_if->msk_cdata.msk_rx_ring_map = NULL;
2414 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag);
2415 sc_if->msk_cdata.msk_rx_ring_tag = NULL;
2416 }
2417 /* Jumbo Rx ring. */
2418 if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
2419 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map)
2420 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2421 sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2422 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
2423 sc_if->msk_rdata.msk_jumbo_rx_ring)
2424 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2425 sc_if->msk_rdata.msk_jumbo_rx_ring,
2426 sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2427 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
2428 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
2429 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2430 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
2431 }
2432 /* Tx buffers. */
2433 if (sc_if->msk_cdata.msk_tx_tag) {
2434 for (i = 0; i < MSK_TX_RING_CNT; i++) {
2435 txd = &sc_if->msk_cdata.msk_txdesc[i];
2436 if (txd->tx_dmamap) {
2437 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
2438 txd->tx_dmamap);
2439 txd->tx_dmamap = NULL;
2440 }
2441 }
2442 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
2443 sc_if->msk_cdata.msk_tx_tag = NULL;
2444 }
2445 /* Rx buffers. */
2446 if (sc_if->msk_cdata.msk_rx_tag) {
2447 for (i = 0; i < MSK_RX_RING_CNT; i++) {
2448 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2449 if (rxd->rx_dmamap) {
2450 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2451 rxd->rx_dmamap);
2452 rxd->rx_dmamap = NULL;
2453 }
2454 }
2455 if (sc_if->msk_cdata.msk_rx_sparemap) {
2456 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2457 sc_if->msk_cdata.msk_rx_sparemap);
2458 sc_if->msk_cdata.msk_rx_sparemap = 0;
2459 }
2460 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2461 sc_if->msk_cdata.msk_rx_tag = NULL;
2462 }
2463 /* Jumbo Rx buffers. */
2464 if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
2465 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2466 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2467 if (jrxd->rx_dmamap) {
2468 bus_dmamap_destroy(
2469 sc_if->msk_cdata.msk_jumbo_rx_tag,
2470 jrxd->rx_dmamap);
2471 jrxd->rx_dmamap = NULL;
2472 }
2473 }
2474 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
2475 bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
2476 sc_if->msk_cdata.msk_jumbo_rx_sparemap);
2477 sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
2478 }
2479 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
2480 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
2481 }
2482
2483 if (sc_if->msk_cdata.msk_parent_tag) {
2484 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
2485 sc_if->msk_cdata.msk_parent_tag = NULL;
2486 }
2487 mtx_destroy(&sc_if->msk_jlist_mtx);
2488}
2489
2490/*
2491 * Allocate a jumbo buffer.
2492 */
2493static void *
2494msk_jalloc(struct msk_if_softc *sc_if)
2495{
2496 struct msk_jpool_entry *entry;
2497
2498 MSK_JLIST_LOCK(sc_if);
2499
2500 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
2501
2502 if (entry == NULL) {
2503 MSK_JLIST_UNLOCK(sc_if);
2504 return (NULL);
2505 }
2506
2507 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
2508 SLIST_INSERT_HEAD(&sc_if->msk_jinuse_listhead, entry, jpool_entries);
2509
2510 MSK_JLIST_UNLOCK(sc_if);
2511
2512 return (sc_if->msk_cdata.msk_jslots[entry->slot]);
2513}
2514
2515/*
2516 * Release a jumbo buffer.
2517 */
2518static void
2519msk_jfree(void *buf, void *args)
2520{
2521 struct msk_if_softc *sc_if;
2522 struct msk_jpool_entry *entry;
2523 int i;
2524
2525 /* Extract the softc struct pointer. */
2526 sc_if = (struct msk_if_softc *)args;
2527 KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__));
2528
2529 MSK_JLIST_LOCK(sc_if);
2530 /* Calculate the slot this buffer belongs to. */
2531 i = ((vm_offset_t)buf
2532 - (vm_offset_t)sc_if->msk_rdata.msk_jumbo_buf) / MSK_JLEN;
2533 KASSERT(i >= 0 && i < MSK_JSLOTS,
2534 ("%s: asked to free buffer that we don't manage!", __func__));
2535
2536 entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead);
2537 KASSERT(entry != NULL, ("%s: buffer not in use!", __func__));
2538 entry->slot = i;
2539 SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
2540 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, jpool_entries);
2541 if (SLIST_EMPTY(&sc_if->msk_jinuse_listhead))
2542 wakeup(sc_if);
2543
2544 MSK_JLIST_UNLOCK(sc_if);
2545}
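
/*
 * A minimal usage sketch, assuming the mbuf(9) external-storage interface of
 * this era (the error handling shown is illustrative): a jumbo Rx buffer is
 * built by taking a slot from msk_jalloc() and attaching it to an mbuf with
 * msk_jfree() as the free routine, so the slot returns to msk_jfree_listhead
 * once the cluster's reference count drops to zero.
 *
 *	MGETHDR(m, M_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	buf = msk_jalloc(sc_if);
 *	if (buf == NULL) {
 *		m_freem(m);
 *		return (ENOBUFS);
 *	}
 *	MEXTADD(m, buf, MSK_JLEN, msk_jfree, (void *)sc_if, 0, EXT_NET_DRV);
 *	m->m_pkthdr.len = m->m_len = MSK_JLEN;
 */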
2546
2547/*
2548 * This is a copy of ath_defrag() from ath(4).
2549 *
2550 * Defragment an mbuf chain, returning at most maxfrags separate
2551 * mbufs+clusters. If this is not possible, NULL is returned and
2552 * the original mbuf chain is left in its present (potentially
2553 * modified) state. We use two techniques: collapsing consecutive
2554 * mbufs and replacing consecutive mbufs with a cluster.
2555 */
2556static struct mbuf *
2557msk_defrag(struct mbuf *m0, int how, int maxfrags)
2558{
2559 struct mbuf *m, *n, *n2, **prev;
2560 u_int curfrags;
2561
2562 /*
2563 * Calculate the current number of frags.
2564 */
2565 curfrags = 0;
2566 for (m = m0; m != NULL; m = m->m_next)
2567 curfrags++;
2568 /*
2569 * First, try to collapse mbufs. Note that we always collapse
2570 * towards the front so we don't need to deal with moving the
2571 * pkthdr. This may be suboptimal if the first mbuf has much
2572 * less data than the following.
2573 */
2574 m = m0;
2575again:
2576 for (;;) {
2577 n = m->m_next;
2578 if (n == NULL)
2579 break;
2580 if ((m->m_flags & M_RDONLY) == 0 &&
2581 n->m_len < M_TRAILINGSPACE(m)) {
2582 bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
2583 n->m_len);
2584 m->m_len += n->m_len;
2585 m->m_next = n->m_next;
2586 m_free(n);
2587 if (--curfrags <= maxfrags)
2588 return (m0);
2589 } else
2590 m = n;
2591 }
2592 KASSERT(maxfrags > 1,
2593 ("maxfrags %u, but normal collapse failed", maxfrags));
2594 /*
2595 * Collapse consecutive mbufs to a cluster.
2596 */
2597 prev = &m0->m_next; /* NB: not the first mbuf */
2598 while ((n = *prev) != NULL) {
2599 if ((n2 = n->m_next) != NULL &&
2600 n->m_len + n2->m_len < MCLBYTES) {
2601 m = m_getcl(how, MT_DATA, 0);
2602 if (m == NULL)
2603 goto bad;
2604 bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
2605 bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
2606 n2->m_len);
2607 m->m_len = n->m_len + n2->m_len;
2608 m->m_next = n2->m_next;
2609 *prev = m;
2610 m_free(n);
2611 m_free(n2);
2612 if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */
2613 return m0;
2614 /*
2615 * Still not there, try the normal collapse
2616 * again before we allocate another cluster.
2617 */
2618 goto again;
2619 }
2620 prev = &n->m_next;
2621 }
2622 /*
2623 * No place where we can collapse to a cluster; punt.
2624 * This can occur if, for example, you request 2 frags
2625 * but the packet requires that both be clusters (we
2626 * never reallocate the first mbuf to avoid moving the
2627 * packet header).
2628 */
2629bad:
2630 return (NULL);
2631}
2632
2633static int
2634msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
2635{
2636 struct msk_txdesc *txd, *txd_last;
2637 struct msk_tx_desc *tx_le;
2638 struct mbuf *m;
2639 bus_dmamap_t map;
2640 bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
2641 uint32_t control, prod, si;
2642 uint16_t offset, tcp_offset, tso_mtu;
2643 int error, i, nseg, tso;
2644
2645 MSK_IF_LOCK_ASSERT(sc_if);
2646
2647 tcp_offset = offset = 0;
2648 m = *m_head;
2649 if ((m->m_pkthdr.csum_flags & (MSK_CSUM_FEATURES | CSUM_TSO)) != 0) {
2650 /*
2651		 * Since the mbuf carries no protocol-specific structure
2652		 * information, we have to inspect the protocol headers here
2653		 * to set up TSO and checksum offload. I don't know why
2654		 * Marvell made such a decision in the chip design, because
2655		 * other GigE hardware normally takes care of these chores in
2656		 * hardware. However, the TSO performance of the Yukon II is
2657		 * good enough that it's worth implementing.
2658 */
2659 struct ether_vlan_header *evh;
2660 struct ether_header *eh;
2661 struct ip *ip;
2662 struct tcphdr *tcp;
2663
2664 /* TODO check for M_WRITABLE(m) */
2665
2666 offset = sizeof(struct ether_header);
2667 m = m_pullup(m, offset);
2668 if (m == NULL) {
2669 *m_head = NULL;
2670 return (ENOBUFS);
2671 }
2672 eh = mtod(m, struct ether_header *);
2673 /* Check if hardware VLAN insertion is off. */
2674 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2675 offset = sizeof(struct ether_vlan_header);
2676 m = m_pullup(m, offset);
2677 if (m == NULL) {
2678 *m_head = NULL;
2679 return (ENOBUFS);
2680 }
2681 evh = mtod(m, struct ether_vlan_header *);
2682 ip = (struct ip *)(evh + 1);
2683 } else
2684 ip = (struct ip *)(eh + 1);
2685 m = m_pullup(m, offset + sizeof(struct ip));
2686 if (m == NULL) {
2687 *m_head = NULL;
2688 return (ENOBUFS);
2689 }
2690 offset += (ip->ip_hl << 2);
2691 tcp_offset = offset;
2692 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2693 m = m_pullup(m, offset + sizeof(struct tcphdr));
2694 if (m == NULL) {
2695 *m_head = NULL;
2696 return (ENOBUFS);
2697 }
2698 tcp = mtod(m, struct tcphdr *);
2699 offset += (tcp->th_off << 2);
2700 }
2701 *m_head = m;
2702 }
2703
2704 prod = sc_if->msk_cdata.msk_tx_prod;
2705 txd = &sc_if->msk_cdata.msk_txdesc[prod];
2706 txd_last = txd;
2707 map = txd->tx_dmamap;
2708 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map,
2709 *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2710 if (error == EFBIG) {
2711 m = msk_defrag(*m_head, M_DONTWAIT, MSK_MAXTXSEGS);
2712 if (m == NULL) {
2713 m_freem(*m_head);
2714 *m_head = NULL;
2715 return (ENOBUFS);
2716 }
2717 *m_head = m;
2718 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag,
2719 map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2720 if (error != 0) {
2721 m_freem(*m_head);
2722 *m_head = NULL;
2723 return (error);
2724 }
2725 } else if (error != 0)
2726 return (error);
2727 if (nseg == 0) {
2728 m_freem(*m_head);
2729 *m_head = NULL;
2730 return (EIO);
2731 }
2732
2733 /* Check number of available descriptors. */
2734 if (sc_if->msk_cdata.msk_tx_cnt + nseg >=
2735 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) {
2736 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
2737 return (ENOBUFS);
2738 }
2739
2740 control = 0;
2741 tso = 0;
2742 tx_le = NULL;
2743
2744 /* Check TSO support. */
2745 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2746 tso_mtu = offset + m->m_pkthdr.tso_segsz;
2747 if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) {
2748 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2749 tx_le->msk_addr = htole32(tso_mtu);
2750 tx_le->msk_control = htole32(OP_LRGLEN | HW_OWNER);
2751 sc_if->msk_cdata.msk_tx_cnt++;
2752 MSK_INC(prod, MSK_TX_RING_CNT);
2753 sc_if->msk_cdata.msk_tso_mtu = tso_mtu;
2754 }
2755 tso++;
2756 }
2757 /* Check if we have a VLAN tag to insert. */
2758 if ((m->m_flags & M_VLANTAG) != 0) {
2759 if (tso == 0) {
2760 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2761 tx_le->msk_addr = htole32(0);
2762 tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
2763 htons(m->m_pkthdr.ether_vtag));
2764 sc_if->msk_cdata.msk_tx_cnt++;
2765 MSK_INC(prod, MSK_TX_RING_CNT);
2766 } else {
2767 tx_le->msk_control |= htole32(OP_VLAN |
2768 htons(m->m_pkthdr.ether_vtag));
2769 }
2770 control |= INS_VLAN;
2771 }
2772 /* Check if we have to handle checksum offload. */
2773 if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) {
2774 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2775 tx_le->msk_addr = htole32(((tcp_offset + m->m_pkthdr.csum_data)
2776 & 0xffff) | ((uint32_t)tcp_offset << 16));
2777 tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER));
2778 control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
2779 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2780 control |= UDPTCP;
2781 sc_if->msk_cdata.msk_tx_cnt++;
2782 MSK_INC(prod, MSK_TX_RING_CNT);
2783 }
2784
2785 si = prod;
2786 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2787 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
2788 if (tso == 0)
2789 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2790 OP_PACKET);
2791 else
2792 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2793 OP_LARGESEND);
2794 sc_if->msk_cdata.msk_tx_cnt++;
2795 MSK_INC(prod, MSK_TX_RING_CNT);
2796
2797 for (i = 1; i < nseg; i++) {
2798 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2799 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
2800 tx_le->msk_control = htole32(txsegs[i].ds_len | control |
2801 OP_BUFFER | HW_OWNER);
2802 sc_if->msk_cdata.msk_tx_cnt++;
2803 MSK_INC(prod, MSK_TX_RING_CNT);
2804 }
2805 /* Update producer index. */
2806 sc_if->msk_cdata.msk_tx_prod = prod;
2807
2808	/* Set EOP on the last descriptor. */
2809 prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
2810 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2811 tx_le->msk_control |= htole32(EOP);
2812
2813 /* Turn the first descriptor ownership to hardware. */
2814 tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
2815 tx_le->msk_control |= htole32(HW_OWNER);
2816
2817 txd = &sc_if->msk_cdata.msk_txdesc[prod];
2818 map = txd_last->tx_dmamap;
2819 txd_last->tx_dmamap = txd->tx_dmamap;
2820 txd->tx_dmamap = map;
2821 txd->tx_m = m;
2822
2823 /* Sync descriptors. */
2824 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
2825 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
2826 sc_if->msk_cdata.msk_tx_ring_map,
2827 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2828
2829 return (0);
2830}
2831
2832static void
2833msk_tx_task(void *arg, int pending)
2834{
2835 struct ifnet *ifp;
2836
2837 ifp = arg;
2838 msk_start(ifp);
2839}
2840
2841static void
2842msk_start(struct ifnet *ifp)
2843{
2844 struct msk_if_softc *sc_if;
2845 struct mbuf *m_head;
2846 int enq;
2847
2848 sc_if = ifp->if_softc;
2849
2850 MSK_IF_LOCK(sc_if);
2851
2852 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2853 IFF_DRV_RUNNING || sc_if->msk_link == 0) {
2854 MSK_IF_UNLOCK(sc_if);
2855 return;
2856 }
2857
2858 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2859 sc_if->msk_cdata.msk_tx_cnt <
2860 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
2861 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2862 if (m_head == NULL)
2863 break;
2864 /*
2865 * Pack the data into the transmit ring. If we
2866 * don't have room, set the OACTIVE flag and wait
2867 * for the NIC to drain the ring.
2868 */
2869 if (msk_encap(sc_if, &m_head) != 0) {
2870 if (m_head == NULL)
2871 break;
2872 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2873 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2874 break;
2875 }
2876
2877 enq++;
2878 /*
2879 * If there's a BPF listener, bounce a copy of this frame
2880 * to him.
2881 */
2882 BPF_MTAP(ifp, m_head);
2883 }
2884
2885 if (enq > 0) {
2886 /* Transmit */
2887 CSR_WRITE_2(sc_if->msk_softc,
2888 Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
2889 sc_if->msk_cdata.msk_tx_prod);
2890
2891 /* Set a timeout in case the chip goes out to lunch. */
2892 callout_reset(&sc_if->msk_watchdog_ch, MSK_TX_TIMEOUT * hz,
2893 msk_watchdog, sc_if);
2894 }
2895
2896 MSK_IF_UNLOCK(sc_if);
2897}
2898
2899static void
2900msk_watchdog(void *arg)
2901{
2902 struct msk_if_softc *sc_if;
2903 struct ifnet *ifp;
2904 uint32_t ridx;
2905 int idx;
2906
2907 sc_if = arg;
2908
2909 MSK_IF_LOCK_ASSERT(sc_if);
2910
2911 ifp = sc_if->msk_ifp;
2912 if (sc_if->msk_link == 0) {
2913 if (bootverbose)
2914 if_printf(sc_if->msk_ifp, "watchdog timeout "
2915 "(missed link)\n");
2916 ifp->if_oerrors++;
2917 msk_init_locked(sc_if);
2918 return;
2919 }
2920
2921 /*
2922 * Reclaim first as there is a possibility of losing Tx completion
2923 * interrupts.
2924 */
2925 ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX;
2926 idx = CSR_READ_2(sc_if->msk_softc, ridx);
2927 if (sc_if->msk_cdata.msk_tx_cons != idx) {
2928 msk_txeof(sc_if, idx);
2929 if (sc_if->msk_cdata.msk_tx_cnt == 0) {
2930 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2931 "-- recovering\n");
2932 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2933 taskqueue_enqueue(taskqueue_fast,
2934 &sc_if->msk_tx_task);
2935 return;
2936 }
2937 }
2938
2939 if_printf(ifp, "watchdog timeout\n");
2940 ifp->if_oerrors++;
2941 msk_init_locked(sc_if);
2942 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2943 taskqueue_enqueue(taskqueue_fast, &sc_if->msk_tx_task);
2944}
2945
2946static void
2947mskc_shutdown(device_t dev)
2948{
2949 struct msk_softc *sc;
2950 int i;
2951
2952 sc = device_get_softc(dev);
2953 MSK_LOCK(sc);
2954 for (i = 0; i < sc->msk_num_port; i++) {
2955 if (sc->msk_if[i] != NULL)
2956 msk_stop(sc->msk_if[i]);
2957 }
2958
2959 /* Disable all interrupts. */
2960 CSR_WRITE_4(sc, B0_IMSK, 0);
2961 CSR_READ_4(sc, B0_IMSK);
2962 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2963 CSR_READ_4(sc, B0_HWE_IMSK);
2964
2965 /* Put hardware reset. */
2966 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2967
2968 MSK_UNLOCK(sc);
2969}
2970
2971static int
2972mskc_suspend(device_t dev)
2973{
2974 struct msk_softc *sc;
2975 int i;
2976
2977 sc = device_get_softc(dev);
2978
2979 MSK_LOCK(sc);
2980
2981 for (i = 0; i < sc->msk_num_port; i++) {
2982 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2983 ((sc->msk_if[i]->msk_ifp->if_drv_flags &
2984 IFF_DRV_RUNNING) != 0))
2985 msk_stop(sc->msk_if[i]);
2986 }
2987
2988 /* Disable all interrupts. */
2989 CSR_WRITE_4(sc, B0_IMSK, 0);
2990 CSR_READ_4(sc, B0_IMSK);
2991 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2992 CSR_READ_4(sc, B0_HWE_IMSK);
2993
2994 msk_phy_power(sc, MSK_PHY_POWERDOWN);
2995
2996 /* Put hardware reset. */
2997 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2998 sc->msk_suspended = 1;
2999
3000 MSK_UNLOCK(sc);
3001
3002 return (0);
3003}
3004
3005static int
3006mskc_resume(device_t dev)
3007{
3008 struct msk_softc *sc;
3009 int i;
3010
3011 sc = device_get_softc(dev);
3012
3013 MSK_LOCK(sc);
3014
3015 mskc_reset(sc);
3016 for (i = 0; i < sc->msk_num_port; i++) {
3017 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
3018 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0))
3019 msk_init_locked(sc->msk_if[i]);
3020 }
3021 sc->msk_suspended = 0;
3022
3023 MSK_UNLOCK(sc);
3024
3025 return (0);
3026}
3027
3028static void
3029msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
3030{
3031 struct mbuf *m;
3032 struct ifnet *ifp;
3033 struct msk_rxdesc *rxd;
3034 int cons, rxlen;
3035
3036 ifp = sc_if->msk_ifp;
3037
3038 MSK_IF_LOCK_ASSERT(sc_if);
3039
3040 cons = sc_if->msk_cdata.msk_rx_cons;
3041 do {
3042 rxlen = status >> 16;
3043 if ((status & GMR_FS_VLAN) != 0)
3044 rxlen -= ETHER_VLAN_ENCAP_LEN;
3045 if (len > sc_if->msk_framesize ||
3046 ((status & GMR_FS_ANY_ERR) != 0) ||
3047 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
3048			/* Don't count flow-control packets as errors. */
3049 if ((status & GMR_FS_GOOD_FC) == 0)
3050 ifp->if_ierrors++;
3051 msk_discard_rxbuf(sc_if, cons);
3052 break;
3053 }
3054 rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
3055 m = rxd->rx_m;
3056 if (msk_newbuf(sc_if, cons) != 0) {
3057 ifp->if_iqdrops++;
3058 /* Reuse old buffer. */
3059 msk_discard_rxbuf(sc_if, cons);
3060 break;
3061 }
3062 m->m_pkthdr.rcvif = ifp;
3063 m->m_pkthdr.len = m->m_len = len;
3064 ifp->if_ipackets++;
3065 /* Check for VLAN tagged packets. */
3066 if ((status & GMR_FS_VLAN) != 0 &&
3067 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3068 m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
3069 m->m_flags |= M_VLANTAG;
3070 }
3071 MSK_IF_UNLOCK(sc_if);
3072 (*ifp->if_input)(ifp, m);
3073 MSK_IF_LOCK(sc_if);
3074 } while (0);
3075
3076 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
3077 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
3078}
3079
3080static void
3081msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
3082{
3083 struct mbuf *m;
3084 struct ifnet *ifp;
3085 struct msk_rxdesc *jrxd;
3086 int cons, rxlen;
3087
3088 ifp = sc_if->msk_ifp;
3089
3090 MSK_IF_LOCK_ASSERT(sc_if);
3091
3092 cons = sc_if->msk_cdata.msk_rx_cons;
3093 do {
3094 rxlen = status >> 16;
3095 if ((status & GMR_FS_VLAN) != 0)
3096 rxlen -= ETHER_VLAN_ENCAP_LEN;
3097 if (len > sc_if->msk_framesize ||
3098 ((status & GMR_FS_ANY_ERR) != 0) ||
3099 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
3100			/* Don't count flow-control packets as errors. */
3101 if ((status & GMR_FS_GOOD_FC) == 0)
3102 ifp->if_ierrors++;
3103 msk_discard_jumbo_rxbuf(sc_if, cons);
3104 break;
3105 }
3106 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
3107 m = jrxd->rx_m;
3108 if (msk_jumbo_newbuf(sc_if, cons) != 0) {
3109 ifp->if_iqdrops++;
3110 /* Reuse old buffer. */
3111 msk_discard_jumbo_rxbuf(sc_if, cons);
3112 break;
3113 }
3114 m->m_pkthdr.rcvif = ifp;
3115 m->m_pkthdr.len = m->m_len = len;
3116 ifp->if_ipackets++;
3117 /* Check for VLAN tagged packets. */
3118 if ((status & GMR_FS_VLAN) != 0 &&
3119 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3120 m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
3121 m->m_flags |= M_VLANTAG;
3122 }
3123 MSK_IF_UNLOCK(sc_if);
3124 (*ifp->if_input)(ifp, m);
3125 MSK_IF_LOCK(sc_if);
3126 } while (0);
3127
3128 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
3129 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
3130}
3131
3132static void
3133msk_txeof(struct msk_if_softc *sc_if, int idx)
3134{
3135 struct msk_txdesc *txd;
3136 struct msk_tx_desc *cur_tx;
3137 struct ifnet *ifp;
3138 uint32_t control;
3139 int cons, prog;
3140
3141 MSK_IF_LOCK_ASSERT(sc_if);
3142
3143 ifp = sc_if->msk_ifp;
3144
3145 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
3146 sc_if->msk_cdata.msk_tx_ring_map,
3147 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3148 /*
3149 * Go through our tx ring and free mbufs for those
3150 * frames that have been sent.
3151 */
3152 cons = sc_if->msk_cdata.msk_tx_cons;
3153 prog = 0;
3154 for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
3155 if (sc_if->msk_cdata.msk_tx_cnt <= 0)
3156 break;
3157 prog++;
3158 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
3159 control = le32toh(cur_tx->msk_control);
3160 sc_if->msk_cdata.msk_tx_cnt--;
3161 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
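		/*
		 * Only the descriptor carrying EOP owns the mbuf and the DMA
		 * map; descriptors for intermediate fragments are skipped.
		 */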
3162 if ((control & EOP) == 0)
3163 continue;
3164 txd = &sc_if->msk_cdata.msk_txdesc[cons];
3165 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap,
3166 BUS_DMASYNC_POSTWRITE);
3167 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);
3168
3169 ifp->if_opackets++;
3170 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
3171 __func__));
3172 m_freem(txd->tx_m);
3173 txd->tx_m = NULL;
3174 }
3175
3176 if (prog > 0) {
3177 sc_if->msk_cdata.msk_tx_cons = cons;
3178 if (sc_if->msk_cdata.msk_tx_cnt == 0)
3179 callout_stop(&sc_if->msk_watchdog_ch);
3180 /* No need to sync LEs as we didn't update LEs. */
3181 }
3182}
3183
3184static void
3185msk_tick(void *xsc_if)
3186{
3187 struct msk_if_softc *sc_if;
3188 struct mii_data *mii;
3189
3190 sc_if = xsc_if;
3191
3192 MSK_IF_LOCK_ASSERT(sc_if);
3193
3194 mii = device_get_softc(sc_if->msk_miibus);
3195
3196 mii_tick(mii);
3197 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3198}
3199
3200static void
3201msk_intr_phy(struct msk_if_softc *sc_if)
3202{
3203 uint16_t status;
3204
3205 if (sc_if->msk_softc->msk_marvell_phy) {
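		/*
		 * The PHY interrupt status register is read twice; the first
		 * read presumably acknowledges/clears a latched event and
		 * the second returns the status that is acted upon.
		 */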
3206 msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3207 status = msk_phy_readreg(sc_if, PHY_ADDR_MARV,
3208 PHY_MARV_INT_STAT);
3209 /* Handle FIFO Underrun/Overflow? */
3210		if ((status & PHY_M_IS_FIFO_ERROR) != 0)
3211 device_printf(sc_if->msk_if_dev,
3212 "PHY FIFO underrun/overflow.\n");
3213 }
3214}
3215
3216static void
3217msk_intr_gmac(struct msk_if_softc *sc_if)
3218{
3219 struct msk_softc *sc;
3220 uint8_t status;
3221
3222 sc = sc_if->msk_softc;
3223 status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3224
3225 /* GMAC Rx FIFO overrun. */
3226 if ((status & GM_IS_RX_FF_OR) != 0) {
3227 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3228 GMF_CLI_RX_FO);
3229 device_printf(sc_if->msk_if_dev, "Rx FIFO overrun!\n");
3230 }
3231 /* GMAC Tx FIFO underrun. */
3232 if ((status & GM_IS_TX_FF_UR) != 0) {
3233 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3234 GMF_CLI_TX_FU);
3235 device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
3236		/*
3237		 * XXX
3238		 * In case of Tx underrun, we may need to flush/reset the
3239		 * Tx MAC, but that would also require resynchronizing with
3240		 * the status LEs. Reinitializing the status LEs would also
3241		 * affect the other port in a dual-MAC configuration, so it
3242		 * should be avoided as much as possible.
3243		 * Due to the lack of documentation this is all a vague guess,
3244		 * but it needs more investigation.
3245		 */
3246 }
3247}
3248
3249static void
3250msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
3251{
3252 struct msk_softc *sc;
3253
3254 sc = sc_if->msk_softc;
3255 if ((status & Y2_IS_PAR_RD1) != 0) {
3256 device_printf(sc_if->msk_if_dev,
3257 "RAM buffer read parity error\n");
3258 /* Clear IRQ. */
3259 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3260 RI_CLR_RD_PERR);
3261 }
3262 if ((status & Y2_IS_PAR_WR1) != 0) {
3263 device_printf(sc_if->msk_if_dev,
3264 "RAM buffer write parity error\n");
3265 /* Clear IRQ. */
3266 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3267 RI_CLR_WR_PERR);
3268 }
3269 if ((status & Y2_IS_PAR_MAC1) != 0) {
3270 device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
3271 /* Clear IRQ. */
3272 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3273 GMF_CLI_TX_PE);
3274 }
3275 if ((status & Y2_IS_PAR_RX1) != 0) {
3276 device_printf(sc_if->msk_if_dev, "Rx parity error\n");
3277 /* Clear IRQ. */
3278 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
3279 }
3280 if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
3281 device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
3282 /* Clear IRQ. */
3283 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
3284 }
3285}
3286
3287static void
3288msk_intr_hwerr(struct msk_softc *sc)
3289{
3290 uint32_t status;
3291 uint32_t tlphead[4];
3292
3293 status = CSR_READ_4(sc, B0_HWE_ISRC);
3294 /* Time Stamp timer overflow. */
3295 if ((status & Y2_IS_TIST_OV) != 0)
3296 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
3297 if ((status & Y2_IS_PCI_NEXP) != 0) {
3298		/*
3299		 * A PCI Express error occurred which is not described in
3300		 * the PEX spec.
3301		 * This error is also mapped to either the Master Abort
3302		 * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
3303		 * can only be cleared there.
3304		 */
3305 device_printf(sc->msk_dev,
3306 "PCI Express protocol violation error\n");
3307 }
3308
3309 if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
3310 uint16_t v16;
3311
3312		if ((status & Y2_IS_MST_ERR) != 0)
3313			device_printf(sc->msk_dev,
3314			    "unexpected IRQ Master error\n");
3315		else
3316			device_printf(sc->msk_dev,
3317			    "unexpected IRQ Status error\n");
3318 /* Reset all bits in the PCI status register. */
3319 v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
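		/*
		 * Config space writes appear to be honored only while
		 * TST_CFG_WRITE_ON is set, so bracket the status clear with
		 * the test control on/off writes.
		 */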
3320 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3321 pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
3322 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
3323 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
3324 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3325 }
3326
3327 /* Check for PCI Express Uncorrectable Error. */
3328 if ((status & Y2_IS_PCI_EXP) != 0) {
3329 uint32_t v32;
3330
3331		/*
3332		 * On a PCI Express bus, bridges are called root complexes
3333		 * (RC). PCI Express errors are recognized by the root complex
3334		 * too, which asks the system to handle the problem. After
3335		 * such an error it may be that the adapter can no longer be
3336		 * accessed at all.
3337		 */
3338
3339 v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
3340 if ((v32 & PEX_UNSUP_REQ) != 0) {
3341 /* Ignore unsupported request error. */
3342 device_printf(sc->msk_dev,
3343 "Uncorrectable PCI Express error\n");
3344 }
3345 if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
3346 int i;
3347
3348			/* Get the TLP header from the Log Registers. */
3349 for (i = 0; i < 4; i++)
3350 tlphead[i] = CSR_PCI_READ_4(sc,
3351 PEX_HEADER_LOG + i * 4);
3352 /* Check for vendor defined broadcast message. */
3353 if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
3354 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
3355 CSR_WRITE_4(sc, B0_HWE_IMSK,
3356 sc->msk_intrhwemask);
3357 CSR_READ_4(sc, B0_HWE_IMSK);
3358 }
3359 }
3360 /* Clear the interrupt. */
3361 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3362 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
3363 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3364 }
3365
3366 if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
3367 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
3368 if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
3369 msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
3370}
3371
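/*
 * Sync whichever Rx ring (standard or jumbo) is in use and hand the current
 * Rx producer index to the prefetch unit.
 */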
3372static __inline void
3373msk_rxput(struct msk_if_softc *sc_if)
3374{
3375 struct msk_softc *sc;
3376
3377 sc = sc_if->msk_softc;
3378	if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN))
3379 bus_dmamap_sync(
3380 sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
3381 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
3382 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3383 else
3384 bus_dmamap_sync(
3385 sc_if->msk_cdata.msk_rx_ring_tag,
3386 sc_if->msk_cdata.msk_rx_ring_map,
3387 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3388 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
3389 PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
3390}
3391
3392static int
3393msk_handle_events(struct msk_softc *sc)
3394{
3395 struct msk_if_softc *sc_if;
3396 int rxput[2];
3397 struct msk_stat_desc *sd;
3398 uint32_t control, status;
3399 int cons, idx, len, port, rxprog;
3400
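	/*
	 * STAT_PUT_IDX is the hardware's producer index for the status LE
	 * ring; if it matches our consumer index there is nothing to do.
	 */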
3401 idx = CSR_READ_2(sc, STAT_PUT_IDX);
3402 if (idx == sc->msk_stat_cons)
3403 return (0);
3404
3405 /* Sync status LEs. */
3406 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
3407 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3408 /* XXX Sync Rx LEs here. */
3409
3410 rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
3411
3412 rxprog = 0;
3413 for (cons = sc->msk_stat_cons; cons != idx;) {
3414 sd = &sc->msk_stat_ring[cons];
3415 control = le32toh(sd->msk_control);
3416 if ((control & HW_OWNER) == 0)
3417 break;
3418		/*
3419		 * Marvell's FreeBSD driver updates the status LE after
3420		 * clearing HW_OWNER. However, we don't have a way to sync a
3421		 * single LE with the bus_dma(9) API; it only provides a way
3422		 * to sync an entire DMA map. So don't sync the LE until we
3423		 * have a better way to sync LEs.
3424		 */
3425 control &= ~HW_OWNER;
3426 sd->msk_control = htole32(control);
3427 status = le32toh(sd->msk_status);
3428 len = control & STLE_LEN_MASK;
3429 port = (control >> 16) & 0x01;
3430 sc_if = sc->msk_if[port];
3431 if (sc_if == NULL) {
3432 device_printf(sc->msk_dev, "invalid port opcode "
3433 "0x%08x\n", control & STLE_OP_MASK);
3434 continue;
3435 }
3436
3437 switch (control & STLE_OP_MASK) {
3438 case OP_RXVLAN:
3439 sc_if->msk_vtag = ntohs(len);
3440 break;
3441 case OP_RXCHKSVLAN:
3442 sc_if->msk_vtag = ntohs(len);
3443 break;
3444 case OP_RXSTAT:
3445 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN))
3446 msk_jumbo_rxeof(sc_if, status, len);
3447 else
3448 msk_rxeof(sc_if, status, len);
3449 rxprog++;
3450			/*
3451			 * Because there is no way to sync a single Rx LE,
3452			 * put the DMA sync operation off until the end of
3453			 * event processing.
3454			 */
3455 rxput[port]++;
3456			/* Update the prefetch unit if we've passed the watermark. */
3457 if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
3458 msk_rxput(sc_if);
3459 rxput[port] = 0;
3460 }
3461 break;
3462 case OP_TXINDEXLE:
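			/*
			 * A single Tx index LE reports completed descriptor
			 * indices for both ports: port A's index lives in the
			 * low status bits, port B's is split across the
			 * status word and the length field.
			 */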
3463 if (sc->msk_if[MSK_PORT_A] != NULL)
3464 msk_txeof(sc->msk_if[MSK_PORT_A],
3465 status & STLE_TXA1_MSKL);
3466 if (sc->msk_if[MSK_PORT_B] != NULL)
3467 msk_txeof(sc->msk_if[MSK_PORT_B],
3468 ((status & STLE_TXA2_MSKL) >>
3469 STLE_TXA2_SHIFTL) |
3470 ((len & STLE_TXA2_MSKH) <<
3471 STLE_TXA2_SHIFTH));
3472 break;
3473 default:
3474 device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
3475 control & STLE_OP_MASK);
3476 break;
3477 }
3478 MSK_INC(cons, MSK_STAT_RING_CNT);
3479 if (rxprog > sc->msk_process_limit)
3480 break;
3481 }
3482
3483 sc->msk_stat_cons = cons;
3484 /* XXX We should sync status LEs here. See above notes. */
3485
3486 if (rxput[MSK_PORT_A] > 0)
3487 msk_rxput(sc->msk_if[MSK_PORT_A]);
3488 if (rxput[MSK_PORT_B] > 0)
3489 msk_rxput(sc->msk_if[MSK_PORT_B]);
3490
3491 return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
3492}
3493
3494static void
3495msk_intr(void *xsc)
3496{
3497 struct msk_softc *sc;
3498 uint32_t status;
3499
3500 sc = xsc;
3501 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3502 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
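	/*
	 * A status of 0 means the interrupt is not ours; all ones typically
	 * means the hardware is no longer responding. Either way, re-enable
	 * interrupts and bail out.
	 */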
3503 if (status == 0 || status == 0xffffffff) {
3504 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3505 return;
3506 }
3507
3508 taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task);
3509}
3510
3511static void
3512msk_int_task(void *arg, int pending)
3513{
3514 struct msk_softc *sc;
3515 struct msk_if_softc *sc_if0, *sc_if1;
3516 struct ifnet *ifp0, *ifp1;
3517 uint32_t status;
3518 int domore;
3519
3520 sc = arg;
3521 MSK_LOCK(sc);
3522
3523 /* Get interrupt source. */
3524 status = CSR_READ_4(sc, B0_ISRC);
3525 if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 ||
3526 (status & sc->msk_intrmask) == 0)
3527 goto done;
3528
3529 sc_if0 = sc->msk_if[MSK_PORT_A];
3530 sc_if1 = sc->msk_if[MSK_PORT_B];
3531 ifp0 = ifp1 = NULL;
3532 if (sc_if0 != NULL) {
3533 ifp0 = sc_if0->msk_ifp;
3534 if ((ifp0->if_drv_flags & IFF_DRV_RUNNING) == 0)
3535 goto done;
3536 }
3537 if (sc_if1 != NULL) {
3538 ifp1 = sc_if1->msk_ifp;
3539 if ((ifp1->if_drv_flags & IFF_DRV_RUNNING) == 0)
3540 goto done;
3541 }
3542
3543 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3544 msk_intr_phy(sc_if0);
3545 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3546 msk_intr_phy(sc_if1);
3547 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3548 msk_intr_gmac(sc_if0);
3549 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3550 msk_intr_gmac(sc_if1);
3551 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3552 device_printf(sc->msk_dev, "Rx descriptor error\n");
3553 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3554 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3555 CSR_READ_4(sc, B0_IMSK);
3556 }
3557 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3558 device_printf(sc->msk_dev, "Tx descriptor error\n");
3559 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3560 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3561 CSR_READ_4(sc, B0_IMSK);
3562 }
3563 if ((status & Y2_IS_HW_ERR) != 0)
3564 msk_intr_hwerr(sc);
3565
3566 domore = msk_handle_events(sc);
3567 if ((status & Y2_IS_STAT_BMU) != 0)
3568 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3569
3570 if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3571 taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task);
3572 if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3573 taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task);
3574
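	/*
	 * msk_handle_events() returns non-zero if more status LEs arrived
	 * while we were processing; in that case requeue the task and leave
	 * interrupts masked instead of re-enabling them below.
	 */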
3575 if (domore > 0) {
3576 taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task);
3577 MSK_UNLOCK(sc);
3578 return;
3579 }
3580done:
3581 MSK_UNLOCK(sc);
3582
3583 /* Reenable interrupts. */
3584 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3585}
3586
3587static void
3588msk_init(void *xsc)
3589{
3590 struct msk_if_softc *sc_if = xsc;
3591
3592 MSK_IF_LOCK(sc_if);
3593 msk_init_locked(sc_if);
3594 MSK_IF_UNLOCK(sc_if);
3595}
3596
3597static void
3598msk_init_locked(struct msk_if_softc *sc_if)
3599{
3600 struct msk_softc *sc;
3601 struct ifnet *ifp;
3602 struct mii_data *mii;
3603 uint16_t eaddr[ETHER_ADDR_LEN / 2];
3604 uint16_t gmac;
3605 int error, i;
3606
3607 MSK_IF_LOCK_ASSERT(sc_if);
3608
3609 ifp = sc_if->msk_ifp;
3610 sc = sc_if->msk_softc;
3611 mii = device_get_softc(sc_if->msk_miibus);
3612
3613 error = 0;
3614 /* Cancel pending I/O and free all Rx/Tx buffers. */
3615 msk_stop(sc_if);
3616
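	/*
	 * Largest frame we expect to receive: MTU plus the Ethernet header
	 * plus room for an 802.1Q VLAN tag.
	 */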
3617 sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN +
3618 ETHER_VLAN_ENCAP_LEN;
3619
3620	/*
3621	 * Initialize the GMAC first.
3622	 * Without this initialization the Rx MAC did not work as expected:
3623	 * it garbled status LEs, which resulted in out-of-order or
3624	 * duplicated frame delivery and in turn very poor Rx performance.
3625	 * (I had to write packet analysis code that could be embedded in
3626	 * the driver to diagnose this issue.)
3627	 * I spent almost two months fixing this issue. If I had had a
3628	 * datasheet for the Yukon II I wouldn't have encountered it. :-(
3629	 */
3630 gmac = GM_GPCR_SPEED_100 | GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL;
3631 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
3632
3633 /* Dummy read the Interrupt Source Register. */
3634 CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3635
3636 /* Set MIB Clear Counter Mode. */
3637 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
3638 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
3639 /* Read all MIB Counters with Clear Mode set. */
3640 for (i = 0; i < GM_MIB_CNT_SIZE; i++)
3641 GMAC_READ_2(sc, sc_if->msk_port, GM_MIB_CNT_BASE + 8 * i);
3642 /* Clear MIB Clear Counter Mode. */
3643 gmac &= ~GM_PAR_MIB_CLR;
3644 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
3645
3646 /* Disable FCS. */
3647 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);
3648
3649 /* Setup Transmit Control Register. */
3650 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
3651
3652 /* Setup Transmit Flow Control Register. */
3653 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
3654
3655 /* Setup Transmit Parameter Register. */
3656 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
3657 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
3658 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
3659
3660 gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
3661 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
3662
3663 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN)
3664 gmac |= GM_SMOD_JUMBO_ENA;
3665 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
3666
3667 /* Set station address. */
3668 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
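	/*
	 * The address registers are written 16 bits at a time with a 4-byte
	 * register stride; both the SRC_ADDR_1 and SRC_ADDR_2 banks are
	 * programmed with the same MAC address.
	 */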
3669	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3670 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4,
3671 eaddr[i]);
3672	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3673 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4,
3674 eaddr[i]);
3675
3676 /* Disable interrupts for counter overflows. */
3677 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
3678 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
3679 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
3680
3681 /* Configure Rx MAC FIFO. */
3682 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3683 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
3684 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3685 GMF_OPER_ON | GMF_RX_F_FL_ON);
3686
3687 /* Set promiscuous mode. */
3688 msk_setpromisc(sc_if);
3689
3690 /* Set multicast filter. */
3691 msk_setmulti(sc_if);
3692
3693 /* Flush Rx MAC FIFO on any flow control or error. */
3694 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
3695 GMR_FS_ANY_ERR);
3696
3697 /* Set Rx FIFO flush threshold to 64 bytes. */
3698 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR),
3699 RX_GMF_FL_THR_DEF);
3700
3701 /* Configure Tx MAC FIFO. */
3702 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3703 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
3704 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
3705
3706 /* Configure hardware VLAN tag insertion/stripping. */
3707 msk_setvlan(sc_if, ifp);
3708
3709	/* XXX It seems STFW is required in all cases. */
3710 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), TX_STFW_ENA);
3711
3712 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
3713		/* Set Rx pause thresholds. */
3714 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
3715 MSK_ECU_LLPP);
3716 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
3717 MSK_ECU_ULPP);
3718 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN) {
3719			/*
3720			 * Not sure the following code is needed, as the
3721			 * Yukon EC Ultra may not support jumbo frames.
3722			 *
3723			 * Set Tx GMAC FIFO Almost Empty Threshold.
3724			 */
3725 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
3726 MSK_ECU_AE_THR);
3727 /* Disable Store & Forward mode for Tx. */
3728 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3729 TX_STFW_DIS);
3730 }
3731 }
3732
3733 /*
3734 * Disable Force Sync bit and Alloc bit in Tx RAM interface
3735 * arbiter as we don't use Sync Tx queue.
3736 */
3737 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
3738 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
3739 /* Enable the RAM Interface Arbiter. */
3740 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
3741
3742 /* Setup RAM buffer. */
3743 msk_set_rambuffer(sc_if);
3744
3745 /* Disable Tx sync Queue. */
3746 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
3747
3748 /* Setup Tx Queue Bus Memory Interface. */
3749 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
3750 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
3751 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
3752 /* Increase IPID when hardware generates IP packets in TSO. */
3753 if ((ifp->if_hwassist & CSUM_TSO) != 0)
3754 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3755 BMU_TX_IPIDINCR_ON);
3756 else
3757 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3758 BMU_TX_IPIDINCR_OFF);
3759 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
3760 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3761 sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
3762 /* Fix for Yukon-EC Ultra: set BMU FIFO level */
3763 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), MSK_ECU_TXFF_LEV);
3764 }
3765
3766 /* Setup Rx Queue Bus Memory Interface. */
3767 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
3768 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
3769 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
3770 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
3771 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3772 sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
3773 /* MAC Rx RAM Read is controlled by hardware. */
3774 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
3775 }
3776
3777 msk_set_prefetch(sc, sc_if->msk_txq,
3778 sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
3779 msk_init_tx_ring(sc_if);
3780
3781 /* Disable Rx checksum offload and RSS hash. */
3782 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3783 BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);
3784 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
3785 msk_set_prefetch(sc, sc_if->msk_rxq,
3786 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
3787 MSK_JUMBO_RX_RING_CNT - 1);
3788 error = msk_init_jumbo_rx_ring(sc_if);
3789 } else {
3790 msk_set_prefetch(sc, sc_if->msk_rxq,
3791 sc_if->msk_rdata.msk_rx_ring_paddr,
3792 MSK_RX_RING_CNT - 1);
3793 error = msk_init_rx_ring(sc_if);
3794 }
3795 if (error != 0) {
3796 device_printf(sc_if->msk_if_dev,
3797 "initialization failed: no memory for Rx buffers\n");
3798 msk_stop(sc_if);
3799 return;
3800 }
3801
3802 /* Configure interrupt handling. */
3803 if (sc_if->msk_port == MSK_PORT_A) {
3804 sc->msk_intrmask |= Y2_IS_PORT_A;
3805 sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
3806 } else {
3807 sc->msk_intrmask |= Y2_IS_PORT_B;
3808 sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
3809 }
3810 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3811 CSR_READ_4(sc, B0_HWE_IMSK);
3812 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3813 CSR_READ_4(sc, B0_IMSK);
3814
3815 sc_if->msk_link = 0;
3816 mii_mediachg(mii);
3817
3818 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3819 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3820
3821 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3822}
3823
3824static void
3825msk_set_rambuffer(struct msk_if_softc *sc_if)
3826{
3827 struct msk_softc *sc;
3828 int ltpp, utpp;
3829
3830 sc = sc_if->msk_softc;
3831
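	/*
	 * The RAM buffer start/end/pointer registers appear to be expressed
	 * in 8-byte units, hence all of the divisions by 8 below.
	 */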
3832 /* Setup Rx Queue. */
3833 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
3834 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
3835 sc->msk_rxqstart[sc_if->msk_port] / 8);
3836 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
3837 sc->msk_rxqend[sc_if->msk_port] / 8);
3838 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
3839 sc->msk_rxqstart[sc_if->msk_port] / 8);
3840 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
3841 sc->msk_rxqstart[sc_if->msk_port] / 8);
3842
3843 utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3844 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
3845 ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3846 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
3847 if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
3848 ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
3849 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
3850 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
3851	/* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */
3852
3853 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
3854 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));
3855
3856 /* Setup Tx Queue. */
3857 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
3858 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
3859 sc->msk_txqstart[sc_if->msk_port] / 8);
3860 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
3861 sc->msk_txqend[sc_if->msk_port] / 8);
3862 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
3863 sc->msk_txqstart[sc_if->msk_port] / 8);
3864 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
3865 sc->msk_txqstart[sc_if->msk_port] / 8);
3866 /* Enable Store & Forward for Tx side. */
3867 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
3868 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
3869 CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
3870}
3871
3872static void
3873msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
3874 uint32_t count)
3875{
3876
3877 /* Reset the prefetch unit. */
3878 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3879 PREF_UNIT_RST_SET);
3880 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3881 PREF_UNIT_RST_CLR);
3882 /* Set LE base address. */
3883 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
3884 MSK_ADDR_LO(addr));
3885 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
3886 MSK_ADDR_HI(addr));
3887 /* Set the list last index. */
3888 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
3889 count);
3890 /* Turn on prefetch unit. */
3891 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3892 PREF_UNIT_OP_ON);
3893	/* Dummy read to ensure the write completes. */
3894 CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
3895}
3896
3897static void
3898msk_stop(struct msk_if_softc *sc_if)
3899{
3900 struct msk_softc *sc;
3901 struct msk_txdesc *txd;
3902 struct msk_rxdesc *rxd;
3903 struct msk_rxdesc *jrxd;
3904 struct ifnet *ifp;
3905 uint32_t val;
3906 int i;
3907
3908 MSK_IF_LOCK_ASSERT(sc_if);
3909 sc = sc_if->msk_softc;
3910 ifp = sc_if->msk_ifp;
3911
3912 callout_stop(&sc_if->msk_tick_ch);
3913 callout_stop(&sc_if->msk_watchdog_ch);
3914
3915 /* Disable interrupts. */
3916 if (sc_if->msk_port == MSK_PORT_A) {
3917 sc->msk_intrmask &= ~Y2_IS_PORT_A;
3918 sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
3919 } else {
3920 sc->msk_intrmask &= ~Y2_IS_PORT_B;
3921 sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
3922 }
3923 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3924 CSR_READ_4(sc, B0_HWE_IMSK);
3925 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3926 CSR_READ_4(sc, B0_IMSK);
3927
3928 /* Disable Tx/Rx MAC. */
3929 val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3930 val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
3931 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
3932	/* Read back to ensure the write completed. */
3933 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3934
3935 /* Stop Tx BMU. */
3936 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
3937 val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
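	/*
	 * Poll until the BMU acknowledges the stop (BMU_STOP or BMU_IDLE
	 * set), re-issuing the stop command while neither bit shows up.
	 */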
3938 for (i = 0; i < MSK_TIMEOUT; i++) {
3939 if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
3940 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3941 BMU_STOP);
3942 CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
3943 } else
3944 break;
3945 DELAY(1);
3946 }
3947 if (i == MSK_TIMEOUT)
3948 device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
3949 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
3950 RB_RST_SET | RB_DIS_OP_MD);
3951
3952	/* Disable all GMAC interrupts. */
3953 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
3954 /* Disable PHY interrupt. */
3955 if (sc->msk_marvell_phy)
3956 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
3957
3958 /* Disable the RAM Interface Arbiter. */
3959 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);
3960
3961 /* Reset the PCI FIFO of the async Tx queue */
3962 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3963 BMU_RST_SET | BMU_FIFO_RST);
3964
3965 /* Reset the Tx prefetch units. */
3966 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
3967 PREF_UNIT_RST_SET);
3968
3969 /* Reset the RAM Buffer async Tx queue. */
3970 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);
3971
3972 /* Reset Tx MAC FIFO. */
3973 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3974 /* Set Pause Off. */
3975 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
3976
3977	/*
3978	 * The Rx Stop command will not work for Yukon-2 if the BMU does not
3979	 * reach the end of a packet, and since we can't be sure whether we
3980	 * have incoming data, we must reset the BMU while it is not in the
3981	 * middle of a DMA transfer. Since it is possible that the Rx path
3982	 * is still active, the Rx RAM buffer is stopped first, so any
3983	 * incoming data will not trigger a DMA. After the RAM buffer is
3984	 * stopped, the BMU is polled until any DMA in progress has ended,
3985	 * and only then is it reset.
3986	 */
3987
3988 /* Disable the RAM Buffer receive queue. */
3989 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
3990 for (i = 0; i < MSK_TIMEOUT; i++) {
3991 if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
3992 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
3993 break;
3994 DELAY(1);
3995 }
3996 if (i == MSK_TIMEOUT)
3997 device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
3998 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3999 BMU_RST_SET | BMU_FIFO_RST);
4000 /* Reset the Rx prefetch unit. */
4001 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
4002 PREF_UNIT_RST_SET);
4003 /* Reset the RAM Buffer receive queue. */
4004 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
4005 /* Reset Rx MAC FIFO. */
4006 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
4007
4008 /* Free Rx and Tx mbufs still in the queues. */
4009 for (i = 0; i < MSK_RX_RING_CNT; i++) {
4010 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
4011 if (rxd->rx_m != NULL) {
4012 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
4013 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
4014 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
4015 rxd->rx_dmamap);
4016 m_freem(rxd->rx_m);
4017 rxd->rx_m = NULL;
4018 }
4019 }
4020 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
4021 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
4022 if (jrxd->rx_m != NULL) {
4023 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
4024 jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
4025 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
4026 jrxd->rx_dmamap);
4027 m_freem(jrxd->rx_m);
4028 jrxd->rx_m = NULL;
4029 }
4030 }
4031 for (i = 0; i < MSK_TX_RING_CNT; i++) {
4032 txd = &sc_if->msk_cdata.msk_txdesc[i];
4033 if (txd->tx_m != NULL) {
4034 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
4035 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
4036 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
4037 txd->tx_dmamap);
4038 m_freem(txd->tx_m);
4039 txd->tx_m = NULL;
4040 }
4041 }
4042
4043 /*
4044 * Mark the interface down.
4045 */
4046 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4047 sc_if->msk_link = 0;
4048}
4049
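/*
 * Generic helper for integer sysctl handlers: accept a new value only if it
 * falls within [low, high].
 */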
4050static int
4051sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
4052{
4053 int error, value;
4054
4055 if (!arg1)
4056 return (EINVAL);
4057 value = *(int *)arg1;
4058 error = sysctl_handle_int(oidp, &value, 0, req);
4059 if (error || !req->newptr)
4060 return (error);
4061 if (value < low || value > high)
4062 return (EINVAL);
4063 *(int *)arg1 = value;
4064
4065 return (0);
4066}
4067
4068static int
4069sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
4070{
4071
4072 return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
4073 MSK_PROC_MAX));
4074}