if_nfe.c (215194) → if_nfe.c (215297)
1/* $OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $ */
2
3/*-
4 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
5 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
6 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
22
23#include <sys/cdefs.h>
24__FBSDID("$FreeBSD: head/sys/dev/nfe/if_nfe.c 215194 2010-11-12 19:33:13Z yongari $");
24__FBSDID("$FreeBSD: head/sys/dev/nfe/if_nfe.c 215297 2010-11-14 13:26:10Z marius $");
25
26#ifdef HAVE_KERNEL_OPTION_HEADERS
27#include "opt_device_polling.h"
28#endif
29
30#include <sys/param.h>
31#include <sys/endian.h>
32#include <sys/systm.h>
33#include <sys/sockio.h>
34#include <sys/mbuf.h>
35#include <sys/malloc.h>
36#include <sys/module.h>
37#include <sys/kernel.h>
38#include <sys/queue.h>
39#include <sys/socket.h>
40#include <sys/sysctl.h>
41#include <sys/taskqueue.h>
42
43#include <net/if.h>
44#include <net/if_arp.h>
45#include <net/ethernet.h>
46#include <net/if_dl.h>
47#include <net/if_media.h>
48#include <net/if_types.h>
49#include <net/if_vlan_var.h>
50
51#include <net/bpf.h>
52
53#include <machine/bus.h>
54#include <machine/resource.h>
55#include <sys/bus.h>
56#include <sys/rman.h>
57
58#include <dev/mii/mii.h>
59#include <dev/mii/miivar.h>
60
61#include <dev/pci/pcireg.h>
62#include <dev/pci/pcivar.h>
63
64#include <dev/nfe/if_nfereg.h>
65#include <dev/nfe/if_nfevar.h>
66
67MODULE_DEPEND(nfe, pci, 1, 1, 1);
68MODULE_DEPEND(nfe, ether, 1, 1, 1);
69MODULE_DEPEND(nfe, miibus, 1, 1, 1);
70
71/* "device miibus" required. See GENERIC if you get errors here. */
72#include "miibus_if.h"
73
74static int nfe_probe(device_t);
75static int nfe_attach(device_t);
76static int nfe_detach(device_t);
77static int nfe_suspend(device_t);
78static int nfe_resume(device_t);
79static int nfe_shutdown(device_t);
80static void nfe_power(struct nfe_softc *);
81static int nfe_miibus_readreg(device_t, int, int);
82static int nfe_miibus_writereg(device_t, int, int, int);
83static void nfe_miibus_statchg(device_t);
84static void nfe_mac_config(struct nfe_softc *, struct mii_data *);
85static void nfe_set_intr(struct nfe_softc *);
86static __inline void nfe_enable_intr(struct nfe_softc *);
87static __inline void nfe_disable_intr(struct nfe_softc *);
88static int nfe_ioctl(struct ifnet *, u_long, caddr_t);
89static void nfe_alloc_msix(struct nfe_softc *, int);
90static int nfe_intr(void *);
91static void nfe_int_task(void *, int);
92static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
93static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
94static int nfe_newbuf(struct nfe_softc *, int);
95static int nfe_jnewbuf(struct nfe_softc *, int);
96static int nfe_rxeof(struct nfe_softc *, int, int *);
97static int nfe_jrxeof(struct nfe_softc *, int, int *);
98static void nfe_txeof(struct nfe_softc *);
99static int nfe_encap(struct nfe_softc *, struct mbuf **);
100static void nfe_setmulti(struct nfe_softc *);
101static void nfe_tx_task(void *, int);
102static void nfe_start(struct ifnet *);
103static void nfe_watchdog(struct ifnet *);
104static void nfe_init(void *);
105static void nfe_init_locked(void *);
106static void nfe_stop(struct ifnet *);
107static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
108static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
109static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
110static int nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
111static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
112static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
113static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
114static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
115static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
116static int nfe_ifmedia_upd(struct ifnet *);
117static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
118static void nfe_tick(void *);
119static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
120static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
121static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);
122
123static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
124static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);
125static void nfe_sysctl_node(struct nfe_softc *);
126static void nfe_stats_clear(struct nfe_softc *);
127static void nfe_stats_update(struct nfe_softc *);
128static void nfe_set_linkspeed(struct nfe_softc *);
129static void nfe_set_wol(struct nfe_softc *);
130
131#ifdef NFE_DEBUG
132static int nfedebug = 0;
133#define DPRINTF(sc, ...) do { \
134 if (nfedebug) \
135 device_printf((sc)->nfe_dev, __VA_ARGS__); \
136} while (0)
137#define DPRINTFN(sc, n, ...) do { \
138 if (nfedebug >= (n)) \
139 device_printf((sc)->nfe_dev, __VA_ARGS__); \
140} while (0)
141#else
142#define DPRINTF(sc, ...)
143#define DPRINTFN(sc, n, ...)
144#endif
145
146#define NFE_LOCK(_sc) mtx_lock(&(_sc)->nfe_mtx)
147#define NFE_UNLOCK(_sc) mtx_unlock(&(_sc)->nfe_mtx)
148#define NFE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)
149
150/* Tunables. */
151static int msi_disable = 0;
152static int msix_disable = 0;
153static int jumbo_disable = 0;
154TUNABLE_INT("hw.nfe.msi_disable", &msi_disable);
155TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
156TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);
157
158static device_method_t nfe_methods[] = {
159 /* Device interface */
160 DEVMETHOD(device_probe, nfe_probe),
161 DEVMETHOD(device_attach, nfe_attach),
162 DEVMETHOD(device_detach, nfe_detach),
163 DEVMETHOD(device_suspend, nfe_suspend),
164 DEVMETHOD(device_resume, nfe_resume),
165 DEVMETHOD(device_shutdown, nfe_shutdown),
166
167 /* bus interface */
168 DEVMETHOD(bus_print_child, bus_generic_print_child),
169 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
170
171 /* MII interface */
172 DEVMETHOD(miibus_readreg, nfe_miibus_readreg),
173 DEVMETHOD(miibus_writereg, nfe_miibus_writereg),
174 DEVMETHOD(miibus_statchg, nfe_miibus_statchg),
175
176 { NULL, NULL }
177};
178
179static driver_t nfe_driver = {
180 "nfe",
181 nfe_methods,
182 sizeof(struct nfe_softc)
183};
184
185static devclass_t nfe_devclass;
186
187DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
188DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
189
190static struct nfe_type nfe_devs[] = {
191 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
192 "NVIDIA nForce MCP Networking Adapter"},
193 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
194 "NVIDIA nForce2 MCP2 Networking Adapter"},
195 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
196 "NVIDIA nForce2 400 MCP4 Networking Adapter"},
197 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
198 "NVIDIA nForce2 400 MCP5 Networking Adapter"},
199 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
200 "NVIDIA nForce3 MCP3 Networking Adapter"},
201 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
202 "NVIDIA nForce3 250 MCP6 Networking Adapter"},
203 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
204 "NVIDIA nForce3 MCP7 Networking Adapter"},
205 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
206 "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
207 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
208 "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
209 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
210 "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP10 */
211 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
212 "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP11 */
213 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
214 "NVIDIA nForce 430 MCP12 Networking Adapter"},
215 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
216 "NVIDIA nForce 430 MCP13 Networking Adapter"},
217 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
218 "NVIDIA nForce MCP55 Networking Adapter"},
219 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
220 "NVIDIA nForce MCP55 Networking Adapter"},
221 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
222 "NVIDIA nForce MCP61 Networking Adapter"},
223 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
224 "NVIDIA nForce MCP61 Networking Adapter"},
225 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
226 "NVIDIA nForce MCP61 Networking Adapter"},
227 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
228 "NVIDIA nForce MCP61 Networking Adapter"},
229 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
230 "NVIDIA nForce MCP65 Networking Adapter"},
231 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
232 "NVIDIA nForce MCP65 Networking Adapter"},
233 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
234 "NVIDIA nForce MCP65 Networking Adapter"},
235 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
236 "NVIDIA nForce MCP65 Networking Adapter"},
237 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
238 "NVIDIA nForce MCP67 Networking Adapter"},
239 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
240 "NVIDIA nForce MCP67 Networking Adapter"},
241 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
242 "NVIDIA nForce MCP67 Networking Adapter"},
243 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
244 "NVIDIA nForce MCP67 Networking Adapter"},
245 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
246 "NVIDIA nForce MCP73 Networking Adapter"},
247 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
248 "NVIDIA nForce MCP73 Networking Adapter"},
249 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
250 "NVIDIA nForce MCP73 Networking Adapter"},
251 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
252 "NVIDIA nForce MCP73 Networking Adapter"},
253 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
254 "NVIDIA nForce MCP77 Networking Adapter"},
255 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
256 "NVIDIA nForce MCP77 Networking Adapter"},
257 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
258 "NVIDIA nForce MCP77 Networking Adapter"},
259 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
260 "NVIDIA nForce MCP77 Networking Adapter"},
261 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
262 "NVIDIA nForce MCP79 Networking Adapter"},
263 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
264 "NVIDIA nForce MCP79 Networking Adapter"},
265 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
266 "NVIDIA nForce MCP79 Networking Adapter"},
267 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
268 "NVIDIA nForce MCP79 Networking Adapter"},
269 {0, 0, NULL}
270};
271
272
273/* Probe for supported hardware ID's */
274static int
275nfe_probe(device_t dev)
276{
277 struct nfe_type *t;
278
279 t = nfe_devs;
280 /* Check for matching PCI DEVICE ID's */
281 while (t->name != NULL) {
282 if ((pci_get_vendor(dev) == t->vid_id) &&
283 (pci_get_device(dev) == t->dev_id)) {
284 device_set_desc(dev, t->name);
285 return (BUS_PROBE_DEFAULT);
286 }
287 t++;
288 }
289
290 return (ENXIO);
291}
292
293static void
294nfe_alloc_msix(struct nfe_softc *sc, int count)
295{
296 int rid;
297
298 rid = PCIR_BAR(2);
299 sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
300 &rid, RF_ACTIVE);
301 if (sc->nfe_msix_res == NULL) {
302 device_printf(sc->nfe_dev,
303 "couldn't allocate MSIX table resource\n");
304 return;
305 }
306 rid = PCIR_BAR(3);
307 sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
308 SYS_RES_MEMORY, &rid, RF_ACTIVE);
309 if (sc->nfe_msix_pba_res == NULL) {
310 device_printf(sc->nfe_dev,
311 "couldn't allocate MSIX PBA resource\n");
312 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
313 sc->nfe_msix_res);
314 sc->nfe_msix_res = NULL;
315 return;
316 }
317
318 if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
319 if (count == NFE_MSI_MESSAGES) {
320 if (bootverbose)
321 device_printf(sc->nfe_dev,
322 "Using %d MSIX messages\n", count);
323 sc->nfe_msix = 1;
324 } else {
325 if (bootverbose)
326 device_printf(sc->nfe_dev,
327 "couldn't allocate MSIX\n");
328 pci_release_msi(sc->nfe_dev);
329 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
330 PCIR_BAR(3), sc->nfe_msix_pba_res);
331 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
332 PCIR_BAR(2), sc->nfe_msix_res);
333 sc->nfe_msix_pba_res = NULL;
334 sc->nfe_msix_res = NULL;
335 }
336 }
337}
338
339static int
340nfe_attach(device_t dev)
341{
342 struct nfe_softc *sc;
343 struct ifnet *ifp;
344 bus_addr_t dma_addr_max;
345 int error = 0, i, msic, reg, rid;
346
347 sc = device_get_softc(dev);
348 sc->nfe_dev = dev;
349
350 mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
351 MTX_DEF);
352 callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);
353
354 pci_enable_busmaster(dev);
355
356 rid = PCIR_BAR(0);
357 sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
358 RF_ACTIVE);
359 if (sc->nfe_res[0] == NULL) {
360 device_printf(dev, "couldn't map memory resources\n");
361 mtx_destroy(&sc->nfe_mtx);
362 return (ENXIO);
363 }
364
365 if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
366 uint16_t v, width;
367
368 v = pci_read_config(dev, reg + 0x08, 2);
369 /* Change max. read request size to 4096. */
370 v &= ~(7 << 12);
371 v |= (5 << 12);
372 pci_write_config(dev, reg + 0x08, v, 2);
373
374 v = pci_read_config(dev, reg + 0x0c, 2);
375 /* link capability */
376 v = (v >> 4) & 0x0f;
377 width = pci_read_config(dev, reg + 0x12, 2);
378 /* negotiated link width */
379 width = (width >> 4) & 0x3f;
380 if (v != width)
381 device_printf(sc->nfe_dev,
382 "warning, negotiated width of link(x%d) != "
383 "max. width of link(x%d)\n", width, v);
384 }
385
386 /* Allocate interrupt */
387 if (msix_disable == 0 || msi_disable == 0) {
388 if (msix_disable == 0 &&
389 (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
390 nfe_alloc_msix(sc, msic);
391 if (msi_disable == 0 && sc->nfe_msix == 0 &&
392 (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
393 pci_alloc_msi(dev, &msic) == 0) {
394 if (msic == NFE_MSI_MESSAGES) {
395 if (bootverbose)
396 device_printf(dev,
397 "Using %d MSI messages\n", msic);
398 sc->nfe_msi = 1;
399 } else
400 pci_release_msi(dev);
401 }
402 }
403
404 if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
405 rid = 0;
406 sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
407 RF_SHAREABLE | RF_ACTIVE);
408 if (sc->nfe_irq[0] == NULL) {
409 device_printf(dev, "couldn't allocate IRQ resources\n");
410 error = ENXIO;
411 goto fail;
412 }
413 } else {
414 for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
415 sc->nfe_irq[i] = bus_alloc_resource_any(dev,
416 SYS_RES_IRQ, &rid, RF_ACTIVE);
417 if (sc->nfe_irq[i] == NULL) {
418 device_printf(dev,
419 "couldn't allocate IRQ resources for "
420 "message %d\n", rid);
421 error = ENXIO;
422 goto fail;
423 }
424 }
425 /* Map interrupts to vector 0. */
426 if (sc->nfe_msix != 0) {
427 NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
428 NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
429 } else if (sc->nfe_msi != 0) {
430 NFE_WRITE(sc, NFE_MSI_MAP0, 0);
431 NFE_WRITE(sc, NFE_MSI_MAP1, 0);
432 }
433 }
434
435 /* Set IRQ status/mask register. */
436 sc->nfe_irq_status = NFE_IRQ_STATUS;
437 sc->nfe_irq_mask = NFE_IRQ_MASK;
438 sc->nfe_intrs = NFE_IRQ_WANTED;
439 sc->nfe_nointrs = 0;
440 if (sc->nfe_msix != 0) {
441 sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
442 sc->nfe_nointrs = NFE_IRQ_WANTED;
443 } else if (sc->nfe_msi != 0) {
444 sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
445 sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
446 }
447
448 sc->nfe_devid = pci_get_device(dev);
449 sc->nfe_revid = pci_get_revid(dev);
450 sc->nfe_flags = 0;
451
452 switch (sc->nfe_devid) {
453 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
454 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
455 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
456 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
457 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
458 break;
459 case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
460 case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
461 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1;
462 break;
463 case PCI_PRODUCT_NVIDIA_CK804_LAN1:
464 case PCI_PRODUCT_NVIDIA_CK804_LAN2:
465 case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
466 case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
467 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
468 NFE_MIB_V1;
469 break;
470 case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
471 case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
472 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
473 NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
474 break;
475
476 case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
477 case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
478 case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
479 case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
480 case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
481 case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
482 case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
483 case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
484 case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
485 case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
486 case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
487 case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
488 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
489 NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
490 break;
491 case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
492 case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
493 case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
494 case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
495 /* XXX flow control */
496 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT |
497 NFE_CORRECT_MACADDR | NFE_MIB_V3;
498 break;
499 case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
500 case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
501 case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
502 case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
503 /* XXX flow control */
504 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
505 NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3;
506 break;
507 case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
508 case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
509 case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
510 case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
511 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
512 NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL |
513 NFE_MIB_V2;
514 break;
515 }
516
517 nfe_power(sc);
518 /* Check for reversed ethernet address */
519 if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
520 sc->nfe_flags |= NFE_CORRECT_MACADDR;
521 nfe_get_macaddr(sc, sc->eaddr);
522 /*
523 * Allocate the parent bus DMA tag appropriate for PCI.
524 */
525 dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
526 if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
527 dma_addr_max = NFE_DMA_MAXADDR;
528 error = bus_dma_tag_create(
529 bus_get_dma_tag(sc->nfe_dev), /* parent */
530 1, 0, /* alignment, boundary */
531 dma_addr_max, /* lowaddr */
532 BUS_SPACE_MAXADDR, /* highaddr */
533 NULL, NULL, /* filter, filterarg */
534 BUS_SPACE_MAXSIZE_32BIT, 0, /* maxsize, nsegments */
535 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
536 0, /* flags */
537 NULL, NULL, /* lockfunc, lockarg */
538 &sc->nfe_parent_tag);
539 if (error)
540 goto fail;
541
542 ifp = sc->nfe_ifp = if_alloc(IFT_ETHER);
543 if (ifp == NULL) {
544 device_printf(dev, "can not if_alloc()\n");
545 error = ENOSPC;
546 goto fail;
547 }
548 TASK_INIT(&sc->nfe_tx_task, 1, nfe_tx_task, ifp);
549
550 /*
551 * Allocate Tx and Rx rings.
552 */
553 if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
554 goto fail;
555
556 if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
557 goto fail;
558
559 nfe_alloc_jrx_ring(sc, &sc->jrxq);
560 /* Create sysctl node. */
561 nfe_sysctl_node(sc);
562
563 ifp->if_softc = sc;
564 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
565 ifp->if_mtu = ETHERMTU;
566 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
567 ifp->if_ioctl = nfe_ioctl;
568 ifp->if_start = nfe_start;
569 ifp->if_hwassist = 0;
570 ifp->if_capabilities = 0;
571 ifp->if_init = nfe_init;
572 IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT - 1);
573 ifp->if_snd.ifq_drv_maxlen = NFE_TX_RING_COUNT - 1;
574 IFQ_SET_READY(&ifp->if_snd);
575
576 if (sc->nfe_flags & NFE_HW_CSUM) {
577 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4;
578 ifp->if_hwassist |= NFE_CSUM_FEATURES | CSUM_TSO;
579 }
580 ifp->if_capenable = ifp->if_capabilities;
581
582 sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
583 /* VLAN capability setup. */
584 ifp->if_capabilities |= IFCAP_VLAN_MTU;
585 if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
586 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
587 if ((ifp->if_capabilities & IFCAP_HWCSUM) != 0)
588 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
589 }
590
591 if (pci_find_extcap(dev, PCIY_PMG, &reg) == 0)
592 ifp->if_capabilities |= IFCAP_WOL_MAGIC;
593 ifp->if_capenable = ifp->if_capabilities;
594
595 /*
596 * Tell the upper layer(s) we support long frames.
597 * Must appear after the call to ether_ifattach() because
598 * ether_ifattach() sets ifi_hdrlen to the default value.
599 */
600 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
601
602#ifdef DEVICE_POLLING
603 ifp->if_capabilities |= IFCAP_POLLING;
604#endif
605
606 /* Do MII setup */
607 error = mii_attach(dev, &sc->nfe_miibus, ifp, nfe_ifmedia_upd,
608 nfe_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
608 nfe_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
609 MIIF_DOPAUSE);
609 if (error != 0) {
610 device_printf(dev, "attaching PHYs failed\n");
611 goto fail;
612 }
613 ether_ifattach(ifp, sc->eaddr);
614
615 TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
616 sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
617 taskqueue_thread_enqueue, &sc->nfe_tq);
618 taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
619 device_get_nameunit(sc->nfe_dev));
620 error = 0;
621 if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
622 error = bus_setup_intr(dev, sc->nfe_irq[0],
623 INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
624 &sc->nfe_intrhand[0]);
625 } else {
626 for (i = 0; i < NFE_MSI_MESSAGES; i++) {
627 error = bus_setup_intr(dev, sc->nfe_irq[i],
628 INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
629 &sc->nfe_intrhand[i]);
630 if (error != 0)
631 break;
632 }
633 }
634 if (error) {
635 device_printf(dev, "couldn't set up irq\n");
636 taskqueue_free(sc->nfe_tq);
637 sc->nfe_tq = NULL;
638 ether_ifdetach(ifp);
639 goto fail;
640 }
641
642fail:
643 if (error)
644 nfe_detach(dev);
645
646 return (error);
647}
648
649
650static int
651nfe_detach(device_t dev)
652{
653 struct nfe_softc *sc;
654 struct ifnet *ifp;
655 uint8_t eaddr[ETHER_ADDR_LEN];
656 int i, rid;
657
658 sc = device_get_softc(dev);
659 KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
660 ifp = sc->nfe_ifp;
661
662#ifdef DEVICE_POLLING
663 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
664 ether_poll_deregister(ifp);
665#endif
666 if (device_is_attached(dev)) {
667 NFE_LOCK(sc);
668 nfe_stop(ifp);
669 ifp->if_flags &= ~IFF_UP;
670 NFE_UNLOCK(sc);
671 callout_drain(&sc->nfe_stat_ch);
672 taskqueue_drain(taskqueue_fast, &sc->nfe_tx_task);
673 ether_ifdetach(ifp);
674 }
675
676 if (ifp) {
677 /* restore ethernet address */
678 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
679 for (i = 0; i < ETHER_ADDR_LEN; i++) {
680 eaddr[i] = sc->eaddr[5 - i];
681 }
682 } else
683 bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
684 nfe_set_macaddr(sc, eaddr);
685 if_free(ifp);
686 }
687 if (sc->nfe_miibus)
688 device_delete_child(dev, sc->nfe_miibus);
689 bus_generic_detach(dev);
690 if (sc->nfe_tq != NULL) {
691 taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
692 taskqueue_free(sc->nfe_tq);
693 sc->nfe_tq = NULL;
694 }
695
696 for (i = 0; i < NFE_MSI_MESSAGES; i++) {
697 if (sc->nfe_intrhand[i] != NULL) {
698 bus_teardown_intr(dev, sc->nfe_irq[i],
699 sc->nfe_intrhand[i]);
700 sc->nfe_intrhand[i] = NULL;
701 }
702 }
703
704 if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
705 if (sc->nfe_irq[0] != NULL)
706 bus_release_resource(dev, SYS_RES_IRQ, 0,
707 sc->nfe_irq[0]);
708 } else {
709 for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
710 if (sc->nfe_irq[i] != NULL) {
711 bus_release_resource(dev, SYS_RES_IRQ, rid,
712 sc->nfe_irq[i]);
713 sc->nfe_irq[i] = NULL;
714 }
715 }
716 pci_release_msi(dev);
717 }
718 if (sc->nfe_msix_pba_res != NULL) {
719 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
720 sc->nfe_msix_pba_res);
721 sc->nfe_msix_pba_res = NULL;
722 }
723 if (sc->nfe_msix_res != NULL) {
724 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
725 sc->nfe_msix_res);
726 sc->nfe_msix_res = NULL;
727 }
728 if (sc->nfe_res[0] != NULL) {
729 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
730 sc->nfe_res[0]);
731 sc->nfe_res[0] = NULL;
732 }
733
734 nfe_free_tx_ring(sc, &sc->txq);
735 nfe_free_rx_ring(sc, &sc->rxq);
736 nfe_free_jrx_ring(sc, &sc->jrxq);
737
738 if (sc->nfe_parent_tag) {
739 bus_dma_tag_destroy(sc->nfe_parent_tag);
740 sc->nfe_parent_tag = NULL;
741 }
742
743 mtx_destroy(&sc->nfe_mtx);
744
745 return (0);
746}
747
748
749static int
750nfe_suspend(device_t dev)
751{
752 struct nfe_softc *sc;
753
754 sc = device_get_softc(dev);
755
756 NFE_LOCK(sc);
757 nfe_stop(sc->nfe_ifp);
758 nfe_set_wol(sc);
759 sc->nfe_suspended = 1;
760 NFE_UNLOCK(sc);
761
762 return (0);
763}
764
765
766static int
767nfe_resume(device_t dev)
768{
769 struct nfe_softc *sc;
770 struct ifnet *ifp;
771
772 sc = device_get_softc(dev);
773
774 NFE_LOCK(sc);
775 nfe_power(sc);
776 ifp = sc->nfe_ifp;
777 if (ifp->if_flags & IFF_UP)
778 nfe_init_locked(sc);
779 sc->nfe_suspended = 0;
780 NFE_UNLOCK(sc);
781
782 return (0);
783}
784
785
786/* Take PHY/NIC out of powerdown, from Linux */
787static void
788nfe_power(struct nfe_softc *sc)
789{
790 uint32_t pwr;
791
792 if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
793 return;
794 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
795 NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
796 DELAY(100);
797 NFE_WRITE(sc, NFE_MAC_RESET, 0);
798 DELAY(100);
799 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
800 pwr = NFE_READ(sc, NFE_PWR2_CTL);
801 pwr &= ~NFE_PWR2_WAKEUP_MASK;
802 if (sc->nfe_revid >= 0xa3 &&
803 (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
804 sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
805 pwr |= NFE_PWR2_REVA3;
806 NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
807}
808
809
810static void
811nfe_miibus_statchg(device_t dev)
812{
813 struct nfe_softc *sc;
814 struct mii_data *mii;
815 struct ifnet *ifp;
816 uint32_t rxctl, txctl;
817
818 sc = device_get_softc(dev);
819
820 mii = device_get_softc(sc->nfe_miibus);
821 ifp = sc->nfe_ifp;
822
823 sc->nfe_link = 0;
824 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
825 (IFM_ACTIVE | IFM_AVALID)) {
826 switch (IFM_SUBTYPE(mii->mii_media_active)) {
827 case IFM_10_T:
828 case IFM_100_TX:
829 case IFM_1000_T:
830 sc->nfe_link = 1;
831 break;
832 default:
833 break;
834 }
835 }
836
837 nfe_mac_config(sc, mii);
838 txctl = NFE_READ(sc, NFE_TX_CTL);
839 rxctl = NFE_READ(sc, NFE_RX_CTL);
840 if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
841 txctl |= NFE_TX_START;
842 rxctl |= NFE_RX_START;
843 } else {
844 txctl &= ~NFE_TX_START;
845 rxctl &= ~NFE_RX_START;
846 }
847 NFE_WRITE(sc, NFE_TX_CTL, txctl);
848 NFE_WRITE(sc, NFE_RX_CTL, rxctl);
849}
850
851
852static void
853nfe_mac_config(struct nfe_softc *sc, struct mii_data *mii)
854{
855 uint32_t link, misc, phy, seed;
856 uint32_t val;
857
858 NFE_LOCK_ASSERT(sc);
859
860 phy = NFE_READ(sc, NFE_PHY_IFACE);
861 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
862
863 seed = NFE_READ(sc, NFE_RNDSEED);
864 seed &= ~NFE_SEED_MASK;
865
866 misc = NFE_MISC1_MAGIC;
867 link = NFE_MEDIA_SET;
868
869 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) {
870 phy |= NFE_PHY_HDX; /* half-duplex */
871 misc |= NFE_MISC1_HDX;
872 }
873
874 switch (IFM_SUBTYPE(mii->mii_media_active)) {
875 case IFM_1000_T: /* full-duplex only */
876 link |= NFE_MEDIA_1000T;
877 seed |= NFE_SEED_1000T;
878 phy |= NFE_PHY_1000T;
879 break;
880 case IFM_100_TX:
881 link |= NFE_MEDIA_100TX;
882 seed |= NFE_SEED_100TX;
883 phy |= NFE_PHY_100TX;
884 break;
885 case IFM_10_T:
886 link |= NFE_MEDIA_10T;
887 seed |= NFE_SEED_10T;
888 break;
889 }
890
891 if ((phy & 0x10000000) != 0) {
892 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
893 val = NFE_R1_MAGIC_1000;
894 else
895 val = NFE_R1_MAGIC_10_100;
896 } else
897 val = NFE_R1_MAGIC_DEFAULT;
898 NFE_WRITE(sc, NFE_SETUP_R1, val);
899
900 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */
901
902 NFE_WRITE(sc, NFE_PHY_IFACE, phy);
903 NFE_WRITE(sc, NFE_MISC1, misc);
904 NFE_WRITE(sc, NFE_LINKSPEED, link);
905
906 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
907 /* It seems all hardwares supports Rx pause frames. */
908 val = NFE_READ(sc, NFE_RXFILTER);
610 if (error != 0) {
611 device_printf(dev, "attaching PHYs failed\n");
612 goto fail;
613 }
614 ether_ifattach(ifp, sc->eaddr);
615
616 TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
617 sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
618 taskqueue_thread_enqueue, &sc->nfe_tq);
619 taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
620 device_get_nameunit(sc->nfe_dev));
621 error = 0;
622 if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
623 error = bus_setup_intr(dev, sc->nfe_irq[0],
624 INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
625 &sc->nfe_intrhand[0]);
626 } else {
627 for (i = 0; i < NFE_MSI_MESSAGES; i++) {
628 error = bus_setup_intr(dev, sc->nfe_irq[i],
629 INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
630 &sc->nfe_intrhand[i]);
631 if (error != 0)
632 break;
633 }
634 }
635 if (error) {
636 device_printf(dev, "couldn't set up irq\n");
637 taskqueue_free(sc->nfe_tq);
638 sc->nfe_tq = NULL;
639 ether_ifdetach(ifp);
640 goto fail;
641 }
642
643fail:
644 if (error)
645 nfe_detach(dev);
646
647 return (error);
648}
649
650
651static int
652nfe_detach(device_t dev)
653{
654 struct nfe_softc *sc;
655 struct ifnet *ifp;
656 uint8_t eaddr[ETHER_ADDR_LEN];
657 int i, rid;
658
659 sc = device_get_softc(dev);
660 KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
661 ifp = sc->nfe_ifp;
662
663#ifdef DEVICE_POLLING
664 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
665 ether_poll_deregister(ifp);
666#endif
667 if (device_is_attached(dev)) {
668 NFE_LOCK(sc);
669 nfe_stop(ifp);
670 ifp->if_flags &= ~IFF_UP;
671 NFE_UNLOCK(sc);
672 callout_drain(&sc->nfe_stat_ch);
673 taskqueue_drain(taskqueue_fast, &sc->nfe_tx_task);
674 ether_ifdetach(ifp);
675 }
676
677 if (ifp) {
678 /* restore ethernet address */
679 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
680 for (i = 0; i < ETHER_ADDR_LEN; i++) {
681 eaddr[i] = sc->eaddr[5 - i];
682 }
683 } else
684 bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
685 nfe_set_macaddr(sc, eaddr);
686 if_free(ifp);
687 }
688 if (sc->nfe_miibus)
689 device_delete_child(dev, sc->nfe_miibus);
690 bus_generic_detach(dev);
691 if (sc->nfe_tq != NULL) {
692 taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
693 taskqueue_free(sc->nfe_tq);
694 sc->nfe_tq = NULL;
695 }
696
697 for (i = 0; i < NFE_MSI_MESSAGES; i++) {
698 if (sc->nfe_intrhand[i] != NULL) {
699 bus_teardown_intr(dev, sc->nfe_irq[i],
700 sc->nfe_intrhand[i]);
701 sc->nfe_intrhand[i] = NULL;
702 }
703 }
704
705 if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
706 if (sc->nfe_irq[0] != NULL)
707 bus_release_resource(dev, SYS_RES_IRQ, 0,
708 sc->nfe_irq[0]);
709 } else {
710 for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
711 if (sc->nfe_irq[i] != NULL) {
712 bus_release_resource(dev, SYS_RES_IRQ, rid,
713 sc->nfe_irq[i]);
714 sc->nfe_irq[i] = NULL;
715 }
716 }
717 pci_release_msi(dev);
718 }
719 if (sc->nfe_msix_pba_res != NULL) {
720 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
721 sc->nfe_msix_pba_res);
722 sc->nfe_msix_pba_res = NULL;
723 }
724 if (sc->nfe_msix_res != NULL) {
725 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
726 sc->nfe_msix_res);
727 sc->nfe_msix_res = NULL;
728 }
729 if (sc->nfe_res[0] != NULL) {
730 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
731 sc->nfe_res[0]);
732 sc->nfe_res[0] = NULL;
733 }
734
735 nfe_free_tx_ring(sc, &sc->txq);
736 nfe_free_rx_ring(sc, &sc->rxq);
737 nfe_free_jrx_ring(sc, &sc->jrxq);
738
739 if (sc->nfe_parent_tag) {
740 bus_dma_tag_destroy(sc->nfe_parent_tag);
741 sc->nfe_parent_tag = NULL;
742 }
743
744 mtx_destroy(&sc->nfe_mtx);
745
746 return (0);
747}
748
749
750static int
751nfe_suspend(device_t dev)
752{
753 struct nfe_softc *sc;
754
755 sc = device_get_softc(dev);
756
757 NFE_LOCK(sc);
758 nfe_stop(sc->nfe_ifp);
759 nfe_set_wol(sc);
760 sc->nfe_suspended = 1;
761 NFE_UNLOCK(sc);
762
763 return (0);
764}
765
766
767static int
768nfe_resume(device_t dev)
769{
770 struct nfe_softc *sc;
771 struct ifnet *ifp;
772
773 sc = device_get_softc(dev);
774
775 NFE_LOCK(sc);
776 nfe_power(sc);
777 ifp = sc->nfe_ifp;
778 if (ifp->if_flags & IFF_UP)
779 nfe_init_locked(sc);
780 sc->nfe_suspended = 0;
781 NFE_UNLOCK(sc);
782
783 return (0);
784}
785
786
787/* Take PHY/NIC out of powerdown, from Linux */
788static void
789nfe_power(struct nfe_softc *sc)
790{
791 uint32_t pwr;
792
793 if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
794 return;
795 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
796 NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
797 DELAY(100);
798 NFE_WRITE(sc, NFE_MAC_RESET, 0);
799 DELAY(100);
800 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
801 pwr = NFE_READ(sc, NFE_PWR2_CTL);
802 pwr &= ~NFE_PWR2_WAKEUP_MASK;
803 if (sc->nfe_revid >= 0xa3 &&
804 (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
805 sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
806 pwr |= NFE_PWR2_REVA3;
807 NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
808}
809
810
811static void
812nfe_miibus_statchg(device_t dev)
813{
814 struct nfe_softc *sc;
815 struct mii_data *mii;
816 struct ifnet *ifp;
817 uint32_t rxctl, txctl;
818
819 sc = device_get_softc(dev);
820
821 mii = device_get_softc(sc->nfe_miibus);
822 ifp = sc->nfe_ifp;
823
824 sc->nfe_link = 0;
825 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
826 (IFM_ACTIVE | IFM_AVALID)) {
827 switch (IFM_SUBTYPE(mii->mii_media_active)) {
828 case IFM_10_T:
829 case IFM_100_TX:
830 case IFM_1000_T:
831 sc->nfe_link = 1;
832 break;
833 default:
834 break;
835 }
836 }
837
838 nfe_mac_config(sc, mii);
839 txctl = NFE_READ(sc, NFE_TX_CTL);
840 rxctl = NFE_READ(sc, NFE_RX_CTL);
841 if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
842 txctl |= NFE_TX_START;
843 rxctl |= NFE_RX_START;
844 } else {
845 txctl &= ~NFE_TX_START;
846 rxctl &= ~NFE_RX_START;
847 }
848 NFE_WRITE(sc, NFE_TX_CTL, txctl);
849 NFE_WRITE(sc, NFE_RX_CTL, rxctl);
850}
851
852
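/*
 * Program the MAC for the media negotiated by the PHY: duplex and
 * speed (PHY interface, random seed and link speed registers) plus
 * Rx/Tx pause frame handling for full-duplex links.
 */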
853static void
854nfe_mac_config(struct nfe_softc *sc, struct mii_data *mii)
855{
856 uint32_t link, misc, phy, seed;
857 uint32_t val;
858
859 NFE_LOCK_ASSERT(sc);
860
861 phy = NFE_READ(sc, NFE_PHY_IFACE);
862 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
863
864 seed = NFE_READ(sc, NFE_RNDSEED);
865 seed &= ~NFE_SEED_MASK;
866
867 misc = NFE_MISC1_MAGIC;
868 link = NFE_MEDIA_SET;
869
870 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) {
871 phy |= NFE_PHY_HDX; /* half-duplex */
872 misc |= NFE_MISC1_HDX;
873 }
874
875 switch (IFM_SUBTYPE(mii->mii_media_active)) {
876 case IFM_1000_T: /* full-duplex only */
877 link |= NFE_MEDIA_1000T;
878 seed |= NFE_SEED_1000T;
879 phy |= NFE_PHY_1000T;
880 break;
881 case IFM_100_TX:
882 link |= NFE_MEDIA_100TX;
883 seed |= NFE_SEED_100TX;
884 phy |= NFE_PHY_100TX;
885 break;
886 case IFM_10_T:
887 link |= NFE_MEDIA_10T;
888 seed |= NFE_SEED_10T;
889 break;
890 }
891
892 if ((phy & 0x10000000) != 0) {
893 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
894 val = NFE_R1_MAGIC_1000;
895 else
896 val = NFE_R1_MAGIC_10_100;
897 } else
898 val = NFE_R1_MAGIC_DEFAULT;
899 NFE_WRITE(sc, NFE_SETUP_R1, val);
900
901 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */
902
903 NFE_WRITE(sc, NFE_PHY_IFACE, phy);
904 NFE_WRITE(sc, NFE_MISC1, misc);
905 NFE_WRITE(sc, NFE_LINKSPEED, link);
906
907 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
908 /* It seems all hardwares supports Rx pause frames. */
909 val = NFE_READ(sc, NFE_RXFILTER);
 910 		if ((IFM_OPTIONS(mii->mii_media_active) &
 911 		    IFM_ETH_RXPAUSE) != 0)
 912 			val |= NFE_PFF_RX_PAUSE;
 913 		else
 914 			val &= ~NFE_PFF_RX_PAUSE;
 915 		NFE_WRITE(sc, NFE_RXFILTER, val);
 916 		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
 917 			val = NFE_READ(sc, NFE_MISC1);
 918 			if ((IFM_OPTIONS(mii->mii_media_active) &
 919 			    IFM_ETH_TXPAUSE) != 0) {
918 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
919 NFE_TX_PAUSE_FRAME_ENABLE);
920 val |= NFE_MISC1_TX_PAUSE;
921 } else {
922 val &= ~NFE_MISC1_TX_PAUSE;
923 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
924 NFE_TX_PAUSE_FRAME_DISABLE);
925 }
926 NFE_WRITE(sc, NFE_MISC1, val);
927 }
928 } else {
929 /* disable rx/tx pause frames */
930 val = NFE_READ(sc, NFE_RXFILTER);
931 val &= ~NFE_PFF_RX_PAUSE;
932 NFE_WRITE(sc, NFE_RXFILTER, val);
933 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
934 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
935 NFE_TX_PAUSE_FRAME_DISABLE);
936 val = NFE_READ(sc, NFE_MISC1);
937 val &= ~NFE_MISC1_TX_PAUSE;
938 NFE_WRITE(sc, NFE_MISC1, val);
939 }
940 }
941}
942
943
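/*
 * Read a PHY register: clear any stale busy state, issue the read
 * command and poll NFE_PHY_CTL until the busy bit clears or the
 * access times out, then fetch the result from NFE_PHY_DATA.
 */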
944static int
945nfe_miibus_readreg(device_t dev, int phy, int reg)
946{
947 struct nfe_softc *sc = device_get_softc(dev);
948 uint32_t val;
949 int ntries;
950
951 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
952
953 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
954 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
955 DELAY(100);
956 }
957
958 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
959
960 for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
961 DELAY(100);
962 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
963 break;
964 }
965 if (ntries == NFE_TIMEOUT) {
966 DPRINTFN(sc, 2, "timeout waiting for PHY\n");
967 return 0;
968 }
969
970 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
971 DPRINTFN(sc, 2, "could not read PHY\n");
972 return 0;
973 }
974
975 val = NFE_READ(sc, NFE_PHY_DATA);
976 if (val != 0xffffffff && val != 0)
977 sc->mii_phyaddr = phy;
978
979 DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);
980
981 return (val);
982}
983
984
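/*
 * Write a PHY register using the same busy-bit handshake as the read
 * path; a timeout is only reported when NFE_DEBUG is set.
 */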
985static int
986nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
987{
988 struct nfe_softc *sc = device_get_softc(dev);
989 uint32_t ctl;
990 int ntries;
991
992 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
993
994 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
995 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
996 DELAY(100);
997 }
998
999 NFE_WRITE(sc, NFE_PHY_DATA, val);
1000 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
1001 NFE_WRITE(sc, NFE_PHY_CTL, ctl);
1002
1003 for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
1004 DELAY(100);
1005 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
1006 break;
1007 }
1008#ifdef NFE_DEBUG
1009 if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
1010 device_printf(sc->nfe_dev, "could not write to PHY\n");
1011#endif
1012 return (0);
1013}
1014
1015struct nfe_dmamap_arg {
1016 bus_addr_t nfe_busaddr;
1017};
1018
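/*
 * Allocate DMA resources for the standard Rx ring: one tag/map for
 * the descriptor ring itself and a separate buffer tag providing a
 * map per Rx slot plus a spare map used when replacing buffers.
 */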
1019static int
1020nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1021{
1022 struct nfe_dmamap_arg ctx;
1023 struct nfe_rx_data *data;
1024 void *desc;
1025 int i, error, descsize;
1026
1027 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1028 desc = ring->desc64;
1029 descsize = sizeof (struct nfe_desc64);
1030 } else {
1031 desc = ring->desc32;
1032 descsize = sizeof (struct nfe_desc32);
1033 }
1034
1035 ring->cur = ring->next = 0;
1036
1037 error = bus_dma_tag_create(sc->nfe_parent_tag,
1038 NFE_RING_ALIGN, 0, /* alignment, boundary */
1039 BUS_SPACE_MAXADDR, /* lowaddr */
1040 BUS_SPACE_MAXADDR, /* highaddr */
1041 NULL, NULL, /* filter, filterarg */
1042 NFE_RX_RING_COUNT * descsize, 1, /* maxsize, nsegments */
1043 NFE_RX_RING_COUNT * descsize, /* maxsegsize */
1044 0, /* flags */
1045 NULL, NULL, /* lockfunc, lockarg */
1046 &ring->rx_desc_tag);
1047 if (error != 0) {
1048 device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
1049 goto fail;
1050 }
1051
1052 /* allocate memory to desc */
1053 error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
1054 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
1055 if (error != 0) {
1056 device_printf(sc->nfe_dev, "could not create desc DMA map\n");
1057 goto fail;
1058 }
1059 if (sc->nfe_flags & NFE_40BIT_ADDR)
1060 ring->desc64 = desc;
1061 else
1062 ring->desc32 = desc;
1063
1064 /* map desc to device visible address space */
1065 ctx.nfe_busaddr = 0;
1066 error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
1067 NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1068 if (error != 0) {
1069 device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1070 goto fail;
1071 }
1072 ring->physaddr = ctx.nfe_busaddr;
1073
1074 error = bus_dma_tag_create(sc->nfe_parent_tag,
1075 1, 0, /* alignment, boundary */
1076 BUS_SPACE_MAXADDR, /* lowaddr */
1077 BUS_SPACE_MAXADDR, /* highaddr */
1078 NULL, NULL, /* filter, filterarg */
1079 MCLBYTES, 1, /* maxsize, nsegments */
1080 MCLBYTES, /* maxsegsize */
1081 0, /* flags */
1082 NULL, NULL, /* lockfunc, lockarg */
1083 &ring->rx_data_tag);
1084 if (error != 0) {
1085 device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
1086 goto fail;
1087 }
1088
1089 error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
1090 if (error != 0) {
1091 device_printf(sc->nfe_dev,
1092 "could not create Rx DMA spare map\n");
1093 goto fail;
1094 }
1095
1096 /*
1097 * Pre-allocate Rx buffers and populate Rx ring.
1098 */
1099 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1100 data = &sc->rxq.data[i];
1101 data->rx_data_map = NULL;
1102 data->m = NULL;
1103 error = bus_dmamap_create(ring->rx_data_tag, 0,
1104 &data->rx_data_map);
1105 if (error != 0) {
1106 device_printf(sc->nfe_dev,
1107 "could not create Rx DMA map\n");
1108 goto fail;
1109 }
1110 }
1111
1112fail:
1113 return (error);
1114}
1115
1116
1117static void
1118nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1119{
1120 struct nfe_dmamap_arg ctx;
1121 struct nfe_rx_data *data;
1122 void *desc;
1123 int i, error, descsize;
1124
1125 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
1126 return;
1127 if (jumbo_disable != 0) {
1128 device_printf(sc->nfe_dev, "disabling jumbo frame support\n");
1129 sc->nfe_jumbo_disable = 1;
1130 return;
1131 }
1132
1133 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1134 desc = ring->jdesc64;
1135 descsize = sizeof (struct nfe_desc64);
1136 } else {
1137 desc = ring->jdesc32;
1138 descsize = sizeof (struct nfe_desc32);
1139 }
1140
1141 ring->jcur = ring->jnext = 0;
1142
1143 /* Create DMA tag for jumbo Rx ring. */
1144 error = bus_dma_tag_create(sc->nfe_parent_tag,
1145 NFE_RING_ALIGN, 0, /* alignment, boundary */
1146 BUS_SPACE_MAXADDR, /* lowaddr */
1147 BUS_SPACE_MAXADDR, /* highaddr */
1148 NULL, NULL, /* filter, filterarg */
1149 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsize */
1150 1, /* nsegments */
1151 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsegsize */
1152 0, /* flags */
1153 NULL, NULL, /* lockfunc, lockarg */
1154 &ring->jrx_desc_tag);
1155 if (error != 0) {
1156 device_printf(sc->nfe_dev,
1157 "could not create jumbo ring DMA tag\n");
1158 goto fail;
1159 }
1160
1161 /* Create DMA tag for jumbo Rx buffers. */
1162 error = bus_dma_tag_create(sc->nfe_parent_tag,
1163 1, 0, /* alignment, boundary */
1164 BUS_SPACE_MAXADDR, /* lowaddr */
1165 BUS_SPACE_MAXADDR, /* highaddr */
1166 NULL, NULL, /* filter, filterarg */
1167 MJUM9BYTES, /* maxsize */
1168 1, /* nsegments */
1169 MJUM9BYTES, /* maxsegsize */
1170 0, /* flags */
1171 NULL, NULL, /* lockfunc, lockarg */
1172 &ring->jrx_data_tag);
1173 if (error != 0) {
1174 device_printf(sc->nfe_dev,
1175 "could not create jumbo Rx buffer DMA tag\n");
1176 goto fail;
1177 }
1178
1179 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
1180 error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK |
1181 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map);
1182 if (error != 0) {
1183 device_printf(sc->nfe_dev,
1184 "could not allocate DMA'able memory for jumbo Rx ring\n");
1185 goto fail;
1186 }
1187 if (sc->nfe_flags & NFE_40BIT_ADDR)
1188 ring->jdesc64 = desc;
1189 else
1190 ring->jdesc32 = desc;
1191
1192 ctx.nfe_busaddr = 0;
1193 error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc,
1194 NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1195 if (error != 0) {
1196 device_printf(sc->nfe_dev,
1197 "could not load DMA'able memory for jumbo Rx ring\n");
1198 goto fail;
1199 }
1200 ring->jphysaddr = ctx.nfe_busaddr;
1201
1202 /* Create DMA maps for jumbo Rx buffers. */
1203 error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map);
1204 if (error != 0) {
1205 device_printf(sc->nfe_dev,
1206 "could not create jumbo Rx DMA spare map\n");
1207 goto fail;
1208 }
1209
1210 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1211 data = &sc->jrxq.jdata[i];
1212 data->rx_data_map = NULL;
1213 data->m = NULL;
1214 error = bus_dmamap_create(ring->jrx_data_tag, 0,
1215 &data->rx_data_map);
1216 if (error != 0) {
1217 device_printf(sc->nfe_dev,
1218 "could not create jumbo Rx DMA map\n");
1219 goto fail;
1220 }
1221 }
1222
1223 return;
1224
1225fail:
1226 /*
1227 * Running without jumbo frame support is ok for most cases
1228 * so don't fail on creating dma tag/map for jumbo frame.
1229 */
1230 nfe_free_jrx_ring(sc, ring);
1231 device_printf(sc->nfe_dev, "disabling jumbo frame support due to "
1232 "resource shortage\n");
1233 sc->nfe_jumbo_disable = 1;
1234}
1235
1236
1237static int
1238nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1239{
1240 void *desc;
1241 size_t descsize;
1242 int i;
1243
1244 ring->cur = ring->next = 0;
1245 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1246 desc = ring->desc64;
1247 descsize = sizeof (struct nfe_desc64);
1248 } else {
1249 desc = ring->desc32;
1250 descsize = sizeof (struct nfe_desc32);
1251 }
1252 bzero(desc, descsize * NFE_RX_RING_COUNT);
1253 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1254 if (nfe_newbuf(sc, i) != 0)
1255 return (ENOBUFS);
1256 }
1257
1258 bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
1259 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1260
1261 return (0);
1262}
1263
1264
1265static int
1266nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1267{
1268 void *desc;
1269 size_t descsize;
1270 int i;
1271
1272 ring->jcur = ring->jnext = 0;
1273 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1274 desc = ring->jdesc64;
1275 descsize = sizeof (struct nfe_desc64);
1276 } else {
1277 desc = ring->jdesc32;
1278 descsize = sizeof (struct nfe_desc32);
1279 }
1280 bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
1281 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1282 if (nfe_jnewbuf(sc, i) != 0)
1283 return (ENOBUFS);
1284 }
1285
1286 bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
1287 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1288
1289 return (0);
1290}
1291
1292
1293static void
1294nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1295{
1296 struct nfe_rx_data *data;
1297 void *desc;
1298 int i, descsize;
1299
1300 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1301 desc = ring->desc64;
1302 descsize = sizeof (struct nfe_desc64);
1303 } else {
1304 desc = ring->desc32;
1305 descsize = sizeof (struct nfe_desc32);
1306 }
1307
1308 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1309 data = &ring->data[i];
1310 if (data->rx_data_map != NULL) {
1311 bus_dmamap_destroy(ring->rx_data_tag,
1312 data->rx_data_map);
1313 data->rx_data_map = NULL;
1314 }
1315 if (data->m != NULL) {
1316 m_freem(data->m);
1317 data->m = NULL;
1318 }
1319 }
1320 if (ring->rx_data_tag != NULL) {
1321 if (ring->rx_spare_map != NULL) {
1322 bus_dmamap_destroy(ring->rx_data_tag,
1323 ring->rx_spare_map);
1324 ring->rx_spare_map = NULL;
1325 }
1326 bus_dma_tag_destroy(ring->rx_data_tag);
1327 ring->rx_data_tag = NULL;
1328 }
1329
1330 if (desc != NULL) {
1331 bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
1332 bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
1333 ring->desc64 = NULL;
1334 ring->desc32 = NULL;
1335 ring->rx_desc_map = NULL;
1336 }
1337 if (ring->rx_desc_tag != NULL) {
1338 bus_dma_tag_destroy(ring->rx_desc_tag);
1339 ring->rx_desc_tag = NULL;
1340 }
1341}
1342
1343
1344static void
1345nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1346{
1347 struct nfe_rx_data *data;
1348 void *desc;
1349 int i, descsize;
1350
1351 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
1352 return;
1353
1354 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1355 desc = ring->jdesc64;
1356 descsize = sizeof (struct nfe_desc64);
1357 } else {
1358 desc = ring->jdesc32;
1359 descsize = sizeof (struct nfe_desc32);
1360 }
1361
1362 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1363 data = &ring->jdata[i];
1364 if (data->rx_data_map != NULL) {
1365 bus_dmamap_destroy(ring->jrx_data_tag,
1366 data->rx_data_map);
1367 data->rx_data_map = NULL;
1368 }
1369 if (data->m != NULL) {
1370 m_freem(data->m);
1371 data->m = NULL;
1372 }
1373 }
1374 if (ring->jrx_data_tag != NULL) {
1375 if (ring->jrx_spare_map != NULL) {
1376 bus_dmamap_destroy(ring->jrx_data_tag,
1377 ring->jrx_spare_map);
1378 ring->jrx_spare_map = NULL;
1379 }
1380 bus_dma_tag_destroy(ring->jrx_data_tag);
1381 ring->jrx_data_tag = NULL;
1382 }
1383
1384 if (desc != NULL) {
1385 bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
1386 bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
1387 ring->jdesc64 = NULL;
1388 ring->jdesc32 = NULL;
1389 ring->jrx_desc_map = NULL;
1390 }
1391
1392 if (ring->jrx_desc_tag != NULL) {
1393 bus_dma_tag_destroy(ring->jrx_desc_tag);
1394 ring->jrx_desc_tag = NULL;
1395 }
1396}
1397
1398
1399static int
1400nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1401{
1402 struct nfe_dmamap_arg ctx;
1403 int i, error;
1404 void *desc;
1405 int descsize;
1406
1407 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1408 desc = ring->desc64;
1409 descsize = sizeof (struct nfe_desc64);
1410 } else {
1411 desc = ring->desc32;
1412 descsize = sizeof (struct nfe_desc32);
1413 }
1414
1415 ring->queued = 0;
1416 ring->cur = ring->next = 0;
1417
1418 error = bus_dma_tag_create(sc->nfe_parent_tag,
1419 NFE_RING_ALIGN, 0, /* alignment, boundary */
1420 BUS_SPACE_MAXADDR, /* lowaddr */
1421 BUS_SPACE_MAXADDR, /* highaddr */
1422 NULL, NULL, /* filter, filterarg */
1423 NFE_TX_RING_COUNT * descsize, 1, /* maxsize, nsegments */
1424 NFE_TX_RING_COUNT * descsize, /* maxsegsize */
1425 0, /* flags */
1426 NULL, NULL, /* lockfunc, lockarg */
1427 &ring->tx_desc_tag);
1428 if (error != 0) {
1429 device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
1430 goto fail;
1431 }
1432
1433 error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
1434 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
1435 if (error != 0) {
1436 device_printf(sc->nfe_dev, "could not create desc DMA map\n");
1437 goto fail;
1438 }
1439 if (sc->nfe_flags & NFE_40BIT_ADDR)
1440 ring->desc64 = desc;
1441 else
1442 ring->desc32 = desc;
1443
1444 ctx.nfe_busaddr = 0;
1445 error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
1446 NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1447 if (error != 0) {
1448 device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1449 goto fail;
1450 }
1451 ring->physaddr = ctx.nfe_busaddr;
1452
1453 error = bus_dma_tag_create(sc->nfe_parent_tag,
1454 1, 0,
1455 BUS_SPACE_MAXADDR,
1456 BUS_SPACE_MAXADDR,
1457 NULL, NULL,
1458 NFE_TSO_MAXSIZE,
1459 NFE_MAX_SCATTER,
1460 NFE_TSO_MAXSGSIZE,
1461 0,
1462 NULL, NULL,
1463 &ring->tx_data_tag);
1464 if (error != 0) {
1465 device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
1466 goto fail;
1467 }
1468
1469 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1470 error = bus_dmamap_create(ring->tx_data_tag, 0,
1471 &ring->data[i].tx_data_map);
1472 if (error != 0) {
1473 device_printf(sc->nfe_dev,
1474 "could not create Tx DMA map\n");
1475 goto fail;
1476 }
1477 }
1478
1479fail:
1480 return (error);
1481}
1482
1483
1484static void
1485nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1486{
1487 void *desc;
1488 size_t descsize;
1489
1490 sc->nfe_force_tx = 0;
1491 ring->queued = 0;
1492 ring->cur = ring->next = 0;
1493 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1494 desc = ring->desc64;
1495 descsize = sizeof (struct nfe_desc64);
1496 } else {
1497 desc = ring->desc32;
1498 descsize = sizeof (struct nfe_desc32);
1499 }
1500 bzero(desc, descsize * NFE_TX_RING_COUNT);
1501
1502 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1503 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1504}
1505
1506
1507static void
1508nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1509{
1510 struct nfe_tx_data *data;
1511 void *desc;
1512 int i, descsize;
1513
1514 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1515 desc = ring->desc64;
1516 descsize = sizeof (struct nfe_desc64);
1517 } else {
1518 desc = ring->desc32;
1519 descsize = sizeof (struct nfe_desc32);
1520 }
1521
1522 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1523 data = &ring->data[i];
1524
1525 if (data->m != NULL) {
1526 bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
1527 BUS_DMASYNC_POSTWRITE);
1528 bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
1529 m_freem(data->m);
1530 data->m = NULL;
1531 }
1532 if (data->tx_data_map != NULL) {
1533 bus_dmamap_destroy(ring->tx_data_tag,
1534 data->tx_data_map);
1535 data->tx_data_map = NULL;
1536 }
1537 }
1538
1539 if (ring->tx_data_tag != NULL) {
1540 bus_dma_tag_destroy(ring->tx_data_tag);
1541 ring->tx_data_tag = NULL;
1542 }
1543
1544 if (desc != NULL) {
1545 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1546 BUS_DMASYNC_POSTWRITE);
1547 bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
1548 bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
1549 ring->desc64 = NULL;
1550 ring->desc32 = NULL;
1551 ring->tx_desc_map = NULL;
1552 bus_dma_tag_destroy(ring->tx_desc_tag);
1553 ring->tx_desc_tag = NULL;
1554 }
1555}
1556
1557#ifdef DEVICE_POLLING
1558static poll_handler_t nfe_poll;
1559
1560
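/*
 * Polling entry point: with interrupts disabled, process up to
 * 'count' received frames and reclaim Tx descriptors; on
 * POLL_AND_CHECK_STATUS also acknowledge pending events such as
 * link state changes.
 */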
1561static int
1562nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1563{
1564 struct nfe_softc *sc = ifp->if_softc;
1565 uint32_t r;
1566 int rx_npkts = 0;
1567
1568 NFE_LOCK(sc);
1569
1570 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1571 NFE_UNLOCK(sc);
1572 return (rx_npkts);
1573 }
1574
1575 	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1576 		nfe_jrxeof(sc, count, &rx_npkts);
1577 	else
1578 		nfe_rxeof(sc, count, &rx_npkts);
1579 nfe_txeof(sc);
1580 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1581 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);
1582
1583 if (cmd == POLL_AND_CHECK_STATUS) {
1584 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1585 NFE_UNLOCK(sc);
1586 return (rx_npkts);
1587 }
1588 NFE_WRITE(sc, sc->nfe_irq_status, r);
1589
1590 if (r & NFE_IRQ_LINK) {
1591 NFE_READ(sc, NFE_PHY_STATUS);
1592 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1593 DPRINTF(sc, "link state changed\n");
1594 }
1595 }
1596 NFE_UNLOCK(sc);
1597 return (rx_npkts);
1598}
1599#endif /* DEVICE_POLLING */
1600
1601static void
1602nfe_set_intr(struct nfe_softc *sc)
1603{
1604
1605 if (sc->nfe_msi != 0)
1606 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1607}
1608
1609
1610 /* In MSIX, a write to mask registers behaves as XOR. */
1611static __inline void
1612nfe_enable_intr(struct nfe_softc *sc)
1613{
1614
1615 if (sc->nfe_msix != 0) {
1616 /* XXX Should have a better way to enable interrupts! */
1617 if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
1618 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
1619 } else
1620 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
1621}
1622
1623
1624static __inline void
1625nfe_disable_intr(struct nfe_softc *sc)
1626{
1627
1628 if (sc->nfe_msix != 0) {
1629 /* XXX Should have a better way to disable interrupts! */
1630 if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
1631 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
1632 } else
1633 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
1634}
1635
1636
1637static int
1638nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1639{
1640 struct nfe_softc *sc;
1641 struct ifreq *ifr;
1642 struct mii_data *mii;
1643 int error, init, mask;
1644
1645 sc = ifp->if_softc;
1646 ifr = (struct ifreq *) data;
1647 error = 0;
1648 init = 0;
1649 switch (cmd) {
1650 case SIOCSIFMTU:
1651 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
1652 error = EINVAL;
1653 else if (ifp->if_mtu != ifr->ifr_mtu) {
1654 if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
1655 (sc->nfe_jumbo_disable != 0)) &&
1656 ifr->ifr_mtu > ETHERMTU)
1657 error = EINVAL;
1658 else {
1659 NFE_LOCK(sc);
1660 ifp->if_mtu = ifr->ifr_mtu;
1661 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1662 nfe_init_locked(sc);
1663 NFE_UNLOCK(sc);
1664 }
1665 }
1666 break;
1667 case SIOCSIFFLAGS:
1668 NFE_LOCK(sc);
1669 if (ifp->if_flags & IFF_UP) {
1670 /*
1671 * If only the PROMISC or ALLMULTI flag changes, then
1672 * don't do a full re-init of the chip, just update
1673 * the Rx filter.
1674 */
1675 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1676 ((ifp->if_flags ^ sc->nfe_if_flags) &
1677 (IFF_ALLMULTI | IFF_PROMISC)) != 0)
1678 nfe_setmulti(sc);
1679 else
1680 nfe_init_locked(sc);
1681 } else {
1682 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1683 nfe_stop(ifp);
1684 }
1685 sc->nfe_if_flags = ifp->if_flags;
1686 NFE_UNLOCK(sc);
1687 error = 0;
1688 break;
1689 case SIOCADDMULTI:
1690 case SIOCDELMULTI:
1691 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1692 NFE_LOCK(sc);
1693 nfe_setmulti(sc);
1694 NFE_UNLOCK(sc);
1695 error = 0;
1696 }
1697 break;
1698 case SIOCSIFMEDIA:
1699 case SIOCGIFMEDIA:
1700 mii = device_get_softc(sc->nfe_miibus);
1701 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1702 break;
1703 case SIOCSIFCAP:
1704 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1705#ifdef DEVICE_POLLING
1706 if ((mask & IFCAP_POLLING) != 0) {
1707 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
1708 error = ether_poll_register(nfe_poll, ifp);
1709 if (error)
1710 break;
1711 NFE_LOCK(sc);
1712 nfe_disable_intr(sc);
1713 ifp->if_capenable |= IFCAP_POLLING;
1714 NFE_UNLOCK(sc);
1715 } else {
1716 error = ether_poll_deregister(ifp);
1717 /* Enable interrupt even in error case */
1718 NFE_LOCK(sc);
1719 nfe_enable_intr(sc);
1720 ifp->if_capenable &= ~IFCAP_POLLING;
1721 NFE_UNLOCK(sc);
1722 }
1723 }
1724#endif /* DEVICE_POLLING */
1725 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
1726 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
1727 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1728
1729 if ((sc->nfe_flags & NFE_HW_CSUM) != 0 &&
1730 (mask & IFCAP_HWCSUM) != 0) {
1731 ifp->if_capenable ^= IFCAP_HWCSUM;
1732 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
1733 (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
1734 ifp->if_hwassist |= NFE_CSUM_FEATURES;
1735 else
1736 ifp->if_hwassist &= ~NFE_CSUM_FEATURES;
1737 init++;
1738 }
1739 if ((sc->nfe_flags & NFE_HW_VLAN) != 0 &&
1740 (mask & IFCAP_VLAN_HWTAGGING) != 0) {
1741 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1742 init++;
1743 }
1744 /*
1745 * XXX
1746 * It seems that VLAN stripping requires Rx checksum offload.
1747 * Unfortunately FreeBSD has no way to disable only Rx side
1748 * VLAN stripping. So when we know Rx checksum offload is
1749 * disabled turn entire hardware VLAN assist off.
1750 */
1751 if ((sc->nfe_flags & (NFE_HW_CSUM | NFE_HW_VLAN)) ==
1752 (NFE_HW_CSUM | NFE_HW_VLAN)) {
1753 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
1754 ifp->if_capenable &= ~IFCAP_VLAN_HWTAGGING;
1755 }
1756
1757 if ((sc->nfe_flags & NFE_HW_CSUM) != 0 &&
1758 (mask & IFCAP_TSO4) != 0) {
1759 ifp->if_capenable ^= IFCAP_TSO4;
1760 if ((IFCAP_TSO4 & ifp->if_capenable) != 0 &&
1761 (IFCAP_TSO4 & ifp->if_capabilities) != 0)
1762 ifp->if_hwassist |= CSUM_TSO;
1763 else
1764 ifp->if_hwassist &= ~CSUM_TSO;
1765 }
1766
1767 if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1768 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1769 nfe_init(sc);
1770 }
1771 if ((sc->nfe_flags & NFE_HW_VLAN) != 0)
1772 VLAN_CAPABILITIES(ifp);
1773 break;
1774 default:
1775 error = ether_ioctl(ifp, cmd, data);
1776 break;
1777 }
1778
1779 return (error);
1780}
1781
1782
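/*
 * Interrupt filter: verify the interrupt is ours, mask further
 * interrupts and hand the actual processing off to nfe_int_task()
 * through the fast taskqueue.
 */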
1783static int
1784nfe_intr(void *arg)
1785{
1786 struct nfe_softc *sc;
1787 uint32_t status;
1788
1789 sc = (struct nfe_softc *)arg;
1790
1791 status = NFE_READ(sc, sc->nfe_irq_status);
1792 if (status == 0 || status == 0xffffffff)
1793 return (FILTER_STRAY);
1794 nfe_disable_intr(sc);
1795 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
1796
1797 return (FILTER_HANDLED);
1798}
1799
1800
1801static void
1802nfe_int_task(void *arg, int pending)
1803{
1804 struct nfe_softc *sc = arg;
1805 struct ifnet *ifp = sc->nfe_ifp;
1806 uint32_t r;
1807 int domore;
1808
1809 NFE_LOCK(sc);
1810
1811 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1812 nfe_enable_intr(sc);
1813 NFE_UNLOCK(sc);
1814 return; /* not for us */
1815 }
1816 NFE_WRITE(sc, sc->nfe_irq_status, r);
1817
1818 DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r);
1819
1820#ifdef DEVICE_POLLING
1821 if (ifp->if_capenable & IFCAP_POLLING) {
1822 NFE_UNLOCK(sc);
1823 return;
1824 }
1825#endif
1826
1827 if (r & NFE_IRQ_LINK) {
1828 NFE_READ(sc, NFE_PHY_STATUS);
1829 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1830 DPRINTF(sc, "link state changed\n");
1831 }
1832
1833 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1834 NFE_UNLOCK(sc);
1835 nfe_enable_intr(sc);
1836 return;
1837 }
1838
1839 domore = 0;
1840 /* check Rx ring */
1841 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1842 domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL);
1843 else
1844 domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL);
1845 /* check Tx ring */
1846 nfe_txeof(sc);
1847
1848 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1849 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);
1850
1851 NFE_UNLOCK(sc);
1852
1853 if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
1854 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
1855 return;
1856 }
1857
1858 /* Reenable interrupts. */
1859 nfe_enable_intr(sc);
1860}
1861
1862
1863static __inline void
1864nfe_discard_rxbuf(struct nfe_softc *sc, int idx)
1865{
1866 struct nfe_desc32 *desc32;
1867 struct nfe_desc64 *desc64;
1868 struct nfe_rx_data *data;
1869 struct mbuf *m;
1870
1871 data = &sc->rxq.data[idx];
1872 m = data->m;
1873
1874 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1875 desc64 = &sc->rxq.desc64[idx];
1876 /* VLAN packet may have overwritten it. */
1877 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1878 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1879 desc64->length = htole16(m->m_len);
1880 desc64->flags = htole16(NFE_RX_READY);
1881 } else {
1882 desc32 = &sc->rxq.desc32[idx];
1883 desc32->length = htole16(m->m_len);
1884 desc32->flags = htole16(NFE_RX_READY);
1885 }
1886}
1887
1888
1889static __inline void
1890nfe_discard_jrxbuf(struct nfe_softc *sc, int idx)
1891{
1892 struct nfe_desc32 *desc32;
1893 struct nfe_desc64 *desc64;
1894 struct nfe_rx_data *data;
1895 struct mbuf *m;
1896
1897 data = &sc->jrxq.jdata[idx];
1898 m = data->m;
1899
1900 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1901 desc64 = &sc->jrxq.jdesc64[idx];
1902 /* VLAN packet may have overwritten it. */
1903 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1904 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1905 desc64->length = htole16(m->m_len);
1906 desc64->flags = htole16(NFE_RX_READY);
1907 } else {
1908 desc32 = &sc->jrxq.jdesc32[idx];
1909 desc32->length = htole16(m->m_len);
1910 desc32->flags = htole16(NFE_RX_READY);
1911 }
1912}
1913
1914
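/*
 * Attach a fresh mbuf cluster to Rx slot 'idx'.  The cluster is
 * loaded into the spare DMA map first and the maps are swapped only
 * on success, so the currently mapped buffer is never lost when
 * allocation or loading fails.
 */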
1915static int
1916nfe_newbuf(struct nfe_softc *sc, int idx)
1917{
1918 struct nfe_rx_data *data;
1919 struct nfe_desc32 *desc32;
1920 struct nfe_desc64 *desc64;
1921 struct mbuf *m;
1922 bus_dma_segment_t segs[1];
1923 bus_dmamap_t map;
1924 int nsegs;
1925
1926 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1927 if (m == NULL)
1928 return (ENOBUFS);
1929
1930 m->m_len = m->m_pkthdr.len = MCLBYTES;
1931 m_adj(m, ETHER_ALIGN);
1932
1933 if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map,
1934 m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
1935 m_freem(m);
1936 return (ENOBUFS);
1937 }
1938 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1939
1940 data = &sc->rxq.data[idx];
1941 if (data->m != NULL) {
1942 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
1943 BUS_DMASYNC_POSTREAD);
1944 bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map);
1945 }
1946 map = data->rx_data_map;
1947 data->rx_data_map = sc->rxq.rx_spare_map;
1948 sc->rxq.rx_spare_map = map;
1949 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
1950 BUS_DMASYNC_PREREAD);
1951 data->paddr = segs[0].ds_addr;
1952 data->m = m;
1953 /* update mapping address in h/w descriptor */
1954 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1955 desc64 = &sc->rxq.desc64[idx];
1956 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
1957 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
1958 desc64->length = htole16(segs[0].ds_len);
1959 desc64->flags = htole16(NFE_RX_READY);
1960 } else {
1961 desc32 = &sc->rxq.desc32[idx];
1962 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
1963 desc32->length = htole16(segs[0].ds_len);
1964 desc32->flags = htole16(NFE_RX_READY);
1965 }
1966
1967 return (0);
1968}
1969
1970
1971static int
1972nfe_jnewbuf(struct nfe_softc *sc, int idx)
1973{
1974 struct nfe_rx_data *data;
1975 struct nfe_desc32 *desc32;
1976 struct nfe_desc64 *desc64;
1977 struct mbuf *m;
1978 bus_dma_segment_t segs[1];
1979 bus_dmamap_t map;
1980 int nsegs;
1981
1982 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
1983 if (m == NULL)
1984 return (ENOBUFS);
1985 if ((m->m_flags & M_EXT) == 0) {
1986 m_freem(m);
1987 return (ENOBUFS);
1988 }
1989 m->m_pkthdr.len = m->m_len = MJUM9BYTES;
1990 m_adj(m, ETHER_ALIGN);
1991
1992 if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag,
1993 sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
1994 m_freem(m);
1995 return (ENOBUFS);
1996 }
1997 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1998
1999 data = &sc->jrxq.jdata[idx];
2000 if (data->m != NULL) {
2001 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
2002 BUS_DMASYNC_POSTREAD);
2003 bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map);
2004 }
2005 map = data->rx_data_map;
2006 data->rx_data_map = sc->jrxq.jrx_spare_map;
2007 sc->jrxq.jrx_spare_map = map;
2008 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
2009 BUS_DMASYNC_PREREAD);
2010 data->paddr = segs[0].ds_addr;
2011 data->m = m;
2012 /* update mapping address in h/w descriptor */
2013 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2014 desc64 = &sc->jrxq.jdesc64[idx];
2015 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
2016 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2017 desc64->length = htole16(segs[0].ds_len);
2018 desc64->flags = htole16(NFE_RX_READY);
2019 } else {
2020 desc32 = &sc->jrxq.jdesc32[idx];
2021 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2022 desc32->length = htole16(segs[0].ds_len);
2023 desc32->flags = htole16(NFE_RX_READY);
2024 }
2025
2026 return (0);
2027}
2028
2029
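/*
 * Rx processing for the standard ring: walk descriptors until one is
 * still owned by the hardware (NFE_RX_READY set), drop frames with
 * errors, replace the buffer through nfe_newbuf() and pass the
 * received mbuf to the stack with VLAN and checksum status attached.
 */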
2030static int
2031nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
2032{
2033 struct ifnet *ifp = sc->nfe_ifp;
2034 struct nfe_desc32 *desc32;
2035 struct nfe_desc64 *desc64;
2036 struct nfe_rx_data *data;
2037 struct mbuf *m;
2038 uint16_t flags;
2039 int len, prog, rx_npkts;
2040 uint32_t vtag = 0;
2041
2042 rx_npkts = 0;
2043 NFE_LOCK_ASSERT(sc);
2044
2045 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2046 BUS_DMASYNC_POSTREAD);
2047
2048 for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
2049 if (count <= 0)
2050 break;
2051 count--;
2052
2053 data = &sc->rxq.data[sc->rxq.cur];
2054
2055 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2056 desc64 = &sc->rxq.desc64[sc->rxq.cur];
2057 vtag = le32toh(desc64->physaddr[1]);
2058 flags = le16toh(desc64->flags);
2059 len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2060 } else {
2061 desc32 = &sc->rxq.desc32[sc->rxq.cur];
2062 flags = le16toh(desc32->flags);
2063 len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2064 }
2065
2066 if (flags & NFE_RX_READY)
2067 break;
2068 prog++;
2069 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2070 if (!(flags & NFE_RX_VALID_V1)) {
2071 ifp->if_ierrors++;
2072 nfe_discard_rxbuf(sc, sc->rxq.cur);
2073 continue;
2074 }
2075 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2076 flags &= ~NFE_RX_ERROR;
2077 len--; /* fix buffer length */
2078 }
2079 } else {
2080 if (!(flags & NFE_RX_VALID_V2)) {
2081 ifp->if_ierrors++;
2082 nfe_discard_rxbuf(sc, sc->rxq.cur);
2083 continue;
2084 }
2085
2086 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2087 flags &= ~NFE_RX_ERROR;
2088 len--; /* fix buffer length */
2089 }
2090 }
2091
2092 if (flags & NFE_RX_ERROR) {
2093 ifp->if_ierrors++;
2094 nfe_discard_rxbuf(sc, sc->rxq.cur);
2095 continue;
2096 }
2097
2098 m = data->m;
2099 if (nfe_newbuf(sc, sc->rxq.cur) != 0) {
2100 ifp->if_iqdrops++;
2101 nfe_discard_rxbuf(sc, sc->rxq.cur);
2102 continue;
2103 }
2104
2105 if ((vtag & NFE_RX_VTAG) != 0 &&
2106 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2107 m->m_pkthdr.ether_vtag = vtag & 0xffff;
2108 m->m_flags |= M_VLANTAG;
2109 }
2110
2111 m->m_pkthdr.len = m->m_len = len;
2112 m->m_pkthdr.rcvif = ifp;
2113
2114 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2115 if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2116 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2117 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2118 if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2119 (flags & NFE_RX_UDP_CSUMOK) != 0) {
2120 m->m_pkthdr.csum_flags |=
2121 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2122 m->m_pkthdr.csum_data = 0xffff;
2123 }
2124 }
2125 }
2126
2127 ifp->if_ipackets++;
2128
2129 NFE_UNLOCK(sc);
2130 (*ifp->if_input)(ifp, m);
2131 NFE_LOCK(sc);
2132 rx_npkts++;
2133 }
2134
2135 if (prog > 0)
2136 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2137 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2138
2139 if (rx_npktsp != NULL)
2140 *rx_npktsp = rx_npkts;
2141 return (count > 0 ? 0 : EAGAIN);
2142}
2143
2144
2145static int
2146nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
2147{
2148 struct ifnet *ifp = sc->nfe_ifp;
2149 struct nfe_desc32 *desc32;
2150 struct nfe_desc64 *desc64;
2151 struct nfe_rx_data *data;
2152 struct mbuf *m;
2153 uint16_t flags;
2154 int len, prog, rx_npkts;
2155 uint32_t vtag = 0;
2156
2157 rx_npkts = 0;
2158 NFE_LOCK_ASSERT(sc);
2159
2160 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2161 BUS_DMASYNC_POSTREAD);
2162
2163 for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
2164 vtag = 0) {
2165 if (count <= 0)
2166 break;
2167 count--;
2168
2169 data = &sc->jrxq.jdata[sc->jrxq.jcur];
2170
2171 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2172 desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
2173 vtag = le32toh(desc64->physaddr[1]);
2174 flags = le16toh(desc64->flags);
2175 len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2176 } else {
2177 desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
2178 flags = le16toh(desc32->flags);
2179 len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2180 }
2181
2182 if (flags & NFE_RX_READY)
2183 break;
2184 prog++;
2185 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2186 if (!(flags & NFE_RX_VALID_V1)) {
2187 ifp->if_ierrors++;
2188 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2189 continue;
2190 }
2191 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2192 flags &= ~NFE_RX_ERROR;
2193 len--; /* fix buffer length */
2194 }
2195 } else {
2196 if (!(flags & NFE_RX_VALID_V2)) {
2197 ifp->if_ierrors++;
2198 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2199 continue;
2200 }
2201
2202 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2203 flags &= ~NFE_RX_ERROR;
2204 len--; /* fix buffer length */
2205 }
2206 }
2207
2208 if (flags & NFE_RX_ERROR) {
2209 ifp->if_ierrors++;
2210 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2211 continue;
2212 }
2213
2214 m = data->m;
2215 if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
2216 ifp->if_iqdrops++;
2217 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2218 continue;
2219 }
2220
2221 if ((vtag & NFE_RX_VTAG) != 0 &&
2222 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2223 m->m_pkthdr.ether_vtag = vtag & 0xffff;
2224 m->m_flags |= M_VLANTAG;
2225 }
2226
2227 m->m_pkthdr.len = m->m_len = len;
2228 m->m_pkthdr.rcvif = ifp;
2229
2230 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2231 if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2232 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2233 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2234 if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2235 (flags & NFE_RX_UDP_CSUMOK) != 0) {
2236 m->m_pkthdr.csum_flags |=
2237 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2238 m->m_pkthdr.csum_data = 0xffff;
2239 }
2240 }
2241 }
2242
2243 ifp->if_ipackets++;
2244
2245 NFE_UNLOCK(sc);
2246 (*ifp->if_input)(ifp, m);
2247 NFE_LOCK(sc);
2248 rx_npkts++;
2249 }
2250
2251 if (prog > 0)
2252 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2253 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2254
2255 if (rx_npktsp != NULL)
2256 *rx_npktsp = rx_npkts;
2257 return (count > 0 ? 0 : EAGAIN);
2258}
2259
2260
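/*
 * Reclaim completed Tx descriptors: scan from txq.next to txq.cur,
 * stop at the first descriptor still owned by the hardware
 * (NFE_TX_VALID set) and free the mbuf once the last fragment of its
 * chain has been transmitted.
 */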
2261static void
2262nfe_txeof(struct nfe_softc *sc)
2263{
2264 struct ifnet *ifp = sc->nfe_ifp;
2265 struct nfe_desc32 *desc32;
2266 struct nfe_desc64 *desc64;
2267 struct nfe_tx_data *data = NULL;
2268 uint16_t flags;
2269 int cons, prog;
2270
2271 NFE_LOCK_ASSERT(sc);
2272
2273 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2274 BUS_DMASYNC_POSTREAD);
2275
2276 prog = 0;
2277 for (cons = sc->txq.next; cons != sc->txq.cur;
2278 NFE_INC(cons, NFE_TX_RING_COUNT)) {
2279 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2280 desc64 = &sc->txq.desc64[cons];
2281 flags = le16toh(desc64->flags);
2282 } else {
2283 desc32 = &sc->txq.desc32[cons];
2284 flags = le16toh(desc32->flags);
2285 }
2286
2287 if (flags & NFE_TX_VALID)
2288 break;
2289
2290 prog++;
2291 sc->txq.queued--;
2292 data = &sc->txq.data[cons];
2293
2294 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2295 if ((flags & NFE_TX_LASTFRAG_V1) == 0)
2296 continue;
2297 if ((flags & NFE_TX_ERROR_V1) != 0) {
2298 device_printf(sc->nfe_dev,
2299 "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR);
2300
2301 ifp->if_oerrors++;
2302 } else
2303 ifp->if_opackets++;
2304 } else {
2305 if ((flags & NFE_TX_LASTFRAG_V2) == 0)
2306 continue;
2307 if ((flags & NFE_TX_ERROR_V2) != 0) {
2308 device_printf(sc->nfe_dev,
2309 "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR);
2310 ifp->if_oerrors++;
2311 } else
2312 ifp->if_opackets++;
2313 }
2314
2315 /* last fragment of the mbuf chain transmitted */
2316 KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
2317 bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
2318 BUS_DMASYNC_POSTWRITE);
2319 bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
2320 m_freem(data->m);
2321 data->m = NULL;
2322 }
2323
2324 if (prog > 0) {
2325 sc->nfe_force_tx = 0;
2326 sc->txq.next = cons;
2327 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2328 if (sc->txq.queued == 0)
2329 sc->nfe_watchdog_timer = 0;
2330 }
2331}
2332
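/*
 * Map an mbuf chain onto Tx descriptors.  Chains with too many
 * fragments are collapsed and reloaded.  Checksum/TSO flags and the
 * VLAN tag are placed in the first descriptor, and its valid bit is
 * set only after the whole chain has been built so the hardware
 * never sees a partial chain.
 */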
2333static int
2334nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
2335{
2336 struct nfe_desc32 *desc32 = NULL;
2337 struct nfe_desc64 *desc64 = NULL;
2338 bus_dmamap_t map;
2339 bus_dma_segment_t segs[NFE_MAX_SCATTER];
2340 int error, i, nsegs, prod, si;
2341 uint32_t tso_segsz;
2342 uint16_t cflags, flags;
2343 struct mbuf *m;
2344
2345 prod = si = sc->txq.cur;
2346 map = sc->txq.data[prod].tx_data_map;
2347
2348 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
2349 &nsegs, BUS_DMA_NOWAIT);
2350 if (error == EFBIG) {
2351 m = m_collapse(*m_head, M_DONTWAIT, NFE_MAX_SCATTER);
2352 if (m == NULL) {
2353 m_freem(*m_head);
2354 *m_head = NULL;
2355 return (ENOBUFS);
2356 }
2357 *m_head = m;
2358 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
2359 *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2360 if (error != 0) {
2361 m_freem(*m_head);
2362 *m_head = NULL;
2363 return (ENOBUFS);
2364 }
2365 } else if (error != 0)
2366 return (error);
2367 if (nsegs == 0) {
2368 m_freem(*m_head);
2369 *m_head = NULL;
2370 return (EIO);
2371 }
2372
2373 if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
2374 bus_dmamap_unload(sc->txq.tx_data_tag, map);
2375 return (ENOBUFS);
2376 }
2377
2378 m = *m_head;
2379 cflags = flags = 0;
2380 tso_segsz = 0;
2381 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2382 tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz <<
2383 NFE_TX_TSO_SHIFT;
2384 cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
2385 cflags |= NFE_TX_TSO;
2386 } else if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
2387 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2388 cflags |= NFE_TX_IP_CSUM;
2389 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2390 cflags |= NFE_TX_TCP_UDP_CSUM;
2391 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2392 cflags |= NFE_TX_TCP_UDP_CSUM;
2393 }
2394
2395 for (i = 0; i < nsegs; i++) {
2396 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2397 desc64 = &sc->txq.desc64[prod];
2398 desc64->physaddr[0] =
2399 htole32(NFE_ADDR_HI(segs[i].ds_addr));
2400 desc64->physaddr[1] =
2401 htole32(NFE_ADDR_LO(segs[i].ds_addr));
2402 desc64->vtag = 0;
2403 desc64->length = htole16(segs[i].ds_len - 1);
2404 desc64->flags = htole16(flags);
2405 } else {
2406 desc32 = &sc->txq.desc32[prod];
2407 desc32->physaddr =
2408 htole32(NFE_ADDR_LO(segs[i].ds_addr));
2409 desc32->length = htole16(segs[i].ds_len - 1);
2410 desc32->flags = htole16(flags);
2411 }
2412
2413 /*
2414 * Setting of the valid bit in the first descriptor is
2415 * deferred until the whole chain is fully setup.
2416 */
2417 flags |= NFE_TX_VALID;
2418
2419 sc->txq.queued++;
2420 NFE_INC(prod, NFE_TX_RING_COUNT);
2421 }
2422
2423 /*
2424 * the whole mbuf chain has been DMA mapped, fix last/first descriptor.
2425 * csum flags, vtag and TSO belong to the first fragment only.
2426 */
2427 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2428 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
2429 desc64 = &sc->txq.desc64[si];
2430 if ((m->m_flags & M_VLANTAG) != 0)
2431 desc64->vtag = htole32(NFE_TX_VTAG |
2432 m->m_pkthdr.ether_vtag);
2433 if (tso_segsz != 0) {
2434 /*
2435 * XXX
2436 * The following indicates the descriptor element
2437 * is a 32bit quantity.
2438 */
2439 desc64->length |= htole16((uint16_t)tso_segsz);
2440 desc64->flags |= htole16(tso_segsz >> 16);
2441 }
2442 /*
2443 * finally, set the valid/checksum/TSO bit in the first
2444 * descriptor.
2445 */
2446 desc64->flags |= htole16(NFE_TX_VALID | cflags);
2447 } else {
2448 if (sc->nfe_flags & NFE_JUMBO_SUP)
2449 desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
2450 else
2451 desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
2452 desc32 = &sc->txq.desc32[si];
2453 if (tso_segsz != 0) {
2454 /*
2455 * XXX
2456 * The following indicates the descriptor element
2457 * is a 32bit quantity.
2458 */
2459 desc32->length |= htole16((uint16_t)tso_segsz);
2460 desc32->flags |= htole16(tso_segsz >> 16);
2461 }
2462 /*
2463 * finally, set the valid/checksum/TSO bit in the first
2464 * descriptor.
2465 */
2466 desc32->flags |= htole16(NFE_TX_VALID | cflags);
2467 }
2468
2469 sc->txq.cur = prod;
2470 prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
2471 sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
2472 sc->txq.data[prod].tx_data_map = map;
2473 sc->txq.data[prod].m = m;
2474
2475 bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
2476
2477 return (0);
2478}
2479
2480
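/*
 * Program the Rx filter.  Multicast matching uses an address/mask
 * pair: the address holds the bits common to every enabled multicast
 * address and the mask marks the bit positions in which they agree.
 * Promiscuous and allmulti modes simply use an all-zero pair.
 */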
2481static void
2482nfe_setmulti(struct nfe_softc *sc)
2483{
2484 struct ifnet *ifp = sc->nfe_ifp;
2485 struct ifmultiaddr *ifma;
2486 int i;
2487 uint32_t filter;
2488 uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
2489 uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
2490 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2491 };
2492
2493 NFE_LOCK_ASSERT(sc);
2494
2495 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
2496 bzero(addr, ETHER_ADDR_LEN);
2497 bzero(mask, ETHER_ADDR_LEN);
2498 goto done;
2499 }
2500
2501 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
2502 bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
2503
2504 if_maddr_rlock(ifp);
2505 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2506 u_char *addrp;
2507
2508 if (ifma->ifma_addr->sa_family != AF_LINK)
2509 continue;
2510
2511 addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
2512 for (i = 0; i < ETHER_ADDR_LEN; i++) {
2513 u_int8_t mcaddr = addrp[i];
2514 addr[i] &= mcaddr;
2515 mask[i] &= ~mcaddr;
2516 }
2517 }
2518 if_maddr_runlock(ifp);
2519
2520 for (i = 0; i < ETHER_ADDR_LEN; i++) {
2521 mask[i] |= addr[i];
2522 }
2523
2524done:
2525 addr[0] |= 0x01; /* make sure multicast bit is set */
2526
2527 NFE_WRITE(sc, NFE_MULTIADDR_HI,
2528 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2529 NFE_WRITE(sc, NFE_MULTIADDR_LO,
2530 addr[5] << 8 | addr[4]);
2531 NFE_WRITE(sc, NFE_MULTIMASK_HI,
2532 mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
2533 NFE_WRITE(sc, NFE_MULTIMASK_LO,
2534 mask[5] << 8 | mask[4]);
2535
2536 filter = NFE_READ(sc, NFE_RXFILTER);
2537 filter &= NFE_PFF_RX_PAUSE;
2538 filter |= NFE_RXFILTER_MAGIC;
2539 filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
2540 NFE_WRITE(sc, NFE_RXFILTER, filter);
2541}
2542
2543
2544static void
2545nfe_tx_task(void *arg, int pending)
2546{
2547 struct ifnet *ifp;
2548
2549 ifp = (struct ifnet *)arg;
2550 nfe_start(ifp);
2551}
2552
2553
2554static void
2555nfe_start(struct ifnet *ifp)
2556{
2557 struct nfe_softc *sc = ifp->if_softc;
2558 struct mbuf *m0;
2559 int enq;
2560
2561 NFE_LOCK(sc);
2562
2563 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2564 IFF_DRV_RUNNING || sc->nfe_link == 0) {
2565 NFE_UNLOCK(sc);
2566 return;
2567 }
2568
2569 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
2570 IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
2571 if (m0 == NULL)
2572 break;
2573
2574 if (nfe_encap(sc, &m0) != 0) {
2575 if (m0 == NULL)
2576 break;
2577 IFQ_DRV_PREPEND(&ifp->if_snd, m0);
2578 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2579 break;
2580 }
2581 enq++;
2582 ETHER_BPF_MTAP(ifp, m0);
2583 }
2584
2585 if (enq > 0) {
2586 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2587 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2588
2589 /* kick Tx */
2590 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2591
2592 /*
2593 * Set a timeout in case the chip goes out to lunch.
2594 */
2595 sc->nfe_watchdog_timer = 5;
2596 }
2597
2598 NFE_UNLOCK(sc);
2599}
2600
2601
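/*
 * Tx watchdog: first assume a missed completion interrupt and try to
 * reclaim descriptors; if frames remain queued, re-issue the Tx kick
 * up to a few times before declaring a timeout and reinitializing
 * the chip.
 */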
2602static void
2603nfe_watchdog(struct ifnet *ifp)
2604{
2605 struct nfe_softc *sc = ifp->if_softc;
2606
2607 if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
2608 return;
2609
2610 /* Check if we've lost Tx completion interrupt. */
2611 nfe_txeof(sc);
2612 if (sc->txq.queued == 0) {
2613 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2614 "-- recovering\n");
2615 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2616 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);
2617 return;
2618 }
2619 /* Check if we've lost start Tx command. */
2620 sc->nfe_force_tx++;
2621 if (sc->nfe_force_tx <= 3) {
2622 /*
2623 * If this is the case for watchdog timeout, the following
2624 * code should go to nfe_txeof().
2625 */
2626 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2627 return;
2628 }
2629 sc->nfe_force_tx = 0;
2630
2631 if_printf(ifp, "watchdog timeout\n");
2632
2633 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2634 ifp->if_oerrors++;
2635 nfe_init_locked(sc);
2636}
2637
2638
2639static void
2640nfe_init(void *xsc)
2641{
2642 struct nfe_softc *sc = xsc;
2643
2644 NFE_LOCK(sc);
2645 nfe_init_locked(sc);
2646 NFE_UNLOCK(sc);
2647}
2648
2649
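/*
 * Bring the chip up: initialize the rings, program the Rx/Tx control
 * and VLAN settings, MAC address, ring addresses and sizes,
 * interrupt moderation and the Rx filter, then start the Rx/Tx
 * engines and the MII tick callout.
 */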
2650static void
2651nfe_init_locked(void *xsc)
2652{
2653 struct nfe_softc *sc = xsc;
2654 struct ifnet *ifp = sc->nfe_ifp;
2655 struct mii_data *mii;
2656 uint32_t val;
2657 int error;
2658
2659 NFE_LOCK_ASSERT(sc);
2660
2661 mii = device_get_softc(sc->nfe_miibus);
2662
2663 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2664 return;
2665
2666 nfe_stop(ifp);
2667
2668 sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
2669
2670 nfe_init_tx_ring(sc, &sc->txq);
2671 if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
2672 error = nfe_init_jrx_ring(sc, &sc->jrxq);
2673 else
2674 error = nfe_init_rx_ring(sc, &sc->rxq);
2675 if (error != 0) {
2676 device_printf(sc->nfe_dev,
2677 "initialization failed: no memory for rx buffers\n");
2678 nfe_stop(ifp);
2679 return;
2680 }
2681
2682 val = 0;
2683 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
2684 val |= NFE_MAC_ADDR_INORDER;
2685 NFE_WRITE(sc, NFE_TX_UNK, val);
2686 NFE_WRITE(sc, NFE_STATUS, 0);
2687
2688 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
2689 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);
2690
2691 sc->rxtxctl = NFE_RXTX_BIT2;
2692 if (sc->nfe_flags & NFE_40BIT_ADDR)
2693 sc->rxtxctl |= NFE_RXTX_V3MAGIC;
2694 else if (sc->nfe_flags & NFE_JUMBO_SUP)
2695 sc->rxtxctl |= NFE_RXTX_V2MAGIC;
2696
2697 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2698 sc->rxtxctl |= NFE_RXTX_RXCSUM;
2699 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2700 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;
2701
2702 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
2703 DELAY(10);
2704 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2705
2706 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2707 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
2708 else
2709 NFE_WRITE(sc, NFE_VTAG_CTL, 0);
2710
2711 NFE_WRITE(sc, NFE_SETUP_R6, 0);
2712
2713 /* set MAC address */
2714 nfe_set_macaddr(sc, IF_LLADDR(ifp));
2715
2716 /* tell MAC where rings are in memory */
2717 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
2718 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2719 NFE_ADDR_HI(sc->jrxq.jphysaddr));
2720 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2721 NFE_ADDR_LO(sc->jrxq.jphysaddr));
2722 } else {
2723 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2724 NFE_ADDR_HI(sc->rxq.physaddr));
2725 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2726 NFE_ADDR_LO(sc->rxq.physaddr));
2727 }
2728 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
2729 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));
2730
2731 NFE_WRITE(sc, NFE_RING_SIZE,
2732 (NFE_RX_RING_COUNT - 1) << 16 |
2733 (NFE_TX_RING_COUNT - 1));
2734
2735 NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);
2736
2737 /* force MAC to wakeup */
2738 val = NFE_READ(sc, NFE_PWR_STATE);
2739 if ((val & NFE_PWR_WAKEUP) == 0)
2740 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
2741 DELAY(10);
2742 val = NFE_READ(sc, NFE_PWR_STATE);
2743 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);
2744
2745#if 1
2746 /* configure interrupts coalescing/mitigation */
2747 NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
2748#else
2749 /* no interrupt mitigation: one interrupt per packet */
2750 NFE_WRITE(sc, NFE_IMTIMER, 970);
2751#endif
2752
2753 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
2754 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
2755 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
2756
2757 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
2758 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
2759
2760 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
2761 /* Disable WOL. */
2762 NFE_WRITE(sc, NFE_WOL_CTL, 0);
2763
2764 sc->rxtxctl &= ~NFE_RXTX_BIT2;
2765 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2766 DELAY(10);
2767 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
2768
2769 /* set Rx filter */
2770 nfe_setmulti(sc);
2771
2772 /* enable Rx */
2773 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
2774
2775 /* enable Tx */
2776 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
2777
2778 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
2779
2780 /* Clear hardware stats. */
2781 nfe_stats_clear(sc);
2782
2783#ifdef DEVICE_POLLING
2784 if (ifp->if_capenable & IFCAP_POLLING)
2785 nfe_disable_intr(sc);
2786 else
2787#endif
2788 nfe_set_intr(sc);
2789 nfe_enable_intr(sc); /* enable interrupts */
2790
2791 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2792 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2793
2794 sc->nfe_link = 0;
2795 mii_mediachg(mii);
2796
2797 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2798}
2799
2800
2801static void
2802nfe_stop(struct ifnet *ifp)
2803{
2804 struct nfe_softc *sc = ifp->if_softc;
2805 struct nfe_rx_ring *rx_ring;
2806 struct nfe_jrx_ring *jrx_ring;
2807 struct nfe_tx_ring *tx_ring;
2808 struct nfe_rx_data *rdata;
2809 struct nfe_tx_data *tdata;
2810 int i;
2811
2812 NFE_LOCK_ASSERT(sc);
2813
2814 sc->nfe_watchdog_timer = 0;
2815 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2816
2817 callout_stop(&sc->nfe_stat_ch);
2818
2819 /* abort Tx */
2820 NFE_WRITE(sc, NFE_TX_CTL, 0);
2821
2822 /* disable Rx */
2823 NFE_WRITE(sc, NFE_RX_CTL, 0);
2824
2825 /* disable interrupts */
2826 nfe_disable_intr(sc);
2827
2828 sc->nfe_link = 0;
2829
2830 /* free Rx and Tx mbufs still in the queues. */
2831 rx_ring = &sc->rxq;
2832 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
2833 rdata = &rx_ring->data[i];
2834 if (rdata->m != NULL) {
2835 bus_dmamap_sync(rx_ring->rx_data_tag,
2836 rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2837 bus_dmamap_unload(rx_ring->rx_data_tag,
2838 rdata->rx_data_map);
2839 m_freem(rdata->m);
2840 rdata->m = NULL;
2841 }
2842 }
2843
2844 if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
2845 jrx_ring = &sc->jrxq;
2846 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
2847 rdata = &jrx_ring->jdata[i];
2848 if (rdata->m != NULL) {
2849 bus_dmamap_sync(jrx_ring->jrx_data_tag,
2850 rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2851 bus_dmamap_unload(jrx_ring->jrx_data_tag,
2852 rdata->rx_data_map);
2853 m_freem(rdata->m);
2854 rdata->m = NULL;
2855 }
2856 }
2857 }
2858
2859 tx_ring = &sc->txq;
2860 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
2861 tdata = &tx_ring->data[i];
2862 if (tdata->m != NULL) {
2863 bus_dmamap_sync(tx_ring->tx_data_tag,
2864 tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
2865 bus_dmamap_unload(tx_ring->tx_data_tag,
2866 tdata->tx_data_map);
2867 m_freem(tdata->m);
2868 tdata->m = NULL;
2869 }
2870 }
2871 /* Update hardware stats. */
2872 nfe_stats_update(sc);
2873}
2874
2875
2876static int
2877nfe_ifmedia_upd(struct ifnet *ifp)
2878{
2879 struct nfe_softc *sc = ifp->if_softc;
2880 struct mii_data *mii;
2881
2882 NFE_LOCK(sc);
2883 mii = device_get_softc(sc->nfe_miibus);
2884 mii_mediachg(mii);
2885 NFE_UNLOCK(sc);
2886
2887 return (0);
2888}
2889
2890
2891static void
2892nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2893{
2894 struct nfe_softc *sc;
2895 struct mii_data *mii;
2896
2897 sc = ifp->if_softc;
2898
2899 NFE_LOCK(sc);
2900 mii = device_get_softc(sc->nfe_miibus);
2901 mii_pollstat(mii);
2902 NFE_UNLOCK(sc);
2903
2904 ifmr->ifm_active = mii->mii_media_active;
2905 ifmr->ifm_status = mii->mii_media_status;
2906}
2907
2908
2909void
2910nfe_tick(void *xsc)
2911{
2912 struct nfe_softc *sc;
2913 struct mii_data *mii;
2914 struct ifnet *ifp;
2915
2916 sc = (struct nfe_softc *)xsc;
2917
2918 NFE_LOCK_ASSERT(sc);
2919
2920 ifp = sc->nfe_ifp;
2921
2922 mii = device_get_softc(sc->nfe_miibus);
2923 mii_tick(mii);
2924 nfe_stats_update(sc);
2925 nfe_watchdog(ifp);
2926 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2927}
2928
2929
2930static int
2931nfe_shutdown(device_t dev)
2932{
2933
2934 return (nfe_suspend(dev));
2935}
2936
2937
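/*
 * Read the station address from the MACADDR registers.  The byte order
 * depends on whether the controller already stores the address in the
 * corrected order (NFE_CORRECT_MACADDR).
 */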
2938static void
2939nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
2940{
2941 uint32_t val;
2942
2943 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
2944 val = NFE_READ(sc, NFE_MACADDR_LO);
2945 addr[0] = (val >> 8) & 0xff;
2946 addr[1] = (val & 0xff);
2947
2948 val = NFE_READ(sc, NFE_MACADDR_HI);
2949 addr[2] = (val >> 24) & 0xff;
2950 addr[3] = (val >> 16) & 0xff;
2951 addr[4] = (val >> 8) & 0xff;
2952 addr[5] = (val & 0xff);
2953 } else {
2954 val = NFE_READ(sc, NFE_MACADDR_LO);
2955 addr[5] = (val >> 8) & 0xff;
2956 addr[4] = (val & 0xff);
2957
2958 val = NFE_READ(sc, NFE_MACADDR_HI);
2959 addr[3] = (val >> 24) & 0xff;
2960 addr[2] = (val >> 16) & 0xff;
2961 addr[1] = (val >> 8) & 0xff;
2962 addr[0] = (val & 0xff);
2963 }
2964}
2965
2966
2967static void
2968nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
2969{
2970
2971 NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]);
2972 NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
2973 addr[1] << 8 | addr[0]);
2974}
2975
2976
2977/*
2978 * Map a single buffer address.
2979 */
2980
2981static void
2982nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2983{
2984 struct nfe_dmamap_arg *ctx;
2985
2986 if (error != 0)
2987 return;
2988
2989 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
2990
2991 ctx = (struct nfe_dmamap_arg *)arg;
2992 ctx->nfe_busaddr = segs[0].ds_addr;
2993}
2994
2995
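/*
 * Generic sysctl handler that accepts a new integer value only if it
 * falls within [low, high].
 */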
2996static int
2997sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2998{
2999 int error, value;
3000
3001 if (!arg1)
3002 return (EINVAL);
3003 value = *(int *)arg1;
3004 error = sysctl_handle_int(oidp, &value, 0, req);
3005 if (error || !req->newptr)
3006 return (error);
3007 if (value < low || value > high)
3008 return (EINVAL);
3009 *(int *)arg1 = value;
3010
3011 return (0);
3012}
3013
3014
3015static int
3016sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)
3017{
3018
3019 return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN,
3020 NFE_PROC_MAX));
3021}
3022
3023
3024#define NFE_SYSCTL_STAT_ADD32(c, h, n, p, d) \
3025 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
3026#define NFE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
3027 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
3028
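/*
 * Create the per-device sysctl tree: the Rx process_limit tunable and,
 * on controllers with a MIB block, the hardware statistics nodes.
 */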
3029static void
3030nfe_sysctl_node(struct nfe_softc *sc)
3031{
3032 struct sysctl_ctx_list *ctx;
3033 struct sysctl_oid_list *child, *parent;
3034 struct sysctl_oid *tree;
3035 struct nfe_hw_stats *stats;
3036 int error;
3037
3038 stats = &sc->nfe_stats;
3039 ctx = device_get_sysctl_ctx(sc->nfe_dev);
3040 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev));
3041 SYSCTL_ADD_PROC(ctx, child,
3042 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
3043 &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
3044 "max number of Rx events to process");
3045
3046 sc->nfe_process_limit = NFE_PROC_DEFAULT;
3047 error = resource_int_value(device_get_name(sc->nfe_dev),
3048 device_get_unit(sc->nfe_dev), "process_limit",
3049 &sc->nfe_process_limit);
3050 if (error == 0) {
3051 if (sc->nfe_process_limit < NFE_PROC_MIN ||
3052 sc->nfe_process_limit > NFE_PROC_MAX) {
3053 device_printf(sc->nfe_dev,
3054 "process_limit value out of range; "
3055 "using default: %d\n", NFE_PROC_DEFAULT);
3056 sc->nfe_process_limit = NFE_PROC_DEFAULT;
3057 }
3058 }
3059
3060 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3061 return;
3062
3063 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
3064 NULL, "NFE statistics");
3065 parent = SYSCTL_CHILDREN(tree);
3066
3067 /* Rx statistics. */
3068 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
3069 NULL, "Rx MAC statistics");
3070 child = SYSCTL_CHILDREN(tree);
3071
3072 NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors",
3073 &stats->rx_frame_errors, "Framing Errors");
3074 NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes",
3075 &stats->rx_extra_bytes, "Extra Bytes");
3076 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3077 &stats->rx_late_cols, "Late Collisions");
3078 NFE_SYSCTL_STAT_ADD32(ctx, child, "runts",
3079 &stats->rx_runts, "Runts");
3080 NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos",
3081 &stats->rx_jumbos, "Jumbos");
3082 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns",
3083 &stats->rx_fifo_overuns, "FIFO Overruns");
3084 NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors",
3085 &stats->rx_crc_errors, "CRC Errors");
3086 NFE_SYSCTL_STAT_ADD32(ctx, child, "fae",
3087 &stats->rx_fae, "Frame Alignment Errors");
3088 NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors",
3089 &stats->rx_len_errors, "Length Errors");
3090 NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3091 &stats->rx_unicast, "Unicast Frames");
3092 NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3093 &stats->rx_multicast, "Multicast Frames");
3094 NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3095 &stats->rx_broadcast, "Broadcast Frames");
3096 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3097 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3098 &stats->rx_octets, "Octets");
3099 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3100 &stats->rx_pause, "Pause frames");
3101 NFE_SYSCTL_STAT_ADD32(ctx, child, "drops",
3102 &stats->rx_drops, "Drop frames");
3103 }
3104
3105 /* Tx statistics. */
3106 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
3107 NULL, "Tx MAC statistics");
3108 child = SYSCTL_CHILDREN(tree);
3109 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3110 &stats->tx_octets, "Octets");
3111 NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits",
3112 &stats->tx_zero_rexmits, "Zero Retransmits");
3113 NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits",
3114 &stats->tx_one_rexmits, "One Retransmits");
3115 NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits",
3116 &stats->tx_multi_rexmits, "Multiple Retransmits");
3117 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3118 &stats->tx_late_cols, "Late Collisions");
3119 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns",
3120 &stats->tx_fifo_underuns, "FIFO Underruns");
3121 NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts",
3122	    &stats->tx_carrier_losts, "Carrier Losses");
3123 NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals",
3124 &stats->tx_excess_deferals, "Excess Deferrals");
3125 NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors",
3126 &stats->tx_retry_errors, "Retry Errors");
3127 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3128 NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals",
3129 &stats->tx_deferals, "Deferrals");
3130 NFE_SYSCTL_STAT_ADD32(ctx, child, "frames",
3131 &stats->tx_frames, "Frames");
3132 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3133 &stats->tx_pause, "Pause Frames");
3134 }
3135 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3136		NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3137		    &stats->tx_unicast, "Unicast Frames");
3138		NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3139		    &stats->tx_multicast, "Multicast Frames");
3140		NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3141		    &stats->tx_broadcast, "Broadcast Frames");
3142 }
3143}
3144
3145#undef NFE_SYSCTL_STAT_ADD32
3146#undef NFE_SYSCTL_STAT_ADD64
3147
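/*
 * The MIB counters are cleared on read; read and discard them all so
 * that accounting starts from zero.
 */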
3148static void
3149nfe_stats_clear(struct nfe_softc *sc)
3150{
3151 int i, mib_cnt;
3152
3153 if ((sc->nfe_flags & NFE_MIB_V1) != 0)
3154 mib_cnt = NFE_NUM_MIB_STATV1;
3155 else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0)
3156 mib_cnt = NFE_NUM_MIB_STATV2;
3157 else
3158 return;
3159
3160 for (i = 0; i < mib_cnt; i += sizeof(uint32_t))
3161 NFE_READ(sc, NFE_TX_OCTET + i);
3162
3163 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3164 NFE_READ(sc, NFE_TX_UNICAST);
3165 NFE_READ(sc, NFE_TX_MULTICAST);
3166 NFE_READ(sc, NFE_TX_BROADCAST);
3167 }
3168}
3169
3170static void
3171nfe_stats_update(struct nfe_softc *sc)
3172{
3173 struct nfe_hw_stats *stats;
3174
3175 NFE_LOCK_ASSERT(sc);
3176
3177 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3178 return;
3179
3180 stats = &sc->nfe_stats;
3181 stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET);
3182 stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT);
3183 stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT);
3184 stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT);
3185 stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL);
3186 stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN);
3187 stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST);
3188 stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL);
3189 stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR);
3190 stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR);
3191 stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES);
3192 stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL);
3193 stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT);
3194 stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO);
3195 stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN);
3196 stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR);
3197 stats->rx_fae += NFE_READ(sc, NFE_RX_FAE);
3198 stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR);
3199 stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST);
3200 stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST);
3201 stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST);
3202
3203 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3204 stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL);
3205 stats->tx_frames += NFE_READ(sc, NFE_TX_FRAME);
3206 stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET);
3207 stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE);
3208 stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE);
3209 stats->rx_drops += NFE_READ(sc, NFE_RX_DROP);
3210 }
3211
3212 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3213 stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST);
3214 stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST);
3215		stats->tx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST);
3216 }
3217}
3218
3219
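/*
 * Force a 10/100Mbps link for Wake On LAN: renegotiate without the
 * 1000baseT capability and wait for the link to come up.
 */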
3220static void
3221nfe_set_linkspeed(struct nfe_softc *sc)
3222{
3223 struct mii_softc *miisc;
3224 struct mii_data *mii;
3225 int aneg, i, phyno;
3226
3227 NFE_LOCK_ASSERT(sc);
3228
3229 mii = device_get_softc(sc->nfe_miibus);
3230 mii_pollstat(mii);
3231 aneg = 0;
3232 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
3233 (IFM_ACTIVE | IFM_AVALID)) {
3234		switch (IFM_SUBTYPE(mii->mii_media_active)) {
3235 case IFM_10_T:
3236 case IFM_100_TX:
3237 return;
3238 case IFM_1000_T:
3239 aneg++;
3240 break;
3241 default:
3242 break;
3243 }
3244 }
3245 phyno = 0;
3246 if (mii->mii_instance) {
3247 miisc = LIST_FIRST(&mii->mii_phys);
3248 phyno = miisc->mii_phy;
3249 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3250 mii_phy_reset(miisc);
3251 } else
3252 return;
3253 nfe_miibus_writereg(sc->nfe_dev, phyno, MII_100T2CR, 0);
3254 nfe_miibus_writereg(sc->nfe_dev, phyno,
3255 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
3256 nfe_miibus_writereg(sc->nfe_dev, phyno,
3257 MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
3258 DELAY(1000);
3259 if (aneg != 0) {
3260 /*
3261		 * Poll link state until nfe(4) gets a 10/100Mbps link.
3262 */
3263 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
3264 mii_pollstat(mii);
3265 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
3266 == (IFM_ACTIVE | IFM_AVALID)) {
3267 switch (IFM_SUBTYPE(mii->mii_media_active)) {
3268 case IFM_10_T:
3269 case IFM_100_TX:
3270 nfe_mac_config(sc, mii);
3271 return;
3272 default:
3273 break;
3274 }
3275 }
3276 NFE_UNLOCK(sc);
3277 pause("nfelnk", hz);
3278 NFE_LOCK(sc);
3279 }
3280 if (i == MII_ANEGTICKS_GIGE)
3281 device_printf(sc->nfe_dev,
3282			    "establishing a link failed, WOL may not work!\n");
3283 }
3284 /*
3285	 * No link; force the MAC to a 100Mbps, full-duplex link.
3286	 * This is a last resort and may or may not work.
3287 */
3288 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
3289 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
3290 nfe_mac_config(sc, mii);
3291}
3292
3293
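/*
 * Program Wake On LAN: arm magic packet detection, keep the receiver
 * running at a WOL-capable link speed and request PME through the PCI
 * power management registers.
 */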
3294static void
3295nfe_set_wol(struct nfe_softc *sc)
3296{
3297 struct ifnet *ifp;
3298 uint32_t wolctl;
3299 int pmc;
3300 uint16_t pmstat;
3301
3302 NFE_LOCK_ASSERT(sc);
3303
3304 if (pci_find_extcap(sc->nfe_dev, PCIY_PMG, &pmc) != 0)
3305 return;
3306 ifp = sc->nfe_ifp;
3307 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
3308 wolctl = NFE_WOL_MAGIC;
3309 else
3310 wolctl = 0;
3311 NFE_WRITE(sc, NFE_WOL_CTL, wolctl);
3312 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
3313 nfe_set_linkspeed(sc);
3314 if ((sc->nfe_flags & NFE_PWR_MGMT) != 0)
3315 NFE_WRITE(sc, NFE_PWR2_CTL,
3316 NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_GATE_CLOCKS);
3317 /* Enable RX. */
3318 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 0);
3319 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 0);
3320 NFE_WRITE(sc, NFE_RX_CTL, NFE_READ(sc, NFE_RX_CTL) |
3321 NFE_RX_START);
3322 }
3323 /* Request PME if WOL is requested. */
3324 pmstat = pci_read_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, 2);
3325 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
3326 if ((ifp->if_capenable & IFCAP_WOL) != 0)
3327 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3328 pci_write_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
3329}
920 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
921 NFE_TX_PAUSE_FRAME_ENABLE);
922 val |= NFE_MISC1_TX_PAUSE;
923 } else {
924 val &= ~NFE_MISC1_TX_PAUSE;
925 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
926 NFE_TX_PAUSE_FRAME_DISABLE);
927 }
928 NFE_WRITE(sc, NFE_MISC1, val);
929 }
930 } else {
931 /* disable rx/tx pause frames */
932 val = NFE_READ(sc, NFE_RXFILTER);
933 val &= ~NFE_PFF_RX_PAUSE;
934 NFE_WRITE(sc, NFE_RXFILTER, val);
935 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
936 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
937 NFE_TX_PAUSE_FRAME_DISABLE);
938 val = NFE_READ(sc, NFE_MISC1);
939 val &= ~NFE_MISC1_TX_PAUSE;
940 NFE_WRITE(sc, NFE_MISC1, val);
941 }
942 }
943}
944
945
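/*
 * Read a PHY register through the MII management interface, polling
 * NFE_PHY_CTL until the controller is no longer busy.
 */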
946static int
947nfe_miibus_readreg(device_t dev, int phy, int reg)
948{
949 struct nfe_softc *sc = device_get_softc(dev);
950 uint32_t val;
951 int ntries;
952
953 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
954
955 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
956 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
957 DELAY(100);
958 }
959
960 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
961
962 for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
963 DELAY(100);
964 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
965 break;
966 }
967 if (ntries == NFE_TIMEOUT) {
968 DPRINTFN(sc, 2, "timeout waiting for PHY\n");
969		return (0);
970 }
971
972 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
973 DPRINTFN(sc, 2, "could not read PHY\n");
974 return 0;
975 }
976
977 val = NFE_READ(sc, NFE_PHY_DATA);
978 if (val != 0xffffffff && val != 0)
979 sc->mii_phyaddr = phy;
980
981 DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);
982
983 return (val);
984}
985
986
987static int
988nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
989{
990 struct nfe_softc *sc = device_get_softc(dev);
991 uint32_t ctl;
992 int ntries;
993
994 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
995
996 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
997 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
998 DELAY(100);
999 }
1000
1001 NFE_WRITE(sc, NFE_PHY_DATA, val);
1002 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
1003 NFE_WRITE(sc, NFE_PHY_CTL, ctl);
1004
1005 for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
1006 DELAY(100);
1007 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
1008 break;
1009 }
1010#ifdef NFE_DEBUG
1011 if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
1012 device_printf(sc->nfe_dev, "could not write to PHY\n");
1013#endif
1014 return (0);
1015}
1016
1017struct nfe_dmamap_arg {
1018 bus_addr_t nfe_busaddr;
1019};
1020
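/*
 * Allocate the DMA descriptors, tags and per-buffer maps for the
 * standard Rx ring.  The ring is populated later by nfe_init_rx_ring().
 */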
1021static int
1022nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1023{
1024 struct nfe_dmamap_arg ctx;
1025 struct nfe_rx_data *data;
1026 void *desc;
1027 int i, error, descsize;
1028
1029 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1030 desc = ring->desc64;
1031 descsize = sizeof (struct nfe_desc64);
1032 } else {
1033 desc = ring->desc32;
1034 descsize = sizeof (struct nfe_desc32);
1035 }
1036
1037 ring->cur = ring->next = 0;
1038
1039 error = bus_dma_tag_create(sc->nfe_parent_tag,
1040 NFE_RING_ALIGN, 0, /* alignment, boundary */
1041 BUS_SPACE_MAXADDR, /* lowaddr */
1042 BUS_SPACE_MAXADDR, /* highaddr */
1043 NULL, NULL, /* filter, filterarg */
1044 NFE_RX_RING_COUNT * descsize, 1, /* maxsize, nsegments */
1045 NFE_RX_RING_COUNT * descsize, /* maxsegsize */
1046 0, /* flags */
1047 NULL, NULL, /* lockfunc, lockarg */
1048 &ring->rx_desc_tag);
1049 if (error != 0) {
1050 device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
1051 goto fail;
1052 }
1053
1054	/* allocate DMA'able memory for the descriptors */
1055 error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
1056 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
1057 if (error != 0) {
1058		device_printf(sc->nfe_dev, "could not allocate desc DMA memory\n");
1059 goto fail;
1060 }
1061 if (sc->nfe_flags & NFE_40BIT_ADDR)
1062 ring->desc64 = desc;
1063 else
1064 ring->desc32 = desc;
1065
1066 /* map desc to device visible address space */
1067 ctx.nfe_busaddr = 0;
1068 error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
1069 NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1070 if (error != 0) {
1071 device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1072 goto fail;
1073 }
1074 ring->physaddr = ctx.nfe_busaddr;
1075
1076 error = bus_dma_tag_create(sc->nfe_parent_tag,
1077 1, 0, /* alignment, boundary */
1078 BUS_SPACE_MAXADDR, /* lowaddr */
1079 BUS_SPACE_MAXADDR, /* highaddr */
1080 NULL, NULL, /* filter, filterarg */
1081 MCLBYTES, 1, /* maxsize, nsegments */
1082 MCLBYTES, /* maxsegsize */
1083 0, /* flags */
1084 NULL, NULL, /* lockfunc, lockarg */
1085 &ring->rx_data_tag);
1086 if (error != 0) {
1087 device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
1088 goto fail;
1089 }
1090
1091 error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
1092 if (error != 0) {
1093 device_printf(sc->nfe_dev,
1094 "could not create Rx DMA spare map\n");
1095 goto fail;
1096 }
1097
1098 /*
1099	 * Create DMA maps for the Rx buffers allocated later by nfe_newbuf().
1100 */
1101 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1102 data = &sc->rxq.data[i];
1103 data->rx_data_map = NULL;
1104 data->m = NULL;
1105 error = bus_dmamap_create(ring->rx_data_tag, 0,
1106 &data->rx_data_map);
1107 if (error != 0) {
1108 device_printf(sc->nfe_dev,
1109 "could not create Rx DMA map\n");
1110 goto fail;
1111 }
1112 }
1113
1114fail:
1115 return (error);
1116}
1117
1118
1119static void
1120nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1121{
1122 struct nfe_dmamap_arg ctx;
1123 struct nfe_rx_data *data;
1124 void *desc;
1125 int i, error, descsize;
1126
1127 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
1128 return;
1129 if (jumbo_disable != 0) {
1130 device_printf(sc->nfe_dev, "disabling jumbo frame support\n");
1131 sc->nfe_jumbo_disable = 1;
1132 return;
1133 }
1134
1135 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1136 desc = ring->jdesc64;
1137 descsize = sizeof (struct nfe_desc64);
1138 } else {
1139 desc = ring->jdesc32;
1140 descsize = sizeof (struct nfe_desc32);
1141 }
1142
1143 ring->jcur = ring->jnext = 0;
1144
1145 /* Create DMA tag for jumbo Rx ring. */
1146 error = bus_dma_tag_create(sc->nfe_parent_tag,
1147 NFE_RING_ALIGN, 0, /* alignment, boundary */
1148 BUS_SPACE_MAXADDR, /* lowaddr */
1149 BUS_SPACE_MAXADDR, /* highaddr */
1150 NULL, NULL, /* filter, filterarg */
1151 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsize */
1152 1, /* nsegments */
1153 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsegsize */
1154 0, /* flags */
1155 NULL, NULL, /* lockfunc, lockarg */
1156 &ring->jrx_desc_tag);
1157 if (error != 0) {
1158 device_printf(sc->nfe_dev,
1159 "could not create jumbo ring DMA tag\n");
1160 goto fail;
1161 }
1162
1163 /* Create DMA tag for jumbo Rx buffers. */
1164 error = bus_dma_tag_create(sc->nfe_parent_tag,
1165 1, 0, /* alignment, boundary */
1166 BUS_SPACE_MAXADDR, /* lowaddr */
1167 BUS_SPACE_MAXADDR, /* highaddr */
1168 NULL, NULL, /* filter, filterarg */
1169 MJUM9BYTES, /* maxsize */
1170 1, /* nsegments */
1171 MJUM9BYTES, /* maxsegsize */
1172 0, /* flags */
1173 NULL, NULL, /* lockfunc, lockarg */
1174 &ring->jrx_data_tag);
1175 if (error != 0) {
1176 device_printf(sc->nfe_dev,
1177 "could not create jumbo Rx buffer DMA tag\n");
1178 goto fail;
1179 }
1180
1181 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
1182 error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK |
1183 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map);
1184 if (error != 0) {
1185 device_printf(sc->nfe_dev,
1186 "could not allocate DMA'able memory for jumbo Rx ring\n");
1187 goto fail;
1188 }
1189 if (sc->nfe_flags & NFE_40BIT_ADDR)
1190 ring->jdesc64 = desc;
1191 else
1192 ring->jdesc32 = desc;
1193
1194 ctx.nfe_busaddr = 0;
1195 error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc,
1196 NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1197 if (error != 0) {
1198 device_printf(sc->nfe_dev,
1199 "could not load DMA'able memory for jumbo Rx ring\n");
1200 goto fail;
1201 }
1202 ring->jphysaddr = ctx.nfe_busaddr;
1203
1204 /* Create DMA maps for jumbo Rx buffers. */
1205 error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map);
1206 if (error != 0) {
1207 device_printf(sc->nfe_dev,
1208 "could not create jumbo Rx DMA spare map\n");
1209 goto fail;
1210 }
1211
1212 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1213 data = &sc->jrxq.jdata[i];
1214 data->rx_data_map = NULL;
1215 data->m = NULL;
1216 error = bus_dmamap_create(ring->jrx_data_tag, 0,
1217 &data->rx_data_map);
1218 if (error != 0) {
1219 device_printf(sc->nfe_dev,
1220 "could not create jumbo Rx DMA map\n");
1221 goto fail;
1222 }
1223 }
1224
1225 return;
1226
1227fail:
1228 /*
1229	 * Running without jumbo frame support is OK in most cases,
1230	 * so don't fail on creating the DMA tag/map for jumbo frames.
1231 */
1232 nfe_free_jrx_ring(sc, ring);
1233 device_printf(sc->nfe_dev, "disabling jumbo frame support due to "
1234 "resource shortage\n");
1235 sc->nfe_jumbo_disable = 1;
1236}
1237
1238
1239static int
1240nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1241{
1242 void *desc;
1243 size_t descsize;
1244 int i;
1245
1246 ring->cur = ring->next = 0;
1247 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1248 desc = ring->desc64;
1249 descsize = sizeof (struct nfe_desc64);
1250 } else {
1251 desc = ring->desc32;
1252 descsize = sizeof (struct nfe_desc32);
1253 }
1254 bzero(desc, descsize * NFE_RX_RING_COUNT);
1255 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1256 if (nfe_newbuf(sc, i) != 0)
1257 return (ENOBUFS);
1258 }
1259
1260 bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
1261 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1262
1263 return (0);
1264}
1265
1266
1267static int
1268nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1269{
1270 void *desc;
1271 size_t descsize;
1272 int i;
1273
1274 ring->jcur = ring->jnext = 0;
1275 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1276 desc = ring->jdesc64;
1277 descsize = sizeof (struct nfe_desc64);
1278 } else {
1279 desc = ring->jdesc32;
1280 descsize = sizeof (struct nfe_desc32);
1281 }
1282 bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
1283 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1284 if (nfe_jnewbuf(sc, i) != 0)
1285 return (ENOBUFS);
1286 }
1287
1288 bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
1289 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1290
1291 return (0);
1292}
1293
1294
1295static void
1296nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1297{
1298 struct nfe_rx_data *data;
1299 void *desc;
1300 int i, descsize;
1301
1302 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1303 desc = ring->desc64;
1304 descsize = sizeof (struct nfe_desc64);
1305 } else {
1306 desc = ring->desc32;
1307 descsize = sizeof (struct nfe_desc32);
1308 }
1309
1310 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1311 data = &ring->data[i];
1312 if (data->rx_data_map != NULL) {
1313 bus_dmamap_destroy(ring->rx_data_tag,
1314 data->rx_data_map);
1315 data->rx_data_map = NULL;
1316 }
1317 if (data->m != NULL) {
1318 m_freem(data->m);
1319 data->m = NULL;
1320 }
1321 }
1322 if (ring->rx_data_tag != NULL) {
1323 if (ring->rx_spare_map != NULL) {
1324 bus_dmamap_destroy(ring->rx_data_tag,
1325 ring->rx_spare_map);
1326 ring->rx_spare_map = NULL;
1327 }
1328 bus_dma_tag_destroy(ring->rx_data_tag);
1329 ring->rx_data_tag = NULL;
1330 }
1331
1332 if (desc != NULL) {
1333 bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
1334 bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
1335 ring->desc64 = NULL;
1336 ring->desc32 = NULL;
1337 ring->rx_desc_map = NULL;
1338 }
1339 if (ring->rx_desc_tag != NULL) {
1340 bus_dma_tag_destroy(ring->rx_desc_tag);
1341 ring->rx_desc_tag = NULL;
1342 }
1343}
1344
1345
1346static void
1347nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1348{
1349 struct nfe_rx_data *data;
1350 void *desc;
1351 int i, descsize;
1352
1353 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
1354 return;
1355
1356 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1357 desc = ring->jdesc64;
1358 descsize = sizeof (struct nfe_desc64);
1359 } else {
1360 desc = ring->jdesc32;
1361 descsize = sizeof (struct nfe_desc32);
1362 }
1363
1364 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1365 data = &ring->jdata[i];
1366 if (data->rx_data_map != NULL) {
1367 bus_dmamap_destroy(ring->jrx_data_tag,
1368 data->rx_data_map);
1369 data->rx_data_map = NULL;
1370 }
1371 if (data->m != NULL) {
1372 m_freem(data->m);
1373 data->m = NULL;
1374 }
1375 }
1376 if (ring->jrx_data_tag != NULL) {
1377 if (ring->jrx_spare_map != NULL) {
1378 bus_dmamap_destroy(ring->jrx_data_tag,
1379 ring->jrx_spare_map);
1380 ring->jrx_spare_map = NULL;
1381 }
1382 bus_dma_tag_destroy(ring->jrx_data_tag);
1383 ring->jrx_data_tag = NULL;
1384 }
1385
1386 if (desc != NULL) {
1387 bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
1388 bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
1389 ring->jdesc64 = NULL;
1390 ring->jdesc32 = NULL;
1391 ring->jrx_desc_map = NULL;
1392 }
1393
1394 if (ring->jrx_desc_tag != NULL) {
1395 bus_dma_tag_destroy(ring->jrx_desc_tag);
1396 ring->jrx_desc_tag = NULL;
1397 }
1398}
1399
1400
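/*
 * Allocate the DMA descriptors, tags and per-slot maps for the Tx ring.
 */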
1401static int
1402nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1403{
1404 struct nfe_dmamap_arg ctx;
1405 int i, error;
1406 void *desc;
1407 int descsize;
1408
1409 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1410 desc = ring->desc64;
1411 descsize = sizeof (struct nfe_desc64);
1412 } else {
1413 desc = ring->desc32;
1414 descsize = sizeof (struct nfe_desc32);
1415 }
1416
1417 ring->queued = 0;
1418 ring->cur = ring->next = 0;
1419
1420 error = bus_dma_tag_create(sc->nfe_parent_tag,
1421 NFE_RING_ALIGN, 0, /* alignment, boundary */
1422 BUS_SPACE_MAXADDR, /* lowaddr */
1423 BUS_SPACE_MAXADDR, /* highaddr */
1424 NULL, NULL, /* filter, filterarg */
1425 NFE_TX_RING_COUNT * descsize, 1, /* maxsize, nsegments */
1426 NFE_TX_RING_COUNT * descsize, /* maxsegsize */
1427 0, /* flags */
1428 NULL, NULL, /* lockfunc, lockarg */
1429 &ring->tx_desc_tag);
1430 if (error != 0) {
1431 device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
1432 goto fail;
1433 }
1434
1435 error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
1436 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
1437 if (error != 0) {
1438		device_printf(sc->nfe_dev, "could not allocate desc DMA memory\n");
1439 goto fail;
1440 }
1441 if (sc->nfe_flags & NFE_40BIT_ADDR)
1442 ring->desc64 = desc;
1443 else
1444 ring->desc32 = desc;
1445
1446 ctx.nfe_busaddr = 0;
1447 error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
1448 NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1449 if (error != 0) {
1450 device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1451 goto fail;
1452 }
1453 ring->physaddr = ctx.nfe_busaddr;
1454
1455 error = bus_dma_tag_create(sc->nfe_parent_tag,
1456 1, 0,
1457 BUS_SPACE_MAXADDR,
1458 BUS_SPACE_MAXADDR,
1459 NULL, NULL,
1460 NFE_TSO_MAXSIZE,
1461 NFE_MAX_SCATTER,
1462 NFE_TSO_MAXSGSIZE,
1463 0,
1464 NULL, NULL,
1465 &ring->tx_data_tag);
1466 if (error != 0) {
1467 device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
1468 goto fail;
1469 }
1470
1471 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1472 error = bus_dmamap_create(ring->tx_data_tag, 0,
1473 &ring->data[i].tx_data_map);
1474 if (error != 0) {
1475 device_printf(sc->nfe_dev,
1476 "could not create Tx DMA map\n");
1477 goto fail;
1478 }
1479 }
1480
1481fail:
1482 return (error);
1483}
1484
1485
1486static void
1487nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1488{
1489 void *desc;
1490 size_t descsize;
1491
1492 sc->nfe_force_tx = 0;
1493 ring->queued = 0;
1494 ring->cur = ring->next = 0;
1495 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1496 desc = ring->desc64;
1497 descsize = sizeof (struct nfe_desc64);
1498 } else {
1499 desc = ring->desc32;
1500 descsize = sizeof (struct nfe_desc32);
1501 }
1502 bzero(desc, descsize * NFE_TX_RING_COUNT);
1503
1504 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1505 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1506}
1507
1508
1509static void
1510nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1511{
1512 struct nfe_tx_data *data;
1513 void *desc;
1514 int i, descsize;
1515
1516 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1517 desc = ring->desc64;
1518 descsize = sizeof (struct nfe_desc64);
1519 } else {
1520 desc = ring->desc32;
1521 descsize = sizeof (struct nfe_desc32);
1522 }
1523
1524 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1525 data = &ring->data[i];
1526
1527 if (data->m != NULL) {
1528 bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
1529 BUS_DMASYNC_POSTWRITE);
1530 bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
1531 m_freem(data->m);
1532 data->m = NULL;
1533 }
1534 if (data->tx_data_map != NULL) {
1535 bus_dmamap_destroy(ring->tx_data_tag,
1536 data->tx_data_map);
1537 data->tx_data_map = NULL;
1538 }
1539 }
1540
1541 if (ring->tx_data_tag != NULL) {
1542 bus_dma_tag_destroy(ring->tx_data_tag);
1543 ring->tx_data_tag = NULL;
1544 }
1545
1546 if (desc != NULL) {
1547 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1548 BUS_DMASYNC_POSTWRITE);
1549 bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
1550 bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
1551 ring->desc64 = NULL;
1552 ring->desc32 = NULL;
1553 ring->tx_desc_map = NULL;
1554 bus_dma_tag_destroy(ring->tx_desc_tag);
1555 ring->tx_desc_tag = NULL;
1556 }
1557}
1558
1559#ifdef DEVICE_POLLING
1560static poll_handler_t nfe_poll;
1561
1562
1563static int
1564nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1565{
1566 struct nfe_softc *sc = ifp->if_softc;
1567 uint32_t r;
1568 int rx_npkts = 0;
1569
1570 NFE_LOCK(sc);
1571
1572 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1573 NFE_UNLOCK(sc);
1574 return (rx_npkts);
1575 }
1576
1577 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1578		nfe_jrxeof(sc, count, &rx_npkts);
1579	else
1580		nfe_rxeof(sc, count, &rx_npkts);
1581 nfe_txeof(sc);
1582 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1583 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);
1584
1585 if (cmd == POLL_AND_CHECK_STATUS) {
1586 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1587 NFE_UNLOCK(sc);
1588 return (rx_npkts);
1589 }
1590 NFE_WRITE(sc, sc->nfe_irq_status, r);
1591
1592 if (r & NFE_IRQ_LINK) {
1593 NFE_READ(sc, NFE_PHY_STATUS);
1594 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1595 DPRINTF(sc, "link state changed\n");
1596 }
1597 }
1598 NFE_UNLOCK(sc);
1599 return (rx_npkts);
1600}
1601#endif /* DEVICE_POLLING */
1602
1603static void
1604nfe_set_intr(struct nfe_softc *sc)
1605{
1606
1607 if (sc->nfe_msi != 0)
1608 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1609}
1610
1611
1612/* In MSI-X mode, a write to the mask registers behaves as XOR. */
1613static __inline void
1614nfe_enable_intr(struct nfe_softc *sc)
1615{
1616
1617 if (sc->nfe_msix != 0) {
1618 /* XXX Should have a better way to enable interrupts! */
1619 if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
1620 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
1621 } else
1622 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
1623}
1624
1625
1626static __inline void
1627nfe_disable_intr(struct nfe_softc *sc)
1628{
1629
1630 if (sc->nfe_msix != 0) {
1631 /* XXX Should have a better way to disable interrupts! */
1632 if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
1633 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
1634 } else
1635 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
1636}
1637
1638
1639static int
1640nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1641{
1642 struct nfe_softc *sc;
1643 struct ifreq *ifr;
1644 struct mii_data *mii;
1645 int error, init, mask;
1646
1647 sc = ifp->if_softc;
1648 ifr = (struct ifreq *) data;
1649 error = 0;
1650 init = 0;
1651 switch (cmd) {
1652 case SIOCSIFMTU:
1653 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
1654 error = EINVAL;
1655 else if (ifp->if_mtu != ifr->ifr_mtu) {
1656 if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
1657 (sc->nfe_jumbo_disable != 0)) &&
1658 ifr->ifr_mtu > ETHERMTU)
1659 error = EINVAL;
1660 else {
1661 NFE_LOCK(sc);
1662 ifp->if_mtu = ifr->ifr_mtu;
1663 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1664 nfe_init_locked(sc);
1665 NFE_UNLOCK(sc);
1666 }
1667 }
1668 break;
1669 case SIOCSIFFLAGS:
1670 NFE_LOCK(sc);
1671 if (ifp->if_flags & IFF_UP) {
1672 /*
1673 * If only the PROMISC or ALLMULTI flag changes, then
1674 * don't do a full re-init of the chip, just update
1675 * the Rx filter.
1676 */
1677 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1678 ((ifp->if_flags ^ sc->nfe_if_flags) &
1679 (IFF_ALLMULTI | IFF_PROMISC)) != 0)
1680 nfe_setmulti(sc);
1681 else
1682 nfe_init_locked(sc);
1683 } else {
1684 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1685 nfe_stop(ifp);
1686 }
1687 sc->nfe_if_flags = ifp->if_flags;
1688 NFE_UNLOCK(sc);
1689 error = 0;
1690 break;
1691 case SIOCADDMULTI:
1692 case SIOCDELMULTI:
1693 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1694 NFE_LOCK(sc);
1695 nfe_setmulti(sc);
1696 NFE_UNLOCK(sc);
1697 error = 0;
1698 }
1699 break;
1700 case SIOCSIFMEDIA:
1701 case SIOCGIFMEDIA:
1702 mii = device_get_softc(sc->nfe_miibus);
1703 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1704 break;
1705 case SIOCSIFCAP:
1706 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1707#ifdef DEVICE_POLLING
1708 if ((mask & IFCAP_POLLING) != 0) {
1709 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
1710 error = ether_poll_register(nfe_poll, ifp);
1711 if (error)
1712 break;
1713 NFE_LOCK(sc);
1714 nfe_disable_intr(sc);
1715 ifp->if_capenable |= IFCAP_POLLING;
1716 NFE_UNLOCK(sc);
1717 } else {
1718 error = ether_poll_deregister(ifp);
1719 /* Enable interrupt even in error case */
1720 NFE_LOCK(sc);
1721 nfe_enable_intr(sc);
1722 ifp->if_capenable &= ~IFCAP_POLLING;
1723 NFE_UNLOCK(sc);
1724 }
1725 }
1726#endif /* DEVICE_POLLING */
1727 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
1728 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
1729 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1730
1731 if ((sc->nfe_flags & NFE_HW_CSUM) != 0 &&
1732 (mask & IFCAP_HWCSUM) != 0) {
1733 ifp->if_capenable ^= IFCAP_HWCSUM;
1734 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
1735 (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
1736 ifp->if_hwassist |= NFE_CSUM_FEATURES;
1737 else
1738 ifp->if_hwassist &= ~NFE_CSUM_FEATURES;
1739 init++;
1740 }
1741 if ((sc->nfe_flags & NFE_HW_VLAN) != 0 &&
1742 (mask & IFCAP_VLAN_HWTAGGING) != 0) {
1743 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1744 init++;
1745 }
1746 /*
1747 * XXX
1748 * It seems that VLAN stripping requires Rx checksum offload.
1749 * Unfortunately FreeBSD has no way to disable only Rx side
1750 * VLAN stripping. So when we know Rx checksum offload is
1751		 * disabled, turn the entire hardware VLAN assist off.
1752 */
1753 if ((sc->nfe_flags & (NFE_HW_CSUM | NFE_HW_VLAN)) ==
1754 (NFE_HW_CSUM | NFE_HW_VLAN)) {
1755 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
1756 ifp->if_capenable &= ~IFCAP_VLAN_HWTAGGING;
1757 }
1758
1759 if ((sc->nfe_flags & NFE_HW_CSUM) != 0 &&
1760 (mask & IFCAP_TSO4) != 0) {
1761 ifp->if_capenable ^= IFCAP_TSO4;
1762 if ((IFCAP_TSO4 & ifp->if_capenable) != 0 &&
1763 (IFCAP_TSO4 & ifp->if_capabilities) != 0)
1764 ifp->if_hwassist |= CSUM_TSO;
1765 else
1766 ifp->if_hwassist &= ~CSUM_TSO;
1767 }
1768
1769 if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1770 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1771 nfe_init(sc);
1772 }
1773 if ((sc->nfe_flags & NFE_HW_VLAN) != 0)
1774 VLAN_CAPABILITIES(ifp);
1775 break;
1776 default:
1777 error = ether_ioctl(ifp, cmd, data);
1778 break;
1779 }
1780
1781 return (error);
1782}
1783
1784
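/*
 * Interrupt filter: if the controller raised the interrupt, mask further
 * interrupts and defer the real work to nfe_int_task().
 */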
1785static int
1786nfe_intr(void *arg)
1787{
1788 struct nfe_softc *sc;
1789 uint32_t status;
1790
1791 sc = (struct nfe_softc *)arg;
1792
1793 status = NFE_READ(sc, sc->nfe_irq_status);
1794 if (status == 0 || status == 0xffffffff)
1795 return (FILTER_STRAY);
1796 nfe_disable_intr(sc);
1797 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
1798
1799 return (FILTER_HANDLED);
1800}
1801
1802
1803static void
1804nfe_int_task(void *arg, int pending)
1805{
1806 struct nfe_softc *sc = arg;
1807 struct ifnet *ifp = sc->nfe_ifp;
1808 uint32_t r;
1809 int domore;
1810
1811 NFE_LOCK(sc);
1812
1813 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1814 nfe_enable_intr(sc);
1815 NFE_UNLOCK(sc);
1816 return; /* not for us */
1817 }
1818 NFE_WRITE(sc, sc->nfe_irq_status, r);
1819
1820 DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r);
1821
1822#ifdef DEVICE_POLLING
1823 if (ifp->if_capenable & IFCAP_POLLING) {
1824 NFE_UNLOCK(sc);
1825 return;
1826 }
1827#endif
1828
1829 if (r & NFE_IRQ_LINK) {
1830 NFE_READ(sc, NFE_PHY_STATUS);
1831 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1832 DPRINTF(sc, "link state changed\n");
1833 }
1834
1835 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1836 NFE_UNLOCK(sc);
1837 nfe_enable_intr(sc);
1838 return;
1839 }
1840
1841 domore = 0;
1842 /* check Rx ring */
1843 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1844 domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL);
1845 else
1846 domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL);
1847 /* check Tx ring */
1848 nfe_txeof(sc);
1849
1850 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1851 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);
1852
1853 NFE_UNLOCK(sc);
1854
1855 if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
1856 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
1857 return;
1858 }
1859
1860 /* Reenable interrupts. */
1861 nfe_enable_intr(sc);
1862}
1863
1864
1865static __inline void
1866nfe_discard_rxbuf(struct nfe_softc *sc, int idx)
1867{
1868 struct nfe_desc32 *desc32;
1869 struct nfe_desc64 *desc64;
1870 struct nfe_rx_data *data;
1871 struct mbuf *m;
1872
1873 data = &sc->rxq.data[idx];
1874 m = data->m;
1875
1876 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1877 desc64 = &sc->rxq.desc64[idx];
1878		/* The VLAN tag may have overwritten physaddr; restore it. */
1879 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1880 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1881 desc64->length = htole16(m->m_len);
1882 desc64->flags = htole16(NFE_RX_READY);
1883 } else {
1884 desc32 = &sc->rxq.desc32[idx];
1885 desc32->length = htole16(m->m_len);
1886 desc32->flags = htole16(NFE_RX_READY);
1887 }
1888}
1889
1890
1891static __inline void
1892nfe_discard_jrxbuf(struct nfe_softc *sc, int idx)
1893{
1894 struct nfe_desc32 *desc32;
1895 struct nfe_desc64 *desc64;
1896 struct nfe_rx_data *data;
1897 struct mbuf *m;
1898
1899 data = &sc->jrxq.jdata[idx];
1900 m = data->m;
1901
1902 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1903 desc64 = &sc->jrxq.jdesc64[idx];
1904		/* The VLAN tag may have overwritten physaddr; restore it. */
1905 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1906 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1907 desc64->length = htole16(m->m_len);
1908 desc64->flags = htole16(NFE_RX_READY);
1909 } else {
1910 desc32 = &sc->jrxq.jdesc32[idx];
1911 desc32->length = htole16(m->m_len);
1912 desc32->flags = htole16(NFE_RX_READY);
1913 }
1914}
1915
1916
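/*
 * Attach a fresh mbuf cluster to Rx slot 'idx'.  The new cluster is
 * loaded into the spare DMA map first so the old buffer is kept if the
 * mapping fails.
 */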
1917static int
1918nfe_newbuf(struct nfe_softc *sc, int idx)
1919{
1920 struct nfe_rx_data *data;
1921 struct nfe_desc32 *desc32;
1922 struct nfe_desc64 *desc64;
1923 struct mbuf *m;
1924 bus_dma_segment_t segs[1];
1925 bus_dmamap_t map;
1926 int nsegs;
1927
1928 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1929 if (m == NULL)
1930 return (ENOBUFS);
1931
1932 m->m_len = m->m_pkthdr.len = MCLBYTES;
1933 m_adj(m, ETHER_ALIGN);
1934
1935 if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map,
1936 m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
1937 m_freem(m);
1938 return (ENOBUFS);
1939 }
1940 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1941
1942 data = &sc->rxq.data[idx];
1943 if (data->m != NULL) {
1944 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
1945 BUS_DMASYNC_POSTREAD);
1946 bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map);
1947 }
1948 map = data->rx_data_map;
1949 data->rx_data_map = sc->rxq.rx_spare_map;
1950 sc->rxq.rx_spare_map = map;
1951 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
1952 BUS_DMASYNC_PREREAD);
1953 data->paddr = segs[0].ds_addr;
1954 data->m = m;
1955 /* update mapping address in h/w descriptor */
1956 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1957 desc64 = &sc->rxq.desc64[idx];
1958 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
1959 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
1960 desc64->length = htole16(segs[0].ds_len);
1961 desc64->flags = htole16(NFE_RX_READY);
1962 } else {
1963 desc32 = &sc->rxq.desc32[idx];
1964 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
1965 desc32->length = htole16(segs[0].ds_len);
1966 desc32->flags = htole16(NFE_RX_READY);
1967 }
1968
1969 return (0);
1970}
1971
1972
1973static int
1974nfe_jnewbuf(struct nfe_softc *sc, int idx)
1975{
1976 struct nfe_rx_data *data;
1977 struct nfe_desc32 *desc32;
1978 struct nfe_desc64 *desc64;
1979 struct mbuf *m;
1980 bus_dma_segment_t segs[1];
1981 bus_dmamap_t map;
1982 int nsegs;
1983
1984 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
1985 if (m == NULL)
1986 return (ENOBUFS);
1987 if ((m->m_flags & M_EXT) == 0) {
1988 m_freem(m);
1989 return (ENOBUFS);
1990 }
1991 m->m_pkthdr.len = m->m_len = MJUM9BYTES;
1992 m_adj(m, ETHER_ALIGN);
1993
1994 if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag,
1995 sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
1996 m_freem(m);
1997 return (ENOBUFS);
1998 }
1999 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2000
2001 data = &sc->jrxq.jdata[idx];
2002 if (data->m != NULL) {
2003 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
2004 BUS_DMASYNC_POSTREAD);
2005 bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map);
2006 }
2007 map = data->rx_data_map;
2008 data->rx_data_map = sc->jrxq.jrx_spare_map;
2009 sc->jrxq.jrx_spare_map = map;
2010 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
2011 BUS_DMASYNC_PREREAD);
2012 data->paddr = segs[0].ds_addr;
2013 data->m = m;
2014 /* update mapping address in h/w descriptor */
2015 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2016 desc64 = &sc->jrxq.jdesc64[idx];
2017 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
2018 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2019 desc64->length = htole16(segs[0].ds_len);
2020 desc64->flags = htole16(NFE_RX_READY);
2021 } else {
2022 desc32 = &sc->jrxq.jdesc32[idx];
2023 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2024 desc32->length = htole16(segs[0].ds_len);
2025 desc32->flags = htole16(NFE_RX_READY);
2026 }
2027
2028 return (0);
2029}
2030
2031
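/*
 * Process up to 'count' received frames from the standard Rx ring and
 * pass them up to the stack, recycling the descriptors as we go.
 */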
2032static int
2033nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
2034{
2035 struct ifnet *ifp = sc->nfe_ifp;
2036 struct nfe_desc32 *desc32;
2037 struct nfe_desc64 *desc64;
2038 struct nfe_rx_data *data;
2039 struct mbuf *m;
2040 uint16_t flags;
2041 int len, prog, rx_npkts;
2042 uint32_t vtag = 0;
2043
2044 rx_npkts = 0;
2045 NFE_LOCK_ASSERT(sc);
2046
2047 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2048 BUS_DMASYNC_POSTREAD);
2049
2050 for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
2051 if (count <= 0)
2052 break;
2053 count--;
2054
2055 data = &sc->rxq.data[sc->rxq.cur];
2056
2057 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2058 desc64 = &sc->rxq.desc64[sc->rxq.cur];
2059 vtag = le32toh(desc64->physaddr[1]);
2060 flags = le16toh(desc64->flags);
2061 len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2062 } else {
2063 desc32 = &sc->rxq.desc32[sc->rxq.cur];
2064 flags = le16toh(desc32->flags);
2065 len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2066 }
2067
2068 if (flags & NFE_RX_READY)
2069 break;
2070 prog++;
2071 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2072 if (!(flags & NFE_RX_VALID_V1)) {
2073 ifp->if_ierrors++;
2074 nfe_discard_rxbuf(sc, sc->rxq.cur);
2075 continue;
2076 }
2077 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2078 flags &= ~NFE_RX_ERROR;
2079 len--; /* fix buffer length */
2080 }
2081 } else {
2082 if (!(flags & NFE_RX_VALID_V2)) {
2083 ifp->if_ierrors++;
2084 nfe_discard_rxbuf(sc, sc->rxq.cur);
2085 continue;
2086 }
2087
2088 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2089 flags &= ~NFE_RX_ERROR;
2090 len--; /* fix buffer length */
2091 }
2092 }
2093
2094 if (flags & NFE_RX_ERROR) {
2095 ifp->if_ierrors++;
2096 nfe_discard_rxbuf(sc, sc->rxq.cur);
2097 continue;
2098 }
2099
2100 m = data->m;
2101 if (nfe_newbuf(sc, sc->rxq.cur) != 0) {
2102 ifp->if_iqdrops++;
2103 nfe_discard_rxbuf(sc, sc->rxq.cur);
2104 continue;
2105 }
2106
2107 if ((vtag & NFE_RX_VTAG) != 0 &&
2108 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2109 m->m_pkthdr.ether_vtag = vtag & 0xffff;
2110 m->m_flags |= M_VLANTAG;
2111 }
2112
2113 m->m_pkthdr.len = m->m_len = len;
2114 m->m_pkthdr.rcvif = ifp;
2115
2116 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2117 if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2118 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2119 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2120 if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2121 (flags & NFE_RX_UDP_CSUMOK) != 0) {
2122 m->m_pkthdr.csum_flags |=
2123 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2124 m->m_pkthdr.csum_data = 0xffff;
2125 }
2126 }
2127 }
2128
2129 ifp->if_ipackets++;
2130
2131 NFE_UNLOCK(sc);
2132 (*ifp->if_input)(ifp, m);
2133 NFE_LOCK(sc);
2134 rx_npkts++;
2135 }
2136
2137 if (prog > 0)
2138 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2139 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2140
2141 if (rx_npktsp != NULL)
2142 *rx_npktsp = rx_npkts;
2143 return (count > 0 ? 0 : EAGAIN);
2144}
2145
2146
2147static int
2148nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
2149{
2150 struct ifnet *ifp = sc->nfe_ifp;
2151 struct nfe_desc32 *desc32;
2152 struct nfe_desc64 *desc64;
2153 struct nfe_rx_data *data;
2154 struct mbuf *m;
2155 uint16_t flags;
2156 int len, prog, rx_npkts;
2157 uint32_t vtag = 0;
2158
2159 rx_npkts = 0;
2160 NFE_LOCK_ASSERT(sc);
2161
2162 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2163 BUS_DMASYNC_POSTREAD);
2164
2165 for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
2166 vtag = 0) {
2167 if (count <= 0)
2168 break;
2169 count--;
2170
2171 data = &sc->jrxq.jdata[sc->jrxq.jcur];
2172
2173 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2174 desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
2175 vtag = le32toh(desc64->physaddr[1]);
2176 flags = le16toh(desc64->flags);
2177 len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2178 } else {
2179 desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
2180 flags = le16toh(desc32->flags);
2181 len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2182 }
2183
2184 if (flags & NFE_RX_READY)
2185 break;
2186 prog++;
2187 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2188 if (!(flags & NFE_RX_VALID_V1)) {
2189 ifp->if_ierrors++;
2190 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2191 continue;
2192 }
2193 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2194 flags &= ~NFE_RX_ERROR;
2195 len--; /* fix buffer length */
2196 }
2197 } else {
2198 if (!(flags & NFE_RX_VALID_V2)) {
2199 ifp->if_ierrors++;
2200 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2201 continue;
2202 }
2203
2204 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2205 flags &= ~NFE_RX_ERROR;
2206 len--; /* fix buffer length */
2207 }
2208 }
2209
2210 if (flags & NFE_RX_ERROR) {
2211 ifp->if_ierrors++;
2212 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2213 continue;
2214 }
2215
2216 m = data->m;
2217 if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
2218 ifp->if_iqdrops++;
2219 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2220 continue;
2221 }
2222
2223 if ((vtag & NFE_RX_VTAG) != 0 &&
2224 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2225 m->m_pkthdr.ether_vtag = vtag & 0xffff;
2226 m->m_flags |= M_VLANTAG;
2227 }
2228
2229 m->m_pkthdr.len = m->m_len = len;
2230 m->m_pkthdr.rcvif = ifp;
2231
2232 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2233 if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2234 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2235 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2236 if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2237 (flags & NFE_RX_UDP_CSUMOK) != 0) {
2238 m->m_pkthdr.csum_flags |=
2239 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2240 m->m_pkthdr.csum_data = 0xffff;
2241 }
2242 }
2243 }
2244
2245 ifp->if_ipackets++;
2246
2247 NFE_UNLOCK(sc);
2248 (*ifp->if_input)(ifp, m);
2249 NFE_LOCK(sc);
2250 rx_npkts++;
2251 }
2252
2253 if (prog > 0)
2254 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2255 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2256
2257 if (rx_npktsp != NULL)
2258 *rx_npktsp = rx_npkts;
2259 return (count > 0 ? 0 : EAGAIN);
2260}
2261
2262
2263static void
2264nfe_txeof(struct nfe_softc *sc)
2265{
2266 struct ifnet *ifp = sc->nfe_ifp;
2267 struct nfe_desc32 *desc32;
2268 struct nfe_desc64 *desc64;
2269 struct nfe_tx_data *data = NULL;
2270 uint16_t flags;
2271 int cons, prog;
2272
2273 NFE_LOCK_ASSERT(sc);
2274
2275 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2276 BUS_DMASYNC_POSTREAD);
2277
2278 prog = 0;
2279 for (cons = sc->txq.next; cons != sc->txq.cur;
2280 NFE_INC(cons, NFE_TX_RING_COUNT)) {
2281 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2282 desc64 = &sc->txq.desc64[cons];
2283 flags = le16toh(desc64->flags);
2284 } else {
2285 desc32 = &sc->txq.desc32[cons];
2286 flags = le16toh(desc32->flags);
2287 }
2288
2289 if (flags & NFE_TX_VALID)
2290 break;
2291
2292 prog++;
2293 sc->txq.queued--;
2294 data = &sc->txq.data[cons];
2295
2296 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2297 if ((flags & NFE_TX_LASTFRAG_V1) == 0)
2298 continue;
2299 if ((flags & NFE_TX_ERROR_V1) != 0) {
2300 device_printf(sc->nfe_dev,
2301 "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR);
2302
2303 ifp->if_oerrors++;
2304 } else
2305 ifp->if_opackets++;
2306 } else {
2307 if ((flags & NFE_TX_LASTFRAG_V2) == 0)
2308 continue;
2309 if ((flags & NFE_TX_ERROR_V2) != 0) {
2310 device_printf(sc->nfe_dev,
2311 "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR);
2312 ifp->if_oerrors++;
2313 } else
2314 ifp->if_opackets++;
2315 }
2316
2317 /* last fragment of the mbuf chain transmitted */
2318 KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
2319 bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
2320 BUS_DMASYNC_POSTWRITE);
2321 bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
2322 m_freem(data->m);
2323 data->m = NULL;
2324 }
2325
2326 if (prog > 0) {
2327 sc->nfe_force_tx = 0;
2328 sc->txq.next = cons;
2329 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2330 if (sc->txq.queued == 0)
2331 sc->nfe_watchdog_timer = 0;
2332 }
2333}
2334
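/*
 * Map an outgoing mbuf chain onto Tx descriptors, collapsing it when it
 * has too many fragments, and set the checksum/TSO/VLAN bits in the
 * first and last descriptors.
 */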
2335static int
2336nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
2337{
2338 struct nfe_desc32 *desc32 = NULL;
2339 struct nfe_desc64 *desc64 = NULL;
2340 bus_dmamap_t map;
2341 bus_dma_segment_t segs[NFE_MAX_SCATTER];
2342 int error, i, nsegs, prod, si;
2343 uint32_t tso_segsz;
2344 uint16_t cflags, flags;
2345 struct mbuf *m;
2346
2347 prod = si = sc->txq.cur;
2348 map = sc->txq.data[prod].tx_data_map;
2349
2350 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
2351 &nsegs, BUS_DMA_NOWAIT);
2352 if (error == EFBIG) {
2353 m = m_collapse(*m_head, M_DONTWAIT, NFE_MAX_SCATTER);
2354 if (m == NULL) {
2355 m_freem(*m_head);
2356 *m_head = NULL;
2357 return (ENOBUFS);
2358 }
2359 *m_head = m;
2360 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
2361 *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2362 if (error != 0) {
2363 m_freem(*m_head);
2364 *m_head = NULL;
2365 return (ENOBUFS);
2366 }
2367 } else if (error != 0)
2368 return (error);
2369 if (nsegs == 0) {
2370 m_freem(*m_head);
2371 *m_head = NULL;
2372 return (EIO);
2373 }
2374
2375 if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
2376 bus_dmamap_unload(sc->txq.tx_data_tag, map);
2377 return (ENOBUFS);
2378 }
2379
2380 m = *m_head;
2381 cflags = flags = 0;
2382 tso_segsz = 0;
2383 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2384 tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz <<
2385 NFE_TX_TSO_SHIFT;
2386 cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
2387 cflags |= NFE_TX_TSO;
2388 } else if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
2389 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2390 cflags |= NFE_TX_IP_CSUM;
2391 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2392 cflags |= NFE_TX_TCP_UDP_CSUM;
2393 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2394 cflags |= NFE_TX_TCP_UDP_CSUM;
2395 }
2396
2397 for (i = 0; i < nsegs; i++) {
2398 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2399 desc64 = &sc->txq.desc64[prod];
2400 desc64->physaddr[0] =
2401 htole32(NFE_ADDR_HI(segs[i].ds_addr));
2402 desc64->physaddr[1] =
2403 htole32(NFE_ADDR_LO(segs[i].ds_addr));
2404 desc64->vtag = 0;
2405 desc64->length = htole16(segs[i].ds_len - 1);
2406 desc64->flags = htole16(flags);
2407 } else {
2408 desc32 = &sc->txq.desc32[prod];
2409 desc32->physaddr =
2410 htole32(NFE_ADDR_LO(segs[i].ds_addr));
2411 desc32->length = htole16(segs[i].ds_len - 1);
2412 desc32->flags = htole16(flags);
2413 }
2414
2415 /*
2416 * Setting of the valid bit in the first descriptor is
2417		 * deferred until the whole chain is fully set up.
2418 */
2419 flags |= NFE_TX_VALID;
2420
2421 sc->txq.queued++;
2422 NFE_INC(prod, NFE_TX_RING_COUNT);
2423 }
2424
2425 /*
2426	 * The whole mbuf chain has been DMA mapped; fix the last/first descriptors.
2427	 * Checksum flags, vtag and TSO settings belong to the first fragment only.
2428 */
2429 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2430 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
2431 desc64 = &sc->txq.desc64[si];
2432 if ((m->m_flags & M_VLANTAG) != 0)
2433 desc64->vtag = htole32(NFE_TX_VTAG |
2434 m->m_pkthdr.ether_vtag);
2435 if (tso_segsz != 0) {
2436 /*
2437 * XXX
2438 * The following indicates the descriptor element
2439 * is a 32bit quantity.
2440 */
2441 desc64->length |= htole16((uint16_t)tso_segsz);
2442 desc64->flags |= htole16(tso_segsz >> 16);
2443 }
2444 /*
2445 * finally, set the valid/checksum/TSO bit in the first
2446 * descriptor.
2447 */
2448 desc64->flags |= htole16(NFE_TX_VALID | cflags);
2449 } else {
2450 if (sc->nfe_flags & NFE_JUMBO_SUP)
2451 desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
2452 else
2453 desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
2454 desc32 = &sc->txq.desc32[si];
2455 if (tso_segsz != 0) {
2456 /*
2457 * XXX
2458 * The following indicates the descriptor element
2459 * is a 32bit quantity.
2460 */
2461 desc32->length |= htole16((uint16_t)tso_segsz);
2462 desc32->flags |= htole16(tso_segsz >> 16);
2463 }
2464 /*
2465 * finally, set the valid/checksum/TSO bit in the first
2466 * descriptor.
2467 */
2468 desc32->flags |= htole16(NFE_TX_VALID | cflags);
2469 }
2470
2471 sc->txq.cur = prod;
2472 prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
2473 sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
2474 sc->txq.data[prod].tx_data_map = map;
2475 sc->txq.data[prod].m = m;
2476
2477 bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
2478
2479 return (0);
2480}
2481
2482
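/*
 * Compute the address/mask pair that matches all enabled multicast
 * groups and program it, together with the Rx filter mode, into the
 * hardware.
 */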
2483static void
2484nfe_setmulti(struct nfe_softc *sc)
2485{
2486 struct ifnet *ifp = sc->nfe_ifp;
2487 struct ifmultiaddr *ifma;
2488 int i;
2489 uint32_t filter;
2490 uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
2491 uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
2492 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2493 };
2494
2495 NFE_LOCK_ASSERT(sc);
2496
2497 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
2498 bzero(addr, ETHER_ADDR_LEN);
2499 bzero(mask, ETHER_ADDR_LEN);
2500 goto done;
2501 }
2502
2503 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
2504 bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
2505
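	/*
	 * The hardware filter is an (address, mask) pair: "addr" keeps the
	 * bits that all subscribed multicast addresses have in common, and
	 * "mask" ends up with the agreeing bits set and the differing bits
	 * cleared, so the MAC presumably ignores any bit that is clear in
	 * the mask.  For example, with only 01:00:5e:00:00:01 and
	 * 01:00:5e:00:00:02 subscribed, the last byte works out to
	 * addr[5] = 0x00 and mask[5] = 0xfc, which lets both addresses
	 * (and a few neighbouring ones) through.
	 */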
2506 if_maddr_rlock(ifp);
2507 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2508 u_char *addrp;
2509
2510 if (ifma->ifma_addr->sa_family != AF_LINK)
2511 continue;
2512
2513 addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
2514 for (i = 0; i < ETHER_ADDR_LEN; i++) {
2515 u_int8_t mcaddr = addrp[i];
2516 addr[i] &= mcaddr;
2517 mask[i] &= ~mcaddr;
2518 }
2519 }
2520 if_maddr_runlock(ifp);
2521
2522 for (i = 0; i < ETHER_ADDR_LEN; i++) {
2523 mask[i] |= addr[i];
2524 }
2525
2526done:
2527 addr[0] |= 0x01; /* make sure multicast bit is set */
2528
2529 NFE_WRITE(sc, NFE_MULTIADDR_HI,
2530 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2531 NFE_WRITE(sc, NFE_MULTIADDR_LO,
2532 addr[5] << 8 | addr[4]);
2533 NFE_WRITE(sc, NFE_MULTIMASK_HI,
2534 mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
2535 NFE_WRITE(sc, NFE_MULTIMASK_LO,
2536 mask[5] << 8 | mask[4]);
2537
2538 filter = NFE_READ(sc, NFE_RXFILTER);
2539 filter &= NFE_PFF_RX_PAUSE;
2540 filter |= NFE_RXFILTER_MAGIC;
2541 filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
2542 NFE_WRITE(sc, NFE_RXFILTER, filter);
2543}
2544
2545
2546static void
2547nfe_tx_task(void *arg, int pending)
2548{
2549 struct ifnet *ifp;
2550
2551 ifp = (struct ifnet *)arg;
2552 nfe_start(ifp);
2553}
2554
2555
2556static void
2557nfe_start(struct ifnet *ifp)
2558{
2559 struct nfe_softc *sc = ifp->if_softc;
2560 struct mbuf *m0;
2561 int enq;
2562
2563 NFE_LOCK(sc);
2564
2565 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2566 IFF_DRV_RUNNING || sc->nfe_link == 0) {
2567 NFE_UNLOCK(sc);
2568 return;
2569 }
2570
2571 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
2572 IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
2573 if (m0 == NULL)
2574 break;
2575
2576 if (nfe_encap(sc, &m0) != 0) {
2577 if (m0 == NULL)
2578 break;
2579 IFQ_DRV_PREPEND(&ifp->if_snd, m0);
2580 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2581 break;
2582 }
2583 enq++;
2584 ETHER_BPF_MTAP(ifp, m0);
2585 }
2586
2587 if (enq > 0) {
2588 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2589 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2590
2591 /* kick Tx */
2592 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2593
2594 /*
2595 * Set a timeout in case the chip goes out to lunch.
2596 */
2597 sc->nfe_watchdog_timer = 5;
2598 }
2599
2600 NFE_UNLOCK(sc);
2601}
2602
2603
2604static void
2605nfe_watchdog(struct ifnet *ifp)
2606{
2607 struct nfe_softc *sc = ifp->if_softc;
2608
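	/*
	 * The timer is armed (set to 5) by nfe_start(); only act once it
	 * was armed and has just counted down to zero.
	 */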
2609 if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
2610 return;
2611
2612 /* Check if we've lost Tx completion interrupt. */
2613 nfe_txeof(sc);
2614 if (sc->txq.queued == 0) {
2615 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2616 "-- recovering\n");
2617 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2618 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);
2619 return;
2620 }
2621 /* Check if we've lost start Tx command. */
2622 sc->nfe_force_tx++;
2623 if (sc->nfe_force_tx <= 3) {
2624 /*
2625 		 * If this turns out to be the common cause of watchdog
2626 		 * timeouts, this recovery code should move to nfe_txeof().
2627 */
2628 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2629 return;
2630 }
2631 sc->nfe_force_tx = 0;
2632
2633 if_printf(ifp, "watchdog timeout\n");
2634
2635 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2636 ifp->if_oerrors++;
2637 nfe_init_locked(sc);
2638}
2639
2640
2641static void
2642nfe_init(void *xsc)
2643{
2644 struct nfe_softc *sc = xsc;
2645
2646 NFE_LOCK(sc);
2647 nfe_init_locked(sc);
2648 NFE_UNLOCK(sc);
2649}
2650
2651
2652static void
2653nfe_init_locked(void *xsc)
2654{
2655 struct nfe_softc *sc = xsc;
2656 struct ifnet *ifp = sc->nfe_ifp;
2657 struct mii_data *mii;
2658 uint32_t val;
2659 int error;
2660
2661 NFE_LOCK_ASSERT(sc);
2662
2663 mii = device_get_softc(sc->nfe_miibus);
2664
2665 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2666 return;
2667
2668 nfe_stop(ifp);
2669
2670 sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
2671
2672 nfe_init_tx_ring(sc, &sc->txq);
2673 if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
2674 error = nfe_init_jrx_ring(sc, &sc->jrxq);
2675 else
2676 error = nfe_init_rx_ring(sc, &sc->rxq);
2677 if (error != 0) {
2678 device_printf(sc->nfe_dev,
2679 "initialization failed: no memory for rx buffers\n");
2680 nfe_stop(ifp);
2681 return;
2682 }
2683
2684 val = 0;
2685 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
2686 val |= NFE_MAC_ADDR_INORDER;
2687 NFE_WRITE(sc, NFE_TX_UNK, val);
2688 NFE_WRITE(sc, NFE_STATUS, 0);
2689
2690 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
2691 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);
2692
2693 sc->rxtxctl = NFE_RXTX_BIT2;
2694 if (sc->nfe_flags & NFE_40BIT_ADDR)
2695 sc->rxtxctl |= NFE_RXTX_V3MAGIC;
2696 else if (sc->nfe_flags & NFE_JUMBO_SUP)
2697 sc->rxtxctl |= NFE_RXTX_V2MAGIC;
2698
2699 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2700 sc->rxtxctl |= NFE_RXTX_RXCSUM;
2701 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2702 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;
2703
2704 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
2705 DELAY(10);
2706 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2707
2708 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2709 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
2710 else
2711 NFE_WRITE(sc, NFE_VTAG_CTL, 0);
2712
2713 NFE_WRITE(sc, NFE_SETUP_R6, 0);
2714
2715 /* set MAC address */
2716 nfe_set_macaddr(sc, IF_LLADDR(ifp));
2717
2718 /* tell MAC where rings are in memory */
2719 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
2720 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2721 NFE_ADDR_HI(sc->jrxq.jphysaddr));
2722 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2723 NFE_ADDR_LO(sc->jrxq.jphysaddr));
2724 } else {
2725 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2726 NFE_ADDR_HI(sc->rxq.physaddr));
2727 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2728 NFE_ADDR_LO(sc->rxq.physaddr));
2729 }
2730 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
2731 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));
2732
2733 NFE_WRITE(sc, NFE_RING_SIZE,
2734 (NFE_RX_RING_COUNT - 1) << 16 |
2735 (NFE_TX_RING_COUNT - 1));
2736
2737 NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);
2738
2739 /* force MAC to wakeup */
2740 val = NFE_READ(sc, NFE_PWR_STATE);
2741 if ((val & NFE_PWR_WAKEUP) == 0)
2742 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
2743 DELAY(10);
2744 val = NFE_READ(sc, NFE_PWR_STATE);
2745 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);
2746
2747#if 1
2748 /* configure interrupts coalescing/mitigation */
2749 NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
2750#else
2751 /* no interrupt mitigation: one interrupt per packet */
2752 NFE_WRITE(sc, NFE_IMTIMER, 970);
2753#endif
2754
2755 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
2756 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
2757 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
2758
2759 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
2760 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
2761
2762 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
2763 /* Disable WOL. */
2764 NFE_WRITE(sc, NFE_WOL_CTL, 0);
2765
2766 sc->rxtxctl &= ~NFE_RXTX_BIT2;
2767 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2768 DELAY(10);
2769 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
2770
2771 /* set Rx filter */
2772 nfe_setmulti(sc);
2773
2774 /* enable Rx */
2775 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
2776
2777 /* enable Tx */
2778 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
2779
2780 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
2781
2782 /* Clear hardware stats. */
2783 nfe_stats_clear(sc);
2784
2785#ifdef DEVICE_POLLING
2786 if (ifp->if_capenable & IFCAP_POLLING)
2787 nfe_disable_intr(sc);
2788 else
2789#endif
2790 nfe_set_intr(sc);
2791 nfe_enable_intr(sc); /* enable interrupts */
2792
2793 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2794 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2795
2796 sc->nfe_link = 0;
2797 mii_mediachg(mii);
2798
2799 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2800}
2801
2802
2803static void
2804nfe_stop(struct ifnet *ifp)
2805{
2806 struct nfe_softc *sc = ifp->if_softc;
2807 struct nfe_rx_ring *rx_ring;
2808 struct nfe_jrx_ring *jrx_ring;
2809 struct nfe_tx_ring *tx_ring;
2810 struct nfe_rx_data *rdata;
2811 struct nfe_tx_data *tdata;
2812 int i;
2813
2814 NFE_LOCK_ASSERT(sc);
2815
2816 sc->nfe_watchdog_timer = 0;
2817 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2818
2819 callout_stop(&sc->nfe_stat_ch);
2820
2821 /* abort Tx */
2822 NFE_WRITE(sc, NFE_TX_CTL, 0);
2823
2824 /* disable Rx */
2825 NFE_WRITE(sc, NFE_RX_CTL, 0);
2826
2827 /* disable interrupts */
2828 nfe_disable_intr(sc);
2829
2830 sc->nfe_link = 0;
2831
2832 /* free Rx and Tx mbufs still in the queues. */
2833 rx_ring = &sc->rxq;
2834 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
2835 rdata = &rx_ring->data[i];
2836 if (rdata->m != NULL) {
2837 bus_dmamap_sync(rx_ring->rx_data_tag,
2838 rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2839 bus_dmamap_unload(rx_ring->rx_data_tag,
2840 rdata->rx_data_map);
2841 m_freem(rdata->m);
2842 rdata->m = NULL;
2843 }
2844 }
2845
2846 if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
2847 jrx_ring = &sc->jrxq;
2848 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
2849 rdata = &jrx_ring->jdata[i];
2850 if (rdata->m != NULL) {
2851 bus_dmamap_sync(jrx_ring->jrx_data_tag,
2852 rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2853 bus_dmamap_unload(jrx_ring->jrx_data_tag,
2854 rdata->rx_data_map);
2855 m_freem(rdata->m);
2856 rdata->m = NULL;
2857 }
2858 }
2859 }
2860
2861 tx_ring = &sc->txq;
2862 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
2863 tdata = &tx_ring->data[i];
2864 if (tdata->m != NULL) {
2865 bus_dmamap_sync(tx_ring->tx_data_tag,
2866 tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
2867 bus_dmamap_unload(tx_ring->tx_data_tag,
2868 tdata->tx_data_map);
2869 m_freem(tdata->m);
2870 tdata->m = NULL;
2871 }
2872 }
2873 /* Update hardware stats. */
2874 nfe_stats_update(sc);
2875}
2876
2877
2878static int
2879nfe_ifmedia_upd(struct ifnet *ifp)
2880{
2881 struct nfe_softc *sc = ifp->if_softc;
2882 struct mii_data *mii;
2883
2884 NFE_LOCK(sc);
2885 mii = device_get_softc(sc->nfe_miibus);
2886 mii_mediachg(mii);
2887 NFE_UNLOCK(sc);
2888
2889 return (0);
2890}
2891
2892
2893static void
2894nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2895{
2896 struct nfe_softc *sc;
2897 struct mii_data *mii;
2898
2899 sc = ifp->if_softc;
2900
2901 NFE_LOCK(sc);
2902 mii = device_get_softc(sc->nfe_miibus);
2903 mii_pollstat(mii);
2904 NFE_UNLOCK(sc);
2905
2906 ifmr->ifm_active = mii->mii_media_active;
2907 ifmr->ifm_status = mii->mii_media_status;
2908}
2909
2910
2911void
2912nfe_tick(void *xsc)
2913{
2914 struct nfe_softc *sc;
2915 struct mii_data *mii;
2916 struct ifnet *ifp;
2917
2918 sc = (struct nfe_softc *)xsc;
2919
2920 NFE_LOCK_ASSERT(sc);
2921
2922 ifp = sc->nfe_ifp;
2923
2924 mii = device_get_softc(sc->nfe_miibus);
2925 mii_tick(mii);
2926 nfe_stats_update(sc);
2927 nfe_watchdog(ifp);
2928 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2929}
2930
2931
2932static int
2933nfe_shutdown(device_t dev)
2934{
2935
2936 return (nfe_suspend(dev));
2937}
2938
2939
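/*
 * Read the station address from the chip.  Some chips store it in
 * reversed byte order; NFE_CORRECT_MACADDR marks the chips that store it
 * in the usual order, so the two branches below differ only in how the
 * bytes are shuffled.
 */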
2940static void
2941nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
2942{
2943 uint32_t val;
2944
2945 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
2946 val = NFE_READ(sc, NFE_MACADDR_LO);
2947 addr[0] = (val >> 8) & 0xff;
2948 addr[1] = (val & 0xff);
2949
2950 val = NFE_READ(sc, NFE_MACADDR_HI);
2951 addr[2] = (val >> 24) & 0xff;
2952 addr[3] = (val >> 16) & 0xff;
2953 addr[4] = (val >> 8) & 0xff;
2954 addr[5] = (val & 0xff);
2955 } else {
2956 val = NFE_READ(sc, NFE_MACADDR_LO);
2957 addr[5] = (val >> 8) & 0xff;
2958 addr[4] = (val & 0xff);
2959
2960 val = NFE_READ(sc, NFE_MACADDR_HI);
2961 addr[3] = (val >> 24) & 0xff;
2962 addr[2] = (val >> 16) & 0xff;
2963 addr[1] = (val >> 8) & 0xff;
2964 addr[0] = (val & 0xff);
2965 }
2966}
2967
2968
2969static void
2970nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
2971{
2972
2973 NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]);
2974 NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
2975 addr[1] << 8 | addr[0]);
2976}
2977
2978
2979/*
2980 * Map a single buffer address.
2981 */
2982
2983static void
2984nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2985{
2986 struct nfe_dmamap_arg *ctx;
2987
2988 if (error != 0)
2989 return;
2990
2991 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
2992
2993 ctx = (struct nfe_dmamap_arg *)arg;
2994 ctx->nfe_busaddr = segs[0].ds_addr;
2995}
2996
2997
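/*
 * Sysctl handler helper that accepts a new integer value only if it lies
 * within [low, high].
 */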
2998static int
2999sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3000{
3001 int error, value;
3002
3003 if (!arg1)
3004 return (EINVAL);
3005 value = *(int *)arg1;
3006 error = sysctl_handle_int(oidp, &value, 0, req);
3007 if (error || !req->newptr)
3008 return (error);
3009 if (value < low || value > high)
3010 return (EINVAL);
3011 *(int *)arg1 = value;
3012
3013 return (0);
3014}
3015
3016
3017static int
3018sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)
3019{
3020
3021 return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN,
3022 NFE_PROC_MAX));
3023}
3024
3025
3026#define NFE_SYSCTL_STAT_ADD32(c, h, n, p, d) \
3027 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
3028#define NFE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
3029 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
3030
3031static void
3032nfe_sysctl_node(struct nfe_softc *sc)
3033{
3034 struct sysctl_ctx_list *ctx;
3035 struct sysctl_oid_list *child, *parent;
3036 struct sysctl_oid *tree;
3037 struct nfe_hw_stats *stats;
3038 int error;
3039
3040 stats = &sc->nfe_stats;
3041 ctx = device_get_sysctl_ctx(sc->nfe_dev);
3042 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev));
3043 SYSCTL_ADD_PROC(ctx, child,
3044 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
3045 &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
3046 "max number of Rx events to process");
3047
3048 sc->nfe_process_limit = NFE_PROC_DEFAULT;
3049 error = resource_int_value(device_get_name(sc->nfe_dev),
3050 device_get_unit(sc->nfe_dev), "process_limit",
3051 &sc->nfe_process_limit);
3052 if (error == 0) {
3053 if (sc->nfe_process_limit < NFE_PROC_MIN ||
3054 sc->nfe_process_limit > NFE_PROC_MAX) {
3055 device_printf(sc->nfe_dev,
3056 "process_limit value out of range; "
3057 "using default: %d\n", NFE_PROC_DEFAULT);
3058 sc->nfe_process_limit = NFE_PROC_DEFAULT;
3059 }
3060 }
3061
3062 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3063 return;
3064
3065 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
3066 NULL, "NFE statistics");
3067 parent = SYSCTL_CHILDREN(tree);
3068
3069 /* Rx statistics. */
3070 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
3071 NULL, "Rx MAC statistics");
3072 child = SYSCTL_CHILDREN(tree);
3073
3074 NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors",
3075 &stats->rx_frame_errors, "Framing Errors");
3076 NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes",
3077 &stats->rx_extra_bytes, "Extra Bytes");
3078 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3079 &stats->rx_late_cols, "Late Collisions");
3080 NFE_SYSCTL_STAT_ADD32(ctx, child, "runts",
3081 &stats->rx_runts, "Runts");
3082 NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos",
3083 &stats->rx_jumbos, "Jumbos");
3084 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns",
3085 &stats->rx_fifo_overuns, "FIFO Overruns");
3086 NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors",
3087 &stats->rx_crc_errors, "CRC Errors");
3088 NFE_SYSCTL_STAT_ADD32(ctx, child, "fae",
3089 &stats->rx_fae, "Frame Alignment Errors");
3090 NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors",
3091 &stats->rx_len_errors, "Length Errors");
3092 NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3093 &stats->rx_unicast, "Unicast Frames");
3094 NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3095 &stats->rx_multicast, "Multicast Frames");
3096 NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3097 &stats->rx_broadcast, "Broadcast Frames");
3098 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3099 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3100 &stats->rx_octets, "Octets");
3101 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3102 &stats->rx_pause, "Pause frames");
3103 NFE_SYSCTL_STAT_ADD32(ctx, child, "drops",
3104 &stats->rx_drops, "Drop frames");
3105 }
3106
3107 /* Tx statistics. */
3108 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
3109 NULL, "Tx MAC statistics");
3110 child = SYSCTL_CHILDREN(tree);
3111 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3112 &stats->tx_octets, "Octets");
3113 NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits",
3114 &stats->tx_zero_rexmits, "Zero Retransmits");
3115 NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits",
3116 &stats->tx_one_rexmits, "One Retransmits");
3117 NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits",
3118 &stats->tx_multi_rexmits, "Multiple Retransmits");
3119 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3120 &stats->tx_late_cols, "Late Collisions");
3121 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns",
3122 &stats->tx_fifo_underuns, "FIFO Underruns");
3123 NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts",
3124 	    &stats->tx_carrier_losts, "Carrier Lost Errors");
3125 NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals",
3126 &stats->tx_excess_deferals, "Excess Deferrals");
3127 NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors",
3128 &stats->tx_retry_errors, "Retry Errors");
3129 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3130 NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals",
3131 &stats->tx_deferals, "Deferrals");
3132 NFE_SYSCTL_STAT_ADD32(ctx, child, "frames",
3133 &stats->tx_frames, "Frames");
3134 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3135 &stats->tx_pause, "Pause Frames");
3136 }
3137 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3138 		NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3139 		    &stats->tx_unicast, "Unicast Frames");
3140 		NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3141 		    &stats->tx_multicast, "Multicast Frames");
3142 		NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3143 		    &stats->tx_broadcast, "Broadcast Frames");
3144 }
3145}
3146
3147#undef NFE_SYSCTL_STAT_ADD32
3148#undef NFE_SYSCTL_STAT_ADD64
3149
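/*
 * The MIB counters appear to be clear-on-read, so reading and discarding
 * every counter is sufficient to reset the hardware statistics.
 */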
3150static void
3151nfe_stats_clear(struct nfe_softc *sc)
3152{
3153 int i, mib_cnt;
3154
3155 if ((sc->nfe_flags & NFE_MIB_V1) != 0)
3156 mib_cnt = NFE_NUM_MIB_STATV1;
3157 else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0)
3158 mib_cnt = NFE_NUM_MIB_STATV2;
3159 else
3160 return;
3161
3162 for (i = 0; i < mib_cnt; i += sizeof(uint32_t))
3163 NFE_READ(sc, NFE_TX_OCTET + i);
3164
3165 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3166 NFE_READ(sc, NFE_TX_UNICAST);
3167 NFE_READ(sc, NFE_TX_MULTICAST);
3168 NFE_READ(sc, NFE_TX_BROADCAST);
3169 }
3170}
3171
3172static void
3173nfe_stats_update(struct nfe_softc *sc)
3174{
3175 struct nfe_hw_stats *stats;
3176
3177 NFE_LOCK_ASSERT(sc);
3178
3179 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3180 return;
3181
3182 stats = &sc->nfe_stats;
3183 stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET);
3184 stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT);
3185 stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT);
3186 stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT);
3187 stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL);
3188 stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN);
3189 stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST);
3190 stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL);
3191 stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR);
3192 stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR);
3193 stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES);
3194 stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL);
3195 stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT);
3196 stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO);
3197 stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN);
3198 stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR);
3199 stats->rx_fae += NFE_READ(sc, NFE_RX_FAE);
3200 stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR);
3201 stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST);
3202 stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST);
3203 stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST);
3204
3205 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3206 stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL);
3207 stats->tx_frames += NFE_READ(sc, NFE_TX_FRAME);
3208 stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET);
3209 stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE);
3210 stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE);
3211 stats->rx_drops += NFE_READ(sc, NFE_RX_DROP);
3212 }
3213
3214 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3215 stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST);
3216 stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST);
3217 		stats->tx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST);
3218 }
3219}
3220
3221
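/*
 * Force the PHY down to a 10/100Mbps link for Wake On LAN; a gigabit
 * link typically cannot be maintained in the low-power state used for
 * WOL.  Renegotiate without the 1000baseT advertisement and, failing
 * that, fall back to a forced 100Mbps full-duplex configuration.
 */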
3222static void
3223nfe_set_linkspeed(struct nfe_softc *sc)
3224{
3225 struct mii_softc *miisc;
3226 struct mii_data *mii;
3227 int aneg, i, phyno;
3228
3229 NFE_LOCK_ASSERT(sc);
3230
3231 mii = device_get_softc(sc->nfe_miibus);
3232 mii_pollstat(mii);
3233 aneg = 0;
3234 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
3235 (IFM_ACTIVE | IFM_AVALID)) {
3236 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
3237 case IFM_10_T:
3238 case IFM_100_TX:
3239 return;
3240 case IFM_1000_T:
3241 aneg++;
3242 break;
3243 default:
3244 break;
3245 }
3246 }
3247 phyno = 0;
3248 if (mii->mii_instance) {
3249 miisc = LIST_FIRST(&mii->mii_phys);
3250 phyno = miisc->mii_phy;
3251 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3252 mii_phy_reset(miisc);
3253 } else
3254 return;
3255 nfe_miibus_writereg(sc->nfe_dev, phyno, MII_100T2CR, 0);
3256 nfe_miibus_writereg(sc->nfe_dev, phyno,
3257 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
3258 nfe_miibus_writereg(sc->nfe_dev, phyno,
3259 MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
3260 DELAY(1000);
3261 if (aneg != 0) {
3262 /*
3263 		 * Poll link state until nfe(4) gets a 10/100Mbps link.
3264 */
3265 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
3266 mii_pollstat(mii);
3267 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
3268 == (IFM_ACTIVE | IFM_AVALID)) {
3269 switch (IFM_SUBTYPE(mii->mii_media_active)) {
3270 case IFM_10_T:
3271 case IFM_100_TX:
3272 nfe_mac_config(sc, mii);
3273 return;
3274 default:
3275 break;
3276 }
3277 }
3278 NFE_UNLOCK(sc);
3279 pause("nfelnk", hz);
3280 NFE_LOCK(sc);
3281 }
3282 if (i == MII_ANEGTICKS_GIGE)
3283 device_printf(sc->nfe_dev,
3284 			    "establishing a link failed, WOL may not work!\n");
3285 }
3286 /*
3287 	 * No link; force the MAC to a 100Mbps, full-duplex link.
3288 	 * This is the last resort and may or may not work.
3289 */
3290 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
3291 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
3292 nfe_mac_config(sc, mii);
3293}
3294
3295
3296static void
3297nfe_set_wol(struct nfe_softc *sc)
3298{
3299 struct ifnet *ifp;
3300 uint32_t wolctl;
3301 int pmc;
3302 uint16_t pmstat;
3303
3304 NFE_LOCK_ASSERT(sc);
3305
3306 if (pci_find_extcap(sc->nfe_dev, PCIY_PMG, &pmc) != 0)
3307 return;
3308 ifp = sc->nfe_ifp;
3309 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
3310 wolctl = NFE_WOL_MAGIC;
3311 else
3312 wolctl = 0;
3313 NFE_WRITE(sc, NFE_WOL_CTL, wolctl);
3314 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
3315 nfe_set_linkspeed(sc);
3316 if ((sc->nfe_flags & NFE_PWR_MGMT) != 0)
3317 NFE_WRITE(sc, NFE_PWR2_CTL,
3318 NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_GATE_CLOCKS);
3319 /* Enable RX. */
3320 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 0);
3321 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 0);
3322 NFE_WRITE(sc, NFE_RX_CTL, NFE_READ(sc, NFE_RX_CTL) |
3323 NFE_RX_START);
3324 }
3325 /* Request PME if WOL is requested. */
3326 pmstat = pci_read_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, 2);
3327 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
3328 if ((ifp->if_capenable & IFCAP_WOL) != 0)
3329 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3330 pci_write_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
3331}