if_nfe.c: r163437 (2006-10-16) → r163503 (2006-10-19)
/*	$OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $	*/

/*-
 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any

--- 7 unchanged lines hidden ---

 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/nfe/if_nfe.c 163503 2006-10-19 10:01:26Z obrien $");

/* Uncomment the following line to enable polling. */
/* #define DEVICE_POLLING */

#define NFE_NO_JUMBO
#define NFE_CSUM
#define NFE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
#define NVLAN 0

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>

--- 29 unchanged lines hidden ---

#include <dev/nfe/if_nfereg.h>
#include <dev/nfe/if_nfevar.h>

MODULE_DEPEND(nfe, pci, 1, 1, 1);
MODULE_DEPEND(nfe, ether, 1, 1, 1);
MODULE_DEPEND(nfe, miibus, 1, 1, 1);
#include "miibus_if.h"

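/*
 * The MODULE_DEPEND() declarations above tell the kernel linker to load
 * pci, ether and miibus before this module initializes; "miibus_if.h" is
 * the generated header for the MII bus methods implemented below.
 */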
static int  nfe_probe(device_t);
static int  nfe_attach(device_t);
static int  nfe_detach(device_t);
static void nfe_shutdown(device_t);
static int  nfe_miibus_readreg(device_t, int, int);
static int  nfe_miibus_writereg(device_t, int, int, int);
static void nfe_miibus_statchg(device_t);
static int  nfe_ioctl(struct ifnet *, u_long, caddr_t);
static void nfe_intr(void *);
static void nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
static void nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
static void nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
static void nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
static void nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
static void nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
static void nfe_rxeof(struct nfe_softc *);
static void nfe_txeof(struct nfe_softc *);

--- 24 unchanged lines hidden ---

static void nfe_set_macaddr(struct nfe_softc *, u_char *);
static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);
#ifdef DEVICE_POLLING
static void nfe_poll_locked(struct ifnet *, enum poll_cmd, int);
#endif

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define NFE_LOCK(_sc)		mtx_lock(&(_sc)->nfe_mtx)
#define NFE_UNLOCK(_sc)		mtx_unlock(&(_sc)->nfe_mtx)
#define NFE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)

#define letoh16(x)	le16toh(x)
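/*
 * letoh16() is OpenBSD's name for little-endian-to-host conversion;
 * the alias keeps OpenBSD-derived expressions such as
 * "len = letoh16(desc32->length);" compiling unchanged on FreeBSD,
 * where the native spelling is le16toh().
 */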

#define NV_RID		0x10

static device_method_t nfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, nfe_probe),
	DEVMETHOD(device_attach, nfe_attach),
	DEVMETHOD(device_detach, nfe_detach),
	DEVMETHOD(device_shutdown, nfe_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg, nfe_miibus_readreg),
	DEVMETHOD(miibus_writereg, nfe_miibus_writereg),
	DEVMETHOD(miibus_statchg, nfe_miibus_statchg),

	{ 0, 0 }
};

static driver_t nfe_driver = {
	"nfe",
	nfe_methods,
	sizeof(struct nfe_softc)
};

static devclass_t nfe_devclass;

DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);

static struct nfe_type nfe_devs[] = {
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
174 "NVIDIA nForce MCP Networking Adapter"},
174 "NVIDIA nForce MCP Networking Adapter"},
175 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
175 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
176 "NVIDIA nForce2 MCP2 Networking Adapter"},
176 "NVIDIA nForce2 MCP2 Networking Adapter"},
177 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
177 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
178 "NVIDIA nForce2 400 MCP4 Networking Adapter"},
178 "NVIDIA nForce2 400 MCP4 Networking Adapter"},
179 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
179 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
180 "NVIDIA nForce2 400 MCP5 Networking Adapter"},
180 "NVIDIA nForce2 400 MCP5 Networking Adapter"},
181 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
181 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
182 "NVIDIA nForce3 MCP3 Networking Adapter"},
182 "NVIDIA nForce3 MCP3 Networking Adapter"},
183 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
183 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
184 "NVIDIA nForce3 250 MCP6 Networking Adapter"},
184 "NVIDIA nForce3 250 MCP6 Networking Adapter"},
185 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
185 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
186 "NVIDIA nForce3 MCP7 Networking Adapter"},
186 "NVIDIA nForce3 MCP7 Networking Adapter"},
187 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
187 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
188 "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
188 "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
189 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
189 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
190 "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
190 "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
191 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
191 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
192 "NVIDIA nForce MCP04 Networking Adapter"}, // MCP10
192 "NVIDIA nForce MCP04 Networking Adapter"}, // MCP10
193 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
193 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
194 "NVIDIA nForce MCP04 Networking Adapter"}, // MCP11
194 "NVIDIA nForce MCP04 Networking Adapter"}, // MCP11
195 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
195 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
196 "NVIDIA nForce 430 MCP12 Networking Adapter"},
196 "NVIDIA nForce 430 MCP12 Networking Adapter"},
197 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
197 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
198 "NVIDIA nForce 430 MCP13 Networking Adapter"},
198 "NVIDIA nForce 430 MCP13 Networking Adapter"},
199 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
199 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
200 "NVIDIA nForce MCP55 Networking Adapter"},
200 "NVIDIA nForce MCP55 Networking Adapter"},
201 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
201 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
202 "NVIDIA nForce MCP55 Networking Adapter"},
202 "NVIDIA nForce MCP55 Networking Adapter"},
203 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
203 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
204 "NVIDIA nForce MCP61 Networking Adapter"},
204 "NVIDIA nForce MCP61 Networking Adapter"},
205 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
205 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
206 "NVIDIA nForce MCP61 Networking Adapter"},
206 "NVIDIA nForce MCP61 Networking Adapter"},
207 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
207 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
208 "NVIDIA nForce MCP61 Networking Adapter"},
208 "NVIDIA nForce MCP61 Networking Adapter"},
209 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
209 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
210 "NVIDIA nForce MCP61 Networking Adapter"},
210 "NVIDIA nForce MCP61 Networking Adapter"},
211 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
211 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
212 "NVIDIA nForce MCP65 Networking Adapter"},
212 "NVIDIA nForce MCP65 Networking Adapter"},
213 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
213 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
214 "NVIDIA nForce MCP65 Networking Adapter"},
214 "NVIDIA nForce MCP65 Networking Adapter"},
215 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
215 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
216 "NVIDIA nForce MCP65 Networking Adapter"},
216 "NVIDIA nForce MCP65 Networking Adapter"},
217 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
217 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
218 "NVIDIA nForce MCP65 Networking Adapter"},
218 "NVIDIA nForce MCP65 Networking Adapter"},
	{0, 0, NULL}
};


/* Probe for supported hardware IDs */
static int
nfe_probe(device_t dev)
{

--- 8 unchanged lines hidden ---

			return (0);
		}
		t++;
	}

	return (ENXIO);
}

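/*
 * Returning 0 from the probe method claims the device at default
 * priority; returning ENXIO lets newbus offer the device to another
 * driver.
 */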

static int
nfe_attach(device_t dev)
{
	struct nfe_softc *sc;
	struct ifnet *ifp;
	int unit, error = 0, rid;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	sc->nfe_dev = dev;
	sc->nfe_unit = unit;

	mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);
	callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);

	pci_enable_busmaster(dev);

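	/*
	 * nfe_mtx was initialized with callout_init_mtx() above, so the
	 * stat callout always fires with the driver lock already held.
	 */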
	rid = NV_RID;
	sc->nfe_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
	    0, ~0, 1, RF_ACTIVE);

	if (sc->nfe_res == NULL) {
		printf("nfe%d: couldn't map ports/memory\n", unit);
		error = ENXIO;
		goto fail;

--- 31 unchanged lines hidden ---

	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
		sc->nfe_flags |= NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	}
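
	/*
	 * At this point nfe_flags describes the chip's capabilities:
	 * NFE_JUMBO_SUP (jumbo frames), NFE_40BIT_ADDR (40-bit
	 * descriptor format), NFE_HW_CSUM (checksum offload) and, on
	 * MCP55, NFE_HW_VLAN (hardware VLAN tagging).
	 */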

#ifndef NFE_NO_JUMBO
	/* enable jumbo frames for adapters that support it */
	if (sc->nfe_flags & NFE_JUMBO_SUP)
		sc->nfe_flags |= NFE_USE_JUMBO;
#endif

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
#define NFE_NSEG_NEW 32
	error = bus_dma_tag_create(NULL,	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MAXBSIZE, NFE_NSEG_NEW,		/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW,			/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->nfe_parent_tag);
	if (error)
		goto fail;

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("nfe%d: could not allocate Tx ring\n", unit);

--- 39 unchanged lines hidden ---

#endif
	ifp->if_capenable = ifp->if_capabilities;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/* Do MII setup */
	if (mii_phy_probe(dev, &sc->nfe_miibus, nfe_ifmedia_upd,
	    nfe_ifmedia_sts)) {
		printf("nfe%d: MII without any phy!\n", unit);
		error = ENXIO;
		goto fail;
	}

	ether_ifattach(ifp, sc->eaddr);

	error = bus_setup_intr(dev, sc->nfe_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    nfe_intr, sc, &sc->nfe_intrhand);

	if (error) {
		printf("nfe%d: couldn't set up irq\n", unit);
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		nfe_detach(dev);

	return (error);
}


static int
nfe_detach(device_t dev)
{
	struct nfe_softc *sc;
	struct ifnet *ifp;
	u_char eaddr[ETHER_ADDR_LEN];
	int i;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
	ifp = sc->nfe_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);

--- 76 unchanged lines hidden ---

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}


static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_get_softc(dev);
	u_int32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

--- 19 unchanged lines hidden ---

		DPRINTFN(2, ("nfe%d: could not read PHY\n", sc->nfe_unit));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("nfe%d: mii read phy %d reg 0x%x ret 0x%x\n",
	    sc->nfe_unit, phy, reg, val));

	return val;
}


static int
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_get_softc(dev);
	u_int32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

--- 8 unchanged lines hidden ---

	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
	return 0;
}


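/*
 * Both MII accessors share one handshake: reset NFE_PHY_STATUS, kick
 * the command into NFE_PHY_CTL, then poll (ntries, capped at 1000 in
 * the elided loop) until NFE_PHY_BUSY clears before the data register
 * is trusted.
 */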
static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;

--- 6 unchanged lines hidden ---

	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    PAGE_SIZE, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NFE_RX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
	    NFE_RX_RING_COUNT * descsize,	/* maxsegsize */
	    BUS_DMA_ALLOCNOW,			/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->rx_desc_tag);
	if (error != 0) {
		printf("nfe%d: could not create desc DMA tag\n", sc->nfe_unit);
		goto fail;
	}

	/* allocate memory for the descriptors */
	error = bus_dmamem_alloc(ring->rx_desc_tag, (void **)desc,
	    BUS_DMA_NOWAIT, &ring->rx_desc_map);
	if (error != 0) {
		printf("nfe%d: could not create desc DMA map\n", sc->nfe_unit);
		goto fail;
	}

	/* map desc to device visible address space */
	error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, *desc,
	    NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs,
	    &ring->rx_desc_segs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("nfe%d: could not load desc DMA map\n", sc->nfe_unit);
		goto fail;
	}

	bzero(*desc, NFE_RX_RING_COUNT * descsize);
	ring->rx_desc_addr = ring->rx_desc_segs.ds_addr;
	ring->physaddr = ring->rx_desc_addr;

	if (sc->nfe_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			printf("nfe%d: could not allocate jumbo frames\n",
			    sc->nfe_unit);
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("nfe%d: could not allocate rx mbuf\n",
			    sc->nfe_unit);
			error = ENOMEM;
			goto fail;
		}

		if (sc->nfe_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				printf("nfe%d: could not allocate jumbo buffer\n",
				    sc->nfe_unit);
				goto fail;
			}
			data->m->m_data = (void *)jbuf->buf;
			data->m->m_len = data->m->m_pkthdr.len = NFE_JBYTES;
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, nfe_jfree,
			    (struct nfe_softc *)sc, 0, EXT_NET_DRV);
			/* m_adj(data->m, ETHER_ALIGN); */
			physaddr = jbuf->physaddr;
		} else {
			error = bus_dma_tag_create(sc->nfe_parent_tag,
			    ETHER_ALIGN, 0,		/* alignment, boundary */
			    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			    BUS_SPACE_MAXADDR,		/* highaddr */
			    NULL, NULL,			/* filter, filterarg */
			    MCLBYTES, 1,		/* maxsize, nsegments */
			    MCLBYTES,			/* maxsegsize */
			    BUS_DMA_ALLOCNOW,		/* flags */
			    NULL, NULL,			/* lockfunc, lockarg */
			    &data->rx_data_tag);
			if (error != 0) {
				printf("nfe%d: could not create DMA map\n",
				    sc->nfe_unit);
				goto fail;
			}

			error = bus_dmamap_create(data->rx_data_tag, 0,
			    &data->rx_data_map);
			if (error != 0) {
				printf("nfe%d: could not allocate mbuf cluster\n",
				    sc->nfe_unit);
				goto fail;
			}

			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(data->rx_data_tag,
			    data->rx_data_map, mtod(data->m, void *), MCLBYTES,
			    nfe_dma_map_segs, &data->rx_data_segs,
			    BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("nfe%d: could not load rx buf DMA map\n",
				    sc->nfe_unit);
				goto fail;
			}

			data->rx_data_addr = data->rx_data_segs.ds_addr;
			physaddr = data->rx_data_addr;

		}

		if (sc->nfe_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif

--- 4 unchanged lines hidden ---

			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}

	}

	bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);

	return error;
}

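/*
 * Every Rx descriptor now carries NFE_RX_READY, i.e. ownership of its
 * buffer belongs to the chip; the PREWRITE sync flushes the freshly
 * initialized ring to memory before the hardware is pointed at it.
 */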
static int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    PAGE_SIZE, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    NFE_JPOOL_SIZE, 1,		/* maxsize, nsegments */
	    NFE_JPOOL_SIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &ring->rx_jumbo_tag);
	if (error != 0) {
		printf("nfe%d: could not create jumbo DMA tag\n", sc->nfe_unit);
		goto fail;
	}

	error = bus_dmamem_alloc(ring->rx_jumbo_tag, (void **)&ring->jpool,
	    BUS_DMA_NOWAIT, &ring->rx_jumbo_map);
	if (error != 0) {
		printf("nfe%d: could not create jumbo DMA memory\n",
		    sc->nfe_unit);
		goto fail;
	}

	error = bus_dmamap_load(ring->rx_jumbo_tag, ring->rx_jumbo_map,
	    ring->jpool, NFE_JPOOL_SIZE, nfe_dma_map_segs, &ring->rx_jumbo_segs,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("nfe%d: could not load jumbo DMA map\n", sc->nfe_unit);
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

--- 22 unchanged lines hidden ---


static void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jpool != NULL) {
#if 0
		bus_dmamem_unmap(ring->rx_jumbo_tag, ring->jpool,
		    NFE_JPOOL_SIZE);
#endif
		bus_dmamem_free(ring->rx_jumbo_tag, &ring->rx_jumbo_segs,
		    ring->rx_jumbo_map);
	}
	if (ring->rx_jumbo_map != NULL) {
		bus_dmamap_sync(ring->rx_jumbo_tag, ring->rx_jumbo_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->rx_jumbo_tag, ring->rx_jumbo_map);
		bus_dmamap_destroy(ring->rx_jumbo_tag, ring->rx_jumbo_map);
	}
}

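/*
 * The jumbo pool is a single contiguous NFE_JPOOL_SIZE allocation cut
 * into NFE_JBYTES chunks; nfe_jalloc() and nfe_jfree() below simply
 * pop and push jbufs on the jfreelist SLIST.
 */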
static struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct nfe_jbuf *jbuf;

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf == NULL)
		return NULL;
	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
static void
nfe_jfree(void *buf, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = ((vm_offset_t)buf - (vm_offset_t)sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		printf("nfe%d: request to free a buffer (%p) not managed by us\n",
		    sc->nfe_unit, buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
}

--- 8 unchanged lines hidden ---

			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}


static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{

--- 5 unchanged lines hidden ---

		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
		bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
		bus_dma_tag_destroy(ring->rx_desc_tag);
	}

	if (sc->nfe_flags & NFE_USE_JUMBO) {
		nfe_jpool_free(sc);
	} else {
		for (i = 0; i < NFE_RX_RING_COUNT; i++) {
			data = &ring->data[i];

			if (data->rx_data_map != NULL) {
				bus_dmamap_sync(data->rx_data_tag,
				    data->rx_data_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(data->rx_data_tag,
				    data->rx_data_map);
				bus_dmamap_destroy(data->rx_data_tag,
				    data->rx_data_map);
				bus_dma_tag_destroy(data->rx_data_tag);
			}

			if (data->m != NULL)
				m_freem(data->m);
		}
	}
}

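/*
 * Teardown mirrors setup in reverse: descriptor DMA state first, then
 * the per-buffer maps (or the shared jumbo pool), freeing any mbufs
 * still sitting in the ring along the way.
 */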
static int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, error;
	void **desc;
	int descsize;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    PAGE_SIZE, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NFE_TX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
	    NFE_TX_RING_COUNT * descsize,	/* maxsegsize */
	    BUS_DMA_ALLOCNOW,			/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->tx_desc_tag);
	if (error != 0) {
		printf("nfe%d: could not create desc DMA tag\n", sc->nfe_unit);
		goto fail;
	}

	error = bus_dmamem_alloc(ring->tx_desc_tag, (void **)desc,
	    BUS_DMA_NOWAIT, &ring->tx_desc_map);
	if (error != 0) {
		printf("nfe%d: could not create desc DMA map\n", sc->nfe_unit);
		goto fail;
	}

	error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, *desc,
	    NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ring->tx_desc_segs,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("nfe%d: could not load desc DMA map\n", sc->nfe_unit);
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);

	ring->tx_desc_addr = ring->tx_desc_segs.ds_addr;
	ring->physaddr = ring->tx_desc_addr;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    ETHER_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NFE_JBYTES, NFE_MAX_SCATTER,	/* maxsize, nsegments */
	    NFE_JBYTES,				/* maxsegsize */
	    BUS_DMA_ALLOCNOW,			/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->tx_data_tag);
	if (error != 0) {
		printf("nfe%d: could not create DMA tag\n", sc->nfe_unit);
		goto fail;
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(ring->tx_data_tag, 0,
		    &ring->data[i].tx_data_map);
		if (error != 0) {
			printf("nfe%d: could not create DMA map\n",
			    sc->nfe_unit);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;

--- 10 unchanged lines hidden ---

		if (sc->nfe_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->tx_data_tag, data->active,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->tx_data_tag, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
		bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
		bus_dma_tag_destroy(ring->tx_desc_tag);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->tx_data_tag, data->active,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->tx_data_tag, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->tx_data_map == NULL)
			continue;
		bus_dmamap_destroy(ring->tx_data_tag, data->tx_data_map);
	}

	bus_dma_tag_destroy(ring->tx_data_tag);
}

#ifdef DEVICE_POLLING
static poll_handler_t nfe_poll;

static void
nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct nfe_softc *sc = ifp->if_softc;

	NFE_LOCK(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		nfe_poll_locked(ifp, cmd, count);

--- 15 unchanged lines hidden ---

	sc->rxcycles = count;
	nfe_rxeof(sc);
	nfe_txeof(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		nfe_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) {
		if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0) {
			return;
		}
		NFE_WRITE(sc, NFE_IRQ_STATUS, r);

		if (r & NFE_IRQ_LINK) {
			NFE_READ(sc, NFE_PHY_STATUS);
			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
			DPRINTF(("nfe%d: link state changed\n", sc->nfe_unit));
		}
	}
}
#endif /* DEVICE_POLLING */


static int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN ||
		    ((sc->nfe_flags & NFE_USE_JUMBO) &&
		    ifr->ifr_mtu > ETHERMTU_JUMBO) ||
		    (!(sc->nfe_flags & NFE_USE_JUMBO) &&
		    ifr->ifr_mtu > ETHERMTU)) {
			error = EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			nfe_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		NFE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {

--- 26 unchanged lines hidden ---

		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->nfe_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
	    {
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(nfe_poll, ifp);
				if (error)
					return (error);
				NFE_LOCK(sc);
				NFE_WRITE(sc, NFE_IRQ_MASK, 0);
				ifp->if_capenable |= IFCAP_POLLING;
				NFE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				NFE_LOCK(sc);
				NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
				ifp->if_capenable &= ~IFCAP_POLLING;
				NFE_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if (IFCAP_HWCSUM & ifp->if_capenable &&
			    IFCAP_HWCSUM & ifp->if_capabilities)
				ifp->if_hwassist = NFE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
	    }
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return error;
}

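/*
 * Toggling IFCAP_POLLING above also flips the chip interrupt mask:
 * polling mode runs with NFE_IRQ_MASK cleared, and leaving polling
 * mode re-arms NFE_IRQ_WANTED even when ether_poll_deregister()
 * reports an error.
 */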
static void
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = sc->nfe_ifp;
	u_int32_t r;

	NFE_LOCK(sc);

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		NFE_UNLOCK(sc);
		return;
	}
#endif

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0) {
		NFE_UNLOCK(sc);
		return;	/* not for us */
	}
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

--- 5 unchanged lines hidden ---

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);
		/* check Tx ring */
		nfe_txeof(sc);
	}

	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1270
1271 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1272 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1273 nfe_start_locked(ifp);
1274
1275 NFE_UNLOCK(sc);
1276
1277 return;
1278}
1279
1317
1318 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1319 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1320 nfe_start_locked(ifp);
1321
1322 NFE_UNLOCK(sc);
1323
1324 return;
1325}
1326
1327
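/*
 * nfe_intr() follows the usual ack/mask/process/unmask sequence: the
 * interrupt status is read and written back to acknowledge the sources,
 * NFE_IRQ_MASK is cleared while the Rx and Tx rings are serviced, and
 * NFE_IRQ_WANTED is restored before any queued output is kicked via
 * nfe_start_locked().
 */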
1280static void
1281nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
1282{
1328static void
1329nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
1330{
1331
1283 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
1284}
1285
1332 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
1333}
1334
1335
1286static void
1287nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
1288{
1336static void
1337nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
1338{
1339
1289 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
1290}
1291
1340 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
1341}
1342
1343
1292static void
1293nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
1294{
1344static void
1345nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
1346{
1347
1295 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
1296}
1297
1348 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
1349}
1350
1351
1298static void
1299nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
1300{
1352static void
1353nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
1354{
1355
1301 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
1302}
1303
1356 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
1357}
1358
1359
1304static void
1305nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
1306{
1360static void
1361nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
1362{
1363
1307 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, ops);
1308}
1309
1364 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, ops);
1365}
1366
1367
1310static void
1311nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
1312{
1313
1314 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, ops);
1315}
1316
1368static void
1369nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
1370{
1371
1372 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, ops);
1373}
1374
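/*
 * Note that the sync helpers above all ignore their descriptor and
 * index arguments and simply bus_dmamap_sync() the entire descriptor
 * map; the unused parameters apparently survive from the OpenBSD
 * original, which synced individual descriptor ranges.
 */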
1317static void nfe_rxeof(struct nfe_softc *sc)
1375
1376static void
1377nfe_rxeof(struct nfe_softc *sc)
1318{
1319 struct ifnet *ifp = sc->nfe_ifp;
1320 struct nfe_desc32 *desc32 = NULL;
1321 struct nfe_desc64 *desc64 = NULL;
1322 struct nfe_rx_data *data;
1323 struct nfe_jbuf *jbuf;
1324 struct mbuf *m, *mnew;
1325 bus_addr_t physaddr;

--- 143 unchanged lines hidden ---

1469 if ((sc->nfe_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK)) {
1470 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1471 if (flags & NFE_RX_IP_CSUMOK_V2) {
1472 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1473 }
1474 if (flags & NFE_RX_UDP_CSUMOK_V2 ||
1475 flags & NFE_RX_TCP_CSUMOK_V2) {
1476 m->m_pkthdr.csum_flags |=
1378{
1379 struct ifnet *ifp = sc->nfe_ifp;
1380 struct nfe_desc32 *desc32 = NULL;
1381 struct nfe_desc64 *desc64 = NULL;
1382 struct nfe_rx_data *data;
1383 struct nfe_jbuf *jbuf;
1384 struct mbuf *m, *mnew;
1385 bus_addr_t physaddr;

--- 143 unchanged lines hidden ---

1529 if ((sc->nfe_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK)) {
1530 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1531 if (flags & NFE_RX_IP_CSUMOK_V2) {
1532 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1533 }
1534 if (flags & NFE_RX_UDP_CSUMOK_V2 ||
1535 flags & NFE_RX_TCP_CSUMOK_V2) {
1536 m->m_pkthdr.csum_flags |=
1477 CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
1537 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1478 m->m_pkthdr.csum_data = 0xffff;
1479 }
1480 }
1481#endif
1482
1483#if NVLAN > 1
1484 if (have_tag) {
1485 m->m_pkthdr.ether_vtag = vlan_tag;

--- 28 unchanged lines hidden ---

1514
1515 nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
1516 }
1517
1518 sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
1519 }
1520}
1521
1538 m->m_pkthdr.csum_data = 0xffff;
1539 }
1540 }
1541#endif
1542
1543#if NVLAN > 1
1544 if (have_tag) {
1545 m->m_pkthdr.ether_vtag = vlan_tag;

--- 28 unchanged lines hidden ---

1574
1575 nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
1576 }
1577
1578 sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
1579 }
1580}
1581
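/*
 * The checksum logic above translates the chip's Rx status bits into
 * mbuf flags: NFE_RX_CSUMOK marks the IP header as checked
 * (CSUM_IP_CHECKED), NFE_RX_IP_CSUMOK_V2 as valid, and a good TCP or
 * UDP checksum is reported as CSUM_DATA_VALID | CSUM_PSEUDO_HDR with
 * csum_data set to 0xffff, the stack's convention for "hardware
 * verified the complete checksum".
 */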
1522static void nfe_txeof(struct nfe_softc *sc)
1582
1583static void
1584nfe_txeof(struct nfe_softc *sc)
1523{
1524 struct ifnet *ifp = sc->nfe_ifp;
1525 struct nfe_desc32 *desc32;
1526 struct nfe_desc64 *desc64;
1527 struct nfe_tx_data *data = NULL;
1528 u_int16_t flags;
1529
1530 NFE_LOCK_ASSERT(sc);

--- 17 unchanged lines hidden ---

1548 data = &sc->txq.data[sc->txq.next];
1549
1550 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
1551 if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
1552 goto skip;
1553
1554 if ((flags & NFE_TX_ERROR_V1) != 0) {
1555 printf("nfe%d: tx v1 error 0x%4b\n",
1585{
1586 struct ifnet *ifp = sc->nfe_ifp;
1587 struct nfe_desc32 *desc32;
1588 struct nfe_desc64 *desc64;
1589 struct nfe_tx_data *data = NULL;
1590 u_int16_t flags;
1591
1592 NFE_LOCK_ASSERT(sc);

--- 17 unchanged lines hidden ---

1610 data = &sc->txq.data[sc->txq.next];
1611
1612 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
1613 if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
1614 goto skip;
1615
1616 if ((flags & NFE_TX_ERROR_V1) != 0) {
1617 printf("nfe%d: tx v1 error 0x%4b\n",
1556 sc->nfe_unit, flags, NFE_V1_TXERR);
1618 sc->nfe_unit, flags, NFE_V1_TXERR);
1557
1558 ifp->if_oerrors++;
1559 } else
1560 ifp->if_opackets++;
1561 } else {
1562 if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
1563 goto skip;
1564
1565 if ((flags & NFE_TX_ERROR_V2) != 0) {
1566 printf("nfe%d: tx v2 error 0x%4b\n",
1619
1620 ifp->if_oerrors++;
1621 } else
1622 ifp->if_opackets++;
1623 } else {
1624 if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
1625 goto skip;
1626
1627 if ((flags & NFE_TX_ERROR_V2) != 0) {
1628 printf("nfe%d: tx v2 error 0x%4b\n",
1567 sc->nfe_unit, flags, NFE_V2_TXERR);
1629 sc->nfe_unit, flags, NFE_V2_TXERR);
1568
1569 ifp->if_oerrors++;
1570 } else
1571 ifp->if_opackets++;
1572 }
1573
1574 if (data->m == NULL) { /* should not get here */
1630
1631 ifp->if_oerrors++;
1632 } else
1633 ifp->if_opackets++;
1634 }
1635
1636 if (data->m == NULL) { /* should not get here */
1575 printf("nfe%d: last fragment bit w/o associated mbuf!\n",
1637 printf("nfe%d: last fragment bit w/o associated mbuf!\n",
1576 sc->nfe_unit);
1577 goto skip;
1578 }
1579
1580 /* last fragment of the mbuf chain transmitted */
1581 bus_dmamap_sync(sc->txq.tx_data_tag, data->active,
1582 BUS_DMASYNC_POSTWRITE);
1583 bus_dmamap_unload(sc->txq.tx_data_tag, data->active);

--- 7 unchanged lines hidden (view full) ---

1591 }
1592
1593 if (data != NULL) { /* at least one slot freed */
1594 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1595 nfe_start_locked(ifp);
1596 }
1597}
1598
1638 sc->nfe_unit);
1639 goto skip;
1640 }
1641
1642 /* last fragment of the mbuf chain transmitted */
1643 bus_dmamap_sync(sc->txq.tx_data_tag, data->active,
1644 BUS_DMASYNC_POSTWRITE);
1645 bus_dmamap_unload(sc->txq.tx_data_tag, data->active);

--- 7 unchanged lines hidden (view full) ---

1653 }
1654
1655 if (data != NULL) { /* at least one slot freed */
1656 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1657 nfe_start_locked(ifp);
1658 }
1659}
1660
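/*
 * Tx completions are reaped in ring order: parts without
 * NFE_JUMBO_SUP/NFE_40BIT_ADDR report errors via NFE_TX_ERROR_V1, the
 * newer parts via NFE_TX_ERROR_V2, and once at least one slot has been
 * freed IFF_DRV_OACTIVE is cleared and nfe_start_locked() is called so
 * output resumes immediately.
 */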
1599static int nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
1661
1662static int
1663nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
1600{
1601 struct nfe_desc32 *desc32 = NULL;
1602 struct nfe_desc64 *desc64 = NULL;
1603 struct nfe_tx_data *data = NULL;
1604 bus_dmamap_t map;
1664{
1665 struct nfe_desc32 *desc32 = NULL;
1666 struct nfe_desc64 *desc64 = NULL;
1667 struct nfe_tx_data *data = NULL;
1668 bus_dmamap_t map;
1605 u_int16_t flags = NFE_TX_VALID;
1606 bus_dma_segment_t segs[NFE_MAX_SCATTER];
1669 bus_dma_segment_t segs[NFE_MAX_SCATTER];
1607 int nsegs;
1608 int error, i;
1670 int error, i, nsegs;
1671 u_int16_t flags = NFE_TX_VALID;
1609
1610 map = sc->txq.data[sc->txq.cur].tx_data_map;
1611
1612 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, m0, segs,
1613 &nsegs, BUS_DMA_NOWAIT);
1614
1615 if (error != 0) {
1616 printf("nfe%d: could not map mbuf (error %d)\n", sc->nfe_unit,

--- 40 unchanged lines hidden ---

1657 desc32->length = htole16(segs[i].ds_len - 1);
1658 desc32->flags = htole16(flags);
1659 }
1660
1661 /* csum flags and vtag belong to the first fragment only */
1662 if (nsegs > 1) {
1663 flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
1664 }
1672
1673 map = sc->txq.data[sc->txq.cur].tx_data_map;
1674
1675 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, m0, segs,
1676 &nsegs, BUS_DMA_NOWAIT);
1677
1678 if (error != 0) {
1679 printf("nfe%d: could not map mbuf (error %d)\n", sc->nfe_unit,

--- 40 unchanged lines hidden ---

1720 desc32->length = htole16(segs[i].ds_len - 1);
1721 desc32->flags = htole16(flags);
1722 }
1723
1724 /* csum flags and vtag belong to the first fragment only */
1725 if (nsegs > 1) {
1726 flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
1727 }
1665
1728
1666 sc->txq.queued++;
1667 sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
1668 }
1669
1670 /* the whole mbuf chain has been DMA mapped, fix last descriptor */
1671 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1672 flags |= NFE_TX_LASTFRAG_V2;
1673 desc64->flags = htole16(flags);

--- 10 unchanged lines hidden (view full) ---

1684 data->nsegs = nsegs;
1685
1686 bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
1687
1688 return 0;
1689}
1690
1691
1729 sc->txq.queued++;
1730 sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
1731 }
1732
1733 /* the whole mbuf chain has been DMA mapped, fix last descriptor */
1734 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1735 flags |= NFE_TX_LASTFRAG_V2;
1736 desc64->flags = htole16(flags);

--- 10 unchanged lines hidden (view full) ---

1747 data->nsegs = nsegs;
1748
1749 bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
1750
1751 return 0;
1752}
1753
1754
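/*
 * nfe_encap() handles both descriptor layouts: NFE_40BIT_ADDR parts use
 * the 64-bit nfe_desc64 ring and NFE_TX_LASTFRAG_V2, everything else
 * the 32-bit nfe_desc32 ring and the V1 flag.  The checksum flags are
 * cleared after the first fragment because, as the comment above notes,
 * they belong to the first fragment of a chain only.
 */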
1692static void nfe_setmulti(struct nfe_softc *sc)
1755static void
1756nfe_setmulti(struct nfe_softc *sc)
1693{
1694 struct ifnet *ifp = sc->nfe_ifp;
1757{
1758 struct ifnet *ifp = sc->nfe_ifp;
1695 struct ifmultiaddr *ifma;
1696 u_int8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
1697 u_int32_t filter = NFE_RXFILTER_MAGIC;
1698 u_int8_t etherbroadcastaddr[ETHER_ADDR_LEN] =
1699 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1759 struct ifmultiaddr *ifma;
1700 int i;
1760 int i;
1761 u_int32_t filter = NFE_RXFILTER_MAGIC;
1762 u_int8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
1763 u_int8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
1764 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
1765 };
1701
1702 NFE_LOCK_ASSERT(sc);
1703
1704 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
1705 bzero(addr, ETHER_ADDR_LEN);
1706 bzero(mask, ETHER_ADDR_LEN);
1707 goto done;
1708 }

--- 32 unchanged lines hidden ---

1741 mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
1742 NFE_WRITE(sc, NFE_MULTIMASK_LO,
1743 mask[5] << 8 | mask[4]);
1744
1745 filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
1746 NFE_WRITE(sc, NFE_RXFILTER, filter);
1747}
1748
1766
1767 NFE_LOCK_ASSERT(sc);
1768
1769 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
1770 bzero(addr, ETHER_ADDR_LEN);
1771 bzero(mask, ETHER_ADDR_LEN);
1772 goto done;
1773 }

--- 32 unchanged lines hidden ---

1806 mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
1807 NFE_WRITE(sc, NFE_MULTIMASK_LO,
1808 mask[5] << 8 | mask[4]);
1809
1810 filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
1811 NFE_WRITE(sc, NFE_RXFILTER, filter);
1812}
1813
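/*
 * The receive filter takes a single address/mask pair rather than a
 * multicast hash table.  The loop hidden above accumulates every group
 * address into addr and mask (seeded from the local broadcast address)
 * so that only bit positions shared by all addresses are matched
 * exactly; IFF_ALLMULTI and IFF_PROMISC simply zero both so everything
 * passes.
 */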
1749static void nfe_start(struct ifnet *ifp)
1814
1815static void
1816nfe_start(struct ifnet *ifp)
1750{
1751 struct nfe_softc *sc;
1752
1753 sc = ifp->if_softc;
1754 NFE_LOCK(sc);
1755 nfe_start_locked(ifp);
1756 NFE_UNLOCK(sc);
1757}
1758
1817{
1818 struct nfe_softc *sc;
1819
1820 sc = ifp->if_softc;
1821 NFE_LOCK(sc);
1822 nfe_start_locked(ifp);
1823 NFE_UNLOCK(sc);
1824}
1825
1759static void nfe_start_locked(struct ifnet *ifp)
1826
1827static void
1828nfe_start_locked(struct ifnet *ifp)
1760{
1761 struct nfe_softc *sc = ifp->if_softc;
1829{
1830 struct nfe_softc *sc = ifp->if_softc;
1762 int old = sc->txq.cur;
1763 struct mbuf *m0;
1831 struct mbuf *m0;
1832 int old = sc->txq.cur;
1764
1765 if (!sc->nfe_link || ifp->if_drv_flags & IFF_DRV_OACTIVE) {
1766 return;
1767 }
1768
1769 for (;;) {
1770 IFQ_POLL(&ifp->if_snd, m0);
1771 if (m0 == NULL)

--- 24 unchanged lines hidden ---

1796 /*
1797 * Set a timeout in case the chip goes out to lunch.
1798 */
1799 ifp->if_timer = 5;
1800
1801 return;
1802}
1803
1833
1834 if (!sc->nfe_link || ifp->if_drv_flags & IFF_DRV_OACTIVE) {
1835 return;
1836 }
1837
1838 for (;;) {
1839 IFQ_POLL(&ifp->if_snd, m0);
1840 if (m0 == NULL)

--- 24 unchanged lines hidden ---

1865 /*
1866 * Set a timeout in case the chip goes out to lunch.
1867 */
1868 ifp->if_timer = 5;
1869
1870 return;
1871}
1872
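/*
 * nfe_start_locked() peeks at the send queue with IFQ_POLL() so that a
 * packet which cannot be encapsulated stays queued for a later retry;
 * the saved "old" ring index lets the hidden tail of the function tell
 * whether anything was actually queued before kicking the chip and
 * arming the 5 second if_timer watchdog.
 */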
1804static void nfe_watchdog(struct ifnet *ifp)
1873
1874static void
1875nfe_watchdog(struct ifnet *ifp)
1805{
1806 struct nfe_softc *sc = ifp->if_softc;
1807
1808 printf("nfe%d: watchdog timeout\n", sc->nfe_unit);
1809
1810 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1811 nfe_init(sc);
1876{
1877 struct nfe_softc *sc = ifp->if_softc;
1878
1879 printf("nfe%d: watchdog timeout\n", sc->nfe_unit);
1880
1881 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1882 nfe_init(sc);
1812
1813 ifp->if_oerrors++;
1814
1815 return;
1816}
1817
1883 ifp->if_oerrors++;
1884
1885 return;
1886}
1887
1818static void nfe_init(void *xsc)
1888
1889static void
1890nfe_init(void *xsc)
1819{
1820 struct nfe_softc *sc = xsc;
1821
1822 NFE_LOCK(sc);
1823 nfe_init_locked(sc);
1824 NFE_UNLOCK(sc);
1825
1826 return;
1827}
1828
1891{
1892 struct nfe_softc *sc = xsc;
1893
1894 NFE_LOCK(sc);
1895 nfe_init_locked(sc);
1896 NFE_UNLOCK(sc);
1897
1898 return;
1899}
1900
1829static void nfe_init_locked(void *xsc)
1901
1902static void
1903nfe_init_locked(void *xsc)
1830{
1831 struct nfe_softc *sc = xsc;
1832 struct ifnet *ifp = sc->nfe_ifp;
1833 struct mii_data *mii;
1834 u_int32_t tmp;
1835
1836 NFE_LOCK_ASSERT(sc);
1837

--- 113 unchanged lines hidden ---

1951 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1952 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1953
1954 sc->nfe_link = 0;
1955
1956 return;
1957}
1958
1904{
1905 struct nfe_softc *sc = xsc;
1906 struct ifnet *ifp = sc->nfe_ifp;
1907 struct mii_data *mii;
1908 u_int32_t tmp;
1909
1910 NFE_LOCK_ASSERT(sc);
1911

--- 113 unchanged lines hidden ---

2025 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2026 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2027
2028 sc->nfe_link = 0;
2029
2030 return;
2031}
2032
1959static void nfe_stop(struct ifnet *ifp, int disable)
2033
2034static void
2035nfe_stop(struct ifnet *ifp, int disable)
1960{
1961 struct nfe_softc *sc = ifp->if_softc;
1962 struct mii_data *mii;
1963
1964 NFE_LOCK_ASSERT(sc);
1965
1966 ifp->if_timer = 0;
1967 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

--- 15 unchanged lines hidden ---

1983
1984 /* reset Tx and Rx rings */
1985 nfe_reset_tx_ring(sc, &sc->txq);
1986 nfe_reset_rx_ring(sc, &sc->rxq);
1987
1988 return;
1989}
1990
2036{
2037 struct nfe_softc *sc = ifp->if_softc;
2038 struct mii_data *mii;
2039
2040 NFE_LOCK_ASSERT(sc);
2041
2042 ifp->if_timer = 0;
2043 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

--- 15 unchanged lines hidden ---

2059
2060 /* reset Tx and Rx rings */
2061 nfe_reset_tx_ring(sc, &sc->txq);
2062 nfe_reset_rx_ring(sc, &sc->rxq);
2063
2064 return;
2065}
2066
1991static int nfe_ifmedia_upd(struct ifnet *ifp)
2067
2068static int
2069nfe_ifmedia_upd(struct ifnet *ifp)
1992{
1993 struct nfe_softc *sc = ifp->if_softc;
1994
1995 NFE_LOCK(sc);
1996 nfe_ifmedia_upd_locked(ifp);
1997 NFE_UNLOCK(sc);
1998 return (0);
1999}
2000
2070{
2071 struct nfe_softc *sc = ifp->if_softc;
2072
2073 NFE_LOCK(sc);
2074 nfe_ifmedia_upd_locked(ifp);
2075 NFE_UNLOCK(sc);
2076 return (0);
2077}
2078
2001static int nfe_ifmedia_upd_locked(struct ifnet *ifp)
2079
2080static int
2081nfe_ifmedia_upd_locked(struct ifnet *ifp)
2002{
2082{
2003 struct nfe_softc *sc = ifp->if_softc;
2004 struct mii_data *mii;
2083 struct nfe_softc *sc = ifp->if_softc;
2084 struct mii_data *mii;
2005
2006 NFE_LOCK_ASSERT(sc);
2007
2008 mii = device_get_softc(sc->nfe_miibus);
2009
2010 if (mii->mii_instance) {
2011 struct mii_softc *miisc;
2012 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
2013 miisc = LIST_NEXT(miisc, mii_list)) {
2014 mii_phy_reset(miisc);
2015 }
2016 }
2017 mii_mediachg(mii);
2018
2019 return (0);
2020}
2021
2085
2086 NFE_LOCK_ASSERT(sc);
2087
2088 mii = device_get_softc(sc->nfe_miibus);
2089
2090 if (mii->mii_instance) {
2091 struct mii_softc *miisc;
2092 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
2093 miisc = LIST_NEXT(miisc, mii_list)) {
2094 mii_phy_reset(miisc);
2095 }
2096 }
2097 mii_mediachg(mii);
2098
2099 return (0);
2100}
2101
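/*
 * When more than one PHY instance is attached (mii->mii_instance != 0),
 * every PHY is reset before mii_mediachg() runs, the standard idiom to
 * keep an unselected PHY from continuing to drive the media lines.
 */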
2022static void nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2102
2103static void
2104nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2023{
2105{
2024 struct nfe_softc *sc;
2025 struct mii_data *mii;
2106 struct nfe_softc *sc;
2107 struct mii_data *mii;
2026
2027 sc = ifp->if_softc;
2028
2029 NFE_LOCK(sc);
2030 mii = device_get_softc(sc->nfe_miibus);
2031 mii_pollstat(mii);
2032 NFE_UNLOCK(sc);
2033
2034 ifmr->ifm_active = mii->mii_media_active;
2035 ifmr->ifm_status = mii->mii_media_status;
2036
2037 return;
2038}
2039
2108
2109 sc = ifp->if_softc;
2110
2111 NFE_LOCK(sc);
2112 mii = device_get_softc(sc->nfe_miibus);
2113 mii_pollstat(mii);
2114 NFE_UNLOCK(sc);
2115
2116 ifmr->ifm_active = mii->mii_media_active;
2117 ifmr->ifm_status = mii->mii_media_status;
2118
2119 return;
2120}
2121
2122
2040static void
2041nfe_tick(void *xsc)
2042{
2043 struct nfe_softc *sc;
2044
2045 sc = xsc;
2046
2047 NFE_LOCK(sc);
2048 nfe_tick_locked(sc);
2049 NFE_UNLOCK(sc);
2050}
2051
2052
2123static void
2124nfe_tick(void *xsc)
2125{
2126 struct nfe_softc *sc;
2127
2128 sc = xsc;
2129
2130 NFE_LOCK(sc);
2131 nfe_tick_locked(sc);
2132 NFE_UNLOCK(sc);
2133}
2134
2135
2053void nfe_tick_locked(struct nfe_softc *arg)
2136void
2137nfe_tick_locked(struct nfe_softc *arg)
2054{
2138{
2055 struct nfe_softc *sc;
2056 struct mii_data *mii;
2057 struct ifnet *ifp;
2139 struct nfe_softc *sc;
2140 struct mii_data *mii;
2141 struct ifnet *ifp;
2058
2059 sc = arg;
2060
2142
2143 sc = arg;
2144
2061 NFE_LOCK_ASSERT(sc);
2145 NFE_LOCK_ASSERT(sc);
2062
2063 ifp = sc->nfe_ifp;
2064
2065 mii = device_get_softc(sc->nfe_miibus);
2066 mii_tick(mii);
2067
2068 if (!sc->nfe_link) {
2069 if (mii->mii_media_status & IFM_ACTIVE &&

--- 7 unchanged lines hidden ---

2077 }
2078 }
2079 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2080
2081 return;
2082}
2083
2084
2146
2147 ifp = sc->nfe_ifp;
2148
2149 mii = device_get_softc(sc->nfe_miibus);
2150 mii_tick(mii);
2151
2152 if (!sc->nfe_link) {
2153 if (mii->mii_media_status & IFM_ACTIVE &&

--- 7 unchanged lines hidden ---

2161 }
2162 }
2163 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2164
2165 return;
2166}
2167
2168
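/*
 * nfe_tick_locked() is the once-per-second housekeeping path: it polls
 * the PHY via mii_tick(), latches link-up into sc->nfe_link in the
 * hidden block (restarting output if packets queued up while the link
 * was down), and re-arms itself through the nfe_stat_ch callout.
 */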
2085static void nfe_shutdown(device_t dev)
2169static void
2170nfe_shutdown(device_t dev)
2086{
2087 struct nfe_softc *sc;
2088 struct ifnet *ifp;
2089
2090 sc = device_get_softc(dev);
2091
2092 NFE_LOCK(sc);
2093 ifp = sc->nfe_ifp;
2094 nfe_stop(ifp, 0);
2095 /* nfe_reset(sc); */
2096 NFE_UNLOCK(sc);
2097
2098 return;
2099}
2100
2101
2171{
2172 struct nfe_softc *sc;
2173 struct ifnet *ifp;
2174
2175 sc = device_get_softc(dev);
2176
2177 NFE_LOCK(sc);
2178 ifp = sc->nfe_ifp;
2179 nfe_stop(ifp, 0);
2180 /* nfe_reset(sc); */
2181 NFE_UNLOCK(sc);
2182
2183 return;
2184}
2185
2186
2102static void nfe_get_macaddr(struct nfe_softc *sc, u_char *addr)
2187static void
2188nfe_get_macaddr(struct nfe_softc *sc, u_char *addr)
2103{
2104 uint32_t tmp;
2105
2106 tmp = NFE_READ(sc, NFE_MACADDR_LO);
2107 addr[0] = (tmp >> 8) & 0xff;
2108 addr[1] = (tmp & 0xff);
2109
2110 tmp = NFE_READ(sc, NFE_MACADDR_HI);
2111 addr[2] = (tmp >> 24) & 0xff;
2112 addr[3] = (tmp >> 16) & 0xff;
2113 addr[4] = (tmp >> 8) & 0xff;
2114 addr[5] = (tmp & 0xff);
2115}
2116
2189{
2190 uint32_t tmp;
2191
2192 tmp = NFE_READ(sc, NFE_MACADDR_LO);
2193 addr[0] = (tmp >> 8) & 0xff;
2194 addr[1] = (tmp & 0xff);
2195
2196 tmp = NFE_READ(sc, NFE_MACADDR_HI);
2197 addr[2] = (tmp >> 24) & 0xff;
2198 addr[3] = (tmp >> 16) & 0xff;
2199 addr[4] = (tmp >> 8) & 0xff;
2200 addr[5] = (tmp & 0xff);
2201}
2202
2117static void nfe_set_macaddr(struct nfe_softc *sc, u_char *addr)
2203
2204static void
2205nfe_set_macaddr(struct nfe_softc *sc, u_char *addr)
2118{
2119
2120 NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]);
2121 NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
2122 addr[1] << 8 | addr[0]);
2123}
2124
2206{
2207
2208 NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]);
2209 NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
2210 addr[1] << 8 | addr[0]);
2211}
2212
2213
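/*
 * Note the asymmetry between the two helpers above: nfe_get_macaddr()
 * pulls the first two octets of the station address out of
 * NFE_MACADDR_LO while nfe_set_macaddr() writes the last two there.
 * The chip apparently loads its factory address in reversed byte
 * order, so attach reads it reversed and then reprograms the registers
 * the way the receive filter expects them.
 */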
2125/*
2126 * Map a single buffer address.
2127 */
2128
2129static void
2130nfe_dma_map_segs(arg, segs, nseg, error)
2131 void *arg;
2132 bus_dma_segment_t *segs;

--- 12 unchanged lines hidden ---
2214/*
2215 * Map a single buffer address.
2216 */
2217
2218static void
2219nfe_dma_map_segs(arg, segs, nseg, error)
2220 void *arg;
2221 bus_dma_segment_t *segs;

--- 12 unchanged lines hidden ---