Side-by-side diff (full/compact view) of sys/dev/e1000/if_lem.c:
old revision 241856 (left/"Deleted") vs. new revision 241885 (right/"Added").
1/******************************************************************************
2
3 Copyright (c) 2001-2012, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
1/******************************************************************************
2
3 Copyright (c) 2001-2012, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD: head/sys/dev/e1000/if_lem.c 241856 2012-10-22 03:41:14Z eadler $*/
33/*$FreeBSD: head/sys/dev/e1000/if_lem.c 241885 2012-10-22 13:06:09Z eadler $*/
34
35#ifdef HAVE_KERNEL_OPTION_HEADERS
36#include "opt_device_polling.h"
37#include "opt_inet.h"
38#include "opt_inet6.h"
39#endif
40
41#include <sys/param.h>
42#include <sys/systm.h>
43#include <sys/bus.h>
44#include <sys/endian.h>
45#include <sys/kernel.h>
46#include <sys/kthread.h>
47#include <sys/malloc.h>
48#include <sys/mbuf.h>
49#include <sys/module.h>
50#include <sys/rman.h>
51#include <sys/socket.h>
52#include <sys/sockio.h>
53#include <sys/sysctl.h>
54#include <sys/taskqueue.h>
55#include <sys/eventhandler.h>
56#include <machine/bus.h>
57#include <machine/resource.h>
58
59#include <net/bpf.h>
60#include <net/ethernet.h>
61#include <net/if.h>
62#include <net/if_arp.h>
63#include <net/if_dl.h>
64#include <net/if_media.h>
65
66#include <net/if_types.h>
67#include <net/if_vlan_var.h>
68
69#include <netinet/in_systm.h>
70#include <netinet/in.h>
71#include <netinet/if_ether.h>
72#include <netinet/ip.h>
73#include <netinet/ip6.h>
74#include <netinet/tcp.h>
75#include <netinet/udp.h>
76
77#include <machine/in_cksum.h>
78#include <dev/led/led.h>
79#include <dev/pci/pcivar.h>
80#include <dev/pci/pcireg.h>
81
82#include "e1000_api.h"
83#include "if_lem.h"
84
85/*********************************************************************
86 * Legacy Em Driver version:
87 *********************************************************************/
88char lem_driver_version[] = "1.0.5";
89
90/*********************************************************************
91 * PCI Device ID Table
92 *
93 * Used by probe to select devices to load on
94 * Last field stores an index into e1000_strings
95 * Last entry must be all 0s
96 *
97 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
98 *********************************************************************/
99
100static em_vendor_info_t lem_vendor_info_array[] =
101{
102 /* Intel(R) PRO/1000 Network Connection */
103 { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0},
104 { 0x8086, E1000_DEV_ID_82540EM_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
105 { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0},
106 { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
107 { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0},
108
109 { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0},
110 { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0},
111 { 0x8086, E1000_DEV_ID_82541ER_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
112 { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
113 { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0},
114 { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
115 { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
116
117 { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0},
118
119 { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
120 { 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
121
122 { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
123 { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
124 { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
125 { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
126
127 { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
128 { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
129 { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
130 { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
131 { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
132
133 { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
134 { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
135 { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
136 { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
137 { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
138 { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
139 { 0x8086, E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0},
140 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
141 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
142 PCI_ANY_ID, PCI_ANY_ID, 0},
143
144 { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0},
145 { 0x8086, E1000_DEV_ID_82547EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
146 { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0},
147 /* required last entry */
148 { 0, 0, 0, 0, 0}
149};
150
151/*********************************************************************
152 * Table of branding strings for all supported NICs.
153 *********************************************************************/
154
155static char *lem_strings[] = {
156 "Intel(R) PRO/1000 Legacy Network Connection"
157};
158
159/*********************************************************************
160 * Function prototypes
161 *********************************************************************/
162static int lem_probe(device_t);
163static int lem_attach(device_t);
164static int lem_detach(device_t);
165static int lem_shutdown(device_t);
166static int lem_suspend(device_t);
167static int lem_resume(device_t);
168static void lem_start(struct ifnet *);
169static void lem_start_locked(struct ifnet *ifp);
170static int lem_ioctl(struct ifnet *, u_long, caddr_t);
171static void lem_init(void *);
172static void lem_init_locked(struct adapter *);
173static void lem_stop(void *);
174static void lem_media_status(struct ifnet *, struct ifmediareq *);
175static int lem_media_change(struct ifnet *);
176static void lem_identify_hardware(struct adapter *);
177static int lem_allocate_pci_resources(struct adapter *);
178static int lem_allocate_irq(struct adapter *adapter);
179static void lem_free_pci_resources(struct adapter *);
180static void lem_local_timer(void *);
181static int lem_hardware_init(struct adapter *);
182static int lem_setup_interface(device_t, struct adapter *);
183static void lem_setup_transmit_structures(struct adapter *);
184static void lem_initialize_transmit_unit(struct adapter *);
185static int lem_setup_receive_structures(struct adapter *);
186static void lem_initialize_receive_unit(struct adapter *);
187static void lem_enable_intr(struct adapter *);
188static void lem_disable_intr(struct adapter *);
189static void lem_free_transmit_structures(struct adapter *);
190static void lem_free_receive_structures(struct adapter *);
191static void lem_update_stats_counters(struct adapter *);
192static void lem_add_hw_stats(struct adapter *adapter);
193static void lem_txeof(struct adapter *);
194static void lem_tx_purge(struct adapter *);
195static int lem_allocate_receive_structures(struct adapter *);
196static int lem_allocate_transmit_structures(struct adapter *);
197static bool lem_rxeof(struct adapter *, int, int *);
198#ifndef __NO_STRICT_ALIGNMENT
199static int lem_fixup_rx(struct adapter *);
200#endif
201static void lem_receive_checksum(struct adapter *, struct e1000_rx_desc *,
202 struct mbuf *);
203static void lem_transmit_checksum_setup(struct adapter *, struct mbuf *,
204 u32 *, u32 *);
205static void lem_set_promisc(struct adapter *);
206static void lem_disable_promisc(struct adapter *);
207static void lem_set_multi(struct adapter *);
208static void lem_update_link_status(struct adapter *);
209static int lem_get_buf(struct adapter *, int);
210static void lem_register_vlan(void *, struct ifnet *, u16);
211static void lem_unregister_vlan(void *, struct ifnet *, u16);
212static void lem_setup_vlan_hw_support(struct adapter *);
213static int lem_xmit(struct adapter *, struct mbuf **);
214static void lem_smartspeed(struct adapter *);
215static int lem_82547_fifo_workaround(struct adapter *, int);
216static void lem_82547_update_fifo_head(struct adapter *, int);
217static int lem_82547_tx_fifo_reset(struct adapter *);
218static void lem_82547_move_tail(void *);
219static int lem_dma_malloc(struct adapter *, bus_size_t,
220 struct em_dma_alloc *, int);
221static void lem_dma_free(struct adapter *, struct em_dma_alloc *);
222static int lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
223static void lem_print_nvm_info(struct adapter *);
224static int lem_is_valid_ether_addr(u8 *);
225static u32 lem_fill_descriptors (bus_addr_t address, u32 length,
226 PDESC_ARRAY desc_array);
227static int lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
228static void lem_add_int_delay_sysctl(struct adapter *, const char *,
229 const char *, struct em_int_delay_info *, int, int);
230static void lem_set_flow_cntrl(struct adapter *, const char *,
231 const char *, int *, int);
232/* Management and WOL Support */
233static void lem_init_manageability(struct adapter *);
234static void lem_release_manageability(struct adapter *);
235static void lem_get_hw_control(struct adapter *);
236static void lem_release_hw_control(struct adapter *);
237static void lem_get_wakeup(device_t);
238static void lem_enable_wakeup(device_t);
239static int lem_enable_phy_wakeup(struct adapter *);
240static void lem_led_func(void *, int);
241
242static void lem_intr(void *);
243static int lem_irq_fast(void *);
244static void lem_handle_rxtx(void *context, int pending);
245static void lem_handle_link(void *context, int pending);
246static void lem_add_rx_process_limit(struct adapter *, const char *,
247 const char *, int *, int);
248
249#ifdef DEVICE_POLLING
250static poll_handler_t lem_poll;
251#endif /* POLLING */
252
253/*********************************************************************
254 * FreeBSD Device Interface Entry Points
255 *********************************************************************/
256
257static device_method_t lem_methods[] = {
258 /* Device interface */
259 DEVMETHOD(device_probe, lem_probe),
260 DEVMETHOD(device_attach, lem_attach),
261 DEVMETHOD(device_detach, lem_detach),
262 DEVMETHOD(device_shutdown, lem_shutdown),
263 DEVMETHOD(device_suspend, lem_suspend),
264 DEVMETHOD(device_resume, lem_resume),
265 {0, 0}
266};
267
268static driver_t lem_driver = {
269 "em", lem_methods, sizeof(struct adapter),
270};
271
272extern devclass_t em_devclass;
273DRIVER_MODULE(lem, pci, lem_driver, em_devclass, 0, 0);
274MODULE_DEPEND(lem, pci, 1, 1, 1);
275MODULE_DEPEND(lem, ether, 1, 1, 1);
276
277/*********************************************************************
278 * Tunable default values.
279 *********************************************************************/
280
281#define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
282#define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
283
284static int lem_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
285static int lem_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
286static int lem_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
287static int lem_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
288static int lem_rxd = EM_DEFAULT_RXD;
289static int lem_txd = EM_DEFAULT_TXD;
290static int lem_smart_pwr_down = FALSE;
291
292/* Controls whether promiscuous also shows bad packets */
293static int lem_debug_sbp = FALSE;
294
295TUNABLE_INT("hw.em.tx_int_delay", &lem_tx_int_delay_dflt);
296TUNABLE_INT("hw.em.rx_int_delay", &lem_rx_int_delay_dflt);
297TUNABLE_INT("hw.em.tx_abs_int_delay", &lem_tx_abs_int_delay_dflt);
298TUNABLE_INT("hw.em.rx_abs_int_delay", &lem_rx_abs_int_delay_dflt);
299TUNABLE_INT("hw.em.rxd", &lem_rxd);
300TUNABLE_INT("hw.em.txd", &lem_txd);
301TUNABLE_INT("hw.em.smart_pwr_down", &lem_smart_pwr_down);
302TUNABLE_INT("hw.em.sbp", &lem_debug_sbp);
303
304/* Interrupt style - default to fast */
305static int lem_use_legacy_irq = 0;
306TUNABLE_INT("hw.em.use_legacy_irq", &lem_use_legacy_irq);
307
308/* How many packets rxeof tries to clean at a time */
309static int lem_rx_process_limit = 100;
310TUNABLE_INT("hw.em.rx_process_limit", &lem_rx_process_limit);
311
312/* Flow control setting - default to FULL */
313static int lem_fc_setting = e1000_fc_full;
314TUNABLE_INT("hw.em.fc_setting", &lem_fc_setting);
315
316/* Global used in WOL setup with multiport cards */
317static int global_quad_port_a = 0;
318
319#ifdef DEV_NETMAP /* see ixgbe.c for details */
320#include <dev/netmap/if_lem_netmap.h>
321#endif /* DEV_NETMAP */
322
323/*********************************************************************
324 * Device identification routine
325 *
326 * em_probe determines if the driver should be loaded on
327 * adapter based on PCI vendor/device id of the adapter.
328 *
329 * return BUS_PROBE_DEFAULT on success, positive on failure
330 *********************************************************************/
331
332static int
333lem_probe(device_t dev)
334{
335 char adapter_name[60];
336 u16 pci_vendor_id = 0;
337 u16 pci_device_id = 0;
338 u16 pci_subvendor_id = 0;
339 u16 pci_subdevice_id = 0;
340 em_vendor_info_t *ent;
341
342 INIT_DEBUGOUT("em_probe: begin");
343
344 pci_vendor_id = pci_get_vendor(dev);
345 if (pci_vendor_id != EM_VENDOR_ID)
346 return (ENXIO);
347
348 pci_device_id = pci_get_device(dev);
349 pci_subvendor_id = pci_get_subvendor(dev);
350 pci_subdevice_id = pci_get_subdevice(dev);
351
352 ent = lem_vendor_info_array;
353 while (ent->vendor_id != 0) {
354 if ((pci_vendor_id == ent->vendor_id) &&
355 (pci_device_id == ent->device_id) &&
356
357 ((pci_subvendor_id == ent->subvendor_id) ||
358 (ent->subvendor_id == PCI_ANY_ID)) &&
359
360 ((pci_subdevice_id == ent->subdevice_id) ||
361 (ent->subdevice_id == PCI_ANY_ID))) {
362 sprintf(adapter_name, "%s %s",
363 lem_strings[ent->index],
364 lem_driver_version);
365 device_set_desc_copy(dev, adapter_name);
366 return (BUS_PROBE_DEFAULT);
367 }
368 ent++;
369 }
370
371 return (ENXIO);
372}
373
374/*********************************************************************
375 * Device initialization routine
376 *
377 * The attach entry point is called when the driver is being loaded.
378 * This routine identifies the type of hardware, allocates all resources
379 * and initializes the hardware.
380 *
381 * return 0 on success, positive on failure
382 *********************************************************************/
383
384static int
385lem_attach(device_t dev)
386{
387 struct adapter *adapter;
388 int tsize, rsize;
389 int error = 0;
390
391 INIT_DEBUGOUT("lem_attach: begin");
392
34
35#ifdef HAVE_KERNEL_OPTION_HEADERS
36#include "opt_device_polling.h"
37#include "opt_inet.h"
38#include "opt_inet6.h"
39#endif
40
41#include <sys/param.h>
42#include <sys/systm.h>
43#include <sys/bus.h>
44#include <sys/endian.h>
45#include <sys/kernel.h>
46#include <sys/kthread.h>
47#include <sys/malloc.h>
48#include <sys/mbuf.h>
49#include <sys/module.h>
50#include <sys/rman.h>
51#include <sys/socket.h>
52#include <sys/sockio.h>
53#include <sys/sysctl.h>
54#include <sys/taskqueue.h>
55#include <sys/eventhandler.h>
56#include <machine/bus.h>
57#include <machine/resource.h>
58
59#include <net/bpf.h>
60#include <net/ethernet.h>
61#include <net/if.h>
62#include <net/if_arp.h>
63#include <net/if_dl.h>
64#include <net/if_media.h>
65
66#include <net/if_types.h>
67#include <net/if_vlan_var.h>
68
69#include <netinet/in_systm.h>
70#include <netinet/in.h>
71#include <netinet/if_ether.h>
72#include <netinet/ip.h>
73#include <netinet/ip6.h>
74#include <netinet/tcp.h>
75#include <netinet/udp.h>
76
77#include <machine/in_cksum.h>
78#include <dev/led/led.h>
79#include <dev/pci/pcivar.h>
80#include <dev/pci/pcireg.h>
81
82#include "e1000_api.h"
83#include "if_lem.h"
84
85/*********************************************************************
86 * Legacy Em Driver version:
87 *********************************************************************/
88char lem_driver_version[] = "1.0.5";
89
90/*********************************************************************
91 * PCI Device ID Table
92 *
93 * Used by probe to select devices to load on
94 * Last field stores an index into e1000_strings
95 * Last entry must be all 0s
96 *
97 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
98 *********************************************************************/
99
100static em_vendor_info_t lem_vendor_info_array[] =
101{
102 /* Intel(R) PRO/1000 Network Connection */
103 { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0},
104 { 0x8086, E1000_DEV_ID_82540EM_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
105 { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0},
106 { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
107 { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0},
108
109 { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0},
110 { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0},
111 { 0x8086, E1000_DEV_ID_82541ER_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
112 { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
113 { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0},
114 { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
115 { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
116
117 { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0},
118
119 { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
120 { 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
121
122 { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
123 { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
124 { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
125 { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
126
127 { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
128 { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
129 { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
130 { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
131 { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
132
133 { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
134 { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
135 { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
136 { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
137 { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
138 { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
139 { 0x8086, E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0},
140 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
141 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
142 PCI_ANY_ID, PCI_ANY_ID, 0},
143
144 { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0},
145 { 0x8086, E1000_DEV_ID_82547EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
146 { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0},
147 /* required last entry */
148 { 0, 0, 0, 0, 0}
149};
150
151/*********************************************************************
152 * Table of branding strings for all supported NICs.
153 *********************************************************************/
154
155static char *lem_strings[] = {
156 "Intel(R) PRO/1000 Legacy Network Connection"
157};
158
159/*********************************************************************
160 * Function prototypes
161 *********************************************************************/
162static int lem_probe(device_t);
163static int lem_attach(device_t);
164static int lem_detach(device_t);
165static int lem_shutdown(device_t);
166static int lem_suspend(device_t);
167static int lem_resume(device_t);
168static void lem_start(struct ifnet *);
169static void lem_start_locked(struct ifnet *ifp);
170static int lem_ioctl(struct ifnet *, u_long, caddr_t);
171static void lem_init(void *);
172static void lem_init_locked(struct adapter *);
173static void lem_stop(void *);
174static void lem_media_status(struct ifnet *, struct ifmediareq *);
175static int lem_media_change(struct ifnet *);
176static void lem_identify_hardware(struct adapter *);
177static int lem_allocate_pci_resources(struct adapter *);
178static int lem_allocate_irq(struct adapter *adapter);
179static void lem_free_pci_resources(struct adapter *);
180static void lem_local_timer(void *);
181static int lem_hardware_init(struct adapter *);
182static int lem_setup_interface(device_t, struct adapter *);
183static void lem_setup_transmit_structures(struct adapter *);
184static void lem_initialize_transmit_unit(struct adapter *);
185static int lem_setup_receive_structures(struct adapter *);
186static void lem_initialize_receive_unit(struct adapter *);
187static void lem_enable_intr(struct adapter *);
188static void lem_disable_intr(struct adapter *);
189static void lem_free_transmit_structures(struct adapter *);
190static void lem_free_receive_structures(struct adapter *);
191static void lem_update_stats_counters(struct adapter *);
192static void lem_add_hw_stats(struct adapter *adapter);
193static void lem_txeof(struct adapter *);
194static void lem_tx_purge(struct adapter *);
195static int lem_allocate_receive_structures(struct adapter *);
196static int lem_allocate_transmit_structures(struct adapter *);
197static bool lem_rxeof(struct adapter *, int, int *);
198#ifndef __NO_STRICT_ALIGNMENT
199static int lem_fixup_rx(struct adapter *);
200#endif
201static void lem_receive_checksum(struct adapter *, struct e1000_rx_desc *,
202 struct mbuf *);
203static void lem_transmit_checksum_setup(struct adapter *, struct mbuf *,
204 u32 *, u32 *);
205static void lem_set_promisc(struct adapter *);
206static void lem_disable_promisc(struct adapter *);
207static void lem_set_multi(struct adapter *);
208static void lem_update_link_status(struct adapter *);
209static int lem_get_buf(struct adapter *, int);
210static void lem_register_vlan(void *, struct ifnet *, u16);
211static void lem_unregister_vlan(void *, struct ifnet *, u16);
212static void lem_setup_vlan_hw_support(struct adapter *);
213static int lem_xmit(struct adapter *, struct mbuf **);
214static void lem_smartspeed(struct adapter *);
215static int lem_82547_fifo_workaround(struct adapter *, int);
216static void lem_82547_update_fifo_head(struct adapter *, int);
217static int lem_82547_tx_fifo_reset(struct adapter *);
218static void lem_82547_move_tail(void *);
219static int lem_dma_malloc(struct adapter *, bus_size_t,
220 struct em_dma_alloc *, int);
221static void lem_dma_free(struct adapter *, struct em_dma_alloc *);
222static int lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
223static void lem_print_nvm_info(struct adapter *);
224static int lem_is_valid_ether_addr(u8 *);
225static u32 lem_fill_descriptors (bus_addr_t address, u32 length,
226 PDESC_ARRAY desc_array);
227static int lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
228static void lem_add_int_delay_sysctl(struct adapter *, const char *,
229 const char *, struct em_int_delay_info *, int, int);
230static void lem_set_flow_cntrl(struct adapter *, const char *,
231 const char *, int *, int);
232/* Management and WOL Support */
233static void lem_init_manageability(struct adapter *);
234static void lem_release_manageability(struct adapter *);
235static void lem_get_hw_control(struct adapter *);
236static void lem_release_hw_control(struct adapter *);
237static void lem_get_wakeup(device_t);
238static void lem_enable_wakeup(device_t);
239static int lem_enable_phy_wakeup(struct adapter *);
240static void lem_led_func(void *, int);
241
242static void lem_intr(void *);
243static int lem_irq_fast(void *);
244static void lem_handle_rxtx(void *context, int pending);
245static void lem_handle_link(void *context, int pending);
246static void lem_add_rx_process_limit(struct adapter *, const char *,
247 const char *, int *, int);
248
249#ifdef DEVICE_POLLING
250static poll_handler_t lem_poll;
251#endif /* POLLING */
252
253/*********************************************************************
254 * FreeBSD Device Interface Entry Points
255 *********************************************************************/
256
257static device_method_t lem_methods[] = {
258 /* Device interface */
259 DEVMETHOD(device_probe, lem_probe),
260 DEVMETHOD(device_attach, lem_attach),
261 DEVMETHOD(device_detach, lem_detach),
262 DEVMETHOD(device_shutdown, lem_shutdown),
263 DEVMETHOD(device_suspend, lem_suspend),
264 DEVMETHOD(device_resume, lem_resume),
265 {0, 0}
266};
267
268static driver_t lem_driver = {
269 "em", lem_methods, sizeof(struct adapter),
270};
271
272extern devclass_t em_devclass;
273DRIVER_MODULE(lem, pci, lem_driver, em_devclass, 0, 0);
274MODULE_DEPEND(lem, pci, 1, 1, 1);
275MODULE_DEPEND(lem, ether, 1, 1, 1);
276
277/*********************************************************************
278 * Tunable default values.
279 *********************************************************************/
280
281#define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
282#define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
283
284static int lem_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
285static int lem_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
286static int lem_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
287static int lem_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
288static int lem_rxd = EM_DEFAULT_RXD;
289static int lem_txd = EM_DEFAULT_TXD;
290static int lem_smart_pwr_down = FALSE;
291
292/* Controls whether promiscuous also shows bad packets */
293static int lem_debug_sbp = FALSE;
294
295TUNABLE_INT("hw.em.tx_int_delay", &lem_tx_int_delay_dflt);
296TUNABLE_INT("hw.em.rx_int_delay", &lem_rx_int_delay_dflt);
297TUNABLE_INT("hw.em.tx_abs_int_delay", &lem_tx_abs_int_delay_dflt);
298TUNABLE_INT("hw.em.rx_abs_int_delay", &lem_rx_abs_int_delay_dflt);
299TUNABLE_INT("hw.em.rxd", &lem_rxd);
300TUNABLE_INT("hw.em.txd", &lem_txd);
301TUNABLE_INT("hw.em.smart_pwr_down", &lem_smart_pwr_down);
302TUNABLE_INT("hw.em.sbp", &lem_debug_sbp);
303
304/* Interrupt style - default to fast */
305static int lem_use_legacy_irq = 0;
306TUNABLE_INT("hw.em.use_legacy_irq", &lem_use_legacy_irq);
307
308/* How many packets rxeof tries to clean at a time */
309static int lem_rx_process_limit = 100;
310TUNABLE_INT("hw.em.rx_process_limit", &lem_rx_process_limit);
311
312/* Flow control setting - default to FULL */
313static int lem_fc_setting = e1000_fc_full;
314TUNABLE_INT("hw.em.fc_setting", &lem_fc_setting);
315
316/* Global used in WOL setup with multiport cards */
317static int global_quad_port_a = 0;
318
319#ifdef DEV_NETMAP /* see ixgbe.c for details */
320#include <dev/netmap/if_lem_netmap.h>
321#endif /* DEV_NETMAP */
322
323/*********************************************************************
324 * Device identification routine
325 *
326 * em_probe determines if the driver should be loaded on
327 * adapter based on PCI vendor/device id of the adapter.
328 *
329 * return BUS_PROBE_DEFAULT on success, positive on failure
330 *********************************************************************/
331
332static int
333lem_probe(device_t dev)
334{
335 char adapter_name[60];
336 u16 pci_vendor_id = 0;
337 u16 pci_device_id = 0;
338 u16 pci_subvendor_id = 0;
339 u16 pci_subdevice_id = 0;
340 em_vendor_info_t *ent;
341
342 INIT_DEBUGOUT("em_probe: begin");
343
344 pci_vendor_id = pci_get_vendor(dev);
345 if (pci_vendor_id != EM_VENDOR_ID)
346 return (ENXIO);
347
348 pci_device_id = pci_get_device(dev);
349 pci_subvendor_id = pci_get_subvendor(dev);
350 pci_subdevice_id = pci_get_subdevice(dev);
351
352 ent = lem_vendor_info_array;
353 while (ent->vendor_id != 0) {
354 if ((pci_vendor_id == ent->vendor_id) &&
355 (pci_device_id == ent->device_id) &&
356
357 ((pci_subvendor_id == ent->subvendor_id) ||
358 (ent->subvendor_id == PCI_ANY_ID)) &&
359
360 ((pci_subdevice_id == ent->subdevice_id) ||
361 (ent->subdevice_id == PCI_ANY_ID))) {
362 sprintf(adapter_name, "%s %s",
363 lem_strings[ent->index],
364 lem_driver_version);
365 device_set_desc_copy(dev, adapter_name);
366 return (BUS_PROBE_DEFAULT);
367 }
368 ent++;
369 }
370
371 return (ENXIO);
372}
373
374/*********************************************************************
375 * Device initialization routine
376 *
377 * The attach entry point is called when the driver is being loaded.
378 * This routine identifies the type of hardware, allocates all resources
379 * and initializes the hardware.
380 *
381 * return 0 on success, positive on failure
382 *********************************************************************/
383
static int
lem_attach(device_t dev)
{
	struct adapter *adapter;
	int tsize, rsize;
	int error = 0;

	INIT_DEBUGOUT("lem_attach: begin");

	/* Honor an administrative device hint disabling this unit. */
	if (resource_disabled("lem", device_get_unit(dev))) {
		device_printf(dev, "Disabled by device hint\n");
		return (ENXIO);
	}

	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;
	/*
	 * Three mutexes protect the softc: core (general state), TX and
	 * RX.  Elsewhere in this file the core lock is taken before the
	 * TX lock; keep that order.
	 */
	EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
	EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
	EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL stuff */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
	    lem_sysctl_nvm_info, "I", "NVM Information");

	/* Both callouts run with their backing mutex held. */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
	callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);

	/* Determine hardware and mac info */
	lem_identify_hardware(adapter);

	/* Setup PCI resources */
	if (lem_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci;
	}

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto err_pci;
	}

	e1000_get_bus_info(&adapter->hw);

	/* Set up some sysctls for the tunable interrupt delays */
	lem_add_int_delay_sysctl(adapter, "rx_int_delay",
	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
	    E1000_REGISTER(&adapter->hw, E1000_RDTR), lem_rx_int_delay_dflt);
	lem_add_int_delay_sysctl(adapter, "tx_int_delay",
	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
	    E1000_REGISTER(&adapter->hw, E1000_TIDV), lem_tx_int_delay_dflt);
	/* Absolute delay limits only exist on 82540 and newer MACs. */
	if (adapter->hw.mac.type >= e1000_82540) {
		lem_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
		    "receive interrupt delay limit in usecs",
		    &adapter->rx_abs_int_delay,
		    E1000_REGISTER(&adapter->hw, E1000_RADV),
		    lem_rx_abs_int_delay_dflt);
		lem_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
		    "transmit interrupt delay limit in usecs",
		    &adapter->tx_abs_int_delay,
		    E1000_REGISTER(&adapter->hw, E1000_TADV),
		    lem_tx_abs_int_delay_dflt);
	}

	/* Sysctls for limiting the amount of work done in the taskqueue */
	lem_add_rx_process_limit(adapter, "rx_processing_limit",
	    "max number of rx packets to process", &adapter->rx_process_limit,
	    lem_rx_process_limit);

	/* Sysctl for setting the interface flow control */
	lem_set_flow_cntrl(adapter, "flow_control",
	    "flow control setting",
	    &adapter->fc_setting, lem_fc_setting);

	/*
	 * Validate number of transmit and receive descriptors. It
	 * must not exceed hardware maximum, and must be multiple
	 * of E1000_DBA_ALIGN.
	 */
	if (((lem_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac.type >= e1000_82544 && lem_txd > EM_MAX_TXD) ||
	    (adapter->hw.mac.type < e1000_82544 && lem_txd > EM_MAX_TXD_82543) ||
	    (lem_txd < EM_MIN_TXD)) {
		device_printf(dev, "Using %d TX descriptors instead of %d!\n",
		    EM_DEFAULT_TXD, lem_txd);
		adapter->num_tx_desc = EM_DEFAULT_TXD;
	} else
		adapter->num_tx_desc = lem_txd;
	if (((lem_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac.type >= e1000_82544 && lem_rxd > EM_MAX_RXD) ||
	    (adapter->hw.mac.type < e1000_82544 && lem_rxd > EM_MAX_RXD_82543) ||
	    (lem_rxd < EM_MIN_RXD)) {
		device_printf(dev, "Using %d RX descriptors instead of %d!\n",
		    EM_DEFAULT_RXD, lem_rxd);
		adapter->num_rx_desc = EM_DEFAULT_RXD;
	} else
		adapter->num_rx_desc = lem_rxd;

	adapter->hw.mac.autoneg = DO_AUTO_NEG;
	adapter->hw.phy.autoneg_wait_to_complete = FALSE;
	adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
	adapter->rx_buffer_len = 2048;

	e1000_init_script_state_82541(&adapter->hw, TRUE);
	e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = FALSE;
		adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
	}

	/*
	 * Set the frame limits assuming
	 * standard ethernet sized frames.
	 */
	adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
	adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;

	/*
	 * This controls when hardware reports transmit completion
	 * status.
	 */
	adapter->hw.mac.report_tx_early = 1;

	/* Descriptor rings must be EM_DBA_ALIGN-aligned in size. */
	tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
	    EM_DBA_ALIGN);

	/* Allocate Transmit Descriptor ring */
	if (lem_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
		device_printf(dev, "Unable to allocate tx_desc memory\n");
		error = ENOMEM;
		goto err_tx_desc;
	}
	adapter->tx_desc_base =
	    (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;

	rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
	    EM_DBA_ALIGN);

	/* Allocate Receive Descriptor ring */
	if (lem_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
		device_printf(dev, "Unable to allocate rx_desc memory\n");
		error = ENOMEM;
		goto err_rx_desc;
	}
	adapter->rx_desc_base =
	    (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err_hw_init;
	}

	/*
	** Start from a known state, this is
	** important in reading the nvm and
	** mac from that.
	*/
	e1000_reset_hw(&adapter->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
		/*
		** Some PCI-E parts fail the first check due to
		** the link being in sleep state, call it again,
		** if it fails a second time its a real issue.
		*/
		if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto err_hw_init;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&adapter->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto err_hw_init;
	}

	if (!lem_is_valid_ether_addr(adapter->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto err_hw_init;
	}

	/* Initialize the hardware */
	if (lem_hardware_init(adapter)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		error = EIO;
		goto err_hw_init;
	}

	/* Allocate transmit descriptors and buffers */
	if (lem_allocate_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		error = ENOMEM;
		goto err_tx_struct;
	}

	/* Allocate receive descriptors and buffers */
	if (lem_allocate_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		error = ENOMEM;
		goto err_rx_struct;
	}

	/*
	** Do interrupt configuration
	*/
	error = lem_allocate_irq(adapter);
	if (error)
		goto err_rx_struct;

	/*
	 * Get Wake-on-Lan and Management info for later use
	 */
	lem_get_wakeup(dev);

	/* Setup OS specific network interface */
	if (lem_setup_interface(dev, adapter) != 0)
		goto err_rx_struct;

	/* Initialize statistics */
	lem_update_stats_counters(adapter);

	adapter->hw.mac.get_link_status = 1;
	lem_update_link_status(adapter);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&adapter->hw))
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");

	/* Do we need workaround for 82544 PCI-X adapter? */
	if (adapter->hw.bus.type == e1000_bus_type_pcix &&
	    adapter->hw.mac.type == e1000_82544)
		adapter->pcix_82544 = TRUE;
	else
		adapter->pcix_82544 = FALSE;

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    lem_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    lem_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	lem_add_hw_stats(adapter);

	/* Non-AMT based hardware can now take control from firmware */
	if (adapter->has_manage && !adapter->has_amt)
		lem_get_hw_control(adapter);

	/* Tell the stack that the interface is not active */
	adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	adapter->led_dev = led_create(lem_led_func, adapter,
	    device_get_nameunit(dev));

#ifdef DEV_NETMAP
	lem_netmap_attach(adapter);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("lem_attach: end");

	return (0);

	/*
	 * Error unwind: labels release resources in roughly the reverse
	 * order of acquisition; err_tx_struct and err_hw_init share the
	 * same cleanup path.
	 */
err_rx_struct:
	lem_free_transmit_structures(adapter);
err_tx_struct:
err_hw_init:
	lem_release_hw_control(adapter);
	lem_dma_free(adapter, &adapter->rxdma);
err_rx_desc:
	lem_dma_free(adapter, &adapter->txdma);
err_tx_desc:
err_pci:
	if (adapter->ifp != NULL)
		if_free(adapter->ifp);
	lem_free_pci_resources(adapter);
	free(adapter->mta, M_DEVBUF);
	EM_TX_LOCK_DESTROY(adapter);
	EM_RX_LOCK_DESTROY(adapter);
	EM_CORE_LOCK_DESTROY(adapter);

	return (error);
}
678
679/*********************************************************************
680 * Device removal routine
681 *
682 * The detach entry point is called when the driver is being removed.
683 * This routine stops the adapter and deallocates all the resources
684 * that were allocated for driver operation.
685 *
686 * return 0 on success, positive on failure
687 *********************************************************************/
688
static int
lem_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = adapter->ifp;

	INIT_DEBUGOUT("em_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	if (adapter->led_dev != NULL)
		led_destroy(adapter->led_dev);

	/*
	 * Quiesce the adapter: hold core + TX locks (core first, as
	 * elsewhere in this file) while stopping the hardware, and set
	 * in_detach so the ioctl path bails out early.
	 */
	EM_CORE_LOCK(adapter);
	EM_TX_LOCK(adapter);
	adapter->in_detach = 1;
	lem_stop(adapter);
	e1000_phy_hw_reset(&adapter->hw);

	lem_release_manageability(adapter);

	EM_TX_UNLOCK(adapter);
	EM_CORE_UNLOCK(adapter);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	/* Detach from the stack, then stop and wait for the callouts. */
	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
	callout_drain(&adapter->tx_fifo_timer);

#ifdef DEV_NETMAP
	netmap_detach(ifp);
#endif /* DEV_NETMAP */
	lem_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(ifp);

	lem_free_transmit_structures(adapter);
	lem_free_receive_structures(adapter);

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base) {
		lem_dma_free(adapter, &adapter->txdma);
		adapter->tx_desc_base = NULL;
	}

	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base) {
		lem_dma_free(adapter, &adapter->rxdma);
		adapter->rx_desc_base = NULL;
	}

	lem_release_hw_control(adapter);
	free(adapter->mta, M_DEVBUF);
	EM_TX_LOCK_DESTROY(adapter);
	EM_RX_LOCK_DESTROY(adapter);
	EM_CORE_LOCK_DESTROY(adapter);

	return (0);
}
762
763/*********************************************************************
764 *
765 * Shutdown entry point
766 *
767 **********************************************************************/
768
769static int
770lem_shutdown(device_t dev)
771{
772 return lem_suspend(dev);
773}
774
775/*
776 * Suspend/resume device methods.
777 */
static int
lem_suspend(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);

	EM_CORE_LOCK(adapter);

	/*
	 * Hand the hardware back to the firmware/management engine and
	 * arm wake-up (WoL) state before the bus powers us down.
	 */
	lem_release_manageability(adapter);
	lem_release_hw_control(adapter);
	lem_enable_wakeup(dev);

	EM_CORE_UNLOCK(adapter);

	return bus_generic_suspend(dev);
}
793
static int
lem_resume(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = adapter->ifp;

	/* Re-initialize the hardware and reclaim manageability. */
	EM_CORE_LOCK(adapter);
	lem_init_locked(adapter);
	lem_init_manageability(adapter);
	EM_CORE_UNLOCK(adapter);
	/* Kick transmit; lem_start takes the TX lock itself. */
	lem_start(ifp);

	return bus_generic_resume(dev);
}
808
809
810static void
811lem_start_locked(struct ifnet *ifp)
812{
813 struct adapter *adapter = ifp->if_softc;
814 struct mbuf *m_head;
815
816 EM_TX_LOCK_ASSERT(adapter);
817
818 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
819 IFF_DRV_RUNNING)
820 return;
821 if (!adapter->link_active)
822 return;
823
824 /*
825 * Force a cleanup if number of TX descriptors
826 * available hits the threshold
827 */
828 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
829 lem_txeof(adapter);
830 /* Now do we at least have a minimal? */
831 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
832 adapter->no_tx_desc_avail1++;
833 return;
834 }
835 }
836
837 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
838
839 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
840 if (m_head == NULL)
841 break;
842 /*
843 * Encapsulation can modify our pointer, and or make it
844 * NULL on failure. In that event, we can't requeue.
845 */
846 if (lem_xmit(adapter, &m_head)) {
847 if (m_head == NULL)
848 break;
849 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
850 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
851 break;
852 }
853
854 /* Send a copy of the frame to the BPF listener */
855 ETHER_BPF_MTAP(ifp, m_head);
856
857 /* Set timeout in case hardware has problems transmitting. */
858 adapter->watchdog_check = TRUE;
859 adapter->watchdog_time = ticks;
860 }
861 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
862 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
863
864 return;
865}
866
867static void
868lem_start(struct ifnet *ifp)
869{
870 struct adapter *adapter = ifp->if_softc;
871
872 EM_TX_LOCK(adapter);
873 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
874 lem_start_locked(ifp);
875 EM_TX_UNLOCK(adapter);
876}
877
878/*********************************************************************
879 * Ioctl entry point
880 *
881 * em_ioctl is called when the user wants to configure the
882 * interface.
883 *
884 * return 0 on success, positive on failure
885 **********************************************************************/
886
static int
lem_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	bool avoid_reset = FALSE;
	int error = 0;

	/* Refuse all ioctls once detach has begun. */
	if (adapter->in_detach)
		return (error);

	switch (command) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				lem_init(adapter);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
	    {
		int max_frame_size;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

		EM_CORE_LOCK(adapter);
		/* 82542 cannot do jumbo frames; everything else can. */
		switch (adapter->hw.mac.type) {
		case e1000_82542:
			max_frame_size = ETHER_MAX_LEN;
			break;
		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			EM_CORE_UNLOCK(adapter);
			error = EINVAL;
			break;
		}

		/* New MTU requires a full re-init of the adapter. */
		ifp->if_mtu = ifr->ifr_mtu;
		adapter->max_frame_size =
		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
		lem_init_locked(adapter);
		EM_CORE_UNLOCK(adapter);
		break;
	    }
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd:\
		    SIOCSIFFLAGS (Set Interface Flags)");
		EM_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/*
				 * Only PROMISC/ALLMULTI changed: update
				 * filters without a full re-init.
				 */
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					lem_disable_promisc(adapter);
					lem_set_promisc(adapter);
				}
			} else
				lem_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				EM_TX_LOCK(adapter);
				lem_stop(adapter);
				EM_TX_UNLOCK(adapter);
			}
		adapter->if_flags = ifp->if_flags;
		EM_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			EM_CORE_LOCK(adapter);
			lem_disable_intr(adapter);
			lem_set_multi(adapter);
			/* 82542 rev 2 needs the RX unit reprogrammed. */
			if (adapter->hw.mac.type == e1000_82542 &&
	    		    adapter->hw.revision_id == E1000_REVISION_2) {
				lem_initialize_receive_unit(adapter);
			}
#ifdef DEVICE_POLLING
			if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
				lem_enable_intr(adapter);
			EM_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		EM_CORE_LOCK(adapter);
		if (e1000_check_reset_block(&adapter->hw)) {
			EM_CORE_UNLOCK(adapter);
			device_printf(adapter->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		EM_CORE_UNLOCK(adapter);
		/* FALLTHROUGH -- set and get share ifmedia_ioctl() */
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: \
		    SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask, reinit;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(lem_poll, ifp);
				if (error)
					return (error);
				EM_CORE_LOCK(adapter);
				lem_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				EM_CORE_LOCK(adapter);
				lem_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if ((mask & IFCAP_WOL) &&
		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
			if (mask & IFCAP_WOL_MCAST)
				ifp->if_capenable ^= IFCAP_WOL_MCAST;
			if (mask & IFCAP_WOL_MAGIC)
				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		}
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
			lem_init(adapter);
		VLAN_CAPABILITIES(ifp);
		break;
	    }

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
1064
1065
1066/*********************************************************************
1067 * Init entry point
1068 *
1069 * This routine is used in two ways. It is used by the stack as
1070 * init entry point in network interface structure. It is also used
1071 * by the driver as a hw/sw initialization routine to get to a
1072 * consistent state.
1073 *
1074 * return 0 on success, positive on failure
1075 **********************************************************************/
1076
static void
lem_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	u32 pba;

	INIT_DEBUGOUT("lem_init: begin");

	EM_CORE_LOCK_ASSERT(adapter);

	/* Stop the adapter first; lem_stop() requires the TX lock. */
	EM_TX_LOCK(adapter);
	lem_stop(adapter);
	EM_TX_UNLOCK(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 * Note: default does not leave enough room for Jumbo Frame >10k.
	 */
	switch (adapter->hw.mac.type) {
	case e1000_82547:
	case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size =
		    (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K. */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}

	INIT_DEBUGOUT1("lem_init: pba=%dK",pba);
	E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
	    ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	/* Initialize the hardware */
	if (lem_hardware_init(adapter)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		return;
	}
	lem_update_link_status(adapter);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);

	/* Set hardware offload abilities */
	ifp->if_hwassist = 0;
	/* Checksum offload exists on 82543 and newer MACs only. */
	if (adapter->hw.mac.type >= e1000_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
	}

	/* Configure for OS presence */
	lem_init_manageability(adapter);

	/* Prepare transmit descriptors and buffers */
	lem_setup_transmit_structures(adapter);
	lem_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	lem_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (lem_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		EM_TX_LOCK(adapter);
		lem_stop(adapter);
		EM_TX_UNLOCK(adapter);
		return;
	}
	lem_initialize_receive_unit(adapter);

	/* Use real VLAN Filter support? */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
			/* Use real VLAN Filter support */
			lem_setup_vlan_hw_support(adapter);
		else {
			/* No filtering: just strip/insert tags (VME). */
			u32 ctrl;
			ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
			ctrl |= E1000_CTRL_VME;
			E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
		}
	}

	/* Don't lose promiscuous settings */
	lem_set_promisc(adapter);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Restart the 1 Hz housekeeping timer and clear HW counters. */
	callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
	e1000_clear_hw_cntrs_base_generic(&adapter->hw);

#ifdef DEVICE_POLLING
	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		lem_disable_intr(adapter);
	else
#endif /* DEVICE_POLLING */
		lem_enable_intr(adapter);

	/* AMT based hardware can now take control from firmware */
	if (adapter->has_manage && adapter->has_amt)
		lem_get_hw_control(adapter);
}
1207
/* if_init entry point: take the core lock and run the locked init. */
static void
lem_init(void *arg)
{
	struct adapter *adapter = arg;

	EM_CORE_LOCK(adapter);
	lem_init_locked(adapter);
	EM_CORE_UNLOCK(adapter);
}
1217
1218
1219#ifdef DEVICE_POLLING
1220/*********************************************************************
1221 *
1222 * Legacy polling routine
1223 *
1224 *********************************************************************/
static int
lem_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	u32 reg_icr, rx_done = 0;

	EM_CORE_LOCK(adapter);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		EM_CORE_UNLOCK(adapter);
		return (rx_done);
	}

	/* On a status poll, also service link-state changes. */
	if (cmd == POLL_AND_CHECK_STATUS) {
		reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.mac.get_link_status = 1;
			lem_update_link_status(adapter);
			callout_reset(&adapter->timer, hz,
			    lem_local_timer, adapter);
		}
	}
	EM_CORE_UNLOCK(adapter);

	/* Receive up to 'count' packets; rx_done reports how many. */
	lem_rxeof(adapter, count, &rx_done);

	/* Reap completed transmits and restart the TX path if queued. */
	EM_TX_LOCK(adapter);
	lem_txeof(adapter);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		lem_start_locked(ifp);
	EM_TX_UNLOCK(adapter);
	return (rx_done);
}
1258#endif /* DEVICE_POLLING */
1259
1260/*********************************************************************
1261 *
1262 * Legacy Interrupt Service routine
1263 *
1264 *********************************************************************/
static void
lem_intr(void *arg)
{
	struct adapter *adapter = arg;
	struct ifnet *ifp = adapter->ifp;
	u32 reg_icr;


	/* Polling mode or interface down: nothing to service here. */
	if ((ifp->if_capenable & IFCAP_POLLING) ||
	    ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0))
		return;

	EM_CORE_LOCK(adapter);
	/* Reading ICR acknowledges/clears the pending causes. */
	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
	if (reg_icr & E1000_ICR_RXO)
		adapter->rx_overruns++;

	/* All-ones (hot eject) or zero (shared line, not ours): bail. */
	if ((reg_icr == 0xffffffff) || (reg_icr == 0)) {
		EM_CORE_UNLOCK(adapter);
		return;
	}

	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		callout_stop(&adapter->timer);
		adapter->hw.mac.get_link_status = 1;
		lem_update_link_status(adapter);
		/* Deal with TX cruft when link lost */
		lem_tx_purge(adapter);
		callout_reset(&adapter->timer, hz,
		    lem_local_timer, adapter);
		EM_CORE_UNLOCK(adapter);
		return;
	}

	EM_CORE_UNLOCK(adapter);
	/* -1: no RX packet limit for the legacy interrupt path. */
	lem_rxeof(adapter, -1, NULL);

	EM_TX_LOCK(adapter);
	lem_txeof(adapter);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		lem_start_locked(ifp);
	EM_TX_UNLOCK(adapter);
	return;
}
1310
1311
/* Taskqueue handler: refresh link state after a link-change interrupt. */
static void
lem_handle_link(void *context, int pending)
{
	struct adapter *adapter = context;
	struct ifnet *ifp = adapter->ifp;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	EM_CORE_LOCK(adapter);
	callout_stop(&adapter->timer);
	lem_update_link_status(adapter);
	/* Deal with TX cruft when link lost */
	lem_tx_purge(adapter);
	callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
	EM_CORE_UNLOCK(adapter);
}
1329
1330
1331/* Combined RX/TX handler, used by Legacy and MSI */
/* Combined RX/TX handler, used by Legacy and MSI */
static void
lem_handle_rxtx(void *context, int pending)
{
	struct adapter *adapter = context;
	struct ifnet *ifp = adapter->ifp;


	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		lem_rxeof(adapter, adapter->rx_process_limit, NULL);
		EM_TX_LOCK(adapter);
		lem_txeof(adapter);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			lem_start_locked(ifp);
		EM_TX_UNLOCK(adapter);
	}

	/*
	 * Re-check RUNNING before re-enabling interrupts: the flag may
	 * have been cleared (e.g. by lem_stop) while we were working.
	 * Interrupts were masked by lem_irq_fast() before this task ran.
	 */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		lem_enable_intr(adapter);
}
1351
1352/*********************************************************************
1353 *
1354 * Fast Legacy/MSI Combined Interrupt Service routine
1355 *
1356 *********************************************************************/
1357static int
1358lem_irq_fast(void *arg)
1359{
1360 struct adapter *adapter = arg;
1361 struct ifnet *ifp;
1362 u32 reg_icr;
1363
1364 ifp = adapter->ifp;
1365
1366 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1367
1368 /* Hot eject? */
1369 if (reg_icr == 0xffffffff)
1370 return FILTER_STRAY;
1371
1372 /* Definitely not our interrupt. */
1373 if (reg_icr == 0x0)
1374 return FILTER_STRAY;
1375
1376 /*
1377 * Mask interrupts until the taskqueue is finished running. This is
1378 * cheap, just assume that it is needed. This also works around the
1379 * MSI message reordering errata on certain systems.
1380 */
1381 lem_disable_intr(adapter);
1382 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1383
1384 /* Link status change */
1385 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1386 adapter->hw.mac.get_link_status = 1;
1387 taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1388 }
1389
1390 if (reg_icr & E1000_ICR_RXO)
1391 adapter->rx_overruns++;
1392 return FILTER_HANDLED;
1393}
1394
1395
1396/*********************************************************************
1397 *
1398 * Media Ioctl callback
1399 *
1400 * This routine is called whenever the user queries the status of
1401 * the interface using ifconfig.
1402 *
1403 **********************************************************************/
static void
lem_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	/* Fiber defaults to 1000base-SX; the 82545 uses LX instead. */
	u_char fiber_type = IFM_1000_SX;

	INIT_DEBUGOUT("lem_media_status: begin");

	EM_CORE_LOCK(adapter);
	lem_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report "valid but inactive" and stop. */
	if (!adapter->link_active) {
		EM_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
		if (adapter->hw.mac.type == e1000_82545)
			fiber_type = IFM_1000_LX;
		ifmr->ifm_active |= fiber_type | IFM_FDX;
	} else {
		/* Copper: map the negotiated speed/duplex to ifmedia. */
		switch (adapter->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (adapter->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
	EM_CORE_UNLOCK(adapter);
}
1449
1450/*********************************************************************
1451 *
1452 * Media Ioctl callback
1453 *
1454 * This routine is called when the user changes speed/duplex using
1455 * media/mediaopt option with ifconfig.
1456 *
1457 **********************************************************************/
static int
lem_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;

	INIT_DEBUGOUT("lem_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	EM_CORE_LOCK(adapter);
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		adapter->hw.mac.autoneg = DO_AUTO_NEG;
		adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		/* Gigabit still autonegotiates, advertising 1000FD only. */
		adapter->hw.mac.autoneg = DO_AUTO_NEG;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case IFM_100_TX:
		/* Forced 100 Mb/s; duplex from the media options. */
		adapter->hw.mac.autoneg = FALSE;
		adapter->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		else
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case IFM_10_T:
		/* Forced 10 Mb/s; duplex from the media options. */
		adapter->hw.mac.autoneg = FALSE;
		adapter->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		else
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	default:
		/*
		 * NOTE(review): an unsupported subtype only logs; it is
		 * not rejected, and the re-init below still runs with the
		 * previous settings.
		 */
		device_printf(adapter->dev, "Unsupported media type\n");
	}

	/* Apply the new settings with a full re-initialization. */
	lem_init_locked(adapter);
	EM_CORE_UNLOCK(adapter);

	return (0);
}
1506
1507/*********************************************************************
1508 *
1509 * This routine maps the mbufs to tx descriptors.
1510 *
1511 * return 0 on success, positive on failure
1512 **********************************************************************/
1513
static int
lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
{
	bus_dma_segment_t segs[EM_MAX_SCATTER];
	bus_dmamap_t map;
	struct em_buffer *tx_buffer, *tx_buffer_mapped;
	struct e1000_tx_desc *ctxd = NULL;
	struct mbuf *m_head;
	u32 txd_upper, txd_lower, txd_used, txd_saved;
	int error, nsegs, i, j, first, last = 0;

	m_head = *m_headp;
	txd_upper = txd_lower = txd_used = txd_saved = 0;

	/*
	** When doing checksum offload, it is critical to
	** make sure the first mbuf has more than header,
	** because that routine expects data to be present.
	*/
	if ((m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) &&
	    (m_head->m_len < ETHER_HDR_LEN + sizeof(struct ip))) {
		m_head = m_pullup(m_head, ETHER_HDR_LEN + sizeof(struct ip));
		*m_headp = m_head;
		if (m_head == NULL)
			/* m_pullup freed the chain on failure. */
			return (ENOBUFS);
	}

	/*
	 * Map the packet for DMA
	 *
	 * Capture the first descriptor index,
	 * this descriptor will have the index
	 * of the EOP which is the only one that
	 * now gets a DONE bit writeback.
	 */
	first = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

	/*
	 * There are two types of errors we can (try) to handle:
	 * - EFBIG means the mbuf chain was too long and bus_dma ran
	 *   out of segments.  Defragment the mbuf chain and try again.
	 * - ENOMEM means bus_dma could not obtain enough bounce buffers
	 *   at this point in time.  Defer sending and try again later.
	 * All other errors, in particular EINVAL, are fatal and prevent the
	 * mbuf chain from ever going through.  Drop it and report error.
	 */
	if (error == EFBIG) {
		struct mbuf *m;

		/* Collapse the chain into fewer, larger mbufs. */
		m = m_defrag(*m_headp, M_DONTWAIT);
		if (m == NULL) {
			adapter->mbuf_alloc_failed++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (ENOBUFS);
		}
		*m_headp = m;

		/* Try it again */
		error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

		if (error) {
			adapter->no_tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	} else if (error != 0) {
		/*
		 * NOTE(review): on this path (e.g. ENOMEM) the mbuf chain
		 * is intentionally NOT freed -- the caller keeps ownership
		 * so it can retry later; confirm against lem_start_locked().
		 */
		adapter->no_tx_dma_setup++;
		return (error);
	}

	/* Need room for the segments plus slack for the EOP descriptor. */
	if (nsegs > (adapter->num_tx_desc_avail - 2)) {
		adapter->no_tx_desc_avail2++;
		bus_dmamap_unload(adapter->txtag, map);
		return (ENOBUFS);
	}
	m_head = *m_headp;

	/* Do hardware assists */
	if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
		lem_transmit_checksum_setup(adapter, m_head,
		    &txd_upper, &txd_lower);

	i = adapter->next_avail_tx_desc;
	if (adapter->pcix_82544)
		/* Remember where we started in case we must roll back. */
		txd_saved = i;

	/* Set up our transmit descriptors */
	for (j = 0; j < nsegs; j++) {
		bus_size_t seg_len;
		bus_addr_t seg_addr;
		/* If adapter is 82544 and on PCIX bus */
		if(adapter->pcix_82544) {
			DESC_ARRAY desc_array;
			u32 array_elements, counter;
			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
			array_elements = lem_fill_descriptors(segs[j].ds_addr,
			    segs[j].ds_len, &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				if (txd_used == adapter->num_tx_desc_avail) {
					/* Out of room: roll back and bail. */
					adapter->next_avail_tx_desc = txd_saved;
					adapter->no_tx_desc_avail2++;
					bus_dmamap_unload(adapter->txtag, map);
					return (ENOBUFS);
				}
				tx_buffer = &adapter->tx_buffer_area[i];
				ctxd = &adapter->tx_desc_base[i];
				ctxd->buffer_addr = htole64(
				    desc_array.descriptor[counter].address);
				ctxd->lower.data = htole32(
				    (adapter->txd_cmd | txd_lower | (u16)
				    desc_array.descriptor[counter].length));
				ctxd->upper.data =
				    htole32((txd_upper));
				last = i;
				if (++i == adapter->num_tx_desc)
					i = 0;
				tx_buffer->m_head = NULL;
				tx_buffer->next_eop = -1;
				txd_used++;
			}
		} else {
			/* Common case: one descriptor per DMA segment. */
			tx_buffer = &adapter->tx_buffer_area[i];
			ctxd = &adapter->tx_desc_base[i];
			seg_addr = segs[j].ds_addr;
			seg_len  = segs[j].ds_len;
			ctxd->buffer_addr = htole64(seg_addr);
			ctxd->lower.data = htole32(
			adapter->txd_cmd | txd_lower | seg_len);
			ctxd->upper.data =
			    htole32(txd_upper);
			last = i;
			if (++i == adapter->num_tx_desc)
				i = 0;
			tx_buffer->m_head = NULL;
			tx_buffer->next_eop = -1;
		}
	}

	adapter->next_avail_tx_desc = i;

	if (adapter->pcix_82544)
		adapter->num_tx_desc_avail -= txd_used;
	else
		adapter->num_tx_desc_avail -= nsegs;

	if (m_head->m_flags & M_VLANTAG) {
		/* Set the vlan id. */
		ctxd->upper.fields.special =
		    htole16(m_head->m_pkthdr.ether_vtag);
		/* Tell hardware to add tag */
		ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
	}

	/*
	 * The mbuf stays attached to the LAST buffer; swap the maps so
	 * the first buffer's map (the one actually loaded) travels with it.
	 */
	tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;
	bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

	/*
	 * Last Descriptor of Packet
	 * needs End Of Packet (EOP)
	 * and Report Status (RS)
	 */
	ctxd->lower.data |=
	    htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
	/*
	 * Keep track in the first buffer which
	 * descriptor will be written back
	 */
	tx_buffer = &adapter->tx_buffer_area[first];
	tx_buffer->next_eop = last;
	adapter->watchdog_time = ticks;

	/*
	 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
	 * that this frame is available to transmit.
	 */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	if (adapter->hw.mac.type == e1000_82547 &&
	    adapter->link_duplex == HALF_DUPLEX)
		/* 82547 half-duplex needs the FIFO-wrap workaround. */
		lem_82547_move_tail(adapter);
	else {
		E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
		if (adapter->hw.mac.type == e1000_82547)
			lem_82547_update_fifo_head(adapter,
			    m_head->m_pkthdr.len);
	}

	return (0);
}
1717
1718/*********************************************************************
1719 *
1720 * 82547 workaround to avoid controller hang in half-duplex environment.
1721 * The workaround is to avoid queuing a large packet that would span
1722 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1723 * in this case. We do that only when FIFO is quiescent.
1724 *
1725 **********************************************************************/
static void
lem_82547_move_tail(void *arg)
{
	struct adapter *adapter = arg;
	struct e1000_tx_desc *tx_desc;
	u16 hw_tdt, sw_tdt, length = 0;
	bool eop = 0;

	EM_TX_LOCK_ASSERT(adapter);

	/* Walk from the hardware tail up to where software has filled. */
	hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
	sw_tdt = adapter->next_avail_tx_desc;
	
	while (hw_tdt != sw_tdt) {
		/* Accumulate frame length until an EOP descriptor. */
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if (++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if (eop) {
			if (lem_82547_fifo_workaround(adapter, length)) {
				/*
				 * Frame would wrap the Tx FIFO and the
				 * FIFO could not be reset yet: retry on
				 * the next tick (this function is also a
				 * callout handler).
				 */
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
				    lem_82547_move_tail, adapter);
				break;
			}
			/* Safe to expose this frame to the hardware. */
			E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
			lem_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}	
}
1759
1760static int
1761lem_82547_fifo_workaround(struct adapter *adapter, int len)
1762{
1763 int fifo_space, fifo_pkt_len;
1764
1765 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1766
1767 if (adapter->link_duplex == HALF_DUPLEX) {
1768 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1769
1770 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1771 if (lem_82547_tx_fifo_reset(adapter))
1772 return (0);
1773 else
1774 return (1);
1775 }
1776 }
1777
1778 return (0);
1779}
1780
1781static void
1782lem_82547_update_fifo_head(struct adapter *adapter, int len)
1783{
1784 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1785
1786 /* tx_fifo_head is always 16 byte aligned */
1787 adapter->tx_fifo_head += fifo_pkt_len;
1788 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1789 adapter->tx_fifo_head -= adapter->tx_fifo_size;
1790 }
1791}
1792
1793
static int
lem_82547_tx_fifo_reset(struct adapter *adapter)
{
	u32 tctl;

	/*
	 * The FIFO pointers may only be rewritten while the FIFO is
	 * quiescent: descriptor ring drained (TDT == TDH), FIFO tail/head
	 * and their saved copies in agreement, and no packet bytes left
	 * in the FIFO (TDFPC == 0).
	 */
	if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
	    E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFT) == 
	    E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
	    E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
		/* Disable TX unit */
		tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
		E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
		    tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
		    adapter->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
		E1000_WRITE_FLUSH(&adapter->hw);

		/* Software shadow restarts at zero along with the hardware. */
		adapter->tx_fifo_head = 0;
		adapter->tx_fifo_reset_cnt++;

		return (TRUE);
	}
	else {
		return (FALSE);
	}
}
1834
1835static void
1836lem_set_promisc(struct adapter *adapter)
1837{
1838 struct ifnet *ifp = adapter->ifp;
1839 u32 reg_rctl;
1840
1841 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1842
1843 if (ifp->if_flags & IFF_PROMISC) {
1844 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1845 /* Turn this on if you want to see bad packets */
1846 if (lem_debug_sbp)
1847 reg_rctl |= E1000_RCTL_SBP;
1848 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1849 } else if (ifp->if_flags & IFF_ALLMULTI) {
1850 reg_rctl |= E1000_RCTL_MPE;
1851 reg_rctl &= ~E1000_RCTL_UPE;
1852 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1853 }
1854}
1855
1856static void
1857lem_disable_promisc(struct adapter *adapter)
1858{
1859 u32 reg_rctl;
1860
1861 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1862
1863 reg_rctl &= (~E1000_RCTL_UPE);
1864 reg_rctl &= (~E1000_RCTL_MPE);
1865 reg_rctl &= (~E1000_RCTL_SBP);
1866 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1867}
1868
1869
1870/*********************************************************************
1871 * Multicast Update
1872 *
1873 * This routine is called whenever multicast address list is updated.
1874 *
1875 **********************************************************************/
1876
static void
lem_set_multi(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	struct ifmultiaddr *ifma;
	u32 reg_rctl = 0;
	u8  *mta; /* Multicast array memory */
	int mcnt = 0;

	IOCTL_DEBUGOUT("lem_set_multi: begin");

	/* Start with a clean multicast table image. */
	mta = adapter->mta;
	bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

	/*
	 * 82542 rev 2 erratum: the receiver must be held in reset (and
	 * MWI disabled) while the multicast table is rewritten.
	 */
	if (adapter->hw.mac.type == e1000_82542 && 
	    adapter->hw.revision_id == E1000_REVISION_2) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			e1000_pci_clear_mwi(&adapter->hw);
		reg_rctl |= E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
		msec_delay(5);
	}

	/* Gather the interface's link-level multicast addresses. */
#if __FreeBSD_version < 800000
	IF_ADDR_LOCK(ifp);
#else
	if_maddr_rlock(ifp);
#endif
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		/* Table full: fall back to multicast-promiscuous below. */
		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
		mcnt++;
	}
#if __FreeBSD_version < 800000
	IF_ADDR_UNLOCK(ifp);
#else
	if_maddr_runlock(ifp);
#endif
	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		/* Too many groups: accept all multicast instead. */
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
	} else
		e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);

	/* Take the 82542 rev 2 receiver back out of reset. */
	if (adapter->hw.mac.type == e1000_82542 && 
	    adapter->hw.revision_id == E1000_REVISION_2) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		reg_rctl &= ~E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
		msec_delay(5);
		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			e1000_pci_set_mwi(&adapter->hw);
	}
}
1939
1940
1941/*********************************************************************
1942 * Timer routine
1943 *
1944 * This routine checks for link status and updates statistics.
1945 *
1946 **********************************************************************/
1947
1948static void
1949lem_local_timer(void *arg)
1950{
1951 struct adapter *adapter = arg;
1952
1953 EM_CORE_LOCK_ASSERT(adapter);
1954
1955 lem_update_link_status(adapter);
1956 lem_update_stats_counters(adapter);
1957
1958 lem_smartspeed(adapter);
1959
1960 /*
1961 * We check the watchdog: the time since
1962 * the last TX descriptor was cleaned.
1963 * This implies a functional TX engine.
1964 */
1965 if ((adapter->watchdog_check == TRUE) &&
1966 (ticks - adapter->watchdog_time > EM_WATCHDOG))
1967 goto hung;
1968
1969 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1970 return;
1971hung:
1972 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1973 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1974 adapter->watchdog_events++;
1975 lem_init_locked(adapter);
1976}
1977
static void
lem_update_link_status(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	u32 link_check = 0;

	/* Get the cached link value or read phy for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			/* Shared code clears get_link_status when link is up. */
			link_check = !hw->mac.get_link_status;
			if (link_check) /* ESB2 fix */
				e1000_cfg_on_link_up(hw);
		} else
			link_check = TRUE;
		break;
	case e1000_media_type_fiber:
		/* Fiber: link state comes from the STATUS register. */
		e1000_check_for_link(hw);
		link_check = (E1000_READ_REG(hw, E1000_STATUS) &
                                 E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = adapter->hw.mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		/* No way to determine link; leave link_check = 0. */
		break;
	}

	/* Now check for a transition */
	if (link_check && (adapter->link_active == 0)) {
		/* Down -> up: latch speed/duplex and tell the stack. */
		e1000_get_speed_and_duplex(hw, &adapter->link_speed,
		    &adapter->link_duplex);
		if (bootverbose)
			device_printf(dev, "Link is up %d Mbps %s\n",
			    adapter->link_speed,
			    ((adapter->link_duplex == FULL_DUPLEX) ?
			    "Full Duplex" : "Half Duplex"));
		adapter->link_active = 1;
		adapter->smartspeed = 0;
		ifp->if_baudrate = adapter->link_speed * 1000000;
		if_link_state_change(ifp, LINK_STATE_UP);
	} else if (!link_check && (adapter->link_active == 1)) {
		/* Up -> down: clear cached parameters and the watchdog. */
		ifp->if_baudrate = adapter->link_speed = 0;
		adapter->link_duplex = 0;
		if (bootverbose)
			device_printf(dev, "Link is Down\n");
		adapter->link_active = 0;
		/* Link down, disable watchdog */
		adapter->watchdog_check = FALSE;
		if_link_state_change(ifp, LINK_STATE_DOWN);
	}
}
2036
2037/*********************************************************************
2038 *
2039 * This routine disables all traffic on the adapter by issuing a
2040 * global reset on the MAC and deallocates TX/RX buffers.
2041 *
2042 * This routine should always be called with BOTH the CORE
2043 * and TX locks.
2044 **********************************************************************/
2045
static void
lem_stop(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp = adapter->ifp;

	EM_CORE_LOCK_ASSERT(adapter);
	EM_TX_LOCK_ASSERT(adapter);

	INIT_DEBUGOUT("lem_stop: begin");

	/* Quiesce interrupts and periodic work before touching hardware. */
	lem_disable_intr(adapter);
	callout_stop(&adapter->timer);
	callout_stop(&adapter->tx_fifo_timer);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* Global reset halts all traffic. */
	e1000_reset_hw(&adapter->hw);
	if (adapter->hw.mac.type >= e1000_82544)
		/* Clear wakeup control so WOL state doesn't linger. */
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);

	e1000_led_off(&adapter->hw);
	e1000_cleanup_led(&adapter->hw);
}
2071
2072
2073/*********************************************************************
2074 *
2075 * Determine hardware revision.
2076 *
2077 **********************************************************************/
static void
lem_identify_hardware(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	/* Make sure our PCI config space has the necessary stuff set */
	adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
	    (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
		/* Some BIOSes leave these off: enable them ourselves. */
		device_printf(dev, "Memory Access and/or Bus Master bits "
		    "were not set!\n");
		adapter->hw.bus.pci_cmd_word |=
		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
		pci_write_config(dev, PCIR_COMMAND,
		    adapter->hw.bus.pci_cmd_word, 2);
	}

	/* Save off the information about this board */
	adapter->hw.vendor_id = pci_get_vendor(dev);
	adapter->hw.device_id = pci_get_device(dev);
	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	adapter->hw.subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	adapter->hw.subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	/* Do Shared Code Init and Setup */
	if (e1000_set_mac_type(&adapter->hw)) {
		/* Unknown device id: shared code cannot drive it. */
		device_printf(dev, "Setup init failure\n");
		return;
	}
}
2110
static int
lem_allocate_pci_resources(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int val, rid, error = E1000_SUCCESS;

	/* BAR 0 is the memory-mapped register window. */
	rid = PCIR_BAR(0);
	adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (adapter->memory == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->memory);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->memory);
	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;

	/* Only older adapters use IO mapping */
	if (adapter->hw.mac.type > e1000_82543) {
		/* Figure our where our IO BAR is ? */
		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
			val = pci_read_config(dev, rid, 4);
			if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
				adapter->io_rid = rid;
				break;
			}
			rid += 4;
			/* check for 64bit BAR */
			if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
				rid += 4;
		}
		if (rid >= PCIR_CIS) {
			/*
			 * NOTE(review): the memory resource allocated above
			 * is not released here; presumably the attach failure
			 * path calls lem_free_pci_resources() -- confirm.
			 */
			device_printf(dev, "Unable to locate IO BAR\n");
			return (ENXIO);
		}
		adapter->ioport = bus_alloc_resource_any(dev,
		    SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
		if (adapter->ioport == NULL) {
			device_printf(dev, "Unable to allocate bus resource: "
			    "ioport\n");
			return (ENXIO);
		}
		adapter->hw.io_base = 0;
		adapter->osdep.io_bus_space_tag =
		    rman_get_bustag(adapter->ioport);
		adapter->osdep.io_bus_space_handle =
		    rman_get_bushandle(adapter->ioport);
	}

	adapter->hw.back = &adapter->osdep;

	return (error);
}
2166
2167/*********************************************************************
2168 *
2169 * Setup the Legacy or MSI Interrupt handler
2170 *
2171 **********************************************************************/
int
lem_allocate_irq(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int error, rid = 0;

	/* Manually turn off all interrupts */
	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);

	/* We allocate a single interrupt resource */
	adapter->res[0] = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (adapter->res[0] == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		return (ENXIO);
	}

	/* Do Legacy setup? */
	if (lem_use_legacy_irq) {
		/* Single threaded ithread handler, no taskqueues. */
		if ((error = bus_setup_intr(dev, adapter->res[0],
		    INTR_TYPE_NET | INTR_MPSAFE, NULL, lem_intr, adapter,
		    &adapter->tag[0])) != 0) {
			device_printf(dev,
			    "Failed to register interrupt handler");
			return (error);
		}
		return (0);
	}

	/*
	 * Use a Fast interrupt and the associated
	 * deferred processing contexts.
	 */
	TASK_INIT(&adapter->rxtx_task, 0, lem_handle_rxtx, adapter);
	TASK_INIT(&adapter->link_task, 0, lem_handle_link, adapter);
	adapter->tq = taskqueue_create_fast("lem_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(adapter->dev));
	/* Filter routine runs in interrupt context; tasks do the work. */
	if ((error = bus_setup_intr(dev, adapter->res[0],
	    INTR_TYPE_NET, lem_irq_fast, NULL, adapter,
	    &adapter->tag[0])) != 0) {
		device_printf(dev, "Failed to register fast interrupt "
			    "handler: %d\n", error);
		/* Undo the taskqueue created above. */
		taskqueue_free(adapter->tq);
		adapter->tq = NULL;
		return (error);
	}
	
	return (0);
}
2224
2225
static void
lem_free_pci_resources(struct adapter *adapter)
{
	device_t dev = adapter->dev;


	/* Teardown order: interrupt handler first, then the IRQ itself. */
	if (adapter->tag[0] != NULL) {
		bus_teardown_intr(dev, adapter->res[0],
		    adapter->tag[0]);
		adapter->tag[0] = NULL;
	}

	if (adapter->res[0] != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ,
		    0, adapter->res[0]);
	}

	/* Release register window and (optional) IO port BAR. */
	if (adapter->memory != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->memory);

	if (adapter->ioport != NULL)
		bus_release_resource(dev, SYS_RES_IOPORT,
		    adapter->io_rid, adapter->ioport);
}
2251
2252
2253/*********************************************************************
2254 *
2255 * Initialize the hardware to a configuration
2256 * as specified by the adapter structure.
2257 *
2258 **********************************************************************/
static int
lem_hardware_init(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	u16 	rx_buffer_size;

	INIT_DEBUGOUT("lem_hardware_init: begin");

	/* Issue a global reset */
	e1000_reset_hw(&adapter->hw);

	/* When hardware is reset, fifo_head is also reset */
	adapter->tx_fifo_head = 0;

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	/* PBA register holds the RX packet buffer size in KB; convert to bytes. */
	rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
	    0xffff) << 10 );

	adapter->hw.fc.high_water = rx_buffer_size -
	    roundup2(adapter->max_frame_size, 1024);
	adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;

	adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
	adapter->hw.fc.send_xon = TRUE;

        /* Set Flow control, use the tunable location if sane */
        if ((lem_fc_setting >= 0) && (lem_fc_setting < 4))
                adapter->hw.fc.requested_mode = lem_fc_setting;
        else
                adapter->hw.fc.requested_mode = e1000_fc_none;

	if (e1000_init_hw(&adapter->hw) < 0) {
		device_printf(dev, "Hardware Initialization Failed\n");
		return (EIO);
	}

	/* Prime the shared code's link state. */
	e1000_check_for_link(&adapter->hw);

	return (0);
}
2312
2313/*********************************************************************
2314 *
2315 * Setup networking device structure and register an interface.
2316 *
2317 **********************************************************************/
static int
lem_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet   *ifp;

	INIT_DEBUGOUT("lem_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		return (-1);
	}
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_init =  lem_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = lem_ioctl;
	ifp->if_start = lem_start;
	/* Leave one slot of slack so the ring never completely fills. */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
	IFQ_SET_READY(&ifp->if_snd);

	ether_ifattach(ifp, adapter->hw.mac.addr);

	ifp->if_capabilities = ifp->if_capenable = 0;

	/* Checksum offload exists from 82543 onward. */
	if (adapter->hw.mac.type >= e1000_82543) {
		ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
		ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
	}

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;

	/*
	** Dont turn this on by default, if vlans are
	** created on another pseudo device (eg. lagg)
	** then vlan events are not passed thru, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the em driver you can
	** enable this and get full hardware tag filtering.
	*/
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/* Enable only WOL MAGIC by default */
	if (adapter->wol) {
		ifp->if_capabilities |= IFCAP_WOL;
		ifp->if_capenable |= IFCAP_WOL_MAGIC;
	}
		
	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK,
	    lem_media_change, lem_media_status);
	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
		u_char fiber_type = IFM_1000_SX;	/* default type */

		/* 82545 fiber parts are 1000BASE-LX. */
		if (adapter->hw.mac.type == e1000_82545)
			fiber_type = IFM_1000_LX;
		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX, 
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
	} else {
		/* Copper: advertise every speed/duplex the PHY can do. */
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
			    0, NULL);
		/* The IFE (10/100-only) PHY cannot do gigabit. */
		if (adapter->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&adapter->media,
				IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
			ifmedia_add(&adapter->media,
				IFM_ETHER | IFM_1000_T, 0, NULL);
		}
	}
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
	return (0);
}
2410
2411
2412/*********************************************************************
2413 *
2414 * Workaround for SmartSpeed on 82541 and 82547 controllers
2415 *
2416 **********************************************************************/
static void
lem_smartspeed(struct adapter *adapter)
{
	u16 phy_tmp;

	/* Only relevant when link is down, PHY is IGP, and 1000FDX is
	 * being advertised via autoneg. */
	if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
	    adapter->hw.mac.autoneg == 0 ||
	    (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
			e1000_read_phy_reg(&adapter->hw,
			    PHY_1000T_CTRL, &phy_tmp);
			if(phy_tmp & CR_1000T_MS_ENABLE) {
				/* Stop forcing master/slave and renegotiate. */
				phy_tmp &= ~CR_1000T_MS_ENABLE;
				e1000_write_phy_reg(&adapter->hw,
				    PHY_1000T_CTRL, phy_tmp);
				adapter->smartspeed++;
				if(adapter->hw.mac.autoneg &&
				   !e1000_copper_link_autoneg(&adapter->hw) &&
				   !e1000_read_phy_reg(&adapter->hw,
				    PHY_CONTROL, &phy_tmp)) {
					phy_tmp |= (MII_CR_AUTO_NEG_EN |
						    MII_CR_RESTART_AUTO_NEG);
					e1000_write_phy_reg(&adapter->hw,
					    PHY_CONTROL, phy_tmp);
				}
			}
		}
		return;
	} else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
		/* Re-enable master/slave forcing and renegotiate. */
		phy_tmp |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
		if(adapter->hw.mac.autoneg &&
		   !e1000_copper_link_autoneg(&adapter->hw) &&
		   !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
			phy_tmp |= (MII_CR_AUTO_NEG_EN |
				    MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
		}
	}
	/* Restart process after EM_SMARTSPEED_MAX iterations */
	if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
2471
2472
2473/*
2474 * Manage DMA'able memory.
2475 */
2476static void
2477lem_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2478{
2479 if (error)
2480 return;
2481 *(bus_addr_t *) arg = segs[0].ds_addr;
2482}
2483
2484static int
2485lem_dma_malloc(struct adapter *adapter, bus_size_t size,
2486 struct em_dma_alloc *dma, int mapflags)
2487{
2488 int error;
2489
2490 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
2491 EM_DBA_ALIGN, 0, /* alignment, bounds */
2492 BUS_SPACE_MAXADDR, /* lowaddr */
2493 BUS_SPACE_MAXADDR, /* highaddr */
2494 NULL, NULL, /* filter, filterarg */
2495 size, /* maxsize */
2496 1, /* nsegments */
2497 size, /* maxsegsize */
2498 0, /* flags */
2499 NULL, /* lockfunc */
2500 NULL, /* lockarg */
2501 &dma->dma_tag);
2502 if (error) {
2503 device_printf(adapter->dev,
2504 "%s: bus_dma_tag_create failed: %d\n",
2505 __func__, error);
2506 goto fail_0;
2507 }
2508
2509 error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2510 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
2511 if (error) {
2512 device_printf(adapter->dev,
2513 "%s: bus_dmamem_alloc(%ju) failed: %d\n",
2514 __func__, (uintmax_t)size, error);
2515 goto fail_2;
2516 }
2517
2518 dma->dma_paddr = 0;
2519 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2520 size, lem_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
2521 if (error || dma->dma_paddr == 0) {
2522 device_printf(adapter->dev,
2523 "%s: bus_dmamap_load failed: %d\n",
2524 __func__, error);
2525 goto fail_3;
2526 }
2527
2528 return (0);
2529
2530fail_3:
2531 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2532fail_2:
2533 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2534 bus_dma_tag_destroy(dma->dma_tag);
2535fail_0:
2536 dma->dma_map = NULL;
2537 dma->dma_tag = NULL;
2538
2539 return (error);
2540}
2541
2542static void
2543lem_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2544{
2545 if (dma->dma_tag == NULL)
2546 return;
2547 if (dma->dma_map != NULL) {
2548 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2549 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2550 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2551 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2552 dma->dma_map = NULL;
2553 }
2554 bus_dma_tag_destroy(dma->dma_tag);
2555 dma->dma_tag = NULL;
2556}
2557
2558
2559/*********************************************************************
2560 *
2561 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2562 * the information needed to transmit a packet on the wire.
2563 *
2564 **********************************************************************/
2565static int
2566lem_allocate_transmit_structures(struct adapter *adapter)
2567{
2568 device_t dev = adapter->dev;
2569 struct em_buffer *tx_buffer;
2570 int error;
2571
2572 /*
2573 * Create DMA tags for tx descriptors
2574 */
2575 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2576 1, 0, /* alignment, bounds */
2577 BUS_SPACE_MAXADDR, /* lowaddr */
2578 BUS_SPACE_MAXADDR, /* highaddr */
2579 NULL, NULL, /* filter, filterarg */
2580 MCLBYTES * EM_MAX_SCATTER, /* maxsize */
2581 EM_MAX_SCATTER, /* nsegments */
2582 MCLBYTES, /* maxsegsize */
2583 0, /* flags */
2584 NULL, /* lockfunc */
2585 NULL, /* lockarg */
2586 &adapter->txtag)) != 0) {
2587 device_printf(dev, "Unable to allocate TX DMA tag\n");
2588 goto fail;
2589 }
2590
2591 adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
2592 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
2593 if (adapter->tx_buffer_area == NULL) {
2594 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2595 error = ENOMEM;
2596 goto fail;
2597 }
2598
2599 /* Create the descriptor buffer dma maps */
2600 for (int i = 0; i < adapter->num_tx_desc; i++) {
2601 tx_buffer = &adapter->tx_buffer_area[i];
2602 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
2603 if (error != 0) {
2604 device_printf(dev, "Unable to create TX DMA map\n");
2605 goto fail;
2606 }
2607 tx_buffer->next_eop = -1;
2608 }
2609
2610 return (0);
2611fail:
2612 lem_free_transmit_structures(adapter);
2613 return (error);
2614}
2615
2616/*********************************************************************
2617 *
2618 * (Re)Initialize transmit structures.
2619 *
2620 **********************************************************************/
2621static void
2622lem_setup_transmit_structures(struct adapter *adapter)
2623{
2624 struct em_buffer *tx_buffer;
2625#ifdef DEV_NETMAP
2626 /* we are already locked */
2627 struct netmap_adapter *na = NA(adapter->ifp);
2628 struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0);
2629#endif /* DEV_NETMAP */
2630
2631 /* Clear the old ring contents */
2632 bzero(adapter->tx_desc_base,
2633 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
2634
2635 /* Free any existing TX buffers */
2636 for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2637 tx_buffer = &adapter->tx_buffer_area[i];
2638 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2639 BUS_DMASYNC_POSTWRITE);
2640 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2641 m_freem(tx_buffer->m_head);
2642 tx_buffer->m_head = NULL;
2643#ifdef DEV_NETMAP
2644 if (slot) {
2645 /* the i-th NIC entry goes to slot si */
2646 int si = netmap_idx_n2k(&na->tx_rings[0], i);
2647 uint64_t paddr;
2648 void *addr;
2649
2650 addr = PNMB(slot + si, &paddr);
2651 adapter->tx_desc_base[si].buffer_addr = htole64(paddr);
2652 /* reload the map for netmap mode */
2653 netmap_load_map(adapter->txtag, tx_buffer->map, addr);
2654 }
2655#endif /* DEV_NETMAP */
2656 tx_buffer->next_eop = -1;
2657 }
2658
2659 /* Reset state */
2660 adapter->last_hw_offload = 0;
2661 adapter->next_avail_tx_desc = 0;
2662 adapter->next_tx_to_clean = 0;
2663 adapter->num_tx_desc_avail = adapter->num_tx_desc;
2664
2665 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2666 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2667
2668 return;
2669}
2670
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 *  Programs the TX descriptor ring base/length registers, the head and
 *  tail pointers, the inter-packet gap (TIPG) and interrupt-delay
 *  timers, and finally TCTL, whose write enables the transmitter.
 *
 **********************************************************************/
static void
lem_initialize_transmit_unit(struct adapter *adapter)
{
	u32	tctl, tipg = 0;
	u64	bus_addr;

	 INIT_DEBUGOUT("lem_initialize_transmit_unit: begin");
	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = adapter->txdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
	    adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
	    (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
	    (u32)bus_addr);
	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
	E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);

	HW_DEBUGOUT2("Base = %x, Length = %x\n",
	    E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
	    E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));

	/* Set the default values for the Tx Inter Packet Gap timer */
	/* 82542 uses its own IPG constants; later MACs pick fiber vs copper. */
	switch (adapter->hw.mac.type) {
	case e1000_82542:
		tipg = DEFAULT_82542_TIPG_IPGT;
		tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	default:
		if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
		    (adapter->hw.phy.media_type ==
		    e1000_media_type_internal_serdes))
			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
	}

	E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
	E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
	/* TADV (absolute TX interrupt delay) exists only on 82540 and later. */
	if(adapter->hw.mac.type >= e1000_82540)
		E1000_WRITE_REG(&adapter->hw, E1000_TADV,
		    adapter->tx_abs_int_delay.value);

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);

	/* Setup Transmit Descriptor Base Settings */
	adapter->txd_cmd = E1000_TXD_CMD_IFCS;

	/* Only request delayed interrupts if a TX delay is configured. */
	if (adapter->tx_int_delay.value > 0)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
}
2738
2739/*********************************************************************
2740 *
2741 * Free all transmit related data structures.
2742 *
2743 **********************************************************************/
2744static void
2745lem_free_transmit_structures(struct adapter *adapter)
2746{
2747 struct em_buffer *tx_buffer;
2748
2749 INIT_DEBUGOUT("free_transmit_structures: begin");
2750
2751 if (adapter->tx_buffer_area != NULL) {
2752 for (int i = 0; i < adapter->num_tx_desc; i++) {
2753 tx_buffer = &adapter->tx_buffer_area[i];
2754 if (tx_buffer->m_head != NULL) {
2755 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2756 BUS_DMASYNC_POSTWRITE);
2757 bus_dmamap_unload(adapter->txtag,
2758 tx_buffer->map);
2759 m_freem(tx_buffer->m_head);
2760 tx_buffer->m_head = NULL;
2761 } else if (tx_buffer->map != NULL)
2762 bus_dmamap_unload(adapter->txtag,
2763 tx_buffer->map);
2764 if (tx_buffer->map != NULL) {
2765 bus_dmamap_destroy(adapter->txtag,
2766 tx_buffer->map);
2767 tx_buffer->map = NULL;
2768 }
2769 }
2770 }
2771 if (adapter->tx_buffer_area != NULL) {
2772 free(adapter->tx_buffer_area, M_DEVBUF);
2773 adapter->tx_buffer_area = NULL;
2774 }
2775 if (adapter->txtag != NULL) {
2776 bus_dma_tag_destroy(adapter->txtag);
2777 adapter->txtag = NULL;
2778 }
2779#if __FreeBSD_version >= 800000
2780 if (adapter->br != NULL)
2781 buf_ring_free(adapter->br, M_DEVBUF);
2782#endif
2783}
2784
/*********************************************************************
 *
 *  The offload context needs to be set when we transfer the first
 *  packet of a particular protocol (TCP/UDP). This routine has been
 *  enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
 *
 *  Added back the old method of keeping the current context type
 *  and not setting if unnecessary, as this is reported to be a
 *  big performance win. -jfv
 *
 *  On return *txd_upper / *txd_lower carry the POPTS / DEXT bits the
 *  caller must OR into the data descriptors.  If a new context is
 *  required, a context descriptor is written at next_avail_tx_desc
 *  and the ring bookkeeping is advanced by one slot.
 *
 *  NOTE(review): the Ethernet/IP headers are parsed via mtod() on the
 *  first mbuf; this assumes they are contiguous there — TODO confirm
 *  callers guarantee that (e.g. via m_pullup upstream).
 **********************************************************************/
static void
lem_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
    u32 *txd_upper, u32 *txd_lower)
{
	struct e1000_context_desc *TXD = NULL;
	struct em_buffer *tx_buffer;
	struct ether_vlan_header *eh;
	struct ip *ip = NULL;
	struct ip6_hdr *ip6;
	int curr_txd, ehdrlen;
	u32 cmd, hdr_len, ip_hlen;
	u16 etype;
	u8 ipproto;


	cmd = hdr_len = ipproto = 0;
	*txd_upper = *txd_lower = 0;
	curr_txd = adapter->next_avail_tx_desc;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/*
	 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
	 * TODO: Support SCTP too when it hits the tree.
	 */
	switch (etype) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);
		ip_hlen = ip->ip_hl << 2;

		/* Setup of IP header checksum. */
		if (mp->m_pkthdr.csum_flags & CSUM_IP) {
			/*
			 * Start offset for header checksum calculation.
			 * End offset for header checksum calculation.
			 * Offset of place to put the checksum.
			 */
			TXD = (struct e1000_context_desc *)
			    &adapter->tx_desc_base[curr_txd];
			TXD->lower_setup.ip_fields.ipcss = ehdrlen;
			TXD->lower_setup.ip_fields.ipcse =
			    htole16(ehdrlen + ip_hlen);
			TXD->lower_setup.ip_fields.ipcso =
			    ehdrlen + offsetof(struct ip, ip_sum);
			cmd |= E1000_TXD_CMD_IP;
			*txd_upper |= E1000_TXD_POPTS_IXSM << 8;
		}

		hdr_len = ehdrlen + ip_hlen;
		ipproto = ip->ip_p;

		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */

		/* IPv6 doesn't have a header checksum. */

		hdr_len = ehdrlen + ip_hlen;
		ipproto = ip6->ip6_nxt;
		break;

	default:
		/* Neither IPv4 nor IPv6: no offload bits to set. */
		return;
	}

	switch (ipproto) {
	case IPPROTO_TCP:
		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
			/* no need for context if already set */
			if (adapter->last_hw_offload == CSUM_TCP)
				return;
			adapter->last_hw_offload = CSUM_TCP;
			/*
			 * Start offset for payload checksum calculation.
			 * End offset for payload checksum calculation.
			 * Offset of place to put the checksum.
			 */
			TXD = (struct e1000_context_desc *)
			    &adapter->tx_desc_base[curr_txd];
			TXD->upper_setup.tcp_fields.tucss = hdr_len;
			TXD->upper_setup.tcp_fields.tucse = htole16(0);
			TXD->upper_setup.tcp_fields.tucso =
			    hdr_len + offsetof(struct tcphdr, th_sum);
			cmd |= E1000_TXD_CMD_TCP;
		}
		break;
	case IPPROTO_UDP:
	{
		if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
			/* no need for context if already set */
			if (adapter->last_hw_offload == CSUM_UDP)
				return;
			adapter->last_hw_offload = CSUM_UDP;
			/*
			 * Start offset for header checksum calculation.
			 * End offset for header checksum calculation.
			 * Offset of place to put the checksum.
			 */
			TXD = (struct e1000_context_desc *)
			    &adapter->tx_desc_base[curr_txd];
			TXD->upper_setup.tcp_fields.tucss = hdr_len;
			TXD->upper_setup.tcp_fields.tucse = htole16(0);
			TXD->upper_setup.tcp_fields.tucso =
			    hdr_len + offsetof(struct udphdr, uh_sum);
		}
		/* Fall Thru */
	}
	default:
		break;
	}

	/* If no context descriptor was filled in, nothing to commit. */
	if (TXD == NULL)
		return;
	TXD->tcp_seg_setup.data = htole32(0);
	TXD->cmd_and_length =
	    htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
	/* The context descriptor consumes a ring slot with no mbuf. */
	tx_buffer = &adapter->tx_buffer_area[curr_txd];
	tx_buffer->m_head = NULL;
	tx_buffer->next_eop = -1;

	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;
}
2938
2939
/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 *  Walks the ring from next_tx_to_clean packet by packet (each packet
 *  is delimited by its buffer's next_eop index), for as long as the
 *  EOP descriptor shows the DD (descriptor done) status bit.
 *
 **********************************************************************/
static void
lem_txeof(struct adapter *adapter)
{
	int first, last, done, num_avail;
	struct em_buffer *tx_buffer;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct ifnet *ifp = adapter->ifp;

	EM_TX_LOCK_ASSERT(adapter);

#ifdef DEV_NETMAP
	/* In netmap mode just wake the client; it does its own cleanup. */
	if (ifp->if_capenable & IFCAP_NETMAP) {
		selwakeuppri(&NA(ifp)->tx_rings[0].si, PI_NET);
		return;
	}
#endif /* DEV_NETMAP */
	/* Entire ring free: nothing outstanding to clean. */
	if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
		return;

	num_avail = adapter->num_tx_desc_avail;
	first = adapter->next_tx_to_clean;
	tx_desc = &adapter->tx_desc_base[first];
	tx_buffer = &adapter->tx_buffer_area[first];
	last = tx_buffer->next_eop;
	eop_desc = &adapter->tx_desc_base[last];

	/*
	 * What this does is get the index of the
	 * first descriptor AFTER the EOP of the
	 * first packet, that way we can do the
	 * simple comparison on the inner while loop.
	 */
	if (++last == adapter->num_tx_desc)
		last = 0;
	done = last;

	/* Pick up the status bits the hardware wrote back. */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
		/* We clean the range of the packet */
		while (first != done) {
			tx_desc->upper.data = 0;
			tx_desc->lower.data = 0;
			tx_desc->buffer_addr = 0;
			++num_avail;

			/* Only descriptors that head an mbuf carry a map. */
			if (tx_buffer->m_head) {
				ifp->if_opackets++;
				bus_dmamap_sync(adapter->txtag,
				    tx_buffer->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(adapter->txtag,
				    tx_buffer->map);

				m_freem(tx_buffer->m_head);
				tx_buffer->m_head = NULL;
			}
			tx_buffer->next_eop = -1;
			/* Progress was made; push the watchdog forward. */
			adapter->watchdog_time = ticks;

			if (++first == adapter->num_tx_desc)
				first = 0;

			tx_buffer = &adapter->tx_buffer_area[first];
			tx_desc = &adapter->tx_desc_base[first];
		}
		/* See if we can continue to the next packet */
		last = tx_buffer->next_eop;
		if (last != -1) {
			eop_desc = &adapter->tx_desc_base[last];
			/* Get new done point */
			if (++last == adapter->num_tx_desc) last = 0;
			done = last;
		} else
			break;
	}
	/* Hand the zeroed descriptors back to the hardware. */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	adapter->next_tx_to_clean = first;
	adapter->num_tx_desc_avail = num_avail;

	/*
	 * If we have enough room, clear IFF_DRV_OACTIVE to
	 * tell the stack that it is OK to send packets.
	 * If there are no pending descriptors, clear the watchdog.
	 */
	if (adapter->num_tx_desc_avail > EM_TX_CLEANUP_THRESHOLD) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (adapter->num_tx_desc_avail == adapter->num_tx_desc) {
			adapter->watchdog_check = FALSE;
			return;
		}
	}
}
3043
3044/*********************************************************************
3045 *
3046 * When Link is lost sometimes there is work still in the TX ring
3047 * which may result in a watchdog, rather than allow that we do an
3048 * attempted cleanup and then reinit here. Note that this has been
3049 * seens mostly with fiber adapters.
3050 *
3051 **********************************************************************/
3052static void
3053lem_tx_purge(struct adapter *adapter)
3054{
3055 if ((!adapter->link_active) && (adapter->watchdog_check)) {
3056 EM_TX_LOCK(adapter);
3057 lem_txeof(adapter);
3058 EM_TX_UNLOCK(adapter);
3059 if (adapter->watchdog_check) /* Still outstanding? */
3060 lem_init_locked(adapter);
3061 }
3062}
3063
3064/*********************************************************************
3065 *
3066 * Get a buffer from system mbuf buffer pool.
3067 *
3068 **********************************************************************/
3069static int
3070lem_get_buf(struct adapter *adapter, int i)
3071{
3072 struct mbuf *m;
3073 bus_dma_segment_t segs[1];
3074 bus_dmamap_t map;
3075 struct em_buffer *rx_buffer;
3076 int error, nsegs;
3077
3078 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3079 if (m == NULL) {
3080 adapter->mbuf_cluster_failed++;
3081 return (ENOBUFS);
3082 }
3083 m->m_len = m->m_pkthdr.len = MCLBYTES;
3084
3085 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
3086 m_adj(m, ETHER_ALIGN);
3087
3088 /*
3089 * Using memory from the mbuf cluster pool, invoke the
3090 * bus_dma machinery to arrange the memory mapping.
3091 */
3092 error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
3093 adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
3094 if (error != 0) {
3095 m_free(m);
3096 return (error);
3097 }
3098
3099 /* If nsegs is wrong then the stack is corrupt. */
3100 KASSERT(nsegs == 1, ("Too many segments returned!"));
3101
3102 rx_buffer = &adapter->rx_buffer_area[i];
3103 if (rx_buffer->m_head != NULL)
3104 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3105
3106 map = rx_buffer->map;
3107 rx_buffer->map = adapter->rx_sparemap;
3108 adapter->rx_sparemap = map;
3109 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
3110 rx_buffer->m_head = m;
3111
3112 adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
3113 return (0);
3114}
3115
3116/*********************************************************************
3117 *
3118 * Allocate memory for rx_buffer structures. Since we use one
3119 * rx_buffer per received packet, the maximum number of rx_buffer's
3120 * that we'll need is equal to the number of receive descriptors
3121 * that we've allocated.
3122 *
3123 **********************************************************************/
3124static int
3125lem_allocate_receive_structures(struct adapter *adapter)
3126{
3127 device_t dev = adapter->dev;
3128 struct em_buffer *rx_buffer;
3129 int i, error;
3130
3131 adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
3132 adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3133 if (adapter->rx_buffer_area == NULL) {
3134 device_printf(dev, "Unable to allocate rx_buffer memory\n");
3135 return (ENOMEM);
3136 }
3137
3138 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3139 1, 0, /* alignment, bounds */
3140 BUS_SPACE_MAXADDR, /* lowaddr */
3141 BUS_SPACE_MAXADDR, /* highaddr */
3142 NULL, NULL, /* filter, filterarg */
3143 MCLBYTES, /* maxsize */
3144 1, /* nsegments */
3145 MCLBYTES, /* maxsegsize */
3146 0, /* flags */
3147 NULL, /* lockfunc */
3148 NULL, /* lockarg */
3149 &adapter->rxtag);
3150 if (error) {
3151 device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
3152 __func__, error);
3153 goto fail;
3154 }
3155
3156 /* Create the spare map (used by getbuf) */
3157 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3158 &adapter->rx_sparemap);
3159 if (error) {
3160 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3161 __func__, error);
3162 goto fail;
3163 }
3164
3165 rx_buffer = adapter->rx_buffer_area;
3166 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3167 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3168 &rx_buffer->map);
3169 if (error) {
3170 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3171 __func__, error);
3172 goto fail;
3173 }
3174 }
3175
3176 return (0);
3177
3178fail:
3179 lem_free_receive_structures(adapter);
3180 return (error);
3181}
3182
3183/*********************************************************************
3184 *
3185 * (Re)initialize receive structures.
3186 *
3187 **********************************************************************/
3188static int
3189lem_setup_receive_structures(struct adapter *adapter)
3190{
3191 struct em_buffer *rx_buffer;
3192 int i, error;
3193#ifdef DEV_NETMAP
3194 /* we are already under lock */
3195 struct netmap_adapter *na = NA(adapter->ifp);
3196 struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
3197#endif
3198
3199 /* Reset descriptor ring */
3200 bzero(adapter->rx_desc_base,
3201 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
3202
3203 /* Free current RX buffers. */
3204 rx_buffer = adapter->rx_buffer_area;
3205 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3206 if (rx_buffer->m_head != NULL) {
3207 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3208 BUS_DMASYNC_POSTREAD);
3209 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3210 m_freem(rx_buffer->m_head);
3211 rx_buffer->m_head = NULL;
3212 }
3213 }
3214
3215 /* Allocate new ones. */
3216 for (i = 0; i < adapter->num_rx_desc; i++) {
3217#ifdef DEV_NETMAP
3218 if (slot) {
3219 /* the i-th NIC entry goes to slot si */
3220 int si = netmap_idx_n2k(&na->rx_rings[0], i);
3221 uint64_t paddr;
3222 void *addr;
3223
3224 addr = PNMB(slot + si, &paddr);
3225 netmap_load_map(adapter->rxtag, rx_buffer->map, addr);
3226 /* Update descriptor */
3227 adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
3228 continue;
3229 }
3230#endif /* DEV_NETMAP */
3231 error = lem_get_buf(adapter, i);
3232 if (error)
3233 return (error);
3234 }
3235
3236 /* Setup our descriptor pointers */
3237 adapter->next_rx_desc_to_check = 0;
3238 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3239 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3240
3241 return (0);
3242}
3243
/*********************************************************************
 *
 *  Enable receive unit.
 *
 *  Disables RX while programming, sets interrupt throttling and the
 *  RX ring base/length registers, builds RCTL (buffer size, broadcast
 *  accept, long-packet enable), optionally enables HW RX checksum,
 *  then re-enables RX and programs the head/tail pointers.
 *
 **********************************************************************/
#define MAX_INTS_PER_SEC	8000
#define DEFAULT_ITR	     1000000000/(MAX_INTS_PER_SEC * 256)

static void
lem_initialize_receive_unit(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	u64	bus_addr;
	u32	rctl, rxcsum;

	INIT_DEBUGOUT("lem_initialize_receive_unit: begin");

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	/* RADV/ITR registers exist only on 82540 and later. */
	if (adapter->hw.mac.type >= e1000_82540) {
		E1000_WRITE_REG(&adapter->hw, E1000_RADV,
		    adapter->rx_abs_int_delay.value);
		/*
		 * Set the interrupt throttling rate. Value is calculated
		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
		 */
		E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
	}

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
	    adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
	    (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
	    (u32)bus_addr);

	/* Setup the Receive Control Register */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN Filters are off */
	rctl &= ~E1000_RCTL_VFE;

	/* SBP: store bad packets, needed for the 82543 TBI workaround. */
	if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	/* Buffer sizes > 2048 use the BSEX scale and imply long packets. */
	switch (adapter->rx_buffer_len) {
	default:
	case 2048:
		rctl |= E1000_RCTL_SZ_2048;
		break;
	case 4096:
		rctl |= E1000_RCTL_SZ_4096 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 8192:
		rctl |= E1000_RCTL_SZ_8192 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 16384:
		rctl |= E1000_RCTL_SZ_16384 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	if (ifp->if_mtu > ETHERMTU)
		rctl |= E1000_RCTL_LPE;
	else
		rctl &= ~E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac.type >= e1000_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
	}

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);

	/*
	 * Setup the HW Rx Head and
	 * Tail Descriptor Pointers
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
#ifdef DEV_NETMAP
	/* preserve buffers already made available to clients */
	if (ifp->if_capenable & IFCAP_NETMAP) {
		struct netmap_adapter *na = NA(adapter->ifp);
		struct netmap_kring *kring = &na->rx_rings[0];
		int t = na->num_rx_desc - 1 - kring->nr_hwavail;

		if (t >= na->num_rx_desc)
			t -= na->num_rx_desc;
		E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), t);
	} else
#endif /* DEV_NETMAP */
	/* Tail one behind head: the whole ring is available to the NIC. */
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);

	return;
}
3357
3358/*********************************************************************
3359 *
3360 * Free receive related data structures.
3361 *
3362 **********************************************************************/
3363static void
3364lem_free_receive_structures(struct adapter *adapter)
3365{
3366 struct em_buffer *rx_buffer;
3367 int i;
3368
3369 INIT_DEBUGOUT("free_receive_structures: begin");
3370
3371 if (adapter->rx_sparemap) {
3372 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
3373 adapter->rx_sparemap = NULL;
3374 }
3375
3376 /* Cleanup any existing buffers */
3377 if (adapter->rx_buffer_area != NULL) {
3378 rx_buffer = adapter->rx_buffer_area;
3379 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3380 if (rx_buffer->m_head != NULL) {
3381 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3382 BUS_DMASYNC_POSTREAD);
3383 bus_dmamap_unload(adapter->rxtag,
3384 rx_buffer->map);
3385 m_freem(rx_buffer->m_head);
3386 rx_buffer->m_head = NULL;
3387 } else if (rx_buffer->map != NULL)
3388 bus_dmamap_unload(adapter->rxtag,
3389 rx_buffer->map);
3390 if (rx_buffer->map != NULL) {
3391 bus_dmamap_destroy(adapter->rxtag,
3392 rx_buffer->map);
3393 rx_buffer->map = NULL;
3394 }
3395 }
3396 }
3397
3398 if (adapter->rx_buffer_area != NULL) {
3399 free(adapter->rx_buffer_area, M_DEVBUF);
3400 adapter->rx_buffer_area = NULL;
3401 }
3402
3403 if (adapter->rxtag != NULL) {
3404 bus_dma_tag_destroy(adapter->rxtag);
3405 adapter->rxtag = NULL;
3406 }
3407}
3408
3409/*********************************************************************
3410 *
3411 * This routine executes in interrupt context. It replenishes
3412 * the mbufs in the descriptor and sends data which has been
3413 * dma'ed into host memory to upper layer.
3414 *
3415 * We loop at most count times if count is > 0, or until done if
3416 * count < 0.
3417 *
3418 * For polling we also now return the number of cleaned packets
3419 *********************************************************************/
3420static bool
3421lem_rxeof(struct adapter *adapter, int count, int *done)
3422{
3423 struct ifnet *ifp = adapter->ifp;
3424 struct mbuf *mp;
3425 u8 status = 0, accept_frame = 0, eop = 0;
3426 u16 len, desc_len, prev_len_adj;
3427 int i, rx_sent = 0;
3428 struct e1000_rx_desc *current_desc;
3429
3430 EM_RX_LOCK(adapter);
3431 i = adapter->next_rx_desc_to_check;
3432 current_desc = &adapter->rx_desc_base[i];
3433 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3434 BUS_DMASYNC_POSTREAD);
3435
3436#ifdef DEV_NETMAP
3437 if (ifp->if_capenable & IFCAP_NETMAP) {
3438 struct netmap_adapter *na = NA(ifp);
3439 na->rx_rings[0].nr_kflags |= NKR_PENDINTR;
3440 selwakeuppri(&na->rx_rings[0].si, PI_NET);
3441 EM_RX_UNLOCK(adapter);
3442 return (0);
3443 }
3444#endif /* DEV_NETMAP */
3445
3446 if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
3447 if (done != NULL)
3448 *done = rx_sent;
3449 EM_RX_UNLOCK(adapter);
3450 return (FALSE);
3451 }
3452
3453 while (count != 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) {
3454 struct mbuf *m = NULL;
3455
3456 status = current_desc->status;
3457 if ((status & E1000_RXD_STAT_DD) == 0)
3458 break;
3459
3460 mp = adapter->rx_buffer_area[i].m_head;
3461 /*
3462 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3463 * needs to access the last received byte in the mbuf.
3464 */
3465 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
3466 BUS_DMASYNC_POSTREAD);
3467
3468 accept_frame = 1;
3469 prev_len_adj = 0;
3470 desc_len = le16toh(current_desc->length);
3471 if (status & E1000_RXD_STAT_EOP) {
3472 count--;
3473 eop = 1;
3474 if (desc_len < ETHER_CRC_LEN) {
3475 len = 0;
3476 prev_len_adj = ETHER_CRC_LEN - desc_len;
3477 } else
3478 len = desc_len - ETHER_CRC_LEN;
3479 } else {
3480 eop = 0;
3481 len = desc_len;
3482 }
3483
3484 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
3485 u8 last_byte;
3486 u32 pkt_len = desc_len;
3487
3488 if (adapter->fmp != NULL)
3489 pkt_len += adapter->fmp->m_pkthdr.len;
3490
3491 last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
3492 if (TBI_ACCEPT(&adapter->hw, status,
3493 current_desc->errors, pkt_len, last_byte,
3494 adapter->min_frame_size, adapter->max_frame_size)) {
3495 e1000_tbi_adjust_stats_82543(&adapter->hw,
3496 &adapter->stats, pkt_len,
3497 adapter->hw.mac.addr,
3498 adapter->max_frame_size);
3499 if (len > 0)
3500 len--;
3501 } else
3502 accept_frame = 0;
3503 }
3504
3505 if (accept_frame) {
3506 if (lem_get_buf(adapter, i) != 0) {
3507 ifp->if_iqdrops++;
3508 goto discard;
3509 }
3510
3511 /* Assign correct length to the current fragment */
3512 mp->m_len = len;
3513
3514 if (adapter->fmp == NULL) {
3515 mp->m_pkthdr.len = len;
3516 adapter->fmp = mp; /* Store the first mbuf */
3517 adapter->lmp = mp;
3518 } else {
3519 /* Chain mbuf's together */
3520 mp->m_flags &= ~M_PKTHDR;
3521 /*
3522 * Adjust length of previous mbuf in chain if
3523 * we received less than 4 bytes in the last
3524 * descriptor.
3525 */
3526 if (prev_len_adj > 0) {
3527 adapter->lmp->m_len -= prev_len_adj;
3528 adapter->fmp->m_pkthdr.len -=
3529 prev_len_adj;
3530 }
3531 adapter->lmp->m_next = mp;
3532 adapter->lmp = adapter->lmp->m_next;
3533 adapter->fmp->m_pkthdr.len += len;
3534 }
3535
3536 if (eop) {
3537 adapter->fmp->m_pkthdr.rcvif = ifp;
3538 ifp->if_ipackets++;
3539 lem_receive_checksum(adapter, current_desc,
3540 adapter->fmp);
3541#ifndef __NO_STRICT_ALIGNMENT
3542 if (adapter->max_frame_size >
3543 (MCLBYTES - ETHER_ALIGN) &&
3544 lem_fixup_rx(adapter) != 0)
3545 goto skip;
3546#endif
3547 if (status & E1000_RXD_STAT_VP) {
3548 adapter->fmp->m_pkthdr.ether_vtag =
3549 le16toh(current_desc->special);
3550 adapter->fmp->m_flags |= M_VLANTAG;
3551 }
3552#ifndef __NO_STRICT_ALIGNMENT
3553skip:
3554#endif
3555 m = adapter->fmp;
3556 adapter->fmp = NULL;
3557 adapter->lmp = NULL;
3558 }
3559 } else {
3560 adapter->dropped_pkts++;
3561discard:
3562 /* Reuse loaded DMA map and just update mbuf chain */
3563 mp = adapter->rx_buffer_area[i].m_head;
3564 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
3565 mp->m_data = mp->m_ext.ext_buf;
3566 mp->m_next = NULL;
3567 if (adapter->max_frame_size <=
3568 (MCLBYTES - ETHER_ALIGN))
3569 m_adj(mp, ETHER_ALIGN);
3570 if (adapter->fmp != NULL) {
3571 m_freem(adapter->fmp);
3572 adapter->fmp = NULL;
3573 adapter->lmp = NULL;
3574 }
3575 m = NULL;
3576 }
3577
3578 /* Zero out the receive descriptors status. */
3579 current_desc->status = 0;
3580 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3581 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3582
3583 /* Advance our pointers to the next descriptor. */
3584 if (++i == adapter->num_rx_desc)
3585 i = 0;
3586 /* Call into the stack */
3587 if (m != NULL) {
3588 adapter->next_rx_desc_to_check = i;
3589 EM_RX_UNLOCK(adapter);
3590 (*ifp->if_input)(ifp, m);
3591 EM_RX_LOCK(adapter);
3592 rx_sent++;
3593 i = adapter->next_rx_desc_to_check;
3594 }
3595 current_desc = &adapter->rx_desc_base[i];
3596 }
3597 adapter->next_rx_desc_to_check = i;
3598
3599 /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
3600 if (--i < 0)
3601 i = adapter->num_rx_desc - 1;
3602 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
3603 if (done != NULL)
3604 *done = rx_sent;
3605 EM_RX_UNLOCK(adapter);
3606 return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE);
3607}
3608
3609#ifndef __NO_STRICT_ALIGNMENT
3610/*
3611 * When jumbo frames are enabled we should realign entire payload on
3612 * architecures with strict alignment. This is serious design mistake of 8254x
3613 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
3614 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
3615 * payload. On architecures without strict alignment restrictions 8254x still
3616 * performs unaligned memory access which would reduce the performance too.
3617 * To avoid copying over an entire frame to align, we allocate a new mbuf and
3618 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
3619 * existing mbuf chain.
3620 *
3621 * Be aware, best performance of the 8254x is achived only when jumbo frame is
3622 * not used at all on architectures with strict alignment.
3623 */
static int
lem_fixup_rx(struct adapter *adapter)
{
	struct mbuf *m, *n;
	int error;

	/*
	 * Realign the received frame (see the block comment above):
	 * returns 0 on success, ENOMEM if a header mbuf could not be
	 * allocated (in which case the whole chain is dropped and
	 * adapter->fmp is cleared).
	 */
	error = 0;
	m = adapter->fmp;
	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		/*
		 * Room left in the cluster: slide the whole frame
		 * forward by ETHER_HDR_LEN (bcopy is overlap-safe) so
		 * the payload after the Ethernet header lands on a
		 * better-aligned address.
		 */
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
	} else {
		/*
		 * No slack in the cluster: prepend a new mbuf holding
		 * just the Ethernet header and leave the payload where
		 * it is.
		 */
		MGETHDR(n, M_DONTWAIT, MT_DATA);
		if (n != NULL) {
			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
			m->m_data += ETHER_HDR_LEN;
			m->m_len -= ETHER_HDR_LEN;
			n->m_len = ETHER_HDR_LEN;
			/* Move the packet header to the new chain head. */
			M_MOVE_PKTHDR(n, m);
			n->m_next = m;
			adapter->fmp = n;
		} else {
			/* Allocation failed: count and drop the frame. */
			adapter->dropped_pkts++;
			m_freem(adapter->fmp);
			adapter->fmp = NULL;
			error = ENOMEM;
		}
	}

	return (error);
}
3655#endif
3656
3657/*********************************************************************
3658 *
3659 * Verify that the hardware indicated that the checksum is valid.
3660 * Inform the stack about the status of checksum so that stack
3661 * doesn't spend time verifying the checksum.
3662 *
3663 *********************************************************************/
3664static void
3665lem_receive_checksum(struct adapter *adapter,
3666 struct e1000_rx_desc *rx_desc, struct mbuf *mp)
3667{
3668 /* 82543 or newer only */
3669 if ((adapter->hw.mac.type < e1000_82543) ||
3670 /* Ignore Checksum bit is set */
3671 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3672 mp->m_pkthdr.csum_flags = 0;
3673 return;
3674 }
3675
3676 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3677 /* Did it pass? */
3678 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3679 /* IP Checksum Good */
3680 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3681 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3682
3683 } else {
3684 mp->m_pkthdr.csum_flags = 0;
3685 }
3686 }
3687
3688 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3689 /* Did it pass? */
3690 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3691 mp->m_pkthdr.csum_flags |=
3692 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3693 mp->m_pkthdr.csum_data = htons(0xffff);
3694 }
3695 }
3696}
3697
3698/*
3699 * This routine is run via an vlan
3700 * config EVENT
3701 */
3702static void
3703lem_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3704{
3705 struct adapter *adapter = ifp->if_softc;
3706 u32 index, bit;
3707
3708 if (ifp->if_softc != arg) /* Not our event */
3709 return;
3710
3711 if ((vtag == 0) || (vtag > 4095)) /* Invalid ID */
3712 return;
3713
3714 EM_CORE_LOCK(adapter);
3715 index = (vtag >> 5) & 0x7F;
3716 bit = vtag & 0x1F;
3717 adapter->shadow_vfta[index] |= (1 << bit);
3718 ++adapter->num_vlans;
3719 /* Re-init to load the changes */
3720 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3721 lem_init_locked(adapter);
3722 EM_CORE_UNLOCK(adapter);
3723}
3724
3725/*
3726 * This routine is run via an vlan
3727 * unconfig EVENT
3728 */
3729static void
3730lem_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3731{
3732 struct adapter *adapter = ifp->if_softc;
3733 u32 index, bit;
3734
3735 if (ifp->if_softc != arg)
3736 return;
3737
3738 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3739 return;
3740
3741 EM_CORE_LOCK(adapter);
3742 index = (vtag >> 5) & 0x7F;
3743 bit = vtag & 0x1F;
3744 adapter->shadow_vfta[index] &= ~(1 << bit);
3745 --adapter->num_vlans;
3746 /* Re-init to load the changes */
3747 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3748 lem_init_locked(adapter);
3749 EM_CORE_UNLOCK(adapter);
3750}
3751
3752static void
3753lem_setup_vlan_hw_support(struct adapter *adapter)
3754{
3755 struct e1000_hw *hw = &adapter->hw;
3756 u32 reg;
3757
3758 /*
3759 ** We get here thru init_locked, meaning
3760 ** a soft reset, this has already cleared
3761 ** the VFTA and other state, so if there
3762 ** have been no vlan's registered do nothing.
3763 */
3764 if (adapter->num_vlans == 0)
3765 return;
3766
3767 /*
3768 ** A soft reset zero's out the VFTA, so
3769 ** we need to repopulate it now.
3770 */
3771 for (int i = 0; i < EM_VFTA_SIZE; i++)
3772 if (adapter->shadow_vfta[i] != 0)
3773 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
3774 i, adapter->shadow_vfta[i]);
3775
3776 reg = E1000_READ_REG(hw, E1000_CTRL);
3777 reg |= E1000_CTRL_VME;
3778 E1000_WRITE_REG(hw, E1000_CTRL, reg);
3779
3780 /* Enable the Filter Table */
3781 reg = E1000_READ_REG(hw, E1000_RCTL);
3782 reg &= ~E1000_RCTL_CFIEN;
3783 reg |= E1000_RCTL_VFE;
3784 E1000_WRITE_REG(hw, E1000_RCTL, reg);
3785
3786 /* Update the frame size */
3787 E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
3788 adapter->max_frame_size + VLAN_TAG_SIZE);
3789}
3790
3791static void
3792lem_enable_intr(struct adapter *adapter)
3793{
3794 struct e1000_hw *hw = &adapter->hw;
3795 u32 ims_mask = IMS_ENABLE_MASK;
3796
3797 E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
3798}
3799
3800static void
3801lem_disable_intr(struct adapter *adapter)
3802{
3803 struct e1000_hw *hw = &adapter->hw;
3804
3805 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3806}
3807
3808/*
3809 * Bit of a misnomer, what this really means is
3810 * to enable OS management of the system... aka
3811 * to disable special hardware management features
3812 */
3813static void
3814lem_init_manageability(struct adapter *adapter)
3815{
3816 /* A shared code workaround */
3817 if (adapter->has_manage) {
3818 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3819 /* disable hardware interception of ARP */
3820 manc &= ~(E1000_MANC_ARP_EN);
3821 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3822 }
3823}
3824
3825/*
3826 * Give control back to hardware management
3827 * controller if there is one.
3828 */
3829static void
3830lem_release_manageability(struct adapter *adapter)
3831{
3832 if (adapter->has_manage) {
3833 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3834
3835 /* re-enable hardware interception of ARP */
3836 manc |= E1000_MANC_ARP_EN;
3837 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3838 }
3839}
3840
3841/*
3842 * lem_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
3843 * For ASF and Pass Through versions of f/w this means
3844 * that the driver is loaded. For AMT version type f/w
3845 * this means that the network i/f is open.
3846 */
3847static void
3848lem_get_hw_control(struct adapter *adapter)
3849{
3850 u32 ctrl_ext;
3851
3852 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3853 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3854 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
3855 return;
3856}
3857
3858/*
3859 * lem_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3860 * For ASF and Pass Through versions of f/w this means that
3861 * the driver is no longer loaded. For AMT versions of the
3862 * f/w this means that the network i/f is closed.
3863 */
3864static void
3865lem_release_hw_control(struct adapter *adapter)
3866{
3867 u32 ctrl_ext;
3868
3869 if (!adapter->has_manage)
3870 return;
3871
3872 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3873 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3874 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
3875 return;
3876}
3877
3878static int
3879lem_is_valid_ether_addr(u8 *addr)
3880{
3881 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3882
3883 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3884 return (FALSE);
3885 }
3886
3887 return (TRUE);
3888}
3889
3890/*
3891** Parse the interface capabilities with regard
3892** to both system management and wake-on-lan for
3893** later use.
3894*/
static void
lem_get_wakeup(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	u16 eeprom_data = 0, device_id, apme_mask;

	adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
	apme_mask = EM_EEPROM_APME;

	/*
	 * Pick the NVM word (and the APME bit within it) that carries
	 * the wake-on-lan setting for this MAC/port; 82542 and 82543
	 * have no such setting, so eeprom_data stays 0 for them.
	 */
	switch (adapter->hw.mac.type) {
	case e1000_82542:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_nvm(&adapter->hw,
		    NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
		apme_mask = EM_82544_APME;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		/* Dual-port parts keep a separate control word per port. */
		if (adapter->hw.bus.func == 1) {
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		} else
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	default:
		e1000_read_nvm(&adapter->hw,
		    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	/* APME set in the EEPROM: enable magic-packet and multicast wake. */
	if (eeprom_data & apme_mask)
		adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
	/*
	 * We have the eeprom settings, now apply the special cases
	 * where the eeprom may be wrong or the board won't support
	 * wake on lan on a particular port
	 */
	device_id = pci_get_device(dev);
	switch (device_id) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
		    E1000_STATUS_FUNC_1)
			adapter->wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->wol = 0;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}
	return;
}
3959
3960
3961/*
3962 * Enable PCI Wake On Lan capability
3963 */
static void
lem_enable_wakeup(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = adapter->ifp;
	u32 pmc, ctrl, ctrl_ext, rctl;
	u16 status;

	/* Without a PCI power-management capability we cannot do PME. */
	if ((pci_find_cap(dev, PCIY_PMG, &pmc) != 0))
		return;

	/* Advertise the wakeup capability */
	ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
	ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
	E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
	/*
	 * NOTE(review): WUC is written again below for non-pchlan
	 * parts; this early write looks redundant but is retained
	 * as upstream behavior.
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);

	/* Keep the laser running on Fiber adapters */
	if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
	    adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
	}

	/*
	** Determine type of Wakeup: note that wol
	** is set with all bits on by default.
	*/
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
		adapter->wol &= ~E1000_WUFC_MAG;

	if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
		adapter->wol &= ~E1000_WUFC_MC;
	else {
		/* Multicast wake needs multicast promiscuous receive. */
		rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
	}

	/* pchlan parts program wakeup through the PHY instead. */
	if (adapter->hw.mac.type == e1000_pchlan) {
		if (lem_enable_phy_wakeup(adapter))
			return;
	} else {
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
	}


	/* Request PME */
	status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
	status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if (ifp->if_capenable & IFCAP_WOL)
		status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);

	return;
}
4022
4023/*
4024** WOL in the newer chipset interfaces (pchlan)
4025** require thing to be copied into the phy
4026*/
static int
lem_enable_phy_wakeup(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mreg, ret = 0;
	u16 preg;

	/*
	 * Mirror the MAC's wakeup-relevant state (receive address
	 * registers, multicast table, receive control) into the PHY's
	 * BM_* registers, then arm PHY-based wakeup. Returns 0 on
	 * success or a non-zero PHY access error code.
	 */

	/* copy MAC RARs to PHY RARs */
	for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
		mreg = E1000_READ_REG(hw, E1000_RAL(i));
		e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
		e1000_write_phy_reg(hw, BM_RAR_M(i),
		    (u16)((mreg >> 16) & 0xFFFF));
		mreg = E1000_READ_REG(hw, E1000_RAH(i));
		e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
		e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
		    (u16)((mreg >> 16) & 0xFFFF));
	}

	/* copy MAC MTA to PHY MTA */
	for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
		mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
		e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
		e1000_write_phy_reg(hw, BM_MTA(i) + 1,
		    (u16)((mreg >> 16) & 0xFFFF));
	}

	/* configure PHY Rx Control register: translate the MAC RCTL
	 * bits we care about into their BM_RCTL equivalents */
	e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
	mreg = E1000_READ_REG(hw, E1000_RCTL);
	if (mreg & E1000_RCTL_UPE)
		preg |= BM_RCTL_UPE;
	if (mreg & E1000_RCTL_MPE)
		preg |= BM_RCTL_MPE;
	preg &= ~(BM_RCTL_MO_MASK);
	if (mreg & E1000_RCTL_MO_3)
		preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
		    << BM_RCTL_MO_SHIFT);
	if (mreg & E1000_RCTL_BAM)
		preg |= BM_RCTL_BAM;
	if (mreg & E1000_RCTL_PMCF)
		preg |= BM_RCTL_PMCF;
	mreg = E1000_READ_REG(hw, E1000_CTRL);
	if (mreg & E1000_CTRL_RFCE)
		preg |= BM_RCTL_RFCE;
	e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);

	/* enable PHY wakeup in MAC register */
	E1000_WRITE_REG(hw, E1000_WUC,
	    E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
	E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);

	/* configure and enable PHY wakeup in PHY registers */
	e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
	e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);

	/* activate PHY wakeup: raw MDIC access requires holding the
	 * PHY semaphore and selecting the wakeup-control page first */
	ret = hw->phy.ops.acquire(hw);
	if (ret) {
		printf("Could not acquire PHY\n");
		return ret;
	}
	e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
	    (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
	ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
	if (ret) {
		printf("Could not read PHY page 769\n");
		goto out;
	}
	preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
	ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
	if (ret)
		printf("Could not set PHY Host Wakeup bit\n");
out:
	hw->phy.ops.release(hw);

	return ret;
}
4105
4106static void
4107lem_led_func(void *arg, int onoff)
4108{
4109 struct adapter *adapter = arg;
4110
4111 EM_CORE_LOCK(adapter);
4112 if (onoff) {
4113 e1000_setup_led(&adapter->hw);
4114 e1000_led_on(&adapter->hw);
4115 } else {
4116 e1000_led_off(&adapter->hw);
4117 e1000_cleanup_led(&adapter->hw);
4118 }
4119 EM_CORE_UNLOCK(adapter);
4120}
4121
4122/*********************************************************************
4123* 82544 Coexistence issue workaround.
4124* There are 2 issues.
4125* 1. Transmit Hang issue.
4126* To detect this issue, following equation can be used...
4127* SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4128* If SUM[3:0] is in between 1 to 4, we will have this issue.
4129*
4130* 2. DAC issue.
4131* To detect this issue, following equation can be used...
4132* SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4133* If SUM[3:0] is in between 9 to c, we will have this issue.
4134*
4135*
4136* WORKAROUND:
4137* Make sure we do not have ending address
4138* as 1,2,3,4(Hang) or 9,a,b,c (DAC)
4139*
4140*************************************************************************/
4141static u32
4142lem_fill_descriptors (bus_addr_t address, u32 length,
4143 PDESC_ARRAY desc_array)
4144{
4145 u32 safe_terminator;
4146
4147 /* Since issue is sensitive to length and address.*/
4148 /* Let us first check the address...*/
4149 if (length <= 4) {
4150 desc_array->descriptor[0].address = address;
4151 desc_array->descriptor[0].length = length;
4152 desc_array->elements = 1;
4153 return (desc_array->elements);
4154 }
4155 safe_terminator = (u32)((((u32)address & 0x7) +
4156 (length & 0xF)) & 0xF);
4157 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
4158 if (safe_terminator == 0 ||
4159 (safe_terminator > 4 &&
4160 safe_terminator < 9) ||
4161 (safe_terminator > 0xC &&
4162 safe_terminator <= 0xF)) {
4163 desc_array->descriptor[0].address = address;
4164 desc_array->descriptor[0].length = length;
4165 desc_array->elements = 1;
4166 return (desc_array->elements);
4167 }
4168
4169 desc_array->descriptor[0].address = address;
4170 desc_array->descriptor[0].length = length - 4;
4171 desc_array->descriptor[1].address = address + (length - 4);
4172 desc_array->descriptor[1].length = 4;
4173 desc_array->elements = 2;
4174 return (desc_array->elements);
4175}
4176
4177/**********************************************************************
4178 *
4179 * Update the board statistics counters.
4180 *
4181 **********************************************************************/
static void
lem_update_stats_counters(struct adapter *adapter)
{
	struct ifnet *ifp;

	/* Symbol/sequence errors are only meaningful on copper or
	 * when the link is up. */
	if(adapter->hw.phy.media_type == e1000_media_type_copper ||
	   (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
		adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
	}
	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
	adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);

	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
	adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
	adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) +
	    ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32);
	adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) +
	    ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32);

	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
	adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);

	/* NOTE(review): only the high dword of TOR/TOT is read here,
	 * unlike GORC/GOTC above — verify against the 8254x manual
	 * whether the low dword should be accumulated as well. */
	adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
	adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);

	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);

	/* These counters exist on 82543 and newer only. */
	if (adapter->hw.mac.type >= e1000_82543) {
		adapter->stats.algnerrc +=
		E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
		adapter->stats.rxerrc +=
		E1000_READ_REG(&adapter->hw, E1000_RXERRC);
		adapter->stats.tncrs +=
		E1000_READ_REG(&adapter->hw, E1000_TNCRS);
		adapter->stats.cexterr +=
		E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
		adapter->stats.tsctc +=
		E1000_READ_REG(&adapter->hw, E1000_TSCTC);
		adapter->stats.tsctfc +=
		E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
	}
	ifp = adapter->ifp;

	/* Fold the hardware counters into the ifnet statistics. */
	ifp->if_collisions = adapter->stats.colc;

	/* Rx Errors */
	ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
	    adapter->stats.crcerrs + adapter->stats.algnerrc +
	    adapter->stats.ruc + adapter->stats.roc +
	    adapter->stats.mpc + adapter->stats.cexterr;

	/* Tx Errors */
	ifp->if_oerrors = adapter->stats.ecol +
	    adapter->stats.latecol + adapter->watchdog_events;
}
4274
4275/* Export a single 32-bit register via a read-only sysctl. */
4276static int
4277lem_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
4278{
4279 struct adapter *adapter;
4280 u_int val;
4281
4282 adapter = oidp->oid_arg1;
4283 val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
4284 return (sysctl_handle_int(oidp, &val, 0, req));
4285}
4286
4287/*
4288 * Add sysctl variables, one per statistic, to the system.
4289 */
4290static void
4291lem_add_hw_stats(struct adapter *adapter)
4292{
4293 device_t dev = adapter->dev;
4294
4295 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4296 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4297 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4298 struct e1000_hw_stats *stats = &adapter->stats;
4299
4300 struct sysctl_oid *stat_node;
4301 struct sysctl_oid_list *stat_list;
4302
4303 /* Driver Statistics */
4304 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_fail",
4305 CTLFLAG_RD, &adapter->mbuf_alloc_failed,
4306 "Std mbuf failed");
4307 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "cluster_alloc_fail",
4308 CTLFLAG_RD, &adapter->mbuf_cluster_failed,
4309 "Std mbuf cluster failed");
4310 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4311 CTLFLAG_RD, &adapter->dropped_pkts,
4312 "Driver dropped packets");
4313 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail",
4314 CTLFLAG_RD, &adapter->no_tx_dma_setup,
4315 "Driver tx dma failure in xmit");
4316 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail1",
4317 CTLFLAG_RD, &adapter->no_tx_desc_avail1,
4318 "Not enough tx descriptors failure in xmit");
4319 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail2",
4320 CTLFLAG_RD, &adapter->no_tx_desc_avail2,
4321 "Not enough tx descriptors failure in xmit");
4322 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
4323 CTLFLAG_RD, &adapter->rx_overruns,
4324 "RX overruns");
4325 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
4326 CTLFLAG_RD, &adapter->watchdog_events,
4327 "Watchdog timeouts");
4328
4329 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
4330 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_CTRL,
4331 lem_sysctl_reg_handler, "IU",
4332 "Device Control Register");
4333 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
4334 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RCTL,
4335 lem_sysctl_reg_handler, "IU",
4336 "Receiver Control Register");
4337 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
4338 CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
4339 "Flow Control High Watermark");
4340 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
4341 CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
4342 "Flow Control Low Watermark");
4343 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_workaround",
4344 CTLFLAG_RD, &adapter->tx_fifo_wrk_cnt,
4345 "TX FIFO workaround events");
4346 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_reset",
4347 CTLFLAG_RD, &adapter->tx_fifo_reset_cnt,
4348 "TX FIFO resets");
4349
4350 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_head",
4351 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDH(0),
4352 lem_sysctl_reg_handler, "IU",
4353 "Transmit Descriptor Head");
4354 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_tail",
4355 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDT(0),
4356 lem_sysctl_reg_handler, "IU",
4357 "Transmit Descriptor Tail");
4358 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_head",
4359 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDH(0),
4360 lem_sysctl_reg_handler, "IU",
4361 "Receive Descriptor Head");
4362 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_tail",
4363 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDT(0),
4364 lem_sysctl_reg_handler, "IU",
4365 "Receive Descriptor Tail");
4366
4367
4368 /* MAC stats get their own sub node */
4369
4370 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4371 CTLFLAG_RD, NULL, "Statistics");
4372 stat_list = SYSCTL_CHILDREN(stat_node);
4373
4374 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
4375 CTLFLAG_RD, &stats->ecol,
4376 "Excessive collisions");
4377 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
4378 CTLFLAG_RD, &stats->scc,
4379 "Single collisions");
4380 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
4381 CTLFLAG_RD, &stats->mcc,
4382 "Multiple collisions");
4383 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
4384 CTLFLAG_RD, &stats->latecol,
4385 "Late collisions");
4386 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
4387 CTLFLAG_RD, &stats->colc,
4388 "Collision Count");
4389 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
4390 CTLFLAG_RD, &adapter->stats.symerrs,
4391 "Symbol Errors");
4392 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
4393 CTLFLAG_RD, &adapter->stats.sec,
4394 "Sequence Errors");
4395 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
4396 CTLFLAG_RD, &adapter->stats.dc,
4397 "Defer Count");
4398 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
4399 CTLFLAG_RD, &adapter->stats.mpc,
4400 "Missed Packets");
4401 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
4402 CTLFLAG_RD, &adapter->stats.rnbc,
4403 "Receive No Buffers");
4404 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
4405 CTLFLAG_RD, &adapter->stats.ruc,
4406 "Receive Undersize");
4407 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4408 CTLFLAG_RD, &adapter->stats.rfc,
4409 "Fragmented Packets Received ");
4410 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
4411 CTLFLAG_RD, &adapter->stats.roc,
4412 "Oversized Packets Received");
4413 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
4414 CTLFLAG_RD, &adapter->stats.rjc,
4415 "Recevied Jabber");
4416 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
4417 CTLFLAG_RD, &adapter->stats.rxerrc,
4418 "Receive Errors");
4419 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4420 CTLFLAG_RD, &adapter->stats.crcerrs,
4421 "CRC errors");
4422 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
4423 CTLFLAG_RD, &adapter->stats.algnerrc,
4424 "Alignment Errors");
4425 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
4426 CTLFLAG_RD, &adapter->stats.cexterr,
4427 "Collision/Carrier extension errors");
4428 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4429 CTLFLAG_RD, &adapter->stats.xonrxc,
4430 "XON Received");
4431 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4432 CTLFLAG_RD, &adapter->stats.xontxc,
4433 "XON Transmitted");
4434 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4435 CTLFLAG_RD, &adapter->stats.xoffrxc,
4436 "XOFF Received");
4437 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4438 CTLFLAG_RD, &adapter->stats.xofftxc,
4439 "XOFF Transmitted");
4440
4441 /* Packet Reception Stats */
4442 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
4443 CTLFLAG_RD, &adapter->stats.tpr,
4444 "Total Packets Received ");
4445 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
4446 CTLFLAG_RD, &adapter->stats.gprc,
4447 "Good Packets Received");
4448 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
4449 CTLFLAG_RD, &adapter->stats.bprc,
4450 "Broadcast Packets Received");
4451 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
4452 CTLFLAG_RD, &adapter->stats.mprc,
4453 "Multicast Packets Received");
4454 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4455 CTLFLAG_RD, &adapter->stats.prc64,
4456 "64 byte frames received ");
4457 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4458 CTLFLAG_RD, &adapter->stats.prc127,
4459 "65-127 byte frames received");
4460 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4461 CTLFLAG_RD, &adapter->stats.prc255,
4462 "128-255 byte frames received");
4463 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4464 CTLFLAG_RD, &adapter->stats.prc511,
4465 "256-511 byte frames received");
4466 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4467 CTLFLAG_RD, &adapter->stats.prc1023,
4468 "512-1023 byte frames received");
4469 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4470 CTLFLAG_RD, &adapter->stats.prc1522,
4471 "1023-1522 byte frames received");
4472 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
4473 CTLFLAG_RD, &adapter->stats.gorc,
4474 "Good Octets Received");
4475
4476 /* Packet Transmission Stats */
4477 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4478 CTLFLAG_RD, &adapter->stats.gotc,
4479 "Good Octets Transmitted");
4480 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4481 CTLFLAG_RD, &adapter->stats.tpt,
4482 "Total Packets Transmitted");
4483 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4484 CTLFLAG_RD, &adapter->stats.gptc,
4485 "Good Packets Transmitted");
4486 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4487 CTLFLAG_RD, &adapter->stats.bptc,
4488 "Broadcast Packets Transmitted");
4489 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4490 CTLFLAG_RD, &adapter->stats.mptc,
4491 "Multicast Packets Transmitted");
4492 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4493 CTLFLAG_RD, &adapter->stats.ptc64,
4494 "64 byte frames transmitted ");
4495 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4496 CTLFLAG_RD, &adapter->stats.ptc127,
4497 "65-127 byte frames transmitted");
4498 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4499 CTLFLAG_RD, &adapter->stats.ptc255,
4500 "128-255 byte frames transmitted");
4501 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4502 CTLFLAG_RD, &adapter->stats.ptc511,
4503 "256-511 byte frames transmitted");
4504 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4505 CTLFLAG_RD, &adapter->stats.ptc1023,
4506 "512-1023 byte frames transmitted");
4507 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4508 CTLFLAG_RD, &adapter->stats.ptc1522,
4509 "1024-1522 byte frames transmitted");
4510 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
4511 CTLFLAG_RD, &adapter->stats.tsctc,
4512 "TSO Contexts Transmitted");
4513 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
4514 CTLFLAG_RD, &adapter->stats.tsctfc,
4515 "TSO Contexts Failed");
4516}
4517
4518/**********************************************************************
4519 *
4520 * This routine provides a way to dump out the adapter eeprom,
4521 * often a useful debug/service tool. This only dumps the first
4522 * 32 words, stuff that matters is in that extent.
4523 *
4524 **********************************************************************/
4525
4526static int
4527lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
4528{
4529 struct adapter *adapter;
4530 int error;
4531 int result;
4532
4533 result = -1;
4534 error = sysctl_handle_int(oidp, &result, 0, req);
4535
4536 if (error || !req->newptr)
4537 return (error);
4538
4539 /*
4540 * This value will cause a hex dump of the
4541 * first 32 16-bit words of the EEPROM to
4542 * the screen.
4543 */
4544 if (result == 1) {
4545 adapter = (struct adapter *)arg1;
4546 lem_print_nvm_info(adapter);
4547 }
4548
4549 return (error);
4550}
4551
4552static void
4553lem_print_nvm_info(struct adapter *adapter)
4554{
4555 u16 eeprom_data;
4556 int i, j, row = 0;
4557
4558 /* Its a bit crude, but it gets the job done */
4559 printf("\nInterface EEPROM Dump:\n");
4560 printf("Offset\n0x0000 ");
4561 for (i = 0, j = 0; i < 32; i++, j++) {
4562 if (j == 8) { /* Make the offset block */
4563 j = 0; ++row;
4564 printf("\n0x00%x0 ",row);
4565 }
4566 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
4567 printf("%04x ", eeprom_data);
4568 }
4569 printf("\n");
4570}
4571
/*
 * Sysctl handler for the tunable interrupt-delay knobs.  Validates the
 * requested delay (in microseconds), converts it to hardware ticks and
 * writes it into the low 16 bits of the register named by info->offset,
 * taking the core lock around the read-modify-write.
 */
static int
lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
{
	struct em_int_delay_info *info;
	struct adapter *adapter;
	u32 regval;
	int error;
	int usecs;
	int ticks;

	info = (struct em_int_delay_info *)arg1;
	usecs = info->value;
	error = sysctl_handle_int(oidp, &usecs, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	/* Reject values that would not fit in the 16-bit tick field. */
	if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
		return (EINVAL);
	info->value = usecs;
	ticks = EM_USECS_TO_TICKS(usecs);

	adapter = info->adapter;

	EM_CORE_LOCK(adapter);
	/* Only the low 16 bits of the register carry the delay value. */
	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
	regval = (regval & ~0xffff) | (ticks & 0xffff);
	/* Handle a few special cases. */
	switch (info->offset) {
	case E1000_RDTR:
		break;
	case E1000_TIDV:
		if (ticks == 0) {
			/* A zero delay disables TX interrupt delay entirely. */
			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
			/* Don't write 0 into the TIDV register. */
			regval++;
		} else
			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
		break;
	}
	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
	EM_CORE_UNLOCK(adapter);
	return (0);
}
4614
4615static void
4616lem_add_int_delay_sysctl(struct adapter *adapter, const char *name,
4617 const char *description, struct em_int_delay_info *info,
4618 int offset, int value)
4619{
4620 info->adapter = adapter;
4621 info->offset = offset;
4622 info->value = value;
4623 SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
4624 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4625 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
4626 info, 0, lem_sysctl_int_delay, "I", description);
4627}
4628
4629static void
4630lem_set_flow_cntrl(struct adapter *adapter, const char *name,
4631 const char *description, int *limit, int value)
4632{
4633 *limit = value;
4634 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4635 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4636 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4637}
4638
4639static void
4640lem_add_rx_process_limit(struct adapter *adapter, const char *name,
4641 const char *description, int *limit, int value)
4642{
4643 *limit = value;
4644 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4645 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4646 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4647}
398 adapter = device_get_softc(dev);
399 adapter->dev = adapter->osdep.dev = dev;
400 EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
401 EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
402 EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
403
404 /* SYSCTL stuff */
405 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
406 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
407 OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
408 lem_sysctl_nvm_info, "I", "NVM Information");
409
410 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
411 callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
412
413 /* Determine hardware and mac info */
414 lem_identify_hardware(adapter);
415
416 /* Setup PCI resources */
417 if (lem_allocate_pci_resources(adapter)) {
418 device_printf(dev, "Allocation of PCI resources failed\n");
419 error = ENXIO;
420 goto err_pci;
421 }
422
423 /* Do Shared Code initialization */
424 if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
425 device_printf(dev, "Setup of Shared code failed\n");
426 error = ENXIO;
427 goto err_pci;
428 }
429
430 e1000_get_bus_info(&adapter->hw);
431
432 /* Set up some sysctls for the tunable interrupt delays */
433 lem_add_int_delay_sysctl(adapter, "rx_int_delay",
434 "receive interrupt delay in usecs", &adapter->rx_int_delay,
435 E1000_REGISTER(&adapter->hw, E1000_RDTR), lem_rx_int_delay_dflt);
436 lem_add_int_delay_sysctl(adapter, "tx_int_delay",
437 "transmit interrupt delay in usecs", &adapter->tx_int_delay,
438 E1000_REGISTER(&adapter->hw, E1000_TIDV), lem_tx_int_delay_dflt);
439 if (adapter->hw.mac.type >= e1000_82540) {
440 lem_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
441 "receive interrupt delay limit in usecs",
442 &adapter->rx_abs_int_delay,
443 E1000_REGISTER(&adapter->hw, E1000_RADV),
444 lem_rx_abs_int_delay_dflt);
445 lem_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
446 "transmit interrupt delay limit in usecs",
447 &adapter->tx_abs_int_delay,
448 E1000_REGISTER(&adapter->hw, E1000_TADV),
449 lem_tx_abs_int_delay_dflt);
450 }
451
452 /* Sysctls for limiting the amount of work done in the taskqueue */
453 lem_add_rx_process_limit(adapter, "rx_processing_limit",
454 "max number of rx packets to process", &adapter->rx_process_limit,
455 lem_rx_process_limit);
456
457 /* Sysctl for setting the interface flow control */
458 lem_set_flow_cntrl(adapter, "flow_control",
459 "flow control setting",
460 &adapter->fc_setting, lem_fc_setting);
461
462 /*
463 * Validate number of transmit and receive descriptors. It
464 * must not exceed hardware maximum, and must be multiple
465 * of E1000_DBA_ALIGN.
466 */
467 if (((lem_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
468 (adapter->hw.mac.type >= e1000_82544 && lem_txd > EM_MAX_TXD) ||
469 (adapter->hw.mac.type < e1000_82544 && lem_txd > EM_MAX_TXD_82543) ||
470 (lem_txd < EM_MIN_TXD)) {
471 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
472 EM_DEFAULT_TXD, lem_txd);
473 adapter->num_tx_desc = EM_DEFAULT_TXD;
474 } else
475 adapter->num_tx_desc = lem_txd;
476 if (((lem_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
477 (adapter->hw.mac.type >= e1000_82544 && lem_rxd > EM_MAX_RXD) ||
478 (adapter->hw.mac.type < e1000_82544 && lem_rxd > EM_MAX_RXD_82543) ||
479 (lem_rxd < EM_MIN_RXD)) {
480 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
481 EM_DEFAULT_RXD, lem_rxd);
482 adapter->num_rx_desc = EM_DEFAULT_RXD;
483 } else
484 adapter->num_rx_desc = lem_rxd;
485
486 adapter->hw.mac.autoneg = DO_AUTO_NEG;
487 adapter->hw.phy.autoneg_wait_to_complete = FALSE;
488 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
489 adapter->rx_buffer_len = 2048;
490
491 e1000_init_script_state_82541(&adapter->hw, TRUE);
492 e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
493
494 /* Copper options */
495 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
496 adapter->hw.phy.mdix = AUTO_ALL_MODES;
497 adapter->hw.phy.disable_polarity_correction = FALSE;
498 adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
499 }
500
501 /*
502 * Set the frame limits assuming
503 * standard ethernet sized frames.
504 */
505 adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
506 adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
507
508 /*
509 * This controls when hardware reports transmit completion
510 * status.
511 */
512 adapter->hw.mac.report_tx_early = 1;
513
514 tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
515 EM_DBA_ALIGN);
516
517 /* Allocate Transmit Descriptor ring */
518 if (lem_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
519 device_printf(dev, "Unable to allocate tx_desc memory\n");
520 error = ENOMEM;
521 goto err_tx_desc;
522 }
523 adapter->tx_desc_base =
524 (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
525
526 rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
527 EM_DBA_ALIGN);
528
529 /* Allocate Receive Descriptor ring */
530 if (lem_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
531 device_printf(dev, "Unable to allocate rx_desc memory\n");
532 error = ENOMEM;
533 goto err_rx_desc;
534 }
535 adapter->rx_desc_base =
536 (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
537
538 /* Allocate multicast array memory. */
539 adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
540 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
541 if (adapter->mta == NULL) {
542 device_printf(dev, "Can not allocate multicast setup array\n");
543 error = ENOMEM;
544 goto err_hw_init;
545 }
546
547 /*
548 ** Start from a known state, this is
549 ** important in reading the nvm and
550 ** mac from that.
551 */
552 e1000_reset_hw(&adapter->hw);
553
554 /* Make sure we have a good EEPROM before we read from it */
555 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
556 /*
557 ** Some PCI-E parts fail the first check due to
558 ** the link being in sleep state, call it again,
559 ** if it fails a second time its a real issue.
560 */
561 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
562 device_printf(dev,
563 "The EEPROM Checksum Is Not Valid\n");
564 error = EIO;
565 goto err_hw_init;
566 }
567 }
568
569 /* Copy the permanent MAC address out of the EEPROM */
570 if (e1000_read_mac_addr(&adapter->hw) < 0) {
571 device_printf(dev, "EEPROM read error while reading MAC"
572 " address\n");
573 error = EIO;
574 goto err_hw_init;
575 }
576
577 if (!lem_is_valid_ether_addr(adapter->hw.mac.addr)) {
578 device_printf(dev, "Invalid MAC address\n");
579 error = EIO;
580 goto err_hw_init;
581 }
582
583 /* Initialize the hardware */
584 if (lem_hardware_init(adapter)) {
585 device_printf(dev, "Unable to initialize the hardware\n");
586 error = EIO;
587 goto err_hw_init;
588 }
589
590 /* Allocate transmit descriptors and buffers */
591 if (lem_allocate_transmit_structures(adapter)) {
592 device_printf(dev, "Could not setup transmit structures\n");
593 error = ENOMEM;
594 goto err_tx_struct;
595 }
596
597 /* Allocate receive descriptors and buffers */
598 if (lem_allocate_receive_structures(adapter)) {
599 device_printf(dev, "Could not setup receive structures\n");
600 error = ENOMEM;
601 goto err_rx_struct;
602 }
603
604 /*
605 ** Do interrupt configuration
606 */
607 error = lem_allocate_irq(adapter);
608 if (error)
609 goto err_rx_struct;
610
611 /*
612 * Get Wake-on-Lan and Management info for later use
613 */
614 lem_get_wakeup(dev);
615
616 /* Setup OS specific network interface */
617 if (lem_setup_interface(dev, adapter) != 0)
618 goto err_rx_struct;
619
620 /* Initialize statistics */
621 lem_update_stats_counters(adapter);
622
623 adapter->hw.mac.get_link_status = 1;
624 lem_update_link_status(adapter);
625
626 /* Indicate SOL/IDER usage */
627 if (e1000_check_reset_block(&adapter->hw))
628 device_printf(dev,
629 "PHY reset is blocked due to SOL/IDER session.\n");
630
631 /* Do we need workaround for 82544 PCI-X adapter? */
632 if (adapter->hw.bus.type == e1000_bus_type_pcix &&
633 adapter->hw.mac.type == e1000_82544)
634 adapter->pcix_82544 = TRUE;
635 else
636 adapter->pcix_82544 = FALSE;
637
638 /* Register for VLAN events */
639 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
640 lem_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
641 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
642 lem_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
643
644 lem_add_hw_stats(adapter);
645
646 /* Non-AMT based hardware can now take control from firmware */
647 if (adapter->has_manage && !adapter->has_amt)
648 lem_get_hw_control(adapter);
649
650 /* Tell the stack that the interface is not active */
651 adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
652
653 adapter->led_dev = led_create(lem_led_func, adapter,
654 device_get_nameunit(dev));
655
656#ifdef DEV_NETMAP
657 lem_netmap_attach(adapter);
658#endif /* DEV_NETMAP */
659 INIT_DEBUGOUT("lem_attach: end");
660
661 return (0);
662
663err_rx_struct:
664 lem_free_transmit_structures(adapter);
665err_tx_struct:
666err_hw_init:
667 lem_release_hw_control(adapter);
668 lem_dma_free(adapter, &adapter->rxdma);
669err_rx_desc:
670 lem_dma_free(adapter, &adapter->txdma);
671err_tx_desc:
672err_pci:
673 if (adapter->ifp != NULL)
674 if_free(adapter->ifp);
675 lem_free_pci_resources(adapter);
676 free(adapter->mta, M_DEVBUF);
677 EM_TX_LOCK_DESTROY(adapter);
678 EM_RX_LOCK_DESTROY(adapter);
679 EM_CORE_LOCK_DESTROY(adapter);
680
681 return (error);
682}
683
684/*********************************************************************
685 * Device removal routine
686 *
687 * The detach entry point is called when the driver is being removed.
688 * This routine stops the adapter and deallocates all the resources
689 * that were allocated for driver operation.
690 *
691 * return 0 on success, positive on failure
692 *********************************************************************/
693
static int
lem_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = adapter->ifp;

	INIT_DEBUGOUT("em_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	if (adapter->led_dev != NULL)
		led_destroy(adapter->led_dev);

	/*
	 * Quiesce the hardware under both locks; in_detach tells the
	 * ioctl path to bail out while we tear things down.
	 */
	EM_CORE_LOCK(adapter);
	EM_TX_LOCK(adapter);
	adapter->in_detach = 1;
	lem_stop(adapter);
	e1000_phy_hw_reset(&adapter->hw);

	lem_release_manageability(adapter);

	EM_TX_UNLOCK(adapter);
	EM_CORE_UNLOCK(adapter);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	/* Detach from the stack and wait for any pending timers. */
	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
	callout_drain(&adapter->tx_fifo_timer);

#ifdef DEV_NETMAP
	netmap_detach(ifp);
#endif /* DEV_NETMAP */
	lem_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(ifp);

	lem_free_transmit_structures(adapter);
	lem_free_receive_structures(adapter);

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base) {
		lem_dma_free(adapter, &adapter->txdma);
		adapter->tx_desc_base = NULL;
	}

	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base) {
		lem_dma_free(adapter, &adapter->rxdma);
		adapter->rx_desc_base = NULL;
	}

	/* Hand control back to the firmware and destroy the locks last. */
	lem_release_hw_control(adapter);
	free(adapter->mta, M_DEVBUF);
	EM_TX_LOCK_DESTROY(adapter);
	EM_RX_LOCK_DESTROY(adapter);
	EM_CORE_LOCK_DESTROY(adapter);

	return (0);
}
767
768/*********************************************************************
769 *
770 * Shutdown entry point
771 *
772 **********************************************************************/
773
774static int
775lem_shutdown(device_t dev)
776{
777 return lem_suspend(dev);
778}
779
780/*
781 * Suspend/resume device methods.
782 */
783static int
784lem_suspend(device_t dev)
785{
786 struct adapter *adapter = device_get_softc(dev);
787
788 EM_CORE_LOCK(adapter);
789
790 lem_release_manageability(adapter);
791 lem_release_hw_control(adapter);
792 lem_enable_wakeup(dev);
793
794 EM_CORE_UNLOCK(adapter);
795
796 return bus_generic_suspend(dev);
797}
798
799static int
800lem_resume(device_t dev)
801{
802 struct adapter *adapter = device_get_softc(dev);
803 struct ifnet *ifp = adapter->ifp;
804
805 EM_CORE_LOCK(adapter);
806 lem_init_locked(adapter);
807 lem_init_manageability(adapter);
808 EM_CORE_UNLOCK(adapter);
809 lem_start(ifp);
810
811 return bus_generic_resume(dev);
812}
813
814
815static void
816lem_start_locked(struct ifnet *ifp)
817{
818 struct adapter *adapter = ifp->if_softc;
819 struct mbuf *m_head;
820
821 EM_TX_LOCK_ASSERT(adapter);
822
823 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
824 IFF_DRV_RUNNING)
825 return;
826 if (!adapter->link_active)
827 return;
828
829 /*
830 * Force a cleanup if number of TX descriptors
831 * available hits the threshold
832 */
833 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
834 lem_txeof(adapter);
835 /* Now do we at least have a minimal? */
836 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
837 adapter->no_tx_desc_avail1++;
838 return;
839 }
840 }
841
842 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
843
844 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
845 if (m_head == NULL)
846 break;
847 /*
848 * Encapsulation can modify our pointer, and or make it
849 * NULL on failure. In that event, we can't requeue.
850 */
851 if (lem_xmit(adapter, &m_head)) {
852 if (m_head == NULL)
853 break;
854 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
855 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
856 break;
857 }
858
859 /* Send a copy of the frame to the BPF listener */
860 ETHER_BPF_MTAP(ifp, m_head);
861
862 /* Set timeout in case hardware has problems transmitting. */
863 adapter->watchdog_check = TRUE;
864 adapter->watchdog_time = ticks;
865 }
866 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
867 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
868
869 return;
870}
871
872static void
873lem_start(struct ifnet *ifp)
874{
875 struct adapter *adapter = ifp->if_softc;
876
877 EM_TX_LOCK(adapter);
878 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
879 lem_start_locked(ifp);
880 EM_TX_UNLOCK(adapter);
881}
882
883/*********************************************************************
884 * Ioctl entry point
885 *
886 * em_ioctl is called when the user wants to configure the
887 * interface.
888 *
889 * return 0 on success, positive on failure
890 **********************************************************************/
891
static int
lem_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	bool avoid_reset = FALSE;
	int error = 0;

	/* Refuse all work once detach has started tearing us down. */
	if (adapter->in_detach)
		return (error);

	switch (command) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				lem_init(adapter);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
	    {
		int max_frame_size;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

		EM_CORE_LOCK(adapter);
		/* The 82542 cannot do jumbo frames. */
		switch (adapter->hw.mac.type) {
		case e1000_82542:
			max_frame_size = ETHER_MAX_LEN;
			break;
		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			EM_CORE_UNLOCK(adapter);
			error = EINVAL;
			break;
		}

		/* Re-init so new frame-size limits take effect. */
		ifp->if_mtu = ifr->ifr_mtu;
		adapter->max_frame_size =
		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
		lem_init_locked(adapter);
		EM_CORE_UNLOCK(adapter);
		break;
	    }
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd:\
		    SIOCSIFFLAGS (Set Interface Flags)");
		EM_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/*
				 * Only the promisc/allmulti bits changed:
				 * refresh filtering without a full re-init.
				 */
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					lem_disable_promisc(adapter);
					lem_set_promisc(adapter);
				}
			} else
				lem_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				EM_TX_LOCK(adapter);
				lem_stop(adapter);
				EM_TX_UNLOCK(adapter);
			}
		adapter->if_flags = ifp->if_flags;
		EM_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			EM_CORE_LOCK(adapter);
			lem_disable_intr(adapter);
			lem_set_multi(adapter);
			/* 82542 rev 2 needs its RX unit reprogrammed too. */
			if (adapter->hw.mac.type == e1000_82542 &&
	    		    adapter->hw.revision_id == E1000_REVISION_2) {
				lem_initialize_receive_unit(adapter);
			}
#ifdef DEVICE_POLLING
			if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
				lem_enable_intr(adapter);
			EM_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		EM_CORE_LOCK(adapter);
		if (e1000_check_reset_block(&adapter->hw)) {
			EM_CORE_UNLOCK(adapter);
			device_printf(adapter->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		EM_CORE_UNLOCK(adapter);
		/* FALLTHROUGH -- get and set share the ifmedia handler */
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: \
		    SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask, reinit;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(lem_poll, ifp);
				if (error)
					return (error);
				EM_CORE_LOCK(adapter);
				lem_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				EM_CORE_LOCK(adapter);
				lem_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if ((mask & IFCAP_WOL) &&
		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
			if (mask & IFCAP_WOL_MCAST)
				ifp->if_capenable ^= IFCAP_WOL_MCAST;
			if (mask & IFCAP_WOL_MAGIC)
				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		}
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
			lem_init(adapter);
		VLAN_CAPABILITIES(ifp);
		break;
	    }

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
1069
1070
1071/*********************************************************************
1072 * Init entry point
1073 *
1074 * This routine is used in two ways. It is used by the stack as
1075 * init entry point in network interface structure. It is also used
1076 * by the driver as a hw/sw initialization routine to get to a
1077 * consistent state.
1078 *
1079 * return 0 on success, positive on failure
1080 **********************************************************************/
1081
static void
lem_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	u32		pba;

	INIT_DEBUGOUT("lem_init: begin");

	EM_CORE_LOCK_ASSERT(adapter);

	/* Quiesce the hardware before reprogramming it. */
	EM_TX_LOCK(adapter);
	lem_stop(adapter);
	EM_TX_UNLOCK(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 *   Note: default does not leave enough room for Jumbo Frame >10k.
	 */
	switch (adapter->hw.mac.type) {
	case e1000_82547:
	case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size =
		    (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K.   */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}

	INIT_DEBUGOUT1("lem_init: pba=%dK",pba);
	E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
	
	/* Get the latest mac address, User can use a LAA */
        bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
              ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	/* Initialize the hardware */
	if (lem_hardware_init(adapter)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		return;
	}
	lem_update_link_status(adapter);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);

	/* Set hardware offload abilities */
	ifp->if_hwassist = 0;
	if (adapter->hw.mac.type >= e1000_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
	}

	/* Configure for OS presence */
	lem_init_manageability(adapter);

	/* Prepare transmit descriptors and buffers */
	lem_setup_transmit_structures(adapter);
	lem_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	lem_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (lem_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		EM_TX_LOCK(adapter);
		lem_stop(adapter);
		EM_TX_UNLOCK(adapter);
		return;
	}
	lem_initialize_receive_unit(adapter);

	/* Use real VLAN Filter support? */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
			/* Use real VLAN Filter support */
			lem_setup_vlan_hw_support(adapter);
		else {
			u32 ctrl;
			/* Just enable VLAN tag stripping (VME bit). */
			ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
			ctrl |= E1000_CTRL_VME;
			E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
		}
	}

	/* Don't lose promiscuous settings */
	lem_set_promisc(adapter);

	/* Mark the interface up and able to accept transmits. */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
	e1000_clear_hw_cntrs_base_generic(&adapter->hw);

#ifdef DEVICE_POLLING
	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		lem_disable_intr(adapter);
	else
#endif /* DEVICE_POLLING */
		lem_enable_intr(adapter);

	/* AMT based hardware can now take control from firmware */
	if (adapter->has_manage && adapter->has_amt)
		lem_get_hw_control(adapter);
}
1212
/*
 * if_init entry point: acquire the core lock and run the real
 * initialization in lem_init_locked().
 */
static void
lem_init(void *arg)
{
	struct adapter *sc = arg;

	EM_CORE_LOCK(sc);
	lem_init_locked(sc);
	EM_CORE_UNLOCK(sc);
}
1222
1223
#ifdef DEVICE_POLLING
/*********************************************************************
 *
 *  Legacy polling routine
 *
 *********************************************************************/
static int
lem_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
    struct adapter *sc = ifp->if_softc;
    u32 icr, rx_done = 0;

    EM_CORE_LOCK(sc);
    if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
        EM_CORE_UNLOCK(sc);
        return (rx_done);
    }

    if (cmd == POLL_AND_CHECK_STATUS) {
        icr = E1000_READ_REG(&sc->hw, E1000_ICR);
        if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
            /* Link event: refresh state and restart the timer */
            callout_stop(&sc->timer);
            sc->hw.mac.get_link_status = 1;
            lem_update_link_status(sc);
            callout_reset(&sc->timer, hz,
                lem_local_timer, sc);
        }
    }
    EM_CORE_UNLOCK(sc);

    /* Clean received packets up to "count", then service TX */
    lem_rxeof(sc, count, &rx_done);

    EM_TX_LOCK(sc);
    lem_txeof(sc);
    if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
        lem_start_locked(ifp);
    EM_TX_UNLOCK(sc);
    return (rx_done);
}
#endif /* DEVICE_POLLING */
1264
1265/*********************************************************************
1266 *
1267 * Legacy Interrupt Service routine
1268 *
1269 *********************************************************************/
1270static void
1271lem_intr(void *arg)
1272{
1273 struct adapter *adapter = arg;
1274 struct ifnet *ifp = adapter->ifp;
1275 u32 reg_icr;
1276
1277
1278 if ((ifp->if_capenable & IFCAP_POLLING) ||
1279 ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0))
1280 return;
1281
1282 EM_CORE_LOCK(adapter);
1283 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1284 if (reg_icr & E1000_ICR_RXO)
1285 adapter->rx_overruns++;
1286
1287 if ((reg_icr == 0xffffffff) || (reg_icr == 0)) {
1288 EM_CORE_UNLOCK(adapter);
1289 return;
1290 }
1291
1292 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1293 callout_stop(&adapter->timer);
1294 adapter->hw.mac.get_link_status = 1;
1295 lem_update_link_status(adapter);
1296 /* Deal with TX cruft when link lost */
1297 lem_tx_purge(adapter);
1298 callout_reset(&adapter->timer, hz,
1299 lem_local_timer, adapter);
1300 EM_CORE_UNLOCK(adapter);
1301 return;
1302 }
1303
1304 EM_CORE_UNLOCK(adapter);
1305 lem_rxeof(adapter, -1, NULL);
1306
1307 EM_TX_LOCK(adapter);
1308 lem_txeof(adapter);
1309 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1310 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1311 lem_start_locked(ifp);
1312 EM_TX_UNLOCK(adapter);
1313 return;
1314}
1315
1316
1317static void
1318lem_handle_link(void *context, int pending)
1319{
1320 struct adapter *adapter = context;
1321 struct ifnet *ifp = adapter->ifp;
1322
1323 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1324 return;
1325
1326 EM_CORE_LOCK(adapter);
1327 callout_stop(&adapter->timer);
1328 lem_update_link_status(adapter);
1329 /* Deal with TX cruft when link lost */
1330 lem_tx_purge(adapter);
1331 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1332 EM_CORE_UNLOCK(adapter);
1333}
1334
1335
1336/* Combined RX/TX handler, used by Legacy and MSI */
1337static void
1338lem_handle_rxtx(void *context, int pending)
1339{
1340 struct adapter *adapter = context;
1341 struct ifnet *ifp = adapter->ifp;
1342
1343
1344 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1345 lem_rxeof(adapter, adapter->rx_process_limit, NULL);
1346 EM_TX_LOCK(adapter);
1347 lem_txeof(adapter);
1348 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1349 lem_start_locked(ifp);
1350 EM_TX_UNLOCK(adapter);
1351 }
1352
1353 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1354 lem_enable_intr(adapter);
1355}
1356
1357/*********************************************************************
1358 *
1359 * Fast Legacy/MSI Combined Interrupt Service routine
1360 *
1361 *********************************************************************/
1362static int
1363lem_irq_fast(void *arg)
1364{
1365 struct adapter *adapter = arg;
1366 struct ifnet *ifp;
1367 u32 reg_icr;
1368
1369 ifp = adapter->ifp;
1370
1371 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1372
1373 /* Hot eject? */
1374 if (reg_icr == 0xffffffff)
1375 return FILTER_STRAY;
1376
1377 /* Definitely not our interrupt. */
1378 if (reg_icr == 0x0)
1379 return FILTER_STRAY;
1380
1381 /*
1382 * Mask interrupts until the taskqueue is finished running. This is
1383 * cheap, just assume that it is needed. This also works around the
1384 * MSI message reordering errata on certain systems.
1385 */
1386 lem_disable_intr(adapter);
1387 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1388
1389 /* Link status change */
1390 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1391 adapter->hw.mac.get_link_status = 1;
1392 taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1393 }
1394
1395 if (reg_icr & E1000_ICR_RXO)
1396 adapter->rx_overruns++;
1397 return FILTER_HANDLED;
1398}
1399
1400
1401/*********************************************************************
1402 *
1403 * Media Ioctl callback
1404 *
1405 * This routine is called whenever the user queries the status of
1406 * the interface using ifconfig.
1407 *
1408 **********************************************************************/
1409static void
1410lem_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1411{
1412 struct adapter *adapter = ifp->if_softc;
1413 u_char fiber_type = IFM_1000_SX;
1414
1415 INIT_DEBUGOUT("lem_media_status: begin");
1416
1417 EM_CORE_LOCK(adapter);
1418 lem_update_link_status(adapter);
1419
1420 ifmr->ifm_status = IFM_AVALID;
1421 ifmr->ifm_active = IFM_ETHER;
1422
1423 if (!adapter->link_active) {
1424 EM_CORE_UNLOCK(adapter);
1425 return;
1426 }
1427
1428 ifmr->ifm_status |= IFM_ACTIVE;
1429
1430 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1431 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1432 if (adapter->hw.mac.type == e1000_82545)
1433 fiber_type = IFM_1000_LX;
1434 ifmr->ifm_active |= fiber_type | IFM_FDX;
1435 } else {
1436 switch (adapter->link_speed) {
1437 case 10:
1438 ifmr->ifm_active |= IFM_10_T;
1439 break;
1440 case 100:
1441 ifmr->ifm_active |= IFM_100_TX;
1442 break;
1443 case 1000:
1444 ifmr->ifm_active |= IFM_1000_T;
1445 break;
1446 }
1447 if (adapter->link_duplex == FULL_DUPLEX)
1448 ifmr->ifm_active |= IFM_FDX;
1449 else
1450 ifmr->ifm_active |= IFM_HDX;
1451 }
1452 EM_CORE_UNLOCK(adapter);
1453}
1454
1455/*********************************************************************
1456 *
1457 * Media Ioctl callback
1458 *
1459 * This routine is called when the user changes speed/duplex using
1460 * media/mediopt option with ifconfig.
1461 *
1462 **********************************************************************/
1463static int
1464lem_media_change(struct ifnet *ifp)
1465{
1466 struct adapter *adapter = ifp->if_softc;
1467 struct ifmedia *ifm = &adapter->media;
1468
1469 INIT_DEBUGOUT("lem_media_change: begin");
1470
1471 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1472 return (EINVAL);
1473
1474 EM_CORE_LOCK(adapter);
1475 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1476 case IFM_AUTO:
1477 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1478 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1479 break;
1480 case IFM_1000_LX:
1481 case IFM_1000_SX:
1482 case IFM_1000_T:
1483 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1484 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
1485 break;
1486 case IFM_100_TX:
1487 adapter->hw.mac.autoneg = FALSE;
1488 adapter->hw.phy.autoneg_advertised = 0;
1489 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1490 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1491 else
1492 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
1493 break;
1494 case IFM_10_T:
1495 adapter->hw.mac.autoneg = FALSE;
1496 adapter->hw.phy.autoneg_advertised = 0;
1497 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1498 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1499 else
1500 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1501 break;
1502 default:
1503 device_printf(adapter->dev, "Unsupported media type\n");
1504 }
1505
1506 lem_init_locked(adapter);
1507 EM_CORE_UNLOCK(adapter);
1508
1509 return (0);
1510}
1511
/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

/*
 * Map *m_headp for DMA and fill one TX descriptor per segment
 * (possibly several per segment on 82544/PCIX, which needs address/
 * length splitting), then advance the hardware tail pointer.
 * On failure the mbuf may be freed and *m_headp set to NULL.
 * Caller holds the TX lock (lem_82547_move_tail asserts it).
 */
static int
lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
{
    bus_dma_segment_t segs[EM_MAX_SCATTER];
    bus_dmamap_t map;
    struct em_buffer *tx_buffer, *tx_buffer_mapped;
    struct e1000_tx_desc *ctxd = NULL;
    struct mbuf *m_head;
    u32 txd_upper, txd_lower, txd_used, txd_saved;
    int error, nsegs, i, j, first, last = 0;

    m_head = *m_headp;
    txd_upper = txd_lower = txd_used = txd_saved = 0;

    /*
    ** When doing checksum offload, it is critical to
    ** make sure the first mbuf has more than header,
    ** because that routine expects data to be present.
    */
    if ((m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) &&
        (m_head->m_len < ETHER_HDR_LEN + sizeof(struct ip))) {
        m_head = m_pullup(m_head, ETHER_HDR_LEN + sizeof(struct ip));
        *m_headp = m_head;
        if (m_head == NULL)
            return (ENOBUFS);
    }

    /*
     * Map the packet for DMA
     *
     * Capture the first descriptor index,
     * this descriptor will have the index
     * of the EOP which is the only one that
     * now gets a DONE bit writeback.
     */
    first = adapter->next_avail_tx_desc;
    tx_buffer = &adapter->tx_buffer_area[first];
    tx_buffer_mapped = tx_buffer;
    map = tx_buffer->map;

    error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
        *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

    /*
     * There are two types of errors we can (try) to handle:
     * - EFBIG means the mbuf chain was too long and bus_dma ran
     *   out of segments.  Defragment the mbuf chain and try again.
     * - ENOMEM means bus_dma could not obtain enough bounce buffers
     *   at this point in time.  Defer sending and try again later.
     * All other errors, in particular EINVAL, are fatal and prevent the
     * mbuf chain from ever going through.  Drop it and report error.
     */
    if (error == EFBIG) {
        struct mbuf *m;

        m = m_defrag(*m_headp, M_DONTWAIT);
        if (m == NULL) {
            adapter->mbuf_alloc_failed++;
            m_freem(*m_headp);
            *m_headp = NULL;
            return (ENOBUFS);
        }
        *m_headp = m;

        /* Try it again */
        error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
            *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

        if (error) {
            adapter->no_tx_dma_setup++;
            m_freem(*m_headp);
            *m_headp = NULL;
            return (error);
        }
    } else if (error != 0) {
        /* Non-EFBIG failure: caller keeps ownership of the mbuf */
        adapter->no_tx_dma_setup++;
        return (error);
    }

    /* Keep 2 descriptors of slack so head never catches tail */
    if (nsegs > (adapter->num_tx_desc_avail - 2)) {
        adapter->no_tx_desc_avail2++;
        bus_dmamap_unload(adapter->txtag, map);
        return (ENOBUFS);
    }
    m_head = *m_headp;

    /* Do hardware assists */
    if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
        lem_transmit_checksum_setup(adapter, m_head,
            &txd_upper, &txd_lower);

    i = adapter->next_avail_tx_desc;
    if (adapter->pcix_82544)
        /* Remember the start so we can roll back on overflow below */
        txd_saved = i;

    /* Set up our transmit descriptors */
    for (j = 0; j < nsegs; j++) {
        bus_size_t seg_len;
        bus_addr_t seg_addr;
        /* If adapter is 82544 and on PCIX bus */
        if(adapter->pcix_82544) {
            DESC_ARRAY desc_array;
            u32 array_elements, counter;
            /*
             * Check the Address and Length combination and
             * split the data accordingly
             */
            array_elements = lem_fill_descriptors(segs[j].ds_addr,
                segs[j].ds_len, &desc_array);
            for (counter = 0; counter < array_elements; counter++) {
                if (txd_used == adapter->num_tx_desc_avail) {
                    /* Out of descriptors: undo and defer */
                    adapter->next_avail_tx_desc = txd_saved;
                    adapter->no_tx_desc_avail2++;
                    bus_dmamap_unload(adapter->txtag, map);
                    return (ENOBUFS);
                }
                tx_buffer = &adapter->tx_buffer_area[i];
                ctxd = &adapter->tx_desc_base[i];
                ctxd->buffer_addr = htole64(
                    desc_array.descriptor[counter].address);
                ctxd->lower.data = htole32(
                    (adapter->txd_cmd | txd_lower | (u16)
                    desc_array.descriptor[counter].length));
                ctxd->upper.data =
                    htole32((txd_upper));
                last = i;
                if (++i == adapter->num_tx_desc)
                    i = 0;
                tx_buffer->m_head = NULL;
                tx_buffer->next_eop = -1;
                txd_used++;
            }
        } else {
            /* One descriptor per DMA segment */
            tx_buffer = &adapter->tx_buffer_area[i];
            ctxd = &adapter->tx_desc_base[i];
            seg_addr = segs[j].ds_addr;
            seg_len  = segs[j].ds_len;
            ctxd->buffer_addr = htole64(seg_addr);
            ctxd->lower.data = htole32(
                adapter->txd_cmd | txd_lower | seg_len);
            ctxd->upper.data =
                htole32(txd_upper);
            last = i;
            if (++i == adapter->num_tx_desc)
                i = 0;
            tx_buffer->m_head = NULL;
            tx_buffer->next_eop = -1;
        }
    }

    adapter->next_avail_tx_desc = i;

    if (adapter->pcix_82544)
        adapter->num_tx_desc_avail -= txd_used;
    else
        adapter->num_tx_desc_avail -= nsegs;

    if (m_head->m_flags & M_VLANTAG) {
        /* Set the vlan id. */
        ctxd->upper.fields.special =
            htole16(m_head->m_pkthdr.ether_vtag);
        /* Tell hardware to add tag */
        ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
    }

    /* mbuf and loaded map belong to the EOP slot; swap maps back */
    tx_buffer->m_head = m_head;
    tx_buffer_mapped->map = tx_buffer->map;
    tx_buffer->map = map;
    bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

    /*
     * Last Descriptor of Packet
     * needs End Of Packet (EOP)
     * and Report Status (RS)
     */
    ctxd->lower.data |=
        htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
    /*
     * Keep track in the first buffer which
     * descriptor will be written back
     */
    tx_buffer = &adapter->tx_buffer_area[first];
    tx_buffer->next_eop = last;
    adapter->watchdog_time = ticks;

    /*
     * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
     * that this frame is available to transmit.
     */
    bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    if (adapter->hw.mac.type == e1000_82547 &&
        adapter->link_duplex == HALF_DUPLEX)
        /* 82547 half-duplex FIFO-hang workaround path */
        lem_82547_move_tail(adapter);
    else {
        E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
        if (adapter->hw.mac.type == e1000_82547)
            lem_82547_update_fifo_head(adapter,
                m_head->m_pkthdr.len);
    }

    return (0);
}
1722
/*********************************************************************
 *
 * 82547 workaround to avoid controller hang in half-duplex environment.
 * The workaround is to avoid queuing a large packet that would span
 * the internal Tx FIFO ring boundary.  We need to reset the FIFO pointers
 * in this case.  We do that only when FIFO is quiescent.
 *
 **********************************************************************/
/*
 * Walk hardware TDT forward toward the software tail one packet at a
 * time; before exposing each EOP, check that the packet will not wrap
 * the internal TX FIFO.  If it would, reschedule ourselves on the
 * tx_fifo_timer and retry (after an attempted FIFO reset).
 * Also used as a callout handler, hence the void* argument.
 */
static void
lem_82547_move_tail(void *arg)
{
    struct adapter *adapter = arg;
    struct e1000_tx_desc *tx_desc;
    u16 hw_tdt, sw_tdt, length = 0;
    bool eop = 0;

    EM_TX_LOCK_ASSERT(adapter);

    hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
    sw_tdt = adapter->next_avail_tx_desc;

    while (hw_tdt != sw_tdt) {
        /* Accumulate the length of this packet's descriptors */
        tx_desc = &adapter->tx_desc_base[hw_tdt];
        length += tx_desc->lower.flags.length;
        eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
        if (++hw_tdt == adapter->num_tx_desc)
            hw_tdt = 0;

        if (eop) {
            if (lem_82547_fifo_workaround(adapter, length)) {
                /* FIFO would wrap: retry from the callout */
                adapter->tx_fifo_wrk_cnt++;
                callout_reset(&adapter->tx_fifo_timer, 1,
                    lem_82547_move_tail, adapter);
                break;
            }
            /* Safe: hand this packet to hardware */
            E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
            lem_82547_update_fifo_head(adapter, length);
            length = 0;
        }
    }
}
1764
1765static int
1766lem_82547_fifo_workaround(struct adapter *adapter, int len)
1767{
1768 int fifo_space, fifo_pkt_len;
1769
1770 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1771
1772 if (adapter->link_duplex == HALF_DUPLEX) {
1773 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1774
1775 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1776 if (lem_82547_tx_fifo_reset(adapter))
1777 return (0);
1778 else
1779 return (1);
1780 }
1781 }
1782
1783 return (0);
1784}
1785
1786static void
1787lem_82547_update_fifo_head(struct adapter *adapter, int len)
1788{
1789 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1790
1791 /* tx_fifo_head is always 16 byte aligned */
1792 adapter->tx_fifo_head += fifo_pkt_len;
1793 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1794 adapter->tx_fifo_head -= adapter->tx_fifo_size;
1795 }
1796}
1797
1798
/*
 * Reset the 82547's internal TX FIFO pointers, but only when the TX
 * path is completely quiescent (descriptor ring drained and FIFO
 * head/tail/packet-count all idle).  Returns TRUE if the reset was
 * performed, FALSE if the FIFO was still busy.
 */
static int
lem_82547_tx_fifo_reset(struct adapter *adapter)
{
    u32 tctl;

    /* Quiescence check: ring empty and FIFO pointers at rest */
    if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
        E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
        (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
        E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
        (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
        E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
        (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
        /* Disable TX unit */
        tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
        E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
            tctl & ~E1000_TCTL_EN);

        /* Reset FIFO pointers */
        E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
            adapter->tx_head_addr);
        E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
            adapter->tx_head_addr);
        E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
            adapter->tx_head_addr);
        E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
            adapter->tx_head_addr);

        /* Re-enable TX unit */
        E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
        E1000_WRITE_FLUSH(&adapter->hw);

        adapter->tx_fifo_head = 0;
        adapter->tx_fifo_reset_cnt++;

        return (TRUE);
    }
    else {
        return (FALSE);
    }
}
1839
1840static void
1841lem_set_promisc(struct adapter *adapter)
1842{
1843 struct ifnet *ifp = adapter->ifp;
1844 u32 reg_rctl;
1845
1846 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1847
1848 if (ifp->if_flags & IFF_PROMISC) {
1849 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1850 /* Turn this on if you want to see bad packets */
1851 if (lem_debug_sbp)
1852 reg_rctl |= E1000_RCTL_SBP;
1853 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1854 } else if (ifp->if_flags & IFF_ALLMULTI) {
1855 reg_rctl |= E1000_RCTL_MPE;
1856 reg_rctl &= ~E1000_RCTL_UPE;
1857 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1858 }
1859}
1860
1861static void
1862lem_disable_promisc(struct adapter *adapter)
1863{
1864 u32 reg_rctl;
1865
1866 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1867
1868 reg_rctl &= (~E1000_RCTL_UPE);
1869 reg_rctl &= (~E1000_RCTL_MPE);
1870 reg_rctl &= (~E1000_RCTL_SBP);
1871 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1872}
1873
1874
/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever multicast address list is updated.
 *
 **********************************************************************/

/*
 * Rebuild the hardware multicast filter from the ifnet's multicast
 * address list.  If the list overflows the hardware table, fall back
 * to multicast-promiscuous mode.  The 82542 rev 2 requires the
 * receiver to be held in reset (and MWI disabled) around the update.
 */
static void
lem_set_multi(struct adapter *adapter)
{
    struct ifnet *ifp = adapter->ifp;
    struct ifmultiaddr *ifma;
    u32 reg_rctl = 0;
    u8  *mta; /* Multicast array memory */
    int mcnt = 0;

    IOCTL_DEBUGOUT("lem_set_multi: begin");

    mta = adapter->mta;
    bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

    /* 82542 rev 2 workaround: put receiver in reset, drop MWI */
    if (adapter->hw.mac.type == e1000_82542 &&
        adapter->hw.revision_id == E1000_REVISION_2) {
        reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
        if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
            e1000_pci_clear_mwi(&adapter->hw);
        reg_rctl |= E1000_RCTL_RST;
        E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
        msec_delay(5);
    }

#if __FreeBSD_version < 800000
    IF_ADDR_LOCK(ifp);
#else
    if_maddr_rlock(ifp);
#endif
    /* Collect up to MAX_NUM_MULTICAST_ADDRESSES link-layer addrs */
    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
        if (ifma->ifma_addr->sa_family != AF_LINK)
            continue;

        if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
            break;

        bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
            &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
        mcnt++;
    }
#if __FreeBSD_version < 800000
    IF_ADDR_UNLOCK(ifp);
#else
    if_maddr_runlock(ifp);
#endif
    if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
        /* Table overflow: accept all multicast instead */
        reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
        reg_rctl |= E1000_RCTL_MPE;
        E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
    } else
        e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);

    /* 82542 rev 2: release receiver reset, restore MWI */
    if (adapter->hw.mac.type == e1000_82542 &&
        adapter->hw.revision_id == E1000_REVISION_2) {
        reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
        reg_rctl &= ~E1000_RCTL_RST;
        E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
        msec_delay(5);
        if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
            e1000_pci_set_mwi(&adapter->hw);
    }
}
1944
1945
1946/*********************************************************************
1947 * Timer routine
1948 *
1949 * This routine checks for link status and updates statistics.
1950 *
1951 **********************************************************************/
1952
1953static void
1954lem_local_timer(void *arg)
1955{
1956 struct adapter *adapter = arg;
1957
1958 EM_CORE_LOCK_ASSERT(adapter);
1959
1960 lem_update_link_status(adapter);
1961 lem_update_stats_counters(adapter);
1962
1963 lem_smartspeed(adapter);
1964
1965 /*
1966 * We check the watchdog: the time since
1967 * the last TX descriptor was cleaned.
1968 * This implies a functional TX engine.
1969 */
1970 if ((adapter->watchdog_check == TRUE) &&
1971 (ticks - adapter->watchdog_time > EM_WATCHDOG))
1972 goto hung;
1973
1974 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1975 return;
1976hung:
1977 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1978 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1979 adapter->watchdog_events++;
1980 lem_init_locked(adapter);
1981}
1982
/*
 * Refresh the cached link state from the MAC/PHY and, on an up/down
 * transition, update the ifnet (baudrate, link-state change
 * notification) and the TX watchdog enable.  Called from the timer
 * and interrupt paths.
 */
static void
lem_update_link_status(struct adapter *adapter)
{
    struct e1000_hw *hw = &adapter->hw;
    struct ifnet *ifp = adapter->ifp;
    device_t dev = adapter->dev;
    u32 link_check = 0;

    /* Get the cached link value or read phy for real */
    switch (hw->phy.media_type) {
    case e1000_media_type_copper:
        if (hw->mac.get_link_status) {
            /* Do the work to read phy */
            e1000_check_for_link(hw);
            link_check = !hw->mac.get_link_status;
            if (link_check) /* ESB2 fix */
                e1000_cfg_on_link_up(hw);
        } else
            link_check = TRUE;
        break;
    case e1000_media_type_fiber:
        e1000_check_for_link(hw);
        link_check = (E1000_READ_REG(hw, E1000_STATUS) &
            E1000_STATUS_LU);
        break;
    case e1000_media_type_internal_serdes:
        e1000_check_for_link(hw);
        link_check = adapter->hw.mac.serdes_has_link;
        break;
    default:
    case e1000_media_type_unknown:
        break;
    }

    /* Now check for a transition */
    if (link_check && (adapter->link_active == 0)) {
        /* Link came up: latch speed/duplex, notify the stack */
        e1000_get_speed_and_duplex(hw, &adapter->link_speed,
            &adapter->link_duplex);
        if (bootverbose)
            device_printf(dev, "Link is up %d Mbps %s\n",
                adapter->link_speed,
                ((adapter->link_duplex == FULL_DUPLEX) ?
                "Full Duplex" : "Half Duplex"));
        adapter->link_active = 1;
        adapter->smartspeed = 0;
        ifp->if_baudrate = adapter->link_speed * 1000000;
        if_link_state_change(ifp, LINK_STATE_UP);
    } else if (!link_check && (adapter->link_active == 1)) {
        /* Link went down: zero speed/duplex, notify the stack */
        ifp->if_baudrate = adapter->link_speed = 0;
        adapter->link_duplex = 0;
        if (bootverbose)
            device_printf(dev, "Link is Down\n");
        adapter->link_active = 0;
        /* Link down, disable watchdog */
        adapter->watchdog_check = FALSE;
        if_link_state_change(ifp, LINK_STATE_DOWN);
    }
}
2041
2042/*********************************************************************
2043 *
2044 * This routine disables all traffic on the adapter by issuing a
2045 * global reset on the MAC and deallocates TX/RX buffers.
2046 *
2047 * This routine should always be called with BOTH the CORE
2048 * and TX locks.
2049 **********************************************************************/
2050
2051static void
2052lem_stop(void *arg)
2053{
2054 struct adapter *adapter = arg;
2055 struct ifnet *ifp = adapter->ifp;
2056
2057 EM_CORE_LOCK_ASSERT(adapter);
2058 EM_TX_LOCK_ASSERT(adapter);
2059
2060 INIT_DEBUGOUT("lem_stop: begin");
2061
2062 lem_disable_intr(adapter);
2063 callout_stop(&adapter->timer);
2064 callout_stop(&adapter->tx_fifo_timer);
2065
2066 /* Tell the stack that the interface is no longer active */
2067 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2068
2069 e1000_reset_hw(&adapter->hw);
2070 if (adapter->hw.mac.type >= e1000_82544)
2071 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2072
2073 e1000_led_off(&adapter->hw);
2074 e1000_cleanup_led(&adapter->hw);
2075}
2076
2077
2078/*********************************************************************
2079 *
2080 * Determine hardware revision.
2081 *
2082 **********************************************************************/
2083static void
2084lem_identify_hardware(struct adapter *adapter)
2085{
2086 device_t dev = adapter->dev;
2087
2088 /* Make sure our PCI config space has the necessary stuff set */
2089 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2090 if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
2091 (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
2092 device_printf(dev, "Memory Access and/or Bus Master bits "
2093 "were not set!\n");
2094 adapter->hw.bus.pci_cmd_word |=
2095 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
2096 pci_write_config(dev, PCIR_COMMAND,
2097 adapter->hw.bus.pci_cmd_word, 2);
2098 }
2099
2100 /* Save off the information about this board */
2101 adapter->hw.vendor_id = pci_get_vendor(dev);
2102 adapter->hw.device_id = pci_get_device(dev);
2103 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2104 adapter->hw.subsystem_vendor_id =
2105 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2106 adapter->hw.subsystem_device_id =
2107 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2108
2109 /* Do Shared Code Init and Setup */
2110 if (e1000_set_mac_type(&adapter->hw)) {
2111 device_printf(dev, "Setup init failure\n");
2112 return;
2113 }
2114}
2115
/*
 * Map the device's register BAR (BAR 0) and, for pre-82544 parts
 * which require it, locate and map the I/O-port BAR as well.
 * Returns E1000_SUCCESS (0) or ENXIO on allocation failure.
 */
static int
lem_allocate_pci_resources(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    int val, rid, error = E1000_SUCCESS;

    rid = PCIR_BAR(0);
    adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &rid, RF_ACTIVE);
    if (adapter->memory == NULL) {
        device_printf(dev, "Unable to allocate bus resource: memory\n");
        return (ENXIO);
    }
    adapter->osdep.mem_bus_space_tag =
        rman_get_bustag(adapter->memory);
    adapter->osdep.mem_bus_space_handle =
        rman_get_bushandle(adapter->memory);
    adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;

    /* Only older adapters use IO mapping */
    if (adapter->hw.mac.type > e1000_82543) {
        /* Figure our where our IO BAR is ? */
        for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
            val = pci_read_config(dev, rid, 4);
            if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
                adapter->io_rid = rid;
                break;
            }
            rid += 4;
            /* check for 64bit BAR */
            if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
                rid += 4;
        }
        if (rid >= PCIR_CIS) {
            device_printf(dev, "Unable to locate IO BAR\n");
            return (ENXIO);
        }
        adapter->ioport = bus_alloc_resource_any(dev,
            SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
        if (adapter->ioport == NULL) {
            device_printf(dev, "Unable to allocate bus resource: "
                "ioport\n");
            return (ENXIO);
        }
        adapter->hw.io_base = 0;
        adapter->osdep.io_bus_space_tag =
            rman_get_bustag(adapter->ioport);
        adapter->osdep.io_bus_space_handle =
            rman_get_bushandle(adapter->ioport);
    }

    /* Shared code reaches OS services through this back-pointer */
    adapter->hw.back = &adapter->osdep;

    return (error);
}
2171
/*********************************************************************
 *
 *  Setup the Legacy or MSI Interrupt handler
 *
 **********************************************************************/
/*
 * Allocate the single IRQ resource and hook it up either as a
 * classic ithread handler (lem_use_legacy_irq tunable) or as a fast
 * filter routine plus RX/TX and link taskqueues.
 * Returns 0 or an errno.
 */
int
lem_allocate_irq(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    int error, rid = 0;

    /* Manually turn off all interrupts */
    E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);

    /* We allocate a single interrupt resource */
    adapter->res[0] = bus_alloc_resource_any(dev,
        SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
    if (adapter->res[0] == NULL) {
        device_printf(dev, "Unable to allocate bus resource: "
            "interrupt\n");
        return (ENXIO);
    }

    /* Do Legacy setup? */
    if (lem_use_legacy_irq) {
        if ((error = bus_setup_intr(dev, adapter->res[0],
            INTR_TYPE_NET | INTR_MPSAFE, NULL, lem_intr, adapter,
            &adapter->tag[0])) != 0) {
            device_printf(dev,
                "Failed to register interrupt handler");
            return (error);
        }
        return (0);
    }

    /*
     * Use a Fast interrupt and the associated
     * deferred processing contexts.
     */
    TASK_INIT(&adapter->rxtx_task, 0, lem_handle_rxtx, adapter);
    TASK_INIT(&adapter->link_task, 0, lem_handle_link, adapter);
    adapter->tq = taskqueue_create_fast("lem_taskq", M_NOWAIT,
        taskqueue_thread_enqueue, &adapter->tq);
    taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
        device_get_nameunit(adapter->dev));
    /* Filter routine only; work is deferred to the taskqueues */
    if ((error = bus_setup_intr(dev, adapter->res[0],
        INTR_TYPE_NET, lem_irq_fast, NULL, adapter,
        &adapter->tag[0])) != 0) {
        device_printf(dev, "Failed to register fast interrupt "
            "handler: %d\n", error);
        taskqueue_free(adapter->tq);
        adapter->tq = NULL;
        return (error);
    }

    return (0);
}
2229
2230
2231static void
2232lem_free_pci_resources(struct adapter *adapter)
2233{
2234 device_t dev = adapter->dev;
2235
2236
2237 if (adapter->tag[0] != NULL) {
2238 bus_teardown_intr(dev, adapter->res[0],
2239 adapter->tag[0]);
2240 adapter->tag[0] = NULL;
2241 }
2242
2243 if (adapter->res[0] != NULL) {
2244 bus_release_resource(dev, SYS_RES_IRQ,
2245 0, adapter->res[0]);
2246 }
2247
2248 if (adapter->memory != NULL)
2249 bus_release_resource(dev, SYS_RES_MEMORY,
2250 PCIR_BAR(0), adapter->memory);
2251
2252 if (adapter->ioport != NULL)
2253 bus_release_resource(dev, SYS_RES_IOPORT,
2254 adapter->io_rid, adapter->ioport);
2255}
2256
2257
/*********************************************************************
 *
 *  Initialize the hardware to a configuration
 *  as specified by the adapter structure.
 *
 **********************************************************************/
/*
 * Reset the MAC, compute PAUSE-frame flow-control watermarks from
 * the packet-buffer size, apply the lem_fc_setting tunable, and run
 * the shared-code e1000_init_hw().  Returns 0 or EIO.
 */
static int
lem_hardware_init(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    u16 rx_buffer_size;

    INIT_DEBUGOUT("lem_hardware_init: begin");

    /* Issue a global reset */
    e1000_reset_hw(&adapter->hw);

    /* When hardware is reset, fifo_head is also reset */
    adapter->tx_fifo_head = 0;

    /*
     * These parameters control the automatic generation (Tx) and
     * response (Rx) to Ethernet PAUSE frames.
     * - High water mark should allow for at least two frames to be
     *   received after sending an XOFF.
     * - Low water mark works best when it is very near the high water mark.
     *   This allows the receiver to restart by sending XON when it has
     *   drained a bit.  Here we use an arbitary value of 1500 which will
     *   restart after one full frame is pulled from the buffer.  There
     *   could be several smaller frames in the buffer and if so they will
     *   not trigger the XON until their total number reduces the buffer
     *   by 1500.
     * - The pause time is fairly large at 1000 x 512ns = 512 usec.
     */
    /* PBA register holds the RX packet-buffer size in KB; convert to bytes */
    rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
        0xffff) << 10 );

    adapter->hw.fc.high_water = rx_buffer_size -
        roundup2(adapter->max_frame_size, 1024);
    adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;

    adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
    adapter->hw.fc.send_xon = TRUE;

    /* Set Flow control, use the tunable location if sane */
    if ((lem_fc_setting >= 0) && (lem_fc_setting < 4))
        adapter->hw.fc.requested_mode = lem_fc_setting;
    else
        adapter->hw.fc.requested_mode = e1000_fc_none;

    if (e1000_init_hw(&adapter->hw) < 0) {
        device_printf(dev, "Hardware Initialization Failed\n");
        return (EIO);
    }

    e1000_check_for_link(&adapter->hw);

    return (0);
}
2317
2318/*********************************************************************
2319 *
2320 * Setup networking device structure and register an interface.
2321 *
2322 **********************************************************************/
static int
lem_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet *ifp;

	INIT_DEBUGOUT("lem_setup_interface: begin");

	/* Allocate and wire up the ifnet the network stack talks to. */
	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		return (-1);
	}
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_init = lem_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = lem_ioctl;
	ifp->if_start = lem_start;
	/* Size the send queue to the TX ring, keeping one slot in reserve. */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
	IFQ_SET_READY(&ifp->if_snd);

	ether_ifattach(ifp, adapter->hw.mac.addr);

	ifp->if_capabilities = ifp->if_capenable = 0;

	/* Checksum offload is only advertised on 82543 and newer MACs. */
	if (adapter->hw.mac.type >= e1000_82543) {
		ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
		ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
	}

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;

	/*
	** Dont turn this on by default, if vlans are
	** created on another pseudo device (eg. lagg)
	** then vlan events are not passed thru, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the em driver you can
	** enable this and get full hardware tag filtering.
	*/
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/* Enable only WOL MAGIC by default */
	if (adapter->wol) {
		ifp->if_capabilities |= IFCAP_WOL;
		ifp->if_capenable |= IFCAP_WOL_MAGIC;
	}

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK,
	    lem_media_change, lem_media_status);
	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
		u_char fiber_type = IFM_1000_SX;	/* default type */

		/* 82545 fiber parts are advertised as 1000base-LX instead. */
		if (adapter->hw.mac.type == e1000_82545)
			fiber_type = IFM_1000_LX;
		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
		    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
	} else {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
		    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		/*
		 * NOTE(review): IFE PHYs are deliberately excluded from the
		 * 1000baseT media entries -- presumably 10/100-only parts;
		 * confirm against the PHY documentation.
		 */
		if (adapter->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&adapter->media,
			    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
			ifmedia_add(&adapter->media,
			    IFM_ETHER | IFM_1000_T, 0, NULL);
		}
	}
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
	return (0);
}
2415
2416
2417/*********************************************************************
2418 *
2419 * Workaround for SmartSpeed on 82541 and 82547 controllers
2420 *
2421 **********************************************************************/
/*
 * Called periodically while link is down.  When 1000baseT autonegotiation
 * repeatedly reports a Master/Slave configuration fault, drop the manual
 * MS_ENABLE bit and restart autonegotiation; after EM_SMARTSPEED_DOWNSHIFT
 * unsuccessful tries, turn MS_ENABLE back on (the far end may be on a
 * 2/3-pair cable).  The counter wraps at EM_SMARTSPEED_MAX.
 */
static void
lem_smartspeed(struct adapter *adapter)
{
	u16 phy_tmp;

	/* Only applies: no link, IGP PHY, autoneg on, 1000/FDX advertised. */
	if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
	    adapter->hw.mac.autoneg == 0 ||
	    (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
			e1000_read_phy_reg(&adapter->hw,
			    PHY_1000T_CTRL, &phy_tmp);
			if(phy_tmp & CR_1000T_MS_ENABLE) {
				/* Clear manual master/slave, renegotiate. */
				phy_tmp &= ~CR_1000T_MS_ENABLE;
				e1000_write_phy_reg(&adapter->hw,
				    PHY_1000T_CTRL, phy_tmp);
				adapter->smartspeed++;
				if(adapter->hw.mac.autoneg &&
				   !e1000_copper_link_autoneg(&adapter->hw) &&
				   !e1000_read_phy_reg(&adapter->hw,
				    PHY_CONTROL, &phy_tmp)) {
					phy_tmp |= (MII_CR_AUTO_NEG_EN |
					    MII_CR_RESTART_AUTO_NEG);
					e1000_write_phy_reg(&adapter->hw,
					    PHY_CONTROL, phy_tmp);
				}
			}
		}
		return;
	} else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
		phy_tmp |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
		if(adapter->hw.mac.autoneg &&
		   !e1000_copper_link_autoneg(&adapter->hw) &&
		   !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
			phy_tmp |= (MII_CR_AUTO_NEG_EN |
			    MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
		}
	}
	/* Restart process after EM_SMARTSPEED_MAX iterations */
	if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
2476
2477
2478/*
2479 * Manage DMA'able memory.
2480 */
2481static void
2482lem_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2483{
2484 if (error)
2485 return;
2486 *(bus_addr_t *) arg = segs[0].ds_addr;
2487}
2488
2489static int
2490lem_dma_malloc(struct adapter *adapter, bus_size_t size,
2491 struct em_dma_alloc *dma, int mapflags)
2492{
2493 int error;
2494
2495 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
2496 EM_DBA_ALIGN, 0, /* alignment, bounds */
2497 BUS_SPACE_MAXADDR, /* lowaddr */
2498 BUS_SPACE_MAXADDR, /* highaddr */
2499 NULL, NULL, /* filter, filterarg */
2500 size, /* maxsize */
2501 1, /* nsegments */
2502 size, /* maxsegsize */
2503 0, /* flags */
2504 NULL, /* lockfunc */
2505 NULL, /* lockarg */
2506 &dma->dma_tag);
2507 if (error) {
2508 device_printf(adapter->dev,
2509 "%s: bus_dma_tag_create failed: %d\n",
2510 __func__, error);
2511 goto fail_0;
2512 }
2513
2514 error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2515 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
2516 if (error) {
2517 device_printf(adapter->dev,
2518 "%s: bus_dmamem_alloc(%ju) failed: %d\n",
2519 __func__, (uintmax_t)size, error);
2520 goto fail_2;
2521 }
2522
2523 dma->dma_paddr = 0;
2524 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2525 size, lem_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
2526 if (error || dma->dma_paddr == 0) {
2527 device_printf(adapter->dev,
2528 "%s: bus_dmamap_load failed: %d\n",
2529 __func__, error);
2530 goto fail_3;
2531 }
2532
2533 return (0);
2534
2535fail_3:
2536 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2537fail_2:
2538 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2539 bus_dma_tag_destroy(dma->dma_tag);
2540fail_0:
2541 dma->dma_map = NULL;
2542 dma->dma_tag = NULL;
2543
2544 return (error);
2545}
2546
2547static void
2548lem_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2549{
2550 if (dma->dma_tag == NULL)
2551 return;
2552 if (dma->dma_map != NULL) {
2553 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2554 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2555 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2556 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2557 dma->dma_map = NULL;
2558 }
2559 bus_dma_tag_destroy(dma->dma_tag);
2560 dma->dma_tag = NULL;
2561}
2562
2563
2564/*********************************************************************
2565 *
2566 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2567 * the information needed to transmit a packet on the wire.
2568 *
2569 **********************************************************************/
2570static int
2571lem_allocate_transmit_structures(struct adapter *adapter)
2572{
2573 device_t dev = adapter->dev;
2574 struct em_buffer *tx_buffer;
2575 int error;
2576
2577 /*
2578 * Create DMA tags for tx descriptors
2579 */
2580 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2581 1, 0, /* alignment, bounds */
2582 BUS_SPACE_MAXADDR, /* lowaddr */
2583 BUS_SPACE_MAXADDR, /* highaddr */
2584 NULL, NULL, /* filter, filterarg */
2585 MCLBYTES * EM_MAX_SCATTER, /* maxsize */
2586 EM_MAX_SCATTER, /* nsegments */
2587 MCLBYTES, /* maxsegsize */
2588 0, /* flags */
2589 NULL, /* lockfunc */
2590 NULL, /* lockarg */
2591 &adapter->txtag)) != 0) {
2592 device_printf(dev, "Unable to allocate TX DMA tag\n");
2593 goto fail;
2594 }
2595
2596 adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
2597 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
2598 if (adapter->tx_buffer_area == NULL) {
2599 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2600 error = ENOMEM;
2601 goto fail;
2602 }
2603
2604 /* Create the descriptor buffer dma maps */
2605 for (int i = 0; i < adapter->num_tx_desc; i++) {
2606 tx_buffer = &adapter->tx_buffer_area[i];
2607 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
2608 if (error != 0) {
2609 device_printf(dev, "Unable to create TX DMA map\n");
2610 goto fail;
2611 }
2612 tx_buffer->next_eop = -1;
2613 }
2614
2615 return (0);
2616fail:
2617 lem_free_transmit_structures(adapter);
2618 return (error);
2619}
2620
2621/*********************************************************************
2622 *
2623 * (Re)Initialize transmit structures.
2624 *
2625 **********************************************************************/
2626static void
2627lem_setup_transmit_structures(struct adapter *adapter)
2628{
2629 struct em_buffer *tx_buffer;
2630#ifdef DEV_NETMAP
2631 /* we are already locked */
2632 struct netmap_adapter *na = NA(adapter->ifp);
2633 struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0);
2634#endif /* DEV_NETMAP */
2635
2636 /* Clear the old ring contents */
2637 bzero(adapter->tx_desc_base,
2638 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
2639
2640 /* Free any existing TX buffers */
2641 for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2642 tx_buffer = &adapter->tx_buffer_area[i];
2643 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2644 BUS_DMASYNC_POSTWRITE);
2645 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2646 m_freem(tx_buffer->m_head);
2647 tx_buffer->m_head = NULL;
2648#ifdef DEV_NETMAP
2649 if (slot) {
2650 /* the i-th NIC entry goes to slot si */
2651 int si = netmap_idx_n2k(&na->tx_rings[0], i);
2652 uint64_t paddr;
2653 void *addr;
2654
2655 addr = PNMB(slot + si, &paddr);
2656 adapter->tx_desc_base[si].buffer_addr = htole64(paddr);
2657 /* reload the map for netmap mode */
2658 netmap_load_map(adapter->txtag, tx_buffer->map, addr);
2659 }
2660#endif /* DEV_NETMAP */
2661 tx_buffer->next_eop = -1;
2662 }
2663
2664 /* Reset state */
2665 adapter->last_hw_offload = 0;
2666 adapter->next_avail_tx_desc = 0;
2667 adapter->next_tx_to_clean = 0;
2668 adapter->num_tx_desc_avail = adapter->num_tx_desc;
2669
2670 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2671 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2672
2673 return;
2674}
2675
2676/*********************************************************************
2677 *
2678 * Enable transmit unit.
2679 *
2680 **********************************************************************/
static void
lem_initialize_transmit_unit(struct adapter *adapter)
{
	u32	tctl, tipg = 0;
	u64	bus_addr;

	INIT_DEBUGOUT("lem_initialize_transmit_unit: begin");
	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = adapter->txdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
	    adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
	    (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
	    (u32)bus_addr);
	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
	E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);

	HW_DEBUGOUT2("Base = %x, Length = %x\n",
	    E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
	    E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));

	/* Set the default values for the Tx Inter Packet Gap timer */
	/* 82542 uses its own IPG constants; later MACs differ by media. */
	switch (adapter->hw.mac.type) {
	case e1000_82542:
		tipg = DEFAULT_82542_TIPG_IPGT;
		tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	default:
		if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
		    (adapter->hw.phy.media_type ==
		    e1000_media_type_internal_serdes))
			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
	}

	E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
	/* TX interrupt delay; absolute delay (TADV) exists on 82540+. */
	E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
	if(adapter->hw.mac.type >= e1000_82540)
		E1000_WRITE_REG(&adapter->hw, E1000_TADV,
		    adapter->tx_abs_int_delay.value);

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);

	/* Setup Transmit Descriptor Base Settings */
	adapter->txd_cmd = E1000_TXD_CMD_IFCS;

	/* Only request delayed interrupts when a TX delay is configured. */
	if (adapter->tx_int_delay.value > 0)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
}
2743
2744/*********************************************************************
2745 *
2746 * Free all transmit related data structures.
2747 *
2748 **********************************************************************/
2749static void
2750lem_free_transmit_structures(struct adapter *adapter)
2751{
2752 struct em_buffer *tx_buffer;
2753
2754 INIT_DEBUGOUT("free_transmit_structures: begin");
2755
2756 if (adapter->tx_buffer_area != NULL) {
2757 for (int i = 0; i < adapter->num_tx_desc; i++) {
2758 tx_buffer = &adapter->tx_buffer_area[i];
2759 if (tx_buffer->m_head != NULL) {
2760 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2761 BUS_DMASYNC_POSTWRITE);
2762 bus_dmamap_unload(adapter->txtag,
2763 tx_buffer->map);
2764 m_freem(tx_buffer->m_head);
2765 tx_buffer->m_head = NULL;
2766 } else if (tx_buffer->map != NULL)
2767 bus_dmamap_unload(adapter->txtag,
2768 tx_buffer->map);
2769 if (tx_buffer->map != NULL) {
2770 bus_dmamap_destroy(adapter->txtag,
2771 tx_buffer->map);
2772 tx_buffer->map = NULL;
2773 }
2774 }
2775 }
2776 if (adapter->tx_buffer_area != NULL) {
2777 free(adapter->tx_buffer_area, M_DEVBUF);
2778 adapter->tx_buffer_area = NULL;
2779 }
2780 if (adapter->txtag != NULL) {
2781 bus_dma_tag_destroy(adapter->txtag);
2782 adapter->txtag = NULL;
2783 }
2784#if __FreeBSD_version >= 800000
2785 if (adapter->br != NULL)
2786 buf_ring_free(adapter->br, M_DEVBUF);
2787#endif
2788}
2789
2790/*********************************************************************
2791 *
2792 * The offload context needs to be set when we transfer the first
2793 * packet of a particular protocol (TCP/UDP). This routine has been
2794 * enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
2795 *
2796 * Added back the old method of keeping the current context type
2797 * and not setting if unnecessary, as this is reported to be a
2798 * big performance win. -jfv
2799 **********************************************************************/
static void
lem_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
    u32 *txd_upper, u32 *txd_lower)
{
	struct e1000_context_desc *TXD = NULL;
	struct em_buffer *tx_buffer;
	struct ether_vlan_header *eh;
	struct ip *ip = NULL;
	struct ip6_hdr *ip6;
	int curr_txd, ehdrlen;
	u32 cmd, hdr_len, ip_hlen;
	u16 etype;
	u8 ipproto;


	cmd = hdr_len = ipproto = 0;
	*txd_upper = *txd_lower = 0;
	curr_txd = adapter->next_avail_tx_desc;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/*
	 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
	 * TODO: Support SCTP too when it hits the tree.
	 */
	switch (etype) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);
		ip_hlen = ip->ip_hl << 2;

		/* Setup of IP header checksum. */
		if (mp->m_pkthdr.csum_flags & CSUM_IP) {
			/*
			 * Start offset for header checksum calculation.
			 * End offset for header checksum calculation.
			 * Offset of place to put the checksum.
			 */
			TXD = (struct e1000_context_desc *)
			    &adapter->tx_desc_base[curr_txd];
			TXD->lower_setup.ip_fields.ipcss = ehdrlen;
			TXD->lower_setup.ip_fields.ipcse =
			    htole16(ehdrlen + ip_hlen);
			TXD->lower_setup.ip_fields.ipcso =
			    ehdrlen + offsetof(struct ip, ip_sum);
			cmd |= E1000_TXD_CMD_IP;
			*txd_upper |= E1000_TXD_POPTS_IXSM << 8;
		}

		hdr_len = ehdrlen + ip_hlen;
		ipproto = ip->ip_p;

		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */

		/* IPv6 doesn't have a header checksum. */

		hdr_len = ehdrlen + ip_hlen;
		ipproto = ip6->ip6_nxt;
		break;

	default:
		/* Not an offloadable ethertype; leave txd_* cleared. */
		return;
	}

	switch (ipproto) {
	case IPPROTO_TCP:
		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
			/* no need for context if already set */
			if (adapter->last_hw_offload == CSUM_TCP)
				return;
			adapter->last_hw_offload = CSUM_TCP;
			/*
			 * Start offset for payload checksum calculation.
			 * End offset for payload checksum calculation.
			 * Offset of place to put the checksum.
			 */
			TXD = (struct e1000_context_desc *)
			    &adapter->tx_desc_base[curr_txd];
			TXD->upper_setup.tcp_fields.tucss = hdr_len;
			TXD->upper_setup.tcp_fields.tucse = htole16(0);
			TXD->upper_setup.tcp_fields.tucso =
			    hdr_len + offsetof(struct tcphdr, th_sum);
			cmd |= E1000_TXD_CMD_TCP;
		}
		break;
	case IPPROTO_UDP:
	{
		if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
			/* no need for context if already set */
			if (adapter->last_hw_offload == CSUM_UDP)
				return;
			adapter->last_hw_offload = CSUM_UDP;
			/*
			 * Start offset for header checksum calculation.
			 * End offset for header checksum calculation.
			 * Offset of place to put the checksum.
			 */
			TXD = (struct e1000_context_desc *)
			    &adapter->tx_desc_base[curr_txd];
			TXD->upper_setup.tcp_fields.tucss = hdr_len;
			TXD->upper_setup.tcp_fields.tucse = htole16(0);
			TXD->upper_setup.tcp_fields.tucso =
			    hdr_len + offsetof(struct udphdr, uh_sum);
		}
		/* Fall Thru */
	}
	default:
		break;
	}

	/* No context descriptor was filled in (e.g. IP-only checksum off). */
	if (TXD == NULL)
		return;
	/* The context descriptor itself consumes one ring slot. */
	TXD->tcp_seg_setup.data = htole32(0);
	TXD->cmd_and_length =
	    htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
	tx_buffer = &adapter->tx_buffer_area[curr_txd];
	tx_buffer->m_head = NULL;
	tx_buffer->next_eop = -1;

	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;
}
2943
2944
2945/**********************************************************************
2946 *
2947 * Examine each tx_buffer in the used queue. If the hardware is done
2948 * processing the packet then free associated resources. The
2949 * tx_buffer is put back on the free queue.
2950 *
2951 **********************************************************************/
/*
 * Reclaim transmit descriptors that the hardware has completed (the
 * packet's EOP descriptor has the DD status bit set): free the attached
 * mbufs and return the slots to the free pool.  Clears IFF_DRV_OACTIVE
 * once enough room exists and disarms the watchdog when the ring is
 * fully drained.  Must be called with the TX lock held.
 */
static void
lem_txeof(struct adapter *adapter)
{
	int first, last, done, num_avail;
	struct em_buffer *tx_buffer;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct ifnet *ifp = adapter->ifp;

	EM_TX_LOCK_ASSERT(adapter);

#ifdef DEV_NETMAP
	/* In netmap mode just wake the userspace ring and return. */
	if (ifp->if_capenable & IFCAP_NETMAP) {
		selwakeuppri(&NA(ifp)->tx_rings[0].si, PI_NET);
		return;
	}
#endif /* DEV_NETMAP */
	if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
		return;

	num_avail = adapter->num_tx_desc_avail;
	first = adapter->next_tx_to_clean;
	tx_desc = &adapter->tx_desc_base[first];
	tx_buffer = &adapter->tx_buffer_area[first];
	last = tx_buffer->next_eop;
	eop_desc = &adapter->tx_desc_base[last];

	/*
	 * What this does is get the index of the
	 * first descriptor AFTER the EOP of the
	 * first packet, that way we can do the
	 * simple comparison on the inner while loop.
	 */
	if (++last == adapter->num_tx_desc)
		last = 0;
	done = last;

	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
		/* We clean the range of the packet */
		while (first != done) {
			tx_desc->upper.data = 0;
			tx_desc->lower.data = 0;
			tx_desc->buffer_addr = 0;
			++num_avail;

			/* Only the slot holding the mbuf counts as a packet. */
			if (tx_buffer->m_head) {
				ifp->if_opackets++;
				bus_dmamap_sync(adapter->txtag,
				    tx_buffer->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(adapter->txtag,
				    tx_buffer->map);

				m_freem(tx_buffer->m_head);
				tx_buffer->m_head = NULL;
			}
			tx_buffer->next_eop = -1;
			/* Progress was made; push the watchdog deadline out. */
			adapter->watchdog_time = ticks;

			if (++first == adapter->num_tx_desc)
				first = 0;

			tx_buffer = &adapter->tx_buffer_area[first];
			tx_desc = &adapter->tx_desc_base[first];
		}
		/* See if we can continue to the next packet */
		last = tx_buffer->next_eop;
		if (last != -1) {
			eop_desc = &adapter->tx_desc_base[last];
			/* Get new done point */
			if (++last == adapter->num_tx_desc) last = 0;
			done = last;
		} else
			break;
	}
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	adapter->next_tx_to_clean = first;
	adapter->num_tx_desc_avail = num_avail;

	/*
	 * If we have enough room, clear IFF_DRV_OACTIVE to
	 * tell the stack that it is OK to send packets.
	 * If there are no pending descriptors, clear the watchdog.
	 */
	if (adapter->num_tx_desc_avail > EM_TX_CLEANUP_THRESHOLD) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (adapter->num_tx_desc_avail == adapter->num_tx_desc) {
			adapter->watchdog_check = FALSE;
			return;
		}
	}
}
3048
3049/*********************************************************************
3050 *
3051 * When Link is lost sometimes there is work still in the TX ring
3052 * which may result in a watchdog, rather than allow that we do an
3053 * attempted cleanup and then reinit here. Note that this has been
3054 * seens mostly with fiber adapters.
3055 *
3056 **********************************************************************/
3057static void
3058lem_tx_purge(struct adapter *adapter)
3059{
3060 if ((!adapter->link_active) && (adapter->watchdog_check)) {
3061 EM_TX_LOCK(adapter);
3062 lem_txeof(adapter);
3063 EM_TX_UNLOCK(adapter);
3064 if (adapter->watchdog_check) /* Still outstanding? */
3065 lem_init_locked(adapter);
3066 }
3067}
3068
3069/*********************************************************************
3070 *
3071 * Get a buffer from system mbuf buffer pool.
3072 *
3073 **********************************************************************/
3074static int
3075lem_get_buf(struct adapter *adapter, int i)
3076{
3077 struct mbuf *m;
3078 bus_dma_segment_t segs[1];
3079 bus_dmamap_t map;
3080 struct em_buffer *rx_buffer;
3081 int error, nsegs;
3082
3083 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3084 if (m == NULL) {
3085 adapter->mbuf_cluster_failed++;
3086 return (ENOBUFS);
3087 }
3088 m->m_len = m->m_pkthdr.len = MCLBYTES;
3089
3090 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
3091 m_adj(m, ETHER_ALIGN);
3092
3093 /*
3094 * Using memory from the mbuf cluster pool, invoke the
3095 * bus_dma machinery to arrange the memory mapping.
3096 */
3097 error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
3098 adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
3099 if (error != 0) {
3100 m_free(m);
3101 return (error);
3102 }
3103
3104 /* If nsegs is wrong then the stack is corrupt. */
3105 KASSERT(nsegs == 1, ("Too many segments returned!"));
3106
3107 rx_buffer = &adapter->rx_buffer_area[i];
3108 if (rx_buffer->m_head != NULL)
3109 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3110
3111 map = rx_buffer->map;
3112 rx_buffer->map = adapter->rx_sparemap;
3113 adapter->rx_sparemap = map;
3114 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
3115 rx_buffer->m_head = m;
3116
3117 adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
3118 return (0);
3119}
3120
3121/*********************************************************************
3122 *
3123 * Allocate memory for rx_buffer structures. Since we use one
3124 * rx_buffer per received packet, the maximum number of rx_buffer's
3125 * that we'll need is equal to the number of receive descriptors
3126 * that we've allocated.
3127 *
3128 **********************************************************************/
static int
lem_allocate_receive_structures(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct em_buffer *rx_buffer;
	int i, error;

	/* One em_buffer per receive descriptor. */
	adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
	    adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (adapter->rx_buffer_area == NULL) {
		device_printf(dev, "Unable to allocate rx_buffer memory\n");
		return (ENOMEM);
	}

	/* RX buffers are single-segment mbuf clusters. */
	error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
				1, 0,			/* alignment, bounds */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MCLBYTES,		/* maxsize */
				1,			/* nsegments */
				MCLBYTES,		/* maxsegsize */
				0,			/* flags */
				NULL,			/* lockfunc */
				NULL,			/* lockarg */
				&adapter->rxtag);
	if (error) {
		device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
		    __func__, error);
		goto fail;
	}

	/* Create the spare map (used by getbuf) */
	error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
	     &adapter->rx_sparemap);
	if (error) {
		device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
		    __func__, error);
		goto fail;
	}

	/* One DMA map per descriptor slot. */
	rx_buffer = adapter->rx_buffer_area;
	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
		error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
		    &rx_buffer->map);
		if (error) {
			device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
			    __func__, error);
			goto fail;
		}
	}

	return (0);

fail:
	/* Releases everything allocated above; tolerant of partial state. */
	lem_free_receive_structures(adapter);
	return (error);
}
3187
3188/*********************************************************************
3189 *
3190 * (Re)initialize receive structures.
3191 *
3192 **********************************************************************/
3193static int
3194lem_setup_receive_structures(struct adapter *adapter)
3195{
3196 struct em_buffer *rx_buffer;
3197 int i, error;
3198#ifdef DEV_NETMAP
3199 /* we are already under lock */
3200 struct netmap_adapter *na = NA(adapter->ifp);
3201 struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
3202#endif
3203
3204 /* Reset descriptor ring */
3205 bzero(adapter->rx_desc_base,
3206 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
3207
3208 /* Free current RX buffers. */
3209 rx_buffer = adapter->rx_buffer_area;
3210 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3211 if (rx_buffer->m_head != NULL) {
3212 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3213 BUS_DMASYNC_POSTREAD);
3214 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3215 m_freem(rx_buffer->m_head);
3216 rx_buffer->m_head = NULL;
3217 }
3218 }
3219
3220 /* Allocate new ones. */
3221 for (i = 0; i < adapter->num_rx_desc; i++) {
3222#ifdef DEV_NETMAP
3223 if (slot) {
3224 /* the i-th NIC entry goes to slot si */
3225 int si = netmap_idx_n2k(&na->rx_rings[0], i);
3226 uint64_t paddr;
3227 void *addr;
3228
3229 addr = PNMB(slot + si, &paddr);
3230 netmap_load_map(adapter->rxtag, rx_buffer->map, addr);
3231 /* Update descriptor */
3232 adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
3233 continue;
3234 }
3235#endif /* DEV_NETMAP */
3236 error = lem_get_buf(adapter, i);
3237 if (error)
3238 return (error);
3239 }
3240
3241 /* Setup our descriptor pointers */
3242 adapter->next_rx_desc_to_check = 0;
3243 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3244 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3245
3246 return (0);
3247}
3248
3249/*********************************************************************
3250 *
3251 * Enable receive unit.
3252 *
3253 **********************************************************************/
/* Cap receive interrupts at MAX_INTS_PER_SEC per second. */
#define MAX_INTS_PER_SEC	8000
/*
 * The ITR register counts in 256ns units, so the programmed value is
 * 1e9 / (rate * 256).  Fully parenthesized so the macro expands safely
 * in any expression context (the old form broke under division etc.).
 */
#define DEFAULT_ITR	(1000000000/(MAX_INTS_PER_SEC * 256))
3256
/*
 * Program the receive unit: interrupt moderation, descriptor ring
 * base/length, RX buffer size, checksum offload, then enable receives
 * and set the head/tail pointers.  Receives are kept disabled until
 * the ring registers are fully configured.
 */
static void
lem_initialize_receive_unit(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	u64 bus_addr;
	u32 rctl, rxcsum;

	INIT_DEBUGOUT("lem_initialize_receive_unit: begin");

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	if (adapter->hw.mac.type >= e1000_82540) {
		/* Absolute RX interrupt delay; 82540 and newer only. */
		E1000_WRITE_REG(&adapter->hw, E1000_RADV,
		    adapter->rx_abs_int_delay.value);
		/*
		 * Set the interrupt throttling rate. Value is calculated
		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
		 */
		E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
	}

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
	    adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
	    (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
	    (u32)bus_addr);

	/* Setup the Receive Control Register */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN Filters are off */
	rctl &= ~E1000_RCTL_VFE;

	/* 82543 TBI workaround: optionally store bad packets (SBP). */
	if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	/* Buffer sizes above 2048 need BSEX and imply long packets. */
	switch (adapter->rx_buffer_len) {
	default:
	case 2048:
		rctl |= E1000_RCTL_SZ_2048;
		break;
	case 4096:
		rctl |= E1000_RCTL_SZ_4096 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 8192:
		rctl |= E1000_RCTL_SZ_8192 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 16384:
		rctl |= E1000_RCTL_SZ_16384 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	/* Long Packet Enable tracks whether the MTU exceeds 1500. */
	if (ifp->if_mtu > ETHERMTU)
		rctl |= E1000_RCTL_LPE;
	else
		rctl &= ~E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac.type >= e1000_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
	}

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);

	/*
	 * Setup the HW Rx Head and
	 * Tail Descriptor Pointers
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
#ifdef DEV_NETMAP
	/* preserve buffers already made available to clients */
	if (ifp->if_capenable & IFCAP_NETMAP) {
		struct netmap_adapter *na = NA(adapter->ifp);
		struct netmap_kring *kring = &na->rx_rings[0];
		int t = na->num_rx_desc - 1 - kring->nr_hwavail;

		/* Wrap the tail index back into the ring if needed. */
		if (t >= na->num_rx_desc)
			t -= na->num_rx_desc;
		E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), t);
	} else
#endif /* DEV_NETMAP */
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);

	return;
}
3362
3363/*********************************************************************
3364 *
3365 * Free receive related data structures.
3366 *
3367 **********************************************************************/
/*
 * Tear down all receive resources: per-buffer mbufs and DMA maps,
 * the spare map, the buffer array, and finally the RX DMA tag.
 */
static void
lem_free_receive_structures(struct adapter *adapter)
{
	struct em_buffer *rx_buffer;
	int i;

	INIT_DEBUGOUT("free_receive_structures: begin");

	/* Release the spare map used when replacing RX buffers. */
	if (adapter->rx_sparemap) {
		bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
		adapter->rx_sparemap = NULL;
	}

	/* Cleanup any existing buffers */
	if (adapter->rx_buffer_area != NULL) {
		rx_buffer = adapter->rx_buffer_area;
		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
			if (rx_buffer->m_head != NULL) {
				/* Sync and unload before freeing the mbuf. */
				bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(adapter->rxtag,
				    rx_buffer->map);
				m_freem(rx_buffer->m_head);
				rx_buffer->m_head = NULL;
			} else if (rx_buffer->map != NULL)
				bus_dmamap_unload(adapter->rxtag,
				    rx_buffer->map);
			/* Maps are destroyed whether or not an mbuf was loaded. */
			if (rx_buffer->map != NULL) {
				bus_dmamap_destroy(adapter->rxtag,
				    rx_buffer->map);
				rx_buffer->map = NULL;
			}
		}
	}

	if (adapter->rx_buffer_area != NULL) {
		free(adapter->rx_buffer_area, M_DEVBUF);
		adapter->rx_buffer_area = NULL;
	}

	/* Finally tear down the RX DMA tag itself. */
	if (adapter->rxtag != NULL) {
		bus_dma_tag_destroy(adapter->rxtag);
		adapter->rxtag = NULL;
	}
}
3413
3414/*********************************************************************
3415 *
3416 * This routine executes in interrupt context. It replenishes
3417 * the mbufs in the descriptor and sends data which has been
3418 * dma'ed into host memory to upper layer.
3419 *
3420 * We loop at most count times if count is > 0, or until done if
3421 * count < 0.
3422 *
3423 * For polling we also now return the number of cleaned packets
3424 *********************************************************************/
static bool
lem_rxeof(struct adapter *adapter, int count, int *done)
{
	struct ifnet *ifp = adapter->ifp;
	struct mbuf *mp;
	u8 status = 0, accept_frame = 0, eop = 0;
	u16 len, desc_len, prev_len_adj;
	int i, rx_sent = 0;
	struct e1000_rx_desc *current_desc;

	EM_RX_LOCK(adapter);
	i = adapter->next_rx_desc_to_check;
	current_desc = &adapter->rx_desc_base[i];
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

#ifdef DEV_NETMAP
	/* In netmap mode just wake the client; it consumes the ring. */
	if (ifp->if_capenable & IFCAP_NETMAP) {
		struct netmap_adapter *na = NA(ifp);
		na->rx_rings[0].nr_kflags |= NKR_PENDINTR;
		selwakeuppri(&na->rx_rings[0].si, PI_NET);
		EM_RX_UNLOCK(adapter);
		return (0);
	}
#endif /* DEV_NETMAP */

	/* Fast path out if the next descriptor is not done yet. */
	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
		if (done != NULL)
			*done = rx_sent;
		EM_RX_UNLOCK(adapter);
		return (FALSE);
	}

	while (count != 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) {
		struct mbuf *m = NULL;

		status = current_desc->status;
		if ((status & E1000_RXD_STAT_DD) == 0)
			break;

		mp = adapter->rx_buffer_area[i].m_head;
		/*
		 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
		 * needs to access the last received byte in the mbuf.
		 */
		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
		    BUS_DMASYNC_POSTREAD);

		accept_frame = 1;
		prev_len_adj = 0;
		desc_len = le16toh(current_desc->length);
		if (status & E1000_RXD_STAT_EOP) {
			/* Last fragment of a packet: strip the CRC. */
			count--;
			eop = 1;
			if (desc_len < ETHER_CRC_LEN) {
				len = 0;
				prev_len_adj = ETHER_CRC_LEN - desc_len;
			} else
				len = desc_len - ETHER_CRC_LEN;
		} else {
			eop = 0;
			len = desc_len;
		}

		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			u8 last_byte;
			u32 pkt_len = desc_len;

			if (adapter->fmp != NULL)
				pkt_len += adapter->fmp->m_pkthdr.len;

			/* TBI (fiber) workaround may still accept the frame. */
			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
			if (TBI_ACCEPT(&adapter->hw, status,
			    current_desc->errors, pkt_len, last_byte,
			    adapter->min_frame_size, adapter->max_frame_size)) {
				e1000_tbi_adjust_stats_82543(&adapter->hw,
				    &adapter->stats, pkt_len,
				    adapter->hw.mac.addr,
				    adapter->max_frame_size);
				if (len > 0)
					len--;
			} else
				accept_frame = 0;
		}

		if (accept_frame) {
			/* Refill the ring slot first; drop on failure. */
			if (lem_get_buf(adapter, i) != 0) {
				ifp->if_iqdrops++;
				goto discard;
			}

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (adapter->fmp == NULL) {
				mp->m_pkthdr.len = len;
				adapter->fmp = mp; /* Store the first mbuf */
				adapter->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				/*
				 * Adjust length of previous mbuf in chain if
				 * we received less than 4 bytes in the last
				 * descriptor.
				 */
				if (prev_len_adj > 0) {
					adapter->lmp->m_len -= prev_len_adj;
					adapter->fmp->m_pkthdr.len -=
					    prev_len_adj;
				}
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				adapter->fmp->m_pkthdr.rcvif = ifp;
				ifp->if_ipackets++;
				lem_receive_checksum(adapter, current_desc,
				    adapter->fmp);
#ifndef __NO_STRICT_ALIGNMENT
				if (adapter->max_frame_size >
				    (MCLBYTES - ETHER_ALIGN) &&
				    lem_fixup_rx(adapter) != 0)
					goto skip;
#endif
				if (status & E1000_RXD_STAT_VP) {
					adapter->fmp->m_pkthdr.ether_vtag =
					    le16toh(current_desc->special);
					adapter->fmp->m_flags |= M_VLANTAG;
				}
#ifndef __NO_STRICT_ALIGNMENT
skip:
#endif
				m = adapter->fmp;
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
		} else {
			adapter->dropped_pkts++;
discard:
			/* Reuse loaded DMA map and just update mbuf chain */
			mp = adapter->rx_buffer_area[i].m_head;
			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
			mp->m_data = mp->m_ext.ext_buf;
			mp->m_next = NULL;
			if (adapter->max_frame_size <=
			    (MCLBYTES - ETHER_ALIGN))
				m_adj(mp, ETHER_ALIGN);
			/* Any partially assembled chain is now stale. */
			if (adapter->fmp != NULL) {
				m_freem(adapter->fmp);
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
			m = NULL;
		}

		/* Zero out the receive descriptors status. */
		current_desc->status = 0;
		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor. */
		if (++i == adapter->num_rx_desc)
			i = 0;
		/* Call into the stack */
		if (m != NULL) {
			/*
			 * Drop the RX lock across if_input(); the ring
			 * index is re-read afterwards since another
			 * thread may have advanced it meanwhile.
			 */
			adapter->next_rx_desc_to_check = i;
			EM_RX_UNLOCK(adapter);
			(*ifp->if_input)(ifp, m);
			EM_RX_LOCK(adapter);
			rx_sent++;
			i = adapter->next_rx_desc_to_check;
		}
		current_desc = &adapter->rx_desc_base[i];
	}
	adapter->next_rx_desc_to_check = i;

	/* Advance the E1000's Receive Queue #0 "Tail Pointer". */
	if (--i < 0)
		i = adapter->num_rx_desc - 1;
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
	if (done != NULL)
		*done = rx_sent;
	EM_RX_UNLOCK(adapter);
	return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE);
}
3613
3614#ifndef __NO_STRICT_ALIGNMENT
3615/*
3616 * When jumbo frames are enabled we should realign entire payload on
3617 * architecures with strict alignment. This is serious design mistake of 8254x
3618 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
3619 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
3620 * payload. On architecures without strict alignment restrictions 8254x still
3621 * performs unaligned memory access which would reduce the performance too.
3622 * To avoid copying over an entire frame to align, we allocate a new mbuf and
3623 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
3624 * existing mbuf chain.
3625 *
3626 * Be aware, best performance of the 8254x is achived only when jumbo frame is
3627 * not used at all on architectures with strict alignment.
3628 */
3629static int
3630lem_fixup_rx(struct adapter *adapter)
3631{
3632 struct mbuf *m, *n;
3633 int error;
3634
3635 error = 0;
3636 m = adapter->fmp;
3637 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
3638 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
3639 m->m_data += ETHER_HDR_LEN;
3640 } else {
3641 MGETHDR(n, M_DONTWAIT, MT_DATA);
3642 if (n != NULL) {
3643 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
3644 m->m_data += ETHER_HDR_LEN;
3645 m->m_len -= ETHER_HDR_LEN;
3646 n->m_len = ETHER_HDR_LEN;
3647 M_MOVE_PKTHDR(n, m);
3648 n->m_next = m;
3649 adapter->fmp = n;
3650 } else {
3651 adapter->dropped_pkts++;
3652 m_freem(adapter->fmp);
3653 adapter->fmp = NULL;
3654 error = ENOMEM;
3655 }
3656 }
3657
3658 return (error);
3659}
3660#endif
3661
3662/*********************************************************************
3663 *
3664 * Verify that the hardware indicated that the checksum is valid.
3665 * Inform the stack about the status of checksum so that stack
3666 * doesn't spend time verifying the checksum.
3667 *
3668 *********************************************************************/
3669static void
3670lem_receive_checksum(struct adapter *adapter,
3671 struct e1000_rx_desc *rx_desc, struct mbuf *mp)
3672{
3673 /* 82543 or newer only */
3674 if ((adapter->hw.mac.type < e1000_82543) ||
3675 /* Ignore Checksum bit is set */
3676 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3677 mp->m_pkthdr.csum_flags = 0;
3678 return;
3679 }
3680
3681 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3682 /* Did it pass? */
3683 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3684 /* IP Checksum Good */
3685 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3686 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3687
3688 } else {
3689 mp->m_pkthdr.csum_flags = 0;
3690 }
3691 }
3692
3693 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3694 /* Did it pass? */
3695 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3696 mp->m_pkthdr.csum_flags |=
3697 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3698 mp->m_pkthdr.csum_data = htons(0xffff);
3699 }
3700 }
3701}
3702
3703/*
3704 * This routine is run via an vlan
3705 * config EVENT
3706 */
3707static void
3708lem_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3709{
3710 struct adapter *adapter = ifp->if_softc;
3711 u32 index, bit;
3712
3713 if (ifp->if_softc != arg) /* Not our event */
3714 return;
3715
3716 if ((vtag == 0) || (vtag > 4095)) /* Invalid ID */
3717 return;
3718
3719 EM_CORE_LOCK(adapter);
3720 index = (vtag >> 5) & 0x7F;
3721 bit = vtag & 0x1F;
3722 adapter->shadow_vfta[index] |= (1 << bit);
3723 ++adapter->num_vlans;
3724 /* Re-init to load the changes */
3725 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3726 lem_init_locked(adapter);
3727 EM_CORE_UNLOCK(adapter);
3728}
3729
3730/*
3731 * This routine is run via an vlan
3732 * unconfig EVENT
3733 */
3734static void
3735lem_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3736{
3737 struct adapter *adapter = ifp->if_softc;
3738 u32 index, bit;
3739
3740 if (ifp->if_softc != arg)
3741 return;
3742
3743 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3744 return;
3745
3746 EM_CORE_LOCK(adapter);
3747 index = (vtag >> 5) & 0x7F;
3748 bit = vtag & 0x1F;
3749 adapter->shadow_vfta[index] &= ~(1 << bit);
3750 --adapter->num_vlans;
3751 /* Re-init to load the changes */
3752 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3753 lem_init_locked(adapter);
3754 EM_CORE_UNLOCK(adapter);
3755}
3756
/*
 * Restore VLAN hardware state after a soft reset: repopulate the
 * VFTA from the shadow copy, turn on tag stripping (VME) and the
 * VLAN filter, and widen the accepted frame size by one tag.
 */
static void
lem_setup_vlan_hw_support(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	/*
	** We get here thru init_locked, meaning
	** a soft reset, this has already cleared
	** the VFTA and other state, so if there
	** have been no vlan's registered do nothing.
	*/
	if (adapter->num_vlans == 0)
		return;

	/*
	** A soft reset zero's out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < EM_VFTA_SIZE; i++)
		if (adapter->shadow_vfta[i] != 0)
			E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
			    i, adapter->shadow_vfta[i]);

	/* Turn on VLAN tag stripping/insertion (VME). */
	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg |= E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);

	/* Enable the Filter Table */
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_CFIEN;
	reg |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);

	/* Update the frame size */
	E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
	    adapter->max_frame_size + VLAN_TAG_SIZE);
}
3795
3796static void
3797lem_enable_intr(struct adapter *adapter)
3798{
3799 struct e1000_hw *hw = &adapter->hw;
3800 u32 ims_mask = IMS_ENABLE_MASK;
3801
3802 E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
3803}
3804
3805static void
3806lem_disable_intr(struct adapter *adapter)
3807{
3808 struct e1000_hw *hw = &adapter->hw;
3809
3810 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3811}
3812
3813/*
3814 * Bit of a misnomer, what this really means is
3815 * to enable OS management of the system... aka
3816 * to disable special hardware management features
3817 */
3818static void
3819lem_init_manageability(struct adapter *adapter)
3820{
3821 /* A shared code workaround */
3822 if (adapter->has_manage) {
3823 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3824 /* disable hardware interception of ARP */
3825 manc &= ~(E1000_MANC_ARP_EN);
3826 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3827 }
3828}
3829
3830/*
3831 * Give control back to hardware management
3832 * controller if there is one.
3833 */
3834static void
3835lem_release_manageability(struct adapter *adapter)
3836{
3837 if (adapter->has_manage) {
3838 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3839
3840 /* re-enable hardware interception of ARP */
3841 manc |= E1000_MANC_ARP_EN;
3842 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3843 }
3844}
3845
3846/*
3847 * lem_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
3848 * For ASF and Pass Through versions of f/w this means
3849 * that the driver is loaded. For AMT version type f/w
3850 * this means that the network i/f is open.
3851 */
3852static void
3853lem_get_hw_control(struct adapter *adapter)
3854{
3855 u32 ctrl_ext;
3856
3857 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3858 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3859 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
3860 return;
3861}
3862
3863/*
3864 * lem_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3865 * For ASF and Pass Through versions of f/w this means that
3866 * the driver is no longer loaded. For AMT versions of the
3867 * f/w this means that the network i/f is closed.
3868 */
3869static void
3870lem_release_hw_control(struct adapter *adapter)
3871{
3872 u32 ctrl_ext;
3873
3874 if (!adapter->has_manage)
3875 return;
3876
3877 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3878 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3879 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
3880 return;
3881}
3882
3883static int
3884lem_is_valid_ether_addr(u8 *addr)
3885{
3886 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3887
3888 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3889 return (FALSE);
3890 }
3891
3892 return (TRUE);
3893}
3894
3895/*
3896** Parse the interface capabilities with regard
3897** to both system management and wake-on-lan for
3898** later use.
3899*/
/*
 * Read the NVM to learn whether this port advertises APM wake
 * support and record management-passthrough capability, then apply
 * board-specific overrides for parts known to misreport WoL.
 */
static void
lem_get_wakeup(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	u16 eeprom_data = 0, device_id, apme_mask;

	adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
	apme_mask = EM_EEPROM_APME;

	/* The APME bit lives in different NVM words per MAC type. */
	switch (adapter->hw.mac.type) {
	case e1000_82542:
	case e1000_82543:
		/* eeprom_data stays 0: no wake support on these parts. */
		break;
	case e1000_82544:
		e1000_read_nvm(&adapter->hw,
		    NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
		apme_mask = EM_82544_APME;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		/* Dual-port part: port B has its own control word. */
		if (adapter->hw.bus.func == 1) {
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		} else
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	default:
		e1000_read_nvm(&adapter->hw,
		    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	/* Default wake triggers: magic packet and multicast. */
	if (eeprom_data & apme_mask)
		adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
	/*
	 * We have the eeprom settings, now apply the special cases
	 * where the eeprom may be wrong or the board won't support
	 * wake on lan on a particular port
	 */
	device_id = pci_get_device(dev);
	switch (device_id) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
		    E1000_STATUS_FUNC_1)
			adapter->wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->wol = 0;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}
	return;
}
3964
3965
3966/*
3967 * Enable PCI Wake On Lan capability
3968 */
/*
 * Arm the adapter and the PCI power-management registers so the
 * wake events selected in adapter->wol can wake the system.
 */
static void
lem_enable_wakeup(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = adapter->ifp;
	u32 pmc, ctrl, ctrl_ext, rctl;
	u16 status;

	/* Nothing to do without a PCI power-management capability. */
	if ((pci_find_cap(dev, PCIY_PMG, &pmc) != 0))
		return;

	/* Advertise the wakeup capability */
	ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
	ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
	E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
	/* NOTE(review): WUC is written again below on the non-pchlan
	 * path; this early write looks redundant - confirm intent. */
	E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);

	/* Keep the laser running on Fiber adapters */
	if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
	    adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
	}

	/*
	** Determine type of Wakeup: note that wol
	** is set with all bits on by default.
	*/
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
		adapter->wol &= ~E1000_WUFC_MAG;

	if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
		adapter->wol &= ~E1000_WUFC_MC;
	else {
		/* Multicast wake requires promiscuous multicast RX. */
		rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
	}

	/* pchlan parts need the wake configuration pushed to the PHY. */
	if (adapter->hw.mac.type == e1000_pchlan) {
		if (lem_enable_phy_wakeup(adapter))
			return;
	} else {
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
	}


	/* Request PME */
	status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
	status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if (ifp->if_capenable & IFCAP_WOL)
		status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);

	return;
}
4027
4028/*
4029** WOL in the newer chipset interfaces (pchlan)
4030** require thing to be copied into the phy
4031*/
/*
 * On pchlan parts the wake configuration must be mirrored into the
 * PHY (BM) registers: copy the MAC's address (RAR) and multicast
 * (MTA) filter tables, mirror the relevant RCTL bits, then enable
 * host wakeup through the wakeup-control page.  Returns 0 on
 * success or the PHY access error code.
 */
static int
lem_enable_phy_wakeup(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mreg, ret = 0;
	u16 preg;

	/* copy MAC RARs to PHY RARs */
	for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
		mreg = E1000_READ_REG(hw, E1000_RAL(i));
		e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
		e1000_write_phy_reg(hw, BM_RAR_M(i),
		    (u16)((mreg >> 16) & 0xFFFF));
		mreg = E1000_READ_REG(hw, E1000_RAH(i));
		e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
		e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
		    (u16)((mreg >> 16) & 0xFFFF));
	}

	/* copy MAC MTA to PHY MTA */
	for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
		mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
		e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
		e1000_write_phy_reg(hw, BM_MTA(i) + 1,
		    (u16)((mreg >> 16) & 0xFFFF));
	}

	/* configure PHY Rx Control register (mirror of MAC RCTL bits) */
	e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
	mreg = E1000_READ_REG(hw, E1000_RCTL);
	if (mreg & E1000_RCTL_UPE)
		preg |= BM_RCTL_UPE;
	if (mreg & E1000_RCTL_MPE)
		preg |= BM_RCTL_MPE;
	preg &= ~(BM_RCTL_MO_MASK);
	if (mreg & E1000_RCTL_MO_3)
		preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
		    << BM_RCTL_MO_SHIFT);
	if (mreg & E1000_RCTL_BAM)
		preg |= BM_RCTL_BAM;
	if (mreg & E1000_RCTL_PMCF)
		preg |= BM_RCTL_PMCF;
	mreg = E1000_READ_REG(hw, E1000_CTRL);
	if (mreg & E1000_CTRL_RFCE)
		preg |= BM_RCTL_RFCE;
	e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);

	/* enable PHY wakeup in MAC register */
	E1000_WRITE_REG(hw, E1000_WUC,
	    E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
	E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);

	/* configure and enable PHY wakeup in PHY registers */
	e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
	e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);

	/* activate PHY wakeup */
	ret = hw->phy.ops.acquire(hw);
	if (ret) {
		printf("Could not acquire PHY\n");
		return ret;
	}
	/* Select the wakeup-control page via raw MDIC access. */
	e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
	    (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
	ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
	if (ret) {
		printf("Could not read PHY page 769\n");
		goto out;
	}
	preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
	ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
	if (ret)
		printf("Could not set PHY Host Wakeup bit\n");
out:
	hw->phy.ops.release(hw);

	return ret;
}
4110
4111static void
4112lem_led_func(void *arg, int onoff)
4113{
4114 struct adapter *adapter = arg;
4115
4116 EM_CORE_LOCK(adapter);
4117 if (onoff) {
4118 e1000_setup_led(&adapter->hw);
4119 e1000_led_on(&adapter->hw);
4120 } else {
4121 e1000_led_off(&adapter->hw);
4122 e1000_cleanup_led(&adapter->hw);
4123 }
4124 EM_CORE_UNLOCK(adapter);
4125}
4126
4127/*********************************************************************
4128* 82544 Coexistence issue workaround.
4129* There are 2 issues.
4130* 1. Transmit Hang issue.
4131* To detect this issue, following equation can be used...
4132* SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4133* If SUM[3:0] is in between 1 to 4, we will have this issue.
4134*
4135* 2. DAC issue.
4136* To detect this issue, following equation can be used...
4137* SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4138* If SUM[3:0] is in between 9 to c, we will have this issue.
4139*
4140*
4141* WORKAROUND:
4142* Make sure we do not have ending address
4143* as 1,2,3,4(Hang) or 9,a,b,c (DAC)
4144*
4145*************************************************************************/
4146static u32
4147lem_fill_descriptors (bus_addr_t address, u32 length,
4148 PDESC_ARRAY desc_array)
4149{
4150 u32 safe_terminator;
4151
4152 /* Since issue is sensitive to length and address.*/
4153 /* Let us first check the address...*/
4154 if (length <= 4) {
4155 desc_array->descriptor[0].address = address;
4156 desc_array->descriptor[0].length = length;
4157 desc_array->elements = 1;
4158 return (desc_array->elements);
4159 }
4160 safe_terminator = (u32)((((u32)address & 0x7) +
4161 (length & 0xF)) & 0xF);
4162 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
4163 if (safe_terminator == 0 ||
4164 (safe_terminator > 4 &&
4165 safe_terminator < 9) ||
4166 (safe_terminator > 0xC &&
4167 safe_terminator <= 0xF)) {
4168 desc_array->descriptor[0].address = address;
4169 desc_array->descriptor[0].length = length;
4170 desc_array->elements = 1;
4171 return (desc_array->elements);
4172 }
4173
4174 desc_array->descriptor[0].address = address;
4175 desc_array->descriptor[0].length = length - 4;
4176 desc_array->descriptor[1].address = address + (length - 4);
4177 desc_array->descriptor[1].length = 4;
4178 desc_array->elements = 2;
4179 return (desc_array->elements);
4180}
4181
4182/**********************************************************************
4183 *
4184 * Update the board statistics counters.
4185 *
4186 **********************************************************************/
/*
 * Harvest the hardware statistics registers (which clear on read)
 * into the softc's accumulators, then derive the ifnet counters.
 */
static void
lem_update_stats_counters(struct adapter *adapter)
{
	struct ifnet *ifp;

	/* Symbol/sequence errors are only meaningful with link (or copper). */
	if(adapter->hw.phy.media_type == e1000_media_type_copper ||
	   (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
		adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
	}
	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
	adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);

	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
	adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
	adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) +
	    ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32);
	adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) +
	    ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32);

	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
	adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);

	/* NOTE(review): only the high dword of TOR/TOT is read here,
	 * unlike GORC/GOTC above which sum low and high - confirm
	 * this is intentional. */
	adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
	adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);

	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);

	/* These counters exist on 82543 and newer only. */
	if (adapter->hw.mac.type >= e1000_82543) {
		adapter->stats.algnerrc += 
		E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
		adapter->stats.rxerrc += 
		E1000_READ_REG(&adapter->hw, E1000_RXERRC);
		adapter->stats.tncrs += 
		E1000_READ_REG(&adapter->hw, E1000_TNCRS);
		adapter->stats.cexterr += 
		E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
		adapter->stats.tsctc += 
		E1000_READ_REG(&adapter->hw, E1000_TSCTC);
		adapter->stats.tsctfc += 
		E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
	}
	ifp = adapter->ifp;

	ifp->if_collisions = adapter->stats.colc;

	/* Rx Errors */
	ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
	    adapter->stats.crcerrs + adapter->stats.algnerrc +
	    adapter->stats.ruc + adapter->stats.roc +
	    adapter->stats.mpc + adapter->stats.cexterr;

	/* Tx Errors */
	ifp->if_oerrors = adapter->stats.ecol +
	    adapter->stats.latecol + adapter->watchdog_events;
}
4279
4280/* Export a single 32-bit register via a read-only sysctl. */
4281static int
4282lem_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
4283{
4284 struct adapter *adapter;
4285 u_int val;
4286
4287 adapter = oidp->oid_arg1;
4288 val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
4289 return (sysctl_handle_int(oidp, &val, 0, req));
4290}
4291
4292/*
4293 * Add sysctl variables, one per statistic, to the system.
4294 */
4295static void
4296lem_add_hw_stats(struct adapter *adapter)
4297{
4298 device_t dev = adapter->dev;
4299
4300 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4301 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4302 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4303 struct e1000_hw_stats *stats = &adapter->stats;
4304
4305 struct sysctl_oid *stat_node;
4306 struct sysctl_oid_list *stat_list;
4307
4308 /* Driver Statistics */
4309 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_fail",
4310 CTLFLAG_RD, &adapter->mbuf_alloc_failed,
4311 "Std mbuf failed");
4312 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "cluster_alloc_fail",
4313 CTLFLAG_RD, &adapter->mbuf_cluster_failed,
4314 "Std mbuf cluster failed");
4315 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4316 CTLFLAG_RD, &adapter->dropped_pkts,
4317 "Driver dropped packets");
4318 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail",
4319 CTLFLAG_RD, &adapter->no_tx_dma_setup,
4320 "Driver tx dma failure in xmit");
4321 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail1",
4322 CTLFLAG_RD, &adapter->no_tx_desc_avail1,
4323 "Not enough tx descriptors failure in xmit");
4324 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail2",
4325 CTLFLAG_RD, &adapter->no_tx_desc_avail2,
4326 "Not enough tx descriptors failure in xmit");
4327 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
4328 CTLFLAG_RD, &adapter->rx_overruns,
4329 "RX overruns");
4330 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
4331 CTLFLAG_RD, &adapter->watchdog_events,
4332 "Watchdog timeouts");
4333
4334 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
4335 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_CTRL,
4336 lem_sysctl_reg_handler, "IU",
4337 "Device Control Register");
4338 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
4339 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RCTL,
4340 lem_sysctl_reg_handler, "IU",
4341 "Receiver Control Register");
4342 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
4343 CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
4344 "Flow Control High Watermark");
4345 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
4346 CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
4347 "Flow Control Low Watermark");
4348 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_workaround",
4349 CTLFLAG_RD, &adapter->tx_fifo_wrk_cnt,
4350 "TX FIFO workaround events");
4351 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_reset",
4352 CTLFLAG_RD, &adapter->tx_fifo_reset_cnt,
4353 "TX FIFO resets");
4354
4355 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_head",
4356 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDH(0),
4357 lem_sysctl_reg_handler, "IU",
4358 "Transmit Descriptor Head");
4359 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_tail",
4360 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDT(0),
4361 lem_sysctl_reg_handler, "IU",
4362 "Transmit Descriptor Tail");
4363 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_head",
4364 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDH(0),
4365 lem_sysctl_reg_handler, "IU",
4366 "Receive Descriptor Head");
4367 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_tail",
4368 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDT(0),
4369 lem_sysctl_reg_handler, "IU",
4370 "Receive Descriptor Tail");
4371
4372
4373 /* MAC stats get their own sub node */
4374
4375 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4376 CTLFLAG_RD, NULL, "Statistics");
4377 stat_list = SYSCTL_CHILDREN(stat_node);
4378
4379 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
4380 CTLFLAG_RD, &stats->ecol,
4381 "Excessive collisions");
4382 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
4383 CTLFLAG_RD, &stats->scc,
4384 "Single collisions");
4385 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
4386 CTLFLAG_RD, &stats->mcc,
4387 "Multiple collisions");
4388 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
4389 CTLFLAG_RD, &stats->latecol,
4390 "Late collisions");
4391 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
4392 CTLFLAG_RD, &stats->colc,
4393 "Collision Count");
4394 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
4395 CTLFLAG_RD, &adapter->stats.symerrs,
4396 "Symbol Errors");
4397 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
4398 CTLFLAG_RD, &adapter->stats.sec,
4399 "Sequence Errors");
4400 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
4401 CTLFLAG_RD, &adapter->stats.dc,
4402 "Defer Count");
4403 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
4404 CTLFLAG_RD, &adapter->stats.mpc,
4405 "Missed Packets");
4406 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
4407 CTLFLAG_RD, &adapter->stats.rnbc,
4408 "Receive No Buffers");
4409 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
4410 CTLFLAG_RD, &adapter->stats.ruc,
4411 "Receive Undersize");
4412 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4413 CTLFLAG_RD, &adapter->stats.rfc,
4414 "Fragmented Packets Received ");
4415 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
4416 CTLFLAG_RD, &adapter->stats.roc,
4417 "Oversized Packets Received");
4418 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
4419 CTLFLAG_RD, &adapter->stats.rjc,
4420 "Recevied Jabber");
4421 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
4422 CTLFLAG_RD, &adapter->stats.rxerrc,
4423 "Receive Errors");
4424 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4425 CTLFLAG_RD, &adapter->stats.crcerrs,
4426 "CRC errors");
4427 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
4428 CTLFLAG_RD, &adapter->stats.algnerrc,
4429 "Alignment Errors");
4430 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
4431 CTLFLAG_RD, &adapter->stats.cexterr,
4432 "Collision/Carrier extension errors");
4433 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4434 CTLFLAG_RD, &adapter->stats.xonrxc,
4435 "XON Received");
4436 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4437 CTLFLAG_RD, &adapter->stats.xontxc,
4438 "XON Transmitted");
4439 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4440 CTLFLAG_RD, &adapter->stats.xoffrxc,
4441 "XOFF Received");
4442 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4443 CTLFLAG_RD, &adapter->stats.xofftxc,
4444 "XOFF Transmitted");
4445
4446 /* Packet Reception Stats */
4447 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
4448 CTLFLAG_RD, &adapter->stats.tpr,
4449 "Total Packets Received ");
4450 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
4451 CTLFLAG_RD, &adapter->stats.gprc,
4452 "Good Packets Received");
4453 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
4454 CTLFLAG_RD, &adapter->stats.bprc,
4455 "Broadcast Packets Received");
4456 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
4457 CTLFLAG_RD, &adapter->stats.mprc,
4458 "Multicast Packets Received");
4459 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4460 CTLFLAG_RD, &adapter->stats.prc64,
4461 "64 byte frames received ");
4462 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4463 CTLFLAG_RD, &adapter->stats.prc127,
4464 "65-127 byte frames received");
4465 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4466 CTLFLAG_RD, &adapter->stats.prc255,
4467 "128-255 byte frames received");
4468 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4469 CTLFLAG_RD, &adapter->stats.prc511,
4470 "256-511 byte frames received");
4471 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4472 CTLFLAG_RD, &adapter->stats.prc1023,
4473 "512-1023 byte frames received");
4474 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4475 CTLFLAG_RD, &adapter->stats.prc1522,
4476 "1023-1522 byte frames received");
4477 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
4478 CTLFLAG_RD, &adapter->stats.gorc,
4479 "Good Octets Received");
4480
4481 /* Packet Transmission Stats */
4482 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4483 CTLFLAG_RD, &adapter->stats.gotc,
4484 "Good Octets Transmitted");
4485 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4486 CTLFLAG_RD, &adapter->stats.tpt,
4487 "Total Packets Transmitted");
4488 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4489 CTLFLAG_RD, &adapter->stats.gptc,
4490 "Good Packets Transmitted");
4491 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4492 CTLFLAG_RD, &adapter->stats.bptc,
4493 "Broadcast Packets Transmitted");
4494 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4495 CTLFLAG_RD, &adapter->stats.mptc,
4496 "Multicast Packets Transmitted");
4497 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4498 CTLFLAG_RD, &adapter->stats.ptc64,
4499 "64 byte frames transmitted ");
4500 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4501 CTLFLAG_RD, &adapter->stats.ptc127,
4502 "65-127 byte frames transmitted");
4503 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4504 CTLFLAG_RD, &adapter->stats.ptc255,
4505 "128-255 byte frames transmitted");
4506 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4507 CTLFLAG_RD, &adapter->stats.ptc511,
4508 "256-511 byte frames transmitted");
4509 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4510 CTLFLAG_RD, &adapter->stats.ptc1023,
4511 "512-1023 byte frames transmitted");
4512 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4513 CTLFLAG_RD, &adapter->stats.ptc1522,
4514 "1024-1522 byte frames transmitted");
4515 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
4516 CTLFLAG_RD, &adapter->stats.tsctc,
4517 "TSO Contexts Transmitted");
4518 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
4519 CTLFLAG_RD, &adapter->stats.tsctfc,
4520 "TSO Contexts Failed");
4521}
4522
4523/**********************************************************************
4524 *
4525 * This routine provides a way to dump out the adapter eeprom,
4526 * often a useful debug/service tool. This only dumps the first
4527 * 32 words, stuff that matters is in that extent.
4528 *
4529 **********************************************************************/
4530
4531static int
4532lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
4533{
4534 struct adapter *adapter;
4535 int error;
4536 int result;
4537
4538 result = -1;
4539 error = sysctl_handle_int(oidp, &result, 0, req);
4540
4541 if (error || !req->newptr)
4542 return (error);
4543
4544 /*
4545 * This value will cause a hex dump of the
4546 * first 32 16-bit words of the EEPROM to
4547 * the screen.
4548 */
4549 if (result == 1) {
4550 adapter = (struct adapter *)arg1;
4551 lem_print_nvm_info(adapter);
4552 }
4553
4554 return (error);
4555}
4556
4557static void
4558lem_print_nvm_info(struct adapter *adapter)
4559{
4560 u16 eeprom_data;
4561 int i, j, row = 0;
4562
4563 /* Its a bit crude, but it gets the job done */
4564 printf("\nInterface EEPROM Dump:\n");
4565 printf("Offset\n0x0000 ");
4566 for (i = 0, j = 0; i < 32; i++, j++) {
4567 if (j == 8) { /* Make the offset block */
4568 j = 0; ++row;
4569 printf("\n0x00%x0 ",row);
4570 }
4571 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
4572 printf("%04x ", eeprom_data);
4573 }
4574 printf("\n");
4575}
4576
4577static int
4578lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
4579{
4580 struct em_int_delay_info *info;
4581 struct adapter *adapter;
4582 u32 regval;
4583 int error;
4584 int usecs;
4585 int ticks;
4586
4587 info = (struct em_int_delay_info *)arg1;
4588 usecs = info->value;
4589 error = sysctl_handle_int(oidp, &usecs, 0, req);
4590 if (error != 0 || req->newptr == NULL)
4591 return (error);
4592 if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
4593 return (EINVAL);
4594 info->value = usecs;
4595 ticks = EM_USECS_TO_TICKS(usecs);
4596
4597 adapter = info->adapter;
4598
4599 EM_CORE_LOCK(adapter);
4600 regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
4601 regval = (regval & ~0xffff) | (ticks & 0xffff);
4602 /* Handle a few special cases. */
4603 switch (info->offset) {
4604 case E1000_RDTR:
4605 break;
4606 case E1000_TIDV:
4607 if (ticks == 0) {
4608 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
4609 /* Don't write 0 into the TIDV register. */
4610 regval++;
4611 } else
4612 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
4613 break;
4614 }
4615 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
4616 EM_CORE_UNLOCK(adapter);
4617 return (0);
4618}
4619
4620static void
4621lem_add_int_delay_sysctl(struct adapter *adapter, const char *name,
4622 const char *description, struct em_int_delay_info *info,
4623 int offset, int value)
4624{
4625 info->adapter = adapter;
4626 info->offset = offset;
4627 info->value = value;
4628 SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
4629 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4630 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
4631 info, 0, lem_sysctl_int_delay, "I", description);
4632}
4633
4634static void
4635lem_set_flow_cntrl(struct adapter *adapter, const char *name,
4636 const char *description, int *limit, int value)
4637{
4638 *limit = value;
4639 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4640 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4641 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4642}
4643
4644static void
4645lem_add_rx_process_limit(struct adapter *adapter, const char *name,
4646 const char *description, int *limit, int value)
4647{
4648 *limit = value;
4649 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4650 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4651 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4652}