Deleted Added
sdiff udiff text old ( 229939 ) new ( 231796 )
full compact
1/******************************************************************************
2
3 Copyright (c) 2001-2011, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD: head/sys/dev/e1000/if_lem.c 229939 2012-01-10 19:57:23Z luigi $*/
34
35#ifdef HAVE_KERNEL_OPTION_HEADERS
36#include "opt_device_polling.h"
37#include "opt_inet.h"
38#include "opt_inet6.h"
39#endif
40
41#include <sys/param.h>
42#include <sys/systm.h>
43#include <sys/bus.h>
44#include <sys/endian.h>
45#include <sys/kernel.h>
46#include <sys/kthread.h>
47#include <sys/malloc.h>
48#include <sys/mbuf.h>
49#include <sys/module.h>
50#include <sys/rman.h>
51#include <sys/socket.h>
52#include <sys/sockio.h>
53#include <sys/sysctl.h>
54#include <sys/taskqueue.h>
55#include <sys/eventhandler.h>
56#include <machine/bus.h>
57#include <machine/resource.h>
58
59#include <net/bpf.h>
60#include <net/ethernet.h>
61#include <net/if.h>
62#include <net/if_arp.h>
63#include <net/if_dl.h>
64#include <net/if_media.h>
65
66#include <net/if_types.h>
67#include <net/if_vlan_var.h>
68
69#include <netinet/in_systm.h>
70#include <netinet/in.h>
71#include <netinet/if_ether.h>
72#include <netinet/ip.h>
73#include <netinet/ip6.h>
74#include <netinet/tcp.h>
75#include <netinet/udp.h>
76
77#include <machine/in_cksum.h>
78#include <dev/led/led.h>
79#include <dev/pci/pcivar.h>
80#include <dev/pci/pcireg.h>
81
82#include "e1000_api.h"
83#include "if_lem.h"
84
85/*********************************************************************
86 * Legacy Em Driver version:
87 *********************************************************************/
/* Appended to the branding string in lem_probe()'s device description. */
char lem_driver_version[] = "1.0.4";
89
90/*********************************************************************
91 * PCI Device ID Table
92 *
93 * Used by probe to select devices to load on
94 * Last field stores an index into e1000_strings
95 * Last entry must be all 0s
96 *
97 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
98 *********************************************************************/
99
/*
 * Supported-device match table, scanned linearly by lem_probe().
 * Subvendor/subdevice of PCI_ANY_ID wildcard-match; the final index
 * field selects the branding string from lem_strings[].
 */
static em_vendor_info_t lem_vendor_info_array[] =
{
	/* Intel(R) PRO/1000 Network Connection */
	{ 0x8086, E1000_DEV_ID_82540EM,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EM_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP_LP,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82541EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541ER,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541ER_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82542,		PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82543GC_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82543GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82544EI_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544EI_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544GC_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82545EM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545EM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82546EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546EB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_PCIE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
						PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82547EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82547EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82547GI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	/* required last entry: vendor_id == 0 terminates the probe scan */
	{ 0, 0, 0, 0, 0}
};
150
151/*********************************************************************
152 * Table of branding strings for all supported NICs.
153 *********************************************************************/
154
/* Indexed by the trailing 'index' field of lem_vendor_info_array entries. */
static char *lem_strings[] = {
	"Intel(R) PRO/1000 Legacy Network Connection"
};
158
159/*********************************************************************
160 * Function prototypes
161 *********************************************************************/
162static int lem_probe(device_t);
163static int lem_attach(device_t);
164static int lem_detach(device_t);
165static int lem_shutdown(device_t);
166static int lem_suspend(device_t);
167static int lem_resume(device_t);
168static void lem_start(struct ifnet *);
169static void lem_start_locked(struct ifnet *ifp);
170static int lem_ioctl(struct ifnet *, u_long, caddr_t);
171static void lem_init(void *);
172static void lem_init_locked(struct adapter *);
173static void lem_stop(void *);
174static void lem_media_status(struct ifnet *, struct ifmediareq *);
175static int lem_media_change(struct ifnet *);
176static void lem_identify_hardware(struct adapter *);
177static int lem_allocate_pci_resources(struct adapter *);
178static int lem_allocate_irq(struct adapter *adapter);
179static void lem_free_pci_resources(struct adapter *);
180static void lem_local_timer(void *);
181static int lem_hardware_init(struct adapter *);
182static int lem_setup_interface(device_t, struct adapter *);
183static void lem_setup_transmit_structures(struct adapter *);
184static void lem_initialize_transmit_unit(struct adapter *);
185static int lem_setup_receive_structures(struct adapter *);
186static void lem_initialize_receive_unit(struct adapter *);
187static void lem_enable_intr(struct adapter *);
188static void lem_disable_intr(struct adapter *);
189static void lem_free_transmit_structures(struct adapter *);
190static void lem_free_receive_structures(struct adapter *);
191static void lem_update_stats_counters(struct adapter *);
192static void lem_add_hw_stats(struct adapter *adapter);
193static void lem_txeof(struct adapter *);
194static void lem_tx_purge(struct adapter *);
195static int lem_allocate_receive_structures(struct adapter *);
196static int lem_allocate_transmit_structures(struct adapter *);
197static bool lem_rxeof(struct adapter *, int, int *);
198#ifndef __NO_STRICT_ALIGNMENT
199static int lem_fixup_rx(struct adapter *);
200#endif
201static void lem_receive_checksum(struct adapter *, struct e1000_rx_desc *,
202 struct mbuf *);
203static void lem_transmit_checksum_setup(struct adapter *, struct mbuf *,
204 u32 *, u32 *);
205static void lem_set_promisc(struct adapter *);
206static void lem_disable_promisc(struct adapter *);
207static void lem_set_multi(struct adapter *);
208static void lem_update_link_status(struct adapter *);
209static int lem_get_buf(struct adapter *, int);
210static void lem_register_vlan(void *, struct ifnet *, u16);
211static void lem_unregister_vlan(void *, struct ifnet *, u16);
212static void lem_setup_vlan_hw_support(struct adapter *);
213static int lem_xmit(struct adapter *, struct mbuf **);
214static void lem_smartspeed(struct adapter *);
215static int lem_82547_fifo_workaround(struct adapter *, int);
216static void lem_82547_update_fifo_head(struct adapter *, int);
217static int lem_82547_tx_fifo_reset(struct adapter *);
218static void lem_82547_move_tail(void *);
219static int lem_dma_malloc(struct adapter *, bus_size_t,
220 struct em_dma_alloc *, int);
221static void lem_dma_free(struct adapter *, struct em_dma_alloc *);
222static int lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
223static void lem_print_nvm_info(struct adapter *);
224static int lem_is_valid_ether_addr(u8 *);
225static u32 lem_fill_descriptors (bus_addr_t address, u32 length,
226 PDESC_ARRAY desc_array);
227static int lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
228static void lem_add_int_delay_sysctl(struct adapter *, const char *,
229 const char *, struct em_int_delay_info *, int, int);
230static void lem_set_flow_cntrl(struct adapter *, const char *,
231 const char *, int *, int);
232/* Management and WOL Support */
233static void lem_init_manageability(struct adapter *);
234static void lem_release_manageability(struct adapter *);
235static void lem_get_hw_control(struct adapter *);
236static void lem_release_hw_control(struct adapter *);
237static void lem_get_wakeup(device_t);
238static void lem_enable_wakeup(device_t);
239static int lem_enable_phy_wakeup(struct adapter *);
240static void lem_led_func(void *, int);
241
242#ifdef EM_LEGACY_IRQ
243static void lem_intr(void *);
244#else /* FAST IRQ */
245static int lem_irq_fast(void *);
246static void lem_handle_rxtx(void *context, int pending);
247static void lem_handle_link(void *context, int pending);
248static void lem_add_rx_process_limit(struct adapter *, const char *,
249 const char *, int *, int);
250#endif /* ~EM_LEGACY_IRQ */
251
252#ifdef DEVICE_POLLING
253static poll_handler_t lem_poll;
254#endif /* POLLING */
255
256/*********************************************************************
257 * FreeBSD Device Interface Entry Points
258 *********************************************************************/
259
static device_method_t lem_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, lem_probe),
	DEVMETHOD(device_attach, lem_attach),
	DEVMETHOD(device_detach, lem_detach),
	DEVMETHOD(device_shutdown, lem_shutdown),
	DEVMETHOD(device_suspend, lem_suspend),
	DEVMETHOD(device_resume, lem_resume),
	{0, 0}	/* terminator */
};

/*
 * Registered under the name "em" (shared devclass with the newer em(4)
 * driver) so legacy adapters still appear as em%d interfaces.
 */
static driver_t lem_driver = {
	"em", lem_methods, sizeof(struct adapter),
};

extern devclass_t em_devclass;
DRIVER_MODULE(lem, pci, lem_driver, em_devclass, 0, 0);
MODULE_DEPEND(lem, pci, 1, 1, 1);
MODULE_DEPEND(lem, ether, 1, 1, 1);
279
280/*********************************************************************
281 * Tunable default values.
282 *********************************************************************/
283
/*
 * Interrupt-delay registers count in units of 1.024 usec; these macros
 * convert between register ticks and microseconds with rounding.
 */
#define EM_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
#define EM_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)

/* Defaults for the per-device interrupt-delay sysctls set up in lem_attach() */
static int lem_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
static int lem_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
static int lem_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
static int lem_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
static int lem_rxd = EM_DEFAULT_RXD;	/* requested RX descriptor count */
static int lem_txd = EM_DEFAULT_TXD;	/* requested TX descriptor count */
static int lem_smart_pwr_down = FALSE;

/* Controls whether promiscuous also shows bad packets */
static int lem_debug_sbp = FALSE;

/* Loader tunables; note the namespace is shared with em(4) ("hw.em.*"). */
TUNABLE_INT("hw.em.tx_int_delay", &lem_tx_int_delay_dflt);
TUNABLE_INT("hw.em.rx_int_delay", &lem_rx_int_delay_dflt);
TUNABLE_INT("hw.em.tx_abs_int_delay", &lem_tx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rx_abs_int_delay", &lem_rx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rxd", &lem_rxd);
TUNABLE_INT("hw.em.txd", &lem_txd);
TUNABLE_INT("hw.em.smart_pwr_down", &lem_smart_pwr_down);
TUNABLE_INT("hw.em.sbp", &lem_debug_sbp);

#ifndef EM_LEGACY_IRQ
/* How many packets rxeof tries to clean at a time */
static int lem_rx_process_limit = 100;
TUNABLE_INT("hw.em.rx_process_limit", &lem_rx_process_limit);
#endif

/* Flow control setting - default to FULL */
static int lem_fc_setting = e1000_fc_full;
TUNABLE_INT("hw.em.fc_setting", &lem_fc_setting);

/* Global used in WOL setup with multiport cards */
static int global_quad_port_a = 0;

#ifdef DEV_NETMAP	/* see ixgbe.c for details */
#include <dev/netmap/if_lem_netmap.h>
#endif /* DEV_NETMAP */
323
324/*********************************************************************
325 * Device identification routine
326 *
327 * em_probe determines if the driver should be loaded on
328 * adapter based on PCI vendor/device id of the adapter.
329 *
330 * return BUS_PROBE_DEFAULT on success, positive on failure
331 *********************************************************************/
332
333static int
334lem_probe(device_t dev)
335{
336 char adapter_name[60];
337 u16 pci_vendor_id = 0;
338 u16 pci_device_id = 0;
339 u16 pci_subvendor_id = 0;
340 u16 pci_subdevice_id = 0;
341 em_vendor_info_t *ent;
342
343 INIT_DEBUGOUT("em_probe: begin");
344
345 pci_vendor_id = pci_get_vendor(dev);
346 if (pci_vendor_id != EM_VENDOR_ID)
347 return (ENXIO);
348
349 pci_device_id = pci_get_device(dev);
350 pci_subvendor_id = pci_get_subvendor(dev);
351 pci_subdevice_id = pci_get_subdevice(dev);
352
353 ent = lem_vendor_info_array;
354 while (ent->vendor_id != 0) {
355 if ((pci_vendor_id == ent->vendor_id) &&
356 (pci_device_id == ent->device_id) &&
357
358 ((pci_subvendor_id == ent->subvendor_id) ||
359 (ent->subvendor_id == PCI_ANY_ID)) &&
360
361 ((pci_subdevice_id == ent->subdevice_id) ||
362 (ent->subdevice_id == PCI_ANY_ID))) {
363 sprintf(adapter_name, "%s %s",
364 lem_strings[ent->index],
365 lem_driver_version);
366 device_set_desc_copy(dev, adapter_name);
367 return (BUS_PROBE_DEFAULT);
368 }
369 ent++;
370 }
371
372 return (ENXIO);
373}
374
375/*********************************************************************
376 * Device initialization routine
377 *
378 * The attach entry point is called when the driver is being loaded.
379 * This routine identifies the type of hardware, allocates all resources
380 * and initializes the hardware.
381 *
382 * return 0 on success, positive on failure
383 *********************************************************************/
384
static int
lem_attach(device_t dev)
{
	struct adapter	*adapter;
	int		tsize, rsize;
	int		error = 0;

	INIT_DEBUGOUT("lem_attach: begin");

	/* Honor hint.lem.N.disabled from the loader/device hints. */
	if (resource_disabled("lem", device_get_unit(dev))) {
		device_printf(dev, "Disabled by device hint\n");
		return (ENXIO);
	}

	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;
	EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
	EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
	EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL stuff */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
	    lem_sysctl_nvm_info, "I", "NVM Information");

	/* Callouts tied to their mutexes so callout_drain() is safe later. */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
	callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);

	/* Determine hardware and mac info */
	lem_identify_hardware(adapter);

	/* Setup PCI resources */
	if (lem_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci;
	}

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto err_pci;
	}

	e1000_get_bus_info(&adapter->hw);

	/* Set up some sysctls for the tunable interrupt delays */
	lem_add_int_delay_sysctl(adapter, "rx_int_delay",
	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
	    E1000_REGISTER(&adapter->hw, E1000_RDTR), lem_rx_int_delay_dflt);
	lem_add_int_delay_sysctl(adapter, "tx_int_delay",
	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
	    E1000_REGISTER(&adapter->hw, E1000_TIDV), lem_tx_int_delay_dflt);
	/* Absolute delay limits (RADV/TADV) exist only on 82540 and later. */
	if (adapter->hw.mac.type >= e1000_82540) {
		lem_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
		    "receive interrupt delay limit in usecs",
		    &adapter->rx_abs_int_delay,
		    E1000_REGISTER(&adapter->hw, E1000_RADV),
		    lem_rx_abs_int_delay_dflt);
		lem_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
		    "transmit interrupt delay limit in usecs",
		    &adapter->tx_abs_int_delay,
		    E1000_REGISTER(&adapter->hw, E1000_TADV),
		    lem_tx_abs_int_delay_dflt);
	}

#ifndef EM_LEGACY_IRQ
	/* Sysctls for limiting the amount of work done in the taskqueue */
	lem_add_rx_process_limit(adapter, "rx_processing_limit",
	    "max number of rx packets to process", &adapter->rx_process_limit,
	    lem_rx_process_limit);
#endif

	/* Sysctl for setting the interface flow control */
	lem_set_flow_cntrl(adapter, "flow_control",
	    "flow control setting",
	    &adapter->fc_setting, lem_fc_setting);

	/*
	 * Validate number of transmit and receive descriptors. It
	 * must not exceed hardware maximum, and must be multiple
	 * of E1000_DBA_ALIGN.  Invalid tunables fall back to the
	 * compile-time defaults rather than failing attach.
	 */
	if (((lem_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac.type >= e1000_82544 && lem_txd > EM_MAX_TXD) ||
	    (adapter->hw.mac.type < e1000_82544 && lem_txd > EM_MAX_TXD_82543) ||
	    (lem_txd < EM_MIN_TXD)) {
		device_printf(dev, "Using %d TX descriptors instead of %d!\n",
		    EM_DEFAULT_TXD, lem_txd);
		adapter->num_tx_desc = EM_DEFAULT_TXD;
	} else
		adapter->num_tx_desc = lem_txd;
	if (((lem_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac.type >= e1000_82544 && lem_rxd > EM_MAX_RXD) ||
	    (adapter->hw.mac.type < e1000_82544 && lem_rxd > EM_MAX_RXD_82543) ||
	    (lem_rxd < EM_MIN_RXD)) {
		device_printf(dev, "Using %d RX descriptors instead of %d!\n",
		    EM_DEFAULT_RXD, lem_rxd);
		adapter->num_rx_desc = EM_DEFAULT_RXD;
	} else
		adapter->num_rx_desc = lem_rxd;

	adapter->hw.mac.autoneg = DO_AUTO_NEG;
	adapter->hw.phy.autoneg_wait_to_complete = FALSE;
	adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
	adapter->rx_buffer_len = 2048;

	e1000_init_script_state_82541(&adapter->hw, TRUE);
	e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = FALSE;
		adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
	}

	/*
	 * Set the frame limits assuming
	 * standard ethernet sized frames.
	 */
	adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
	adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;

	/*
	 * This controls when hardware reports transmit completion
	 * status.
	 */
	adapter->hw.mac.report_tx_early = 1;

	/* Descriptor rings must be EM_DBA_ALIGN-aligned DMA memory. */
	tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
	    EM_DBA_ALIGN);

	/* Allocate Transmit Descriptor ring */
	if (lem_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
		device_printf(dev, "Unable to allocate tx_desc memory\n");
		error = ENOMEM;
		goto err_tx_desc;
	}
	adapter->tx_desc_base =
	    (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;

	rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
	    EM_DBA_ALIGN);

	/* Allocate Receive Descriptor ring */
	if (lem_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
		device_printf(dev, "Unable to allocate rx_desc memory\n");
		error = ENOMEM;
		goto err_rx_desc;
	}
	adapter->rx_desc_base =
	    (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err_hw_init;
	}

	/*
	** Start from a known state, this is
	** important in reading the nvm and
	** mac from that.
	*/
	e1000_reset_hw(&adapter->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
		/*
		** Some PCI-E parts fail the first check due to
		** the link being in sleep state, call it again,
		** if it fails a second time its a real issue.
		*/
		if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto err_hw_init;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&adapter->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto err_hw_init;
	}

	if (!lem_is_valid_ether_addr(adapter->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto err_hw_init;
	}

	/* Initialize the hardware */
	if (lem_hardware_init(adapter)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		error = EIO;
		goto err_hw_init;
	}

	/* Allocate transmit descriptors and buffers */
	if (lem_allocate_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		error = ENOMEM;
		goto err_tx_struct;
	}

	/* Allocate receive descriptors and buffers */
	if (lem_allocate_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		error = ENOMEM;
		goto err_rx_struct;
	}

	/*
	** Do interrupt configuration
	*/
	error = lem_allocate_irq(adapter);
	if (error)
		goto err_rx_struct;

	/*
	 * Get Wake-on-Lan and Management info for later use
	 */
	lem_get_wakeup(dev);

	/* Setup OS specific network interface */
	if (lem_setup_interface(dev, adapter) != 0)
		goto err_rx_struct;

	/* Initialize statistics */
	lem_update_stats_counters(adapter);

	adapter->hw.mac.get_link_status = 1;
	lem_update_link_status(adapter);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&adapter->hw))
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");

	/* Do we need workaround for 82544 PCI-X adapter? */
	if (adapter->hw.bus.type == e1000_bus_type_pcix &&
	    adapter->hw.mac.type == e1000_82544)
		adapter->pcix_82544 = TRUE;
	else
		adapter->pcix_82544 = FALSE;

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    lem_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    lem_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	lem_add_hw_stats(adapter);

	/* Non-AMT based hardware can now take control from firmware */
	if (adapter->has_manage && !adapter->has_amt)
		lem_get_hw_control(adapter);

	/* Tell the stack that the interface is not active */
	adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	adapter->led_dev = led_create(lem_led_func, adapter,
	    device_get_nameunit(dev));

#ifdef DEV_NETMAP
	lem_netmap_attach(adapter);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("lem_attach: end");

	return (0);

	/*
	 * Error labels unwind in reverse order of acquisition and fall
	 * through to the common cleanup at err_pci.
	 *
	 * NOTE(review): paths that jump to err_rx_struct AFTER
	 * lem_allocate_receive_structures() succeeded (IRQ or interface
	 * setup failure) never call lem_free_receive_structures() —
	 * looks like a leak; confirm against later driver revisions.
	 */
err_rx_struct:
	lem_free_transmit_structures(adapter);
err_tx_struct:
err_hw_init:
	lem_release_hw_control(adapter);
	lem_dma_free(adapter, &adapter->rxdma);
err_rx_desc:
	lem_dma_free(adapter, &adapter->txdma);
err_tx_desc:
err_pci:
	if (adapter->ifp != NULL)
		if_free(adapter->ifp);
	lem_free_pci_resources(adapter);
	free(adapter->mta, M_DEVBUF);	/* free(NULL) is a no-op */
	EM_TX_LOCK_DESTROY(adapter);
	EM_RX_LOCK_DESTROY(adapter);
	EM_CORE_LOCK_DESTROY(adapter);

	return (error);
}
686
687/*********************************************************************
688 * Device removal routine
689 *
690 * The detach entry point is called when the driver is being removed.
691 * This routine stops the adapter and deallocates all the resources
692 * that were allocated for driver operation.
693 *
694 * return 0 on success, positive on failure
695 *********************************************************************/
696
static int
lem_detach(device_t dev)
{
	struct adapter	*adapter = device_get_softc(dev);
	struct ifnet	*ifp = adapter->ifp;

	INIT_DEBUGOUT("em_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	if (adapter->led_dev != NULL)
		led_destroy(adapter->led_dev);

	/*
	 * Quiesce the adapter under both locks; in_detach makes the
	 * ioctl path a no-op from here on.
	 */
	EM_CORE_LOCK(adapter);
	EM_TX_LOCK(adapter);
	adapter->in_detach = 1;
	lem_stop(adapter);
	e1000_phy_hw_reset(&adapter->hw);

	lem_release_manageability(adapter);

	EM_TX_UNLOCK(adapter);
	EM_CORE_UNLOCK(adapter);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	/* Detach from the stack before draining callouts (stops rearming). */
	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
	callout_drain(&adapter->tx_fifo_timer);

#ifdef DEV_NETMAP
	netmap_detach(ifp);
#endif /* DEV_NETMAP */
	lem_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(ifp);

	lem_free_transmit_structures(adapter);
	lem_free_receive_structures(adapter);

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base) {
		lem_dma_free(adapter, &adapter->txdma);
		adapter->tx_desc_base = NULL;
	}

	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base) {
		lem_dma_free(adapter, &adapter->rxdma);
		adapter->rx_desc_base = NULL;
	}

	lem_release_hw_control(adapter);
	free(adapter->mta, M_DEVBUF);
	EM_TX_LOCK_DESTROY(adapter);
	EM_RX_LOCK_DESTROY(adapter);
	EM_CORE_LOCK_DESTROY(adapter);

	return (0);
}
770
771/*********************************************************************
772 *
773 * Shutdown entry point
774 *
775 **********************************************************************/
776
777static int
778lem_shutdown(device_t dev)
779{
780 return lem_suspend(dev);
781}
782
783/*
784 * Suspend/resume device methods.
785 */
static int
lem_suspend(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);

	EM_CORE_LOCK(adapter);

	/* Hand the hardware back to firmware and arm wake-up before sleep. */
	lem_release_manageability(adapter);
	lem_release_hw_control(adapter);
	lem_enable_wakeup(dev);

	EM_CORE_UNLOCK(adapter);

	return bus_generic_suspend(dev);
}
801
static int
lem_resume(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = adapter->ifp;

	/* Re-initialize the adapter, then kick the transmit path. */
	EM_CORE_LOCK(adapter);
	lem_init_locked(adapter);
	lem_init_manageability(adapter);
	EM_CORE_UNLOCK(adapter);
	lem_start(ifp);	/* lem_start takes the TX lock itself */

	return bus_generic_resume(dev);
}
816
817
818static void
819lem_start_locked(struct ifnet *ifp)
820{
821 struct adapter *adapter = ifp->if_softc;
822 struct mbuf *m_head;
823
824 EM_TX_LOCK_ASSERT(adapter);
825
826 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
827 IFF_DRV_RUNNING)
828 return;
829 if (!adapter->link_active)
830 return;
831
832 /*
833 * Force a cleanup if number of TX descriptors
834 * available hits the threshold
835 */
836 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
837 lem_txeof(adapter);
838 /* Now do we at least have a minimal? */
839 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
840 adapter->no_tx_desc_avail1++;
841 return;
842 }
843 }
844
845 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
846
847 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
848 if (m_head == NULL)
849 break;
850 /*
851 * Encapsulation can modify our pointer, and or make it
852 * NULL on failure. In that event, we can't requeue.
853 */
854 if (lem_xmit(adapter, &m_head)) {
855 if (m_head == NULL)
856 break;
857 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
858 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
859 break;
860 }
861
862 /* Send a copy of the frame to the BPF listener */
863 ETHER_BPF_MTAP(ifp, m_head);
864
865 /* Set timeout in case hardware has problems transmitting. */
866 adapter->watchdog_check = TRUE;
867 adapter->watchdog_time = ticks;
868 }
869 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
870 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
871
872 return;
873}
874
875static void
876lem_start(struct ifnet *ifp)
877{
878 struct adapter *adapter = ifp->if_softc;
879
880 EM_TX_LOCK(adapter);
881 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
882 lem_start_locked(ifp);
883 EM_TX_UNLOCK(adapter);
884}
885
886/*********************************************************************
887 * Ioctl entry point
888 *
889 * em_ioctl is called when the user wants to configure the
890 * interface.
891 *
892 * return 0 on success, positive on failure
893 **********************************************************************/
894
/*
 * Ioctl entry point: handle interface configuration requests from
 * userland (addresses, MTU, interface flags, multicast list, media
 * selection, offload capabilities).
 *
 * Returns 0 on success, a positive errno on failure.
 */
static int
lem_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	bool avoid_reset = FALSE;
	int error = 0;

	/* Device is being detached; refuse further configuration. */
	if (adapter->in_detach)
		return (error);

	switch (command) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			/* Only (re)initialize if not already running. */
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				lem_init(adapter);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
	    {
		int max_frame_size;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

		EM_CORE_LOCK(adapter);
		switch (adapter->hw.mac.type) {
		case e1000_82542:
			/* 82542 cannot do jumbo frames. */
			max_frame_size = ETHER_MAX_LEN;
			break;
		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
		}
		/* Reject MTUs the hardware cannot frame. */
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			EM_CORE_UNLOCK(adapter);
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		adapter->max_frame_size =
		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
		/* Re-init so RX buffers match the new frame size. */
		lem_init_locked(adapter);
		EM_CORE_UNLOCK(adapter);
		break;
	    }
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd:\
		    SIOCSIFFLAGS (Set Interface Flags)");
		EM_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/*
				 * Already running: only reprogram the RX
				 * filters when PROMISC/ALLMULTI changed,
				 * avoiding a full (link-dropping) re-init.
				 */
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					lem_disable_promisc(adapter);
					lem_set_promisc(adapter);
				}
			} else
				lem_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				EM_TX_LOCK(adapter);
				lem_stop(adapter);
				EM_TX_UNLOCK(adapter);
			}
		/* Remember the flags so the XOR above works next time. */
		adapter->if_flags = ifp->if_flags;
		EM_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			EM_CORE_LOCK(adapter);
			lem_disable_intr(adapter);
			lem_set_multi(adapter);
			/* 82542 rev 2 needs its RX unit reprogrammed after
			 * the multicast table update (see lem_set_multi). */
			if (adapter->hw.mac.type == e1000_82542 &&
	    		    adapter->hw.revision_id == E1000_REVISION_2) {
				lem_initialize_receive_unit(adapter);
			}
#ifdef DEVICE_POLLING
			if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
				lem_enable_intr(adapter);
			EM_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		EM_CORE_LOCK(adapter);
		if (e1000_check_reset_block(&adapter->hw)) {
			EM_CORE_UNLOCK(adapter);
			device_printf(adapter->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		EM_CORE_UNLOCK(adapter);
		/* FALLTHROUGH -- both set and get go to ifmedia_ioctl(). */
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: \
		    SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask, reinit;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		reinit = 0;
		/* mask holds only the capability bits being toggled. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(lem_poll, ifp);
				if (error)
					return (error);
				EM_CORE_LOCK(adapter);
				lem_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				EM_CORE_LOCK(adapter);
				lem_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if ((mask & IFCAP_WOL) &&
		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
			if (mask & IFCAP_WOL_MCAST)
				ifp->if_capenable ^= IFCAP_WOL_MCAST;
			if (mask & IFCAP_WOL_MAGIC)
				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		}
		/* Offload changes require reprogramming the hardware. */
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
			lem_init(adapter);
		VLAN_CAPABILITIES(ifp);
		break;
	    }

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
1072
1073
1074/*********************************************************************
1075 * Init entry point
1076 *
1077 * This routine is used in two ways. It is used by the stack as
1078 * init entry point in network interface structure. It is also used
1079 * by the driver as a hw/sw initialization routine to get to a
1080 * consistent state.
1081 *
1082 * return 0 on success, positive on failure
1083 **********************************************************************/
1084
/*
 * Core (locked) initialization: take the adapter from any state to a
 * fully configured, running state.  Also used as the re-init path after
 * MTU/capability changes and watchdog resets.  Caller must hold the
 * CORE lock; the TX lock is taken internally around lem_stop().
 */
static void
lem_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	u32		pba;

	INIT_DEBUGOUT("lem_init: begin");

	EM_CORE_LOCK_ASSERT(adapter);

	EM_TX_LOCK(adapter);
	lem_stop(adapter);
	EM_TX_UNLOCK(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 *   Note: default does not leave enough room for Jumbo Frame >10k.
	 */
	switch (adapter->hw.mac.type) {
	case e1000_82547:
	case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		/* Record FIFO geometry for the 82547 half-duplex hang
		 * workaround (see lem_82547_move_tail). */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size =
		    (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K.   */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}

	INIT_DEBUGOUT1("lem_init: pba=%dK",pba);
	E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
	
	/* Get the latest mac address, User can use a LAA */
        bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
              ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	/* Initialize the hardware */
	if (lem_hardware_init(adapter)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		return;
	}
	lem_update_link_status(adapter);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);

	/* Set hardware offload abilities */
	ifp->if_hwassist = 0;
	if (adapter->hw.mac.type >= e1000_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
	}

	/* Configure for OS presence */
	lem_init_manageability(adapter);

	/* Prepare transmit descriptors and buffers */
	lem_setup_transmit_structures(adapter);
	lem_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	lem_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (lem_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		EM_TX_LOCK(adapter);
		lem_stop(adapter);
		EM_TX_UNLOCK(adapter);
		return;
	}
	lem_initialize_receive_unit(adapter);

	/* Use real VLAN Filter support? */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
			/* Use real VLAN Filter support */
			lem_setup_vlan_hw_support(adapter);
		else {
			u32 ctrl;
			/* Just enable hardware VLAN tag stripping. */
			ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
			ctrl |= E1000_CTRL_VME;
			E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
		}
	}

	/* Don't lose promiscuous settings */
	lem_set_promisc(adapter);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Start the stats/watchdog timer and clear hardware counters. */
	callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
	e1000_clear_hw_cntrs_base_generic(&adapter->hw);

	/* MSI/X configuration for 82574 */
	if (adapter->hw.mac.type == e1000_82574) {
		int tmp;
		tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_PBA_CLR;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
		/*
		** Set the IVAR - interrupt vector routing.
		** Each nibble represents a vector, high bit
		** is enable, other 3 bits are the MSIX table
		** entry, we map RXQ0 to 0, TXQ0 to 1, and
		** Link (other) to 2, hence the magic number.
		*/
		E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
	}

#ifdef DEVICE_POLLING
	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		lem_disable_intr(adapter);
	else
#endif /* DEVICE_POLLING */
		lem_enable_intr(adapter);

	/* AMT based hardware can now take control from firmware */
	if (adapter->has_manage && adapter->has_amt)
		lem_get_hw_control(adapter);
}
1231
/*
 * Stack-facing init entry point: serialize on the core lock and defer
 * the real work to lem_init_locked().
 */
static void
lem_init(void *arg)
{
	struct adapter *sc;

	sc = arg;
	EM_CORE_LOCK(sc);
	lem_init_locked(sc);
	EM_CORE_UNLOCK(sc);
}
1241
1242
1243#ifdef DEVICE_POLLING
1244/*********************************************************************
1245 *
1246 * Legacy polling routine
1247 *
1248 *********************************************************************/
/*
 * Legacy polling routine (DEVICE_POLLING): perform one RX/TX service
 * pass on behalf of the kernel polling framework.  Returns the number
 * of packets received during this pass.
 */
static int
lem_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	u32		reg_icr, rx_done = 0;

	EM_CORE_LOCK(adapter);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		EM_CORE_UNLOCK(adapter);
		return (rx_done);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		/* Reading ICR acknowledges it; act on link events only. */
		reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.mac.get_link_status = 1;
			lem_update_link_status(adapter);
			callout_reset(&adapter->timer, hz,
			    lem_local_timer, adapter);
		}
	}
	EM_CORE_UNLOCK(adapter);

	/*
	 * RX cleanup runs without the CORE lock here; rx_done accumulates
	 * the received-packet count for the polling framework.
	 */
	lem_rxeof(adapter, count, &rx_done);

	/* TX cleanup, then restart transmission if work is queued. */
	EM_TX_LOCK(adapter);
	lem_txeof(adapter);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		lem_start_locked(ifp);
	EM_TX_UNLOCK(adapter);
	return (rx_done);
}
1282#endif /* DEVICE_POLLING */
1283
1284#ifdef EM_LEGACY_IRQ
1285/*********************************************************************
1286 *
1287 * Legacy Interrupt Service routine
1288 *
1289 *********************************************************************/
/*
 * Legacy (EM_LEGACY_IRQ) interrupt service routine: runs the entire
 * RX/TX/link service cycle directly in interrupt context under the
 * CORE lock (TX lock nested for the descriptor work).
 */
static void
lem_intr(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp = adapter->ifp;
	u32		reg_icr;


	/* In polling mode the interrupt path is not used. */
	if (ifp->if_capenable & IFCAP_POLLING)
		return;

	EM_CORE_LOCK(adapter);
	/* Reading ICR acknowledges all pending causes. */
	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
	if (reg_icr & E1000_ICR_RXO)
		adapter->rx_overruns++;

	/* All-ones: device hot-ejected; zero: not our interrupt. */
	if ((reg_icr == 0xffffffff) || (reg_icr == 0))
		goto out;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto out;

	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		/* Link event: refresh link state, then return without
		 * touching the rings this pass. */
		callout_stop(&adapter->timer);
		adapter->hw.mac.get_link_status = 1;
		lem_update_link_status(adapter);
		/* Deal with TX cruft when link lost */
		lem_tx_purge(adapter);
		callout_reset(&adapter->timer, hz,
		    lem_local_timer, adapter);
		goto out;
	}

	EM_TX_LOCK(adapter);
	lem_rxeof(adapter, -1, NULL);
	lem_txeof(adapter);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		lem_start_locked(ifp);
	EM_TX_UNLOCK(adapter);

out:
	EM_CORE_UNLOCK(adapter);
	return;
}
1335
1336#else /* EM_FAST_IRQ, then fast interrupt routines only */
1337
1338static void
1339lem_handle_link(void *context, int pending)
1340{
1341 struct adapter *adapter = context;
1342 struct ifnet *ifp = adapter->ifp;
1343
1344 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1345 return;
1346
1347 EM_CORE_LOCK(adapter);
1348 callout_stop(&adapter->timer);
1349 lem_update_link_status(adapter);
1350 /* Deal with TX cruft when link lost */
1351 lem_tx_purge(adapter);
1352 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1353 EM_CORE_UNLOCK(adapter);
1354}
1355
1356
1357/* Combined RX/TX handler, used by Legacy and MSI */
1358static void
1359lem_handle_rxtx(void *context, int pending)
1360{
1361 struct adapter *adapter = context;
1362 struct ifnet *ifp = adapter->ifp;
1363
1364
1365 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1366 lem_rxeof(adapter, adapter->rx_process_limit, NULL);
1367 EM_TX_LOCK(adapter);
1368 lem_txeof(adapter);
1369 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1370 lem_start_locked(ifp);
1371 EM_TX_UNLOCK(adapter);
1372 }
1373
1374 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1375 lem_enable_intr(adapter);
1376}
1377
1378/*********************************************************************
1379 *
1380 * Fast Legacy/MSI Combined Interrupt Service routine
1381 *
1382 *********************************************************************/
1383static int
1384lem_irq_fast(void *arg)
1385{
1386 struct adapter *adapter = arg;
1387 struct ifnet *ifp;
1388 u32 reg_icr;
1389
1390 ifp = adapter->ifp;
1391
1392 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1393
1394 /* Hot eject? */
1395 if (reg_icr == 0xffffffff)
1396 return FILTER_STRAY;
1397
1398 /* Definitely not our interrupt. */
1399 if (reg_icr == 0x0)
1400 return FILTER_STRAY;
1401
1402 /*
1403 * Mask interrupts until the taskqueue is finished running. This is
1404 * cheap, just assume that it is needed. This also works around the
1405 * MSI message reordering errata on certain systems.
1406 */
1407 lem_disable_intr(adapter);
1408 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1409
1410 /* Link status change */
1411 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1412 adapter->hw.mac.get_link_status = 1;
1413 taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1414 }
1415
1416 if (reg_icr & E1000_ICR_RXO)
1417 adapter->rx_overruns++;
1418 return FILTER_HANDLED;
1419}
1420#endif /* ~EM_LEGACY_IRQ */
1421
1422
1423/*********************************************************************
1424 *
1425 * Media Ioctl callback
1426 *
1427 * This routine is called whenever the user queries the status of
1428 * the interface using ifconfig.
1429 *
1430 **********************************************************************/
1431static void
1432lem_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1433{
1434 struct adapter *adapter = ifp->if_softc;
1435 u_char fiber_type = IFM_1000_SX;
1436
1437 INIT_DEBUGOUT("lem_media_status: begin");
1438
1439 EM_CORE_LOCK(adapter);
1440 lem_update_link_status(adapter);
1441
1442 ifmr->ifm_status = IFM_AVALID;
1443 ifmr->ifm_active = IFM_ETHER;
1444
1445 if (!adapter->link_active) {
1446 EM_CORE_UNLOCK(adapter);
1447 return;
1448 }
1449
1450 ifmr->ifm_status |= IFM_ACTIVE;
1451
1452 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1453 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1454 if (adapter->hw.mac.type == e1000_82545)
1455 fiber_type = IFM_1000_LX;
1456 ifmr->ifm_active |= fiber_type | IFM_FDX;
1457 } else {
1458 switch (adapter->link_speed) {
1459 case 10:
1460 ifmr->ifm_active |= IFM_10_T;
1461 break;
1462 case 100:
1463 ifmr->ifm_active |= IFM_100_TX;
1464 break;
1465 case 1000:
1466 ifmr->ifm_active |= IFM_1000_T;
1467 break;
1468 }
1469 if (adapter->link_duplex == FULL_DUPLEX)
1470 ifmr->ifm_active |= IFM_FDX;
1471 else
1472 ifmr->ifm_active |= IFM_HDX;
1473 }
1474 EM_CORE_UNLOCK(adapter);
1475}
1476
1477/*********************************************************************
1478 *
1479 * Media Ioctl callback
1480 *
1481 * This routine is called when the user changes speed/duplex using
1482 * media/mediopt option with ifconfig.
1483 *
1484 **********************************************************************/
1485static int
1486lem_media_change(struct ifnet *ifp)
1487{
1488 struct adapter *adapter = ifp->if_softc;
1489 struct ifmedia *ifm = &adapter->media;
1490
1491 INIT_DEBUGOUT("lem_media_change: begin");
1492
1493 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1494 return (EINVAL);
1495
1496 EM_CORE_LOCK(adapter);
1497 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1498 case IFM_AUTO:
1499 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1500 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1501 break;
1502 case IFM_1000_LX:
1503 case IFM_1000_SX:
1504 case IFM_1000_T:
1505 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1506 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
1507 break;
1508 case IFM_100_TX:
1509 adapter->hw.mac.autoneg = FALSE;
1510 adapter->hw.phy.autoneg_advertised = 0;
1511 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1512 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1513 else
1514 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
1515 break;
1516 case IFM_10_T:
1517 adapter->hw.mac.autoneg = FALSE;
1518 adapter->hw.phy.autoneg_advertised = 0;
1519 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1520 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1521 else
1522 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1523 break;
1524 default:
1525 device_printf(adapter->dev, "Unsupported media type\n");
1526 }
1527
1528 lem_init_locked(adapter);
1529 EM_CORE_UNLOCK(adapter);
1530
1531 return (0);
1532}
1533
1534/*********************************************************************
1535 *
1536 * This routine maps the mbufs to tx descriptors.
1537 *
1538 * return 0 on success, positive on failure
1539 **********************************************************************/
1540
/*
 * Map one mbuf chain onto TX descriptors and hand it to the hardware.
 * Caller holds the TX lock.
 *
 * Returns 0 on success.  On ENOBUFS/mapping failures after a defrag,
 * the chain is freed and *m_headp set to NULL; other failures leave
 * the (possibly defragmented) chain with the caller for requeueing.
 */
static int
lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
{
	bus_dma_segment_t	segs[EM_MAX_SCATTER];
	bus_dmamap_t		map;
	struct em_buffer	*tx_buffer, *tx_buffer_mapped;
	struct e1000_tx_desc	*ctxd = NULL;
	struct mbuf		*m_head;
	u32			txd_upper, txd_lower, txd_used, txd_saved;
	int			error, nsegs, i, j, first, last = 0;

	m_head = *m_headp;
	txd_upper = txd_lower = txd_used = txd_saved = 0;

	/*
	** When doing checksum offload, it is critical to
	** make sure the first mbuf has more than header,
	** because that routine expects data to be present.
	*/
	if ((m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) &&
	    (m_head->m_len < ETHER_HDR_LEN + sizeof(struct ip))) {
		m_head = m_pullup(m_head, ETHER_HDR_LEN + sizeof(struct ip));
		*m_headp = m_head;
		if (m_head == NULL)
			return (ENOBUFS);
	}

	/*
	 * Map the packet for DMA
	 *
	 * Capture the first descriptor index,
	 * this descriptor will have the index
	 * of the EOP which is the only one that
	 * now gets a DONE bit writeback.
	 */
	first = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

	/*
	 * There are two types of errors we can (try) to handle:
	 * - EFBIG means the mbuf chain was too long and bus_dma ran
	 *   out of segments.  Defragment the mbuf chain and try again.
	 * - ENOMEM means bus_dma could not obtain enough bounce buffers
	 *   at this point in time.  Defer sending and try again later.
	 * All other errors, in particular EINVAL, are fatal and prevent the
	 * mbuf chain from ever going through.  Drop it and report error.
	 */
	if (error == EFBIG) {
		struct mbuf *m;

		m = m_defrag(*m_headp, M_DONTWAIT);
		if (m == NULL) {
			adapter->mbuf_alloc_failed++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (ENOBUFS);
		}
		*m_headp = m;

		/* Try it again */
		error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

		if (error) {
			/* Second failure is fatal: drop the chain. */
			adapter->no_tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	} else if (error != 0) {
		/* e.g. ENOMEM: caller may requeue and retry later. */
		adapter->no_tx_dma_setup++;
		return (error);
	}

	/* Need the segment count plus two spare descriptors. */
	if (nsegs > (adapter->num_tx_desc_avail - 2)) {
		adapter->no_tx_desc_avail2++;
		bus_dmamap_unload(adapter->txtag, map);
		return (ENOBUFS);
	}
	m_head = *m_headp;

	/* Do hardware assists */
	if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
		lem_transmit_checksum_setup(adapter, m_head,
		    &txd_upper, &txd_lower);

	i = adapter->next_avail_tx_desc;
	/* Remember the start index so the 82544 path can roll back. */
	if (adapter->pcix_82544)
		txd_saved = i;

	/* Set up our transmit descriptors */
	for (j = 0; j < nsegs; j++) {
		bus_size_t seg_len;
		bus_addr_t seg_addr;
		/* If adapter is 82544 and on PCIX bus */
		if(adapter->pcix_82544) {
			DESC_ARRAY	desc_array;
			u32		array_elements, counter;
			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
			array_elements = lem_fill_descriptors(segs[j].ds_addr,
			    segs[j].ds_len, &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				if (txd_used == adapter->num_tx_desc_avail) {
					/* Out of room: undo and bail. */
					adapter->next_avail_tx_desc = txd_saved;
					adapter->no_tx_desc_avail2++;
					bus_dmamap_unload(adapter->txtag, map);
					return (ENOBUFS);
				}
				tx_buffer = &adapter->tx_buffer_area[i];
				ctxd = &adapter->tx_desc_base[i];
				ctxd->buffer_addr = htole64(
				    desc_array.descriptor[counter].address);
				ctxd->lower.data = htole32(
				    (adapter->txd_cmd | txd_lower | (u16)
				    desc_array.descriptor[counter].length));
				ctxd->upper.data =
				    htole32((txd_upper));
				last = i;
				if (++i == adapter->num_tx_desc)
					i = 0;
				tx_buffer->m_head = NULL;
				tx_buffer->next_eop = -1;
				txd_used++;
			}
		} else {
			tx_buffer = &adapter->tx_buffer_area[i];
			ctxd = &adapter->tx_desc_base[i];
			seg_addr = segs[j].ds_addr;
			seg_len  = segs[j].ds_len;
			ctxd->buffer_addr = htole64(seg_addr);
			ctxd->lower.data = htole32(
			    adapter->txd_cmd | txd_lower | seg_len);
			ctxd->upper.data =
			    htole32(txd_upper);
			last = i;
			if (++i == adapter->num_tx_desc)
				i = 0;
			tx_buffer->m_head = NULL;
			tx_buffer->next_eop = -1;
		}
	}

	adapter->next_avail_tx_desc = i;

	if (adapter->pcix_82544)
		adapter->num_tx_desc_avail -= txd_used;
	else
		adapter->num_tx_desc_avail -= nsegs;

	if (m_head->m_flags & M_VLANTAG) {
		/* Set the vlan id. */
		ctxd->upper.fields.special =
		    htole16(m_head->m_pkthdr.ether_vtag);
		/* Tell hardware to add tag */
		ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
	}

	/*
	 * Park the mbuf on the last slot and swap its dmamap with the
	 * first slot's, so the loaded map stays with the mbuf it maps.
	 */
	tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;
	bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

	/*
	 * Last Descriptor of Packet
	 * needs End Of Packet (EOP)
	 * and Report Status (RS)
	 */
	ctxd->lower.data |=
	    htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
	/*
	 * Keep track in the first buffer which
	 * descriptor will be written back
	 */
	tx_buffer = &adapter->tx_buffer_area[first];
	tx_buffer->next_eop = last;
	adapter->watchdog_time = ticks;

	/*
	 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
	 * that this frame is available to transmit.
	 */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	if (adapter->hw.mac.type == e1000_82547 &&
	    adapter->link_duplex == HALF_DUPLEX)
		/* Half-duplex 82547: defer the tail bump (FIFO bug). */
		lem_82547_move_tail(adapter);
	else {
		E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
		if (adapter->hw.mac.type == e1000_82547)
			lem_82547_update_fifo_head(adapter,
			    m_head->m_pkthdr.len);
	}

	return (0);
}
1744
1745/*********************************************************************
1746 *
1747 * 82547 workaround to avoid controller hang in half-duplex environment.
1748 * The workaround is to avoid queuing a large packet that would span
1749 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1750 * in this case. We do that only when FIFO is quiescent.
1751 *
1752 **********************************************************************/
/*
 * 82547 half-duplex workaround: advance the hardware tail pointer one
 * completed packet (EOP) at a time, refusing packets that would span
 * the internal TX FIFO wrap boundary.  When the FIFO cannot yet accept
 * the next packet, re-arm a one-tick callout and retry.  Also used as
 * the callout handler itself, hence the void * argument.
 */
static void
lem_82547_move_tail(void *arg)
{
	struct adapter *adapter = arg;
	struct e1000_tx_desc *tx_desc;
	u16	hw_tdt, sw_tdt, length = 0;
	bool	eop = 0;

	EM_TX_LOCK_ASSERT(adapter);

	hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
	sw_tdt = adapter->next_avail_tx_desc;
	
	/* Walk descriptors the hardware has not been told about yet. */
	while (hw_tdt != sw_tdt) {
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		/* NOTE(review): descriptor fields are little-endian on the
		 * wire; no le16toh here -- confirm on big-endian targets. */
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if (++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if (eop) {
			/* Whole packet accumulated: check FIFO room. */
			if (lem_82547_fifo_workaround(adapter, length)) {
				adapter->tx_fifo_wrk_cnt++;
				/* FIFO busy: retry on the next tick. */
				callout_reset(&adapter->tx_fifo_timer, 1,
	 			    lem_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
			lem_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}	
}
1786
1787static int
1788lem_82547_fifo_workaround(struct adapter *adapter, int len)
1789{
1790 int fifo_space, fifo_pkt_len;
1791
1792 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1793
1794 if (adapter->link_duplex == HALF_DUPLEX) {
1795 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1796
1797 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1798 if (lem_82547_tx_fifo_reset(adapter))
1799 return (0);
1800 else
1801 return (1);
1802 }
1803 }
1804
1805 return (0);
1806}
1807
1808static void
1809lem_82547_update_fifo_head(struct adapter *adapter, int len)
1810{
1811 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1812
1813 /* tx_fifo_head is always 16 byte aligned */
1814 adapter->tx_fifo_head += fifo_pkt_len;
1815 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1816 adapter->tx_fifo_head -= adapter->tx_fifo_size;
1817 }
1818}
1819
1820
/*
 * Reset the 82547's on-chip TX FIFO pointers, but only when the TX
 * path looks fully quiescent: descriptor ring head == tail, FIFO
 * head == tail (TDFH/TDFT and the TDFHS/TDFTS pair -- presumably the
 * saved copies; confirm against the 8254x manual), and the FIFO packet
 * count (TDFPC) is zero.  Returns TRUE if the reset was performed,
 * FALSE if the FIFO was still busy.
 */
static int
lem_82547_tx_fifo_reset(struct adapter *adapter)
{
	u32 tctl;

	if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
	    E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFT) == 
	    E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
	    E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
		/* Disable TX unit */
		tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
		E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
		    tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
		    adapter->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
		E1000_WRITE_FLUSH(&adapter->hw);

		/* Software head tracks the hardware: back to zero. */
		adapter->tx_fifo_head = 0;
		adapter->tx_fifo_reset_cnt++;

		return (TRUE);
	}
	else {
		return (FALSE);
	}
}
1861
1862static void
1863lem_set_promisc(struct adapter *adapter)
1864{
1865 struct ifnet *ifp = adapter->ifp;
1866 u32 reg_rctl;
1867
1868 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1869
1870 if (ifp->if_flags & IFF_PROMISC) {
1871 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1872 /* Turn this on if you want to see bad packets */
1873 if (lem_debug_sbp)
1874 reg_rctl |= E1000_RCTL_SBP;
1875 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1876 } else if (ifp->if_flags & IFF_ALLMULTI) {
1877 reg_rctl |= E1000_RCTL_MPE;
1878 reg_rctl &= ~E1000_RCTL_UPE;
1879 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1880 }
1881}
1882
1883static void
1884lem_disable_promisc(struct adapter *adapter)
1885{
1886 u32 reg_rctl;
1887
1888 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1889
1890 reg_rctl &= (~E1000_RCTL_UPE);
1891 reg_rctl &= (~E1000_RCTL_MPE);
1892 reg_rctl &= (~E1000_RCTL_SBP);
1893 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1894}
1895
1896
1897/*********************************************************************
1898 * Multicast Update
1899 *
1900 * This routine is called whenever multicast address list is updated.
1901 *
1902 **********************************************************************/
1903
/*
 * Rebuild the hardware multicast filter from the interface's current
 * multicast address list.  If more than MAX_NUM_MULTICAST_ADDRESSES
 * are configured, fall back to accepting all multicast (MPE).
 */
static void
lem_set_multi(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	struct ifmultiaddr *ifma;
	u32 reg_rctl = 0;
	u8  *mta; /* Multicast array memory */
	int mcnt = 0;

	IOCTL_DEBUGOUT("lem_set_multi: begin");

	mta = adapter->mta;
	bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

	/*
	 * 82542 rev 2.0: hold the receiver in reset (and turn off MWI)
	 * while the multicast table is updated -- hardware workaround.
	 */
	if (adapter->hw.mac.type == e1000_82542 && 
	    adapter->hw.revision_id == E1000_REVISION_2) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			e1000_pci_clear_mwi(&adapter->hw);
		reg_rctl |= E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
		msec_delay(5);
	}

	/* Collect up to the hardware limit of multicast addresses. */
#if __FreeBSD_version < 800000
	IF_ADDR_LOCK(ifp);
#else
	if_maddr_rlock(ifp);
#endif
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
		mcnt++;
	}
#if __FreeBSD_version < 800000
	IF_ADDR_UNLOCK(ifp);
#else
	if_maddr_runlock(ifp);
#endif
	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		/* Too many entries: accept all multicast instead. */
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
	} else
		e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);

	/* 82542 rev 2.0: release the receiver reset, restore MWI. */
	if (adapter->hw.mac.type == e1000_82542 && 
	    adapter->hw.revision_id == E1000_REVISION_2) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		reg_rctl &= ~E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
		msec_delay(5);
		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			e1000_pci_set_mwi(&adapter->hw);
	}
}
1966
1967
1968/*********************************************************************
1969 * Timer routine
1970 *
1971 * This routine checks for link status and updates statistics.
1972 *
1973 **********************************************************************/
1974
1975static void
1976lem_local_timer(void *arg)
1977{
1978 struct adapter *adapter = arg;
1979
1980 EM_CORE_LOCK_ASSERT(adapter);
1981
1982 lem_update_link_status(adapter);
1983 lem_update_stats_counters(adapter);
1984
1985 lem_smartspeed(adapter);
1986
1987 /*
1988 * We check the watchdog: the time since
1989 * the last TX descriptor was cleaned.
1990 * This implies a functional TX engine.
1991 */
1992 if ((adapter->watchdog_check == TRUE) &&
1993 (ticks - adapter->watchdog_time > EM_WATCHDOG))
1994 goto hung;
1995
1996 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1997 return;
1998hung:
1999 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2000 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2001 adapter->watchdog_events++;
2002 lem_init_locked(adapter);
2003}
2004
/*
 * Query the MAC/PHY for the current link state and, on a transition,
 * update cached speed/duplex, the interface baudrate, the watchdog
 * arming, and notify the stack via if_link_state_change().
 */
static void
lem_update_link_status(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	u32 link_check = 0;

	/* Get the cached link value or read phy for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			if (link_check) /* ESB2 fix */
				e1000_cfg_on_link_up(hw);
		} else
			link_check = TRUE;
		break;
	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check = (E1000_READ_REG(hw, E1000_STATUS) &
                                 E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = adapter->hw.mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		/* Unknown media: link_check stays 0 (treated as down). */
		break;
	}

	/* Now check for a transition */
	if (link_check && (adapter->link_active == 0)) {
		/* Down -> up: cache speed/duplex and tell the stack. */
		e1000_get_speed_and_duplex(hw, &adapter->link_speed,
		    &adapter->link_duplex);
		if (bootverbose)
			device_printf(dev, "Link is up %d Mbps %s\n",
			    adapter->link_speed,
			    ((adapter->link_duplex == FULL_DUPLEX) ?
			    "Full Duplex" : "Half Duplex"));
		adapter->link_active = 1;
		adapter->smartspeed = 0;
		ifp->if_baudrate = adapter->link_speed * 1000000;
		if_link_state_change(ifp, LINK_STATE_UP);
	} else if (!link_check && (adapter->link_active == 1)) {
		/* Up -> down: clear cached state and tell the stack. */
		ifp->if_baudrate = adapter->link_speed = 0;
		adapter->link_duplex = 0;
		if (bootverbose)
			device_printf(dev, "Link is Down\n");
		adapter->link_active = 0;
		/* Link down, disable watchdog */
		adapter->watchdog_check = FALSE;
		if_link_state_change(ifp, LINK_STATE_DOWN);
	}
}
2063
2064/*********************************************************************
2065 *
2066 * This routine disables all traffic on the adapter by issuing a
2067 * global reset on the MAC and deallocates TX/RX buffers.
2068 *
2069 * This routine should always be called with BOTH the CORE
2070 * and TX locks.
2071 **********************************************************************/
2072
static void
lem_stop(void *arg)
{
	struct adapter *adapter = arg;
	struct ifnet *ifp = adapter->ifp;

	EM_CORE_LOCK_ASSERT(adapter);
	EM_TX_LOCK_ASSERT(adapter);

	INIT_DEBUGOUT("lem_stop: begin");

	/* Mask all interrupts and stop the periodic timers first. */
	lem_disable_intr(adapter);
	callout_stop(&adapter->timer);
	callout_stop(&adapter->tx_fifo_timer);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* A global MAC reset silences all traffic. */
	e1000_reset_hw(&adapter->hw);
	/* Clear the Wakeup Control register on MACs that the driver
	 * considers new enough to have one (>= 82544 in mac_type order). */
	if (adapter->hw.mac.type >= e1000_82544)
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);

	/* Turn the activity LED off and restore its default control. */
	e1000_led_off(&adapter->hw);
	e1000_cleanup_led(&adapter->hw);
}
2098
2099
2100/*********************************************************************
2101 *
2102 * Determine hardware revision.
2103 *
2104 **********************************************************************/
static void
lem_identify_hardware(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	/*
	 * Make sure our PCI config space has the necessary stuff set:
	 * some firmware leaves memory access and/or bus mastering
	 * disabled, so enable the bits ourselves and write them back.
	 */
	adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
	    (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
		device_printf(dev, "Memory Access and/or Bus Master bits "
		    "were not set!\n");
		adapter->hw.bus.pci_cmd_word |=
		    (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
		pci_write_config(dev, PCIR_COMMAND,
		    adapter->hw.bus.pci_cmd_word, 2);
	}

	/* Save off the information about this board */
	adapter->hw.vendor_id = pci_get_vendor(dev);
	adapter->hw.device_id = pci_get_device(dev);
	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	adapter->hw.subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	adapter->hw.subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	/*
	 * Do Shared Code Init and Setup: derives hw.mac.type from the
	 * device id so later code can key off the MAC generation.
	 */
	if (e1000_set_mac_type(&adapter->hw)) {
		device_printf(dev, "Setup init failure\n");
		return;
	}
}
2137
static int
lem_allocate_pci_resources(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int val, rid, error = E1000_SUCCESS;

	/* Map the device register BAR (BAR 0). */
	rid = PCIR_BAR(0);
	adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (adapter->memory == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->memory);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->memory);
	/* Shared code accesses registers through this opaque pointer. */
	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;

	/*
	 * MAC types after the 82543 (in mac_type enum order) also get an
	 * I/O port BAR mapped here; the BAR list is walked to find it.
	 */
	if (adapter->hw.mac.type > e1000_82543) {
		/* Figure out where our IO BAR is */
		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
			val = pci_read_config(dev, rid, 4);
			if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
				adapter->io_rid = rid;
				break;
			}
			rid += 4;
			/* check for 64bit BAR: it occupies two dwords */
			if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
				rid += 4;
		}
		/*
		 * NOTE(review): on the ENXIO paths below the memory BAR
		 * allocated above is not released here; presumably the
		 * caller invokes lem_free_pci_resources() on failure --
		 * confirm against the attach path.
		 */
		if (rid >= PCIR_CIS) {
			device_printf(dev, "Unable to locate IO BAR\n");
			return (ENXIO);
		}
		adapter->ioport = bus_alloc_resource_any(dev,
		    SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
		if (adapter->ioport == NULL) {
			device_printf(dev, "Unable to allocate bus resource: "
			    "ioport\n");
			return (ENXIO);
		}
		adapter->hw.io_base = 0;
		adapter->osdep.io_bus_space_tag =
		    rman_get_bustag(adapter->ioport);
		adapter->osdep.io_bus_space_handle =
		    rman_get_bushandle(adapter->ioport);
	}

	/* Hand the osdep back-pointer to the shared code. */
	adapter->hw.back = &adapter->osdep;

	return (error);
}
2193
2194/*********************************************************************
2195 *
2196 * Setup the Legacy or MSI Interrupt handler
2197 *
2198 **********************************************************************/
int
lem_allocate_irq(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int error, rid = 0;

	/* Manually turn off all interrupts */
	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);

	/* We allocate a single interrupt resource */
	adapter->res[0] = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (adapter->res[0] == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		return (ENXIO);
	}

#ifdef EM_LEGACY_IRQ
	/* We do Legacy setup: a single ithread-context handler. */
	if ((error = bus_setup_intr(dev, adapter->res[0],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, lem_intr, adapter,
	    &adapter->tag[0])) != 0) {
		device_printf(dev, "Failed to register interrupt handler");
		return (error);
	}

#else /* FAST_IRQ */
	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts: lem_irq_fast runs as a filter and hands
	 * the real work to the rxtx/link tasks on a fast taskqueue.
	 */
	TASK_INIT(&adapter->rxtx_task, 0, lem_handle_rxtx, adapter);
	TASK_INIT(&adapter->link_task, 0, lem_handle_link, adapter);
	adapter->tq = taskqueue_create_fast("lem_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(adapter->dev));
	if ((error = bus_setup_intr(dev, adapter->res[0],
	    INTR_TYPE_NET, lem_irq_fast, NULL, adapter,
	    &adapter->tag[0])) != 0) {
		device_printf(dev, "Failed to register fast interrupt "
		    "handler: %d\n", error);
		/* Undo the taskqueue setup on failure. */
		taskqueue_free(adapter->tq);
		adapter->tq = NULL;
		return (error);
	}
#endif /* EM_LEGACY_IRQ */

	return (0);
}
2250
2251
2252static void
2253lem_free_pci_resources(struct adapter *adapter)
2254{
2255 device_t dev = adapter->dev;
2256
2257
2258 if (adapter->tag[0] != NULL) {
2259 bus_teardown_intr(dev, adapter->res[0],
2260 adapter->tag[0]);
2261 adapter->tag[0] = NULL;
2262 }
2263
2264 if (adapter->res[0] != NULL) {
2265 bus_release_resource(dev, SYS_RES_IRQ,
2266 0, adapter->res[0]);
2267 }
2268
2269 if (adapter->memory != NULL)
2270 bus_release_resource(dev, SYS_RES_MEMORY,
2271 PCIR_BAR(0), adapter->memory);
2272
2273 if (adapter->ioport != NULL)
2274 bus_release_resource(dev, SYS_RES_IOPORT,
2275 adapter->io_rid, adapter->ioport);
2276}
2277
2278
2279/*********************************************************************
2280 *
2281 * Initialize the hardware to a configuration
2282 * as specified by the adapter structure.
2283 *
2284 **********************************************************************/
static int
lem_hardware_init(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	u16 rx_buffer_size;

	INIT_DEBUGOUT("lem_hardware_init: begin");

	/* Issue a global reset */
	e1000_reset_hw(&adapter->hw);

	/* When hardware is reset, fifo_head is also reset */
	adapter->tx_fifo_head = 0;

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	/* PBA reports the packet buffer size; << 10 scales it to bytes. */
	rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
	    0xffff) << 10 );

	adapter->hw.fc.high_water = rx_buffer_size -
	    roundup2(adapter->max_frame_size, 1024);
	adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;

	adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
	adapter->hw.fc.send_xon = TRUE;

	/* Set Flow control, use the tunable location if sane */
	if ((lem_fc_setting >= 0) && (lem_fc_setting < 4))
		adapter->hw.fc.requested_mode = lem_fc_setting;
	else
		adapter->hw.fc.requested_mode = e1000_fc_none;

	/* Shared-code init programs MAC, flow control, link setup, etc. */
	if (e1000_init_hw(&adapter->hw) < 0) {
		device_printf(dev, "Hardware Initialization Failed\n");
		return (EIO);
	}

	/* Kick off the initial link check. */
	e1000_check_for_link(&adapter->hw);

	return (0);
}
2338
2339/*********************************************************************
2340 *
2341 * Setup networking device structure and register an interface.
2342 *
2343 **********************************************************************/
static int
lem_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet *ifp;

	INIT_DEBUGOUT("lem_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		return (-1);
	}
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_init = lem_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = lem_ioctl;
	ifp->if_start = lem_start;
	/* Size the software send queue to the TX ring, one slot spare. */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
	IFQ_SET_READY(&ifp->if_snd);

	ether_ifattach(ifp, adapter->hw.mac.addr);

	ifp->if_capabilities = ifp->if_capenable = 0;

	/* Checksum offload is only advertised on 82543-or-later MACs. */
	if (adapter->hw.mac.type >= e1000_82543) {
		ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
		ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
	}

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;

	/*
	** Don't turn this on by default, if vlans are
	** created on another pseudo device (eg. lagg)
	** then vlan events are not passed thru, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the em driver you can
	** enable this and get full hardware tag filtering.
	*/
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/* Enable only WOL MAGIC by default */
	if (adapter->wol) {
		ifp->if_capabilities |= IFCAP_WOL;
		ifp->if_capenable |= IFCAP_WOL_MAGIC;
	}

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK,
	    lem_media_change, lem_media_status);
	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
		u_char fiber_type = IFM_1000_SX;	/* default type */

		/* The 82545 fiber variant is advertised as LX, not SX. */
		if (adapter->hw.mac.type == e1000_82545)
			fiber_type = IFM_1000_LX;
		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
		    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
	} else {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
		    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		/* 1000baseT media is not offered with the IFE PHY. */
		if (adapter->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&adapter->media,
			    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
			ifmedia_add(&adapter->media,
			    IFM_ETHER | IFM_1000_T, 0, NULL);
		}
	}
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
	return (0);
}
2436
2437
2438/*********************************************************************
2439 *
2440 * Workaround for SmartSpeed on 82541 and 82547 controllers
2441 *
2442 **********************************************************************/
static void
lem_smartspeed(struct adapter *adapter)
{
	u16 phy_tmp;

	/*
	 * Only relevant while the link is down on an IGP PHY that is
	 * autonegotiating and advertising 1000FULL; otherwise there is
	 * nothing to work around.
	 */
	if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
	    adapter->hw.mac.autoneg == 0 ||
	    (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
			e1000_read_phy_reg(&adapter->hw,
			    PHY_1000T_CTRL, &phy_tmp);
			if(phy_tmp & CR_1000T_MS_ENABLE) {
				/* Drop the manual master/slave setting
				 * and restart autonegotiation. */
				phy_tmp &= ~CR_1000T_MS_ENABLE;
				e1000_write_phy_reg(&adapter->hw,
				    PHY_1000T_CTRL, phy_tmp);
				adapter->smartspeed++;
				if(adapter->hw.mac.autoneg &&
				   !e1000_copper_link_autoneg(&adapter->hw) &&
				   !e1000_read_phy_reg(&adapter->hw,
				    PHY_CONTROL, &phy_tmp)) {
					phy_tmp |= (MII_CR_AUTO_NEG_EN |
						    MII_CR_RESTART_AUTO_NEG);
					e1000_write_phy_reg(&adapter->hw,
					    PHY_CONTROL, phy_tmp);
				}
			}
		}
		return;
	} else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
		/* Re-enable the manual master/slave mode and renegotiate. */
		phy_tmp |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
		if(adapter->hw.mac.autoneg &&
		   !e1000_copper_link_autoneg(&adapter->hw) &&
		   !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
			phy_tmp |= (MII_CR_AUTO_NEG_EN |
				    MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
		}
	}
	/* Restart process after EM_SMARTSPEED_MAX iterations */
	if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
2497
2498
2499/*
2500 * Manage DMA'able memory.
2501 */
2502static void
2503lem_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2504{
2505 if (error)
2506 return;
2507 *(bus_addr_t *) arg = segs[0].ds_addr;
2508}
2509
/*
 * Allocate a physically contiguous, DMA-coherent region of "size"
 * bytes and fill in the tag/map/vaddr/paddr fields of "dma".
 * Returns 0 on success, or the bus_dma error with "dma" zeroed-out
 * enough for a later lem_dma_free() to be a no-op.
 */
static int
lem_dma_malloc(struct adapter *adapter, bus_size_t size,
        struct em_dma_alloc *dma, int mapflags)
{
	int error;

	/* One segment only, aligned to EM_DBA_ALIGN. */
	error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
				EM_DBA_ALIGN, 0,	/* alignment, bounds */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				size,			/* maxsize */
				1,			/* nsegments */
				size,			/* maxsegsize */
				0,			/* flags */
				NULL,			/* lockfunc */
				NULL,			/* lockarg */
				&dma->dma_tag);
	if (error) {
		device_printf(adapter->dev,
		    "%s: bus_dma_tag_create failed: %d\n",
		    __func__, error);
		goto fail_0;
	}

	error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
	if (error) {
		device_printf(adapter->dev,
		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
		    __func__, (uintmax_t)size, error);
		goto fail_2;
	}

	dma->dma_paddr = 0;
	/* lem_dmamap_cb() stores the lone segment address in dma_paddr. */
	error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
	    size, lem_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
	if (error || dma->dma_paddr == 0) {
		device_printf(adapter->dev,
		    "%s: bus_dmamap_load failed: %d\n",
		    __func__, error);
		goto fail_3;
	}

	return (0);

/* Unwind in reverse order; label numbering is historical (no fail_1). */
fail_3:
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_map = NULL;
	dma->dma_tag = NULL;

	return (error);
}
2567
2568static void
2569lem_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2570{
2571 if (dma->dma_tag == NULL)
2572 return;
2573 if (dma->dma_map != NULL) {
2574 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2575 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2576 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2577 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2578 dma->dma_map = NULL;
2579 }
2580 bus_dma_tag_destroy(dma->dma_tag);
2581 dma->dma_tag = NULL;
2582}
2583
2584
2585/*********************************************************************
2586 *
2587 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2588 * the information needed to transmit a packet on the wire.
2589 *
2590 **********************************************************************/
static int
lem_allocate_transmit_structures(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct em_buffer *tx_buffer;
	int error;

	/*
	 * Create DMA tags for tx descriptors
	 */
	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
				1, 0,			/* alignment, bounds */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MCLBYTES * EM_MAX_SCATTER,	/* maxsize */
				EM_MAX_SCATTER,		/* nsegments */
				MCLBYTES,		/* maxsegsize */
				0,			/* flags */
				NULL,			/* lockfunc */
				NULL,			/* lockarg */
				&adapter->txtag)) != 0) {
		device_printf(dev, "Unable to allocate TX DMA tag\n");
		goto fail;
	}

	/* One em_buffer bookkeeping slot per TX descriptor. */
	adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (adapter->tx_buffer_area == NULL) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
	for (int i = 0; i < adapter->num_tx_desc; i++) {
		tx_buffer = &adapter->tx_buffer_area[i];
		error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
		if (error != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
		/* -1 marks "no packet pending" for lem_txeof(). */
		tx_buffer->next_eop = -1;
	}

	return (0);
fail:
	/* Frees whatever was set up above; safe on partial state. */
	lem_free_transmit_structures(adapter);
	return (error);
}
2641
2642/*********************************************************************
2643 *
2644 * (Re)Initialize transmit structures.
2645 *
2646 **********************************************************************/
2647static void
2648lem_setup_transmit_structures(struct adapter *adapter)
2649{
2650 struct em_buffer *tx_buffer;
2651#ifdef DEV_NETMAP
2652 /* we are already locked */
2653 struct netmap_adapter *na = NA(adapter->ifp);
2654 struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0);
2655#endif /* DEV_NETMAP */
2656
2657 /* Clear the old ring contents */
2658 bzero(adapter->tx_desc_base,
2659 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
2660
2661 /* Free any existing TX buffers */
2662 for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2663 tx_buffer = &adapter->tx_buffer_area[i];
2664 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2665 BUS_DMASYNC_POSTWRITE);
2666 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2667 m_freem(tx_buffer->m_head);
2668 tx_buffer->m_head = NULL;
2669#ifdef DEV_NETMAP
2670 if (slot) {
2671 /* slot si is mapped to the i-th NIC-ring entry */
2672 int si = i + na->tx_rings[0].nkr_hwofs;
2673 uint64_t paddr;
2674 void *addr;
2675
2676 if (si > na->num_tx_desc)
2677 si -= na->num_tx_desc;
2678 addr = PNMB(slot + si, &paddr);
2679 adapter->tx_desc_base[si].buffer_addr = htole64(paddr);
2680 /* reload the map for netmap mode */
2681 netmap_load_map(adapter->txtag, tx_buffer->map, addr);
2682 }
2683#endif /* DEV_NETMAP */
2684 tx_buffer->next_eop = -1;
2685 }
2686
2687 /* Reset state */
2688 adapter->last_hw_offload = 0;
2689 adapter->next_avail_tx_desc = 0;
2690 adapter->next_tx_to_clean = 0;
2691 adapter->num_tx_desc_avail = adapter->num_tx_desc;
2692
2693 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2694 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2695
2696 return;
2697}
2698
2699/*********************************************************************
2700 *
2701 * Enable transmit unit.
2702 *
2703 **********************************************************************/
static void
lem_initialize_transmit_unit(struct adapter *adapter)
{
	u32	tctl, tipg = 0;
	u64	bus_addr;

	INIT_DEBUGOUT("lem_initialize_transmit_unit: begin");
	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = adapter->txdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
	    adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
	    (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
	    (u32)bus_addr);
	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
	E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);

	HW_DEBUGOUT2("Base = %x, Length = %x\n",
	    E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
	    E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (adapter->hw.mac.type) {
	case e1000_82542:
		tipg = DEFAULT_82542_TIPG_IPGT;
		tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	default:
		/* Fiber/serdes links get a different IPGT default. */
		if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
		    (adapter->hw.phy.media_type ==
		    e1000_media_type_internal_serdes))
			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
	}

	E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
	E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
	/* TADV (absolute int delay) is only written on >= 82540 types. */
	if(adapter->hw.mac.type >= e1000_82540)
		E1000_WRITE_REG(&adapter->hw, E1000_TADV,
		    adapter->tx_abs_int_delay.value);

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);

	/* Setup Transmit Descriptor Base Settings */
	adapter->txd_cmd = E1000_TXD_CMD_IFCS;

	/* Request delayed descriptor-done interrupts when a delay is set. */
	if (adapter->tx_int_delay.value > 0)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
}
2766
2767/*********************************************************************
2768 *
2769 * Free all transmit related data structures.
2770 *
2771 **********************************************************************/
2772static void
2773lem_free_transmit_structures(struct adapter *adapter)
2774{
2775 struct em_buffer *tx_buffer;
2776
2777 INIT_DEBUGOUT("free_transmit_structures: begin");
2778
2779 if (adapter->tx_buffer_area != NULL) {
2780 for (int i = 0; i < adapter->num_tx_desc; i++) {
2781 tx_buffer = &adapter->tx_buffer_area[i];
2782 if (tx_buffer->m_head != NULL) {
2783 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2784 BUS_DMASYNC_POSTWRITE);
2785 bus_dmamap_unload(adapter->txtag,
2786 tx_buffer->map);
2787 m_freem(tx_buffer->m_head);
2788 tx_buffer->m_head = NULL;
2789 } else if (tx_buffer->map != NULL)
2790 bus_dmamap_unload(adapter->txtag,
2791 tx_buffer->map);
2792 if (tx_buffer->map != NULL) {
2793 bus_dmamap_destroy(adapter->txtag,
2794 tx_buffer->map);
2795 tx_buffer->map = NULL;
2796 }
2797 }
2798 }
2799 if (adapter->tx_buffer_area != NULL) {
2800 free(adapter->tx_buffer_area, M_DEVBUF);
2801 adapter->tx_buffer_area = NULL;
2802 }
2803 if (adapter->txtag != NULL) {
2804 bus_dma_tag_destroy(adapter->txtag);
2805 adapter->txtag = NULL;
2806 }
2807#if __FreeBSD_version >= 800000
2808 if (adapter->br != NULL)
2809 buf_ring_free(adapter->br, M_DEVBUF);
2810#endif
2811}
2812
2813/*********************************************************************
2814 *
2815 * The offload context needs to be set when we transfer the first
2816 * packet of a particular protocol (TCP/UDP). This routine has been
2817 * enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
2818 *
2819 * Added back the old method of keeping the current context type
2820 * and not setting if unnecessary, as this is reported to be a
2821 * big performance win. -jfv
2822 **********************************************************************/
static void
lem_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
    u32 *txd_upper, u32 *txd_lower)
{
	struct e1000_context_desc *TXD = NULL;
	struct em_buffer *tx_buffer;
	struct ether_vlan_header *eh;
	struct ip *ip = NULL;
	struct ip6_hdr *ip6;
	int curr_txd, ehdrlen;
	u32 cmd, hdr_len, ip_hlen;
	u16 etype;
	u8 ipproto;


	cmd = hdr_len = ipproto = 0;
	*txd_upper = *txd_lower = 0;
	curr_txd = adapter->next_avail_tx_desc;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/*
	 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
	 * TODO: Support SCTP too when it hits the tree.
	 */
	switch (etype) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);
		ip_hlen = ip->ip_hl << 2;

		/* Setup of IP header checksum. */
		if (mp->m_pkthdr.csum_flags & CSUM_IP) {
			/*
			 * Start offset for header checksum calculation.
			 * End offset for header checksum calculation.
			 * Offset of place to put the checksum.
			 */
			TXD = (struct e1000_context_desc *)
			    &adapter->tx_desc_base[curr_txd];
			TXD->lower_setup.ip_fields.ipcss = ehdrlen;
			TXD->lower_setup.ip_fields.ipcse =
			    htole16(ehdrlen + ip_hlen);
			TXD->lower_setup.ip_fields.ipcso =
			    ehdrlen + offsetof(struct ip, ip_sum);
			cmd |= E1000_TXD_CMD_IP;
			*txd_upper |= E1000_TXD_POPTS_IXSM << 8;
		}

		hdr_len = ehdrlen + ip_hlen;
		ipproto = ip->ip_p;

		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */

		/* IPv6 doesn't have a header checksum. */

		hdr_len = ehdrlen + ip_hlen;
		ipproto = ip6->ip6_nxt;
		break;

	default:
		/* Neither IPv4 nor IPv6: nothing to offload. */
		return;
	}

	switch (ipproto) {
	case IPPROTO_TCP:
		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
			/* no need for context if already set */
			if (adapter->last_hw_offload == CSUM_TCP)
				return;
			adapter->last_hw_offload = CSUM_TCP;
			/*
			 * Start offset for payload checksum calculation.
			 * End offset for payload checksum calculation.
			 * Offset of place to put the checksum.
			 */
			TXD = (struct e1000_context_desc *)
			    &adapter->tx_desc_base[curr_txd];
			TXD->upper_setup.tcp_fields.tucss = hdr_len;
			TXD->upper_setup.tcp_fields.tucse = htole16(0);
			TXD->upper_setup.tcp_fields.tucso =
			    hdr_len + offsetof(struct tcphdr, th_sum);
			cmd |= E1000_TXD_CMD_TCP;
		}
		break;
	case IPPROTO_UDP:
	{
		if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
			/* no need for context if already set */
			if (adapter->last_hw_offload == CSUM_UDP)
				return;
			adapter->last_hw_offload = CSUM_UDP;
			/*
			 * Start offset for header checksum calculation.
			 * End offset for header checksum calculation.
			 * Offset of place to put the checksum.
			 */
			TXD = (struct e1000_context_desc *)
			    &adapter->tx_desc_base[curr_txd];
			TXD->upper_setup.tcp_fields.tucss = hdr_len;
			TXD->upper_setup.tcp_fields.tucse = htole16(0);
			TXD->upper_setup.tcp_fields.tucso =
			    hdr_len + offsetof(struct udphdr, uh_sum);
		}
		/* Fall Thru */
	}
	default:
		break;
	}

	/* If no context descriptor was filled in there is nothing to post. */
	if (TXD == NULL)
		return;
	TXD->tcp_seg_setup.data = htole32(0);
	TXD->cmd_and_length =
	    htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
	/* The context descriptor consumes a ring slot with no mbuf. */
	tx_buffer = &adapter->tx_buffer_area[curr_txd];
	tx_buffer->m_head = NULL;
	tx_buffer->next_eop = -1;

	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;
}
2966
2967
2968/**********************************************************************
2969 *
2970 * Examine each tx_buffer in the used queue. If the hardware is done
2971 * processing the packet then free associated resources. The
2972 * tx_buffer is put back on the free queue.
2973 *
2974 **********************************************************************/
static void
lem_txeof(struct adapter *adapter)
{
	int first, last, done, num_avail;
	struct em_buffer *tx_buffer;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct ifnet *ifp = adapter->ifp;

	EM_TX_LOCK_ASSERT(adapter);

#ifdef DEV_NETMAP
	/* In netmap mode the ring is owned by the netmap client; just
	 * wake up any thread sleeping on the TX ring and return. */
	if (ifp->if_capenable & IFCAP_NETMAP) {
		selwakeuppri(&NA(ifp)->tx_rings[0].si, PI_NET);
		return;
	}
#endif /* DEV_NETMAP */
	/* Every descriptor is already free: nothing to reclaim. */
	if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
		return;

	num_avail = adapter->num_tx_desc_avail;
	first = adapter->next_tx_to_clean;
	tx_desc = &adapter->tx_desc_base[first];
	tx_buffer = &adapter->tx_buffer_area[first];
	/* next_eop was recorded at transmit time; -1 means "no packet". */
	last = tx_buffer->next_eop;
	eop_desc = &adapter->tx_desc_base[last];

	/*
	 * What this does is get the index of the
	 * first descriptor AFTER the EOP of the
	 * first packet, that way we can do the
	 * simple comparison on the inner while loop.
	 */
	if (++last == adapter->num_tx_desc)
		last = 0;
	done = last;

	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	/* Outer loop: one iteration per packet whose EOP descriptor has
	 * the Descriptor Done (DD) status bit set by the hardware. */
	while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
		/* We clean the range of the packet */
		while (first != done) {
			tx_desc->upper.data = 0;
			tx_desc->lower.data = 0;
			tx_desc->buffer_addr = 0;
			++num_avail;

			/* Only the first descriptor of a packet carries
			 * the mbuf; unload its DMA map and free it. */
			if (tx_buffer->m_head) {
				ifp->if_opackets++;
				bus_dmamap_sync(adapter->txtag,
				    tx_buffer->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(adapter->txtag,
				    tx_buffer->map);

				m_freem(tx_buffer->m_head);
				tx_buffer->m_head = NULL;
			}
			tx_buffer->next_eop = -1;
			/* Progress was made: pet the watchdog timer. */
			adapter->watchdog_time = ticks;

			if (++first == adapter->num_tx_desc)
				first = 0;

			tx_buffer = &adapter->tx_buffer_area[first];
			tx_desc = &adapter->tx_desc_base[first];
		}
		/* See if we can continue to the next packet */
		last = tx_buffer->next_eop;
		if (last != -1) {
			eop_desc = &adapter->tx_desc_base[last];
			/* Get new done point */
			if (++last == adapter->num_tx_desc) last = 0;
			done = last;
		} else
			break;
	}
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	adapter->next_tx_to_clean = first;
	adapter->num_tx_desc_avail = num_avail;

	/*
	 * If we have enough room, clear IFF_DRV_OACTIVE to
	 * tell the stack that it is OK to send packets.
	 * If there are no pending descriptors, clear the watchdog.
	 */
	if (adapter->num_tx_desc_avail > EM_TX_CLEANUP_THRESHOLD) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (adapter->num_tx_desc_avail == adapter->num_tx_desc) {
			adapter->watchdog_check = FALSE;
			return;
		}
	}
}
3071
3072/*********************************************************************
3073 *
3074 * When Link is lost sometimes there is work still in the TX ring
3075 * which may result in a watchdog, rather than allow that we do an
3076 * attempted cleanup and then reinit here. Note that this has been
 * seen mostly with fiber adapters.
3078 *
3079 **********************************************************************/
3080static void
3081lem_tx_purge(struct adapter *adapter)
3082{
3083 if ((!adapter->link_active) && (adapter->watchdog_check)) {
3084 EM_TX_LOCK(adapter);
3085 lem_txeof(adapter);
3086 EM_TX_UNLOCK(adapter);
3087 if (adapter->watchdog_check) /* Still outstanding? */
3088 lem_init_locked(adapter);
3089 }
3090}
3091
3092/*********************************************************************
3093 *
3094 * Get a buffer from system mbuf buffer pool.
3095 *
3096 **********************************************************************/
3097static int
3098lem_get_buf(struct adapter *adapter, int i)
3099{
3100 struct mbuf *m;
3101 bus_dma_segment_t segs[1];
3102 bus_dmamap_t map;
3103 struct em_buffer *rx_buffer;
3104 int error, nsegs;
3105
3106 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3107 if (m == NULL) {
3108 adapter->mbuf_cluster_failed++;
3109 return (ENOBUFS);
3110 }
3111 m->m_len = m->m_pkthdr.len = MCLBYTES;
3112
3113 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
3114 m_adj(m, ETHER_ALIGN);
3115
3116 /*
3117 * Using memory from the mbuf cluster pool, invoke the
3118 * bus_dma machinery to arrange the memory mapping.
3119 */
3120 error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
3121 adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
3122 if (error != 0) {
3123 m_free(m);
3124 return (error);
3125 }
3126
3127 /* If nsegs is wrong then the stack is corrupt. */
3128 KASSERT(nsegs == 1, ("Too many segments returned!"));
3129
3130 rx_buffer = &adapter->rx_buffer_area[i];
3131 if (rx_buffer->m_head != NULL)
3132 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3133
3134 map = rx_buffer->map;
3135 rx_buffer->map = adapter->rx_sparemap;
3136 adapter->rx_sparemap = map;
3137 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
3138 rx_buffer->m_head = m;
3139
3140 adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
3141 return (0);
3142}
3143
3144/*********************************************************************
3145 *
3146 * Allocate memory for rx_buffer structures. Since we use one
3147 * rx_buffer per received packet, the maximum number of rx_buffer's
3148 * that we'll need is equal to the number of receive descriptors
3149 * that we've allocated.
3150 *
3151 **********************************************************************/
3152static int
3153lem_allocate_receive_structures(struct adapter *adapter)
3154{
3155 device_t dev = adapter->dev;
3156 struct em_buffer *rx_buffer;
3157 int i, error;
3158
3159 adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
3160 adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3161 if (adapter->rx_buffer_area == NULL) {
3162 device_printf(dev, "Unable to allocate rx_buffer memory\n");
3163 return (ENOMEM);
3164 }
3165
3166 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3167 1, 0, /* alignment, bounds */
3168 BUS_SPACE_MAXADDR, /* lowaddr */
3169 BUS_SPACE_MAXADDR, /* highaddr */
3170 NULL, NULL, /* filter, filterarg */
3171 MCLBYTES, /* maxsize */
3172 1, /* nsegments */
3173 MCLBYTES, /* maxsegsize */
3174 0, /* flags */
3175 NULL, /* lockfunc */
3176 NULL, /* lockarg */
3177 &adapter->rxtag);
3178 if (error) {
3179 device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
3180 __func__, error);
3181 goto fail;
3182 }
3183
3184 /* Create the spare map (used by getbuf) */
3185 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3186 &adapter->rx_sparemap);
3187 if (error) {
3188 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3189 __func__, error);
3190 goto fail;
3191 }
3192
3193 rx_buffer = adapter->rx_buffer_area;
3194 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3195 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3196 &rx_buffer->map);
3197 if (error) {
3198 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3199 __func__, error);
3200 goto fail;
3201 }
3202 }
3203
3204 return (0);
3205
3206fail:
3207 lem_free_receive_structures(adapter);
3208 return (error);
3209}
3210
3211/*********************************************************************
3212 *
3213 * (Re)initialize receive structures.
3214 *
3215 **********************************************************************/
3216static int
3217lem_setup_receive_structures(struct adapter *adapter)
3218{
3219 struct em_buffer *rx_buffer;
3220 int i, error;
3221#ifdef DEV_NETMAP
3222 /* we are already under lock */
3223 struct netmap_adapter *na = NA(adapter->ifp);
3224 struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
3225#endif
3226
3227 /* Reset descriptor ring */
3228 bzero(adapter->rx_desc_base,
3229 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
3230
3231 /* Free current RX buffers. */
3232 rx_buffer = adapter->rx_buffer_area;
3233 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3234 if (rx_buffer->m_head != NULL) {
3235 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3236 BUS_DMASYNC_POSTREAD);
3237 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3238 m_freem(rx_buffer->m_head);
3239 rx_buffer->m_head = NULL;
3240 }
3241 }
3242
3243 /* Allocate new ones. */
3244 for (i = 0; i < adapter->num_rx_desc; i++) {
3245#ifdef DEV_NETMAP
3246 if (slot) {
3247 /* slot si is mapped to the i-th NIC-ring entry */
3248 int si = i + na->rx_rings[0].nkr_hwofs;
3249 uint64_t paddr;
3250 void *addr;
3251
3252 if (si > na->num_rx_desc)
3253 si -= na->num_rx_desc;
3254 addr = PNMB(slot + si, &paddr);
3255 netmap_load_map(adapter->rxtag, rx_buffer->map, addr);
3256 /* Update descriptor */
3257 adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
3258 continue;
3259 }
3260#endif /* DEV_NETMAP */
3261 error = lem_get_buf(adapter, i);
3262 if (error)
3263 return (error);
3264 }
3265
3266 /* Setup our descriptor pointers */
3267 adapter->next_rx_desc_to_check = 0;
3268 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3269 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3270
3271 return (0);
3272}
3273
3274/*********************************************************************
3275 *
3276 * Enable receive unit.
3277 *
3278 **********************************************************************/
#define MAX_INTS_PER_SEC	8000
#define DEFAULT_ITR	1000000000/(MAX_INTS_PER_SEC * 256)

static void
lem_initialize_receive_unit(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	u64 bus_addr;
	u32 rctl, rxcsum;

	INIT_DEBUGOUT("lem_initialize_receive_unit: begin");

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	if (adapter->hw.mac.type >= e1000_82540) {
		/* Absolute RX interrupt delay (RADV). */
		E1000_WRITE_REG(&adapter->hw, E1000_RADV,
		    adapter->rx_abs_int_delay.value);
		/*
		 * Set the interrupt throttling rate. Value is calculated
		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
		 */
		E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
	}

	/*
	** When using MSIX interrupts we need to throttle
	** using the EITR register (82574 only)
	*/
	if (adapter->msix)
		for (int i = 0; i < 4; i++)
			E1000_WRITE_REG(&adapter->hw,
			    E1000_EITR_82574(i), DEFAULT_ITR);

	/* Disable accelerated acknowledge */
	if (adapter->hw.mac.type == e1000_82574)
		E1000_WRITE_REG(&adapter->hw,
		    E1000_RFCTL, E1000_RFCTL_ACK_DIS);

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
	    adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
	    (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
	    (u32)bus_addr);

	/* Setup the Receive Control Register */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN Filters are off */
	rctl &= ~E1000_RCTL_VFE;

	/* TBI workaround parts must also store "bad" packets. */
	if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	/* RX buffer size; sizes above 2048 need the BSEX scaling bit. */
	switch (adapter->rx_buffer_len) {
	default:
	case 2048:
		rctl |= E1000_RCTL_SZ_2048;
		break;
	case 4096:
		rctl |= E1000_RCTL_SZ_4096 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 8192:
		rctl |= E1000_RCTL_SZ_8192 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 16384:
		rctl |= E1000_RCTL_SZ_16384 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	/* Long Packet Enable tracks the configured MTU. */
	if (ifp->if_mtu > ETHERMTU)
		rctl |= E1000_RCTL_LPE;
	else
		rctl &= ~E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac.type >= e1000_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
	}

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);

	/*
	 * Setup the HW Rx Head and
	 * Tail Descriptor Pointers
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
#ifdef DEV_NETMAP
	/* preserve buffers already made available to clients */
	if (ifp->if_capenable & IFCAP_NETMAP) {
		struct netmap_adapter *na = NA(adapter->ifp);
		struct netmap_kring *kring = &na->rx_rings[0];
		int t = na->num_rx_desc - 1 - kring->nr_hwavail;

		if (t >= na->num_rx_desc)
			t -= na->num_rx_desc;
		E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), t);
	} else
#endif /* DEV_NETMAP */
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);

	return;
}
3401
3402/*********************************************************************
3403 *
3404 * Free receive related data structures.
3405 *
3406 **********************************************************************/
3407static void
3408lem_free_receive_structures(struct adapter *adapter)
3409{
3410 struct em_buffer *rx_buffer;
3411 int i;
3412
3413 INIT_DEBUGOUT("free_receive_structures: begin");
3414
3415 if (adapter->rx_sparemap) {
3416 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
3417 adapter->rx_sparemap = NULL;
3418 }
3419
3420 /* Cleanup any existing buffers */
3421 if (adapter->rx_buffer_area != NULL) {
3422 rx_buffer = adapter->rx_buffer_area;
3423 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3424 if (rx_buffer->m_head != NULL) {
3425 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3426 BUS_DMASYNC_POSTREAD);
3427 bus_dmamap_unload(adapter->rxtag,
3428 rx_buffer->map);
3429 m_freem(rx_buffer->m_head);
3430 rx_buffer->m_head = NULL;
3431 } else if (rx_buffer->map != NULL)
3432 bus_dmamap_unload(adapter->rxtag,
3433 rx_buffer->map);
3434 if (rx_buffer->map != NULL) {
3435 bus_dmamap_destroy(adapter->rxtag,
3436 rx_buffer->map);
3437 rx_buffer->map = NULL;
3438 }
3439 }
3440 }
3441
3442 if (adapter->rx_buffer_area != NULL) {
3443 free(adapter->rx_buffer_area, M_DEVBUF);
3444 adapter->rx_buffer_area = NULL;
3445 }
3446
3447 if (adapter->rxtag != NULL) {
3448 bus_dma_tag_destroy(adapter->rxtag);
3449 adapter->rxtag = NULL;
3450 }
3451}
3452
3453/*********************************************************************
3454 *
3455 * This routine executes in interrupt context. It replenishes
3456 * the mbufs in the descriptor and sends data which has been
3457 * dma'ed into host memory to upper layer.
3458 *
3459 * We loop at most count times if count is > 0, or until done if
3460 * count < 0.
3461 *
3462 * For polling we also now return the number of cleaned packets
3463 *********************************************************************/
3464static bool
3465lem_rxeof(struct adapter *adapter, int count, int *done)
3466{
3467 struct ifnet *ifp = adapter->ifp;;
3468 struct mbuf *mp;
3469 u8 status = 0, accept_frame = 0, eop = 0;
3470 u16 len, desc_len, prev_len_adj;
3471 int i, rx_sent = 0;
3472 struct e1000_rx_desc *current_desc;
3473
3474 EM_RX_LOCK(adapter);
3475 i = adapter->next_rx_desc_to_check;
3476 current_desc = &adapter->rx_desc_base[i];
3477 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3478 BUS_DMASYNC_POSTREAD);
3479
3480#ifdef DEV_NETMAP
3481 if (ifp->if_capenable & IFCAP_NETMAP) {
3482 selwakeuppri(&NA(ifp)->rx_rings[0].si, PI_NET);
3483 EM_RX_UNLOCK(adapter);
3484 return (0);
3485 }
3486#endif /* DEV_NETMAP */
3487
3488 if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
3489 if (done != NULL)
3490 *done = rx_sent;
3491 EM_RX_UNLOCK(adapter);
3492 return (FALSE);
3493 }
3494
3495 while (count != 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) {
3496 struct mbuf *m = NULL;
3497
3498 status = current_desc->status;
3499 if ((status & E1000_RXD_STAT_DD) == 0)
3500 break;
3501
3502 mp = adapter->rx_buffer_area[i].m_head;
3503 /*
3504 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3505 * needs to access the last received byte in the mbuf.
3506 */
3507 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
3508 BUS_DMASYNC_POSTREAD);
3509
3510 accept_frame = 1;
3511 prev_len_adj = 0;
3512 desc_len = le16toh(current_desc->length);
3513 if (status & E1000_RXD_STAT_EOP) {
3514 count--;
3515 eop = 1;
3516 if (desc_len < ETHER_CRC_LEN) {
3517 len = 0;
3518 prev_len_adj = ETHER_CRC_LEN - desc_len;
3519 } else
3520 len = desc_len - ETHER_CRC_LEN;
3521 } else {
3522 eop = 0;
3523 len = desc_len;
3524 }
3525
3526 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
3527 u8 last_byte;
3528 u32 pkt_len = desc_len;
3529
3530 if (adapter->fmp != NULL)
3531 pkt_len += adapter->fmp->m_pkthdr.len;
3532
3533 last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
3534 if (TBI_ACCEPT(&adapter->hw, status,
3535 current_desc->errors, pkt_len, last_byte,
3536 adapter->min_frame_size, adapter->max_frame_size)) {
3537 e1000_tbi_adjust_stats_82543(&adapter->hw,
3538 &adapter->stats, pkt_len,
3539 adapter->hw.mac.addr,
3540 adapter->max_frame_size);
3541 if (len > 0)
3542 len--;
3543 } else
3544 accept_frame = 0;
3545 }
3546
3547 if (accept_frame) {
3548 if (lem_get_buf(adapter, i) != 0) {
3549 ifp->if_iqdrops++;
3550 goto discard;
3551 }
3552
3553 /* Assign correct length to the current fragment */
3554 mp->m_len = len;
3555
3556 if (adapter->fmp == NULL) {
3557 mp->m_pkthdr.len = len;
3558 adapter->fmp = mp; /* Store the first mbuf */
3559 adapter->lmp = mp;
3560 } else {
3561 /* Chain mbuf's together */
3562 mp->m_flags &= ~M_PKTHDR;
3563 /*
3564 * Adjust length of previous mbuf in chain if
3565 * we received less than 4 bytes in the last
3566 * descriptor.
3567 */
3568 if (prev_len_adj > 0) {
3569 adapter->lmp->m_len -= prev_len_adj;
3570 adapter->fmp->m_pkthdr.len -=
3571 prev_len_adj;
3572 }
3573 adapter->lmp->m_next = mp;
3574 adapter->lmp = adapter->lmp->m_next;
3575 adapter->fmp->m_pkthdr.len += len;
3576 }
3577
3578 if (eop) {
3579 adapter->fmp->m_pkthdr.rcvif = ifp;
3580 ifp->if_ipackets++;
3581 lem_receive_checksum(adapter, current_desc,
3582 adapter->fmp);
3583#ifndef __NO_STRICT_ALIGNMENT
3584 if (adapter->max_frame_size >
3585 (MCLBYTES - ETHER_ALIGN) &&
3586 lem_fixup_rx(adapter) != 0)
3587 goto skip;
3588#endif
3589 if (status & E1000_RXD_STAT_VP) {
3590 adapter->fmp->m_pkthdr.ether_vtag =
3591 le16toh(current_desc->special);
3592 adapter->fmp->m_flags |= M_VLANTAG;
3593 }
3594#ifndef __NO_STRICT_ALIGNMENT
3595skip:
3596#endif
3597 m = adapter->fmp;
3598 adapter->fmp = NULL;
3599 adapter->lmp = NULL;
3600 }
3601 } else {
3602 ifp->if_ierrors++;
3603discard:
3604 /* Reuse loaded DMA map and just update mbuf chain */
3605 mp = adapter->rx_buffer_area[i].m_head;
3606 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
3607 mp->m_data = mp->m_ext.ext_buf;
3608 mp->m_next = NULL;
3609 if (adapter->max_frame_size <=
3610 (MCLBYTES - ETHER_ALIGN))
3611 m_adj(mp, ETHER_ALIGN);
3612 if (adapter->fmp != NULL) {
3613 m_freem(adapter->fmp);
3614 adapter->fmp = NULL;
3615 adapter->lmp = NULL;
3616 }
3617 m = NULL;
3618 }
3619
3620 /* Zero out the receive descriptors status. */
3621 current_desc->status = 0;
3622 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3623 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3624
3625 /* Advance our pointers to the next descriptor. */
3626 if (++i == adapter->num_rx_desc)
3627 i = 0;
3628 /* Call into the stack */
3629 if (m != NULL) {
3630 adapter->next_rx_desc_to_check = i;
3631 EM_RX_UNLOCK(adapter);
3632 (*ifp->if_input)(ifp, m);
3633 EM_RX_LOCK(adapter);
3634 rx_sent++;
3635 i = adapter->next_rx_desc_to_check;
3636 }
3637 current_desc = &adapter->rx_desc_base[i];
3638 }
3639 adapter->next_rx_desc_to_check = i;
3640
3641 /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
3642 if (--i < 0)
3643 i = adapter->num_rx_desc - 1;
3644 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
3645 if (done != NULL)
3646 *done = rx_sent;
3647 EM_RX_UNLOCK(adapter);
3648 return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE);
3649}
3650
3651#ifndef __NO_STRICT_ALIGNMENT
3652/*
3653 * When jumbo frames are enabled we should realign entire payload on
3654 * architecures with strict alignment. This is serious design mistake of 8254x
3655 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
3656 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
3657 * payload. On architecures without strict alignment restrictions 8254x still
3658 * performs unaligned memory access which would reduce the performance too.
3659 * To avoid copying over an entire frame to align, we allocate a new mbuf and
3660 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
3661 * existing mbuf chain.
3662 *
3663 * Be aware, best performance of the 8254x is achived only when jumbo frame is
3664 * not used at all on architectures with strict alignment.
3665 */
3666static int
3667lem_fixup_rx(struct adapter *adapter)
3668{
3669 struct mbuf *m, *n;
3670 int error;
3671
3672 error = 0;
3673 m = adapter->fmp;
3674 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
3675 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
3676 m->m_data += ETHER_HDR_LEN;
3677 } else {
3678 MGETHDR(n, M_DONTWAIT, MT_DATA);
3679 if (n != NULL) {
3680 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
3681 m->m_data += ETHER_HDR_LEN;
3682 m->m_len -= ETHER_HDR_LEN;
3683 n->m_len = ETHER_HDR_LEN;
3684 M_MOVE_PKTHDR(n, m);
3685 n->m_next = m;
3686 adapter->fmp = n;
3687 } else {
3688 adapter->dropped_pkts++;
3689 m_freem(adapter->fmp);
3690 adapter->fmp = NULL;
3691 error = ENOMEM;
3692 }
3693 }
3694
3695 return (error);
3696}
3697#endif
3698
3699/*********************************************************************
3700 *
3701 * Verify that the hardware indicated that the checksum is valid.
3702 * Inform the stack about the status of checksum so that stack
3703 * doesn't spend time verifying the checksum.
3704 *
3705 *********************************************************************/
3706static void
3707lem_receive_checksum(struct adapter *adapter,
3708 struct e1000_rx_desc *rx_desc, struct mbuf *mp)
3709{
3710 /* 82543 or newer only */
3711 if ((adapter->hw.mac.type < e1000_82543) ||
3712 /* Ignore Checksum bit is set */
3713 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3714 mp->m_pkthdr.csum_flags = 0;
3715 return;
3716 }
3717
3718 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3719 /* Did it pass? */
3720 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3721 /* IP Checksum Good */
3722 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3723 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3724
3725 } else {
3726 mp->m_pkthdr.csum_flags = 0;
3727 }
3728 }
3729
3730 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3731 /* Did it pass? */
3732 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3733 mp->m_pkthdr.csum_flags |=
3734 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3735 mp->m_pkthdr.csum_data = htons(0xffff);
3736 }
3737 }
3738}
3739
3740/*
3741 * This routine is run via an vlan
3742 * config EVENT
3743 */
3744static void
3745lem_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3746{
3747 struct adapter *adapter = ifp->if_softc;
3748 u32 index, bit;
3749
3750 if (ifp->if_softc != arg) /* Not our event */
3751 return;
3752
3753 if ((vtag == 0) || (vtag > 4095)) /* Invalid ID */
3754 return;
3755
3756 EM_CORE_LOCK(adapter);
3757 index = (vtag >> 5) & 0x7F;
3758 bit = vtag & 0x1F;
3759 adapter->shadow_vfta[index] |= (1 << bit);
3760 ++adapter->num_vlans;
3761 /* Re-init to load the changes */
3762 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3763 lem_init_locked(adapter);
3764 EM_CORE_UNLOCK(adapter);
3765}
3766
3767/*
3768 * This routine is run via an vlan
3769 * unconfig EVENT
3770 */
3771static void
3772lem_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3773{
3774 struct adapter *adapter = ifp->if_softc;
3775 u32 index, bit;
3776
3777 if (ifp->if_softc != arg)
3778 return;
3779
3780 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3781 return;
3782
3783 EM_CORE_LOCK(adapter);
3784 index = (vtag >> 5) & 0x7F;
3785 bit = vtag & 0x1F;
3786 adapter->shadow_vfta[index] &= ~(1 << bit);
3787 --adapter->num_vlans;
3788 /* Re-init to load the changes */
3789 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3790 lem_init_locked(adapter);
3791 EM_CORE_UNLOCK(adapter);
3792}
3793
3794static void
3795lem_setup_vlan_hw_support(struct adapter *adapter)
3796{
3797 struct e1000_hw *hw = &adapter->hw;
3798 u32 reg;
3799
3800 /*
3801 ** We get here thru init_locked, meaning
3802 ** a soft reset, this has already cleared
3803 ** the VFTA and other state, so if there
3804 ** have been no vlan's registered do nothing.
3805 */
3806 if (adapter->num_vlans == 0)
3807 return;
3808
3809 /*
3810 ** A soft reset zero's out the VFTA, so
3811 ** we need to repopulate it now.
3812 */
3813 for (int i = 0; i < EM_VFTA_SIZE; i++)
3814 if (adapter->shadow_vfta[i] != 0)
3815 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
3816 i, adapter->shadow_vfta[i]);
3817
3818 reg = E1000_READ_REG(hw, E1000_CTRL);
3819 reg |= E1000_CTRL_VME;
3820 E1000_WRITE_REG(hw, E1000_CTRL, reg);
3821
3822 /* Enable the Filter Table */
3823 reg = E1000_READ_REG(hw, E1000_RCTL);
3824 reg &= ~E1000_RCTL_CFIEN;
3825 reg |= E1000_RCTL_VFE;
3826 E1000_WRITE_REG(hw, E1000_RCTL, reg);
3827
3828 /* Update the frame size */
3829 E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
3830 adapter->max_frame_size + VLAN_TAG_SIZE);
3831}
3832
3833static void
3834lem_enable_intr(struct adapter *adapter)
3835{
3836 struct e1000_hw *hw = &adapter->hw;
3837 u32 ims_mask = IMS_ENABLE_MASK;
3838
3839 if (adapter->msix) {
3840 E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
3841 ims_mask |= EM_MSIX_MASK;
3842 }
3843 E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
3844}
3845
3846static void
3847lem_disable_intr(struct adapter *adapter)
3848{
3849 struct e1000_hw *hw = &adapter->hw;
3850
3851 if (adapter->msix)
3852 E1000_WRITE_REG(hw, EM_EIAC, 0);
3853 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
3854}
3855
3856/*
3857 * Bit of a misnomer, what this really means is
3858 * to enable OS management of the system... aka
3859 * to disable special hardware management features
3860 */
3861static void
3862lem_init_manageability(struct adapter *adapter)
3863{
3864 /* A shared code workaround */
3865 if (adapter->has_manage) {
3866 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3867 /* disable hardware interception of ARP */
3868 manc &= ~(E1000_MANC_ARP_EN);
3869 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3870 }
3871}
3872
3873/*
3874 * Give control back to hardware management
3875 * controller if there is one.
3876 */
3877static void
3878lem_release_manageability(struct adapter *adapter)
3879{
3880 if (adapter->has_manage) {
3881 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3882
3883 /* re-enable hardware interception of ARP */
3884 manc |= E1000_MANC_ARP_EN;
3885 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3886 }
3887}
3888
3889/*
3890 * lem_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
3891 * For ASF and Pass Through versions of f/w this means
3892 * that the driver is loaded. For AMT version type f/w
3893 * this means that the network i/f is open.
3894 */
3895static void
3896lem_get_hw_control(struct adapter *adapter)
3897{
3898 u32 ctrl_ext;
3899
3900 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3901 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3902 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
3903 return;
3904}
3905
3906/*
3907 * lem_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3908 * For ASF and Pass Through versions of f/w this means that
3909 * the driver is no longer loaded. For AMT versions of the
3910 * f/w this means that the network i/f is closed.
3911 */
3912static void
3913lem_release_hw_control(struct adapter *adapter)
3914{
3915 u32 ctrl_ext;
3916
3917 if (!adapter->has_manage)
3918 return;
3919
3920 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3921 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3922 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
3923 return;
3924}
3925
3926static int
3927lem_is_valid_ether_addr(u8 *addr)
3928{
3929 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3930
3931 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3932 return (FALSE);
3933 }
3934
3935 return (TRUE);
3936}
3937
3938/*
3939** Parse the interface capabilities with regard
3940** to both system management and wake-on-lan for
3941** later use.
3942*/
static void
lem_get_wakeup(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	u16 eeprom_data = 0, device_id, apme_mask;

	adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
	apme_mask = EM_EEPROM_APME;

	/*
	 * Read the NVM word holding the APM-enable bit; which word
	 * (and which bit) to use varies with the MAC type.
	 */
	switch (adapter->hw.mac.type) {
	case e1000_82542:
	case e1000_82543:
		/* No NVM read: eeprom_data stays 0, so WOL stays off. */
		break;
	case e1000_82544:
		e1000_read_nvm(&adapter->hw,
		    NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
		apme_mask = EM_82544_APME;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		/* Dual-port parts: port B has its own control word. */
		if (adapter->hw.bus.func == 1) {
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		} else
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	default:
		e1000_read_nvm(&adapter->hw,
		    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	/* APM enabled in NVM: default to magic-packet + multicast wake. */
	if (eeprom_data & apme_mask)
		adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
	/*
	 * We have the eeprom settings, now apply the special cases
	 * where the eeprom may be wrong or the board won't support
	 * wake on lan on a particular port
	 */
	device_id = pci_get_device(dev);
	switch (device_id) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
		    E1000_STATUS_FUNC_1)
			adapter->wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->wol = 0;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}
	return;
}
4007
4008
4009/*
4010 * Enable PCI Wake On Lan capability
4011 */
static void
lem_enable_wakeup(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = adapter->ifp;
	u32 pmc, ctrl, ctrl_ext, rctl;
	u16 status;

	/* Nothing to do without the PCI power-management capability. */
	if ((pci_find_cap(dev, PCIY_PMG, &pmc) != 0))
		return;

	/* Advertise the wakeup capability */
	ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
	ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
	E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
	E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);

	/* Keep the laser running on Fiber adapters */
	if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
	    adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
	}

	/*
	** Determine type of Wakeup: note that wol
	** is set with all bits on by default.
	*/
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
		adapter->wol &= ~E1000_WUFC_MAG;

	if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
		adapter->wol &= ~E1000_WUFC_MC;
	else {
		/* Multicast wake requires multicast-promiscuous receive. */
		rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
	}

	/* pchlan parts program the wake state through the PHY instead. */
	if (adapter->hw.mac.type == e1000_pchlan) {
		if (lem_enable_phy_wakeup(adapter))
			return;
	} else {
		/* NOTE(review): WUC was already written above; this second
		 * write looks redundant but is kept as-is -- confirm. */
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
	}


	/* Request PME */
	status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
	status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if (ifp->if_capenable & IFCAP_WOL)
		status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);

	return;
}
4070
/*
** WOL in the newer chipset interfaces (pchlan)
** require thing to be copied into the phy
**
** Mirrors the MAC's receive-address registers (RAR), multicast table
** (MTA) and receive-control configuration into the corresponding BM
** PHY registers, enables PHY wakeup in WUC/WUFC, then sets the host
** wakeup bit on PHY page 769 under the PHY semaphore.
**
** Returns 0 on success or the e1000 error code from the PHY access
** that failed.
*/
static int
lem_enable_phy_wakeup(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mreg, ret = 0;
	u16 preg;

	/* copy MAC RARs to PHY RARs */
	for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
		mreg = E1000_READ_REG(hw, E1000_RAL(i));
		e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
		e1000_write_phy_reg(hw, BM_RAR_M(i),
		    (u16)((mreg >> 16) & 0xFFFF));
		mreg = E1000_READ_REG(hw, E1000_RAH(i));
		e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
		e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
		    (u16)((mreg >> 16) & 0xFFFF));
	}

	/* copy MAC MTA to PHY MTA */
	for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
		mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
		e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
		e1000_write_phy_reg(hw, BM_MTA(i) + 1,
		    (u16)((mreg >> 16) & 0xFFFF));
	}

	/* configure PHY Rx Control register */
	/* Translate each relevant MAC RCTL bit into its BM_RCTL twin. */
	e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
	mreg = E1000_READ_REG(hw, E1000_RCTL);
	if (mreg & E1000_RCTL_UPE)
		preg |= BM_RCTL_UPE;
	if (mreg & E1000_RCTL_MPE)
		preg |= BM_RCTL_MPE;
	preg &= ~(BM_RCTL_MO_MASK);
	if (mreg & E1000_RCTL_MO_3)
		preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
				<< BM_RCTL_MO_SHIFT);
	if (mreg & E1000_RCTL_BAM)
		preg |= BM_RCTL_BAM;
	if (mreg & E1000_RCTL_PMCF)
		preg |= BM_RCTL_PMCF;
	mreg = E1000_READ_REG(hw, E1000_CTRL);
	if (mreg & E1000_CTRL_RFCE)
		preg |= BM_RCTL_RFCE;
	e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);

	/* enable PHY wakeup in MAC register */
	E1000_WRITE_REG(hw, E1000_WUC,
	    E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
	E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);

	/* configure and enable PHY wakeup in PHY registers */
	e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
	e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);

	/* activate PHY wakeup */
	/* The raw MDIC accesses below require the PHY semaphore. */
	ret = hw->phy.ops.acquire(hw);
	if (ret) {
		printf("Could not acquire PHY\n");
		return ret;
	}
	e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
	                         (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
	ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
	if (ret) {
		printf("Could not read PHY page 769\n");
		goto out;
	}
	preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
	ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
	if (ret)
		printf("Could not set PHY Host Wakeup bit\n");
out:
	hw->phy.ops.release(hw);

	return ret;
}
4153
4154static void
4155lem_led_func(void *arg, int onoff)
4156{
4157 struct adapter *adapter = arg;
4158
4159 EM_CORE_LOCK(adapter);
4160 if (onoff) {
4161 e1000_setup_led(&adapter->hw);
4162 e1000_led_on(&adapter->hw);
4163 } else {
4164 e1000_led_off(&adapter->hw);
4165 e1000_cleanup_led(&adapter->hw);
4166 }
4167 EM_CORE_UNLOCK(adapter);
4168}
4169
4170/*********************************************************************
4171* 82544 Coexistence issue workaround.
4172* There are 2 issues.
4173* 1. Transmit Hang issue.
4174* To detect this issue, following equation can be used...
4175* SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4176* If SUM[3:0] is in between 1 to 4, we will have this issue.
4177*
4178* 2. DAC issue.
4179* To detect this issue, following equation can be used...
4180* SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4181* If SUM[3:0] is in between 9 to c, we will have this issue.
4182*
4183*
4184* WORKAROUND:
4185* Make sure we do not have ending address
4186* as 1,2,3,4(Hang) or 9,a,b,c (DAC)
4187*
4188*************************************************************************/
4189static u32
4190lem_fill_descriptors (bus_addr_t address, u32 length,
4191 PDESC_ARRAY desc_array)
4192{
4193 u32 safe_terminator;
4194
4195 /* Since issue is sensitive to length and address.*/
4196 /* Let us first check the address...*/
4197 if (length <= 4) {
4198 desc_array->descriptor[0].address = address;
4199 desc_array->descriptor[0].length = length;
4200 desc_array->elements = 1;
4201 return (desc_array->elements);
4202 }
4203 safe_terminator = (u32)((((u32)address & 0x7) +
4204 (length & 0xF)) & 0xF);
4205 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
4206 if (safe_terminator == 0 ||
4207 (safe_terminator > 4 &&
4208 safe_terminator < 9) ||
4209 (safe_terminator > 0xC &&
4210 safe_terminator <= 0xF)) {
4211 desc_array->descriptor[0].address = address;
4212 desc_array->descriptor[0].length = length;
4213 desc_array->elements = 1;
4214 return (desc_array->elements);
4215 }
4216
4217 desc_array->descriptor[0].address = address;
4218 desc_array->descriptor[0].length = length - 4;
4219 desc_array->descriptor[1].address = address + (length - 4);
4220 desc_array->descriptor[1].length = 4;
4221 desc_array->elements = 2;
4222 return (desc_array->elements);
4223}
4224
/**********************************************************************
 *
 * Update the board statistics counters.
 *
 * Reads every hardware statistics register (they are clear-on-read)
 * and accumulates the values into adapter->stats, then refreshes the
 * ifnet error/collision aggregates from those totals.
 *
 **********************************************************************/
static void
lem_update_stats_counters(struct adapter *adapter)
{
	struct ifnet   *ifp;

	/* SYMERRS/SEC are only meaningful with copper media or link up. */
	if(adapter->hw.phy.media_type == e1000_media_type_copper ||
	   (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
		adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
	}
	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
	adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);

	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
	adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
	adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) +
	    ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32);
	adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) +
	    ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32);

	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
	adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);

	adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
	adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);

	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);

	/* These counters only exist on 82543 and newer MACs. */
	if (adapter->hw.mac.type >= e1000_82543) {
		adapter->stats.algnerrc += 
		    E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
		adapter->stats.rxerrc += 
		    E1000_READ_REG(&adapter->hw, E1000_RXERRC);
		adapter->stats.tncrs += 
		    E1000_READ_REG(&adapter->hw, E1000_TNCRS);
		adapter->stats.cexterr += 
		    E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
		adapter->stats.tsctc += 
		    E1000_READ_REG(&adapter->hw, E1000_TSCTC);
		adapter->stats.tsctfc += 
		    E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
	}
	ifp = adapter->ifp;

	ifp->if_collisions = adapter->stats.colc;

	/* Rx Errors */
	ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
	    adapter->stats.crcerrs + adapter->stats.algnerrc +
	    adapter->stats.ruc + adapter->stats.roc +
	    adapter->stats.mpc + adapter->stats.cexterr;

	/* Tx Errors */
	ifp->if_oerrors = adapter->stats.ecol +
	    adapter->stats.latecol + adapter->watchdog_events;
}
4322
4323/* Export a single 32-bit register via a read-only sysctl. */
4324static int
4325lem_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
4326{
4327 struct adapter *adapter;
4328 u_int val;
4329
4330 adapter = oidp->oid_arg1;
4331 val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
4332 return (sysctl_handle_int(oidp, &val, 0, req));
4333}
4334
4335/*
4336 * Add sysctl variables, one per statistic, to the system.
4337 */
4338static void
4339lem_add_hw_stats(struct adapter *adapter)
4340{
4341 device_t dev = adapter->dev;
4342
4343 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4344 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4345 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4346 struct e1000_hw_stats *stats = &adapter->stats;
4347
4348 struct sysctl_oid *stat_node;
4349 struct sysctl_oid_list *stat_list;
4350
4351 /* Driver Statistics */
4352 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_fail",
4353 CTLFLAG_RD, &adapter->mbuf_alloc_failed,
4354 "Std mbuf failed");
4355 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "cluster_alloc_fail",
4356 CTLFLAG_RD, &adapter->mbuf_cluster_failed,
4357 "Std mbuf cluster failed");
4358 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4359 CTLFLAG_RD, &adapter->dropped_pkts,
4360 "Driver dropped packets");
4361 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail",
4362 CTLFLAG_RD, &adapter->no_tx_dma_setup,
4363 "Driver tx dma failure in xmit");
4364 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail1",
4365 CTLFLAG_RD, &adapter->no_tx_desc_avail1,
4366 "Not enough tx descriptors failure in xmit");
4367 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail2",
4368 CTLFLAG_RD, &adapter->no_tx_desc_avail2,
4369 "Not enough tx descriptors failure in xmit");
4370 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
4371 CTLFLAG_RD, &adapter->rx_overruns,
4372 "RX overruns");
4373 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
4374 CTLFLAG_RD, &adapter->watchdog_events,
4375 "Watchdog timeouts");
4376
4377 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
4378 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_CTRL,
4379 lem_sysctl_reg_handler, "IU",
4380 "Device Control Register");
4381 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
4382 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RCTL,
4383 lem_sysctl_reg_handler, "IU",
4384 "Receiver Control Register");
4385 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
4386 CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
4387 "Flow Control High Watermark");
4388 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
4389 CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
4390 "Flow Control Low Watermark");
4391 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_workaround",
4392 CTLFLAG_RD, &adapter->tx_fifo_wrk_cnt,
4393 "TX FIFO workaround events");
4394 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_reset",
4395 CTLFLAG_RD, &adapter->tx_fifo_reset_cnt,
4396 "TX FIFO resets");
4397
4398 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_head",
4399 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDH(0),
4400 lem_sysctl_reg_handler, "IU",
4401 "Transmit Descriptor Head");
4402 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_tail",
4403 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDT(0),
4404 lem_sysctl_reg_handler, "IU",
4405 "Transmit Descriptor Tail");
4406 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_head",
4407 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDH(0),
4408 lem_sysctl_reg_handler, "IU",
4409 "Receive Descriptor Head");
4410 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_tail",
4411 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDT(0),
4412 lem_sysctl_reg_handler, "IU",
4413 "Receive Descriptor Tail");
4414
4415
4416 /* MAC stats get their own sub node */
4417
4418 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4419 CTLFLAG_RD, NULL, "Statistics");
4420 stat_list = SYSCTL_CHILDREN(stat_node);
4421
4422 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
4423 CTLFLAG_RD, &stats->ecol,
4424 "Excessive collisions");
4425 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
4426 CTLFLAG_RD, &stats->scc,
4427 "Single collisions");
4428 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
4429 CTLFLAG_RD, &stats->mcc,
4430 "Multiple collisions");
4431 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
4432 CTLFLAG_RD, &stats->latecol,
4433 "Late collisions");
4434 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
4435 CTLFLAG_RD, &stats->colc,
4436 "Collision Count");
4437 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
4438 CTLFLAG_RD, &adapter->stats.symerrs,
4439 "Symbol Errors");
4440 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
4441 CTLFLAG_RD, &adapter->stats.sec,
4442 "Sequence Errors");
4443 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
4444 CTLFLAG_RD, &adapter->stats.dc,
4445 "Defer Count");
4446 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
4447 CTLFLAG_RD, &adapter->stats.mpc,
4448 "Missed Packets");
4449 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
4450 CTLFLAG_RD, &adapter->stats.rnbc,
4451 "Receive No Buffers");
4452 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
4453 CTLFLAG_RD, &adapter->stats.ruc,
4454 "Receive Undersize");
4455 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4456 CTLFLAG_RD, &adapter->stats.rfc,
4457 "Fragmented Packets Received ");
4458 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
4459 CTLFLAG_RD, &adapter->stats.roc,
4460 "Oversized Packets Received");
4461 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
4462 CTLFLAG_RD, &adapter->stats.rjc,
4463 "Recevied Jabber");
4464 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
4465 CTLFLAG_RD, &adapter->stats.rxerrc,
4466 "Receive Errors");
4467 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4468 CTLFLAG_RD, &adapter->stats.crcerrs,
4469 "CRC errors");
4470 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
4471 CTLFLAG_RD, &adapter->stats.algnerrc,
4472 "Alignment Errors");
4473 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
4474 CTLFLAG_RD, &adapter->stats.cexterr,
4475 "Collision/Carrier extension errors");
4476 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4477 CTLFLAG_RD, &adapter->stats.xonrxc,
4478 "XON Received");
4479 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4480 CTLFLAG_RD, &adapter->stats.xontxc,
4481 "XON Transmitted");
4482 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4483 CTLFLAG_RD, &adapter->stats.xoffrxc,
4484 "XOFF Received");
4485 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4486 CTLFLAG_RD, &adapter->stats.xofftxc,
4487 "XOFF Transmitted");
4488
4489 /* Packet Reception Stats */
4490 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
4491 CTLFLAG_RD, &adapter->stats.tpr,
4492 "Total Packets Received ");
4493 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
4494 CTLFLAG_RD, &adapter->stats.gprc,
4495 "Good Packets Received");
4496 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
4497 CTLFLAG_RD, &adapter->stats.bprc,
4498 "Broadcast Packets Received");
4499 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
4500 CTLFLAG_RD, &adapter->stats.mprc,
4501 "Multicast Packets Received");
4502 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4503 CTLFLAG_RD, &adapter->stats.prc64,
4504 "64 byte frames received ");
4505 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4506 CTLFLAG_RD, &adapter->stats.prc127,
4507 "65-127 byte frames received");
4508 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4509 CTLFLAG_RD, &adapter->stats.prc255,
4510 "128-255 byte frames received");
4511 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4512 CTLFLAG_RD, &adapter->stats.prc511,
4513 "256-511 byte frames received");
4514 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4515 CTLFLAG_RD, &adapter->stats.prc1023,
4516 "512-1023 byte frames received");
4517 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4518 CTLFLAG_RD, &adapter->stats.prc1522,
4519 "1023-1522 byte frames received");
4520 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
4521 CTLFLAG_RD, &adapter->stats.gorc,
4522 "Good Octets Received");
4523
4524 /* Packet Transmission Stats */
4525 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4526 CTLFLAG_RD, &adapter->stats.gotc,
4527 "Good Octets Transmitted");
4528 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4529 CTLFLAG_RD, &adapter->stats.tpt,
4530 "Total Packets Transmitted");
4531 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4532 CTLFLAG_RD, &adapter->stats.gptc,
4533 "Good Packets Transmitted");
4534 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4535 CTLFLAG_RD, &adapter->stats.bptc,
4536 "Broadcast Packets Transmitted");
4537 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4538 CTLFLAG_RD, &adapter->stats.mptc,
4539 "Multicast Packets Transmitted");
4540 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4541 CTLFLAG_RD, &adapter->stats.ptc64,
4542 "64 byte frames transmitted ");
4543 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4544 CTLFLAG_RD, &adapter->stats.ptc127,
4545 "65-127 byte frames transmitted");
4546 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4547 CTLFLAG_RD, &adapter->stats.ptc255,
4548 "128-255 byte frames transmitted");
4549 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4550 CTLFLAG_RD, &adapter->stats.ptc511,
4551 "256-511 byte frames transmitted");
4552 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4553 CTLFLAG_RD, &adapter->stats.ptc1023,
4554 "512-1023 byte frames transmitted");
4555 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4556 CTLFLAG_RD, &adapter->stats.ptc1522,
4557 "1024-1522 byte frames transmitted");
4558 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
4559 CTLFLAG_RD, &adapter->stats.tsctc,
4560 "TSO Contexts Transmitted");
4561 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
4562 CTLFLAG_RD, &adapter->stats.tsctfc,
4563 "TSO Contexts Failed");
4564}
4565
4566/**********************************************************************
4567 *
4568 * This routine provides a way to dump out the adapter eeprom,
4569 * often a useful debug/service tool. This only dumps the first
4570 * 32 words, stuff that matters is in that extent.
4571 *
4572 **********************************************************************/
4573
4574static int
4575lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
4576{
4577 struct adapter *adapter;
4578 int error;
4579 int result;
4580
4581 result = -1;
4582 error = sysctl_handle_int(oidp, &result, 0, req);
4583
4584 if (error || !req->newptr)
4585 return (error);
4586
4587 /*
4588 * This value will cause a hex dump of the
4589 * first 32 16-bit words of the EEPROM to
4590 * the screen.
4591 */
4592 if (result == 1) {
4593 adapter = (struct adapter *)arg1;
4594 lem_print_nvm_info(adapter);
4595 }
4596
4597 return (error);
4598}
4599
4600static void
4601lem_print_nvm_info(struct adapter *adapter)
4602{
4603 u16 eeprom_data;
4604 int i, j, row = 0;
4605
4606 /* Its a bit crude, but it gets the job done */
4607 printf("\nInterface EEPROM Dump:\n");
4608 printf("Offset\n0x0000 ");
4609 for (i = 0, j = 0; i < 32; i++, j++) {
4610 if (j == 8) { /* Make the offset block */
4611 j = 0; ++row;
4612 printf("\n0x00%x0 ",row);
4613 }
4614 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
4615 printf("%04x ", eeprom_data);
4616 }
4617 printf("\n");
4618}
4619
/*
 * Sysctl handler for the interrupt-delay tunables.  Converts the
 * user-supplied microsecond value to hardware ticks and writes it
 * into the low 16 bits of the register recorded in 'info', handling
 * the TIDV special cases.  Returns 0 or an errno value.
 */
static int
lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
{
	struct em_int_delay_info *info;
	struct adapter *adapter;
	u32 regval;
	int error;
	int usecs;
	int ticks;

	info = (struct em_int_delay_info *)arg1;
	usecs = info->value;
	error = sysctl_handle_int(oidp, &usecs, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	/* Hardware field is 16 bits of ticks; reject anything wider. */
	if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
		return (EINVAL);
	info->value = usecs;
	ticks = EM_USECS_TO_TICKS(usecs);

	adapter = info->adapter;
	
	EM_CORE_LOCK(adapter);
	/* Read-modify-write: only the low 16 bits hold the delay. */
	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
	regval = (regval & ~0xffff) | (ticks & 0xffff);
	/* Handle a few special cases. */
	switch (info->offset) {
	case E1000_RDTR:
		break;
	case E1000_TIDV:
		if (ticks == 0) {
			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
			/* Don't write 0 into the TIDV register. */
			regval++;
		} else
			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
		break;
	}
	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
	EM_CORE_UNLOCK(adapter);
	return (0);
}
4662
4663static void
4664lem_add_int_delay_sysctl(struct adapter *adapter, const char *name,
4665 const char *description, struct em_int_delay_info *info,
4666 int offset, int value)
4667{
4668 info->adapter = adapter;
4669 info->offset = offset;
4670 info->value = value;
4671 SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
4672 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4673 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
4674 info, 0, lem_sysctl_int_delay, "I", description);
4675}
4676
4677static void
4678lem_set_flow_cntrl(struct adapter *adapter, const char *name,
4679 const char *description, int *limit, int value)
4680{
4681 *limit = value;
4682 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4683 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4684 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4685}
4686
4687#ifndef EM_LEGACY_IRQ
4688static void
4689lem_add_rx_process_limit(struct adapter *adapter, const char *name,
4690 const char *description, int *limit, int value)
4691{
4692 *limit = value;
4693 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4694 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4695 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4696}
4697#endif