1/******************************************************************************
2
3  Copyright (c) 2001-2011, Intel Corporation
4  All rights reserved.
5
6  Redistribution and use in source and binary forms, with or without
7  modification, are permitted provided that the following conditions are met:
8
9   1. Redistributions of source code must retain the above copyright notice,
10      this list of conditions and the following disclaimer.
11
12   2. Redistributions in binary form must reproduce the above copyright
13      notice, this list of conditions and the following disclaimer in the
14      documentation and/or other materials provided with the distribution.
15
16   3. Neither the name of the Intel Corporation nor the names of its
17      contributors may be used to endorse or promote products derived from
18      this software without specific prior written permission.
19
20  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD$*/
34
35#ifdef HAVE_KERNEL_OPTION_HEADERS
36#include "opt_device_polling.h"
37#include "opt_inet.h"
38#include "opt_inet6.h"
39#endif
40
41#include <sys/param.h>
42#include <sys/systm.h>
43#include <sys/bus.h>
44#include <sys/endian.h>
45#include <sys/kernel.h>
46#include <sys/kthread.h>
47#include <sys/malloc.h>
48#include <sys/mbuf.h>
49#include <sys/module.h>
50#include <sys/rman.h>
51#include <sys/socket.h>
52#include <sys/sockio.h>
53#include <sys/sysctl.h>
54#include <sys/taskqueue.h>
55#include <sys/eventhandler.h>
56#include <machine/bus.h>
57#include <machine/resource.h>
58
59#include <net/bpf.h>
60#include <net/ethernet.h>
61#include <net/if.h>
62#include <net/if_arp.h>
63#include <net/if_dl.h>
64#include <net/if_media.h>
65
66#include <net/if_types.h>
67#include <net/if_vlan_var.h>
68
69#include <netinet/in_systm.h>
70#include <netinet/in.h>
71#include <netinet/if_ether.h>
72#include <netinet/ip.h>
73#include <netinet/ip6.h>
74#include <netinet/tcp.h>
75#include <netinet/udp.h>
76
77#include <machine/in_cksum.h>
78#ifndef __HAIKU__
79#include <dev/led/led.h>
80#endif
81#include <dev/pci/pcivar.h>
82#include <dev/pci/pcireg.h>
83
84#include "e1000_api.h"
85#include "if_lem.h"
86
87/*********************************************************************
88 *  Legacy Em Driver version:
89 *********************************************************************/
char lem_driver_version[] = "1.0.4";	/* appended to the branding string by lem_probe() */
91
92/*********************************************************************
93 *  PCI Device ID Table
94 *
95 *  Used by probe to select devices to load on
96 *  Last field stores an index into e1000_strings
97 *  Last entry must be all 0s
98 *
99 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
100 *********************************************************************/
101
/*
 * Scanned in order by lem_probe(); a PCI_ANY_ID subvendor/subdevice
 * entry matches any subsystem ID.  The trailing field indexes into
 * lem_strings[].
 */
static em_vendor_info_t lem_vendor_info_array[] =
{
	/* Intel(R) PRO/1000 Network Connection */
	{ 0x8086, E1000_DEV_ID_82540EM,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EM_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP_LP,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82541EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541ER,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541ER_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82542,		PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82543GC_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82543GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82544EI_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544EI_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544GC_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82545EM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545EM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82546EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546EB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_PCIE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
						PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82547EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82547EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82547GI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	/* required last entry */
	{ 0, 0, 0, 0, 0}
};
152
153/*********************************************************************
154 *  Table of branding strings for all supported NICs.
155 *********************************************************************/
156
/* Indexed by the last (string-index) field of lem_vendor_info_array. */
static char *lem_strings[] = {
	"Intel(R) PRO/1000 Legacy Network Connection"
};
160
161/*********************************************************************
162 *  Function prototypes
163 *********************************************************************/
/* Forward declarations for the driver entry points and local helpers. */
static int	lem_probe(device_t);
static int	lem_attach(device_t);
static int	lem_detach(device_t);
static int	lem_shutdown(device_t);
static int	lem_suspend(device_t);
static int	lem_resume(device_t);
static void	lem_start(struct ifnet *);
static void	lem_start_locked(struct ifnet *ifp);
static int	lem_ioctl(struct ifnet *, u_long, caddr_t);
static void	lem_init(void *);
static void	lem_init_locked(struct adapter *);
static void	lem_stop(void *);
static void	lem_media_status(struct ifnet *, struct ifmediareq *);
static int	lem_media_change(struct ifnet *);
static void	lem_identify_hardware(struct adapter *);
static int	lem_allocate_pci_resources(struct adapter *);
static int	lem_allocate_irq(struct adapter *adapter);
static void	lem_free_pci_resources(struct adapter *);
static void	lem_local_timer(void *);
static int	lem_hardware_init(struct adapter *);
static int	lem_setup_interface(device_t, struct adapter *);
static void	lem_setup_transmit_structures(struct adapter *);
static void	lem_initialize_transmit_unit(struct adapter *);
static int	lem_setup_receive_structures(struct adapter *);
static void	lem_initialize_receive_unit(struct adapter *);
static void	lem_enable_intr(struct adapter *);
static void	lem_disable_intr(struct adapter *);
static void	lem_free_transmit_structures(struct adapter *);
static void	lem_free_receive_structures(struct adapter *);
static void	lem_update_stats_counters(struct adapter *);
static void	lem_add_hw_stats(struct adapter *adapter);
static void	lem_txeof(struct adapter *);
static void	lem_tx_purge(struct adapter *);
static int	lem_allocate_receive_structures(struct adapter *);
static int	lem_allocate_transmit_structures(struct adapter *);
static bool	lem_rxeof(struct adapter *, int, int *);
#ifndef __NO_STRICT_ALIGNMENT
static int	lem_fixup_rx(struct adapter *);
#endif
static void	lem_receive_checksum(struct adapter *, struct e1000_rx_desc *,
		    struct mbuf *);
static void	lem_transmit_checksum_setup(struct adapter *, struct mbuf *,
		    u32 *, u32 *);
static void	lem_set_promisc(struct adapter *);
static void	lem_disable_promisc(struct adapter *);
static void	lem_set_multi(struct adapter *);
static void	lem_update_link_status(struct adapter *);
static int	lem_get_buf(struct adapter *, int);
static void	lem_register_vlan(void *, struct ifnet *, u16);
static void	lem_unregister_vlan(void *, struct ifnet *, u16);
static void	lem_setup_vlan_hw_support(struct adapter *);
static int	lem_xmit(struct adapter *, struct mbuf **);
static void	lem_smartspeed(struct adapter *);
static int	lem_82547_fifo_workaround(struct adapter *, int);
static void	lem_82547_update_fifo_head(struct adapter *, int);
static int	lem_82547_tx_fifo_reset(struct adapter *);
static void	lem_82547_move_tail(void *);
static int	lem_dma_malloc(struct adapter *, bus_size_t,
		    struct em_dma_alloc *, int);
static void	lem_dma_free(struct adapter *, struct em_dma_alloc *);
static int	lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
static void	lem_print_nvm_info(struct adapter *);
static int 	lem_is_valid_ether_addr(u8 *);
static u32	lem_fill_descriptors (bus_addr_t address, u32 length,
		    PDESC_ARRAY desc_array);
static int	lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static void	lem_add_int_delay_sysctl(struct adapter *, const char *,
		    const char *, struct em_int_delay_info *, int, int);
static void	lem_set_flow_cntrl(struct adapter *, const char *,
		    const char *, int *, int);
/* Management and WOL Support */
static void	lem_init_manageability(struct adapter *);
static void	lem_release_manageability(struct adapter *);
static void     lem_get_hw_control(struct adapter *);
static void     lem_release_hw_control(struct adapter *);
static void	lem_get_wakeup(device_t);
static void     lem_enable_wakeup(device_t);
static int	lem_enable_phy_wakeup(struct adapter *);
static void	lem_led_func(void *, int);

#ifdef EM_LEGACY_IRQ
static void	lem_intr(void *);
#else /* FAST IRQ */
static int	lem_irq_fast(void *);
static void	lem_handle_rxtx(void *context, int pending);
static void	lem_handle_link(void *context, int pending);
static void	lem_add_rx_process_limit(struct adapter *, const char *,
		    const char *, int *, int);
#endif /* ~EM_LEGACY_IRQ */

#ifdef DEVICE_POLLING
static poll_handler_t lem_poll;
#endif /* POLLING */
257
258/*********************************************************************
259 *  FreeBSD Device Interface Entry Points
260 *********************************************************************/
261
static device_method_t lem_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, lem_probe),
	DEVMETHOD(device_attach, lem_attach),
	DEVMETHOD(device_detach, lem_detach),
	DEVMETHOD(device_shutdown, lem_shutdown),
	DEVMETHOD(device_suspend, lem_suspend),
	DEVMETHOD(device_resume, lem_resume),
	{0, 0}
};

#ifndef __HAIKU__
/* On FreeBSD the legacy driver registers under the shared "em" name. */
static driver_t lem_driver = {
	"em", lem_methods, sizeof(struct adapter),
};

extern devclass_t em_devclass;
DRIVER_MODULE(lem, pci, lem_driver, em_devclass, 0, 0);
#else
/* On Haiku the driver gets its own "lem" name and devclass. */
static driver_t lem_driver = {
	"lem", lem_methods, sizeof(struct adapter),
};

devclass_t lem_devclass;
DRIVER_MODULE(lem, pci, lem_driver, lem_devclass, 0, 0);
#endif
MODULE_DEPEND(lem, pci, 1, 1, 1);
MODULE_DEPEND(lem, ether, 1, 1, 1);
290
291/*********************************************************************
292 *  Tunable default values.
293 *********************************************************************/
294
/*
 * The interrupt-delay registers tick in 1024 ns units; these macros
 * convert between register ticks and microseconds with rounding.
 */
#define EM_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
#define EM_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)

static int lem_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
static int lem_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
static int lem_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
static int lem_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
static int lem_rxd = EM_DEFAULT_RXD;
static int lem_txd = EM_DEFAULT_TXD;
static int lem_smart_pwr_down = FALSE;

/* Controls whether promiscuous also shows bad packets */
static int lem_debug_sbp = FALSE;

/* Tunables share the "hw.em" namespace with the main em(4) driver. */
TUNABLE_INT("hw.em.tx_int_delay", &lem_tx_int_delay_dflt);
TUNABLE_INT("hw.em.rx_int_delay", &lem_rx_int_delay_dflt);
TUNABLE_INT("hw.em.tx_abs_int_delay", &lem_tx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rx_abs_int_delay", &lem_rx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rxd", &lem_rxd);
TUNABLE_INT("hw.em.txd", &lem_txd);
TUNABLE_INT("hw.em.smart_pwr_down", &lem_smart_pwr_down);
TUNABLE_INT("hw.em.sbp", &lem_debug_sbp);

#ifndef EM_LEGACY_IRQ
/* How many packets rxeof tries to clean at a time */
static int lem_rx_process_limit = 100;
TUNABLE_INT("hw.em.rx_process_limit", &lem_rx_process_limit);
#endif

/* Flow control setting - default to FULL */
static int lem_fc_setting = e1000_fc_full;
TUNABLE_INT("hw.em.fc_setting", &lem_fc_setting);

/* Global used in WOL setup with multiport cards */
static int global_quad_port_a = 0;
334
335/*********************************************************************
336 *  Device identification routine
337 *
338 *  em_probe determines if the driver should be loaded on
339 *  adapter based on PCI vendor/device id of the adapter.
340 *
341 *  return BUS_PROBE_DEFAULT on success, positive on failure
342 *********************************************************************/
343
344static int
345lem_probe(device_t dev)
346{
347	char		adapter_name[60];
348	u16		pci_vendor_id = 0;
349	u16		pci_device_id = 0;
350	u16		pci_subvendor_id = 0;
351	u16		pci_subdevice_id = 0;
352	em_vendor_info_t *ent;
353
354	INIT_DEBUGOUT("em_probe: begin");
355
356	pci_vendor_id = pci_get_vendor(dev);
357	if (pci_vendor_id != EM_VENDOR_ID)
358		return (ENXIO);
359
360	pci_device_id = pci_get_device(dev);
361	pci_subvendor_id = pci_get_subvendor(dev);
362	pci_subdevice_id = pci_get_subdevice(dev);
363
364	ent = lem_vendor_info_array;
365	while (ent->vendor_id != 0) {
366		if ((pci_vendor_id == ent->vendor_id) &&
367		    (pci_device_id == ent->device_id) &&
368
369		    ((pci_subvendor_id == ent->subvendor_id) ||
370		    (ent->subvendor_id == PCI_ANY_ID)) &&
371
372		    ((pci_subdevice_id == ent->subdevice_id) ||
373		    (ent->subdevice_id == PCI_ANY_ID))) {
374			sprintf(adapter_name, "%s %s",
375				lem_strings[ent->index],
376				lem_driver_version);
377			device_set_desc_copy(dev, adapter_name);
378			return (BUS_PROBE_DEFAULT);
379		}
380		ent++;
381	}
382
383	return (ENXIO);
384}
385
386/*********************************************************************
387 *  Device initialization routine
388 *
389 *  The attach entry point is called when the driver is being loaded.
390 *  This routine identifies the type of hardware, allocates all resources
391 *  and initializes the hardware.
392 *
393 *  return 0 on success, positive on failure
394 *********************************************************************/
395
static int
lem_attach(device_t dev)
{
	struct adapter	*adapter;
	int		tsize, rsize;
	int		error = 0;

	INIT_DEBUGOUT("lem_attach: begin");

#ifndef __HAIKU__
	if (resource_disabled("lem", device_get_unit(dev))) {
		device_printf(dev, "Disabled by device hint\n");
		return (ENXIO);
	}
#endif

	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;
	EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
	EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
	EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL stuff */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
	    lem_sysctl_nvm_info, "I", "NVM Information");

	/* Timers run under the core and TX locks respectively. */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
	callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);

	/* Determine hardware and mac info */
	lem_identify_hardware(adapter);

	/* Setup PCI resources */
	if (lem_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci;
	}

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto err_pci;
	}

	e1000_get_bus_info(&adapter->hw);

	/* Set up some sysctls for the tunable interrupt delays */
	lem_add_int_delay_sysctl(adapter, "rx_int_delay",
	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
	    E1000_REGISTER(&adapter->hw, E1000_RDTR), lem_rx_int_delay_dflt);
	lem_add_int_delay_sysctl(adapter, "tx_int_delay",
	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
	    E1000_REGISTER(&adapter->hw, E1000_TIDV), lem_tx_int_delay_dflt);
	/* Absolute delay limit registers only exist on 82540 and newer. */
	if (adapter->hw.mac.type >= e1000_82540) {
		lem_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
		    "receive interrupt delay limit in usecs",
		    &adapter->rx_abs_int_delay,
		    E1000_REGISTER(&adapter->hw, E1000_RADV),
		    lem_rx_abs_int_delay_dflt);
		lem_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
		    "transmit interrupt delay limit in usecs",
		    &adapter->tx_abs_int_delay,
		    E1000_REGISTER(&adapter->hw, E1000_TADV),
		    lem_tx_abs_int_delay_dflt);
	}

#ifndef EM_LEGACY_IRQ
	/* Sysctls for limiting the amount of work done in the taskqueue */
	lem_add_rx_process_limit(adapter, "rx_processing_limit",
	    "max number of rx packets to process", &adapter->rx_process_limit,
	    lem_rx_process_limit);
#endif

        /* Sysctl for setting the interface flow control */
	lem_set_flow_cntrl(adapter, "flow_control",
	    "flow control setting",
	    &adapter->fc_setting, lem_fc_setting);

	/*
	 * Validate number of transmit and receive descriptors. It
	 * must not exceed hardware maximum, and must be multiple
	 * of E1000_DBA_ALIGN.
	 */
	if (((lem_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac.type >= e1000_82544 && lem_txd > EM_MAX_TXD) ||
	    (adapter->hw.mac.type < e1000_82544 && lem_txd > EM_MAX_TXD_82543) ||
	    (lem_txd < EM_MIN_TXD)) {
		device_printf(dev, "Using %d TX descriptors instead of %d!\n",
		    EM_DEFAULT_TXD, lem_txd);
		adapter->num_tx_desc = EM_DEFAULT_TXD;
	} else
		adapter->num_tx_desc = lem_txd;
	if (((lem_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac.type >= e1000_82544 && lem_rxd > EM_MAX_RXD) ||
	    (adapter->hw.mac.type < e1000_82544 && lem_rxd > EM_MAX_RXD_82543) ||
	    (lem_rxd < EM_MIN_RXD)) {
		device_printf(dev, "Using %d RX descriptors instead of %d!\n",
		    EM_DEFAULT_RXD, lem_rxd);
		adapter->num_rx_desc = EM_DEFAULT_RXD;
	} else
		adapter->num_rx_desc = lem_rxd;

	/* Default PHY/link negotiation settings. */
	adapter->hw.mac.autoneg = DO_AUTO_NEG;
	adapter->hw.phy.autoneg_wait_to_complete = FALSE;
	adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
	adapter->rx_buffer_len = 2048;

	/* 82541/82543-specific setup; presumably ignored by the shared
	 * code on other MAC types -- TODO confirm against e1000_api. */
	e1000_init_script_state_82541(&adapter->hw, TRUE);
	e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = FALSE;
		adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
	}

	/*
	 * Set the frame limits assuming
	 * standard ethernet sized frames.
	 */
	adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
	adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;

	/*
	 * This controls when hardware reports transmit completion
	 * status.
	 */
	adapter->hw.mac.report_tx_early = 1;

	tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
	    EM_DBA_ALIGN);

	/* Allocate Transmit Descriptor ring */
	if (lem_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
		device_printf(dev, "Unable to allocate tx_desc memory\n");
		error = ENOMEM;
		goto err_tx_desc;
	}
	adapter->tx_desc_base =
	    (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;

	rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
	    EM_DBA_ALIGN);

	/* Allocate Receive Descriptor ring */
	if (lem_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
		device_printf(dev, "Unable to allocate rx_desc memory\n");
		error = ENOMEM;
		goto err_rx_desc;
	}
	adapter->rx_desc_base =
	    (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err_hw_init;
	}

	/*
	** Start from a known state, this is
	** important in reading the nvm and
	** mac from that.
	*/
	e1000_reset_hw(&adapter->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
		/*
		** Some PCI-E parts fail the first check due to
		** the link being in sleep state, call it again,
		** if it fails a second time its a real issue.
		*/
		if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto err_hw_init;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&adapter->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto err_hw_init;
	}

	if (!lem_is_valid_ether_addr(adapter->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto err_hw_init;
	}

	/* Initialize the hardware */
	if (lem_hardware_init(adapter)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		error = EIO;
		goto err_hw_init;
	}

	/* Allocate transmit descriptors and buffers */
	if (lem_allocate_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		error = ENOMEM;
		goto err_tx_struct;
	}

	/* Allocate receive descriptors and buffers */
	if (lem_allocate_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		error = ENOMEM;
		goto err_rx_struct;
	}

	/*
	**  Do interrupt configuration
	*/
	error = lem_allocate_irq(adapter);
	if (error)
		goto err_rx_struct;

	/*
	 * Get Wake-on-Lan and Management info for later use
	 */
	lem_get_wakeup(dev);

	/* Setup OS specific network interface */
	if (lem_setup_interface(dev, adapter) != 0)
		goto err_rx_struct;

	/* Initialize statistics */
	lem_update_stats_counters(adapter);

	adapter->hw.mac.get_link_status = 1;
	lem_update_link_status(adapter);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&adapter->hw))
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");

	/* Do we need workaround for 82544 PCI-X adapter? */
	if (adapter->hw.bus.type == e1000_bus_type_pcix &&
	    adapter->hw.mac.type == e1000_82544)
		adapter->pcix_82544 = TRUE;
	else
		adapter->pcix_82544 = FALSE;

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    lem_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    lem_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	lem_add_hw_stats(adapter);

	/* Non-AMT based hardware can now take control from firmware */
	if (adapter->has_manage && !adapter->has_amt)
		lem_get_hw_control(adapter);

	/* Tell the stack that the interface is not active */
	adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

#ifndef __HAIKU__
	adapter->led_dev = led_create(lem_led_func, adapter,
	    device_get_nameunit(dev));
#endif

#ifdef DEV_NETMAP
	lem_netmap_attach(adapter);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("lem_attach: end");

	return (0);

	/* Error unwind: labels release resources in reverse order of
	 * the allocations above; free(NULL) and the lem_dma_free /
	 * if_free guards make partially-initialized paths safe. */
err_rx_struct:
	lem_free_transmit_structures(adapter);
err_tx_struct:
err_hw_init:
	lem_release_hw_control(adapter);
	lem_dma_free(adapter, &adapter->rxdma);
err_rx_desc:
	lem_dma_free(adapter, &adapter->txdma);
err_tx_desc:
err_pci:
	if (adapter->ifp != NULL)
		if_free(adapter->ifp);
	lem_free_pci_resources(adapter);
	free(adapter->mta, M_DEVBUF);
	EM_TX_LOCK_DESTROY(adapter);
	EM_RX_LOCK_DESTROY(adapter);
	EM_CORE_LOCK_DESTROY(adapter);

	return (error);
}
701
702/*********************************************************************
703 *  Device removal routine
704 *
705 *  The detach entry point is called when the driver is being removed.
706 *  This routine stops the adapter and deallocates all the resources
707 *  that were allocated for driver operation.
708 *
709 *  return 0 on success, positive on failure
710 *********************************************************************/
711
static int
lem_detach(device_t dev)
{
	struct adapter	*adapter = device_get_softc(dev);
	struct ifnet	*ifp = adapter->ifp;

	INIT_DEBUGOUT("em_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

#ifndef __HAIKU__
	if (adapter->led_dev != NULL)
		led_destroy(adapter->led_dev);
#endif

	/* Stop the adapter and reset the PHY under both locks. */
	EM_CORE_LOCK(adapter);
	EM_TX_LOCK(adapter);
	adapter->in_detach = 1;
	lem_stop(adapter);
	e1000_phy_hw_reset(&adapter->hw);

	lem_release_manageability(adapter);

	EM_TX_UNLOCK(adapter);
	EM_CORE_UNLOCK(adapter);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	/* Detach from the network stack and drain both callouts. */
	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
	callout_drain(&adapter->tx_fifo_timer);

#ifdef DEV_NETMAP
	netmap_detach(ifp);
#endif /* DEV_NETMAP */
	lem_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(ifp);

	lem_free_transmit_structures(adapter);
	lem_free_receive_structures(adapter);

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base) {
		lem_dma_free(adapter, &adapter->txdma);
		adapter->tx_desc_base = NULL;
	}

	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base) {
		lem_dma_free(adapter, &adapter->rxdma);
		adapter->rx_desc_base = NULL;
	}

	lem_release_hw_control(adapter);
	free(adapter->mta, M_DEVBUF);
	EM_TX_LOCK_DESTROY(adapter);
	EM_RX_LOCK_DESTROY(adapter);
	EM_CORE_LOCK_DESTROY(adapter);

	return (0);
}
787
788/*********************************************************************
789 *
790 *  Shutdown entry point
791 *
792 **********************************************************************/
793
794static int
795lem_shutdown(device_t dev)
796{
797	return lem_suspend(dev);
798}
799
800/*
801 * Suspend/resume device methods.
802 */
static int
lem_suspend(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);

	EM_CORE_LOCK(adapter);

	/* Hand management/firmware control back and arm wake-up
	 * before the bus powers the device down. */
	lem_release_manageability(adapter);
	lem_release_hw_control(adapter);
	lem_enable_wakeup(dev);

	EM_CORE_UNLOCK(adapter);

	return bus_generic_suspend(dev);
}
818
static int
lem_resume(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = adapter->ifp;

	/* Reinitialize the hardware, then kick the transmit path to
	 * drain anything queued while the device was suspended. */
	EM_CORE_LOCK(adapter);
	lem_init_locked(adapter);
	lem_init_manageability(adapter);
	EM_CORE_UNLOCK(adapter);
	lem_start(ifp);

	return bus_generic_resume(dev);
}
833
834
/*
 * Dequeue packets from if_snd and hand them to the hardware while
 * descriptors and link state allow.  Caller must hold the TX lock.
 */
static void
lem_start_locked(struct ifnet *ifp)
{
	struct adapter	*adapter = ifp->if_softc;
	struct mbuf	*m_head;

	EM_TX_LOCK_ASSERT(adapter);

	/* Bail unless running and not flow-blocked, with link up. */
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;
	if (!adapter->link_active)
		return;

        /*
         * Force a cleanup if number of TX descriptors
         * available hits the threshold
         */
	if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
		lem_txeof(adapter);
		/* Now do we at least have a minimal? */
		if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
			adapter->no_tx_desc_avail1++;
			return;
		}
	}

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {

                IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 *  Encapsulation can modify our pointer, and or make it
		 *  NULL on failure.  In that event, we can't requeue.
		 */
		if (lem_xmit(adapter, &m_head)) {
			if (m_head == NULL)
				break;
			/* Requeue untouched mbuf and mark TX busy. */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		adapter->watchdog_check = TRUE;
		adapter->watchdog_time = ticks;
	}
	/* Throttle the stack if descriptors are running low. */
	if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	return;
}
891
892static void
893lem_start(struct ifnet *ifp)
894{
895	struct adapter *adapter = ifp->if_softc;
896
897	EM_TX_LOCK(adapter);
898	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
899		lem_start_locked(ifp);
900	EM_TX_UNLOCK(adapter);
901}
902
903/*********************************************************************
904 *  Ioctl entry point
905 *
906 *  em_ioctl is called when the user wants to configure the
907 *  interface.
908 *
909 *  return 0 on success, positive on failure
910 **********************************************************************/
911
static int
lem_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *)data;
#endif
	bool		avoid_reset = FALSE;
	int		error = 0;

	/* Ignore ioctls that race with device detach. */
	if (adapter->in_detach)
		return (error);

	switch (command) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				lem_init(adapter);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
	    {
		int max_frame_size;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

		EM_CORE_LOCK(adapter);
		switch (adapter->hw.mac.type) {
		case e1000_82542:
			/* 82542 has no jumbo frame support. */
			max_frame_size = ETHER_MAX_LEN;
			break;
		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			EM_CORE_UNLOCK(adapter);
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		adapter->max_frame_size =
		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
		/* Re-init so buffer sizing matches the new frame size. */
		lem_init_locked(adapter);
		EM_CORE_UNLOCK(adapter);
		break;
	    }
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd:\
		    SIOCSIFFLAGS (Set Interface Flags)");
		EM_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/*
				 * Only PROMISC/ALLMULTI changed: refresh
				 * the filters without a full re-init.
				 */
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					lem_disable_promisc(adapter);
					lem_set_promisc(adapter);
				}
			} else
				lem_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				EM_TX_LOCK(adapter);
				lem_stop(adapter);
				EM_TX_UNLOCK(adapter);
			}
		/* Remember current flags to detect future changes. */
		adapter->if_flags = ifp->if_flags;
		EM_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			EM_CORE_LOCK(adapter);
			lem_disable_intr(adapter);
			lem_set_multi(adapter);
			/* 82542 rev 2 needs the RX unit reprogrammed. */
			if (adapter->hw.mac.type == e1000_82542 &&
	    		    adapter->hw.revision_id == E1000_REVISION_2) {
				lem_initialize_receive_unit(adapter);
			}
#ifdef DEVICE_POLLING
			if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
				lem_enable_intr(adapter);
			EM_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		EM_CORE_LOCK(adapter);
		if (e1000_check_reset_block(&adapter->hw)) {
			EM_CORE_UNLOCK(adapter);
			device_printf(adapter->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		EM_CORE_UNLOCK(adapter);
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: \
		    SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask, reinit;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(lem_poll, ifp);
				if (error)
					return (error);
				EM_CORE_LOCK(adapter);
				lem_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				EM_CORE_LOCK(adapter);
				lem_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if ((mask & IFCAP_WOL) &&
		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
			if (mask & IFCAP_WOL_MCAST)
				ifp->if_capenable ^= IFCAP_WOL_MCAST;
			if (mask & IFCAP_WOL_MAGIC)
				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		}
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
			lem_init(adapter);
		VLAN_CAPABILITIES(ifp);
		break;
	    }

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
1089
1090
1091/*********************************************************************
1092 *  Init entry point
1093 *
1094 *  This routine is used in two ways. It is used by the stack as
1095 *  init entry point in network interface structure. It is also used
1096 *  by the driver as a hw/sw initialization routine to get to a
1097 *  consistent state.
1098 *
1099 *  return 0 on success, positive on failure
1100 **********************************************************************/
1101
static void
lem_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	u32		pba;

	INIT_DEBUGOUT("lem_init: begin");

	EM_CORE_LOCK_ASSERT(adapter);

	/* Quiesce the hardware before reprogramming it. */
	EM_TX_LOCK(adapter);
	lem_stop(adapter);
	EM_TX_UNLOCK(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 *   Note: default does not leave enough room for Jumbo Frame >10k.
	 */
	switch (adapter->hw.mac.type) {
	case e1000_82547:
	case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		/* State used by the 82547 TX FIFO hang workaround. */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size =
		    (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K.   */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}

	INIT_DEBUGOUT1("lem_init: pba=%dK",pba);
	E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);

	/* Get the latest mac address, User can use a LAA */
        bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
              ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	/* Initialize the hardware */
	if (lem_hardware_init(adapter)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		return;
	}
	lem_update_link_status(adapter);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);

	/* Set hardware offload abilities */
	ifp->if_hwassist = 0;
	if (adapter->hw.mac.type >= e1000_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
	}

	/* Configure for OS presence */
	lem_init_manageability(adapter);

	/* Prepare transmit descriptors and buffers */
	lem_setup_transmit_structures(adapter);
	lem_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	lem_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (lem_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		EM_TX_LOCK(adapter);
		lem_stop(adapter);
		EM_TX_UNLOCK(adapter);
		return;
	}
	lem_initialize_receive_unit(adapter);

	/* Use real VLAN Filter support? */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
			/* Use real VLAN Filter support */
			lem_setup_vlan_hw_support(adapter);
		else {
			/* Just enable VLAN tag stripping (VME). */
			u32 ctrl;
			ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
			ctrl |= E1000_CTRL_VME;
			E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
                }
	}

	/* Don't lose promiscuous settings */
	lem_set_promisc(adapter);

	/* Mark the interface up and able to accept transmits. */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
	e1000_clear_hw_cntrs_base_generic(&adapter->hw);

	/* MSI/X configuration for 82574 */
	if (adapter->hw.mac.type == e1000_82574) {
		int tmp;
		tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_PBA_CLR;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
		/*
		** Set the IVAR - interrupt vector routing.
		** Each nibble represents a vector, high bit
		** is enable, other 3 bits are the MSIX table
		** entry, we map RXQ0 to 0, TXQ0 to 1, and
		** Link (other) to 2, hence the magic number.
		*/
		E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
	}

#ifdef DEVICE_POLLING
	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		lem_disable_intr(adapter);
	else
#endif /* DEVICE_POLLING */
		lem_enable_intr(adapter);

	/* AMT based hardware can now take control from firmware */
	if (adapter->has_manage && adapter->has_amt)
		lem_get_hw_control(adapter);
}
1248
/*
 * if_init entry point: thin locked wrapper around lem_init_locked().
 */
static void
lem_init(void *arg)
{
	struct adapter *sc = arg;

	EM_CORE_LOCK(sc);
	lem_init_locked(sc);
	EM_CORE_UNLOCK(sc);
}
1258
1259
1260#ifdef DEVICE_POLLING
1261/*********************************************************************
1262 *
1263 *  Legacy polling routine
1264 *
1265 *********************************************************************/
static int
lem_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	u32		reg_icr, rx_done = 0;

	/* Bail if the interface was stopped after polling was scheduled. */
	EM_CORE_LOCK(adapter);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		EM_CORE_UNLOCK(adapter);
		return (rx_done);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		/* Reading ICR clears it; check for link state changes. */
		reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.mac.get_link_status = 1;
			lem_update_link_status(adapter);
			callout_reset(&adapter->timer, hz,
			    lem_local_timer, adapter);
		}
	}
	EM_CORE_UNLOCK(adapter);

	/* Reap up to 'count' received packets (core lock dropped). */
	lem_rxeof(adapter, count, &rx_done);

	/* Clean completed TX descriptors and restart transmission. */
	EM_TX_LOCK(adapter);
	lem_txeof(adapter);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		lem_start_locked(ifp);
	EM_TX_UNLOCK(adapter);
	return (rx_done);
}
1299#endif /* DEVICE_POLLING */
1300
1301#ifdef EM_LEGACY_IRQ
1302/*********************************************************************
1303 *
1304 *  Legacy Interrupt Service routine
1305 *
1306 *********************************************************************/
static void
lem_intr(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp = adapter->ifp;
	u32		reg_icr;


	/* When polling is active it owns the hardware; do nothing here. */
	if (ifp->if_capenable & IFCAP_POLLING)
		return;

	EM_CORE_LOCK(adapter);
	/* Reading ICR acknowledges and clears pending causes. */
	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
	if (reg_icr & E1000_ICR_RXO)
		adapter->rx_overruns++;

	/* All-ones: device gone (hot eject); zero: not our interrupt. */
	if ((reg_icr == 0xffffffff) || (reg_icr == 0))
			goto out;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			goto out;

	/* Link state change: refresh status before any more I/O. */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		callout_stop(&adapter->timer);
		adapter->hw.mac.get_link_status = 1;
		lem_update_link_status(adapter);
		/* Deal with TX cruft when link lost */
		lem_tx_purge(adapter);
		callout_reset(&adapter->timer, hz,
		    lem_local_timer, adapter);
		goto out;
	}

	/* Normal path: reap RX, clean TX, and restart transmission. */
	EM_TX_LOCK(adapter);
	lem_rxeof(adapter, -1, NULL);
	lem_txeof(adapter);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		lem_start_locked(ifp);
	EM_TX_UNLOCK(adapter);

out:
	EM_CORE_UNLOCK(adapter);
	return;
}
1352
1353#else /* EM_FAST_IRQ, then fast interrupt routines only */
1354
1355static void
1356lem_handle_link(void *context, int pending)
1357{
1358	struct adapter	*adapter = context;
1359	struct ifnet *ifp = adapter->ifp;
1360
1361	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1362		return;
1363
1364	EM_CORE_LOCK(adapter);
1365	callout_stop(&adapter->timer);
1366	lem_update_link_status(adapter);
1367	/* Deal with TX cruft when link lost */
1368	lem_tx_purge(adapter);
1369	callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1370	EM_CORE_UNLOCK(adapter);
1371}
1372
1373
1374/* Combined RX/TX handler, used by Legacy and MSI */
1375static void
1376lem_handle_rxtx(void *context, int pending)
1377{
1378	struct adapter	*adapter = context;
1379	struct ifnet	*ifp = adapter->ifp;
1380
1381
1382	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1383		lem_rxeof(adapter, adapter->rx_process_limit, NULL);
1384		EM_TX_LOCK(adapter);
1385		lem_txeof(adapter);
1386		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1387			lem_start_locked(ifp);
1388		EM_TX_UNLOCK(adapter);
1389	}
1390
1391	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1392		lem_enable_intr(adapter);
1393}
1394
1395/*********************************************************************
1396 *
1397 *  Fast Legacy/MSI Combined Interrupt Service routine
1398 *
1399 *********************************************************************/
1400static int
1401lem_irq_fast(void *arg)
1402{
1403	struct adapter	*adapter = arg;
1404	struct ifnet	*ifp;
1405	u32		reg_icr;
1406
1407	ifp = adapter->ifp;
1408
1409	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1410
1411	/* Hot eject?  */
1412	if (reg_icr == 0xffffffff)
1413		return FILTER_STRAY;
1414
1415	/* Definitely not our interrupt.  */
1416	if (reg_icr == 0x0)
1417		return FILTER_STRAY;
1418
1419	/*
1420	 * Mask interrupts until the taskqueue is finished running.  This is
1421	 * cheap, just assume that it is needed.  This also works around the
1422	 * MSI message reordering errata on certain systems.
1423	 */
1424	lem_disable_intr(adapter);
1425	taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1426
1427	/* Link status change */
1428	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1429		adapter->hw.mac.get_link_status = 1;
1430		taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1431	}
1432
1433	if (reg_icr & E1000_ICR_RXO)
1434		adapter->rx_overruns++;
1435	return FILTER_HANDLED;
1436}
1437#endif /* ~EM_LEGACY_IRQ */
1438
1439
1440/*********************************************************************
1441 *
1442 *  Media Ioctl callback
1443 *
1444 *  This routine is called whenever the user queries the status of
1445 *  the interface using ifconfig.
1446 *
1447 **********************************************************************/
1448static void
1449lem_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1450{
1451	struct adapter *adapter = ifp->if_softc;
1452	u_char fiber_type = IFM_1000_SX;
1453
1454	INIT_DEBUGOUT("lem_media_status: begin");
1455
1456	EM_CORE_LOCK(adapter);
1457	lem_update_link_status(adapter);
1458
1459	ifmr->ifm_status = IFM_AVALID;
1460	ifmr->ifm_active = IFM_ETHER;
1461
1462	if (!adapter->link_active) {
1463		EM_CORE_UNLOCK(adapter);
1464		return;
1465	}
1466
1467	ifmr->ifm_status |= IFM_ACTIVE;
1468
1469	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1470	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1471		if (adapter->hw.mac.type == e1000_82545)
1472			fiber_type = IFM_1000_LX;
1473		ifmr->ifm_active |= fiber_type | IFM_FDX;
1474	} else {
1475		switch (adapter->link_speed) {
1476		case 10:
1477			ifmr->ifm_active |= IFM_10_T;
1478			break;
1479		case 100:
1480			ifmr->ifm_active |= IFM_100_TX;
1481			break;
1482		case 1000:
1483			ifmr->ifm_active |= IFM_1000_T;
1484			break;
1485		}
1486		if (adapter->link_duplex == FULL_DUPLEX)
1487			ifmr->ifm_active |= IFM_FDX;
1488		else
1489			ifmr->ifm_active |= IFM_HDX;
1490	}
1491	EM_CORE_UNLOCK(adapter);
1492}
1493
1494/*********************************************************************
1495 *
1496 *  Media Ioctl callback
1497 *
1498 *  This routine is called when the user changes speed/duplex using
1499 *  media/mediopt option with ifconfig.
1500 *
1501 **********************************************************************/
1502static int
1503lem_media_change(struct ifnet *ifp)
1504{
1505	struct adapter *adapter = ifp->if_softc;
1506	struct ifmedia  *ifm = &adapter->media;
1507
1508	INIT_DEBUGOUT("lem_media_change: begin");
1509
1510	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1511		return (EINVAL);
1512
1513	EM_CORE_LOCK(adapter);
1514	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1515	case IFM_AUTO:
1516		adapter->hw.mac.autoneg = DO_AUTO_NEG;
1517		adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1518		break;
1519	case IFM_1000_LX:
1520	case IFM_1000_SX:
1521	case IFM_1000_T:
1522		adapter->hw.mac.autoneg = DO_AUTO_NEG;
1523		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
1524		break;
1525	case IFM_100_TX:
1526		adapter->hw.mac.autoneg = FALSE;
1527		adapter->hw.phy.autoneg_advertised = 0;
1528		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1529			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1530		else
1531			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
1532		break;
1533	case IFM_10_T:
1534		adapter->hw.mac.autoneg = FALSE;
1535		adapter->hw.phy.autoneg_advertised = 0;
1536		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1537			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1538		else
1539			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1540		break;
1541	default:
1542		device_printf(adapter->dev, "Unsupported media type\n");
1543	}
1544
1545	lem_init_locked(adapter);
1546	EM_CORE_UNLOCK(adapter);
1547
1548	return (0);
1549}
1550
1551/*********************************************************************
1552 *
1553 *  This routine maps the mbufs to tx descriptors.
1554 *
1555 *  return 0 on success, positive on failure
1556 **********************************************************************/
1557
static int
lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
{
	bus_dma_segment_t	segs[EM_MAX_SCATTER];
	bus_dmamap_t		map;
	struct em_buffer	*tx_buffer, *tx_buffer_mapped;
	struct e1000_tx_desc	*ctxd = NULL;
	struct mbuf		*m_head;
	u32			txd_upper, txd_lower, txd_used, txd_saved;
	int			error, nsegs, i, j, first, last = 0;

	m_head = *m_headp;
	txd_upper = txd_lower = txd_used = txd_saved = 0;

	/*
	** When doing checksum offload, it is critical to
	** make sure the first mbuf has more than header,
	** because that routine expects data to be present.
	*/
	if ((m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) &&
	    (m_head->m_len < ETHER_HDR_LEN + sizeof(struct ip))) {
		m_head = m_pullup(m_head, ETHER_HDR_LEN + sizeof(struct ip));
		*m_headp = m_head;
		if (m_head == NULL)
			return (ENOBUFS);
	}

	/*
	 * Map the packet for DMA
	 *
	 * Capture the first descriptor index,
	 * this descriptor will have the index
	 * of the EOP which is the only one that
	 * now gets a DONE bit writeback.
	 */
	first = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

	/*
	 * There are two types of errors we can (try) to handle:
	 * - EFBIG means the mbuf chain was too long and bus_dma ran
	 *   out of segments.  Defragment the mbuf chain and try again.
	 * - ENOMEM means bus_dma could not obtain enough bounce buffers
	 *   at this point in time.  Defer sending and try again later.
	 * All other errors, in particular EINVAL, are fatal and prevent the
	 * mbuf chain from ever going through.  Drop it and report error.
	 */
	if (error == EFBIG) {
		struct mbuf *m;

		m = m_defrag(*m_headp, M_DONTWAIT);
		if (m == NULL) {
			adapter->mbuf_alloc_failed++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (ENOBUFS);
		}
		*m_headp = m;

		/* Try it again */
		error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

		if (error) {
			adapter->no_tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	} else if (error != 0) {
		/* e.g. ENOMEM: mbuf is left intact so the caller can requeue. */
		adapter->no_tx_dma_setup++;
		return (error);
	}

	/* Keep two descriptors of slack so the ring never becomes full. */
        if (nsegs > (adapter->num_tx_desc_avail - 2)) {
                adapter->no_tx_desc_avail2++;
		bus_dmamap_unload(adapter->txtag, map);
		return (ENOBUFS);
        }
	m_head = *m_headp;

	/* Do hardware assists */
	if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
		lem_transmit_checksum_setup(adapter,  m_head,
		    &txd_upper, &txd_lower);

	i = adapter->next_avail_tx_desc;
	/* Remember the start index so a mid-packet failure can roll back. */
	if (adapter->pcix_82544)
		txd_saved = i;

	/* Set up our transmit descriptors */
	for (j = 0; j < nsegs; j++) {
		bus_size_t seg_len;
		bus_addr_t seg_addr;
		/* If adapter is 82544 and on PCIX bus */
		if(adapter->pcix_82544) {
			DESC_ARRAY	desc_array;
			u32		array_elements, counter;
			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
			array_elements = lem_fill_descriptors(segs[j].ds_addr,
			    segs[j].ds_len, &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				if (txd_used == adapter->num_tx_desc_avail) {
					/* Ring exhausted: roll back and requeue. */
					adapter->next_avail_tx_desc = txd_saved;
					adapter->no_tx_desc_avail2++;
					bus_dmamap_unload(adapter->txtag, map);
					return (ENOBUFS);
				}
				tx_buffer = &adapter->tx_buffer_area[i];
				ctxd = &adapter->tx_desc_base[i];
				ctxd->buffer_addr = htole64(
				    desc_array.descriptor[counter].address);
				ctxd->lower.data = htole32(
				    (adapter->txd_cmd | txd_lower | (u16)
				    desc_array.descriptor[counter].length));
				ctxd->upper.data =
				    htole32((txd_upper));
				last = i;
				if (++i == adapter->num_tx_desc)
					i = 0;
				tx_buffer->m_head = NULL;
				tx_buffer->next_eop = -1;
				txd_used++;
			}
		} else {
			tx_buffer = &adapter->tx_buffer_area[i];
			ctxd = &adapter->tx_desc_base[i];
			seg_addr = segs[j].ds_addr;
			seg_len  = segs[j].ds_len;
			ctxd->buffer_addr = htole64(seg_addr);
			ctxd->lower.data = htole32(
			adapter->txd_cmd | txd_lower | seg_len);
			ctxd->upper.data =
			    htole32(txd_upper);
			last = i;
			if (++i == adapter->num_tx_desc)
				i = 0;
			tx_buffer->m_head = NULL;
			tx_buffer->next_eop = -1;
		}
	}

	adapter->next_avail_tx_desc = i;

	if (adapter->pcix_82544)
		adapter->num_tx_desc_avail -= txd_used;
	else
		adapter->num_tx_desc_avail -= nsegs;

	if (m_head->m_flags & M_VLANTAG) {
		/* Set the vlan id. */
		ctxd->upper.fields.special =
		    htole16(m_head->m_pkthdr.ether_vtag);
                /* Tell hardware to add tag */
                ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
        }

        tx_buffer->m_head = m_head;
	/* Swap maps so the first buffer keeps the map loaded for this mbuf. */
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;
        bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

        /*
         * Last Descriptor of Packet
	 * needs End Of Packet (EOP)
	 * and Report Status (RS)
         */
        ctxd->lower.data |=
	    htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
	/*
	 * Keep track in the first buffer which
	 * descriptor will be written back
	 */
	tx_buffer = &adapter->tx_buffer_area[first];
	tx_buffer->next_eop = last;
	adapter->watchdog_time = ticks;

	/*
	 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
	 * that this frame is available to transmit.
	 */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* 82547 half-duplex uses the FIFO hang workaround path. */
	if (adapter->hw.mac.type == e1000_82547 &&
	    adapter->link_duplex == HALF_DUPLEX)
		lem_82547_move_tail(adapter);
	else {
		E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
		if (adapter->hw.mac.type == e1000_82547)
			lem_82547_update_fifo_head(adapter,
			    m_head->m_pkthdr.len);
	}

	return (0);
}
1761
1762/*********************************************************************
1763 *
1764 * 82547 workaround to avoid controller hang in half-duplex environment.
1765 * The workaround is to avoid queuing a large packet that would span
1766 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1767 * in this case. We do that only when FIFO is quiescent.
1768 *
1769 **********************************************************************/
static void
lem_82547_move_tail(void *arg)
{
	struct adapter *adapter = arg;
	struct e1000_tx_desc *tx_desc;
	u16	hw_tdt, sw_tdt, length = 0;
	bool	eop = 0;

	EM_TX_LOCK_ASSERT(adapter);

	/*
	 * Walk the descriptors between the hardware tail and the
	 * software tail one packet at a time, only advancing the
	 * hardware tail when the packet will not wrap the TX FIFO.
	 */
	hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
	sw_tdt = adapter->next_avail_tx_desc;

	while (hw_tdt != sw_tdt) {
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if (++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if (eop) {
			if (lem_82547_fifo_workaround(adapter, length)) {
				/* FIFO not ready: retry from the callout. */
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
					lem_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
			lem_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}
}
1803
1804static int
1805lem_82547_fifo_workaround(struct adapter *adapter, int len)
1806{
1807	int fifo_space, fifo_pkt_len;
1808
1809	fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1810
1811	if (adapter->link_duplex == HALF_DUPLEX) {
1812		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1813
1814		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1815			if (lem_82547_tx_fifo_reset(adapter))
1816				return (0);
1817			else
1818				return (1);
1819		}
1820	}
1821
1822	return (0);
1823}
1824
1825static void
1826lem_82547_update_fifo_head(struct adapter *adapter, int len)
1827{
1828	int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1829
1830	/* tx_fifo_head is always 16 byte aligned */
1831	adapter->tx_fifo_head += fifo_pkt_len;
1832	if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1833		adapter->tx_fifo_head -= adapter->tx_fifo_size;
1834	}
1835}
1836
1837
1838static int
1839lem_82547_tx_fifo_reset(struct adapter *adapter)
1840{
1841	u32 tctl;
1842
1843	if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
1844	    E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
1845	    (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
1846	    E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
1847	    (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
1848	    E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
1849	    (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
1850		/* Disable TX unit */
1851		tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
1852		E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
1853		    tctl & ~E1000_TCTL_EN);
1854
1855		/* Reset FIFO pointers */
1856		E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
1857		    adapter->tx_head_addr);
1858		E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
1859		    adapter->tx_head_addr);
1860		E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
1861		    adapter->tx_head_addr);
1862		E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
1863		    adapter->tx_head_addr);
1864
1865		/* Re-enable TX unit */
1866		E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
1867		E1000_WRITE_FLUSH(&adapter->hw);
1868
1869		adapter->tx_fifo_head = 0;
1870		adapter->tx_fifo_reset_cnt++;
1871
1872		return (TRUE);
1873	}
1874	else {
1875		return (FALSE);
1876	}
1877}
1878
1879static void
1880lem_set_promisc(struct adapter *adapter)
1881{
1882	struct ifnet	*ifp = adapter->ifp;
1883	u32		reg_rctl;
1884
1885	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1886
1887	if (ifp->if_flags & IFF_PROMISC) {
1888		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1889		/* Turn this on if you want to see bad packets */
1890		if (lem_debug_sbp)
1891			reg_rctl |= E1000_RCTL_SBP;
1892		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1893	} else if (ifp->if_flags & IFF_ALLMULTI) {
1894		reg_rctl |= E1000_RCTL_MPE;
1895		reg_rctl &= ~E1000_RCTL_UPE;
1896		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1897	}
1898}
1899
1900static void
1901lem_disable_promisc(struct adapter *adapter)
1902{
1903	u32	reg_rctl;
1904
1905	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1906
1907	reg_rctl &=  (~E1000_RCTL_UPE);
1908	reg_rctl &=  (~E1000_RCTL_MPE);
1909	reg_rctl &=  (~E1000_RCTL_SBP);
1910	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1911}
1912
1913
1914/*********************************************************************
1915 *  Multicast Update
1916 *
1917 *  This routine is called whenever multicast address list is updated.
1918 *
1919 **********************************************************************/
1920
static void
lem_set_multi(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	struct ifmultiaddr *ifma;
	u32 reg_rctl = 0;
	u8  *mta; /* Multicast array memory */
	int mcnt = 0;

	IOCTL_DEBUGOUT("lem_set_multi: begin");

	mta = adapter->mta;
	bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

	/*
	 * 82542 rev 2 requires the receiver to be held in reset (and
	 * MWI disabled) while the multicast table is rewritten.
	 */
	if (adapter->hw.mac.type == e1000_82542 &&
	    adapter->hw.revision_id == E1000_REVISION_2) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			e1000_pci_clear_mwi(&adapter->hw);
		reg_rctl |= E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
		msec_delay(5);
	}

#if __FreeBSD_version < 800000
	IF_ADDR_LOCK(ifp);
#else
	if_maddr_rlock(ifp);
#endif
	/* Collect the link-layer multicast addresses into a flat array. */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
		mcnt++;
	}
#if __FreeBSD_version < 800000
	IF_ADDR_UNLOCK(ifp);
#else
	if_maddr_runlock(ifp);
#endif
	/* Too many groups for the hardware table: fall back to allmulti. */
	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
	} else
		e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);

	/* Take the 82542 rev 2 receiver back out of reset. */
	if (adapter->hw.mac.type == e1000_82542 &&
	    adapter->hw.revision_id == E1000_REVISION_2) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		reg_rctl &= ~E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
		msec_delay(5);
		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			e1000_pci_set_mwi(&adapter->hw);
	}
}
1983
1984
1985/*********************************************************************
1986 *  Timer routine
1987 *
1988 *  This routine checks for link status and updates statistics.
1989 *
1990 **********************************************************************/
1991
1992static void
1993lem_local_timer(void *arg)
1994{
1995	struct adapter	*adapter = arg;
1996
1997	EM_CORE_LOCK_ASSERT(adapter);
1998
1999	lem_update_link_status(adapter);
2000	lem_update_stats_counters(adapter);
2001
2002	lem_smartspeed(adapter);
2003
2004	/*
2005	 * We check the watchdog: the time since
2006	 * the last TX descriptor was cleaned.
2007	 * This implies a functional TX engine.
2008	 */
2009	if ((adapter->watchdog_check == TRUE) &&
2010	    (ticks - adapter->watchdog_time > EM_WATCHDOG))
2011		goto hung;
2012
2013	callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
2014	return;
2015hung:
2016	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2017	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2018	adapter->watchdog_events++;
2019	lem_init_locked(adapter);
2020}
2021
/*
 * Refresh the link state and, on an up/down transition, update the
 * cached speed/duplex, the ifnet baudrate, the watchdog arm state,
 * and notify the stack.  Called from lem_local_timer() with the core
 * lock held.
 */
static void
lem_update_link_status(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	u32 link_check = 0;

	/* Get the cached link value or read phy for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			/* Link is up once the shared code clears this flag. */
			link_check = !hw->mac.get_link_status;
			if (link_check) /* ESB2 fix */
				e1000_cfg_on_link_up(hw);
		} else
			link_check = TRUE;
		break;
	case e1000_media_type_fiber:
		/* Fiber reports link through the STATUS register LU bit. */
		e1000_check_for_link(hw);
		link_check = (E1000_READ_REG(hw, E1000_STATUS) &
                                 E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = adapter->hw.mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	/* Now check for a transition */
	if (link_check && (adapter->link_active == 0)) {
		/* Link came up: cache speed/duplex and tell the stack. */
		e1000_get_speed_and_duplex(hw, &adapter->link_speed,
		    &adapter->link_duplex);
		if (bootverbose)
			device_printf(dev, "Link is up %d Mbps %s\n",
			    adapter->link_speed,
			    ((adapter->link_duplex == FULL_DUPLEX) ?
			    "Full Duplex" : "Half Duplex"));
		adapter->link_active = 1;
		adapter->smartspeed = 0;
		ifp->if_baudrate = adapter->link_speed * 1000000;
		if_link_state_change(ifp, LINK_STATE_UP);
	} else if (!link_check && (adapter->link_active == 1)) {
		/* Link went down: clear cached state and the watchdog. */
		ifp->if_baudrate = adapter->link_speed = 0;
		adapter->link_duplex = 0;
		if (bootverbose)
			device_printf(dev, "Link is Down\n");
		adapter->link_active = 0;
		/* Link down, disable watchdog */
		adapter->watchdog_check = FALSE;
		if_link_state_change(ifp, LINK_STATE_DOWN);
	}
}
2080
2081/*********************************************************************
2082 *
2083 *  This routine disables all traffic on the adapter by issuing a
2084 *  global reset on the MAC and deallocates TX/RX buffers.
2085 *
2086 *  This routine should always be called with BOTH the CORE
2087 *  and TX locks.
2088 **********************************************************************/
2089
2090static void
2091lem_stop(void *arg)
2092{
2093	struct adapter	*adapter = arg;
2094	struct ifnet	*ifp = adapter->ifp;
2095
2096	EM_CORE_LOCK_ASSERT(adapter);
2097	EM_TX_LOCK_ASSERT(adapter);
2098
2099	INIT_DEBUGOUT("lem_stop: begin");
2100
2101	lem_disable_intr(adapter);
2102	callout_stop(&adapter->timer);
2103	callout_stop(&adapter->tx_fifo_timer);
2104
2105	/* Tell the stack that the interface is no longer active */
2106	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2107
2108	e1000_reset_hw(&adapter->hw);
2109	if (adapter->hw.mac.type >= e1000_82544)
2110		E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2111
2112	e1000_led_off(&adapter->hw);
2113	e1000_cleanup_led(&adapter->hw);
2114}
2115
2116
2117/*********************************************************************
2118 *
2119 *  Determine hardware revision.
2120 *
2121 **********************************************************************/
2122static void
2123lem_identify_hardware(struct adapter *adapter)
2124{
2125	device_t dev = adapter->dev;
2126
2127	/* Make sure our PCI config space has the necessary stuff set */
2128	adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2129	if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
2130	    (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
2131		device_printf(dev, "Memory Access and/or Bus Master bits "
2132		    "were not set!\n");
2133		adapter->hw.bus.pci_cmd_word |=
2134		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
2135		pci_write_config(dev, PCIR_COMMAND,
2136		    adapter->hw.bus.pci_cmd_word, 2);
2137	}
2138
2139	/* Save off the information about this board */
2140	adapter->hw.vendor_id = pci_get_vendor(dev);
2141	adapter->hw.device_id = pci_get_device(dev);
2142	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2143	adapter->hw.subsystem_vendor_id =
2144	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
2145	adapter->hw.subsystem_device_id =
2146	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
2147
2148	/* Do Shared Code Init and Setup */
2149	if (e1000_set_mac_type(&adapter->hw)) {
2150		device_printf(dev, "Setup init failure\n");
2151		return;
2152	}
2153}
2154
/*
 * Allocate the PCI resources the driver needs: the memory-mapped
 * register BAR and, for MACs newer than 82543, the IO-space BAR.
 * NOTE(review): failure paths do not unwind earlier allocations;
 * presumably the caller invokes lem_free_pci_resources() — confirm
 * against the attach path.
 */
static int
lem_allocate_pci_resources(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	int		val, rid, error = E1000_SUCCESS;

	/* BAR 0 is the memory-mapped register window. */
	rid = PCIR_BAR(0);
	adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (adapter->memory == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->memory);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->memory);
	/* Shared code register accesses go through this handle. */
	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;

	/* Only older adapters use IO mapping */
	if (adapter->hw.mac.type > e1000_82543) {
		/* Figure our where our IO BAR is ? */
		/* Scan config-space BARs for one of IO type. */
		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
			val = pci_read_config(dev, rid, 4);
			if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
				adapter->io_rid = rid;
				break;
			}
			rid += 4;
			/* check for 64bit BAR */
			/* 64-bit memory BARs occupy two dwords. */
			if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
				rid += 4;
		}
		if (rid >= PCIR_CIS) {
			device_printf(dev, "Unable to locate IO BAR\n");
			return (ENXIO);
		}
		adapter->ioport = bus_alloc_resource_any(dev,
		    SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
		if (adapter->ioport == NULL) {
			device_printf(dev, "Unable to allocate bus resource: "
			    "ioport\n");
			return (ENXIO);
		}
		adapter->hw.io_base = 0;
		adapter->osdep.io_bus_space_tag =
		    rman_get_bustag(adapter->ioport);
		adapter->osdep.io_bus_space_handle =
		    rman_get_bushandle(adapter->ioport);
	}

	/* Give the shared code a back-pointer to the OS glue. */
	adapter->hw.back = &adapter->osdep;

	return (error);
}
2210
2211/*********************************************************************
2212 *
2213 *  Setup the Legacy or MSI Interrupt handler
2214 *
2215 **********************************************************************/
2216int
2217lem_allocate_irq(struct adapter *adapter)
2218{
2219	device_t dev = adapter->dev;
2220	int error, rid = 0;
2221
2222	/* Manually turn off all interrupts */
2223	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2224
2225	/* We allocate a single interrupt resource */
2226	adapter->res[0] = bus_alloc_resource_any(dev,
2227	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2228	if (adapter->res[0] == NULL) {
2229		device_printf(dev, "Unable to allocate bus resource: "
2230		    "interrupt\n");
2231		return (ENXIO);
2232	}
2233
2234#ifdef EM_LEGACY_IRQ
2235	/* We do Legacy setup */
2236	if ((error = bus_setup_intr(dev, adapter->res[0],
2237	    INTR_TYPE_NET | INTR_MPSAFE, NULL, lem_intr, adapter,
2238	    &adapter->tag[0])) != 0) {
2239		device_printf(dev, "Failed to register interrupt handler");
2240		return (error);
2241	}
2242
2243#else /* FAST_IRQ */
2244	/*
2245	 * Try allocating a fast interrupt and the associated deferred
2246	 * processing contexts.
2247	 */
2248	TASK_INIT(&adapter->rxtx_task, 0, lem_handle_rxtx, adapter);
2249	TASK_INIT(&adapter->link_task, 0, lem_handle_link, adapter);
2250	adapter->tq = taskqueue_create_fast("lem_taskq", M_NOWAIT,
2251	    taskqueue_thread_enqueue, &adapter->tq);
2252	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2253	    device_get_nameunit(adapter->dev));
2254	if ((error = bus_setup_intr(dev, adapter->res[0],
2255	    INTR_TYPE_NET, lem_irq_fast, NULL, adapter,
2256	    &adapter->tag[0])) != 0) {
2257		device_printf(dev, "Failed to register fast interrupt "
2258			    "handler: %d\n", error);
2259		taskqueue_free(adapter->tq);
2260		adapter->tq = NULL;
2261		return (error);
2262	}
2263#endif  /* EM_LEGACY_IRQ */
2264
2265	return (0);
2266}
2267
2268
2269static void
2270lem_free_pci_resources(struct adapter *adapter)
2271{
2272	device_t dev = adapter->dev;
2273
2274
2275	if (adapter->tag[0] != NULL) {
2276		bus_teardown_intr(dev, adapter->res[0],
2277		    adapter->tag[0]);
2278		adapter->tag[0] = NULL;
2279	}
2280
2281	if (adapter->res[0] != NULL) {
2282		bus_release_resource(dev, SYS_RES_IRQ,
2283		    0, adapter->res[0]);
2284	}
2285
2286	if (adapter->memory != NULL)
2287		bus_release_resource(dev, SYS_RES_MEMORY,
2288		    PCIR_BAR(0), adapter->memory);
2289
2290	if (adapter->ioport != NULL)
2291		bus_release_resource(dev, SYS_RES_IOPORT,
2292		    adapter->io_rid, adapter->ioport);
2293}
2294
2295
2296/*********************************************************************
2297 *
2298 *  Initialize the hardware to a configuration
2299 *  as specified by the adapter structure.
2300 *
2301 **********************************************************************/
2302static int
2303lem_hardware_init(struct adapter *adapter)
2304{
2305	device_t dev = adapter->dev;
2306	u16 	rx_buffer_size;
2307
2308	INIT_DEBUGOUT("lem_hardware_init: begin");
2309
2310	/* Issue a global reset */
2311	e1000_reset_hw(&adapter->hw);
2312
2313	/* When hardware is reset, fifo_head is also reset */
2314	adapter->tx_fifo_head = 0;
2315
2316	/*
2317	 * These parameters control the automatic generation (Tx) and
2318	 * response (Rx) to Ethernet PAUSE frames.
2319	 * - High water mark should allow for at least two frames to be
2320	 *   received after sending an XOFF.
2321	 * - Low water mark works best when it is very near the high water mark.
2322	 *   This allows the receiver to restart by sending XON when it has
2323	 *   drained a bit. Here we use an arbitary value of 1500 which will
2324	 *   restart after one full frame is pulled from the buffer. There
2325	 *   could be several smaller frames in the buffer and if so they will
2326	 *   not trigger the XON until their total number reduces the buffer
2327	 *   by 1500.
2328	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2329	 */
2330	rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
2331	    0xffff) << 10 );
2332
2333	adapter->hw.fc.high_water = rx_buffer_size -
2334	    roundup2(adapter->max_frame_size, 1024);
2335	adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
2336
2337	adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
2338	adapter->hw.fc.send_xon = TRUE;
2339
2340        /* Set Flow control, use the tunable location if sane */
2341        if ((lem_fc_setting >= 0) && (lem_fc_setting < 4))
2342                adapter->hw.fc.requested_mode = lem_fc_setting;
2343        else
2344                adapter->hw.fc.requested_mode = e1000_fc_none;
2345
2346	if (e1000_init_hw(&adapter->hw) < 0) {
2347		device_printf(dev, "Hardware Initialization Failed\n");
2348		return (EIO);
2349	}
2350
2351	e1000_check_for_link(&adapter->hw);
2352
2353	return (0);
2354}
2355
2356/*********************************************************************
2357 *
2358 *  Setup networking device structure and register an interface.
2359 *
2360 **********************************************************************/
2361static int
2362lem_setup_interface(device_t dev, struct adapter *adapter)
2363{
2364	struct ifnet   *ifp;
2365
2366	INIT_DEBUGOUT("lem_setup_interface: begin");
2367
2368	ifp = adapter->ifp = if_alloc(IFT_ETHER);
2369	if (ifp == NULL) {
2370		device_printf(dev, "can not allocate ifnet structure\n");
2371		return (-1);
2372	}
2373	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2374	ifp->if_init =  lem_init;
2375	ifp->if_softc = adapter;
2376	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2377	ifp->if_ioctl = lem_ioctl;
2378	ifp->if_start = lem_start;
2379	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
2380	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
2381	IFQ_SET_READY(&ifp->if_snd);
2382
2383	ether_ifattach(ifp, adapter->hw.mac.addr);
2384
2385	ifp->if_capabilities = ifp->if_capenable = 0;
2386
2387	if (adapter->hw.mac.type >= e1000_82543) {
2388		ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2389		ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2390	}
2391
2392	/*
2393	 * Tell the upper layer(s) we support long frames.
2394	 */
2395	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2396	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2397	ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2398
2399	/*
2400	** Dont turn this on by default, if vlans are
2401	** created on another pseudo device (eg. lagg)
2402	** then vlan events are not passed thru, breaking
2403	** operation, but with HW FILTER off it works. If
2404	** using vlans directly on the em driver you can
2405	** enable this and get full hardware tag filtering.
2406	*/
2407	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2408
2409#ifdef DEVICE_POLLING
2410	ifp->if_capabilities |= IFCAP_POLLING;
2411#endif
2412
2413	/* Enable only WOL MAGIC by default */
2414	if (adapter->wol) {
2415		ifp->if_capabilities |= IFCAP_WOL;
2416		ifp->if_capenable |= IFCAP_WOL_MAGIC;
2417	}
2418
2419	/*
2420	 * Specify the media types supported by this adapter and register
2421	 * callbacks to update media and link information
2422	 */
2423	ifmedia_init(&adapter->media, IFM_IMASK,
2424	    lem_media_change, lem_media_status);
2425	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2426	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
2427		u_char fiber_type = IFM_1000_SX;	/* default type */
2428
2429		if (adapter->hw.mac.type == e1000_82545)
2430			fiber_type = IFM_1000_LX;
2431		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
2432			    0, NULL);
2433		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
2434	} else {
2435		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2436		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2437			    0, NULL);
2438		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
2439			    0, NULL);
2440		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
2441			    0, NULL);
2442		if (adapter->hw.phy.type != e1000_phy_ife) {
2443			ifmedia_add(&adapter->media,
2444				IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2445			ifmedia_add(&adapter->media,
2446				IFM_ETHER | IFM_1000_T, 0, NULL);
2447		}
2448	}
2449	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2450	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2451	return (0);
2452}
2453
2454
2455/*********************************************************************
2456 *
2457 *  Workaround for SmartSpeed on 82541 and 82547 controllers
2458 *
2459 **********************************************************************/
2460static void
2461lem_smartspeed(struct adapter *adapter)
2462{
2463	u16 phy_tmp;
2464
2465	if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
2466	    adapter->hw.mac.autoneg == 0 ||
2467	    (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
2468		return;
2469
2470	if (adapter->smartspeed == 0) {
2471		/* If Master/Slave config fault is asserted twice,
2472		 * we assume back-to-back */
2473		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2474		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2475			return;
2476		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2477		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2478			e1000_read_phy_reg(&adapter->hw,
2479			    PHY_1000T_CTRL, &phy_tmp);
2480			if(phy_tmp & CR_1000T_MS_ENABLE) {
2481				phy_tmp &= ~CR_1000T_MS_ENABLE;
2482				e1000_write_phy_reg(&adapter->hw,
2483				    PHY_1000T_CTRL, phy_tmp);
2484				adapter->smartspeed++;
2485				if(adapter->hw.mac.autoneg &&
2486				   !e1000_copper_link_autoneg(&adapter->hw) &&
2487				   !e1000_read_phy_reg(&adapter->hw,
2488				    PHY_CONTROL, &phy_tmp)) {
2489					phy_tmp |= (MII_CR_AUTO_NEG_EN |
2490						    MII_CR_RESTART_AUTO_NEG);
2491					e1000_write_phy_reg(&adapter->hw,
2492					    PHY_CONTROL, phy_tmp);
2493				}
2494			}
2495		}
2496		return;
2497	} else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2498		/* If still no link, perhaps using 2/3 pair cable */
2499		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2500		phy_tmp |= CR_1000T_MS_ENABLE;
2501		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2502		if(adapter->hw.mac.autoneg &&
2503		   !e1000_copper_link_autoneg(&adapter->hw) &&
2504		   !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
2505			phy_tmp |= (MII_CR_AUTO_NEG_EN |
2506				    MII_CR_RESTART_AUTO_NEG);
2507			e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
2508		}
2509	}
2510	/* Restart process after EM_SMARTSPEED_MAX iterations */
2511	if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2512		adapter->smartspeed = 0;
2513}
2514
2515
2516/*
2517 * Manage DMA'able memory.
2518 */
2519static void
2520lem_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2521{
2522	if (error)
2523		return;
2524	*(bus_addr_t *) arg = segs[0].ds_addr;
2525}
2526
2527static int
2528lem_dma_malloc(struct adapter *adapter, bus_size_t size,
2529        struct em_dma_alloc *dma, int mapflags)
2530{
2531	int error;
2532
2533	error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
2534				EM_DBA_ALIGN, 0,	/* alignment, bounds */
2535				BUS_SPACE_MAXADDR,	/* lowaddr */
2536				BUS_SPACE_MAXADDR,	/* highaddr */
2537				NULL, NULL,		/* filter, filterarg */
2538				size,			/* maxsize */
2539				1,			/* nsegments */
2540				size,			/* maxsegsize */
2541				0,			/* flags */
2542				NULL,			/* lockfunc */
2543				NULL,			/* lockarg */
2544				&dma->dma_tag);
2545	if (error) {
2546		device_printf(adapter->dev,
2547		    "%s: bus_dma_tag_create failed: %d\n",
2548		    __func__, error);
2549		goto fail_0;
2550	}
2551
2552	error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2553	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
2554	if (error) {
2555		device_printf(adapter->dev,
2556		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
2557		    __func__, (uintmax_t)size, error);
2558		goto fail_2;
2559	}
2560
2561	dma->dma_paddr = 0;
2562	error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2563	    size, lem_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
2564	if (error || dma->dma_paddr == 0) {
2565		device_printf(adapter->dev,
2566		    "%s: bus_dmamap_load failed: %d\n",
2567		    __func__, error);
2568		goto fail_3;
2569	}
2570
2571	return (0);
2572
2573fail_3:
2574	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2575fail_2:
2576	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2577	bus_dma_tag_destroy(dma->dma_tag);
2578fail_0:
2579	dma->dma_map = NULL;
2580	dma->dma_tag = NULL;
2581
2582	return (error);
2583}
2584
/*
 * Release a DMA area set up by lem_dma_malloc().  Safe on a structure
 * that was never (fully) allocated: a NULL tag means nothing to do,
 * and a NULL map skips the sync/unload/free steps.
 */
static void
lem_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
{
	if (dma->dma_tag == NULL)
		return;
	if (dma->dma_map != NULL) {
		/* Complete any in-flight DMA before freeing the memory. */
		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
		dma->dma_map = NULL;
	}
	bus_dma_tag_destroy(dma->dma_tag);
	dma->dma_tag = NULL;
}
2600
2601
2602/*********************************************************************
2603 *
2604 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2605 *  the information needed to transmit a packet on the wire.
2606 *
2607 **********************************************************************/
2608static int
2609lem_allocate_transmit_structures(struct adapter *adapter)
2610{
2611	int i;
2612	device_t dev = adapter->dev;
2613	struct em_buffer *tx_buffer;
2614	int error;
2615
2616	/*
2617	 * Create DMA tags for tx descriptors
2618	 */
2619	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2620				1, 0,			/* alignment, bounds */
2621				BUS_SPACE_MAXADDR,	/* lowaddr */
2622				BUS_SPACE_MAXADDR,	/* highaddr */
2623				NULL, NULL,		/* filter, filterarg */
2624				MCLBYTES * EM_MAX_SCATTER,	/* maxsize */
2625				EM_MAX_SCATTER,		/* nsegments */
2626				MCLBYTES,		/* maxsegsize */
2627				0,			/* flags */
2628				NULL,			/* lockfunc */
2629				NULL,			/* lockarg */
2630				&adapter->txtag)) != 0) {
2631		device_printf(dev, "Unable to allocate TX DMA tag\n");
2632		goto fail;
2633	}
2634
2635	adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
2636	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
2637	if (adapter->tx_buffer_area == NULL) {
2638		device_printf(dev, "Unable to allocate tx_buffer memory\n");
2639		error = ENOMEM;
2640		goto fail;
2641	}
2642
2643	/* Create the descriptor buffer dma maps */
2644	for (i = 0; i < adapter->num_tx_desc; i++) {
2645		tx_buffer = &adapter->tx_buffer_area[i];
2646		error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
2647		if (error != 0) {
2648			device_printf(dev, "Unable to create TX DMA map\n");
2649			goto fail;
2650		}
2651		tx_buffer->next_eop = -1;
2652	}
2653
2654	return (0);
2655fail:
2656	lem_free_transmit_structures(adapter);
2657	return (error);
2658}
2659
2660/*********************************************************************
2661 *
2662 *  (Re)Initialize transmit structures.
2663 *
2664 **********************************************************************/
2665static void
2666lem_setup_transmit_structures(struct adapter *adapter)
2667{
2668	int i;
2669	struct em_buffer *tx_buffer;
2670#ifdef DEV_NETMAP
2671	/* we are already locked */
2672	struct netmap_adapter *na = NA(adapter->ifp);
2673	struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0);
2674#endif /* DEV_NETMAP */
2675
2676	/* Clear the old ring contents */
2677	bzero(adapter->tx_desc_base,
2678	    (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
2679
2680	/* Free any existing TX buffers */
2681	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2682		tx_buffer = &adapter->tx_buffer_area[i];
2683		bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2684		    BUS_DMASYNC_POSTWRITE);
2685		bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2686		m_freem(tx_buffer->m_head);
2687		tx_buffer->m_head = NULL;
2688#ifdef DEV_NETMAP
2689		if (slot) {
2690			/* the i-th NIC entry goes to slot si */
2691			int si = netmap_idx_n2k(&na->tx_rings[0], i);
2692			uint64_t paddr;
2693			void *addr;
2694
2695			addr = PNMB(slot + si, &paddr);
2696			adapter->tx_desc_base[si].buffer_addr = htole64(paddr);
2697			/* reload the map for netmap mode */
2698			netmap_load_map(adapter->txtag, tx_buffer->map, addr);
2699		}
2700#endif /* DEV_NETMAP */
2701		tx_buffer->next_eop = -1;
2702	}
2703
2704	/* Reset state */
2705	adapter->last_hw_offload = 0;
2706	adapter->next_avail_tx_desc = 0;
2707	adapter->next_tx_to_clean = 0;
2708	adapter->num_tx_desc_avail = adapter->num_tx_desc;
2709
2710	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2711	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2712
2713	return;
2714}
2715
2716/*********************************************************************
2717 *
2718 *  Enable transmit unit.
2719 *
2720 **********************************************************************/
2721static void
2722lem_initialize_transmit_unit(struct adapter *adapter)
2723{
2724	u32	tctl, tipg = 0;
2725	u64	bus_addr;
2726
2727	 INIT_DEBUGOUT("lem_initialize_transmit_unit: begin");
2728	/* Setup the Base and Length of the Tx Descriptor Ring */
2729	bus_addr = adapter->txdma.dma_paddr;
2730	E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
2731	    adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
2732	E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
2733	    (u32)(bus_addr >> 32));
2734	E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
2735	    (u32)bus_addr);
2736	/* Setup the HW Tx Head and Tail descriptor pointers */
2737	E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
2738	E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
2739
2740	HW_DEBUGOUT2("Base = %x, Length = %x\n",
2741	    E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
2742	    E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));
2743
2744	/* Set the default values for the Tx Inter Packet Gap timer */
2745	switch (adapter->hw.mac.type) {
2746	case e1000_82542:
2747		tipg = DEFAULT_82542_TIPG_IPGT;
2748		tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2749		tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2750		break;
2751	default:
2752		if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2753		    (adapter->hw.phy.media_type ==
2754		    e1000_media_type_internal_serdes))
2755			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2756		else
2757			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2758		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2759		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2760	}
2761
2762	E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
2763	E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
2764	if(adapter->hw.mac.type >= e1000_82540)
2765		E1000_WRITE_REG(&adapter->hw, E1000_TADV,
2766		    adapter->tx_abs_int_delay.value);
2767
2768	/* Program the Transmit Control Register */
2769	tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2770	tctl &= ~E1000_TCTL_CT;
2771	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2772		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2773
2774	/* This write will effectively turn on the transmit unit. */
2775	E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2776
2777	/* Setup Transmit Descriptor Base Settings */
2778	adapter->txd_cmd = E1000_TXD_CMD_IFCS;
2779
2780	if (adapter->tx_int_delay.value > 0)
2781		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2782}
2783
2784/*********************************************************************
2785 *
2786 *  Free all transmit related data structures.
2787 *
2788 **********************************************************************/
2789static void
2790lem_free_transmit_structures(struct adapter *adapter)
2791{
2792	struct em_buffer *tx_buffer;
2793
2794	INIT_DEBUGOUT("free_transmit_structures: begin");
2795
2796	if (adapter->tx_buffer_area != NULL) {
2797		int i;
2798		for (i = 0; i < adapter->num_tx_desc; i++) {
2799			tx_buffer = &adapter->tx_buffer_area[i];
2800			if (tx_buffer->m_head != NULL) {
2801				bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2802				    BUS_DMASYNC_POSTWRITE);
2803				bus_dmamap_unload(adapter->txtag,
2804				    tx_buffer->map);
2805				m_freem(tx_buffer->m_head);
2806				tx_buffer->m_head = NULL;
2807			} else if (tx_buffer->map != NULL)
2808				bus_dmamap_unload(adapter->txtag,
2809				    tx_buffer->map);
2810			if (tx_buffer->map != NULL) {
2811				bus_dmamap_destroy(adapter->txtag,
2812				    tx_buffer->map);
2813				tx_buffer->map = NULL;
2814			}
2815		}
2816	}
2817	if (adapter->tx_buffer_area != NULL) {
2818		free(adapter->tx_buffer_area, M_DEVBUF);
2819		adapter->tx_buffer_area = NULL;
2820	}
2821	if (adapter->txtag != NULL) {
2822		bus_dma_tag_destroy(adapter->txtag);
2823		adapter->txtag = NULL;
2824	}
2825#ifndef __HAIKU__
2826#if __FreeBSD_version >= 800000
2827	if (adapter->br != NULL)
2828        	buf_ring_free(adapter->br, M_DEVBUF);
2829#endif
2830#endif
2831}
2832
2833/*********************************************************************
2834 *
2835 *  The offload context needs to be set when we transfer the first
2836 *  packet of a particular protocol (TCP/UDP). This routine has been
2837 *  enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
2838 *
2839 *  Added back the old method of keeping the current context type
2840 *  and not setting if unnecessary, as this is reported to be a
2841 *  big performance win.  -jfv
2842 **********************************************************************/
2843static void
2844lem_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
2845    u32 *txd_upper, u32 *txd_lower)
2846{
2847	struct e1000_context_desc *TXD = NULL;
2848	struct em_buffer *tx_buffer;
2849	struct ether_vlan_header *eh;
2850	struct ip *ip = NULL;
2851	struct ip6_hdr *ip6;
2852	int curr_txd, ehdrlen;
2853	u32 cmd, hdr_len, ip_hlen;
2854	u16 etype;
2855	u8 ipproto;
2856
2857
2858	cmd = hdr_len = ipproto = 0;
2859	*txd_upper = *txd_lower = 0;
2860	curr_txd = adapter->next_avail_tx_desc;
2861
2862	/*
2863	 * Determine where frame payload starts.
2864	 * Jump over vlan headers if already present,
2865	 * helpful for QinQ too.
2866	 */
2867	eh = mtod(mp, struct ether_vlan_header *);
2868	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2869		etype = ntohs(eh->evl_proto);
2870		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2871	} else {
2872		etype = ntohs(eh->evl_encap_proto);
2873		ehdrlen = ETHER_HDR_LEN;
2874	}
2875
2876	/*
2877	 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
2878	 * TODO: Support SCTP too when it hits the tree.
2879	 */
2880	switch (etype) {
2881	case ETHERTYPE_IP:
2882		ip = (struct ip *)(mp->m_data + ehdrlen);
2883		ip_hlen = ip->ip_hl << 2;
2884
2885		/* Setup of IP header checksum. */
2886		if (mp->m_pkthdr.csum_flags & CSUM_IP) {
2887			/*
2888			 * Start offset for header checksum calculation.
2889			 * End offset for header checksum calculation.
2890			 * Offset of place to put the checksum.
2891			 */
2892			TXD = (struct e1000_context_desc *)
2893			    &adapter->tx_desc_base[curr_txd];
2894			TXD->lower_setup.ip_fields.ipcss = ehdrlen;
2895			TXD->lower_setup.ip_fields.ipcse =
2896			    htole16(ehdrlen + ip_hlen);
2897			TXD->lower_setup.ip_fields.ipcso =
2898			    ehdrlen + offsetof(struct ip, ip_sum);
2899			cmd |= E1000_TXD_CMD_IP;
2900			*txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2901		}
2902
2903		hdr_len = ehdrlen + ip_hlen;
2904		ipproto = ip->ip_p;
2905
2906		break;
2907	case ETHERTYPE_IPV6:
2908		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2909		ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
2910
2911		/* IPv6 doesn't have a header checksum. */
2912
2913		hdr_len = ehdrlen + ip_hlen;
2914		ipproto = ip6->ip6_nxt;
2915		break;
2916
2917	default:
2918		return;
2919	}
2920
2921	switch (ipproto) {
2922	case IPPROTO_TCP:
2923		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2924			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2925			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2926			/* no need for context if already set */
2927			if (adapter->last_hw_offload == CSUM_TCP)
2928				return;
2929			adapter->last_hw_offload = CSUM_TCP;
2930			/*
2931			 * Start offset for payload checksum calculation.
2932			 * End offset for payload checksum calculation.
2933			 * Offset of place to put the checksum.
2934			 */
2935			TXD = (struct e1000_context_desc *)
2936			    &adapter->tx_desc_base[curr_txd];
2937			TXD->upper_setup.tcp_fields.tucss = hdr_len;
2938			TXD->upper_setup.tcp_fields.tucse = htole16(0);
2939			TXD->upper_setup.tcp_fields.tucso =
2940			    hdr_len + offsetof(struct tcphdr, th_sum);
2941			cmd |= E1000_TXD_CMD_TCP;
2942		}
2943		break;
2944	case IPPROTO_UDP:
2945	{
2946		if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2947			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2948			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2949			/* no need for context if already set */
2950			if (adapter->last_hw_offload == CSUM_UDP)
2951				return;
2952			adapter->last_hw_offload = CSUM_UDP;
2953			/*
2954			 * Start offset for header checksum calculation.
2955			 * End offset for header checksum calculation.
2956			 * Offset of place to put the checksum.
2957			 */
2958			TXD = (struct e1000_context_desc *)
2959			    &adapter->tx_desc_base[curr_txd];
2960			TXD->upper_setup.tcp_fields.tucss = hdr_len;
2961			TXD->upper_setup.tcp_fields.tucse = htole16(0);
2962			TXD->upper_setup.tcp_fields.tucso =
2963			    hdr_len + offsetof(struct udphdr, uh_sum);
2964		}
2965		/* Fall Thru */
2966	}
2967	default:
2968		break;
2969	}
2970
2971	if (TXD == NULL)
2972		return;
2973	TXD->tcp_seg_setup.data = htole32(0);
2974	TXD->cmd_and_length =
2975	    htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
2976	tx_buffer = &adapter->tx_buffer_area[curr_txd];
2977	tx_buffer->m_head = NULL;
2978	tx_buffer->next_eop = -1;
2979
2980	if (++curr_txd == adapter->num_tx_desc)
2981		curr_txd = 0;
2982
2983	adapter->num_tx_desc_avail--;
2984	adapter->next_avail_tx_desc = curr_txd;
2985}
2986
2987
2988/**********************************************************************
2989 *
2990 *  Examine each tx_buffer in the used queue. If the hardware is done
2991 *  processing the packet then free associated resources. The
2992 *  tx_buffer is put back on the free queue.
2993 *
2994 **********************************************************************/
static void
lem_txeof(struct adapter *adapter)
{
        int first, last, done, num_avail;
        struct em_buffer *tx_buffer;
        struct e1000_tx_desc   *tx_desc, *eop_desc;
	struct ifnet   *ifp = adapter->ifp;

	EM_TX_LOCK_ASSERT(adapter);

#ifdef DEV_NETMAP
	/* In netmap mode just wake the client; it reclaims the ring itself. */
	if (ifp->if_capenable & IFCAP_NETMAP) {
		selwakeuppri(&NA(ifp)->tx_rings[0].si, PI_NET);
		return;
	}
#endif /* DEV_NETMAP */
	/* Ring already fully reclaimed: nothing to clean. */
        if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
                return;

        num_avail = adapter->num_tx_desc_avail;
        first = adapter->next_tx_to_clean;
        tx_desc = &adapter->tx_desc_base[first];
        tx_buffer = &adapter->tx_buffer_area[first];
	/*
	 * NOTE(review): next_eop may be -1 here (context descriptors are
	 * queued with next_eop = -1), in which case eop_desc below indexes
	 * entry -1 of the ring -- confirm "first" always starts a packet.
	 */
	last = tx_buffer->next_eop;
        eop_desc = &adapter->tx_desc_base[last];

	/*
	 * What this does is get the index of the
	 * first descriptor AFTER the EOP of the
	 * first packet, that way we can do the
	 * simple comparison on the inner while loop.
	 */
	if (++last == adapter->num_tx_desc)
 		last = 0;
	done = last;

	/* Pick up descriptor status written back by the hardware. */
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_POSTREAD);

	/* Outer loop: one completed packet (DD set on its EOP) per pass. */
        while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
		/* We clean the range of the packet */
		while (first != done) {
                	tx_desc->upper.data = 0;
                	tx_desc->lower.data = 0;
                	tx_desc->buffer_addr = 0;
                	++num_avail;

			if (tx_buffer->m_head) {
				ifp->if_opackets++;
				bus_dmamap_sync(adapter->txtag,
				    tx_buffer->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(adapter->txtag,
				    tx_buffer->map);

                        	m_freem(tx_buffer->m_head);
                        	tx_buffer->m_head = NULL;
                	}
			tx_buffer->next_eop = -1;
			/* Progress was made: push the watchdog forward. */
			adapter->watchdog_time = ticks;

	                if (++first == adapter->num_tx_desc)
				first = 0;

	                tx_buffer = &adapter->tx_buffer_area[first];
			tx_desc = &adapter->tx_desc_base[first];
		}
		/* See if we can continue to the next packet */
		last = tx_buffer->next_eop;
		if (last != -1) {
        		eop_desc = &adapter->tx_desc_base[last];
			/* Get new done point */
			if (++last == adapter->num_tx_desc) last = 0;
			done = last;
		} else
			break;
        }
	/* Hand the zeroed descriptors back to the hardware. */
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        adapter->next_tx_to_clean = first;
        adapter->num_tx_desc_avail = num_avail;

        /*
         * If we have enough room, clear IFF_DRV_OACTIVE to
         * tell the stack that it is OK to send packets.
         * If there are no pending descriptors, clear the watchdog.
         */
        if (adapter->num_tx_desc_avail > EM_TX_CLEANUP_THRESHOLD) {
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                if (adapter->num_tx_desc_avail == adapter->num_tx_desc) {
			adapter->watchdog_check = FALSE;
			return;
		}
        }
}
3091
3092/*********************************************************************
3093 *
3094 *  When Link is lost sometimes there is work still in the TX ring
3095 *  which may result in a watchdog, rather than allow that we do an
3096 *  attempted cleanup and then reinit here. Note that this has been
 *  seen mostly with fiber adapters.
3098 *
3099 **********************************************************************/
3100static void
3101lem_tx_purge(struct adapter *adapter)
3102{
3103	if ((!adapter->link_active) && (adapter->watchdog_check)) {
3104		EM_TX_LOCK(adapter);
3105		lem_txeof(adapter);
3106		EM_TX_UNLOCK(adapter);
3107		if (adapter->watchdog_check) /* Still outstanding? */
3108			lem_init_locked(adapter);
3109	}
3110}
3111
3112/*********************************************************************
3113 *
3114 *  Get a buffer from system mbuf buffer pool.
3115 *
3116 **********************************************************************/
static int
lem_get_buf(struct adapter *adapter, int i)
{
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	struct em_buffer	*rx_buffer;
	int			error, nsegs;

	/* Get a fresh cluster mbuf for descriptor slot i. */
	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		adapter->mbuf_cluster_failed++;
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/* Shift by ETHER_ALIGN to align the IP header, when it still fits. */
	if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
		m_adj(m, ETHER_ALIGN);

	/*
	 * Using memory from the mbuf cluster pool, invoke the
	 * bus_dma machinery to arrange the memory mapping.
	 * Load into the spare map first so slot i stays intact on failure.
	 */
	error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
	    adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		m_free(m);
		return (error);
	}

	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1, ("Too many segments returned!"));

	rx_buffer = &adapter->rx_buffer_area[i];
	if (rx_buffer->m_head != NULL)
		bus_dmamap_unload(adapter->rxtag, rx_buffer->map);

	/* Swap the loaded spare map into slot i; the old map becomes spare. */
	map = rx_buffer->map;
	rx_buffer->map = adapter->rx_sparemap;
	adapter->rx_sparemap = map;
	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
	rx_buffer->m_head = m;

	/* Point the descriptor at the new cluster's bus address. */
	adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
	return (0);
}
3163
3164/*********************************************************************
3165 *
3166 *  Allocate memory for rx_buffer structures. Since we use one
3167 *  rx_buffer per received packet, the maximum number of rx_buffer's
3168 *  that we'll need is equal to the number of receive descriptors
3169 *  that we've allocated.
3170 *
3171 **********************************************************************/
3172static int
3173lem_allocate_receive_structures(struct adapter *adapter)
3174{
3175	device_t dev = adapter->dev;
3176	struct em_buffer *rx_buffer;
3177	int i, error;
3178
3179	adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
3180	    adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3181	if (adapter->rx_buffer_area == NULL) {
3182		device_printf(dev, "Unable to allocate rx_buffer memory\n");
3183		return (ENOMEM);
3184	}
3185
3186	error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3187				1, 0,			/* alignment, bounds */
3188				BUS_SPACE_MAXADDR,	/* lowaddr */
3189				BUS_SPACE_MAXADDR,	/* highaddr */
3190				NULL, NULL,		/* filter, filterarg */
3191				MCLBYTES,		/* maxsize */
3192				1,			/* nsegments */
3193				MCLBYTES,		/* maxsegsize */
3194				0,			/* flags */
3195				NULL,			/* lockfunc */
3196				NULL,			/* lockarg */
3197				&adapter->rxtag);
3198	if (error) {
3199		device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
3200		    __func__, error);
3201		goto fail;
3202	}
3203
3204	/* Create the spare map (used by getbuf) */
3205	error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3206	     &adapter->rx_sparemap);
3207	if (error) {
3208		device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3209		    __func__, error);
3210		goto fail;
3211	}
3212
3213	rx_buffer = adapter->rx_buffer_area;
3214	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3215		error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3216		    &rx_buffer->map);
3217		if (error) {
3218			device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3219			    __func__, error);
3220			goto fail;
3221		}
3222	}
3223
3224	return (0);
3225
3226fail:
3227	lem_free_receive_structures(adapter);
3228	return (error);
3229}
3230
3231/*********************************************************************
3232 *
3233 *  (Re)initialize receive structures.
3234 *
3235 **********************************************************************/
3236static int
3237lem_setup_receive_structures(struct adapter *adapter)
3238{
3239	struct em_buffer *rx_buffer;
3240	int i, error;
3241#ifdef DEV_NETMAP
3242	/* we are already under lock */
3243	struct netmap_adapter *na = NA(adapter->ifp);
3244	struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
3245#endif
3246
3247	/* Reset descriptor ring */
3248	bzero(adapter->rx_desc_base,
3249	    (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
3250
3251	/* Free current RX buffers. */
3252	rx_buffer = adapter->rx_buffer_area;
3253	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3254		if (rx_buffer->m_head != NULL) {
3255			bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3256			    BUS_DMASYNC_POSTREAD);
3257			bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3258			m_freem(rx_buffer->m_head);
3259			rx_buffer->m_head = NULL;
3260		}
3261        }
3262
3263	/* Allocate new ones. */
3264	for (i = 0; i < adapter->num_rx_desc; i++) {
3265#ifdef DEV_NETMAP
3266		if (slot) {
3267			/* the i-th NIC entry goes to slot si */
3268			int si = netmap_idx_n2k(&na->rx_rings[0], i);
3269			uint64_t paddr;
3270			void *addr;
3271
3272			addr = PNMB(slot + si, &paddr);
3273			netmap_load_map(adapter->rxtag, rx_buffer->map, addr);
3274			/* Update descriptor */
3275			adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
3276			continue;
3277		}
3278#endif /* DEV_NETMAP */
3279		error = lem_get_buf(adapter, i);
3280		if (error)
3281                        return (error);
3282	}
3283
3284	/* Setup our descriptor pointers */
3285	adapter->next_rx_desc_to_check = 0;
3286	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3287	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3288
3289	return (0);
3290}
3291
3292/*********************************************************************
3293 *
3294 *  Enable receive unit.
3295 *
3296 **********************************************************************/
3297#define MAX_INTS_PER_SEC	8000
3298#define DEFAULT_ITR	     1000000000/(MAX_INTS_PER_SEC * 256)
3299
static void
lem_initialize_receive_unit(struct adapter *adapter)
{
	int i;
	struct ifnet	*ifp = adapter->ifp;
	u64	bus_addr;
	u32	rctl, rxcsum;

	INIT_DEBUGOUT("lem_initialize_receive_unit: begin");

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	/* Interrupt moderation registers exist on 82540 and later only. */
	if (adapter->hw.mac.type >= e1000_82540) {
		E1000_WRITE_REG(&adapter->hw, E1000_RADV,
		    adapter->rx_abs_int_delay.value);
		/*
		 * Set the interrupt throttling rate. Value is calculated
		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
		 */
		E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
	}

	/*
	** When using MSIX interrupts we need to throttle
	** using the EITR register (82574 only)
	*/
	if (adapter->msix)
		for (i = 0; i < 4; i++)
			E1000_WRITE_REG(&adapter->hw,
			    E1000_EITR_82574(i), DEFAULT_ITR);

	/* Disable accelerated acknowledge */
	if (adapter->hw.mac.type == e1000_82574)
		E1000_WRITE_REG(&adapter->hw,
		    E1000_RFCTL, E1000_RFCTL_ACK_DIS);

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
	    adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
	    (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
	    (u32)bus_addr);

	/* Setup the Receive Control Register */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN Filters are off */
	rctl &= ~E1000_RCTL_VFE;

	/* TBI workaround may require storing bad packets. */
	if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	/* Buffer sizes above 2048 require the BSEX (size extension) bit. */
	switch (adapter->rx_buffer_len) {
	default:
	case 2048:
		rctl |= E1000_RCTL_SZ_2048;
		break;
	case 4096:
		rctl |= E1000_RCTL_SZ_4096 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 8192:
		rctl |= E1000_RCTL_SZ_8192 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 16384:
		rctl |= E1000_RCTL_SZ_16384 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	/* Long Packet Enable follows the configured MTU. */
	if (ifp->if_mtu > ETHERMTU)
		rctl |= E1000_RCTL_LPE;
	else
		rctl &= ~E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac.type >= e1000_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
	}

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);

	/*
	 * Setup the HW Rx Head and
	 * Tail Descriptor Pointers
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
#ifdef DEV_NETMAP
	/* preserve buffers already made available to clients */
	if (ifp->if_capenable & IFCAP_NETMAP) {
		struct netmap_adapter *na = NA(adapter->ifp);
		struct netmap_kring *kring = &na->rx_rings[0];
		int t = na->num_rx_desc - 1 - kring->nr_hwavail;

		if (t >= na->num_rx_desc)
			t -= na->num_rx_desc;
		E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), t);
	} else
#endif /* DEV_NETMAP */
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);

	return;
}
3420
3421/*********************************************************************
3422 *
3423 *  Free receive related data structures.
3424 *
3425 **********************************************************************/
3426static void
3427lem_free_receive_structures(struct adapter *adapter)
3428{
3429	struct em_buffer *rx_buffer;
3430	int i;
3431
3432	INIT_DEBUGOUT("free_receive_structures: begin");
3433
3434	if (adapter->rx_sparemap) {
3435		bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
3436		adapter->rx_sparemap = NULL;
3437	}
3438
3439	/* Cleanup any existing buffers */
3440	if (adapter->rx_buffer_area != NULL) {
3441		rx_buffer = adapter->rx_buffer_area;
3442		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3443			if (rx_buffer->m_head != NULL) {
3444				bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3445				    BUS_DMASYNC_POSTREAD);
3446				bus_dmamap_unload(adapter->rxtag,
3447				    rx_buffer->map);
3448				m_freem(rx_buffer->m_head);
3449				rx_buffer->m_head = NULL;
3450			} else if (rx_buffer->map != NULL)
3451				bus_dmamap_unload(adapter->rxtag,
3452				    rx_buffer->map);
3453			if (rx_buffer->map != NULL) {
3454				bus_dmamap_destroy(adapter->rxtag,
3455				    rx_buffer->map);
3456				rx_buffer->map = NULL;
3457			}
3458		}
3459	}
3460
3461	if (adapter->rx_buffer_area != NULL) {
3462		free(adapter->rx_buffer_area, M_DEVBUF);
3463		adapter->rx_buffer_area = NULL;
3464	}
3465
3466	if (adapter->rxtag != NULL) {
3467		bus_dma_tag_destroy(adapter->rxtag);
3468		adapter->rxtag = NULL;
3469	}
3470}
3471
3472/*********************************************************************
3473 *
3474 *  This routine executes in interrupt context. It replenishes
3475 *  the mbufs in the descriptor and sends data which has been
3476 *  dma'ed into host memory to upper layer.
3477 *
3478 *  We loop at most count times if count is > 0, or until done if
3479 *  count < 0.
3480 *
3481 *  For polling we also now return the number of cleaned packets
3482 *********************************************************************/
static bool
lem_rxeof(struct adapter *adapter, int count, int *done)
{
	struct ifnet	*ifp = adapter->ifp;
	struct mbuf	*mp;
	u8		status = 0, accept_frame = 0, eop = 0;
	u16 		len, desc_len, prev_len_adj;
	int		i, rx_sent = 0;
	struct e1000_rx_desc   *current_desc;

	EM_RX_LOCK(adapter);
	i = adapter->next_rx_desc_to_check;
	current_desc = &adapter->rx_desc_base[i];
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

#ifdef DEV_NETMAP
	/* Netmap mode: just wake the client; it consumes the ring itself. */
	if (ifp->if_capenable & IFCAP_NETMAP) {
		struct netmap_adapter *na = NA(ifp);
		na->rx_rings[0].nr_kflags |= NKR_PENDINTR;
		selwakeuppri(&na->rx_rings[0].si, PI_NET);
		EM_RX_UNLOCK(adapter);
		return (0);
	}
#endif /* DEV_NETMAP */

	/* No descriptor done yet: nothing received. */
	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
		if (done != NULL)
			*done = rx_sent;
		EM_RX_UNLOCK(adapter);
		return (FALSE);
	}

	while (count != 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) {
		struct mbuf *m = NULL;

		status = current_desc->status;
		if ((status & E1000_RXD_STAT_DD) == 0)
			break;

		mp = adapter->rx_buffer_area[i].m_head;
		/*
		 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
		 * needs to access the last received byte in the mbuf.
		 */
		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
		    BUS_DMASYNC_POSTREAD);

		accept_frame = 1;
		prev_len_adj = 0;
		desc_len = le16toh(current_desc->length);
		if (status & E1000_RXD_STAT_EOP) {
			/* Last descriptor of a frame: strip the CRC. */
			count--;
			eop = 1;
			if (desc_len < ETHER_CRC_LEN) {
				/* CRC spilled into the previous descriptor. */
				len = 0;
				prev_len_adj = ETHER_CRC_LEN - desc_len;
			} else
				len = desc_len - ETHER_CRC_LEN;
		} else {
			eop = 0;
			len = desc_len;
		}

		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			u8	last_byte;
			u32	pkt_len = desc_len;

			if (adapter->fmp != NULL)
				pkt_len += adapter->fmp->m_pkthdr.len;

			/* TBI workaround: some error-flagged frames may
			 * still be acceptable (shared-code decision). */
			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
			if (TBI_ACCEPT(&adapter->hw, status,
			    current_desc->errors, pkt_len, last_byte,
			    adapter->min_frame_size, adapter->max_frame_size)) {
				e1000_tbi_adjust_stats_82543(&adapter->hw,
				    &adapter->stats, pkt_len,
				    adapter->hw.mac.addr,
				    adapter->max_frame_size);
				if (len > 0)
					len--;
			} else
				accept_frame = 0;
		}

		if (accept_frame) {
			/* Refill the slot; on failure recycle the mbuf. */
			if (lem_get_buf(adapter, i) != 0) {
				ifp->if_iqdrops++;
				goto discard;
			}

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (adapter->fmp == NULL) {
				mp->m_pkthdr.len = len;
				adapter->fmp = mp; /* Store the first mbuf */
				adapter->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				/*
				 * Adjust length of previous mbuf in chain if
				 * we received less than 4 bytes in the last
				 * descriptor.
				 */
				if (prev_len_adj > 0) {
					adapter->lmp->m_len -= prev_len_adj;
					adapter->fmp->m_pkthdr.len -=
					    prev_len_adj;
				}
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				/* Complete frame: finish it and pass it up. */
				adapter->fmp->m_pkthdr.rcvif = ifp;
				ifp->if_ipackets++;
				lem_receive_checksum(adapter, current_desc,
				    adapter->fmp);
#ifndef __NO_STRICT_ALIGNMENT
				if (adapter->max_frame_size >
				    (MCLBYTES - ETHER_ALIGN) &&
				    lem_fixup_rx(adapter) != 0)
					goto skip;
#endif
				if (status & E1000_RXD_STAT_VP) {
					adapter->fmp->m_pkthdr.ether_vtag =
					    le16toh(current_desc->special);
					adapter->fmp->m_flags |= M_VLANTAG;
				}
#ifndef __NO_STRICT_ALIGNMENT
skip:
#endif
				m = adapter->fmp;
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
		} else {
			ifp->if_ierrors++;
discard:
			/* Reuse loaded DMA map and just update mbuf chain */
			mp = adapter->rx_buffer_area[i].m_head;
			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
			mp->m_data = mp->m_ext.ext_buf;
			mp->m_next = NULL;
			if (adapter->max_frame_size <=
			    (MCLBYTES - ETHER_ALIGN))
				m_adj(mp, ETHER_ALIGN);
			if (adapter->fmp != NULL) {
				m_freem(adapter->fmp);
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
			m = NULL;
		}

		/* Zero out the receive descriptors status. */
		current_desc->status = 0;
		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor. */
		if (++i == adapter->num_rx_desc)
			i = 0;
		/* Call into the stack (RX lock dropped across if_input) */
		if (m != NULL) {
			adapter->next_rx_desc_to_check = i;
			EM_RX_UNLOCK(adapter);
			(*ifp->if_input)(ifp, m);
			EM_RX_LOCK(adapter);
			rx_sent++;
			/* Re-read: state may have moved while unlocked. */
			i = adapter->next_rx_desc_to_check;
		}
		current_desc = &adapter->rx_desc_base[i];
	}
	adapter->next_rx_desc_to_check = i;

	/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
	if (--i < 0)
		i = adapter->num_rx_desc - 1;
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
	if (done != NULL)
		*done = rx_sent;
	EM_RX_UNLOCK(adapter);
	return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE);
}
3671
3672#ifndef __NO_STRICT_ALIGNMENT
3673/*
 * When jumbo frames are enabled we should realign the entire payload on
 * architectures with strict alignment. This is a serious design mistake of the
 * 8254x as it nullifies DMA operations. The 8254x only allows the RX buffer
 * size to be 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN
 * to align its payload. On architectures without strict alignment restrictions
 * the 8254x still performs unaligned memory access which would reduce the
 * performance too. To avoid copying over an entire frame to align, we allocate
 * a new mbuf and copy the ethernet header to the new mbuf. The new mbuf is
 * prepended into the existing mbuf chain.
 *
 * Be aware, best performance of the 8254x is achieved only when jumbo frame is
 * not used at all on architectures with strict alignment.
3686 */
static int
lem_fixup_rx(struct adapter *adapter)
{
	struct mbuf *m, *n;
	int error;

	error = 0;
	m = adapter->fmp;
	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		/* Room in the cluster: slide the frame forward by the
		 * header length (bcopy handles the overlap). */
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
	} else {
		/* No room: move the Ethernet header into a freshly
		 * allocated mbuf prepended to the chain. */
		MGETHDR(n, M_DONTWAIT, MT_DATA);
		if (n != NULL) {
			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
			m->m_data += ETHER_HDR_LEN;
			m->m_len -= ETHER_HDR_LEN;
			n->m_len = ETHER_HDR_LEN;
			/* Transfer the packet header to the new head. */
			M_MOVE_PKTHDR(n, m);
			n->m_next = m;
			adapter->fmp = n;
		} else {
			/* Allocation failed: drop the whole frame. */
			adapter->dropped_pkts++;
			m_freem(adapter->fmp);
			adapter->fmp = NULL;
			error = ENOMEM;
		}
	}

	return (error);
}
3718#endif
3719
3720/*********************************************************************
3721 *
3722 *  Verify that the hardware indicated that the checksum is valid.
3723 *  Inform the stack about the status of checksum so that stack
3724 *  doesn't spend time verifying the checksum.
3725 *
3726 *********************************************************************/
3727static void
3728lem_receive_checksum(struct adapter *adapter,
3729	    struct e1000_rx_desc *rx_desc, struct mbuf *mp)
3730{
3731	/* 82543 or newer only */
3732	if ((adapter->hw.mac.type < e1000_82543) ||
3733	    /* Ignore Checksum bit is set */
3734	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3735		mp->m_pkthdr.csum_flags = 0;
3736		return;
3737	}
3738
3739	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3740		/* Did it pass? */
3741		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3742			/* IP Checksum Good */
3743			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3744			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3745
3746		} else {
3747			mp->m_pkthdr.csum_flags = 0;
3748		}
3749	}
3750
3751	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3752		/* Did it pass? */
3753		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3754			mp->m_pkthdr.csum_flags |=
3755			(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3756			mp->m_pkthdr.csum_data = htons(0xffff);
3757		}
3758	}
3759}
3760
3761/*
3762 * This routine is run via an vlan
3763 * config EVENT
3764 */
3765static void
3766lem_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3767{
3768	struct adapter	*adapter = ifp->if_softc;
3769	u32		index, bit;
3770
3771	if (ifp->if_softc !=  arg)   /* Not our event */
3772		return;
3773
3774	if ((vtag == 0) || (vtag > 4095))       /* Invalid ID */
3775                return;
3776
3777	EM_CORE_LOCK(adapter);
3778	index = (vtag >> 5) & 0x7F;
3779	bit = vtag & 0x1F;
3780	adapter->shadow_vfta[index] |= (1 << bit);
3781	++adapter->num_vlans;
3782	/* Re-init to load the changes */
3783	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3784		lem_init_locked(adapter);
3785	EM_CORE_UNLOCK(adapter);
3786}
3787
3788/*
3789 * This routine is run via an vlan
3790 * unconfig EVENT
3791 */
3792static void
3793lem_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3794{
3795	struct adapter	*adapter = ifp->if_softc;
3796	u32		index, bit;
3797
3798	if (ifp->if_softc !=  arg)
3799		return;
3800
3801	if ((vtag == 0) || (vtag > 4095))       /* Invalid */
3802                return;
3803
3804	EM_CORE_LOCK(adapter);
3805	index = (vtag >> 5) & 0x7F;
3806	bit = vtag & 0x1F;
3807	adapter->shadow_vfta[index] &= ~(1 << bit);
3808	--adapter->num_vlans;
3809	/* Re-init to load the changes */
3810	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3811		lem_init_locked(adapter);
3812	EM_CORE_UNLOCK(adapter);
3813}
3814
3815static void
3816lem_setup_vlan_hw_support(struct adapter *adapter)
3817{
3818	int i;
3819	struct e1000_hw *hw = &adapter->hw;
3820	u32             reg;
3821
3822	/*
3823	** We get here thru init_locked, meaning
3824	** a soft reset, this has already cleared
3825	** the VFTA and other state, so if there
3826	** have been no vlan's registered do nothing.
3827	*/
3828	if (adapter->num_vlans == 0)
3829                return;
3830
3831	/*
3832	** A soft reset zero's out the VFTA, so
3833	** we need to repopulate it now.
3834	*/
3835	for (i = 0; i < EM_VFTA_SIZE; i++)
3836                if (adapter->shadow_vfta[i] != 0)
3837			E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
3838                            i, adapter->shadow_vfta[i]);
3839
3840	reg = E1000_READ_REG(hw, E1000_CTRL);
3841	reg |= E1000_CTRL_VME;
3842	E1000_WRITE_REG(hw, E1000_CTRL, reg);
3843
3844	/* Enable the Filter Table */
3845	reg = E1000_READ_REG(hw, E1000_RCTL);
3846	reg &= ~E1000_RCTL_CFIEN;
3847	reg |= E1000_RCTL_VFE;
3848	E1000_WRITE_REG(hw, E1000_RCTL, reg);
3849
3850	/* Update the frame size */
3851	E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
3852	    adapter->max_frame_size + VLAN_TAG_SIZE);
3853}
3854
3855static void
3856lem_enable_intr(struct adapter *adapter)
3857{
3858	struct e1000_hw *hw = &adapter->hw;
3859	u32 ims_mask = IMS_ENABLE_MASK;
3860
3861	if (adapter->msix) {
3862		E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
3863		ims_mask |= EM_MSIX_MASK;
3864	}
3865	E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
3866}
3867
3868static void
3869lem_disable_intr(struct adapter *adapter)
3870{
3871	struct e1000_hw *hw = &adapter->hw;
3872
3873	if (adapter->msix)
3874		E1000_WRITE_REG(hw, EM_EIAC, 0);
3875	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
3876}
3877
3878/*
3879 * Bit of a misnomer, what this really means is
3880 * to enable OS management of the system... aka
3881 * to disable special hardware management features
3882 */
3883static void
3884lem_init_manageability(struct adapter *adapter)
3885{
3886	/* A shared code workaround */
3887	if (adapter->has_manage) {
3888		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3889		/* disable hardware interception of ARP */
3890		manc &= ~(E1000_MANC_ARP_EN);
3891		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3892	}
3893}
3894
3895/*
3896 * Give control back to hardware management
3897 * controller if there is one.
3898 */
3899static void
3900lem_release_manageability(struct adapter *adapter)
3901{
3902	if (adapter->has_manage) {
3903		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3904
3905		/* re-enable hardware interception of ARP */
3906		manc |= E1000_MANC_ARP_EN;
3907		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3908	}
3909}
3910
3911/*
3912 * lem_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
3913 * For ASF and Pass Through versions of f/w this means
3914 * that the driver is loaded. For AMT version type f/w
3915 * this means that the network i/f is open.
3916 */
3917static void
3918lem_get_hw_control(struct adapter *adapter)
3919{
3920	u32 ctrl_ext;
3921
3922	ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3923	E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3924	    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
3925	return;
3926}
3927
3928/*
3929 * lem_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3930 * For ASF and Pass Through versions of f/w this means that
3931 * the driver is no longer loaded. For AMT versions of the
3932 * f/w this means that the network i/f is closed.
3933 */
3934static void
3935lem_release_hw_control(struct adapter *adapter)
3936{
3937	u32 ctrl_ext;
3938
3939	if (!adapter->has_manage)
3940		return;
3941
3942	ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3943	E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3944	    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
3945	return;
3946}
3947
3948static int
3949lem_is_valid_ether_addr(u8 *addr)
3950{
3951	char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3952
3953	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3954		return (FALSE);
3955	}
3956
3957	return (TRUE);
3958}
3959
3960/*
3961** Parse the interface capabilities with regard
3962** to both system management and wake-on-lan for
3963** later use.
3964*/
3965static void
3966lem_get_wakeup(device_t dev)
3967{
3968	struct adapter	*adapter = device_get_softc(dev);
3969	u16		eeprom_data = 0, device_id, apme_mask;
3970
3971	adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
3972	apme_mask = EM_EEPROM_APME;
3973
3974	switch (adapter->hw.mac.type) {
3975	case e1000_82542:
3976	case e1000_82543:
3977		break;
3978	case e1000_82544:
3979		e1000_read_nvm(&adapter->hw,
3980		    NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
3981		apme_mask = EM_82544_APME;
3982		break;
3983	case e1000_82546:
3984	case e1000_82546_rev_3:
3985		if (adapter->hw.bus.func == 1) {
3986			e1000_read_nvm(&adapter->hw,
3987			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3988			break;
3989		} else
3990			e1000_read_nvm(&adapter->hw,
3991			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
3992		break;
3993	default:
3994		e1000_read_nvm(&adapter->hw,
3995		    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
3996		break;
3997	}
3998	if (eeprom_data & apme_mask)
3999		adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
4000	/*
4001         * We have the eeprom settings, now apply the special cases
4002         * where the eeprom may be wrong or the board won't support
4003         * wake on lan on a particular port
4004	 */
4005	device_id = pci_get_device(dev);
4006        switch (device_id) {
4007	case E1000_DEV_ID_82546GB_PCIE:
4008		adapter->wol = 0;
4009		break;
4010	case E1000_DEV_ID_82546EB_FIBER:
4011	case E1000_DEV_ID_82546GB_FIBER:
4012		/* Wake events only supported on port A for dual fiber
4013		 * regardless of eeprom setting */
4014		if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
4015		    E1000_STATUS_FUNC_1)
4016			adapter->wol = 0;
4017		break;
4018	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
4019                /* if quad port adapter, disable WoL on all but port A */
4020		if (global_quad_port_a != 0)
4021			adapter->wol = 0;
4022		/* Reset for multiple quad port adapters */
4023		if (++global_quad_port_a == 4)
4024			global_quad_port_a = 0;
4025                break;
4026	}
4027	return;
4028}
4029
4030
4031/*
4032 * Enable PCI Wake On Lan capability
4033 */
static void
lem_enable_wakeup(device_t dev)
{
	struct adapter	*adapter = device_get_softc(dev);
	struct ifnet	*ifp = adapter->ifp;
	u32		pmc, ctrl, ctrl_ext, rctl;
	u16     	status;

	/* Nothing to do if the device has no PCI power-management cap. */
	if ((pci_find_cap(dev, PCIY_PMG, &pmc) != 0))
		return;

	/* Advertise the wakeup capability */
	ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
	ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
	E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
	/*
	 * NOTE(review): WUC is written again below for non-pchlan parts;
	 * this early write looks redundant but is kept as-is — confirm.
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);

	/* Keep the laser running on Fiber adapters */
	if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
	    adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
	}

	/*
	** Determine type of Wakeup: note that wol
	** is set with all bits on by default.
	*/
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
		adapter->wol &= ~E1000_WUFC_MAG;

	if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
		adapter->wol &= ~E1000_WUFC_MC;
	else {
		/* Multicast wake needs promiscuous-multicast receive on. */
		rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
	}

	/* pchlan parts need the wake config mirrored into the PHY. */
	if (adapter->hw.mac.type == e1000_pchlan) {
		if (lem_enable_phy_wakeup(adapter))
			return;
	} else {
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
	}


        /* Request PME: set PME status/enable in the PCI PM register
	 * only when the interface has WOL enabled. */
        status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
	status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if (ifp->if_capenable & IFCAP_WOL)
		status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
        pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);

	return;
}
4092
4093/*
4094** WOL in the newer chipset interfaces (pchlan)
4095** require thing to be copied into the phy
4096*/
static int
lem_enable_phy_wakeup(struct adapter *adapter)
{
	int i;
	struct e1000_hw *hw = &adapter->hw;
	u32 mreg, ret = 0;
	u16 preg;

	/* copy MAC RARs to PHY RARs */
	for (i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
		/* Low 32 bits of the receive address go into RAR_L/RAR_M. */
		mreg = E1000_READ_REG(hw, E1000_RAL(i));
		e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
		e1000_write_phy_reg(hw, BM_RAR_M(i),
		    (u16)((mreg >> 16) & 0xFFFF));
		/* High 32 bits (incl. valid bit) into RAR_H/RAR_CTRL. */
		mreg = E1000_READ_REG(hw, E1000_RAH(i));
		e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
		e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
		    (u16)((mreg >> 16) & 0xFFFF));
	}

	/* copy MAC MTA to PHY MTA */
	for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
		mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
		e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
		e1000_write_phy_reg(hw, BM_MTA(i) + 1,
		    (u16)((mreg >> 16) & 0xFFFF));
	}

	/* configure PHY Rx Control register */
	/* Translate the relevant MAC RCTL bits into their BM_RCTL forms. */
	e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
	mreg = E1000_READ_REG(hw, E1000_RCTL);
	if (mreg & E1000_RCTL_UPE)
		preg |= BM_RCTL_UPE;
	if (mreg & E1000_RCTL_MPE)
		preg |= BM_RCTL_MPE;
	preg &= ~(BM_RCTL_MO_MASK);
	if (mreg & E1000_RCTL_MO_3)
		preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
				<< BM_RCTL_MO_SHIFT);
	if (mreg & E1000_RCTL_BAM)
		preg |= BM_RCTL_BAM;
	if (mreg & E1000_RCTL_PMCF)
		preg |= BM_RCTL_PMCF;
	/* Mirror flow-control receive enable from CTRL as well. */
	mreg = E1000_READ_REG(hw, E1000_CTRL);
	if (mreg & E1000_CTRL_RFCE)
		preg |= BM_RCTL_RFCE;
	e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);

	/* enable PHY wakeup in MAC register */
	E1000_WRITE_REG(hw, E1000_WUC,
	    E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
	E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);

	/* configure and enable PHY wakeup in PHY registers */
	e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
	e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);

	/* activate PHY wakeup */
	ret = hw->phy.ops.acquire(hw);
	if (ret) {
		printf("Could not acquire PHY\n");
		return ret;
	}
	/* Select the wakeup-enable page, then set the host-wake bits via
	 * raw MDIC accesses (the PHY semaphore is already held). */
	e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
	                         (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
	ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
	if (ret) {
		printf("Could not read PHY page 769\n");
		goto out;
	}
	preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
	ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
	if (ret)
		printf("Could not set PHY Host Wakeup bit\n");
out:
	hw->phy.ops.release(hw);

	return ret;
}
4176
4177static void
4178lem_led_func(void *arg, int onoff)
4179{
4180	struct adapter	*adapter = arg;
4181
4182	EM_CORE_LOCK(adapter);
4183	if (onoff) {
4184		e1000_setup_led(&adapter->hw);
4185		e1000_led_on(&adapter->hw);
4186	} else {
4187		e1000_led_off(&adapter->hw);
4188		e1000_cleanup_led(&adapter->hw);
4189	}
4190	EM_CORE_UNLOCK(adapter);
4191}
4192
4193/*********************************************************************
4194* 82544 Coexistence issue workaround.
4195*    There are 2 issues.
4196*       1. Transmit Hang issue.
4197*    To detect this issue, following equation can be used...
4198*	  SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4199*	  If SUM[3:0] is in between 1 to 4, we will have this issue.
4200*
4201*       2. DAC issue.
4202*    To detect this issue, following equation can be used...
4203*	  SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4204*	  If SUM[3:0] is in between 9 to c, we will have this issue.
4205*
4206*
4207*    WORKAROUND:
4208*	  Make sure we do not have ending address
4209*	  as 1,2,3,4(Hang) or 9,a,b,c (DAC)
4210*
4211*************************************************************************/
4212static u32
4213lem_fill_descriptors (bus_addr_t address, u32 length,
4214		PDESC_ARRAY desc_array)
4215{
4216	u32 safe_terminator;
4217
4218	/* Since issue is sensitive to length and address.*/
4219	/* Let us first check the address...*/
4220	if (length <= 4) {
4221		desc_array->descriptor[0].address = address;
4222		desc_array->descriptor[0].length = length;
4223		desc_array->elements = 1;
4224		return (desc_array->elements);
4225	}
4226	safe_terminator = (u32)((((u32)address & 0x7) +
4227	    (length & 0xF)) & 0xF);
4228	/* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
4229	if (safe_terminator == 0   ||
4230	(safe_terminator > 4   &&
4231	safe_terminator < 9)   ||
4232	(safe_terminator > 0xC &&
4233	safe_terminator <= 0xF)) {
4234		desc_array->descriptor[0].address = address;
4235		desc_array->descriptor[0].length = length;
4236		desc_array->elements = 1;
4237		return (desc_array->elements);
4238	}
4239
4240	desc_array->descriptor[0].address = address;
4241	desc_array->descriptor[0].length = length - 4;
4242	desc_array->descriptor[1].address = address + (length - 4);
4243	desc_array->descriptor[1].length = 4;
4244	desc_array->elements = 2;
4245	return (desc_array->elements);
4246}
4247
4248/**********************************************************************
4249 *
4250 *  Update the board statistics counters.
4251 *
4252 **********************************************************************/
/* Fold the hardware's clear-on-read statistics registers into the
 * softc accumulators, then refresh the ifnet error/collision counts. */
static void
lem_update_stats_counters(struct adapter *adapter)
{
	struct ifnet   *ifp;

	/* Symbol/sequence errors are only meaningful on copper, or on
	 * fiber/serdes when link is up. */
	if(adapter->hw.phy.media_type == e1000_media_type_copper ||
	   (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
		adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
	}
	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
	adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);

	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
	adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
	adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) +
	    ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32);
	adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) +
	    ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32);

	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
	adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);

	/* NOTE(review): only the high dwords (TORH/TOTH) are read here,
	 * unlike GORC/GOTC above — confirm this is intentional. */
	adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
	adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);

	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);

	/* Counters present only on 82543 and newer MACs. */
	if (adapter->hw.mac.type >= e1000_82543) {
		adapter->stats.algnerrc +=
		E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
		adapter->stats.rxerrc +=
		E1000_READ_REG(&adapter->hw, E1000_RXERRC);
		adapter->stats.tncrs +=
		E1000_READ_REG(&adapter->hw, E1000_TNCRS);
		adapter->stats.cexterr +=
		E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
		adapter->stats.tsctc +=
		E1000_READ_REG(&adapter->hw, E1000_TSCTC);
		adapter->stats.tsctfc +=
		E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
	}
	ifp = adapter->ifp;

	ifp->if_collisions = adapter->stats.colc;

	/* Rx Errors */
	ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
	    adapter->stats.crcerrs + adapter->stats.algnerrc +
	    adapter->stats.ruc + adapter->stats.roc +
	    adapter->stats.mpc + adapter->stats.cexterr;

	/* Tx Errors */
	ifp->if_oerrors = adapter->stats.ecol +
	    adapter->stats.latecol + adapter->watchdog_events;
}
4345
4346/* Export a single 32-bit register via a read-only sysctl. */
4347static int
4348lem_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
4349{
4350	struct adapter *adapter;
4351	u_int val;
4352
4353#ifndef __HAIKU__
4354	adapter = oidp->oid_arg1;
4355	val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
4356#endif
4357	return (sysctl_handle_int(oidp, &val, 0, req));
4358}
4359
4360/*
4361 * Add sysctl variables, one per statistic, to the system.
4362 */
4363static void
4364lem_add_hw_stats(struct adapter *adapter)
4365{
4366	device_t dev = adapter->dev;
4367
4368	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4369	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4370	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4371	struct e1000_hw_stats *stats = &adapter->stats;
4372
4373	struct sysctl_oid *stat_node;
4374	struct sysctl_oid_list *stat_list;
4375
4376	/* Driver Statistics */
4377	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_fail",
4378			 CTLFLAG_RD, &adapter->mbuf_alloc_failed,
4379			 "Std mbuf failed");
4380	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "cluster_alloc_fail",
4381			 CTLFLAG_RD, &adapter->mbuf_cluster_failed,
4382			 "Std mbuf cluster failed");
4383	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4384			CTLFLAG_RD, &adapter->dropped_pkts,
4385			"Driver dropped packets");
4386	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail",
4387			CTLFLAG_RD, &adapter->no_tx_dma_setup,
4388			"Driver tx dma failure in xmit");
4389	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail1",
4390			CTLFLAG_RD, &adapter->no_tx_desc_avail1,
4391			"Not enough tx descriptors failure in xmit");
4392	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail2",
4393			CTLFLAG_RD, &adapter->no_tx_desc_avail2,
4394			"Not enough tx descriptors failure in xmit");
4395	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
4396			CTLFLAG_RD, &adapter->rx_overruns,
4397			"RX overruns");
4398	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
4399			CTLFLAG_RD, &adapter->watchdog_events,
4400			"Watchdog timeouts");
4401
4402	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
4403			CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_CTRL,
4404			lem_sysctl_reg_handler, "IU",
4405			"Device Control Register");
4406	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
4407			CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RCTL,
4408			lem_sysctl_reg_handler, "IU",
4409			"Receiver Control Register");
4410	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
4411			CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
4412			"Flow Control High Watermark");
4413	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
4414			CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
4415			"Flow Control Low Watermark");
4416	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_workaround",
4417			CTLFLAG_RD, &adapter->tx_fifo_wrk_cnt,
4418			"TX FIFO workaround events");
4419	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_reset",
4420			CTLFLAG_RD, &adapter->tx_fifo_reset_cnt,
4421			"TX FIFO resets");
4422
4423	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_head",
4424			CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDH(0),
4425			lem_sysctl_reg_handler, "IU",
4426 			"Transmit Descriptor Head");
4427	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_tail",
4428			CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDT(0),
4429			lem_sysctl_reg_handler, "IU",
4430 			"Transmit Descriptor Tail");
4431	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_head",
4432			CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDH(0),
4433			lem_sysctl_reg_handler, "IU",
4434			"Receive Descriptor Head");
4435	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_tail",
4436			CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDT(0),
4437			lem_sysctl_reg_handler, "IU",
4438			"Receive Descriptor Tail");
4439
4440
4441	/* MAC stats get their own sub node */
4442
4443	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4444				    CTLFLAG_RD, NULL, "Statistics");
4445	stat_list = SYSCTL_CHILDREN(stat_node);
4446
4447	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
4448			CTLFLAG_RD, &stats->ecol,
4449			"Excessive collisions");
4450	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
4451			CTLFLAG_RD, &stats->scc,
4452			"Single collisions");
4453	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
4454			CTLFLAG_RD, &stats->mcc,
4455			"Multiple collisions");
4456	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
4457			CTLFLAG_RD, &stats->latecol,
4458			"Late collisions");
4459	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
4460			CTLFLAG_RD, &stats->colc,
4461			"Collision Count");
4462	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
4463			CTLFLAG_RD, &adapter->stats.symerrs,
4464			"Symbol Errors");
4465	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
4466			CTLFLAG_RD, &adapter->stats.sec,
4467			"Sequence Errors");
4468	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
4469			CTLFLAG_RD, &adapter->stats.dc,
4470			"Defer Count");
4471	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
4472			CTLFLAG_RD, &adapter->stats.mpc,
4473			"Missed Packets");
4474	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
4475			CTLFLAG_RD, &adapter->stats.rnbc,
4476			"Receive No Buffers");
4477	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
4478			CTLFLAG_RD, &adapter->stats.ruc,
4479			"Receive Undersize");
4480	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4481			CTLFLAG_RD, &adapter->stats.rfc,
4482			"Fragmented Packets Received ");
4483	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
4484			CTLFLAG_RD, &adapter->stats.roc,
4485			"Oversized Packets Received");
4486	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
4487			CTLFLAG_RD, &adapter->stats.rjc,
4488			"Recevied Jabber");
4489	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
4490			CTLFLAG_RD, &adapter->stats.rxerrc,
4491			"Receive Errors");
4492	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4493			CTLFLAG_RD, &adapter->stats.crcerrs,
4494			"CRC errors");
4495	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
4496			CTLFLAG_RD, &adapter->stats.algnerrc,
4497			"Alignment Errors");
4498	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
4499			CTLFLAG_RD, &adapter->stats.cexterr,
4500			"Collision/Carrier extension errors");
4501	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4502			CTLFLAG_RD, &adapter->stats.xonrxc,
4503			"XON Received");
4504	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4505			CTLFLAG_RD, &adapter->stats.xontxc,
4506			"XON Transmitted");
4507	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4508			CTLFLAG_RD, &adapter->stats.xoffrxc,
4509			"XOFF Received");
4510	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4511			CTLFLAG_RD, &adapter->stats.xofftxc,
4512			"XOFF Transmitted");
4513
4514	/* Packet Reception Stats */
4515	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
4516			CTLFLAG_RD, &adapter->stats.tpr,
4517			"Total Packets Received ");
4518	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
4519			CTLFLAG_RD, &adapter->stats.gprc,
4520			"Good Packets Received");
4521	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
4522			CTLFLAG_RD, &adapter->stats.bprc,
4523			"Broadcast Packets Received");
4524	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
4525			CTLFLAG_RD, &adapter->stats.mprc,
4526			"Multicast Packets Received");
4527	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4528			CTLFLAG_RD, &adapter->stats.prc64,
4529			"64 byte frames received ");
4530	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4531			CTLFLAG_RD, &adapter->stats.prc127,
4532			"65-127 byte frames received");
4533	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4534			CTLFLAG_RD, &adapter->stats.prc255,
4535			"128-255 byte frames received");
4536	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4537			CTLFLAG_RD, &adapter->stats.prc511,
4538			"256-511 byte frames received");
4539	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4540			CTLFLAG_RD, &adapter->stats.prc1023,
4541			"512-1023 byte frames received");
4542	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4543			CTLFLAG_RD, &adapter->stats.prc1522,
4544			"1023-1522 byte frames received");
4545 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
4546 			CTLFLAG_RD, &adapter->stats.gorc,
4547 			"Good Octets Received");
4548
4549	/* Packet Transmission Stats */
4550 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4551 			CTLFLAG_RD, &adapter->stats.gotc,
4552 			"Good Octets Transmitted");
4553	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4554			CTLFLAG_RD, &adapter->stats.tpt,
4555			"Total Packets Transmitted");
4556	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4557			CTLFLAG_RD, &adapter->stats.gptc,
4558			"Good Packets Transmitted");
4559	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4560			CTLFLAG_RD, &adapter->stats.bptc,
4561			"Broadcast Packets Transmitted");
4562	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4563			CTLFLAG_RD, &adapter->stats.mptc,
4564			"Multicast Packets Transmitted");
4565	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4566			CTLFLAG_RD, &adapter->stats.ptc64,
4567			"64 byte frames transmitted ");
4568	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4569			CTLFLAG_RD, &adapter->stats.ptc127,
4570			"65-127 byte frames transmitted");
4571	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4572			CTLFLAG_RD, &adapter->stats.ptc255,
4573			"128-255 byte frames transmitted");
4574	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4575			CTLFLAG_RD, &adapter->stats.ptc511,
4576			"256-511 byte frames transmitted");
4577	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4578			CTLFLAG_RD, &adapter->stats.ptc1023,
4579			"512-1023 byte frames transmitted");
4580	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4581			CTLFLAG_RD, &adapter->stats.ptc1522,
4582			"1024-1522 byte frames transmitted");
4583	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
4584			CTLFLAG_RD, &adapter->stats.tsctc,
4585			"TSO Contexts Transmitted");
4586	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
4587			CTLFLAG_RD, &adapter->stats.tsctfc,
4588			"TSO Contexts Failed");
4589}
4590
4591/**********************************************************************
4592 *
4593 *  This routine provides a way to dump out the adapter eeprom,
4594 *  often a useful debug/service tool. This only dumps the first
4595 *  32 words, stuff that matters is in that extent.
4596 *
4597 **********************************************************************/
4598
4599static int
4600lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
4601{
4602	struct adapter *adapter;
4603	int error;
4604	int result;
4605
4606	result = -1;
4607	error = sysctl_handle_int(oidp, &result, 0, req);
4608
4609	if (error || !req->newptr)
4610		return (error);
4611
4612	/*
4613	 * This value will cause a hex dump of the
4614	 * first 32 16-bit words of the EEPROM to
4615	 * the screen.
4616	 */
4617	if (result == 1) {
4618		adapter = (struct adapter *)arg1;
4619		lem_print_nvm_info(adapter);
4620        }
4621
4622	return (error);
4623}
4624
4625static void
4626lem_print_nvm_info(struct adapter *adapter)
4627{
4628	u16	eeprom_data;
4629	int	i, j, row = 0;
4630
4631	/* Its a bit crude, but it gets the job done */
4632	printf("\nInterface EEPROM Dump:\n");
4633	printf("Offset\n0x0000  ");
4634	for (i = 0, j = 0; i < 32; i++, j++) {
4635		if (j == 8) { /* Make the offset block */
4636			j = 0; ++row;
4637			printf("\n0x00%x0  ",row);
4638		}
4639		e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
4640		printf("%04x ", eeprom_data);
4641	}
4642	printf("\n");
4643}
4644
/* Sysctl handler that converts a user-supplied delay in microseconds
 * into hardware ticks and writes it into the interrupt-delay register
 * recorded in the em_int_delay_info cookie. */
static int
lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
{
	struct em_int_delay_info *info;
	struct adapter *adapter;
	u32 regval;
	int error;
	int usecs;
	int ticks;

	info = (struct em_int_delay_info *)arg1;
	usecs = info->value;
	error = sysctl_handle_int(oidp, &usecs, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	/* Reject values that will not fit the 16-bit register field. */
	if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
		return (EINVAL);
	info->value = usecs;
	ticks = EM_USECS_TO_TICKS(usecs);

	adapter = info->adapter;

	EM_CORE_LOCK(adapter);
	/* Replace only the low 16 bits; preserve the rest of the reg. */
	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
	regval = (regval & ~0xffff) | (ticks & 0xffff);
	/* Handle a few special cases. */
	switch (info->offset) {
	case E1000_RDTR:
		break;
	case E1000_TIDV:
		if (ticks == 0) {
			/* A zero delay disables IDE on tx descriptors. */
			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
			/* Don't write 0 into the TIDV register. */
			regval++;
		} else
			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
		break;
	}
	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
	EM_CORE_UNLOCK(adapter);
	return (0);
}
4687
4688static void
4689lem_add_int_delay_sysctl(struct adapter *adapter, const char *name,
4690	const char *description, struct em_int_delay_info *info,
4691	int offset, int value)
4692{
4693	info->adapter = adapter;
4694	info->offset = offset;
4695	info->value = value;
4696	SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
4697	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4698	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
4699	    info, 0, lem_sysctl_int_delay, "I", description);
4700}
4701
4702static void
4703lem_set_flow_cntrl(struct adapter *adapter, const char *name,
4704        const char *description, int *limit, int value)
4705{
4706	*limit = value;
4707	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4708	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4709	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4710}
4711
4712#ifndef EM_LEGACY_IRQ
4713static void
4714lem_add_rx_process_limit(struct adapter *adapter, const char *name,
4715	const char *description, int *limit, int value)
4716{
4717	*limit = value;
4718	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4719	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4720	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4721}
4722#endif
4723