/* if_lem.c -- FreeBSD revision 206001 */
1200483Srwatson/******************************************************************************
2200483Srwatson
3200483Srwatson  Copyright (c) 2001-2010, Intel Corporation
4200483Srwatson  All rights reserved.
5200483Srwatson
6200483Srwatson  Redistribution and use in source and binary forms, with or without
7200483Srwatson  modification, are permitted provided that the following conditions are met:
8200483Srwatson
9200483Srwatson   1. Redistributions of source code must retain the above copyright notice,
10200483Srwatson      this list of conditions and the following disclaimer.
11200483Srwatson
12200483Srwatson   2. Redistributions in binary form must reproduce the above copyright
13200483Srwatson      notice, this list of conditions and the following disclaimer in the
14200483Srwatson      documentation and/or other materials provided with the distribution.
15200483Srwatson
16200483Srwatson   3. Neither the name of the Intel Corporation nor the names of its
17200483Srwatson      contributors may be used to endorse or promote products derived from
18200483Srwatson      this software without specific prior written permission.
19200483Srwatson
20200483Srwatson  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21200483Srwatson  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22200483Srwatson  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23200483Srwatson  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24200483Srwatson  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25200483Srwatson  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26200483Srwatson  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27200483Srwatson  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28200483Srwatson  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29200483Srwatson  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30200483Srwatson  POSSIBILITY OF SUCH DAMAGE.
31200483Srwatson
32200483Srwatson******************************************************************************/
33200483Srwatson/*$FreeBSD: head/sys/dev/e1000/if_lem.c 206001 2010-03-31 20:43:24Z marius $*/
34200483Srwatson
35200483Srwatson#ifdef HAVE_KERNEL_OPTION_HEADERS
36200483Srwatson#include "opt_device_polling.h"
37200483Srwatson#include "opt_inet.h"
38200483Srwatson#endif
39200483Srwatson
40200483Srwatson#include <sys/param.h>
41200483Srwatson#include <sys/systm.h>
42200483Srwatson#if __FreeBSD_version >= 800000
43200483Srwatson#include <sys/buf_ring.h>
44200483Srwatson#endif
45200483Srwatson#include <sys/bus.h>
46200483Srwatson#include <sys/endian.h>
47200483Srwatson#include <sys/kernel.h>
48200483Srwatson#include <sys/kthread.h>
49200483Srwatson#include <sys/malloc.h>
50200483Srwatson#include <sys/mbuf.h>
51200483Srwatson#include <sys/module.h>
52200483Srwatson#include <sys/rman.h>
53200483Srwatson#include <sys/socket.h>
54200483Srwatson#include <sys/sockio.h>
55200483Srwatson#include <sys/sysctl.h>
56200483Srwatson#include <sys/taskqueue.h>
57200483Srwatson#if __FreeBSD_version >= 700029
58200483Srwatson#include <sys/eventhandler.h>
59200483Srwatson#endif
60200483Srwatson#include <machine/bus.h>
61200483Srwatson#include <machine/resource.h>
62200483Srwatson
63200483Srwatson#include <net/bpf.h>
64200483Srwatson#include <net/ethernet.h>
65200483Srwatson#include <net/if.h>
66200483Srwatson#include <net/if_arp.h>
67200483Srwatson#include <net/if_dl.h>
68200483Srwatson#include <net/if_media.h>
69200483Srwatson
70200483Srwatson#include <net/if_types.h>
71200483Srwatson#include <net/if_vlan_var.h>
72200483Srwatson
73200483Srwatson#include <netinet/in_systm.h>
74200483Srwatson#include <netinet/in.h>
75200483Srwatson#include <netinet/if_ether.h>
76200483Srwatson#include <netinet/ip.h>
77200483Srwatson#include <netinet/ip6.h>
78200483Srwatson#include <netinet/tcp.h>
79200483Srwatson#include <netinet/udp.h>
80200483Srwatson
81200483Srwatson#include <machine/in_cksum.h>
82200483Srwatson#include <dev/led/led.h>
83200483Srwatson#include <dev/pci/pcivar.h>
84200483Srwatson#include <dev/pci/pcireg.h>
85200483Srwatson
86200483Srwatson#include "e1000_api.h"
87200483Srwatson#include "if_lem.h"
88200483Srwatson
89200483Srwatson/*********************************************************************
90200483Srwatson *  Set this to one to display debug statistics
91200483Srwatson *********************************************************************/
92200483Srwatsonint	lem_display_debug_stats = 0;
93200483Srwatson
94200483Srwatson/*********************************************************************
95200483Srwatson *  Legacy Em Driver version:
96200483Srwatson *********************************************************************/
97200483Srwatsonchar lem_driver_version[] = "1.0.0";
98200483Srwatson
99200483Srwatson
100200483Srwatson/*********************************************************************
101200483Srwatson *  PCI Device ID Table
102200483Srwatson *
103200483Srwatson *  Used by probe to select devices to load on
104200483Srwatson *  Last field stores an index into e1000_strings
105200483Srwatson *  Last entry must be all 0s
106200483Srwatson *
107200483Srwatson *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
108200483Srwatson *********************************************************************/
109200483Srwatson
110200483Srwatsonstatic em_vendor_info_t lem_vendor_info_array[] =
111200483Srwatson{
112200483Srwatson	/* Intel(R) PRO/1000 Network Connection */
113200483Srwatson	{ 0x8086, E1000_DEV_ID_82540EM,		PCI_ANY_ID, PCI_ANY_ID, 0},
114200483Srwatson	{ 0x8086, E1000_DEV_ID_82540EM_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
115200483Srwatson	{ 0x8086, E1000_DEV_ID_82540EP,		PCI_ANY_ID, PCI_ANY_ID, 0},
116200483Srwatson	{ 0x8086, E1000_DEV_ID_82540EP_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
117200483Srwatson	{ 0x8086, E1000_DEV_ID_82540EP_LP,	PCI_ANY_ID, PCI_ANY_ID, 0},
118200483Srwatson
119200483Srwatson	{ 0x8086, E1000_DEV_ID_82541EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
120200483Srwatson	{ 0x8086, E1000_DEV_ID_82541ER,		PCI_ANY_ID, PCI_ANY_ID, 0},
121200483Srwatson	{ 0x8086, E1000_DEV_ID_82541ER_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
122200483Srwatson	{ 0x8086, E1000_DEV_ID_82541EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
123200483Srwatson	{ 0x8086, E1000_DEV_ID_82541GI,		PCI_ANY_ID, PCI_ANY_ID, 0},
124200483Srwatson	{ 0x8086, E1000_DEV_ID_82541GI_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
125200483Srwatson	{ 0x8086, E1000_DEV_ID_82541GI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
126200483Srwatson
127200483Srwatson	{ 0x8086, E1000_DEV_ID_82542,		PCI_ANY_ID, PCI_ANY_ID, 0},
128200483Srwatson
129200483Srwatson	{ 0x8086, E1000_DEV_ID_82543GC_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
130200483Srwatson	{ 0x8086, E1000_DEV_ID_82543GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
131200483Srwatson
132200483Srwatson	{ 0x8086, E1000_DEV_ID_82544EI_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
133200483Srwatson	{ 0x8086, E1000_DEV_ID_82544EI_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
134200483Srwatson	{ 0x8086, E1000_DEV_ID_82544GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
135200483Srwatson	{ 0x8086, E1000_DEV_ID_82544GC_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
136200483Srwatson
137200483Srwatson	{ 0x8086, E1000_DEV_ID_82545EM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
138200483Srwatson	{ 0x8086, E1000_DEV_ID_82545EM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
139200483Srwatson	{ 0x8086, E1000_DEV_ID_82545GM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
140200483Srwatson	{ 0x8086, E1000_DEV_ID_82545GM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
141200483Srwatson	{ 0x8086, E1000_DEV_ID_82545GM_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
142200483Srwatson
143200483Srwatson	{ 0x8086, E1000_DEV_ID_82546EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
144200483Srwatson	{ 0x8086, E1000_DEV_ID_82546EB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
145200483Srwatson	{ 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
146200483Srwatson	{ 0x8086, E1000_DEV_ID_82546GB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
147200483Srwatson	{ 0x8086, E1000_DEV_ID_82546GB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
148200483Srwatson	{ 0x8086, E1000_DEV_ID_82546GB_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
149200483Srwatson	{ 0x8086, E1000_DEV_ID_82546GB_PCIE,	PCI_ANY_ID, PCI_ANY_ID, 0},
150200483Srwatson	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
151200483Srwatson	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
152200483Srwatson						PCI_ANY_ID, PCI_ANY_ID, 0},
153200483Srwatson
154200483Srwatson	{ 0x8086, E1000_DEV_ID_82547EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
155200483Srwatson	{ 0x8086, E1000_DEV_ID_82547EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
156200483Srwatson	{ 0x8086, E1000_DEV_ID_82547GI,		PCI_ANY_ID, PCI_ANY_ID, 0},
157200483Srwatson	/* required last entry */
158200483Srwatson	{ 0, 0, 0, 0, 0}
159200483Srwatson};
160200483Srwatson
161200483Srwatson/*********************************************************************
162200483Srwatson *  Table of branding strings for all supported NICs.
163200483Srwatson *********************************************************************/
164200483Srwatson
165200483Srwatsonstatic char *lem_strings[] = {
166200483Srwatson	"Intel(R) PRO/1000 Legacy Network Connection"
167200483Srwatson};
168200483Srwatson
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	lem_probe(device_t);
static int	lem_attach(device_t);
static int	lem_detach(device_t);
static int	lem_shutdown(device_t);
static int	lem_suspend(device_t);
static int	lem_resume(device_t);
static void	lem_start(struct ifnet *);
static void	lem_start_locked(struct ifnet *ifp);
#if __FreeBSD_version >= 800000
/* Multiqueue (buf_ring) transmit entry points, 8.x and later only */
static int	lem_mq_start(struct ifnet *, struct mbuf *);
static int	lem_mq_start_locked(struct ifnet *, struct mbuf *);
static void	lem_qflush(struct ifnet *);
#endif
static int	lem_ioctl(struct ifnet *, u_long, caddr_t);
static void	lem_init(void *);
static void	lem_init_locked(struct adapter *);
static void	lem_stop(void *);
static void	lem_media_status(struct ifnet *, struct ifmediareq *);
static int	lem_media_change(struct ifnet *);
static void	lem_identify_hardware(struct adapter *);
static int	lem_allocate_pci_resources(struct adapter *);
static int	lem_allocate_irq(struct adapter *adapter);
static void	lem_free_pci_resources(struct adapter *);
static void	lem_local_timer(void *);
static int	lem_hardware_init(struct adapter *);
static void	lem_setup_interface(device_t, struct adapter *);
static void	lem_setup_transmit_structures(struct adapter *);
static void	lem_initialize_transmit_unit(struct adapter *);
static int	lem_setup_receive_structures(struct adapter *);
static void	lem_initialize_receive_unit(struct adapter *);
static void	lem_enable_intr(struct adapter *);
static void	lem_disable_intr(struct adapter *);
static void	lem_free_transmit_structures(struct adapter *);
static void	lem_free_receive_structures(struct adapter *);
static void	lem_update_stats_counters(struct adapter *);
static void	lem_txeof(struct adapter *);
static void	lem_tx_purge(struct adapter *);
static int	lem_allocate_receive_structures(struct adapter *);
static int	lem_allocate_transmit_structures(struct adapter *);
static int	lem_rxeof(struct adapter *, int);
#ifndef __NO_STRICT_ALIGNMENT
/* Copy-realignment fixup for strict-alignment platforms */
static int	lem_fixup_rx(struct adapter *);
#endif
static void	lem_receive_checksum(struct adapter *, struct e1000_rx_desc *,
		    struct mbuf *);
static void	lem_transmit_checksum_setup(struct adapter *, struct mbuf *,
		    u32 *, u32 *);
static void	lem_set_promisc(struct adapter *);
static void	lem_disable_promisc(struct adapter *);
static void	lem_set_multi(struct adapter *);
static void	lem_print_hw_stats(struct adapter *);
static void	lem_update_link_status(struct adapter *);
static int	lem_get_buf(struct adapter *, int);
#if __FreeBSD_version >= 700029
/* VLAN hardware-filter event hooks */
static void	lem_register_vlan(void *, struct ifnet *, u16);
static void	lem_unregister_vlan(void *, struct ifnet *, u16);
static void	lem_setup_vlan_hw_support(struct adapter *);
#endif
static int	lem_xmit(struct adapter *, struct mbuf **);
static void	lem_smartspeed(struct adapter *);
/* 82547 TX FIFO workaround helpers */
static int	lem_82547_fifo_workaround(struct adapter *, int);
static void	lem_82547_update_fifo_head(struct adapter *, int);
static int	lem_82547_tx_fifo_reset(struct adapter *);
static void	lem_82547_move_tail(void *);
static int	lem_dma_malloc(struct adapter *, bus_size_t,
		    struct em_dma_alloc *, int);
static void	lem_dma_free(struct adapter *, struct em_dma_alloc *);
static void	lem_print_debug_info(struct adapter *);
static void	lem_print_nvm_info(struct adapter *);
static int 	lem_is_valid_ether_addr(u8 *);
static int	lem_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	lem_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static u32	lem_fill_descriptors (bus_addr_t address, u32 length,
		    PDESC_ARRAY desc_array);
static int	lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static void	lem_add_int_delay_sysctl(struct adapter *, const char *,
		    const char *, struct em_int_delay_info *, int, int);
/* Management and WOL Support */
static void	lem_init_manageability(struct adapter *);
static void	lem_release_manageability(struct adapter *);
static void     lem_get_hw_control(struct adapter *);
static void     lem_release_hw_control(struct adapter *);
static void	lem_get_wakeup(device_t);
static void     lem_enable_wakeup(device_t);
static int	lem_enable_phy_wakeup(struct adapter *);
static void	lem_led_func(void *, int);

/* Interrupt model: legacy single handler vs. fast filter + taskqueue */
#ifdef EM_LEGACY_IRQ
static void	lem_intr(void *);
#else /* FAST IRQ */
#if __FreeBSD_version < 700000
static void	lem_irq_fast(void *);
#else
static int	lem_irq_fast(void *);
#endif
static void	lem_handle_rxtx(void *context, int pending);
static void	lem_handle_link(void *context, int pending);
static void	lem_add_rx_process_limit(struct adapter *, const char *,
		    const char *, int *, int);
#endif /* ~EM_LEGACY_IRQ */

#ifdef DEVICE_POLLING
static poll_handler_t lem_poll;
#endif /* POLLING */
276
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t lem_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, lem_probe),
	DEVMETHOD(device_attach, lem_attach),
	DEVMETHOD(device_detach, lem_detach),
	DEVMETHOD(device_shutdown, lem_shutdown),
	DEVMETHOD(device_suspend, lem_suspend),
	DEVMETHOD(device_resume, lem_resume),
	{0, 0}
};

/* Registered under the "em" name; shares em_devclass with the newer driver */
static driver_t lem_driver = {
	"em", lem_methods, sizeof(struct adapter),
};

extern devclass_t em_devclass;
DRIVER_MODULE(lem, pci, lem_driver, em_devclass, 0, 0);
MODULE_DEPEND(lem, pci, 1, 1, 1);
MODULE_DEPEND(lem, ether, 1, 1, 1);

/*********************************************************************
 *  Tunable default values.
 *********************************************************************/

/* Convert between interrupt-delay register ticks (1.024 us) and usecs */
#define EM_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
#define EM_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)
#define M_TSO_LEN			66

/* Allow common code without TSO */
#ifndef CSUM_TSO
#define CSUM_TSO	0
#endif

static int lem_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
static int lem_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
static int lem_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
static int lem_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
static int lem_rxd = EM_DEFAULT_RXD;
static int lem_txd = EM_DEFAULT_TXD;
static int lem_smart_pwr_down = FALSE;

/* Controls whether promiscuous also shows bad packets */
static int lem_debug_sbp = FALSE;

/* Loader tunables; the "hw.em" namespace is shared with the em(4) driver */
TUNABLE_INT("hw.em.tx_int_delay", &lem_tx_int_delay_dflt);
TUNABLE_INT("hw.em.rx_int_delay", &lem_rx_int_delay_dflt);
TUNABLE_INT("hw.em.tx_abs_int_delay", &lem_tx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rx_abs_int_delay", &lem_rx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rxd", &lem_rxd);
TUNABLE_INT("hw.em.txd", &lem_txd);
TUNABLE_INT("hw.em.smart_pwr_down", &lem_smart_pwr_down);
TUNABLE_INT("hw.em.sbp", &lem_debug_sbp);

#ifndef EM_LEGACY_IRQ
/* How many packets rxeof tries to clean at a time */
static int lem_rx_process_limit = 100;
TUNABLE_INT("hw.em.rx_process_limit", &lem_rx_process_limit);
#endif

/* Flow control setting - default to FULL */
static int lem_fc_setting = e1000_fc_full;
TUNABLE_INT("hw.em.fc_setting", &lem_fc_setting);

/*
** Shadow VFTA table, this is needed because
** the real vlan filter table gets cleared during
** a soft reset and the driver needs to be able
** to repopulate it.
*/
static u32 lem_shadow_vfta[EM_VFTA_SIZE];

/* Global used in WOL setup with multiport cards */
static int global_quad_port_a = 0;
354
355/*********************************************************************
356 *  Device identification routine
357 *
358 *  em_probe determines if the driver should be loaded on
359 *  adapter based on PCI vendor/device id of the adapter.
360 *
361 *  return BUS_PROBE_DEFAULT on success, positive on failure
362 *********************************************************************/
363
364static int
365lem_probe(device_t dev)
366{
367	char		adapter_name[60];
368	u16		pci_vendor_id = 0;
369	u16		pci_device_id = 0;
370	u16		pci_subvendor_id = 0;
371	u16		pci_subdevice_id = 0;
372	em_vendor_info_t *ent;
373
374	INIT_DEBUGOUT("em_probe: begin");
375
376	pci_vendor_id = pci_get_vendor(dev);
377	if (pci_vendor_id != EM_VENDOR_ID)
378		return (ENXIO);
379
380	pci_device_id = pci_get_device(dev);
381	pci_subvendor_id = pci_get_subvendor(dev);
382	pci_subdevice_id = pci_get_subdevice(dev);
383
384	ent = lem_vendor_info_array;
385	while (ent->vendor_id != 0) {
386		if ((pci_vendor_id == ent->vendor_id) &&
387		    (pci_device_id == ent->device_id) &&
388
389		    ((pci_subvendor_id == ent->subvendor_id) ||
390		    (ent->subvendor_id == PCI_ANY_ID)) &&
391
392		    ((pci_subdevice_id == ent->subdevice_id) ||
393		    (ent->subdevice_id == PCI_ANY_ID))) {
394			sprintf(adapter_name, "%s %s",
395				lem_strings[ent->index],
396				lem_driver_version);
397			device_set_desc_copy(dev, adapter_name);
398			return (BUS_PROBE_DEFAULT);
399		}
400		ent++;
401	}
402
403	return (ENXIO);
404}
405
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  On failure, resources acquired so far are released through the
 *  goto-cleanup chain at the bottom (err_* labels unwind in reverse
 *  acquisition order).
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
lem_attach(device_t dev)
{
	struct adapter	*adapter;
	int		tsize, rsize;
	int		error = 0;

	INIT_DEBUGOUT("lem_attach: begin");

	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;
	EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
	EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
	EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL stuff */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
	    lem_sysctl_debug_info, "I", "Debug Information");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
	    lem_sysctl_stats, "I", "Statistics");

	/* Both callouts run with their respective mutex held */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
	callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);

	/* Determine hardware and mac info */
	lem_identify_hardware(adapter);

	/* Setup PCI resources */
	if (lem_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci;
	}

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto err_pci;
	}

	e1000_get_bus_info(&adapter->hw);

	/* Set up some sysctls for the tunable interrupt delays */
	lem_add_int_delay_sysctl(adapter, "rx_int_delay",
	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
	    E1000_REGISTER(&adapter->hw, E1000_RDTR), lem_rx_int_delay_dflt);
	lem_add_int_delay_sysctl(adapter, "tx_int_delay",
	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
	    E1000_REGISTER(&adapter->hw, E1000_TIDV), lem_tx_int_delay_dflt);
	/* Absolute delay limit registers only exist on 82540 and later */
	if (adapter->hw.mac.type >= e1000_82540) {
		lem_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
		    "receive interrupt delay limit in usecs",
		    &adapter->rx_abs_int_delay,
		    E1000_REGISTER(&adapter->hw, E1000_RADV),
		    lem_rx_abs_int_delay_dflt);
		lem_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
		    "transmit interrupt delay limit in usecs",
		    &adapter->tx_abs_int_delay,
		    E1000_REGISTER(&adapter->hw, E1000_TADV),
		    lem_tx_abs_int_delay_dflt);
	}

#ifndef EM_LEGACY_IRQ
	/* Sysctls for limiting the amount of work done in the taskqueue */
	lem_add_rx_process_limit(adapter, "rx_processing_limit",
	    "max number of rx packets to process", &adapter->rx_process_limit,
	    lem_rx_process_limit);
#endif

	/*
	 * Validate number of transmit and receive descriptors. It
	 * must not exceed hardware maximum, and must be multiple
	 * of E1000_DBA_ALIGN.  An out-of-range tunable falls back to
	 * the compile-time default rather than failing attach.
	 */
	if (((lem_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac.type >= e1000_82544 && lem_txd > EM_MAX_TXD) ||
	    (adapter->hw.mac.type < e1000_82544 && lem_txd > EM_MAX_TXD_82543) ||
	    (lem_txd < EM_MIN_TXD)) {
		device_printf(dev, "Using %d TX descriptors instead of %d!\n",
		    EM_DEFAULT_TXD, lem_txd);
		adapter->num_tx_desc = EM_DEFAULT_TXD;
	} else
		adapter->num_tx_desc = lem_txd;
	if (((lem_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac.type >= e1000_82544 && lem_rxd > EM_MAX_RXD) ||
	    (adapter->hw.mac.type < e1000_82544 && lem_rxd > EM_MAX_RXD_82543) ||
	    (lem_rxd < EM_MIN_RXD)) {
		device_printf(dev, "Using %d RX descriptors instead of %d!\n",
		    EM_DEFAULT_RXD, lem_rxd);
		adapter->num_rx_desc = EM_DEFAULT_RXD;
	} else
		adapter->num_rx_desc = lem_rxd;

	adapter->hw.mac.autoneg = DO_AUTO_NEG;
	adapter->hw.phy.autoneg_wait_to_complete = FALSE;
	adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
	adapter->rx_buffer_len = 2048;

	e1000_init_script_state_82541(&adapter->hw, TRUE);
	e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = FALSE;
		adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
	}

	/*
	 * Set the frame limits assuming
	 * standard ethernet sized frames.
	 */
	adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
	adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;

	/*
	 * This controls when hardware reports transmit completion
	 * status.
	 */
	adapter->hw.mac.report_tx_early = 1;

	tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
	    EM_DBA_ALIGN);

	/* Allocate Transmit Descriptor ring */
	if (lem_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
		device_printf(dev, "Unable to allocate tx_desc memory\n");
		error = ENOMEM;
		goto err_tx_desc;
	}
	adapter->tx_desc_base =
	    (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;

	rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
	    EM_DBA_ALIGN);

	/* Allocate Receive Descriptor ring */
	if (lem_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
		device_printf(dev, "Unable to allocate rx_desc memory\n");
		error = ENOMEM;
		goto err_rx_desc;
	}
	adapter->rx_desc_base =
	    (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;

	/*
	** Start from a known state, this is
	** important in reading the nvm and
	** mac from that.
	*/
	e1000_reset_hw(&adapter->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
		/*
		** Some PCI-E parts fail the first check due to
		** the link being in sleep state, call it again,
		** if it fails a second time its a real issue.
		*/
		if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto err_hw_init;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&adapter->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto err_hw_init;
	}

	if (!lem_is_valid_ether_addr(adapter->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto err_hw_init;
	}

	/* Initialize the hardware */
	if (lem_hardware_init(adapter)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		error = EIO;
		goto err_hw_init;
	}

	/* Allocate transmit descriptors and buffers */
	if (lem_allocate_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		error = ENOMEM;
		goto err_tx_struct;
	}

	/* Allocate receive descriptors and buffers */
	if (lem_allocate_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		error = ENOMEM;
		goto err_rx_struct;
	}

	/*
	**  Do interrupt configuration
	*/
	error = lem_allocate_irq(adapter);
	if (error)
		goto err_rx_struct;

	/*
	 * Get Wake-on-Lan and Management info for later use
	 */
	lem_get_wakeup(dev);

	/* Setup OS specific network interface */
	lem_setup_interface(dev, adapter);

	/* Initialize statistics */
	lem_update_stats_counters(adapter);

	adapter->hw.mac.get_link_status = 1;
	lem_update_link_status(adapter);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&adapter->hw))
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");

	/* Do we need workaround for 82544 PCI-X adapter? */
	if (adapter->hw.bus.type == e1000_bus_type_pcix &&
	    adapter->hw.mac.type == e1000_82544)
		adapter->pcix_82544 = TRUE;
	else
		adapter->pcix_82544 = FALSE;

#if __FreeBSD_version >= 700029
	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    lem_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    lem_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif

	/* Non-AMT based hardware can now take control from firmware */
	if (adapter->has_manage && !adapter->has_amt)
		lem_get_hw_control(adapter);

	/* Tell the stack that the interface is not active */
	adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	adapter->led_dev = led_create(lem_led_func, adapter,
	    device_get_nameunit(dev));

	INIT_DEBUGOUT("lem_attach: end");

	return (0);

	/* Error unwind: labels release resources in reverse order */
err_rx_struct:
	lem_free_transmit_structures(adapter);
err_tx_struct:
err_hw_init:
	lem_release_hw_control(adapter);
	lem_dma_free(adapter, &adapter->rxdma);
err_rx_desc:
	lem_dma_free(adapter, &adapter->txdma);
err_tx_desc:
err_pci:
	lem_free_pci_resources(adapter);
	EM_TX_LOCK_DESTROY(adapter);
	EM_RX_LOCK_DESTROY(adapter);
	EM_CORE_LOCK_DESTROY(adapter);

	return (error);
}
696
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
lem_detach(device_t dev)
{
	struct adapter	*adapter = device_get_softc(dev);
	struct ifnet	*ifp = adapter->ifp;

	INIT_DEBUGOUT("em_detach: begin");

	/* Make sure VLANS are not using driver */
#if __FreeBSD_version >= 700000
	if (adapter->ifp->if_vlantrunk != NULL) {
#else
	if (adapter->ifp->if_nvlans != 0) {
#endif
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	if (adapter->led_dev != NULL)
		led_destroy(adapter->led_dev);

	/* Stop the adapter under both locks before tearing anything down */
	EM_CORE_LOCK(adapter);
	EM_TX_LOCK(adapter);
	adapter->in_detach = 1;
	lem_stop(adapter);
	e1000_phy_hw_reset(&adapter->hw);

	lem_release_manageability(adapter);

	EM_TX_UNLOCK(adapter);
	EM_CORE_UNLOCK(adapter);

#if __FreeBSD_version >= 700029
	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
#endif

	/* Detach from the stack, then drain callouts so none can rearm */
	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
	callout_drain(&adapter->tx_fifo_timer);

	lem_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(ifp);

	lem_free_transmit_structures(adapter);
	lem_free_receive_structures(adapter);

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base) {
		lem_dma_free(adapter, &adapter->txdma);
		adapter->tx_desc_base = NULL;
	}

	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base) {
		lem_dma_free(adapter, &adapter->rxdma);
		adapter->rx_desc_base = NULL;
	}

	lem_release_hw_control(adapter);
	EM_TX_LOCK_DESTROY(adapter);
	EM_RX_LOCK_DESTROY(adapter);
	EM_CORE_LOCK_DESTROY(adapter);

	return (0);
}
782
783/*********************************************************************
784 *
785 *  Shutdown entry point
786 *
787 **********************************************************************/
788
789static int
790lem_shutdown(device_t dev)
791{
792	return lem_suspend(dev);
793}
794
795/*
796 * Suspend/resume device methods.
797 */
798static int
799lem_suspend(device_t dev)
800{
801	struct adapter *adapter = device_get_softc(dev);
802
803	EM_CORE_LOCK(adapter);
804
805	lem_release_manageability(adapter);
806	lem_release_hw_control(adapter);
807	lem_enable_wakeup(dev);
808
809	EM_CORE_UNLOCK(adapter);
810
811	return bus_generic_suspend(dev);
812}
813
/*
 * Resume: re-run the full locked init path (the chip lost its
 * state across suspend), re-establish manageability, then kick
 * the transmit path in case frames queued while asleep.
 */
static int
lem_resume(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = adapter->ifp;

	EM_CORE_LOCK(adapter);
	lem_init_locked(adapter);
	lem_init_manageability(adapter);
	EM_CORE_UNLOCK(adapter);
	/* lem_start takes the TX lock itself */
	lem_start(ifp);

	return bus_generic_resume(dev);
}
828
829
830/*********************************************************************
831 *  Transmit entry point
832 *
833 *  em_start is called by the stack to initiate a transmit.
834 *  The driver will remain in this routine as long as there are
835 *  packets to transmit and transmit resources are available.
836 *  In case resources are not available stack is notified and
837 *  the packet is requeued.
838 **********************************************************************/
839
840#if __FreeBSD_version >= 800000
/*
 * Transmit with the TX lock held.  Called with m == NULL purely
 * to drain the buf_ring (taskqueue/poll use).  With a frame: it
 * is sent directly when the ring is empty and descriptors are
 * plentiful, otherwise enqueued on the buf_ring; in either case
 * the buf_ring is then drained as far as resources allow.
 * Returns 0 or an errno from lem_xmit()/drbr_enqueue().
 */
static int
lem_mq_start_locked(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter	*adapter = ifp->if_softc;
	struct mbuf	*next;
	int error = E1000_SUCCESS;

	EM_TX_LOCK_ASSERT(adapter);
	/* To allow being called from a tasklet */
	if (m == NULL)
		goto process;

	/* Not running, output blocked, or no link: just defer the frame */
	if (((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
	    || (!adapter->link_active)) {
		error = drbr_enqueue(ifp, adapter->br, m);
		return (error);
	} else if (drbr_empty(ifp, adapter->br) &&
	    (adapter->num_tx_desc_avail > EM_TX_OP_THRESHOLD)) {
		/* Fast path: ring empty, send without touching the buf_ring */
		if ((error = lem_xmit(adapter, &m)) != 0) {
			/* lem_xmit left the mbuf intact (non-fatal): requeue */
			if (m)
				error = drbr_enqueue(ifp, adapter->br, m);
			return (error);
		} else {
			/*
			 * We've bypassed the buf ring so we need to update
			 * ifp directly
			 */
			/*
			 * NOTE(review): m is now owned by the TX ring (freed
			 * later in txeof), so reading m_pkthdr here looks
			 * safe while the TX lock is held -- confirm.
			 */
			drbr_stats_update(ifp, m->m_pkthdr.len, m->m_flags);
			/*
			** Send a copy of the frame to the BPF
			** listener and set the watchdog on.
			*/
			ETHER_BPF_MTAP(ifp, m);
			adapter->watchdog_check = TRUE;
		}
	} else if ((error = drbr_enqueue(ifp, adapter->br, m)) != 0)
		return (error);

process:
	if (drbr_empty(ifp, adapter->br))
		return(error);
        /* Process the queue */
        while (TRUE) {
                if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                        break;
                next = drbr_dequeue(ifp, adapter->br);
                if (next == NULL)
                        break;
                if ((error = lem_xmit(adapter, &next)) != 0) {
			if (next != NULL)
				error = drbr_enqueue(ifp, adapter->br, next);
                        break;
		}
		drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
                ETHER_BPF_MTAP(ifp, next);
                /* Set the watchdog */
		adapter->watchdog_check = TRUE;
        }

	/* Running low on descriptors: ask the stack to back off */
        if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
                ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	return (error);
}
906
907/*
908** Multiqueue capable stack interface, this is not
909** yet truely multiqueue, but that is coming...
910*/
911static int
912lem_mq_start(struct ifnet *ifp, struct mbuf *m)
913{
914
915	struct adapter *adapter = ifp->if_softc;
916	int error = 0;
917
918	if (EM_TX_TRYLOCK(adapter)) {
919		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
920			error = lem_mq_start_locked(ifp, m);
921		EM_TX_UNLOCK(adapter);
922	} else
923		error = drbr_enqueue(ifp, adapter->br, m);
924
925	return (error);
926}
927
928static void
929lem_qflush(struct ifnet *ifp)
930{
931	struct mbuf *m;
932	struct adapter *adapter = (struct adapter *)ifp->if_softc;
933
934	EM_TX_LOCK(adapter);
935	while ((m = buf_ring_dequeue_sc(adapter->br)) != NULL)
936		m_freem(m);
937	if_qflush(ifp);
938	EM_TX_UNLOCK(adapter);
939}
940#endif /* FreeBSD_version */
941
/*
 * Legacy (single-queue) transmit with the TX lock held: drain
 * the interface send queue into the descriptor ring until it is
 * empty or we run out of TX descriptors.
 */
static void
lem_start_locked(struct ifnet *ifp)
{
	struct adapter	*adapter = ifp->if_softc;
	struct mbuf	*m_head;

	EM_TX_LOCK_ASSERT(adapter);

	/* Nothing to do unless running, not blocked, and link is up */
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;
	if (!adapter->link_active)
		return;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {

                IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 *  Encapsulation can modify our pointer, and or make it
		 *  NULL on failure.  In that event, we can't requeue.
		 */
		if (lem_xmit(adapter, &m_head)) {
			if (m_head == NULL)
				break;
			/* Out of resources: stall and push the mbuf back */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		adapter->watchdog_check = TRUE;
	}
	/* Running low on descriptors: ask the stack to back off */
	if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	return;
}
984
985static void
986lem_start(struct ifnet *ifp)
987{
988	struct adapter *adapter = ifp->if_softc;
989
990	EM_TX_LOCK(adapter);
991	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
992		lem_start_locked(ifp);
993	EM_TX_UNLOCK(adapter);
994}
995
996/*********************************************************************
997 *  Ioctl entry point
998 *
999 *  em_ioctl is called when the user wants to configure the
1000 *  interface.
1001 *
1002 *  return 0 on success, positive on failure
1003 **********************************************************************/
1004
/*
 * Ioctl entry point: handle address/MTU/flags/multicast/media/
 * capability requests, deferring anything we do not recognize
 * to ether_ioctl().  Returns 0 on success, errno on failure.
 */
static int
lem_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	int error = 0;

	/* Ignore ioctls while the interface is being torn down */
	if (adapter->in_detach)
		return (error);

	switch (command) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET) {
			/*
			 * XXX
			 * Since resetting hardware takes a very long time
			 * and results in link renegotiation we only
			 * initialize the hardware only when it is absolutely
			 * required.
			 */
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				EM_CORE_LOCK(adapter);
				lem_init_locked(adapter);
				EM_CORE_UNLOCK(adapter);
			}
			arp_ifinit(ifp, ifa);
		} else
#endif
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
	    {
		int max_frame_size;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

		EM_CORE_LOCK(adapter);
		/* 82542 cannot do jumbo frames; everything else can */
		switch (adapter->hw.mac.type) {
		case e1000_82542:
			max_frame_size = ETHER_MAX_LEN;
			break;
		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			EM_CORE_UNLOCK(adapter);
			error = EINVAL;
			break;
		}

		/* New MTU requires a full reinit to resize buffers */
		ifp->if_mtu = ifr->ifr_mtu;
		adapter->max_frame_size =
		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
		lem_init_locked(adapter);
		EM_CORE_UNLOCK(adapter);
		break;
	    }
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd:\
		    SIOCSIFFLAGS (Set Interface Flags)");
		EM_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/* Only promisc/allmulti changed: no reinit */
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					lem_disable_promisc(adapter);
					lem_set_promisc(adapter);
				}
			} else
				lem_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				EM_TX_LOCK(adapter);
				lem_stop(adapter);
				EM_TX_UNLOCK(adapter);
			}
		adapter->if_flags = ifp->if_flags;
		EM_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			EM_CORE_LOCK(adapter);
			lem_disable_intr(adapter);
			lem_set_multi(adapter);
			/* 82542 rev2 needs the RX unit reprogrammed */
			if (adapter->hw.mac.type == e1000_82542 &&
	    		    adapter->hw.revision_id == E1000_REVISION_2) {
				lem_initialize_receive_unit(adapter);
			}
#ifdef DEVICE_POLLING
			if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
				lem_enable_intr(adapter);
			EM_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		EM_CORE_LOCK(adapter);
		if (e1000_check_reset_block(&adapter->hw)) {
			EM_CORE_UNLOCK(adapter);
			device_printf(adapter->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		EM_CORE_UNLOCK(adapter);
		/* FALLTHROUGH -- set is handled by ifmedia_ioctl too */
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: \
		    SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask, reinit;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(lem_poll, ifp);
				if (error)
					return (error);
				EM_CORE_LOCK(adapter);
				lem_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				EM_CORE_LOCK(adapter);
				lem_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
#if __FreeBSD_version >= 700000
		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			reinit = 1;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if ((mask & IFCAP_WOL) &&
		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
			if (mask & IFCAP_WOL_MCAST)
				ifp->if_capenable ^= IFCAP_WOL_MCAST;
			if (mask & IFCAP_WOL_MAGIC)
				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		}
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
			lem_init(adapter);
#if __FreeBSD_version >= 700000
		VLAN_CAPABILITIES(ifp);
#endif
		break;
	    }

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
1186
1187
1188/*********************************************************************
1189 *  Init entry point
1190 *
1191 *  This routine is used in two ways. It is used by the stack as
1192 *  init entry point in network interface structure. It is also used
1193 *  by the driver as a hw/sw initialization routine to get to a
1194 *  consistent state.
1195 *
1196 *  return 0 on success, positive on failure
1197 **********************************************************************/
1198
/*
 * Bring the adapter to a fully-initialized running state with
 * the core lock held: stop the chip, size the packet buffer,
 * program the MAC address, init hardware, set up TX/RX rings,
 * multicast/promisc state and interrupts.  The order of these
 * steps follows the hardware's requirements -- do not reorder.
 */
static void
lem_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	u32		pba;

	INIT_DEBUGOUT("lem_init: begin");

	EM_CORE_LOCK_ASSERT(adapter);

	EM_TX_LOCK(adapter);
	lem_stop(adapter);
	EM_TX_UNLOCK(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 *   Note: default does not leave enough room for Jumbo Frame >10k.
	 */
	switch (adapter->hw.mac.type) {
	case e1000_82547:
	case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size =
		    (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K.   */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}

	INIT_DEBUGOUT1("lem_init: pba=%dK",pba);
	E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);

	/* Get the latest mac address, User can use a LAA */
        bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
              ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	/* Initialize the hardware */
	if (lem_hardware_init(adapter)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		return;
	}
	lem_update_link_status(adapter);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);

#if __FreeBSD_version < 700029
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		u32 ctrl;
		ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
	}
#else
	/* Use real VLAN Filter support */
	lem_setup_vlan_hw_support(adapter);
#endif

	/* Set hardware offload abilities */
	ifp->if_hwassist = 0;
	if (adapter->hw.mac.type >= e1000_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 700000
		if (ifp->if_capenable & IFCAP_TSO4)
			ifp->if_hwassist |= CSUM_TSO;
#endif
	}

	/* Configure for OS presence */
	lem_init_manageability(adapter);

	/* Prepare transmit descriptors and buffers */
	lem_setup_transmit_structures(adapter);
	lem_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	lem_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (lem_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		EM_TX_LOCK(adapter);
		lem_stop(adapter);
		EM_TX_UNLOCK(adapter);
		return;
	}
	lem_initialize_receive_unit(adapter);

	/* Don't lose promiscuous settings */
	lem_set_promisc(adapter);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
	e1000_clear_hw_cntrs_base_generic(&adapter->hw);

	/* MSI/X configuration for 82574 */
	/*
	 * NOTE(review): 82574 is normally serviced by em(4), not
	 * lem(4) -- confirm this branch is actually reachable here.
	 */
	if (adapter->hw.mac.type == e1000_82574) {
		int tmp;
		tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_PBA_CLR;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
		/*
		** Set the IVAR - interrupt vector routing.
		** Each nibble represents a vector, high bit
		** is enable, other 3 bits are the MSIX table
		** entry, we map RXQ0 to 0, TXQ0 to 1, and
		** Link (other) to 2, hence the magic number.
		*/
		E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
	}

#ifdef DEVICE_POLLING
	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		lem_disable_intr(adapter);
	else
#endif /* DEVICE_POLLING */
		lem_enable_intr(adapter);

	/* AMT based hardware can now take control from firmware */
	if (adapter->has_manage && adapter->has_amt)
		lem_get_hw_control(adapter);

	/* Don't reset the phy next time init gets called */
	adapter->hw.phy.reset_disable = TRUE;
}
1351
/*
 * Externally visible init entry point (used as if_init): take
 * the core lock and run the locked initialization path.
 */
static void
lem_init(void *arg)
{
	struct adapter *adapter = (struct adapter *)arg;

	EM_CORE_LOCK(adapter);
	lem_init_locked(adapter);
	EM_CORE_UNLOCK(adapter);
}
1361
1362
1363#ifdef DEVICE_POLLING
1364/*********************************************************************
1365 *
1366 *  Legacy polling routine
1367 *
1368 *********************************************************************/
/*
 * DEVICE_POLLING handler: optionally check link status (under
 * the core lock), then run RX cleanup lock-free, TX cleanup and
 * restart under the TX lock.  Returns the number of RX packets
 * processed by lem_rxeof().
 */
static int
lem_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	u32		reg_icr, rx_done = 0;

	EM_CORE_LOCK(adapter);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		EM_CORE_UNLOCK(adapter);
		return (rx_done);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			/* Link event: refresh status and restart the timer */
			callout_stop(&adapter->timer);
			adapter->hw.mac.get_link_status = 1;
			lem_update_link_status(adapter);
			callout_reset(&adapter->timer, hz,
			    lem_local_timer, adapter);
		}
	}
	EM_CORE_UNLOCK(adapter);

	rx_done = lem_rxeof(adapter, count);

	EM_TX_LOCK(adapter);
	lem_txeof(adapter);
#if __FreeBSD_version >= 800000
	if (!drbr_empty(ifp, adapter->br))
		lem_mq_start_locked(ifp, NULL);
#else
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		lem_start_locked(ifp);
#endif
	EM_TX_UNLOCK(adapter);
	return (rx_done);
}
1407#endif /* DEVICE_POLLING */
1408
1409#ifdef EM_LEGACY_IRQ
1410/*********************************************************************
1411 *
1412 *  Legacy Interrupt Service routine
1413 *
1414 *********************************************************************/
1415
/*
 * Legacy (non-fast) interrupt handler: read-and-clear ICR, deal
 * with link events, then run TX/RX cleanup and restart transmit.
 */
static void
lem_intr(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp = adapter->ifp;
	u32		reg_icr;


	/* Polling owns the device; interrupts are spurious then */
	if (ifp->if_capenable & IFCAP_POLLING)
		return;

	EM_CORE_LOCK(adapter);
	/* Reading ICR acknowledges (clears) the pending causes */
	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
	if (reg_icr & E1000_ICR_RXO)
		adapter->rx_overruns++;

	/* All-ones means hot-ejected hardware; zero means not ours */
	if ((reg_icr == 0xffffffff) || (reg_icr == 0))
			goto out;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			goto out;

	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		callout_stop(&adapter->timer);
		adapter->hw.mac.get_link_status = 1;
		lem_update_link_status(adapter);
		/* Deal with TX cruft when link lost */
		lem_tx_purge(adapter);
		callout_reset(&adapter->timer, hz,
		    lem_local_timer, adapter);
		goto out;
	}

	EM_TX_LOCK(adapter);
	/*
	 * NOTE(review): txeof runs both before and after rxeof here;
	 * looks like a deliberate reclaim-twice pattern -- confirm
	 * before simplifying.
	 */
	lem_txeof(adapter);
	lem_rxeof(adapter, -1);
	lem_txeof(adapter);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		lem_start_locked(ifp);
	EM_TX_UNLOCK(adapter);

out:
	EM_CORE_UNLOCK(adapter);
	return;
}
1462
1463#else /* EM_FAST_IRQ, then fast interrupt routines only */
1464
/*
 * Taskqueue handler for link-state interrupts: refresh link
 * status, purge stale TX work if the link went away, and restart
 * the watchdog timer.  'pending' is the taskqueue count (unused).
 */
static void
lem_handle_link(void *context, int pending)
{
	struct adapter	*adapter = context;
	struct ifnet *ifp = adapter->ifp;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	EM_CORE_LOCK(adapter);
	callout_stop(&adapter->timer);
	lem_update_link_status(adapter);
	/* Deal with TX cruft when link lost */
	lem_tx_purge(adapter);
	callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
	EM_CORE_UNLOCK(adapter);
}
1482
1483
1484/* Combined RX/TX handler, used by Legacy and MSI */
/* Combined RX/TX handler, used by Legacy and MSI */
/*
 * Runs from the driver taskqueue after lem_irq_fast() masked
 * interrupts.  Re-enqueues itself while RX work remains, cleans
 * the TX ring, restarts transmit, then unmasks interrupts.
 */
static void
lem_handle_rxtx(void *context, int pending)
{
	struct adapter	*adapter = context;
	struct ifnet	*ifp = adapter->ifp;


	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/* Nonzero return: RX limit hit, more work pending */
		if (lem_rxeof(adapter, adapter->rx_process_limit) != 0)
			taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
		EM_TX_LOCK(adapter);
		lem_txeof(adapter);

#if __FreeBSD_version >= 800000
		if (!drbr_empty(ifp, adapter->br))
			lem_mq_start_locked(ifp, NULL);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			lem_start_locked(ifp);
#endif
		EM_TX_UNLOCK(adapter);
	}

	/* Interrupts were masked in lem_irq_fast(); unmask now */
	lem_enable_intr(adapter);
}
1510
1511/*********************************************************************
1512 *
1513 *  Fast Legacy/MSI Combined Interrupt Service routine
1514 *
1515 *********************************************************************/
#if __FreeBSD_version < 700000
#define FILTER_STRAY
#define FILTER_HANDLED
static void
#else
static int
#endif
/*
 * Fast interrupt filter (legacy/MSI): acknowledge the cause,
 * mask interrupts and defer all real work to taskqueues.
 * Returns FILTER_STRAY / FILTER_HANDLED on FreeBSD >= 7.
 */
lem_irq_fast(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp;
	u32		reg_icr;

	ifp = adapter->ifp;

	/* Reading ICR acknowledges (clears) pending causes */
	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);

	/* Hot eject?  */
	if (reg_icr == 0xffffffff)
		return FILTER_STRAY;

	/* Definitely not our interrupt.  */
	if (reg_icr == 0x0)
		return FILTER_STRAY;

	/*
	 * Mask interrupts until the taskqueue is finished running.  This is
	 * cheap, just assume that it is needed.  This also works around the
	 * MSI message reordering errata on certain systems.
	 */
	lem_disable_intr(adapter);
	taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		adapter->hw.mac.get_link_status = 1;
		taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
	}

	if (reg_icr & E1000_ICR_RXO)
		adapter->rx_overruns++;
	return FILTER_HANDLED;
}
1559#endif /* ~EM_LEGACY_IRQ */
1560
1561
1562/*********************************************************************
1563 *
1564 *  Media Ioctl callback
1565 *
1566 *  This routine is called whenever the user queries the status of
1567 *  the interface using ifconfig.
1568 *
1569 **********************************************************************/
1570static void
1571lem_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1572{
1573	struct adapter *adapter = ifp->if_softc;
1574	u_char fiber_type = IFM_1000_SX;
1575
1576	INIT_DEBUGOUT("lem_media_status: begin");
1577
1578	EM_CORE_LOCK(adapter);
1579	lem_update_link_status(adapter);
1580
1581	ifmr->ifm_status = IFM_AVALID;
1582	ifmr->ifm_active = IFM_ETHER;
1583
1584	if (!adapter->link_active) {
1585		EM_CORE_UNLOCK(adapter);
1586		return;
1587	}
1588
1589	ifmr->ifm_status |= IFM_ACTIVE;
1590
1591	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1592	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1593		if (adapter->hw.mac.type == e1000_82545)
1594			fiber_type = IFM_1000_LX;
1595		ifmr->ifm_active |= fiber_type | IFM_FDX;
1596	} else {
1597		switch (adapter->link_speed) {
1598		case 10:
1599			ifmr->ifm_active |= IFM_10_T;
1600			break;
1601		case 100:
1602			ifmr->ifm_active |= IFM_100_TX;
1603			break;
1604		case 1000:
1605			ifmr->ifm_active |= IFM_1000_T;
1606			break;
1607		}
1608		if (adapter->link_duplex == FULL_DUPLEX)
1609			ifmr->ifm_active |= IFM_FDX;
1610		else
1611			ifmr->ifm_active |= IFM_HDX;
1612	}
1613	EM_CORE_UNLOCK(adapter);
1614}
1615
1616/*********************************************************************
1617 *
1618 *  Media Ioctl callback
1619 *
1620 *  This routine is called when the user changes speed/duplex using
1621 *  media/mediopt option with ifconfig.
1622 *
1623 **********************************************************************/
/*
 * Apply a user-requested media/duplex setting: translate the
 * ifmedia subtype into autoneg/forced-speed fields, then reset
 * the PHY and reinitialize.  Returns EINVAL for non-Ethernet
 * media, 0 otherwise.
 */
static int
lem_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia  *ifm = &adapter->media;

	INIT_DEBUGOUT("lem_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	EM_CORE_LOCK(adapter);
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		adapter->hw.mac.autoneg = DO_AUTO_NEG;
		adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		/* Gigabit cannot be forced; advertise 1000FDX only */
		adapter->hw.mac.autoneg = DO_AUTO_NEG;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case IFM_100_TX:
		adapter->hw.mac.autoneg = FALSE;
		adapter->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		else
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case IFM_10_T:
		adapter->hw.mac.autoneg = FALSE;
		adapter->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		else
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	default:
		/*
		 * NOTE(review): unsupported media still falls through to
		 * the PHY reset/reinit below and returns 0 -- confirm
		 * this is intended rather than returning EINVAL.
		 */
		device_printf(adapter->dev, "Unsupported media type\n");
	}

	/* As the speed/duplex settings may have changed we need to
	 * reset the PHY.
	 */
	adapter->hw.phy.reset_disable = FALSE;

	lem_init_locked(adapter);
	EM_CORE_UNLOCK(adapter);

	return (0);
}
1677
1678/*********************************************************************
1679 *
1680 *  This routine maps the mbufs to tx descriptors.
1681 *
1682 *  return 0 on success, positive on failure
1683 **********************************************************************/
1684
/*
 * Map an mbuf chain to TX descriptors and hand it to hardware.
 * Mbuf ownership on return:
 *   - 0:       chain is owned by the TX ring, freed in txeof.
 *   - fatal    (EINVAL, failed defrag/retry): chain is freed here
 *              and *m_headp set to NULL.
 *   - ENOBUFS/ENOMEM with *m_headp != NULL: caller may requeue.
 */
static int
lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
{
	bus_dma_segment_t	segs[EM_MAX_SCATTER];
	bus_dmamap_t		map;
	struct em_buffer	*tx_buffer, *tx_buffer_mapped;
	struct e1000_tx_desc	*ctxd = NULL;
	struct mbuf		*m_head;
	u32			txd_upper, txd_lower, txd_used, txd_saved;
	int			error, nsegs, i, j, first, last = 0;
#if __FreeBSD_version < 700000
	struct m_tag		*mtag;
#endif
	m_head = *m_headp;
	txd_upper = txd_lower = txd_used = txd_saved = 0;

        /*
         * Force a cleanup if number of TX descriptors
         * available hits the threshold
         */
	if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
		lem_txeof(adapter);
		/* Now do we at least have a minimal? */
		if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
			adapter->no_tx_desc_avail1++;
			return (ENOBUFS);
		}
	}

	/*
	 * Map the packet for DMA
	 *
	 * Capture the first descriptor index,
	 * this descriptor will have the index
	 * of the EOP which is the only one that
	 * now gets a DONE bit writeback.
	 */
	first = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

	/*
	 * There are two types of errors we can (try) to handle:
	 * - EFBIG means the mbuf chain was too long and bus_dma ran
	 *   out of segments.  Defragment the mbuf chain and try again.
	 * - ENOMEM means bus_dma could not obtain enough bounce buffers
	 *   at this point in time.  Defer sending and try again later.
	 * All other errors, in particular EINVAL, are fatal and prevent the
	 * mbuf chain from ever going through.  Drop it and report error.
	 */
	if (error == EFBIG) {
		struct mbuf *m;

		m = m_defrag(*m_headp, M_DONTWAIT);
		if (m == NULL) {
			adapter->mbuf_alloc_failed++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (ENOBUFS);
		}
		*m_headp = m;

		/* Try it again */
		error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

		if (error) {
			adapter->no_tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	} else if (error != 0) {
		/* Typically ENOMEM: leave the chain for the caller */
		adapter->no_tx_dma_setup++;
		return (error);
	}

	/* Keep two descriptors spare (hardware head/tail ambiguity) */
        if (nsegs > (adapter->num_tx_desc_avail - 2)) {
                adapter->no_tx_desc_avail2++;
		bus_dmamap_unload(adapter->txtag, map);
		return (ENOBUFS);
        }
	m_head = *m_headp;

	/* Do hardware assists */
	if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
		lem_transmit_checksum_setup(adapter,  m_head,
		    &txd_upper, &txd_lower);

	i = adapter->next_avail_tx_desc;
	if (adapter->pcix_82544)
		txd_saved = i;

	/* Set up our transmit descriptors */
	for (j = 0; j < nsegs; j++) {
		bus_size_t seg_len;
		bus_addr_t seg_addr;
		/* If adapter is 82544 and on PCIX bus */
		if(adapter->pcix_82544) {
			DESC_ARRAY	desc_array;
			u32		array_elements, counter;
			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
			array_elements = lem_fill_descriptors(segs[j].ds_addr,
			    segs[j].ds_len, &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				if (txd_used == adapter->num_tx_desc_avail) {
					/* Out of room: roll back and bail */
					adapter->next_avail_tx_desc = txd_saved;
					adapter->no_tx_desc_avail2++;
					bus_dmamap_unload(adapter->txtag, map);
					return (ENOBUFS);
				}
				tx_buffer = &adapter->tx_buffer_area[i];
				ctxd = &adapter->tx_desc_base[i];
				ctxd->buffer_addr = htole64(
				    desc_array.descriptor[counter].address);
				ctxd->lower.data = htole32(
				    (adapter->txd_cmd | txd_lower | (u16)
				    desc_array.descriptor[counter].length));
				ctxd->upper.data =
				    htole32((txd_upper));
				last = i;
				if (++i == adapter->num_tx_desc)
                                         i = 0;
				tx_buffer->m_head = NULL;
				tx_buffer->next_eop = -1;
				txd_used++;
                        }
		} else {
			tx_buffer = &adapter->tx_buffer_area[i];
			ctxd = &adapter->tx_desc_base[i];
			seg_addr = segs[j].ds_addr;
			seg_len  = segs[j].ds_len;
			ctxd->buffer_addr = htole64(seg_addr);
			ctxd->lower.data = htole32(
			adapter->txd_cmd | txd_lower | seg_len);
			ctxd->upper.data =
			    htole32(txd_upper);
			last = i;
			if (++i == adapter->num_tx_desc)
				i = 0;
			tx_buffer->m_head = NULL;
			tx_buffer->next_eop = -1;
		}
	}

	adapter->next_avail_tx_desc = i;

	if (adapter->pcix_82544)
		adapter->num_tx_desc_avail -= txd_used;
	else
		adapter->num_tx_desc_avail -= nsegs;

        /*
	** Handle VLAN tag, this is the
	** biggest difference between
	** 6.x and 7
	*/
#if __FreeBSD_version < 700000
        /* Find out if we are in vlan mode. */
        mtag = VLAN_OUTPUT_TAG(ifp, m_head);
        if (mtag != NULL) {
                ctxd->upper.fields.special =
                    htole16(VLAN_TAG_VALUE(mtag));
#else /* FreeBSD 7 */
	if (m_head->m_flags & M_VLANTAG) {
		/* Set the vlan id. */
		ctxd->upper.fields.special =
		    htole16(m_head->m_pkthdr.ether_vtag);
#endif
                /* Tell hardware to add tag */
                ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
        }

	/* Only the last buffer keeps the mbuf; swap maps so the
	 * first slot's original map stays associated with it. */
        tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;
        bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

        /*
         * Last Descriptor of Packet
	 * needs End Of Packet (EOP)
	 * and Report Status (RS)
         */
        ctxd->lower.data |=
	    htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
	/*
	 * Keep track in the first buffer which
	 * descriptor will be written back
	 */
	tx_buffer = &adapter->tx_buffer_area[first];
	tx_buffer->next_eop = last;

	/*
	 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
	 * that this frame is available to transmit.
	 */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* 82547 half-duplex needs the FIFO-hang workaround path */
	if (adapter->hw.mac.type == e1000_82547 &&
	    adapter->link_duplex == HALF_DUPLEX)
		lem_82547_move_tail(adapter);
	else {
		E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
		if (adapter->hw.mac.type == e1000_82547)
			lem_82547_update_fifo_head(adapter,
			    m_head->m_pkthdr.len);
	}

	return (0);
}
1902
1903/*********************************************************************
1904 *
1905 * 82547 workaround to avoid controller hang in half-duplex environment.
1906 * The workaround is to avoid queuing a large packet that would span
1907 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1908 * in this case. We do that only when FIFO is quiescent.
1909 *
1910 **********************************************************************/
static void
lem_82547_move_tail(void *arg)
{
	struct adapter *adapter = arg;
	struct e1000_tx_desc *tx_desc;
	u16	hw_tdt, sw_tdt, length = 0;
	bool	eop = 0;

	EM_TX_LOCK_ASSERT(adapter);

	/*
	 * Walk the descriptors between the tail the hardware knows about
	 * and the tail software has queued, advancing the hardware tail
	 * one whole packet at a time so no packet ever spans the internal
	 * TX FIFO wrap boundary (the 82547 half-duplex hang workaround).
	 */
	hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
	sw_tdt = adapter->next_avail_tx_desc;
	
	while (hw_tdt != sw_tdt) {
		/* Accumulate the byte length of the current packet. */
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if (++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if (eop) {
			/*
			 * End of packet: if it would cross the FIFO wrap
			 * point, defer and retry from the callout once the
			 * FIFO has drained; otherwise expose it to hardware.
			 */
			if (lem_82547_fifo_workaround(adapter, length)) {
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
					lem_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
			lem_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}	
}
1944
1945static int
1946lem_82547_fifo_workaround(struct adapter *adapter, int len)
1947{
1948	int fifo_space, fifo_pkt_len;
1949
1950	fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1951
1952	if (adapter->link_duplex == HALF_DUPLEX) {
1953		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1954
1955		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1956			if (lem_82547_tx_fifo_reset(adapter))
1957				return (0);
1958			else
1959				return (1);
1960		}
1961	}
1962
1963	return (0);
1964}
1965
1966static void
1967lem_82547_update_fifo_head(struct adapter *adapter, int len)
1968{
1969	int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1970
1971	/* tx_fifo_head is always 16 byte aligned */
1972	adapter->tx_fifo_head += fifo_pkt_len;
1973	if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1974		adapter->tx_fifo_head -= adapter->tx_fifo_size;
1975	}
1976}
1977
1978
1979static int
1980lem_82547_tx_fifo_reset(struct adapter *adapter)
1981{
1982	u32 tctl;
1983
1984	if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
1985	    E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
1986	    (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
1987	    E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
1988	    (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
1989	    E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
1990	    (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
1991		/* Disable TX unit */
1992		tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
1993		E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
1994		    tctl & ~E1000_TCTL_EN);
1995
1996		/* Reset FIFO pointers */
1997		E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
1998		    adapter->tx_head_addr);
1999		E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
2000		    adapter->tx_head_addr);
2001		E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
2002		    adapter->tx_head_addr);
2003		E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
2004		    adapter->tx_head_addr);
2005
2006		/* Re-enable TX unit */
2007		E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2008		E1000_WRITE_FLUSH(&adapter->hw);
2009
2010		adapter->tx_fifo_head = 0;
2011		adapter->tx_fifo_reset_cnt++;
2012
2013		return (TRUE);
2014	}
2015	else {
2016		return (FALSE);
2017	}
2018}
2019
2020static void
2021lem_set_promisc(struct adapter *adapter)
2022{
2023	struct ifnet	*ifp = adapter->ifp;
2024	u32		reg_rctl;
2025
2026	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2027
2028	if (ifp->if_flags & IFF_PROMISC) {
2029		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2030		/* Turn this on if you want to see bad packets */
2031		if (lem_debug_sbp)
2032			reg_rctl |= E1000_RCTL_SBP;
2033		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2034	} else if (ifp->if_flags & IFF_ALLMULTI) {
2035		reg_rctl |= E1000_RCTL_MPE;
2036		reg_rctl &= ~E1000_RCTL_UPE;
2037		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2038	}
2039}
2040
2041static void
2042lem_disable_promisc(struct adapter *adapter)
2043{
2044	u32	reg_rctl;
2045
2046	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2047
2048	reg_rctl &=  (~E1000_RCTL_UPE);
2049	reg_rctl &=  (~E1000_RCTL_MPE);
2050	reg_rctl &=  (~E1000_RCTL_SBP);
2051	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2052}
2053
2054
2055/*********************************************************************
2056 *  Multicast Update
2057 *
2058 *  This routine is called whenever multicast address list is updated.
2059 *
2060 **********************************************************************/
2061
2062static void
2063lem_set_multi(struct adapter *adapter)
2064{
2065	struct ifnet	*ifp = adapter->ifp;
2066	struct ifmultiaddr *ifma;
2067	u32 reg_rctl = 0;
2068	u8  *mta; /* Multicast array memory */
2069	int mcnt = 0;
2070
2071	IOCTL_DEBUGOUT("lem_set_multi: begin");
2072
2073	if (adapter->hw.mac.type == e1000_82542 &&
2074	    adapter->hw.revision_id == E1000_REVISION_2) {
2075		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2076		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2077			e1000_pci_clear_mwi(&adapter->hw);
2078		reg_rctl |= E1000_RCTL_RST;
2079		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2080		msec_delay(5);
2081	}
2082
2083	/* Allocate temporary memory to setup array */
2084	mta = malloc(sizeof(u8) *
2085	    (ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES),
2086	    M_DEVBUF, M_NOWAIT | M_ZERO);
2087	if (mta == NULL)
2088		panic("lem_set_multi memory failure\n");
2089
2090#if __FreeBSD_version < 800000
2091	IF_ADDR_LOCK(ifp);
2092#else
2093	if_maddr_rlock(ifp);
2094#endif
2095	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2096		if (ifma->ifma_addr->sa_family != AF_LINK)
2097			continue;
2098
2099		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2100			break;
2101
2102		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2103		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
2104		mcnt++;
2105	}
2106#if __FreeBSD_version < 800000
2107	IF_ADDR_UNLOCK(ifp);
2108#else
2109	if_maddr_runlock(ifp);
2110#endif
2111	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
2112		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2113		reg_rctl |= E1000_RCTL_MPE;
2114		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2115	} else
2116		e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
2117
2118	if (adapter->hw.mac.type == e1000_82542 &&
2119	    adapter->hw.revision_id == E1000_REVISION_2) {
2120		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2121		reg_rctl &= ~E1000_RCTL_RST;
2122		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2123		msec_delay(5);
2124		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2125			e1000_pci_set_mwi(&adapter->hw);
2126	}
2127	free(mta, M_DEVBUF);
2128}
2129
2130
2131/*********************************************************************
2132 *  Timer routine
2133 *
2134 *  This routine checks for link status and updates statistics.
2135 *
2136 **********************************************************************/
2137
2138static void
2139lem_local_timer(void *arg)
2140{
2141	struct adapter	*adapter = arg;
2142	struct ifnet	*ifp = adapter->ifp;
2143
2144	EM_CORE_LOCK_ASSERT(adapter);
2145
2146	taskqueue_enqueue(adapter->tq,
2147	    &adapter->rxtx_task);
2148	lem_update_link_status(adapter);
2149	lem_update_stats_counters(adapter);
2150
2151	if (lem_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING)
2152		lem_print_hw_stats(adapter);
2153
2154	lem_smartspeed(adapter);
2155
2156	/*
2157	 * We check the watchdog: the time since
2158	 * the last TX descriptor was cleaned.
2159	 * This implies a functional TX engine.
2160	 */
2161	if ((adapter->watchdog_check == TRUE) &&
2162	    (ticks - adapter->watchdog_time > EM_WATCHDOG))
2163		goto hung;
2164
2165	callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
2166	return;
2167hung:
2168	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2169	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2170	adapter->watchdog_events++;
2171	lem_init_locked(adapter);
2172}
2173
/*
 * Determine the current link state (per media type) and, on a
 * transition, update driver state, interface baudrate, and notify
 * the network stack.
 */
static void
lem_update_link_status(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	u32 link_check = 0;

	/* Get the cached link value or read phy for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			if (link_check) /* ESB2 fix */
				e1000_cfg_on_link_up(hw);
		} else
			link_check = TRUE;
		break;
	case e1000_media_type_fiber:
		/* For fiber the Link Up bit in STATUS is authoritative. */
		e1000_check_for_link(hw);
		link_check = (E1000_READ_REG(hw, E1000_STATUS) &
                                 E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = adapter->hw.mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	/* Now check for a transition */
	if (link_check && (adapter->link_active == 0)) {
		/* Link came up: latch speed/duplex and tell the stack. */
		e1000_get_speed_and_duplex(hw, &adapter->link_speed,
		    &adapter->link_duplex);
		if (bootverbose)
			device_printf(dev, "Link is up %d Mbps %s\n",
			    adapter->link_speed,
			    ((adapter->link_duplex == FULL_DUPLEX) ?
			    "Full Duplex" : "Half Duplex"));
		adapter->link_active = 1;
		adapter->smartspeed = 0;
		ifp->if_baudrate = adapter->link_speed * 1000000;
		if_link_state_change(ifp, LINK_STATE_UP);
	} else if (!link_check && (adapter->link_active == 1)) {
		/* Link went down: clear speed/duplex state. */
		ifp->if_baudrate = adapter->link_speed = 0;
		adapter->link_duplex = 0;
		if (bootverbose)
			device_printf(dev, "Link is Down\n");
		adapter->link_active = 0;
		/* Link down, disable watchdog */
		adapter->watchdog_check = FALSE;
		if_link_state_change(ifp, LINK_STATE_DOWN);
	}
}
2232
2233/*********************************************************************
2234 *
2235 *  This routine disables all traffic on the adapter by issuing a
2236 *  global reset on the MAC and deallocates TX/RX buffers.
2237 *
2238 *  This routine should always be called with BOTH the CORE
2239 *  and TX locks.
2240 **********************************************************************/
2241
static void
lem_stop(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp = adapter->ifp;

	EM_CORE_LOCK_ASSERT(adapter);
	EM_TX_LOCK_ASSERT(adapter);

	INIT_DEBUGOUT("lem_stop: begin");

	/* Quiesce interrupts and our periodic timers first. */
	lem_disable_intr(adapter);
	callout_stop(&adapter->timer);
	callout_stop(&adapter->tx_fifo_timer);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* Global MAC reset; clear wakeup control on 82544 and newer. */
	e1000_reset_hw(&adapter->hw);
	if (adapter->hw.mac.type >= e1000_82544)
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);

	e1000_led_off(&adapter->hw);
	e1000_cleanup_led(&adapter->hw);
}
2267
2268
2269/*********************************************************************
2270 *
2271 *  Determine hardware revision.
2272 *
2273 **********************************************************************/
2274static void
2275lem_identify_hardware(struct adapter *adapter)
2276{
2277	device_t dev = adapter->dev;
2278
2279	/* Make sure our PCI config space has the necessary stuff set */
2280	adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2281	if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
2282	    (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
2283		device_printf(dev, "Memory Access and/or Bus Master bits "
2284		    "were not set!\n");
2285		adapter->hw.bus.pci_cmd_word |=
2286		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
2287		pci_write_config(dev, PCIR_COMMAND,
2288		    adapter->hw.bus.pci_cmd_word, 2);
2289	}
2290
2291	/* Save off the information about this board */
2292	adapter->hw.vendor_id = pci_get_vendor(dev);
2293	adapter->hw.device_id = pci_get_device(dev);
2294	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2295	adapter->hw.subsystem_vendor_id =
2296	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
2297	adapter->hw.subsystem_device_id =
2298	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
2299
2300	/* Do Shared Code Init and Setup */
2301	if (e1000_set_mac_type(&adapter->hw)) {
2302		device_printf(dev, "Setup init failure\n");
2303		return;
2304	}
2305}
2306
2307static int
2308lem_allocate_pci_resources(struct adapter *adapter)
2309{
2310	device_t	dev = adapter->dev;
2311	int		val, rid, error = E1000_SUCCESS;
2312
2313	rid = PCIR_BAR(0);
2314	adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2315	    &rid, RF_ACTIVE);
2316	if (adapter->memory == NULL) {
2317		device_printf(dev, "Unable to allocate bus resource: memory\n");
2318		return (ENXIO);
2319	}
2320	adapter->osdep.mem_bus_space_tag =
2321	    rman_get_bustag(adapter->memory);
2322	adapter->osdep.mem_bus_space_handle =
2323	    rman_get_bushandle(adapter->memory);
2324	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2325
2326	/* Only older adapters use IO mapping */
2327	if (adapter->hw.mac.type > e1000_82543) {
2328		/* Figure our where our IO BAR is ? */
2329		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
2330			val = pci_read_config(dev, rid, 4);
2331			if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
2332				adapter->io_rid = rid;
2333				break;
2334			}
2335			rid += 4;
2336			/* check for 64bit BAR */
2337			if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
2338				rid += 4;
2339		}
2340		if (rid >= PCIR_CIS) {
2341			device_printf(dev, "Unable to locate IO BAR\n");
2342			return (ENXIO);
2343		}
2344		adapter->ioport = bus_alloc_resource_any(dev,
2345		    SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
2346		if (adapter->ioport == NULL) {
2347			device_printf(dev, "Unable to allocate bus resource: "
2348			    "ioport\n");
2349			return (ENXIO);
2350		}
2351		adapter->hw.io_base = 0;
2352		adapter->osdep.io_bus_space_tag =
2353		    rman_get_bustag(adapter->ioport);
2354		adapter->osdep.io_bus_space_handle =
2355		    rman_get_bushandle(adapter->ioport);
2356	}
2357
2358	adapter->hw.back = &adapter->osdep;
2359
2360	return (error);
2361}
2362
2363/*********************************************************************
2364 *
2365 *  Setup the Legacy or MSI Interrupt handler
2366 *
2367 **********************************************************************/
int
lem_allocate_irq(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int error, rid = 0;

	/* Manually turn off all interrupts */
	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);

	/* We allocate a single interrupt resource */
	adapter->res[0] = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (adapter->res[0] == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		return (ENXIO);
	}

#ifdef EM_LEGACY_IRQ
	/* We do Legacy setup */
	/* Classic ithread-based handler; signature differs pre/post 7.0. */
	if ((error = bus_setup_intr(dev, adapter->res[0],
#if __FreeBSD_version > 700000
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, lem_intr, adapter,
#else /* 6.X */
	    INTR_TYPE_NET | INTR_MPSAFE, lem_intr, adapter,
#endif
	    &adapter->tag[0])) != 0) {
		device_printf(dev, "Failed to register interrupt handler");
		return (error);
	}

#else /* FAST_IRQ */
	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.
	 */
	TASK_INIT(&adapter->rxtx_task, 0, lem_handle_rxtx, adapter);
	TASK_INIT(&adapter->link_task, 0, lem_handle_link, adapter);
	adapter->tq = taskqueue_create_fast("lem_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(adapter->dev));
	/* Fast/filter handler; real work is deferred to the taskqueue. */
#if __FreeBSD_version < 700000
	if ((error = bus_setup_intr(dev, adapter->res[0],
	    INTR_TYPE_NET | INTR_FAST, lem_irq_fast, adapter,
#else
	if ((error = bus_setup_intr(dev, adapter->res[0],
	    INTR_TYPE_NET, lem_irq_fast, NULL, adapter,
#endif
	    &adapter->tag[0])) != 0) {
		device_printf(dev, "Failed to register fast interrupt "
			    "handler: %d\n", error);
		/* Undo the taskqueue setup on failure. */
		taskqueue_free(adapter->tq);
		adapter->tq = NULL;
		return (error);
	}
#endif  /* EM_LEGACY_IRQ */

	return (0);
}
2428
2429
2430static void
2431lem_free_pci_resources(struct adapter *adapter)
2432{
2433	device_t dev = adapter->dev;
2434
2435
2436	if (adapter->tag[0] != NULL) {
2437		bus_teardown_intr(dev, adapter->res[0],
2438		    adapter->tag[0]);
2439		adapter->tag[0] = NULL;
2440	}
2441
2442	if (adapter->res[0] != NULL) {
2443		bus_release_resource(dev, SYS_RES_IRQ,
2444		    0, adapter->res[0]);
2445	}
2446
2447	if (adapter->memory != NULL)
2448		bus_release_resource(dev, SYS_RES_MEMORY,
2449		    PCIR_BAR(0), adapter->memory);
2450
2451	if (adapter->ioport != NULL)
2452		bus_release_resource(dev, SYS_RES_IOPORT,
2453		    adapter->io_rid, adapter->ioport);
2454}
2455
2456
2457/*********************************************************************
2458 *
2459 *  Initialize the hardware to a configuration
2460 *  as specified by the adapter structure.
2461 *
2462 **********************************************************************/
2463static int
2464lem_hardware_init(struct adapter *adapter)
2465{
2466	device_t dev = adapter->dev;
2467	u16 	rx_buffer_size;
2468
2469	INIT_DEBUGOUT("lem_hardware_init: begin");
2470
2471	/* Issue a global reset */
2472	e1000_reset_hw(&adapter->hw);
2473
2474	/* When hardware is reset, fifo_head is also reset */
2475	adapter->tx_fifo_head = 0;
2476
2477	/*
2478	 * These parameters control the automatic generation (Tx) and
2479	 * response (Rx) to Ethernet PAUSE frames.
2480	 * - High water mark should allow for at least two frames to be
2481	 *   received after sending an XOFF.
2482	 * - Low water mark works best when it is very near the high water mark.
2483	 *   This allows the receiver to restart by sending XON when it has
2484	 *   drained a bit. Here we use an arbitary value of 1500 which will
2485	 *   restart after one full frame is pulled from the buffer. There
2486	 *   could be several smaller frames in the buffer and if so they will
2487	 *   not trigger the XON until their total number reduces the buffer
2488	 *   by 1500.
2489	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2490	 */
2491	rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
2492	    0xffff) << 10 );
2493
2494	adapter->hw.fc.high_water = rx_buffer_size -
2495	    roundup2(adapter->max_frame_size, 1024);
2496	adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
2497
2498	adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
2499	adapter->hw.fc.send_xon = TRUE;
2500
2501        /* Set Flow control, use the tunable location if sane */
2502        if ((lem_fc_setting >= 0) || (lem_fc_setting < 4))
2503                adapter->hw.fc.requested_mode = lem_fc_setting;
2504        else
2505                adapter->hw.fc.requested_mode = e1000_fc_none;
2506
2507	if (e1000_init_hw(&adapter->hw) < 0) {
2508		device_printf(dev, "Hardware Initialization Failed\n");
2509		return (EIO);
2510	}
2511
2512	e1000_check_for_link(&adapter->hw);
2513
2514	return (0);
2515}
2516
2517/*********************************************************************
2518 *
2519 *  Setup networking device structure and register an interface.
2520 *
2521 **********************************************************************/
static void
lem_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet   *ifp;

	INIT_DEBUGOUT("lem_setup_interface: begin");

	/* Allocate and initialize the ifnet and hook in our methods. */
	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_init =  lem_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = lem_ioctl;
	ifp->if_start = lem_start;
	/* Leave one slot free so head never catches tail in the TX ring. */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
	IFQ_SET_READY(&ifp->if_snd);

	ether_ifattach(ifp, adapter->hw.mac.addr);

	ifp->if_capabilities = ifp->if_capenable = 0;

#if __FreeBSD_version >= 800000
	/* Multiqueue tx functions */
	ifp->if_transmit = lem_mq_start;
	ifp->if_qflush = lem_qflush;
	adapter->br = buf_ring_alloc(4096, M_DEVBUF, M_WAITOK, &adapter->tx_mtx);
#endif
	/* Checksum offload is available from the 82543 onward. */
	if (adapter->hw.mac.type >= e1000_82543) {
		int version_cap;
#if __FreeBSD_version < 700000
		version_cap = IFCAP_HWCSUM;
#else
		version_cap = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
#endif
		ifp->if_capabilities |= version_cap;
		ifp->if_capenable |= version_cap;
	}

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/* Enable All WOL methods by default */
	if (adapter->wol) {
		ifp->if_capabilities |= IFCAP_WOL;
		ifp->if_capenable |= IFCAP_WOL;
	}

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK,
	    lem_media_change, lem_media_status);
	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
		u_char fiber_type = IFM_1000_SX;	/* default type */

		/* The 82545 fiber variant uses 1000baseLX optics. */
		if (adapter->hw.mac.type == e1000_82545)
			fiber_type = IFM_1000_LX;
		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
	} else {
		/* Copper: advertise 10/100 plus gigabit where the PHY allows. */
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
			    0, NULL);
		if (adapter->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&adapter->media,
				IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
			ifmedia_add(&adapter->media,
				IFM_ETHER | IFM_1000_T, 0, NULL);
		}
	}
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
}
2614
2615
2616/*********************************************************************
2617 *
2618 *  Workaround for SmartSpeed on 82541 and 82547 controllers
2619 *
2620 **********************************************************************/
static void
lem_smartspeed(struct adapter *adapter)
{
	u16 phy_tmp;

	/* Only relevant while link is down on an IGP PHY that is
	 * autonegotiating and advertising 1000FDX. */
	if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
	    adapter->hw.mac.autoneg == 0 ||
	    (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
			e1000_read_phy_reg(&adapter->hw,
			    PHY_1000T_CTRL, &phy_tmp);
			if(phy_tmp & CR_1000T_MS_ENABLE) {
				/* Drop manual master/slave and restart
				 * autonegotiation to try to resolve it. */
				phy_tmp &= ~CR_1000T_MS_ENABLE;
				e1000_write_phy_reg(&adapter->hw,
				    PHY_1000T_CTRL, phy_tmp);
				adapter->smartspeed++;
				if(adapter->hw.mac.autoneg &&
				   !e1000_copper_link_autoneg(&adapter->hw) &&
				   !e1000_read_phy_reg(&adapter->hw,
				    PHY_CONTROL, &phy_tmp)) {
					phy_tmp |= (MII_CR_AUTO_NEG_EN |
						    MII_CR_RESTART_AUTO_NEG);
					e1000_write_phy_reg(&adapter->hw,
					    PHY_CONTROL, phy_tmp);
				}
			}
		}
		return;
	} else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
		phy_tmp |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
		if(adapter->hw.mac.autoneg &&
		   !e1000_copper_link_autoneg(&adapter->hw) &&
		   !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
			phy_tmp |= (MII_CR_AUTO_NEG_EN |
				    MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
		}
	}
	/* Restart process after EM_SMARTSPEED_MAX iterations */
	if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
2675
2676
2677/*
2678 * Manage DMA'able memory.
2679 */
2680static void
2681lem_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2682{
2683	if (error)
2684		return;
2685	*(bus_addr_t *) arg = segs[0].ds_addr;
2686}
2687
2688static int
2689lem_dma_malloc(struct adapter *adapter, bus_size_t size,
2690        struct em_dma_alloc *dma, int mapflags)
2691{
2692	int error;
2693
2694#if __FreeBSD_version >= 700000
2695	error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
2696#else
2697	error = bus_dma_tag_create(NULL,		 /* parent */
2698#endif
2699				EM_DBA_ALIGN, 0,	/* alignment, bounds */
2700				BUS_SPACE_MAXADDR,	/* lowaddr */
2701				BUS_SPACE_MAXADDR,	/* highaddr */
2702				NULL, NULL,		/* filter, filterarg */
2703				size,			/* maxsize */
2704				1,			/* nsegments */
2705				size,			/* maxsegsize */
2706				0,			/* flags */
2707				NULL,			/* lockfunc */
2708				NULL,			/* lockarg */
2709				&dma->dma_tag);
2710	if (error) {
2711		device_printf(adapter->dev,
2712		    "%s: bus_dma_tag_create failed: %d\n",
2713		    __func__, error);
2714		goto fail_0;
2715	}
2716
2717	error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2718	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
2719	if (error) {
2720		device_printf(adapter->dev,
2721		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
2722		    __func__, (uintmax_t)size, error);
2723		goto fail_2;
2724	}
2725
2726	dma->dma_paddr = 0;
2727	error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2728	    size, lem_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
2729	if (error || dma->dma_paddr == 0) {
2730		device_printf(adapter->dev,
2731		    "%s: bus_dmamap_load failed: %d\n",
2732		    __func__, error);
2733		goto fail_3;
2734	}
2735
2736	return (0);
2737
2738fail_3:
2739	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2740fail_2:
2741	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2742	bus_dma_tag_destroy(dma->dma_tag);
2743fail_0:
2744	dma->dma_map = NULL;
2745	dma->dma_tag = NULL;
2746
2747	return (error);
2748}
2749
2750static void
2751lem_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2752{
2753	if (dma->dma_tag == NULL)
2754		return;
2755	if (dma->dma_map != NULL) {
2756		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2757		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2758		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2759		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2760		dma->dma_map = NULL;
2761	}
2762	bus_dma_tag_destroy(dma->dma_tag);
2763	dma->dma_tag = NULL;
2764}
2765
2766
2767/*********************************************************************
2768 *
2769 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2770 *  the information needed to transmit a packet on the wire.
2771 *
2772 **********************************************************************/
static int
lem_allocate_transmit_structures(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct em_buffer *tx_buffer;
	int error;

	/*
	 * Create DMA tags for tx descriptors
	 */
#if __FreeBSD_version >= 700000
	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
#else
	if ((error = bus_dma_tag_create(NULL,		 /* parent */
#endif
				1, 0,			/* alignment, bounds */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				EM_TSO_SIZE,		/* maxsize */
				EM_MAX_SCATTER,		/* nsegments */
				EM_TSO_SEG_SIZE,	/* maxsegsize */
				0,			/* flags */
				NULL,		/* lockfunc */
				NULL,		/* lockarg */
				&adapter->txtag)) != 0) {
		device_printf(dev, "Unable to allocate TX DMA tag\n");
		goto fail;
	}

	/* One em_buffer bookkeeping slot per TX descriptor. */
	adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (adapter->tx_buffer_area == NULL) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
	for (int i = 0; i < adapter->num_tx_desc; i++) {
		tx_buffer = &adapter->tx_buffer_area[i];
		error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
		if (error != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
		/* -1 marks "no pending write-back descriptor". */
		tx_buffer->next_eop = -1;
	}

	return (0);
fail:
	/* Releases the tag, buffer area, and any maps created so far. */
	lem_free_transmit_structures(adapter);
	return (error);
}
2827
2828/*********************************************************************
2829 *
2830 *  (Re)Initialize transmit structures.
2831 *
2832 **********************************************************************/
2833static void
2834lem_setup_transmit_structures(struct adapter *adapter)
2835{
2836	struct em_buffer *tx_buffer;
2837
2838	/* Clear the old ring contents */
2839	bzero(adapter->tx_desc_base,
2840	    (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
2841
2842	/* Free any existing TX buffers */
2843	for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2844		tx_buffer = &adapter->tx_buffer_area[i];
2845		bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2846		    BUS_DMASYNC_POSTWRITE);
2847		bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2848		m_freem(tx_buffer->m_head);
2849		tx_buffer->m_head = NULL;
2850		tx_buffer->next_eop = -1;
2851	}
2852
2853	/* Reset state */
2854	adapter->next_avail_tx_desc = 0;
2855	adapter->next_tx_to_clean = 0;
2856	adapter->num_tx_desc_avail = adapter->num_tx_desc;
2857
2858	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2859	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2860
2861	return;
2862}
2863
2864/*********************************************************************
2865 *
2866 *  Enable transmit unit.
2867 *
2868 **********************************************************************/
static void
lem_initialize_transmit_unit(struct adapter *adapter)
{
	u32	tctl, tipg = 0;
	u64	bus_addr;

	 INIT_DEBUGOUT("lem_initialize_transmit_unit: begin");
	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = adapter->txdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
	    adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
	/* Program base address high half before low half. */
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
	    (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
	    (u32)bus_addr);
	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
	E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);

	HW_DEBUGOUT2("Base = %x, Length = %x\n",
	    E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
	    E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (adapter->hw.mac.type) {
	case e1000_82542:
		/* 82542 uses its own IPGT/IPGR1/IPGR2 defaults. */
		tipg = DEFAULT_82542_TIPG_IPGT;
		tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	default:
		/* Fiber/SerDes links use a different IPGT than copper. */
		if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
		    (adapter->hw.phy.media_type ==
		    e1000_media_type_internal_serdes))
			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
	}

	E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
	/* TX interrupt delay; absolute delay (TADV) only on 82540+. */
	E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
	if(adapter->hw.mac.type >= e1000_82540)
		E1000_WRITE_REG(&adapter->hw, E1000_TADV,
		    adapter->tx_abs_int_delay.value);

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);

	/* Setup Transmit Descriptor Base Settings */
	adapter->txd_cmd = E1000_TXD_CMD_IFCS;

	/* Request delayed interrupts per descriptor when a delay is set. */
	if (adapter->tx_int_delay.value > 0)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
}
2931
2932/*********************************************************************
2933 *
2934 *  Free all transmit related data structures.
2935 *
2936 **********************************************************************/
2937static void
2938lem_free_transmit_structures(struct adapter *adapter)
2939{
2940	struct em_buffer *tx_buffer;
2941
2942	INIT_DEBUGOUT("free_transmit_structures: begin");
2943
2944	if (adapter->tx_buffer_area != NULL) {
2945		for (int i = 0; i < adapter->num_tx_desc; i++) {
2946			tx_buffer = &adapter->tx_buffer_area[i];
2947			if (tx_buffer->m_head != NULL) {
2948				bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2949				    BUS_DMASYNC_POSTWRITE);
2950				bus_dmamap_unload(adapter->txtag,
2951				    tx_buffer->map);
2952				m_freem(tx_buffer->m_head);
2953				tx_buffer->m_head = NULL;
2954			} else if (tx_buffer->map != NULL)
2955				bus_dmamap_unload(adapter->txtag,
2956				    tx_buffer->map);
2957			if (tx_buffer->map != NULL) {
2958				bus_dmamap_destroy(adapter->txtag,
2959				    tx_buffer->map);
2960				tx_buffer->map = NULL;
2961			}
2962		}
2963	}
2964	if (adapter->tx_buffer_area != NULL) {
2965		free(adapter->tx_buffer_area, M_DEVBUF);
2966		adapter->tx_buffer_area = NULL;
2967	}
2968	if (adapter->txtag != NULL) {
2969		bus_dma_tag_destroy(adapter->txtag);
2970		adapter->txtag = NULL;
2971	}
2972#if __FreeBSD_version >= 800000
2973	if (adapter->br != NULL)
2974        	buf_ring_free(adapter->br, M_DEVBUF);
2975#endif
2976}
2977
2978/*********************************************************************
2979 *
2980 *  The offload context needs to be set when we transfer the first
2981 *  packet of a particular protocol (TCP/UDP). This routine has been
2982 *  enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
2983 *
2984 *  Added back the old method of keeping the current context type
2985 *  and not setting if unnecessary, as this is reported to be a
2986 *  big performance win.  -jfv
2987 **********************************************************************/
2988static void
2989lem_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
2990    u32 *txd_upper, u32 *txd_lower)
2991{
2992	struct e1000_context_desc *TXD = NULL;
2993	struct em_buffer *tx_buffer;
2994	struct ether_vlan_header *eh;
2995	struct ip *ip = NULL;
2996	struct ip6_hdr *ip6;
2997	int curr_txd, ehdrlen;
2998	u32 cmd, hdr_len, ip_hlen;
2999	u16 etype;
3000	u8 ipproto;
3001
3002
3003	cmd = hdr_len = ipproto = 0;
3004	curr_txd = adapter->next_avail_tx_desc;
3005
3006	/*
3007	 * Determine where frame payload starts.
3008	 * Jump over vlan headers if already present,
3009	 * helpful for QinQ too.
3010	 */
3011	eh = mtod(mp, struct ether_vlan_header *);
3012	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3013		etype = ntohs(eh->evl_proto);
3014		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3015	} else {
3016		etype = ntohs(eh->evl_encap_proto);
3017		ehdrlen = ETHER_HDR_LEN;
3018	}
3019
3020	/*
3021	 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
3022	 * TODO: Support SCTP too when it hits the tree.
3023	 */
3024	switch (etype) {
3025	case ETHERTYPE_IP:
3026		ip = (struct ip *)(mp->m_data + ehdrlen);
3027		ip_hlen = ip->ip_hl << 2;
3028
3029		/* Setup of IP header checksum. */
3030		if (mp->m_pkthdr.csum_flags & CSUM_IP) {
3031			/*
3032			 * Start offset for header checksum calculation.
3033			 * End offset for header checksum calculation.
3034			 * Offset of place to put the checksum.
3035			 */
3036			TXD = (struct e1000_context_desc *)
3037			    &adapter->tx_desc_base[curr_txd];
3038			TXD->lower_setup.ip_fields.ipcss = ehdrlen;
3039			TXD->lower_setup.ip_fields.ipcse =
3040			    htole16(ehdrlen + ip_hlen);
3041			TXD->lower_setup.ip_fields.ipcso =
3042			    ehdrlen + offsetof(struct ip, ip_sum);
3043			cmd |= E1000_TXD_CMD_IP;
3044			*txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3045		}
3046
3047		if (mp->m_len < ehdrlen + ip_hlen)
3048			return;	/* failure */
3049
3050		hdr_len = ehdrlen + ip_hlen;
3051		ipproto = ip->ip_p;
3052
3053		break;
3054	case ETHERTYPE_IPV6:
3055		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3056		ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
3057
3058		if (mp->m_len < ehdrlen + ip_hlen)
3059			return;	/* failure */
3060
3061		/* IPv6 doesn't have a header checksum. */
3062
3063		hdr_len = ehdrlen + ip_hlen;
3064		ipproto = ip6->ip6_nxt;
3065
3066		break;
3067	default:
3068		*txd_upper = 0;
3069		*txd_lower = 0;
3070		return;
3071	}
3072
3073	switch (ipproto) {
3074	case IPPROTO_TCP:
3075		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
3076			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3077			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3078			/* no need for context if already set */
3079			if (adapter->last_hw_offload == CSUM_TCP)
3080				return;
3081			adapter->last_hw_offload = CSUM_TCP;
3082			/*
3083			 * Start offset for payload checksum calculation.
3084			 * End offset for payload checksum calculation.
3085			 * Offset of place to put the checksum.
3086			 */
3087			TXD = (struct e1000_context_desc *)
3088			    &adapter->tx_desc_base[curr_txd];
3089			TXD->upper_setup.tcp_fields.tucss = hdr_len;
3090			TXD->upper_setup.tcp_fields.tucse = htole16(0);
3091			TXD->upper_setup.tcp_fields.tucso =
3092			    hdr_len + offsetof(struct tcphdr, th_sum);
3093			cmd |= E1000_TXD_CMD_TCP;
3094		}
3095		break;
3096	case IPPROTO_UDP:
3097	{
3098		if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
3099			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3100			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3101			/* no need for context if already set */
3102			if (adapter->last_hw_offload == CSUM_UDP)
3103				return;
3104			adapter->last_hw_offload = CSUM_UDP;
3105			/*
3106			 * Start offset for header checksum calculation.
3107			 * End offset for header checksum calculation.
3108			 * Offset of place to put the checksum.
3109			 */
3110			TXD = (struct e1000_context_desc *)
3111			    &adapter->tx_desc_base[curr_txd];
3112			TXD->upper_setup.tcp_fields.tucss = hdr_len;
3113			TXD->upper_setup.tcp_fields.tucse = htole16(0);
3114			TXD->upper_setup.tcp_fields.tucso =
3115			    hdr_len + offsetof(struct udphdr, uh_sum);
3116		}
3117		/* Fall Thru */
3118	}
3119	default:
3120		break;
3121	}
3122
3123	TXD->tcp_seg_setup.data = htole32(0);
3124	TXD->cmd_and_length =
3125	    htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
3126	tx_buffer = &adapter->tx_buffer_area[curr_txd];
3127	tx_buffer->m_head = NULL;
3128	tx_buffer->next_eop = -1;
3129
3130	if (++curr_txd == adapter->num_tx_desc)
3131		curr_txd = 0;
3132
3133	adapter->num_tx_desc_avail--;
3134	adapter->next_avail_tx_desc = curr_txd;
3135}
3136
3137
3138/**********************************************************************
3139 *
3140 *  Examine each tx_buffer in the used queue. If the hardware is done
3141 *  processing the packet then free associated resources. The
3142 *  tx_buffer is put back on the free queue.
3143 *
3144 **********************************************************************/
/*
 * Walk the TX ring from next_tx_to_clean, reclaiming descriptors for
 * every packet whose end-of-packet descriptor has the DD (descriptor
 * done) bit set.  Frees completed mbufs, unloads their DMA maps, and
 * updates num_tx_desc_avail.  Must be called with the TX lock held.
 */
static void
lem_txeof(struct adapter *adapter)
{
        int first, last, done, num_avail;
        struct em_buffer *tx_buffer;
        struct e1000_tx_desc   *tx_desc, *eop_desc;
	struct ifnet   *ifp = adapter->ifp;

	EM_TX_LOCK_ASSERT(adapter);

	/* Ring completely free: nothing to clean. */
        if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
                return;

        num_avail = adapter->num_tx_desc_avail;
        first = adapter->next_tx_to_clean;
        tx_desc = &adapter->tx_desc_base[first];
        tx_buffer = &adapter->tx_buffer_area[first];
	/*
	 * NOTE(review): next_eop is -1 when this slot does not start a
	 * packet; in that case the index below points before the ring
	 * (tx_desc_base[-1]) and the DD test reads out of bounds —
	 * verify callers guarantee next_eop is valid here.
	 */
	last = tx_buffer->next_eop;
        eop_desc = &adapter->tx_desc_base[last];

	/*
	 * What this does is get the index of the
	 * first descriptor AFTER the EOP of the
	 * first packet, that way we can do the
	 * simple comparison on the inner while loop.
	 */
	if (++last == adapter->num_tx_desc)
 		last = 0;
	done = last;

	/* Pick up descriptor status written back by the hardware. */
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_POSTREAD);

        while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
		/* We clean the range of the packet */
		while (first != done) {
                	tx_desc->upper.data = 0;
                	tx_desc->lower.data = 0;
                	tx_desc->buffer_addr = 0;
                	++num_avail;

			if (tx_buffer->m_head) {
				ifp->if_opackets++;
				bus_dmamap_sync(adapter->txtag,
				    tx_buffer->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(adapter->txtag,
				    tx_buffer->map);

                        	m_freem(tx_buffer->m_head);
                        	tx_buffer->m_head = NULL;
                	}
			tx_buffer->next_eop = -1;
			/* Progress made: push the watchdog deadline out. */
			adapter->watchdog_time = ticks;

	                if (++first == adapter->num_tx_desc)
				first = 0;

	                tx_buffer = &adapter->tx_buffer_area[first];
			tx_desc = &adapter->tx_desc_base[first];
		}
		/* See if we can continue to the next packet */
		last = tx_buffer->next_eop;
		if (last != -1) {
        		eop_desc = &adapter->tx_desc_base[last];
			/* Get new done point */
			if (++last == adapter->num_tx_desc) last = 0;
			done = last;
		} else
			break;
        }
	/* Hand the cleaned descriptors back to the hardware. */
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        adapter->next_tx_to_clean = first;

        /*
         * If we have enough room, clear IFF_DRV_OACTIVE to
         * tell the stack that it is OK to send packets.
         * If there are no pending descriptors, clear the watchdog.
         */
        if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                if (num_avail == adapter->num_tx_desc) {
			adapter->watchdog_check = FALSE;
        		adapter->num_tx_desc_avail = num_avail;
			return;
		}
        }

        adapter->num_tx_desc_avail = num_avail;
	return;
}
3238
3239/*********************************************************************
3240 *
3241 *  When Link is lost sometimes there is work still in the TX ring
3242 *  which may result in a watchdog, rather than allow that we do an
3243 *  attempted cleanup and then reinit here. Note that this has been
3244 *  seens mostly with fiber adapters.
3245 *
3246 **********************************************************************/
3247static void
3248lem_tx_purge(struct adapter *adapter)
3249{
3250	if ((!adapter->link_active) && (adapter->watchdog_check)) {
3251		EM_TX_LOCK(adapter);
3252		lem_txeof(adapter);
3253		EM_TX_UNLOCK(adapter);
3254		if (adapter->watchdog_check) /* Still outstanding? */
3255			lem_init_locked(adapter);
3256	}
3257}
3258
3259/*********************************************************************
3260 *
3261 *  Get a buffer from system mbuf buffer pool.
3262 *
3263 **********************************************************************/
3264static int
3265lem_get_buf(struct adapter *adapter, int i)
3266{
3267	struct mbuf		*m;
3268	bus_dma_segment_t	segs[1];
3269	bus_dmamap_t		map;
3270	struct em_buffer	*rx_buffer;
3271	int			error, nsegs;
3272
3273	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3274	if (m == NULL) {
3275		adapter->mbuf_cluster_failed++;
3276		return (ENOBUFS);
3277	}
3278	m->m_len = m->m_pkthdr.len = MCLBYTES;
3279
3280	if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
3281		m_adj(m, ETHER_ALIGN);
3282
3283	/*
3284	 * Using memory from the mbuf cluster pool, invoke the
3285	 * bus_dma machinery to arrange the memory mapping.
3286	 */
3287	error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
3288	    adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
3289	if (error != 0) {
3290		m_free(m);
3291		return (error);
3292	}
3293
3294	/* If nsegs is wrong then the stack is corrupt. */
3295	KASSERT(nsegs == 1, ("Too many segments returned!"));
3296
3297	rx_buffer = &adapter->rx_buffer_area[i];
3298	if (rx_buffer->m_head != NULL)
3299		bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3300
3301	map = rx_buffer->map;
3302	rx_buffer->map = adapter->rx_sparemap;
3303	adapter->rx_sparemap = map;
3304	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
3305	rx_buffer->m_head = m;
3306
3307	adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
3308	return (0);
3309}
3310
3311/*********************************************************************
3312 *
3313 *  Allocate memory for rx_buffer structures. Since we use one
3314 *  rx_buffer per received packet, the maximum number of rx_buffer's
3315 *  that we'll need is equal to the number of receive descriptors
3316 *  that we've allocated.
3317 *
3318 **********************************************************************/
3319static int
3320lem_allocate_receive_structures(struct adapter *adapter)
3321{
3322	device_t dev = adapter->dev;
3323	struct em_buffer *rx_buffer;
3324	int i, error;
3325
3326	adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
3327	    adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3328	if (adapter->rx_buffer_area == NULL) {
3329		device_printf(dev, "Unable to allocate rx_buffer memory\n");
3330		return (ENOMEM);
3331	}
3332
3333#if __FreeBSD_version >= 700000
3334	error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3335#else
3336	error = bus_dma_tag_create(NULL,		 /* parent */
3337#endif
3338				1, 0,			/* alignment, bounds */
3339				BUS_SPACE_MAXADDR,	/* lowaddr */
3340				BUS_SPACE_MAXADDR,	/* highaddr */
3341				NULL, NULL,		/* filter, filterarg */
3342				MCLBYTES,		/* maxsize */
3343				1,			/* nsegments */
3344				MCLBYTES,		/* maxsegsize */
3345				0,			/* flags */
3346				NULL,			/* lockfunc */
3347				NULL,			/* lockarg */
3348				&adapter->rxtag);
3349	if (error) {
3350		device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
3351		    __func__, error);
3352		goto fail;
3353	}
3354
3355	/* Create the spare map (used by getbuf) */
3356	error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3357	     &adapter->rx_sparemap);
3358	if (error) {
3359		device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3360		    __func__, error);
3361		goto fail;
3362	}
3363
3364	rx_buffer = adapter->rx_buffer_area;
3365	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3366		error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3367		    &rx_buffer->map);
3368		if (error) {
3369			device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3370			    __func__, error);
3371			goto fail;
3372		}
3373	}
3374
3375	return (0);
3376
3377fail:
3378	lem_free_receive_structures(adapter);
3379	return (error);
3380}
3381
3382/*********************************************************************
3383 *
3384 *  (Re)initialize receive structures.
3385 *
3386 **********************************************************************/
3387static int
3388lem_setup_receive_structures(struct adapter *adapter)
3389{
3390	struct em_buffer *rx_buffer;
3391	int i, error;
3392
3393	/* Reset descriptor ring */
3394	bzero(adapter->rx_desc_base,
3395	    (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
3396
3397	/* Free current RX buffers. */
3398	rx_buffer = adapter->rx_buffer_area;
3399	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3400		if (rx_buffer->m_head != NULL) {
3401			bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3402			    BUS_DMASYNC_POSTREAD);
3403			bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3404			m_freem(rx_buffer->m_head);
3405			rx_buffer->m_head = NULL;
3406		}
3407        }
3408
3409	/* Allocate new ones. */
3410	for (i = 0; i < adapter->num_rx_desc; i++) {
3411		error = lem_get_buf(adapter, i);
3412		if (error)
3413                        return (error);
3414	}
3415
3416	/* Setup our descriptor pointers */
3417	adapter->next_rx_desc_to_check = 0;
3418	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3419	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3420
3421	return (0);
3422}
3423
3424/*********************************************************************
3425 *
3426 *  Enable receive unit.
3427 *
3428 **********************************************************************/
3429#define MAX_INTS_PER_SEC	8000
3430#define DEFAULT_ITR	     1000000000/(MAX_INTS_PER_SEC * 256)
3431
static void
lem_initialize_receive_unit(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	u64	bus_addr;
	u32	rctl, rxcsum;

	INIT_DEBUGOUT("lem_initialize_receive_unit: begin");

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	if (adapter->hw.mac.type >= e1000_82540) {
		/* Absolute RX interrupt delay (82540 and newer only). */
		E1000_WRITE_REG(&adapter->hw, E1000_RADV,
		    adapter->rx_abs_int_delay.value);
		/*
		 * Set the interrupt throttling rate. Value is calculated
		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
		 */
		E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
	}

	/*
	** When using MSIX interrupts we need to throttle
	** using the EITR register (82574 only)
	*/
	if (adapter->msix)
		for (int i = 0; i < 4; i++)
			E1000_WRITE_REG(&adapter->hw,
			    E1000_EITR_82574(i), DEFAULT_ITR);

	/* Disable accelerated acknowledge */
	if (adapter->hw.mac.type == e1000_82574)
		E1000_WRITE_REG(&adapter->hw,
		    E1000_RFCTL, E1000_RFCTL_ACK_DIS);

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
	    adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
	    (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
	    (u32)bus_addr);

	/* Setup the Receive Control Register */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN Filters are off */
	rctl &= ~E1000_RCTL_VFE;

	/* Store bad packets only when the TBI workaround needs them. */
	if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	/* Buffer sizes above 2048 require the BSEX extended-size bit. */
	switch (adapter->rx_buffer_len) {
	default:
	case 2048:
		rctl |= E1000_RCTL_SZ_2048;
		break;
	case 4096:
		rctl |= E1000_RCTL_SZ_4096 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 8192:
		rctl |= E1000_RCTL_SZ_8192 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 16384:
		rctl |= E1000_RCTL_SZ_16384 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	/* Long Packet Enable tracks whether the MTU exceeds standard. */
	if (ifp->if_mtu > ETHERMTU)
		rctl |= E1000_RCTL_LPE;
	else
		rctl &= ~E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac.type >= e1000_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
	}

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);

	/*
	 * Setup the HW Rx Head and
	 * Tail Descriptor Pointers
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);

	return;
}
3539
3540/*********************************************************************
3541 *
3542 *  Free receive related data structures.
3543 *
3544 **********************************************************************/
3545static void
3546lem_free_receive_structures(struct adapter *adapter)
3547{
3548	struct em_buffer *rx_buffer;
3549	int i;
3550
3551	INIT_DEBUGOUT("free_receive_structures: begin");
3552
3553	if (adapter->rx_sparemap) {
3554		bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
3555		adapter->rx_sparemap = NULL;
3556	}
3557
3558	/* Cleanup any existing buffers */
3559	if (adapter->rx_buffer_area != NULL) {
3560		rx_buffer = adapter->rx_buffer_area;
3561		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3562			if (rx_buffer->m_head != NULL) {
3563				bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3564				    BUS_DMASYNC_POSTREAD);
3565				bus_dmamap_unload(adapter->rxtag,
3566				    rx_buffer->map);
3567				m_freem(rx_buffer->m_head);
3568				rx_buffer->m_head = NULL;
3569			} else if (rx_buffer->map != NULL)
3570				bus_dmamap_unload(adapter->rxtag,
3571				    rx_buffer->map);
3572			if (rx_buffer->map != NULL) {
3573				bus_dmamap_destroy(adapter->rxtag,
3574				    rx_buffer->map);
3575				rx_buffer->map = NULL;
3576			}
3577		}
3578	}
3579
3580	if (adapter->rx_buffer_area != NULL) {
3581		free(adapter->rx_buffer_area, M_DEVBUF);
3582		adapter->rx_buffer_area = NULL;
3583	}
3584
3585	if (adapter->rxtag != NULL) {
3586		bus_dma_tag_destroy(adapter->rxtag);
3587		adapter->rxtag = NULL;
3588	}
3589}
3590
3591/*********************************************************************
3592 *
3593 *  This routine executes in interrupt context. It replenishes
3594 *  the mbufs in the descriptor and sends data which has been
3595 *  dma'ed into host memory to upper layer.
3596 *
3597 *  We loop at most count times if count is > 0, or until done if
3598 *  count < 0.
3599 *
3600 *  For polling we also now return the number of cleaned packets
3601 *********************************************************************/
3602static int
3603lem_rxeof(struct adapter *adapter, int count)
3604{
3605	struct ifnet	*ifp = adapter->ifp;;
3606	struct mbuf	*mp;
3607	u8		status, accept_frame = 0, eop = 0;
3608	u16 		len, desc_len, prev_len_adj;
3609	int		i, rx_sent = 0;
3610	struct e1000_rx_desc   *current_desc;
3611
3612	EM_RX_LOCK(adapter);
3613	i = adapter->next_rx_desc_to_check;
3614	current_desc = &adapter->rx_desc_base[i];
3615	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3616	    BUS_DMASYNC_POSTREAD);
3617
3618	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
3619		EM_RX_UNLOCK(adapter);
3620		return (rx_sent);
3621	}
3622
3623	while ((current_desc->status & E1000_RXD_STAT_DD) &&
3624	    (count != 0) &&
3625	    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3626		struct mbuf *m = NULL;
3627
3628		mp = adapter->rx_buffer_area[i].m_head;
3629		/*
3630		 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3631		 * needs to access the last received byte in the mbuf.
3632		 */
3633		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
3634		    BUS_DMASYNC_POSTREAD);
3635
3636		accept_frame = 1;
3637		prev_len_adj = 0;
3638		desc_len = le16toh(current_desc->length);
3639		status = current_desc->status;
3640		if (status & E1000_RXD_STAT_EOP) {
3641			count--;
3642			eop = 1;
3643			if (desc_len < ETHER_CRC_LEN) {
3644				len = 0;
3645				prev_len_adj = ETHER_CRC_LEN - desc_len;
3646			} else
3647				len = desc_len - ETHER_CRC_LEN;
3648		} else {
3649			eop = 0;
3650			len = desc_len;
3651		}
3652
3653		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
3654			u8	last_byte;
3655			u32	pkt_len = desc_len;
3656
3657			if (adapter->fmp != NULL)
3658				pkt_len += adapter->fmp->m_pkthdr.len;
3659
3660			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
3661			if (TBI_ACCEPT(&adapter->hw, status,
3662			    current_desc->errors, pkt_len, last_byte,
3663			    adapter->min_frame_size, adapter->max_frame_size)) {
3664				e1000_tbi_adjust_stats_82543(&adapter->hw,
3665				    &adapter->stats, pkt_len,
3666				    adapter->hw.mac.addr,
3667				    adapter->max_frame_size);
3668				if (len > 0)
3669					len--;
3670			} else
3671				accept_frame = 0;
3672		}
3673
3674		if (accept_frame) {
3675			if (lem_get_buf(adapter, i) != 0) {
3676				ifp->if_iqdrops++;
3677				goto discard;
3678			}
3679
3680			/* Assign correct length to the current fragment */
3681			mp->m_len = len;
3682
3683			if (adapter->fmp == NULL) {
3684				mp->m_pkthdr.len = len;
3685				adapter->fmp = mp; /* Store the first mbuf */
3686				adapter->lmp = mp;
3687			} else {
3688				/* Chain mbuf's together */
3689				mp->m_flags &= ~M_PKTHDR;
3690				/*
3691				 * Adjust length of previous mbuf in chain if
3692				 * we received less than 4 bytes in the last
3693				 * descriptor.
3694				 */
3695				if (prev_len_adj > 0) {
3696					adapter->lmp->m_len -= prev_len_adj;
3697					adapter->fmp->m_pkthdr.len -=
3698					    prev_len_adj;
3699				}
3700				adapter->lmp->m_next = mp;
3701				adapter->lmp = adapter->lmp->m_next;
3702				adapter->fmp->m_pkthdr.len += len;
3703			}
3704
3705			if (eop) {
3706				adapter->fmp->m_pkthdr.rcvif = ifp;
3707				ifp->if_ipackets++;
3708				lem_receive_checksum(adapter, current_desc,
3709				    adapter->fmp);
3710#ifndef __NO_STRICT_ALIGNMENT
3711				if (adapter->max_frame_size >
3712				    (MCLBYTES - ETHER_ALIGN) &&
3713				    lem_fixup_rx(adapter) != 0)
3714					goto skip;
3715#endif
3716				if (status & E1000_RXD_STAT_VP) {
3717#if __FreeBSD_version < 700000
3718					VLAN_INPUT_TAG_NEW(ifp, adapter->fmp,
3719					    (le16toh(current_desc->special) &
3720					    E1000_RXD_SPC_VLAN_MASK));
3721#else
3722					adapter->fmp->m_pkthdr.ether_vtag =
3723					    (le16toh(current_desc->special) &
3724					    E1000_RXD_SPC_VLAN_MASK);
3725					adapter->fmp->m_flags |= M_VLANTAG;
3726#endif
3727				}
3728#ifndef __NO_STRICT_ALIGNMENT
3729skip:
3730#endif
3731				m = adapter->fmp;
3732				adapter->fmp = NULL;
3733				adapter->lmp = NULL;
3734			}
3735		} else {
3736			ifp->if_ierrors++;
3737discard:
3738			/* Reuse loaded DMA map and just update mbuf chain */
3739			mp = adapter->rx_buffer_area[i].m_head;
3740			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
3741			mp->m_data = mp->m_ext.ext_buf;
3742			mp->m_next = NULL;
3743			if (adapter->max_frame_size <=
3744			    (MCLBYTES - ETHER_ALIGN))
3745				m_adj(mp, ETHER_ALIGN);
3746			if (adapter->fmp != NULL) {
3747				m_freem(adapter->fmp);
3748				adapter->fmp = NULL;
3749				adapter->lmp = NULL;
3750			}
3751			m = NULL;
3752		}
3753
3754		/* Zero out the receive descriptors status. */
3755		current_desc->status = 0;
3756		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3757		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3758
3759		/* Advance our pointers to the next descriptor. */
3760		if (++i == adapter->num_rx_desc)
3761			i = 0;
3762		/* Call into the stack */
3763		if (m != NULL) {
3764			adapter->next_rx_desc_to_check = i;
3765			EM_RX_UNLOCK(adapter);
3766			(*ifp->if_input)(ifp, m);
3767			EM_RX_LOCK(adapter);
3768			rx_sent++;
3769			i = adapter->next_rx_desc_to_check;
3770		}
3771		current_desc = &adapter->rx_desc_base[i];
3772	}
3773	adapter->next_rx_desc_to_check = i;
3774
3775	/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
3776	if (--i < 0)
3777		i = adapter->num_rx_desc - 1;
3778	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
3779	EM_RX_UNLOCK(adapter);
3780	return (rx_sent);
3781}
3782
3783#ifndef __NO_STRICT_ALIGNMENT
3784/*
3785 * When jumbo frames are enabled we should realign entire payload on
3786 * architecures with strict alignment. This is serious design mistake of 8254x
3787 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
3788 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
3789 * payload. On architecures without strict alignment restrictions 8254x still
3790 * performs unaligned memory access which would reduce the performance too.
3791 * To avoid copying over an entire frame to align, we allocate a new mbuf and
3792 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
3793 * existing mbuf chain.
3794 *
3795 * Be aware, best performance of the 8254x is achived only when jumbo frame is
3796 * not used at all on architectures with strict alignment.
3797 */
3798static int
3799lem_fixup_rx(struct adapter *adapter)
3800{
3801	struct mbuf *m, *n;
3802	int error;
3803
3804	error = 0;
3805	m = adapter->fmp;
3806	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
3807		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
3808		m->m_data += ETHER_HDR_LEN;
3809	} else {
3810		MGETHDR(n, M_DONTWAIT, MT_DATA);
3811		if (n != NULL) {
3812			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
3813			m->m_data += ETHER_HDR_LEN;
3814			m->m_len -= ETHER_HDR_LEN;
3815			n->m_len = ETHER_HDR_LEN;
3816			M_MOVE_PKTHDR(n, m);
3817			n->m_next = m;
3818			adapter->fmp = n;
3819		} else {
3820			adapter->dropped_pkts++;
3821			m_freem(adapter->fmp);
3822			adapter->fmp = NULL;
3823			error = ENOMEM;
3824		}
3825	}
3826
3827	return (error);
3828}
3829#endif
3830
3831/*********************************************************************
3832 *
3833 *  Verify that the hardware indicated that the checksum is valid.
3834 *  Inform the stack about the status of checksum so that stack
3835 *  doesn't spend time verifying the checksum.
3836 *
3837 *********************************************************************/
static void
lem_receive_checksum(struct adapter *adapter,
	    struct e1000_rx_desc *rx_desc, struct mbuf *mp)
{
	/* 82543 or newer only */
	if ((adapter->hw.mac.type < e1000_82543) ||
	    /* Ignore Checksum bit is set */
	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
		/* No hardware assist: leave verification to the stack */
		mp->m_pkthdr.csum_flags = 0;
		return;
	}

	/* Hardware checked the IP header checksum? */
	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
		/* Did it pass? */
		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
			/* IP Checksum Good */
			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;

		} else {
			mp->m_pkthdr.csum_flags = 0;
		}
	}

	/* Hardware checked the TCP/UDP checksum? */
	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
		/* Did it pass? */
		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
			mp->m_pkthdr.csum_flags |=
			(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
			/* Tell the stack the pseudo-header sum is done */
			mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
}
3871
3872#if __FreeBSD_version >= 700029
3873/*
3874 * This routine is run via an vlan
3875 * config EVENT
3876 */
static void
lem_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u32		index, bit;

	if (ifp->if_softc !=  arg)   /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))       /* Invalid ID */
                return;

	/*
	 * Set this id's bit in the shadow VFTA: 32 ids per word.
	 * NOTE(review): lem_shadow_vfta appears to be a driver-global
	 * table shared by all adapters -- verify for multi-NIC systems.
	 */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	lem_shadow_vfta[index] |= (1 << bit);
	++adapter->num_vlans;
	/* Re-init to load the changes */
	lem_init(adapter);
}
3896
3897/*
3898 * This routine is run via an vlan
3899 * unconfig EVENT
3900 */
static void
lem_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u32		index, bit;

	if (ifp->if_softc !=  arg)
		return;

	if ((vtag == 0) || (vtag > 4095))       /* Invalid */
                return;

	/* Clear this id's bit in the shadow VFTA (32 ids per word) */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	lem_shadow_vfta[index] &= ~(1 << bit);
	--adapter->num_vlans;
	/* Re-init to load the changes */
	lem_init(adapter);
}
3920
/*
 * Program the hardware VLAN state (VFTA, tag stripping, filtering)
 * from the driver's shadow copy after a reset.
 */
static void
lem_setup_vlan_hw_support(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32             reg;

	/*
	** We get here thru init_locked, meaning
	** a soft reset, this has already cleared
	** the VFTA and other state, so if there
	** have been no vlan's registered do nothing.
	*/
	if (adapter->num_vlans == 0)
                return;

	/*
	** A soft reset zero's out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < EM_VFTA_SIZE; i++)
                if (lem_shadow_vfta[i] != 0)
			E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
                            i, lem_shadow_vfta[i]);

	/* Turn on hardware VLAN tag stripping (VME) */
	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg |= E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);

	/* Enable the Filter Table */
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_CFIEN;
	reg |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);

	/* Update the frame size */
	E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
	    adapter->max_frame_size + VLAN_TAG_SIZE);
}
3959#endif
3960
/*
 * Unmask interrupt generation: the standard causes, plus the MSIX
 * vector causes when MSIX is in use.
 */
static void
lem_enable_intr(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ims_mask = IMS_ENABLE_MASK;

	if (adapter->msix) {
		/* Auto-clear the MSIX causes when they assert */
		E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
		ims_mask |= EM_MSIX_MASK;
	}
	E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
}
3973
3974static void
3975lem_disable_intr(struct adapter *adapter)
3976{
3977	struct e1000_hw *hw = &adapter->hw;
3978
3979	if (adapter->msix)
3980		E1000_WRITE_REG(hw, EM_EIAC, 0);
3981	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
3982}
3983
3984/*
3985 * Bit of a misnomer, what this really means is
3986 * to enable OS management of the system... aka
3987 * to disable special hardware management features
3988 */
static void
lem_init_manageability(struct adapter *adapter)
{
	/* A shared code workaround */
	if (adapter->has_manage) {
		/* Read-modify-write MANC only if a mgmt controller exists */
		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);
		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
	}
}
4000
4001/*
4002 * Give control back to hardware management
4003 * controller if there is one.
4004 */
static void
lem_release_manageability(struct adapter *adapter)
{
	if (adapter->has_manage) {
		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);

		/* re-enable hardware interception of ARP */
		/* (inverse of lem_init_manageability) */
		manc |= E1000_MANC_ARP_EN;
		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
	}
}
4016
4017/*
4018 * lem_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
4019 * For ASF and Pass Through versions of f/w this means
4020 * that the driver is loaded. For AMT version type f/w
4021 * this means that the network i/f is open.
4022 */
4023static void
4024lem_get_hw_control(struct adapter *adapter)
4025{
4026	u32 ctrl_ext;
4027
4028	ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4029	E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4030	    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
4031	return;
4032}
4033
4034/*
4035 * lem_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4036 * For ASF and Pass Through versions of f/w this means that
4037 * the driver is no longer loaded. For AMT versions of the
4038 * f/w this means that the network i/f is closed.
4039 */
4040static void
4041lem_release_hw_control(struct adapter *adapter)
4042{
4043	u32 ctrl_ext;
4044
4045	if (!adapter->has_manage)
4046		return;
4047
4048	ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4049	E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4050	    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
4051	return;
4052}
4053
4054static int
4055lem_is_valid_ether_addr(u8 *addr)
4056{
4057	char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
4058
4059	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
4060		return (FALSE);
4061	}
4062
4063	return (TRUE);
4064}
4065
4066/*
4067** Parse the interface capabilities with regard
4068** to both system management and wake-on-lan for
4069** later use.
4070*/
static void
lem_get_wakeup(device_t dev)
{
	struct adapter	*adapter = device_get_softc(dev);
	u16		eeprom_data = 0, device_id, apme_mask;

	adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
	apme_mask = EM_EEPROM_APME;

	/* Locate the NVM word/bit advertising wake support per mac type */
	switch (adapter->hw.mac.type) {
	case e1000_82542:
	case e1000_82543:
		/* No wake support advertised in NVM for these parts */
		break;
	case e1000_82544:
		e1000_read_nvm(&adapter->hw,
		    NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
		apme_mask = EM_82544_APME;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		/* Dual port: the second function has its own control word */
		if (adapter->hw.bus.func == 1) {
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		} else
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	default:
		e1000_read_nvm(&adapter->hw,
		    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	/* Start with magic-packet and multicast wake if NVM enables APM */
	if (eeprom_data & apme_mask)
		adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
	/*
         * We have the eeprom settings, now apply the special cases
         * where the eeprom may be wrong or the board won't support
         * wake on lan on a particular port
	 */
	device_id = pci_get_device(dev);
        switch (device_id) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
		    E1000_STATUS_FUNC_1)
			adapter->wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
                /* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->wol = 0;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
                break;
	}
	return;
}
4135
4136
4137/*
4138 * Enable PCI Wake On Lan capability
4139 */
static void
lem_enable_wakeup(device_t dev)
{
	struct adapter	*adapter = device_get_softc(dev);
	struct ifnet	*ifp = adapter->ifp;
	u32		pmc, ctrl, ctrl_ext, rctl;
	u16     	status;

	/* Nothing to do if the device lacks the PCI power mgmt cap */
	if ((pci_find_extcap(dev, PCIY_PMG, &pmc) != 0))
		return;

	/* Advertise the wakeup capability */
	ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
	ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
	E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
	/* NOTE(review): WUC is written again below on the non-pchlan
	 * path; this early write looks redundant -- confirm intent. */
	E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);

	/* Keep the laser running on Fiber adapters */
	if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
	    adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
	}

	/*
	** Determine type of Wakeup: note that wol
	** is set with all bits on by default.
	*/
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
		adapter->wol &= ~E1000_WUFC_MAG;

	if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
		adapter->wol &= ~E1000_WUFC_MC;
	else {
		/* Multicast wake requires promiscuous multicast RX */
		rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
	}

	/* pchlan parts require the wake config copied into the PHY */
	if (adapter->hw.mac.type == e1000_pchlan) {
		if (lem_enable_phy_wakeup(adapter))
			return;
	} else {
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
	}


        /* Request PME */
        status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
	status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if (ifp->if_capenable & IFCAP_WOL)
		status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
        pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);

	return;
}
4198
4199/*
4200** WOL in the newer chipset interfaces (pchlan)
4201** require thing to be copied into the phy
4202*/
static int
lem_enable_phy_wakeup(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mreg, ret = 0;
	u16 preg;

	/* copy MAC RARs to PHY RARs */
	/* (mirror the MAC's receive address filters into the PHY) */
	for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
		mreg = E1000_READ_REG(hw, E1000_RAL(i));
		e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
		e1000_write_phy_reg(hw, BM_RAR_M(i),
		    (u16)((mreg >> 16) & 0xFFFF));
		mreg = E1000_READ_REG(hw, E1000_RAH(i));
		e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
		e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
		    (u16)((mreg >> 16) & 0xFFFF));
	}

	/* copy MAC MTA to PHY MTA */
	/* (each 32-bit MTA word splits into two 16-bit PHY registers) */
	for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
		mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
		e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
		e1000_write_phy_reg(hw, BM_MTA(i) + 1,
		    (u16)((mreg >> 16) & 0xFFFF));
	}

	/* configure PHY Rx Control register */
	/* (translate the relevant RCTL/CTRL bits into BM_RCTL form) */
	e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
	mreg = E1000_READ_REG(hw, E1000_RCTL);
	if (mreg & E1000_RCTL_UPE)
		preg |= BM_RCTL_UPE;
	if (mreg & E1000_RCTL_MPE)
		preg |= BM_RCTL_MPE;
	preg &= ~(BM_RCTL_MO_MASK);
	if (mreg & E1000_RCTL_MO_3)
		preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
				<< BM_RCTL_MO_SHIFT);
	if (mreg & E1000_RCTL_BAM)
		preg |= BM_RCTL_BAM;
	if (mreg & E1000_RCTL_PMCF)
		preg |= BM_RCTL_PMCF;
	mreg = E1000_READ_REG(hw, E1000_CTRL);
	if (mreg & E1000_CTRL_RFCE)
		preg |= BM_RCTL_RFCE;
	e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);

	/* enable PHY wakeup in MAC register */
	E1000_WRITE_REG(hw, E1000_WUC,
	    E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
	E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);

	/* configure and enable PHY wakeup in PHY registers */
	e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
	e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);

	/* activate PHY wakeup */
	ret = hw->phy.ops.acquire(hw);
	if (ret) {
		printf("Could not acquire PHY\n");
		return ret;
	}
	/* Raw MDIC access: select the wakeup-enable page (769) */
	e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
	                         (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
	ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
	if (ret) {
		printf("Could not read PHY page 769\n");
		goto out;
	}
	preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
	ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
	if (ret)
		printf("Could not set PHY Host Wakeup bit\n");
out:
	/* Always release the PHY, even on partial failure */
	hw->phy.ops.release(hw);

	return ret;
}
4281
4282static void
4283lem_led_func(void *arg, int onoff)
4284{
4285	struct adapter	*adapter = arg;
4286
4287	EM_CORE_LOCK(adapter);
4288	if (onoff) {
4289		e1000_setup_led(&adapter->hw);
4290		e1000_led_on(&adapter->hw);
4291	} else {
4292		e1000_led_off(&adapter->hw);
4293		e1000_cleanup_led(&adapter->hw);
4294	}
4295	EM_CORE_UNLOCK(adapter);
4296}
4297
4298/*********************************************************************
4299* 82544 Coexistence issue workaround.
4300*    There are 2 issues.
4301*       1. Transmit Hang issue.
4302*    To detect this issue, following equation can be used...
4303*	  SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4304*	  If SUM[3:0] is in between 1 to 4, we will have this issue.
4305*
4306*       2. DAC issue.
4307*    To detect this issue, following equation can be used...
4308*	  SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4309*	  If SUM[3:0] is in between 9 to c, we will have this issue.
4310*
4311*
4312*    WORKAROUND:
4313*	  Make sure we do not have ending address
4314*	  as 1,2,3,4(Hang) or 9,a,b,c (DAC)
4315*
4316*************************************************************************/
4317static u32
4318lem_fill_descriptors (bus_addr_t address, u32 length,
4319		PDESC_ARRAY desc_array)
4320{
4321	u32 safe_terminator;
4322
4323	/* Since issue is sensitive to length and address.*/
4324	/* Let us first check the address...*/
4325	if (length <= 4) {
4326		desc_array->descriptor[0].address = address;
4327		desc_array->descriptor[0].length = length;
4328		desc_array->elements = 1;
4329		return (desc_array->elements);
4330	}
4331	safe_terminator = (u32)((((u32)address & 0x7) +
4332	    (length & 0xF)) & 0xF);
4333	/* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
4334	if (safe_terminator == 0   ||
4335	(safe_terminator > 4   &&
4336	safe_terminator < 9)   ||
4337	(safe_terminator > 0xC &&
4338	safe_terminator <= 0xF)) {
4339		desc_array->descriptor[0].address = address;
4340		desc_array->descriptor[0].length = length;
4341		desc_array->elements = 1;
4342		return (desc_array->elements);
4343	}
4344
4345	desc_array->descriptor[0].address = address;
4346	desc_array->descriptor[0].length = length - 4;
4347	desc_array->descriptor[1].address = address + (length - 4);
4348	desc_array->descriptor[1].length = 4;
4349	desc_array->elements = 2;
4350	return (desc_array->elements);
4351}
4352
4353/**********************************************************************
4354 *
4355 *  Update the board statistics counters.
4356 *
4357 **********************************************************************/
static void
lem_update_stats_counters(struct adapter *adapter)
{
	struct ifnet   *ifp;

	/* Symbol/sequence errors are only meaningful on copper or
	 * when the link is up */
	if(adapter->hw.phy.media_type == e1000_media_type_copper ||
	   (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
		adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
	}
	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
	adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);

	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
	adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
	adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */
	/* NOTE(review): only the high dwords (GORCH/GOTCH, TORH/TOTH)
	 * are read below, discarding the low 32 bits -- confirm this
	 * approximation is intentional. */

	adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCH);
	adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCH);

	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
	adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);

	adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
	adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);

	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);

	/* These counters only exist on 82543 and newer parts */
	if (adapter->hw.mac.type >= e1000_82543) {
		adapter->stats.algnerrc +=
		E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
		adapter->stats.rxerrc +=
		E1000_READ_REG(&adapter->hw, E1000_RXERRC);
		adapter->stats.tncrs +=
		E1000_READ_REG(&adapter->hw, E1000_TNCRS);
		adapter->stats.cexterr +=
		E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
		adapter->stats.tsctc +=
		E1000_READ_REG(&adapter->hw, E1000_TSCTC);
		adapter->stats.tsctfc +=
		E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
	}
	ifp = adapter->ifp;

	/* Publish aggregate counters to the ifnet */
	ifp->if_collisions = adapter->stats.colc;

	/* Rx Errors */
	ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
	    adapter->stats.crcerrs + adapter->stats.algnerrc +
	    adapter->stats.ruc + adapter->stats.roc +
	    adapter->stats.mpc + adapter->stats.cexterr;

	/* Tx Errors */
	ifp->if_oerrors = adapter->stats.ecol +
	    adapter->stats.latecol + adapter->watchdog_events;
}
4448
4449
4450/**********************************************************************
4451 *
4452 *  This routine is called only when lem_display_debug_stats is enabled.
4453 *  This routine provides a way to take a look at important statistics
4454 *  maintained by the driver and hardware.
4455 *
4456 **********************************************************************/
/* Invoked from the debug_info sysctl (value 1). */
static void
lem_print_debug_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	u8 *hw_addr = adapter->hw.hw_addr;

	device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
	device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
	    E1000_READ_REG(&adapter->hw, E1000_CTRL),
	    E1000_READ_REG(&adapter->hw, E1000_RCTL));
	/* PBA: high 16 bits = TX packet buffer, low 16 = RX (in KB) */
	device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n",
	    ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff0000) >> 16),\
	    (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) );
	device_printf(dev, "Flow control watermarks high = %d low = %d\n",
	    adapter->hw.fc.high_water,
	    adapter->hw.fc.low_water);
	device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
	    E1000_READ_REG(&adapter->hw, E1000_TIDV),
	    E1000_READ_REG(&adapter->hw, E1000_TADV));
	device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
	    E1000_READ_REG(&adapter->hw, E1000_RDTR),
	    E1000_READ_REG(&adapter->hw, E1000_RADV));
	device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n",
	    (long long)adapter->tx_fifo_wrk_cnt,
	    (long long)adapter->tx_fifo_reset_cnt);
	/* Live hardware head/tail pointers for the TX/RX rings */
	device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
	    E1000_READ_REG(&adapter->hw, E1000_TDH(0)),
	    E1000_READ_REG(&adapter->hw, E1000_TDT(0)));
	device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
	    E1000_READ_REG(&adapter->hw, E1000_RDH(0)),
	    E1000_READ_REG(&adapter->hw, E1000_RDT(0)));
	device_printf(dev, "Num Tx descriptors avail = %d\n",
	    adapter->num_tx_desc_avail);
	device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
	    adapter->no_tx_desc_avail1);
	device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
	    adapter->no_tx_desc_avail2);
	device_printf(dev, "Std mbuf failed = %ld\n",
	    adapter->mbuf_alloc_failed);
	device_printf(dev, "Std mbuf cluster failed = %ld\n",
	    adapter->mbuf_cluster_failed);
	device_printf(dev, "Driver dropped packets = %ld\n",
	    adapter->dropped_pkts);
	device_printf(dev, "Driver tx dma failure in encap = %ld\n",
		adapter->no_tx_dma_setup);
}
4503
/* Dump the accumulated hardware statistics; invoked from the
 * stats sysctl (value 1). */
static void
lem_print_hw_stats(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	device_printf(dev, "Excessive collisions = %lld\n",
	    (long long)adapter->stats.ecol);
#if	(DEBUG_HW > 0)  /* Dont output these errors normally */
	device_printf(dev, "Symbol errors = %lld\n",
	    (long long)adapter->stats.symerrs);
#endif
	device_printf(dev, "Sequence errors = %lld\n",
	    (long long)adapter->stats.sec);
	device_printf(dev, "Defer count = %lld\n",
	    (long long)adapter->stats.dc);
	device_printf(dev, "Missed Packets = %lld\n",
	    (long long)adapter->stats.mpc);
	device_printf(dev, "Receive No Buffers = %lld\n",
	    (long long)adapter->stats.rnbc);
	/* RLEC is inaccurate on some hardware, calculate our own. */
	device_printf(dev, "Receive Length Errors = %lld\n",
	    ((long long)adapter->stats.roc + (long long)adapter->stats.ruc));
	device_printf(dev, "Receive errors = %lld\n",
	    (long long)adapter->stats.rxerrc);
	device_printf(dev, "Crc errors = %lld\n",
	    (long long)adapter->stats.crcerrs);
	device_printf(dev, "Alignment errors = %lld\n",
	    (long long)adapter->stats.algnerrc);
	device_printf(dev, "Collision/Carrier extension errors = %lld\n",
	    (long long)adapter->stats.cexterr);
	device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns);
	device_printf(dev, "watchdog timeouts = %ld\n",
	    adapter->watchdog_events);
	device_printf(dev, "RX MSIX IRQ = %ld TX MSIX IRQ = %ld"
	    " LINK MSIX IRQ = %ld\n", adapter->rx_irq,
	    adapter->tx_irq , adapter->link_irq);
	device_printf(dev, "XON Rcvd = %lld\n",
	    (long long)adapter->stats.xonrxc);
	device_printf(dev, "XON Xmtd = %lld\n",
	    (long long)adapter->stats.xontxc);
	device_printf(dev, "XOFF Rcvd = %lld\n",
	    (long long)adapter->stats.xoffrxc);
	device_printf(dev, "XOFF Xmtd = %lld\n",
	    (long long)adapter->stats.xofftxc);
	device_printf(dev, "Good Packets Rcvd = %lld\n",
	    (long long)adapter->stats.gprc);
	device_printf(dev, "Good Packets Xmtd = %lld\n",
	    (long long)adapter->stats.gptc);
	device_printf(dev, "TSO Contexts Xmtd = %lld\n",
	    (long long)adapter->stats.tsctc);
	device_printf(dev, "TSO Contexts Failed = %lld\n",
	    (long long)adapter->stats.tsctfc);
}
4557
4558/**********************************************************************
4559 *
4560 *  This routine provides a way to dump out the adapter eeprom,
4561 *  often a useful debug/service tool. This only dumps the first
4562 *  32 words, stuff that matters is in that extent.
4563 *
4564 **********************************************************************/
4565static void
4566lem_print_nvm_info(struct adapter *adapter)
4567{
4568	u16	eeprom_data;
4569	int	i, j, row = 0;
4570
4571	/* Its a bit crude, but it gets the job done */
4572	printf("\nInterface EEPROM Dump:\n");
4573	printf("Offset\n0x0000  ");
4574	for (i = 0, j = 0; i < 32; i++, j++) {
4575		if (j == 8) { /* Make the offset block */
4576			j = 0; ++row;
4577			printf("\n0x00%x0  ",row);
4578		}
4579		e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
4580		printf("%04x ", eeprom_data);
4581	}
4582	printf("\n");
4583}
4584
4585static int
4586lem_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
4587{
4588	struct adapter *adapter;
4589	int error;
4590	int result;
4591
4592	result = -1;
4593	error = sysctl_handle_int(oidp, &result, 0, req);
4594
4595	if (error || !req->newptr)
4596		return (error);
4597
4598	if (result == 1) {
4599		adapter = (struct adapter *)arg1;
4600		lem_print_debug_info(adapter);
4601	}
4602	/*
4603	 * This value will cause a hex dump of the
4604	 * first 32 16-bit words of the EEPROM to
4605	 * the screen.
4606	 */
4607	if (result == 2) {
4608		adapter = (struct adapter *)arg1;
4609		lem_print_nvm_info(adapter);
4610        }
4611
4612	return (error);
4613}
4614
4615
4616static int
4617lem_sysctl_stats(SYSCTL_HANDLER_ARGS)
4618{
4619	struct adapter *adapter;
4620	int error;
4621	int result;
4622
4623	result = -1;
4624	error = sysctl_handle_int(oidp, &result, 0, req);
4625
4626	if (error || !req->newptr)
4627		return (error);
4628
4629	if (result == 1) {
4630		adapter = (struct adapter *)arg1;
4631		lem_print_hw_stats(adapter);
4632	}
4633
4634	return (error);
4635}
4636
/* Sysctl handler: get/set an interrupt delay register, expressed
 * in microseconds to the user and converted to hardware ticks. */
static int
lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
{
	struct em_int_delay_info *info;
	struct adapter *adapter;
	u32 regval;
	int error;
	int usecs;
	int ticks;

	info = (struct em_int_delay_info *)arg1;
	usecs = info->value;
	error = sysctl_handle_int(oidp, &usecs, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	/* Reject values that won't fit in the 16-bit register field */
	if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
		return (EINVAL);
	info->value = usecs;
	ticks = EM_USECS_TO_TICKS(usecs);

	adapter = info->adapter;

	EM_CORE_LOCK(adapter);
	/* Only the low 16 bits of the register hold the delay value */
	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
	regval = (regval & ~0xffff) | (ticks & 0xffff);
	/* Handle a few special cases. */
	switch (info->offset) {
	case E1000_RDTR:
		break;
	case E1000_TIDV:
		if (ticks == 0) {
			/* Zero disables the delayed-interrupt TX path */
			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
			/* Don't write 0 into the TIDV register. */
			regval++;
		} else
			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
		break;
	}
	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
	EM_CORE_UNLOCK(adapter);
	return (0);
}
4679
/*
 * Create one interrupt-delay sysctl backed by lem_sysctl_int_delay;
 * info records the adapter, target register offset and initial value.
 */
static void
lem_add_int_delay_sysctl(struct adapter *adapter, const char *name,
	const char *description, struct em_int_delay_info *info,
	int offset, int value)
{
	info->adapter = adapter;
	info->offset = offset;
	info->value = value;
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
	    info, 0, lem_sysctl_int_delay, "I", description);
}
4693
4694#ifndef EM_LEGACY_IRQ
4695static void
4696lem_add_rx_process_limit(struct adapter *adapter, const char *name,
4697	const char *description, int *limit, int value)
4698{
4699	*limit = value;
4700	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4701	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4702	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4703}
4704#endif
4705
4706
4707