/* if_em.c, revision 153635 */
/**************************************************************************

Copyright (c) 2001-2005, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

/*$FreeBSD: head/sys/dev/em/if_em.c 153635 2005-12-22 09:09:39Z glebius $*/

36#ifdef HAVE_KERNEL_OPTION_HEADERS
37#include "opt_device_polling.h"
38#endif
39
40#include <dev/em/if_em.h>
41
42/*********************************************************************
43 *  Set this to one to display debug statistics
44 *********************************************************************/
45int             em_display_debug_stats = 0;
46
47/*********************************************************************
48 *  Driver version
49 *********************************************************************/
50
51char em_driver_version[] = "Version - 3.2.18";
52
53
54/*********************************************************************
55 *  PCI Device ID Table
56 *
57 *  Used by probe to select devices to load on
58 *  Last field stores an index into em_strings
59 *  Last entry must be all 0s
60 *
61 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
62 *********************************************************************/
63
64static em_vendor_info_t em_vendor_info_array[] =
65{
66        /* Intel(R) PRO/1000 Network Connection */
67        { 0x8086, E1000_DEV_ID_82540EM,             PCI_ANY_ID, PCI_ANY_ID, 0},
68        { 0x8086, E1000_DEV_ID_82540EM_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
69        { 0x8086, E1000_DEV_ID_82540EP,             PCI_ANY_ID, PCI_ANY_ID, 0},
70        { 0x8086, E1000_DEV_ID_82540EP_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
71        { 0x8086, E1000_DEV_ID_82540EP_LP,          PCI_ANY_ID, PCI_ANY_ID, 0},
72
73        { 0x8086, E1000_DEV_ID_82541EI,             PCI_ANY_ID, PCI_ANY_ID, 0},
74        { 0x8086, E1000_DEV_ID_82541ER,             PCI_ANY_ID, PCI_ANY_ID, 0},
75        { 0x8086, E1000_DEV_ID_82541ER_LOM,             PCI_ANY_ID, PCI_ANY_ID, 0},
76        { 0x8086, E1000_DEV_ID_82541EI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
77        { 0x8086, E1000_DEV_ID_82541GI,             PCI_ANY_ID, PCI_ANY_ID, 0},
78        { 0x8086, E1000_DEV_ID_82541GI_LF,          PCI_ANY_ID, PCI_ANY_ID, 0},
79        { 0x8086, E1000_DEV_ID_82541GI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
80
81        { 0x8086, E1000_DEV_ID_82542,               PCI_ANY_ID, PCI_ANY_ID, 0},
82
83        { 0x8086, E1000_DEV_ID_82543GC_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
84        { 0x8086, E1000_DEV_ID_82543GC_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
85
86        { 0x8086, E1000_DEV_ID_82544EI_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
87        { 0x8086, E1000_DEV_ID_82544EI_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
88        { 0x8086, E1000_DEV_ID_82544GC_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
89        { 0x8086, E1000_DEV_ID_82544GC_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
90
91        { 0x8086, E1000_DEV_ID_82545EM_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
92        { 0x8086, E1000_DEV_ID_82545EM_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
93        { 0x8086, E1000_DEV_ID_82545GM_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
94        { 0x8086, E1000_DEV_ID_82545GM_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
95        { 0x8086, E1000_DEV_ID_82545GM_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
96
97        { 0x8086, E1000_DEV_ID_82546EB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
98        { 0x8086, E1000_DEV_ID_82546EB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
99        { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
100        { 0x8086, E1000_DEV_ID_82546GB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
101        { 0x8086, E1000_DEV_ID_82546GB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
102        { 0x8086, E1000_DEV_ID_82546GB_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
103        { 0x8086, E1000_DEV_ID_82546GB_PCIE,        PCI_ANY_ID, PCI_ANY_ID, 0},
104        { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
105
106        { 0x8086, E1000_DEV_ID_82547EI,             PCI_ANY_ID, PCI_ANY_ID, 0},
107        { 0x8086, E1000_DEV_ID_82547EI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
108        { 0x8086, E1000_DEV_ID_82547GI,             PCI_ANY_ID, PCI_ANY_ID, 0},
109
110	{ 0x8086, E1000_DEV_ID_82571EB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
111	{ 0x8086, E1000_DEV_ID_82571EB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
112	{ 0x8086, E1000_DEV_ID_82571EB_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
113
114	{ 0x8086, E1000_DEV_ID_82572EI_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
115	{ 0x8086, E1000_DEV_ID_82572EI_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
116	{ 0x8086, E1000_DEV_ID_82572EI_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
117
118        { 0x8086, E1000_DEV_ID_82573E,              PCI_ANY_ID, PCI_ANY_ID, 0},
119        { 0x8086, E1000_DEV_ID_82573E_IAMT,         PCI_ANY_ID, PCI_ANY_ID, 0},
120        { 0x8086, E1000_DEV_ID_82573L,              PCI_ANY_ID, PCI_ANY_ID, 0},
121
122        /* required last entry */
123        { 0, 0, 0, 0, 0}
124};
125
126/*********************************************************************
127 *  Table of branding strings for all supported NICs.
128 *********************************************************************/
129
130static char *em_strings[] = {
131	"Intel(R) PRO/1000 Network Connection"
132};
133
134/*********************************************************************
135 *  Function prototypes
136 *********************************************************************/
137static int  em_probe(device_t);
138static int  em_attach(device_t);
139static int  em_detach(device_t);
140static int  em_shutdown(device_t);
141static void em_intr(void *);
142static void em_start(struct ifnet *);
143static int  em_ioctl(struct ifnet *, u_long, caddr_t);
144static void em_watchdog(struct ifnet *);
145static void em_init(void *);
146static void em_init_locked(struct adapter *);
147static void em_stop(void *);
148static void em_media_status(struct ifnet *, struct ifmediareq *);
149static int  em_media_change(struct ifnet *);
150static void em_identify_hardware(struct adapter *);
151static int  em_allocate_pci_resources(struct adapter *);
152static void em_free_pci_resources(struct adapter *);
153static void em_local_timer(void *);
154static int  em_hardware_init(struct adapter *);
155static void em_setup_interface(device_t, struct adapter *);
156static int  em_setup_transmit_structures(struct adapter *);
157static void em_initialize_transmit_unit(struct adapter *);
158static int  em_setup_receive_structures(struct adapter *);
159static void em_initialize_receive_unit(struct adapter *);
160static void em_enable_intr(struct adapter *);
161static void em_disable_intr(struct adapter *);
162static void em_free_transmit_structures(struct adapter *);
163static void em_free_receive_structures(struct adapter *);
164static void em_update_stats_counters(struct adapter *);
165static void em_clean_transmit_interrupts(struct adapter *);
166static int  em_allocate_receive_structures(struct adapter *);
167static int  em_allocate_transmit_structures(struct adapter *);
168static void em_process_receive_interrupts(struct adapter *, int);
169#ifndef __NO_STRICT_ALIGNMENT
170static int  em_fixup_rx(struct adapter *);
171#endif
172static void em_receive_checksum(struct adapter *,
173				struct em_rx_desc *,
174				struct mbuf *);
175static void em_transmit_checksum_setup(struct adapter *,
176				       struct mbuf *,
177				       u_int32_t *,
178				       u_int32_t *);
179static void em_set_promisc(struct adapter *);
180static void em_disable_promisc(struct adapter *);
181static void em_set_multi(struct adapter *);
182static void em_print_hw_stats(struct adapter *);
183static void em_print_link_status(struct adapter *);
184static int  em_get_buf(int i, struct adapter *,
185		       struct mbuf *);
186static void em_enable_vlans(struct adapter *);
187static void em_disable_vlans(struct adapter *);
188static int  em_encap(struct adapter *, struct mbuf **);
189static void em_smartspeed(struct adapter *);
190static int  em_82547_fifo_workaround(struct adapter *, int);
191static void em_82547_update_fifo_head(struct adapter *, int);
192static int  em_82547_tx_fifo_reset(struct adapter *);
193static void em_82547_move_tail(void *arg);
194static void em_82547_move_tail_locked(struct adapter *);
195static int  em_dma_malloc(struct adapter *, bus_size_t,
196			  struct em_dma_alloc *, int);
197static void em_dma_free(struct adapter *, struct em_dma_alloc *);
198static void em_print_debug_info(struct adapter *);
199static int  em_is_valid_ether_addr(u_int8_t *);
200static int  em_sysctl_stats(SYSCTL_HANDLER_ARGS);
201static int  em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
202static u_int32_t em_fill_descriptors (bus_addr_t address,
203				      u_int32_t length,
204				      PDESC_ARRAY desc_array);
205static int  em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
206static void em_add_int_delay_sysctl(struct adapter *, const char *,
207				    const char *, struct em_int_delay_info *,
208				    int, int);
209#ifdef DEVICE_POLLING
210static poll_handler_t em_poll;
211#endif
212
213/*********************************************************************
214 *  FreeBSD Device Interface Entry Points
215 *********************************************************************/
216
217static device_method_t em_methods[] = {
218	/* Device interface */
219	DEVMETHOD(device_probe, em_probe),
220	DEVMETHOD(device_attach, em_attach),
221	DEVMETHOD(device_detach, em_detach),
222	DEVMETHOD(device_shutdown, em_shutdown),
223	{0, 0}
224};
225
226static driver_t em_driver = {
227	"em", em_methods, sizeof(struct adapter ),
228};
229
230static devclass_t em_devclass;
231DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
232MODULE_DEPEND(em, pci, 1, 1, 1);
233MODULE_DEPEND(em, ether, 1, 1, 1);
234
235/*********************************************************************
236 *  Tunable default values.
237 *********************************************************************/
238
239#define E1000_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
240#define E1000_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)
241
242static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
243static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
244static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
245static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
246static int em_rxd = EM_DEFAULT_RXD;
247static int em_txd = EM_DEFAULT_TXD;
248
249TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
250TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
251TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
252TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
253TUNABLE_INT("hw.em.rxd", &em_rxd);
254TUNABLE_INT("hw.em.txd", &em_txd);
255
256/*********************************************************************
257 *  Device identification routine
258 *
259 *  em_probe determines if the driver should be loaded on
260 *  adapter based on PCI vendor/device id of the adapter.
261 *
262 *  return BUS_PROBE_DEFAULT on success, positive on failure
263 *********************************************************************/
264
265static int
266em_probe(device_t dev)
267{
268	em_vendor_info_t *ent;
269
270	u_int16_t       pci_vendor_id = 0;
271	u_int16_t       pci_device_id = 0;
272	u_int16_t       pci_subvendor_id = 0;
273	u_int16_t       pci_subdevice_id = 0;
274	char            adapter_name[60];
275
276	INIT_DEBUGOUT("em_probe: begin");
277
278	pci_vendor_id = pci_get_vendor(dev);
279	if (pci_vendor_id != EM_VENDOR_ID)
280		return(ENXIO);
281
282	pci_device_id = pci_get_device(dev);
283	pci_subvendor_id = pci_get_subvendor(dev);
284	pci_subdevice_id = pci_get_subdevice(dev);
285
286	ent = em_vendor_info_array;
287	while (ent->vendor_id != 0) {
288		if ((pci_vendor_id == ent->vendor_id) &&
289		    (pci_device_id == ent->device_id) &&
290
291		    ((pci_subvendor_id == ent->subvendor_id) ||
292		     (ent->subvendor_id == PCI_ANY_ID)) &&
293
294		    ((pci_subdevice_id == ent->subdevice_id) ||
295		     (ent->subdevice_id == PCI_ANY_ID))) {
296			sprintf(adapter_name, "%s %s",
297				em_strings[ent->index],
298				em_driver_version);
299			device_set_desc_copy(dev, adapter_name);
300			return(BUS_PROBE_DEFAULT);
301		}
302		ent++;
303	}
304
305	return(ENXIO);
306}
307
308/*********************************************************************
309 *  Device initialization routine
310 *
311 *  The attach entry point is called when the driver is being loaded.
312 *  This routine identifies the type of hardware, allocates all resources
313 *  and initializes the hardware.
314 *
315 *  return 0 on success, positive on failure
316 *********************************************************************/
317
318static int
319em_attach(device_t dev)
320{
321	struct adapter * adapter;
322	int             tsize, rsize;
323	int		error = 0;
324
325	INIT_DEBUGOUT("em_attach: begin");
326
327	/* Allocate, clear, and link in our adapter structure */
328	if (!(adapter = device_get_softc(dev))) {
329		printf("em: adapter structure allocation failed\n");
330		return(ENOMEM);
331	}
332	bzero(adapter, sizeof(struct adapter ));
333	adapter->dev = dev;
334	adapter->osdep.dev = dev;
335	adapter->unit = device_get_unit(dev);
336	EM_LOCK_INIT(adapter, device_get_nameunit(dev));
337
338	/* SYSCTL stuff */
339        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
340                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
341                        OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
342                        (void *)adapter, 0,
343                        em_sysctl_debug_info, "I", "Debug Information");
344
345        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
346                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
347                        OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
348                        (void *)adapter, 0,
349                        em_sysctl_stats, "I", "Statistics");
350
351	callout_init(&adapter->timer, CALLOUT_MPSAFE);
352	callout_init(&adapter->tx_fifo_timer, CALLOUT_MPSAFE);
353
354	/* Determine hardware revision */
355	em_identify_hardware(adapter);
356
357	/* Set up some sysctls for the tunable interrupt delays */
358	em_add_int_delay_sysctl(adapter, "rx_int_delay",
359	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
360	    E1000_REG_OFFSET(&adapter->hw, RDTR), em_rx_int_delay_dflt);
361	em_add_int_delay_sysctl(adapter, "tx_int_delay",
362	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
363	    E1000_REG_OFFSET(&adapter->hw, TIDV), em_tx_int_delay_dflt);
364	if (adapter->hw.mac_type >= em_82540) {
365		em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
366		    "receive interrupt delay limit in usecs",
367		    &adapter->rx_abs_int_delay,
368		    E1000_REG_OFFSET(&adapter->hw, RADV),
369		    em_rx_abs_int_delay_dflt);
370		em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
371		    "transmit interrupt delay limit in usecs",
372		    &adapter->tx_abs_int_delay,
373		    E1000_REG_OFFSET(&adapter->hw, TADV),
374		    em_tx_abs_int_delay_dflt);
375	}
376
377	/*
378	 * Validate number of transmit and receive descriptors. It
379	 * must not exceed hardware maximum, and must be multiple
380	 * of E1000_DBA_ALIGN.
381	 */
382	if (((em_txd * sizeof(struct em_tx_desc)) % E1000_DBA_ALIGN) != 0 ||
383	    (adapter->hw.mac_type >= em_82544 && em_txd > EM_MAX_TXD) ||
384	    (adapter->hw.mac_type < em_82544 && em_txd > EM_MAX_TXD_82543) ||
385	    (em_txd < EM_MIN_TXD)) {
386		printf("em%d: Using %d TX descriptors instead of %d!\n",
387		    adapter->unit, EM_DEFAULT_TXD, em_txd);
388		adapter->num_tx_desc = EM_DEFAULT_TXD;
389	} else
390		adapter->num_tx_desc = em_txd;
391	if (((em_rxd * sizeof(struct em_rx_desc)) % E1000_DBA_ALIGN) != 0 ||
392	    (adapter->hw.mac_type >= em_82544 && em_rxd > EM_MAX_RXD) ||
393	    (adapter->hw.mac_type < em_82544 && em_rxd > EM_MAX_RXD_82543) ||
394	    (em_rxd < EM_MIN_RXD)) {
395		printf("em%d: Using %d RX descriptors instead of %d!\n",
396		    adapter->unit, EM_DEFAULT_RXD, em_rxd);
397		adapter->num_rx_desc = EM_DEFAULT_RXD;
398	} else
399		adapter->num_rx_desc = em_rxd;
400
401        adapter->hw.autoneg = DO_AUTO_NEG;
402        adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
403        adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
404        adapter->hw.tbi_compatibility_en = TRUE;
405        adapter->rx_buffer_len = EM_RXBUFFER_2048;
406
407	adapter->hw.phy_init_script = 1;
408	adapter->hw.phy_reset_disable = FALSE;
409
410#ifndef EM_MASTER_SLAVE
411	adapter->hw.master_slave = em_ms_hw_default;
412#else
413	adapter->hw.master_slave = EM_MASTER_SLAVE;
414#endif
415	/*
416	 * Set the max frame size assuming standard ethernet
417	 * sized frames
418	 */
419	adapter->hw.max_frame_size =
420		ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
421
422	adapter->hw.min_frame_size =
423		MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
424
425	/*
426	 * This controls when hardware reports transmit completion
427	 * status.
428	 */
429	adapter->hw.report_tx_early = 1;
430
431
432	if (em_allocate_pci_resources(adapter)) {
433		printf("em%d: Allocation of PCI resources failed\n",
434		       adapter->unit);
435                error = ENXIO;
436                goto err_pci;
437	}
438
439
440	/* Initialize eeprom parameters */
441        em_init_eeprom_params(&adapter->hw);
442
443	tsize = roundup2(adapter->num_tx_desc * sizeof(struct em_tx_desc),
444	    E1000_DBA_ALIGN);
445
446	/* Allocate Transmit Descriptor ring */
447        if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
448                printf("em%d: Unable to allocate tx_desc memory\n",
449                       adapter->unit);
450		error = ENOMEM;
451                goto err_tx_desc;
452        }
453        adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;
454
455	rsize = roundup2(adapter->num_rx_desc * sizeof(struct em_rx_desc),
456	    E1000_DBA_ALIGN);
457
458	/* Allocate Receive Descriptor ring */
459        if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
460                printf("em%d: Unable to allocate rx_desc memory\n",
461                        adapter->unit);
462		error = ENOMEM;
463                goto err_rx_desc;
464        }
465        adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;
466
467	/* Initialize the hardware */
468	if (em_hardware_init(adapter)) {
469		printf("em%d: Unable to initialize the hardware\n",
470		       adapter->unit);
471		error = EIO;
472                goto err_hw_init;
473	}
474
475	/* Copy the permanent MAC address out of the EEPROM */
476	if (em_read_mac_addr(&adapter->hw) < 0) {
477		printf("em%d: EEPROM read error while reading mac address\n",
478		       adapter->unit);
479		error = EIO;
480                goto err_mac_addr;
481	}
482
483	if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
484                printf("em%d: Invalid mac address\n", adapter->unit);
485                error = EIO;
486                goto err_mac_addr;
487        }
488
489	/* Setup OS specific network interface */
490	em_setup_interface(dev, adapter);
491
492	/* Initialize statistics */
493	em_clear_hw_cntrs(&adapter->hw);
494	em_update_stats_counters(adapter);
495	adapter->hw.get_link_status = 1;
496	em_check_for_link(&adapter->hw);
497
498	if (bootverbose) {
499		/* Print the link status */
500		if (adapter->link_active == 1) {
501			em_get_speed_and_duplex(&adapter->hw,
502			    &adapter->link_speed, &adapter->link_duplex);
503			printf("em%d:  Speed:%d Mbps  Duplex:%s\n",
504			       adapter->unit,
505			       adapter->link_speed,
506			       adapter->link_duplex == FULL_DUPLEX ? "Full" :
507				"Half");
508		} else
509			printf("em%d:  Speed:N/A  Duplex:N/A\n",
510			    adapter->unit);
511	}
512
513	/* Identify 82544 on PCIX */
514        em_get_bus_info(&adapter->hw);
515        if(adapter->hw.bus_type == em_bus_type_pcix &&
516           adapter->hw.mac_type == em_82544) {
517                adapter->pcix_82544 = TRUE;
518        }
519        else {
520                adapter->pcix_82544 = FALSE;
521        }
522	INIT_DEBUGOUT("em_attach: end");
523	return(0);
524
525err_mac_addr:
526err_hw_init:
527        em_dma_free(adapter, &adapter->rxdma);
528err_rx_desc:
529        em_dma_free(adapter, &adapter->txdma);
530err_tx_desc:
531err_pci:
532        em_free_pci_resources(adapter);
533	EM_LOCK_DESTROY(adapter);
534        return(error);
535
536}
537
538/*********************************************************************
539 *  Device removal routine
540 *
541 *  The detach entry point is called when the driver is being removed.
542 *  This routine stops the adapter and deallocates all the resources
543 *  that were allocated for driver operation.
544 *
545 *  return 0 on success, positive on failure
546 *********************************************************************/
547
548static int
549em_detach(device_t dev)
550{
551	struct adapter * adapter = device_get_softc(dev);
552	struct ifnet   *ifp = adapter->ifp;
553
554	INIT_DEBUGOUT("em_detach: begin");
555
556#ifdef DEVICE_POLLING
557	if (ifp->if_capenable & IFCAP_POLLING)
558		ether_poll_deregister(ifp);
559#endif
560
561	EM_LOCK(adapter);
562	adapter->in_detach = 1;
563	em_stop(adapter);
564	em_phy_hw_reset(&adapter->hw);
565	EM_UNLOCK(adapter);
566        ether_ifdetach(adapter->ifp);
567
568	em_free_pci_resources(adapter);
569	bus_generic_detach(dev);
570	if_free(ifp);
571
572	/* Free Transmit Descriptor ring */
573        if (adapter->tx_desc_base) {
574                em_dma_free(adapter, &adapter->txdma);
575                adapter->tx_desc_base = NULL;
576        }
577
578        /* Free Receive Descriptor ring */
579        if (adapter->rx_desc_base) {
580                em_dma_free(adapter, &adapter->rxdma);
581                adapter->rx_desc_base = NULL;
582        }
583
584	EM_LOCK_DESTROY(adapter);
585
586	return(0);
587}
588
589/*********************************************************************
590 *
591 *  Shutdown entry point
592 *
593 **********************************************************************/
594
595static int
596em_shutdown(device_t dev)
597{
598	struct adapter *adapter = device_get_softc(dev);
599	EM_LOCK(adapter);
600	em_stop(adapter);
601	EM_UNLOCK(adapter);
602	return(0);
603}
604
605
606/*********************************************************************
607 *  Transmit entry point
608 *
609 *  em_start is called by the stack to initiate a transmit.
610 *  The driver will remain in this routine as long as there are
611 *  packets to transmit and transmit resources are available.
612 *  In case resources are not available stack is notified and
613 *  the packet is requeued.
614 **********************************************************************/
615
616static void
617em_start_locked(struct ifnet *ifp)
618{
619        struct mbuf    *m_head;
620        struct adapter *adapter = ifp->if_softc;
621
622	mtx_assert(&adapter->mtx, MA_OWNED);
623
624        if (!adapter->link_active)
625                return;
626
627        while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
628
629                IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
630
631                if (m_head == NULL) break;
632
633		/*
634		 * em_encap() can modify our pointer, and or make it NULL on
635		 * failure.  In that event, we can't requeue.
636		 */
637		if (em_encap(adapter, &m_head)) {
638			if (m_head == NULL)
639				break;
640			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
641			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
642			break;
643                }
644
645		/* Send a copy of the frame to the BPF listener */
646		BPF_MTAP(ifp, m_head);
647
648                /* Set timeout in case hardware has problems transmitting */
649                ifp->if_timer = EM_TX_TIMEOUT;
650
651        }
652        return;
653}
654
655static void
656em_start(struct ifnet *ifp)
657{
658	struct adapter *adapter = ifp->if_softc;
659
660	EM_LOCK(adapter);
661	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
662		em_start_locked(ifp);
663	EM_UNLOCK(adapter);
664	return;
665}
666
667/*********************************************************************
668 *  Ioctl entry point
669 *
670 *  em_ioctl is called when the user wants to configure the
671 *  interface.
672 *
673 *  return 0 on success, positive on failure
674 **********************************************************************/
675
676static int
677em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
678{
679	struct ifreq   *ifr = (struct ifreq *) data;
680	struct adapter * adapter = ifp->if_softc;
681	int error = 0;
682
683	if (adapter->in_detach) return(error);
684
685	switch (command) {
686	case SIOCSIFADDR:
687	case SIOCGIFADDR:
688		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
689		ether_ioctl(ifp, command, data);
690		break;
691	case SIOCSIFMTU:
692	    {
693		int max_frame_size;
694
695		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
696
697		switch (adapter->hw.mac_type) {
698		case em_82571:
699		case em_82572:
700			max_frame_size = 10500;
701			break;
702		case em_82573:
703			/* 82573 does not support jumbo frames. */
704			max_frame_size = ETHER_MAX_LEN;
705			break;
706		default:
707			max_frame_size = MAX_JUMBO_FRAME_SIZE;
708		}
709		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
710		    ETHER_CRC_LEN) {
711			error = EINVAL;
712			break;
713		}
714
715		EM_LOCK(adapter);
716		ifp->if_mtu = ifr->ifr_mtu;
717		adapter->hw.max_frame_size =
718		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
719		em_init_locked(adapter);
720		EM_UNLOCK(adapter);
721		break;
722	    }
723	case SIOCSIFFLAGS:
724		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
725		EM_LOCK(adapter);
726		if (ifp->if_flags & IFF_UP) {
727			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
728				em_init_locked(adapter);
729			}
730
731			em_disable_promisc(adapter);
732			em_set_promisc(adapter);
733		} else {
734			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
735				em_stop(adapter);
736			}
737		}
738		EM_UNLOCK(adapter);
739		break;
740	case SIOCADDMULTI:
741	case SIOCDELMULTI:
742		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
743		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
744			EM_LOCK(adapter);
745			em_disable_intr(adapter);
746			em_set_multi(adapter);
747			if (adapter->hw.mac_type == em_82542_rev2_0) {
748				em_initialize_receive_unit(adapter);
749			}
750#ifdef DEVICE_POLLING
751                        if (!(ifp->if_capenable & IFCAP_POLLING))
752#endif
753				em_enable_intr(adapter);
754			EM_UNLOCK(adapter);
755		}
756		break;
757	case SIOCSIFMEDIA:
758	case SIOCGIFMEDIA:
759		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
760		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
761		break;
762	case SIOCSIFCAP:
763	    {
764		int mask, reinit;
765
766		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
767		reinit = 0;
768		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
769#ifdef DEVICE_POLLING
770		if (mask & IFCAP_POLLING) {
771			if (ifr->ifr_reqcap & IFCAP_POLLING) {
772				error = ether_poll_register(em_poll, ifp);
773				if (error)
774					return(error);
775				EM_LOCK(adapter);
776				em_disable_intr(adapter);
777				ifp->if_capenable |= IFCAP_POLLING;
778				EM_UNLOCK(adapter);
779			} else {
780				error = ether_poll_deregister(ifp);
781				/* Enable interrupt even in error case */
782				EM_LOCK(adapter);
783				em_enable_intr(adapter);
784				ifp->if_capenable &= ~IFCAP_POLLING;
785				EM_UNLOCK(adapter);
786			}
787		}
788#endif
789		if (mask & IFCAP_HWCSUM) {
790			ifp->if_capenable ^= IFCAP_HWCSUM;
791			reinit = 1;
792		}
793		if (mask & IFCAP_VLAN_HWTAGGING) {
794			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
795			reinit = 1;
796		}
797		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
798			em_init(adapter);
799		break;
800	    }
801	default:
802		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)", (int)command);
803		error = EINVAL;
804	}
805
806	return(error);
807}
808
809/*********************************************************************
810 *  Watchdog entry point
811 *
812 *  This routine is called whenever hardware quits transmitting.
813 *
814 **********************************************************************/
815
816static void
817em_watchdog(struct ifnet *ifp)
818{
819	struct adapter * adapter;
820	adapter = ifp->if_softc;
821
822	EM_LOCK(adapter);
823	/* If we are in this routine because of pause frames, then
824	 * don't reset the hardware.
825	 */
826	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
827		ifp->if_timer = EM_TX_TIMEOUT;
828		EM_UNLOCK(adapter);
829		return;
830	}
831
832	if (em_check_for_link(&adapter->hw))
833		printf("em%d: watchdog timeout -- resetting\n", adapter->unit);
834
835	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
836	adapter->watchdog_events++;
837
838	em_init_locked(adapter);
839	EM_UNLOCK(adapter);
840}
841
842/*********************************************************************
843 *  Init entry point
844 *
845 *  This routine is used in two ways. It is used by the stack as
846 *  init entry point in network interface structure. It is also used
847 *  by the driver as a hw/sw initialization routine to get to a
848 *  consistent state.
849 *
850 *  return 0 on success, positive on failure
851 **********************************************************************/
852
/*
 * Bring the adapter to a running state: stop it, program the
 * packet-buffer (PBA) Rx/Tx split, reload the MAC address, reinitialize
 * the hardware, and rebuild the transmit and receive rings.  Caller
 * must hold the adapter mutex.
 */
static void
em_init_locked(struct adapter * adapter)
{
	struct ifnet   *ifp;

	uint32_t	pba;
	ifp = adapter->ifp;

	INIT_DEBUGOUT("em_init: begin");

	mtx_assert(&adapter->mtx, MA_OWNED);

	/* Quiesce the hardware and free the current rings before rebuilding. */
	em_stop(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 */
	switch (adapter->hw.mac_type) {
	case em_82547:
	case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (adapter->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		/* The 82547 Tx-FIFO workaround state depends on the split. */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	case em_82571: /* 82571: Total Packet Buffer is 48K */
	case em_82572: /* 82572: Total Packet Buffer is 48K */
			pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;
	case em_82573: /* 82573: Total Packet Buffer is 32K */
		/* Jumbo frames not supported */
			pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K.   */
		if(adapter->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}

	INIT_DEBUGOUT1("em_init: pba=%dK",pba);
	E1000_WRITE_REG(&adapter->hw, PBA, pba);

	/* Get the latest mac address, User can use a LAA */
        bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac_addr,
              ETHER_ADDR_LEN);

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		printf("em%d: Unable to initialize the hardware\n",
		       adapter->unit);
		return;
	}

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		em_enable_vlans(adapter);

	/* Prepare transmit descriptors and buffers */
	if (em_setup_transmit_structures(adapter)) {
		printf("em%d: Could not setup transmit structures\n",
		       adapter->unit);
		em_stop(adapter);
		return;
	}
	em_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	em_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (em_setup_receive_structures(adapter)) {
		printf("em%d: Could not setup receive structures\n",
		       adapter->unit);
		em_stop(adapter);
		return;
	}
	em_initialize_receive_unit(adapter);

	/* Don't loose promiscuous settings */
	em_set_promisc(adapter);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* 82543 and newer can checksum in hardware; advertise accordingly. */
	if (adapter->hw.mac_type >= em_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist = EM_CHECKSUM_FEATURES;
		else
			ifp->if_hwassist = 0;
	}

	/* Start the periodic link/statistics timer. */
	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
	em_clear_hw_cntrs(&adapter->hw);
#ifdef DEVICE_POLLING
        /*
         * Only enable interrupts if we are not polling, make sure
         * they are off otherwise.
         */
        if (ifp->if_capenable & IFCAP_POLLING)
                em_disable_intr(adapter);
        else
#endif /* DEVICE_POLLING */
		em_enable_intr(adapter);

	/* Don't reset the phy next time init gets called */
	adapter->hw.phy_reset_disable = TRUE;

	return;
}
968
/*
 * if_init entry point handed to the network stack: a locked wrapper
 * around em_init_locked().
 */
static void
em_init(void *arg)
{
	struct adapter *sc = arg;

	EM_LOCK(sc);
	em_init_locked(sc);
	EM_UNLOCK(sc);
}
979
980
981#ifdef DEVICE_POLLING
982static void
983em_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
984{
985        struct adapter *adapter = ifp->if_softc;
986        u_int32_t reg_icr;
987
988	mtx_assert(&adapter->mtx, MA_OWNED);
989
990        if (cmd == POLL_AND_CHECK_STATUS) {
991                reg_icr = E1000_READ_REG(&adapter->hw, ICR);
992                if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
993			callout_stop(&adapter->timer);
994                        adapter->hw.get_link_status = 1;
995                        em_check_for_link(&adapter->hw);
996                        em_print_link_status(adapter);
997			callout_reset(&adapter->timer, hz, em_local_timer, adapter);
998                }
999        }
1000	em_process_receive_interrupts(adapter, count);
1001	em_clean_transmit_interrupts(adapter);
1002
1003        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1004                em_start_locked(ifp);
1005}
1006
1007static void
1008em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1009{
1010        struct adapter *adapter = ifp->if_softc;
1011
1012	EM_LOCK(adapter);
1013	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1014		em_poll_locked(ifp, cmd, count);
1015	EM_UNLOCK(adapter);
1016}
1017#endif /* DEVICE_POLLING */
1018
1019/*********************************************************************
1020 *
1021 *  Interrupt Service routine
1022 *
1023 **********************************************************************/
/*
 * Legacy interrupt service routine.  Loops reading ICR (which
 * acknowledges the pending causes) until no cause bits remain,
 * cleaning the RX/TX rings and handling link-state changes each pass.
 * Registered INTR_MPSAFE; takes the adapter lock itself.
 */
static void
em_intr(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp;
	uint32_t	reg_icr;
	int		wantinit = 0;	/* set on RX overrun; reinit compiled out below */

	EM_LOCK(adapter);

	ifp = adapter->ifp;

#ifdef DEVICE_POLLING
	/* In polling mode em_poll() does all the work. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		EM_UNLOCK(adapter);
		return;
	}
#endif /* DEVICE_POLLING */

	for (;;) {
		reg_icr = E1000_READ_REG(&adapter->hw, ICR);
		/* On 82571+, INT_ASSERTED distinguishes our interrupt
		 * from a shared-line neighbor's. */
		if (adapter->hw.mac_type >= em_82571 &&
		    (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
			break;
		else if (reg_icr == 0)
			break;

		/*
		 * XXX: some laptops trigger several spurious interrupts
		 * on em(4) when in the resume cycle. The ICR register
		 * reports all-ones value in this case. Processing such
		 * interrupts would lead to a freeze. I don't know why.
		 */
		if (reg_icr == 0xffffffff)
			break;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			em_process_receive_interrupts(adapter, -1);
			em_clean_transmit_interrupts(adapter);
		}

		/* Link status change */
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.get_link_status = 1;
			em_check_for_link(&adapter->hw);
			em_print_link_status(adapter);
			callout_reset(&adapter->timer, hz, em_local_timer,
			    adapter);
		}

		if (reg_icr & E1000_ICR_RXO) {
			adapter->rx_overruns++;
			wantinit = 1;
		}
	}
	/* Reinit-on-overrun is deliberately disabled. */
#if 0
	if (wantinit)
		em_init_locked(adapter);
#endif
	/* Restart transmission if packets queued up while servicing. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		em_start_locked(ifp);

	EM_UNLOCK(adapter);
	return;
}
1091
1092
1093
1094/*********************************************************************
1095 *
1096 *  Media Ioctl callback
1097 *
1098 *  This routine is called whenever the user queries the status of
1099 *  the interface using ifconfig.
1100 *
1101 **********************************************************************/
1102static void
1103em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1104{
1105	struct adapter * adapter = ifp->if_softc;
1106
1107	INIT_DEBUGOUT("em_media_status: begin");
1108
1109	em_check_for_link(&adapter->hw);
1110	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1111		if (adapter->link_active == 0) {
1112			em_get_speed_and_duplex(&adapter->hw,
1113						&adapter->link_speed,
1114						&adapter->link_duplex);
1115			adapter->link_active = 1;
1116		}
1117	} else {
1118		if (adapter->link_active == 1) {
1119			adapter->link_speed = 0;
1120			adapter->link_duplex = 0;
1121			adapter->link_active = 0;
1122		}
1123	}
1124
1125	ifmr->ifm_status = IFM_AVALID;
1126	ifmr->ifm_active = IFM_ETHER;
1127
1128	if (!adapter->link_active)
1129		return;
1130
1131	ifmr->ifm_status |= IFM_ACTIVE;
1132
1133	if (adapter->hw.media_type == em_media_type_fiber) {
1134		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1135	} else {
1136		switch (adapter->link_speed) {
1137		case 10:
1138			ifmr->ifm_active |= IFM_10_T;
1139			break;
1140		case 100:
1141			ifmr->ifm_active |= IFM_100_TX;
1142			break;
1143		case 1000:
1144			ifmr->ifm_active |= IFM_1000_T;
1145			break;
1146		}
1147		if (adapter->link_duplex == FULL_DUPLEX)
1148			ifmr->ifm_active |= IFM_FDX;
1149		else
1150			ifmr->ifm_active |= IFM_HDX;
1151	}
1152	return;
1153}
1154
1155/*********************************************************************
1156 *
1157 *  Media Ioctl callback
1158 *
1159 *  This routine is called when the user changes speed/duplex using
1160 *  media/mediopt option with ifconfig.
1161 *
1162 **********************************************************************/
1163static int
1164em_media_change(struct ifnet *ifp)
1165{
1166	struct adapter * adapter = ifp->if_softc;
1167	struct ifmedia  *ifm = &adapter->media;
1168
1169	INIT_DEBUGOUT("em_media_change: begin");
1170
1171	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1172		return(EINVAL);
1173
1174	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1175	case IFM_AUTO:
1176		adapter->hw.autoneg = DO_AUTO_NEG;
1177		adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1178		break;
1179	case IFM_1000_SX:
1180	case IFM_1000_T:
1181		adapter->hw.autoneg = DO_AUTO_NEG;
1182		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1183		break;
1184	case IFM_100_TX:
1185		adapter->hw.autoneg = FALSE;
1186		adapter->hw.autoneg_advertised = 0;
1187		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1188			adapter->hw.forced_speed_duplex = em_100_full;
1189		else
1190			adapter->hw.forced_speed_duplex	= em_100_half;
1191		break;
1192	case IFM_10_T:
1193		adapter->hw.autoneg = FALSE;
1194		adapter->hw.autoneg_advertised = 0;
1195		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1196			adapter->hw.forced_speed_duplex = em_10_full;
1197		else
1198			adapter->hw.forced_speed_duplex	= em_10_half;
1199		break;
1200	default:
1201		printf("em%d: Unsupported media type\n", adapter->unit);
1202	}
1203
1204	/* As the speed/duplex settings my have changed we need to
1205	 * reset the PHY.
1206	 */
1207	adapter->hw.phy_reset_disable = FALSE;
1208
1209	em_init(adapter);
1210
1211	return(0);
1212}
1213
1214/*********************************************************************
1215 *
1216 *  This routine maps the mbufs to tx descriptors.
1217 *
1218 *  return 0 on success, positive on failure
1219 **********************************************************************/
/*
 * Map an mbuf chain onto TX descriptors and hand the frame to the
 * hardware.  On success the mbuf is owned by the ring (recorded on the
 * last descriptor's buffer slot, freed at TX-clean time).  Caller holds
 * the adapter lock.  Returns 0 or an errno (ENOBUFS when descriptors
 * run out or software VLAN encapsulation fails).
 */
static int
em_encap(struct adapter *adapter, struct mbuf **m_headp)
{
        u_int32_t       txd_upper;
        u_int32_t       txd_lower, txd_used = 0, txd_saved = 0;
        int             i, j, error = 0;
	bus_dmamap_t	map;

	struct mbuf	*m_head;

	/* For 82544 Workaround */
	DESC_ARRAY              desc_array;
	u_int32_t               array_elements;
	u_int32_t               counter;
        struct m_tag    *mtag;
	bus_dma_segment_t	segs[EM_MAX_SCATTER];
	int			nsegs;
        struct em_buffer   *tx_buffer;
        struct em_tx_desc *current_tx_desc = NULL;
        struct ifnet   *ifp = adapter->ifp;

	m_head = *m_headp;

        /*
         * Force a cleanup if number of TX descriptors
         * available hits the threshold
         */
        if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
                em_clean_transmit_interrupts(adapter);
                if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
                        adapter->no_tx_desc_avail1++;
                        return(ENOBUFS);
                }
        }

        /*
         * Map the packet for DMA.
         */
	tx_buffer = &adapter->tx_buffer_area[adapter->next_avail_tx_desc];
	error = bus_dmamap_load_mbuf_sg(adapter->txtag, tx_buffer->map, m_head,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	map = tx_buffer->map;
        if (error != 0) {
                adapter->no_tx_dma_setup++;
                return (error);
        }
        KASSERT(nsegs != 0, ("em_encap: empty packet"));

        if (nsegs > adapter->num_tx_desc_avail) {
                adapter->no_tx_desc_avail2++;
		error = ENOBUFS;
		goto encap_fail;
        }


        if (ifp->if_hwassist > 0) {
                em_transmit_checksum_setup(adapter,  m_head,
                                           &txd_upper, &txd_lower);
        } else
                txd_upper = txd_lower = 0;


        /* Find out if we are in vlan mode */
        mtag = VLAN_OUTPUT_TAG(ifp, m_head);

	/*
	 * When operating in promiscuous mode, hardware encapsulation for
	 * packets is disabled.  This means we have to add the vlan
	 * encapsulation in the driver, since it will have come down from the
	 * VLAN layer with a tag instead of a VLAN header.
	 *
	 * NOTE(review): the DMA map above was loaded on the chain *before*
	 * M_PREPEND below grows it, so segs[] does not cover the
	 * software-inserted VLAN header.  Verify against a later revision
	 * whether this path needs to reload the map.
	 */
	if (mtag != NULL && adapter->em_insert_vlan_header) {
		struct ether_vlan_header *evl;
		struct ether_header eh;

		m_head = m_pullup(m_head, sizeof(eh));
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		eh = *mtod(m_head, struct ether_header *);
		M_PREPEND(m_head, sizeof(*evl), M_DONTWAIT);
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		m_head = m_pullup(m_head, sizeof(*evl));
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		evl = mtod(m_head, struct ether_vlan_header *);
		bcopy(&eh, evl, sizeof(*evl));
		evl->evl_proto = evl->evl_encap_proto;
		evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
		evl->evl_tag = htons(VLAN_TAG_VALUE(mtag));
		m_tag_delete(m_head, mtag);
		mtag = NULL;
		*m_headp = m_head;
	}

        i = adapter->next_avail_tx_desc;
	if (adapter->pcix_82544) {
		/* Remember where we started so a mid-frame failure can
		 * roll the ring back. */
		txd_saved = i;
		txd_used = 0;
	}
        for (j = 0; j < nsegs; j++) {
		/* If adapter is 82544 and on PCIX bus */
		if(adapter->pcix_82544) {
			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
                        array_elements = em_fill_descriptors(segs[j].ds_addr,
			    segs[j].ds_len, &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				if (txd_used == adapter->num_tx_desc_avail) {
					adapter->next_avail_tx_desc = txd_saved;
					adapter->no_tx_desc_avail2++;
					error = ENOBUFS;
					goto encap_fail;
                                }
                                tx_buffer = &adapter->tx_buffer_area[i];
                                current_tx_desc = &adapter->tx_desc_base[i];
                                current_tx_desc->buffer_addr = htole64(
					desc_array.descriptor[counter].address);
                                current_tx_desc->lower.data = htole32(
					(adapter->txd_cmd | txd_lower |
					 (u_int16_t)desc_array.descriptor[counter].length));
                                current_tx_desc->upper.data = htole32((txd_upper));
                                if (++i == adapter->num_tx_desc)
                                         i = 0;

                                tx_buffer->m_head = NULL;
                                txd_used++;
                        }
		} else {
			tx_buffer = &adapter->tx_buffer_area[i];
			current_tx_desc = &adapter->tx_desc_base[i];

			current_tx_desc->buffer_addr = htole64(segs[j].ds_addr);
			current_tx_desc->lower.data = htole32(
				adapter->txd_cmd | txd_lower | segs[j].ds_len);
			current_tx_desc->upper.data = htole32(txd_upper);

			if (++i == adapter->num_tx_desc)
				i = 0;

			tx_buffer->m_head = NULL;
		}
        }

	adapter->next_avail_tx_desc = i;
	if (adapter->pcix_82544) {
		adapter->num_tx_desc_avail -= txd_used;
	}
	else {
		adapter->num_tx_desc_avail -= nsegs;
	}

        if (mtag != NULL) {
                /* Set the vlan id */
                current_tx_desc->upper.fields.special = htole16(VLAN_TAG_VALUE(mtag));

                /* Tell hardware to add tag */
                current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
        }

        /* The mbuf is recorded on the last descriptor's buffer slot. */
        tx_buffer->m_head = m_head;
        bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

        /*
         * Last Descriptor of Packet needs End Of Packet (EOP)
         */
        current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);

        /*
         * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
         * that this frame is available to transmit.
         */
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        if (adapter->hw.mac_type == em_82547 &&
            adapter->link_duplex == HALF_DUPLEX) {
                /* 82547 half-duplex: stage TDT via the FIFO workaround. */
                em_82547_move_tail_locked(adapter);
        } else {
                E1000_WRITE_REG(&adapter->hw, TDT, i);
                if (adapter->hw.mac_type == em_82547) {
                        em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len);
                }
        }

        return(0);

encap_fail:
	bus_dmamap_unload(adapter->txtag, tx_buffer->map);
	return (error);
}
1421
1422/*********************************************************************
1423 *
1424 * 82547 workaround to avoid controller hang in half-duplex environment.
1425 * The workaround is to avoid queuing a large packet that would span
1426 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1427 * in this case. We do that only when FIFO is quiescent.
1428 *
1429 **********************************************************************/
/*
 * 82547 half-duplex workaround: advance the hardware tail (TDT) one
 * frame at a time so that a frame which would wrap the internal Tx FIFO
 * can be held back until the FIFO drains and its pointers are reset.
 * Caller must hold the adapter lock.
 */
static void
em_82547_move_tail_locked(struct adapter *adapter)
{
	uint16_t hw_tdt;
	uint16_t sw_tdt;
	struct em_tx_desc *tx_desc;
	uint16_t length = 0;
	boolean_t eop = 0;

	EM_LOCK_ASSERT(adapter);

	hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
	sw_tdt = adapter->next_avail_tx_desc;

	/* Walk the descriptors the hardware hasn't been told about yet. */
	while (hw_tdt != sw_tdt) {
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if(++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if(eop) {
			/* Frame boundary: check whether it fits the FIFO. */
			if (em_82547_fifo_workaround(adapter, length)) {
				/* FIFO busy; retry from a one-tick callout. */
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
					em_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
			em_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}
	return;
}
1465
/*
 * Callout handler: take the adapter lock and retry advancing the tail
 * for the 82547 FIFO workaround.
 */
static void
em_82547_move_tail(void *arg)
{
	struct adapter *adapter = arg;

	EM_LOCK(adapter);
	em_82547_move_tail_locked(adapter);
	EM_UNLOCK(adapter);
}
1475
1476static int
1477em_82547_fifo_workaround(struct adapter *adapter, int len)
1478{
1479	int fifo_space, fifo_pkt_len;
1480
1481	fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1482
1483	if (adapter->link_duplex == HALF_DUPLEX) {
1484		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1485
1486		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1487			if (em_82547_tx_fifo_reset(adapter)) {
1488				return(0);
1489			}
1490			else {
1491				return(1);
1492			}
1493		}
1494	}
1495
1496	return(0);
1497}
1498
1499static void
1500em_82547_update_fifo_head(struct adapter *adapter, int len)
1501{
1502	int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1503
1504	/* tx_fifo_head is always 16 byte aligned */
1505	adapter->tx_fifo_head += fifo_pkt_len;
1506	if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1507		adapter->tx_fifo_head -= adapter->tx_fifo_size;
1508	}
1509
1510	return;
1511}
1512
1513
/*
 * Reset the 82547 internal Tx FIFO head/tail pointers.  Only legal when
 * the FIFO is fully quiescent: descriptor ring drained (TDT == TDH),
 * FIFO pointers caught up, and no packets buffered (TDFPC == 0).
 * Returns TRUE when the reset was performed, FALSE when the FIFO was
 * still busy.
 */
static int
em_82547_tx_fifo_reset(struct adapter *adapter)
{
	uint32_t tctl;

	if ( (E1000_READ_REG(&adapter->hw, TDT) ==
	      E1000_READ_REG(&adapter->hw, TDH)) &&
	     (E1000_READ_REG(&adapter->hw, TDFT) ==
	      E1000_READ_REG(&adapter->hw, TDFH)) &&
	     (E1000_READ_REG(&adapter->hw, TDFTS) ==
	      E1000_READ_REG(&adapter->hw, TDFHS)) &&
	     (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {

		/* Disable TX unit */
		tctl = E1000_READ_REG(&adapter->hw, TCTL);
		E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&adapter->hw, TDFT,  adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFH,  adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
		E1000_WRITE_FLUSH(&adapter->hw);

		adapter->tx_fifo_head = 0;
		adapter->tx_fifo_reset_cnt++;

		return(TRUE);
	}
	else {
		return(FALSE);
	}
}
1550
/*
 * Program the RCTL promiscuous bits from the interface flags.  In full
 * promiscuous mode hardware VLAN stripping is turned off so tagged
 * frames are delivered intact; em_insert_vlan_header then tells
 * em_encap() to build VLAN headers in software for outbound frames.
 */
static void
em_set_promisc(struct adapter * adapter)
{

	u_int32_t       reg_rctl;
	struct ifnet   *ifp = adapter->ifp;

	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
		/* Disable VLAN stripping in promiscous mode
		 * This enables bridging of vlan tagged frames to occur
		 * and also allows vlan tags to be seen in tcpdump
		 */
		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
			em_disable_vlans(adapter);
		adapter->em_insert_vlan_header = 1;
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= E1000_RCTL_MPE;
		reg_rctl &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
		adapter->em_insert_vlan_header = 0;
	} else
		/* NOTE(review): UPE/MPE are not cleared here; the plain
		 * case relies on em_disable_promisc() having run. */
		adapter->em_insert_vlan_header = 0;

	return;
}
1580
1581static void
1582em_disable_promisc(struct adapter * adapter)
1583{
1584	u_int32_t       reg_rctl;
1585	struct ifnet   *ifp = adapter->ifp;
1586
1587	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1588
1589	reg_rctl &=  (~E1000_RCTL_UPE);
1590	reg_rctl &=  (~E1000_RCTL_MPE);
1591	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1592
1593	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1594		em_enable_vlans(adapter);
1595	adapter->em_insert_vlan_header = 0;
1596
1597	return;
1598}
1599
1600
1601/*********************************************************************
1602 *  Multicast Update
1603 *
1604 *  This routine is called whenever multicast address list is updated.
1605 *
1606 **********************************************************************/
1607
/*
 * Rebuild the hardware multicast filter from the interface's multicast
 * address list.  Falls back to multicast-promiscuous (MPE) when the
 * list exceeds the table size.
 */
static void
em_set_multi(struct adapter * adapter)
{
        u_int32_t reg_rctl = 0;
        u_int8_t  mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
        struct ifmultiaddr  *ifma;
        int mcnt = 0;
        struct ifnet   *ifp = adapter->ifp;

        IOCTL_DEBUGOUT("em_set_multi: begin");

        /* 82542 rev2.0: hold the receiver in reset (and disable MWI)
         * while updating the multicast table. */
        if (adapter->hw.mac_type == em_82542_rev2_0) {
                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
                if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
                        em_pci_clear_mwi(&adapter->hw);
                }
                reg_rctl |= E1000_RCTL_RST;
                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
                msec_delay(5);
        }

	/* Gather the link-level multicast addresses into a flat array. */
	IF_ADDR_LOCK(ifp);
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break;

                bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
                      &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
                mcnt++;
        }
	IF_ADDR_UNLOCK(ifp);

        /* Too many groups for the table: receive all multicast instead. */
        if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
                reg_rctl |= E1000_RCTL_MPE;
                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
        } else
                em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);

        /* Take the 82542 receiver back out of reset and restore MWI. */
        if (adapter->hw.mac_type == em_82542_rev2_0) {
                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
                reg_rctl &= ~E1000_RCTL_RST;
                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
                msec_delay(5);
                if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
                        em_pci_set_mwi(&adapter->hw);
                }
        }

        return;
}
1661
1662
1663/*********************************************************************
1664 *  Timer routine
1665 *
1666 *  This routine checks for link status and updates statistics.
1667 *
1668 **********************************************************************/
1669
1670static void
1671em_local_timer(void *arg)
1672{
1673	struct ifnet   *ifp;
1674	struct adapter * adapter = arg;
1675	ifp = adapter->ifp;
1676
1677	EM_LOCK(adapter);
1678
1679	em_check_for_link(&adapter->hw);
1680	em_print_link_status(adapter);
1681	em_update_stats_counters(adapter);
1682	if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1683		em_print_hw_stats(adapter);
1684	}
1685	em_smartspeed(adapter);
1686
1687	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1688
1689	EM_UNLOCK(adapter);
1690	return;
1691}
1692
1693static void
1694em_print_link_status(struct adapter * adapter)
1695{
1696	struct ifnet *ifp = adapter->ifp;
1697
1698	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1699		if (adapter->link_active == 0) {
1700			em_get_speed_and_duplex(&adapter->hw,
1701						&adapter->link_speed,
1702						&adapter->link_duplex);
1703			if (bootverbose)
1704				printf("em%d: Link is up %d Mbps %s\n",
1705				       adapter->unit,
1706				       adapter->link_speed,
1707				       ((adapter->link_duplex == FULL_DUPLEX) ?
1708					"Full Duplex" : "Half Duplex"));
1709			adapter->link_active = 1;
1710			adapter->smartspeed = 0;
1711			if_link_state_change(ifp, LINK_STATE_UP);
1712		}
1713	} else {
1714		if (adapter->link_active == 1) {
1715			adapter->link_speed = 0;
1716			adapter->link_duplex = 0;
1717			if (bootverbose)
1718				printf("em%d: Link is Down\n", adapter->unit);
1719			adapter->link_active = 0;
1720			if_link_state_change(ifp, LINK_STATE_DOWN);
1721		}
1722	}
1723
1724	return;
1725}
1726
1727/*********************************************************************
1728 *
1729 *  This routine disables all traffic on the adapter by issuing a
1730 *  global reset on the MAC and deallocates TX/RX buffers.
1731 *
1732 **********************************************************************/
1733
/*
 * Stop all traffic: mask interrupts, globally reset the MAC, cancel
 * the periodic and FIFO-workaround callouts, and free the TX/RX ring
 * structures.  Caller must hold the adapter mutex.
 */
static void
em_stop(void *arg)
{
	struct ifnet   *ifp;
	struct adapter * adapter = arg;
	ifp = adapter->ifp;

	mtx_assert(&adapter->mtx, MA_OWNED);

	INIT_DEBUGOUT("em_stop: begin");

	em_disable_intr(adapter);
	em_reset_hw(&adapter->hw);
	callout_stop(&adapter->timer);
	callout_stop(&adapter->tx_fifo_timer);
	em_free_transmit_structures(adapter);
	em_free_receive_structures(adapter);


	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	return;
}
1758
1759
1760/*********************************************************************
1761 *
1762 *  Determine hardware revision.
1763 *
1764 **********************************************************************/
/*
 * Read the PCI IDs, ensure bus-master/memory access are enabled in the
 * PCI command word, and classify the MAC type for the shared code.
 */
static void
em_identify_hardware(struct adapter * adapter)
{
	device_t dev = adapter->dev;

	/* Make sure our PCI config space has the necessary stuff set */
	adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
	      (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
		printf("em%d: Memory Access and/or Bus Master bits were not set!\n",
		       adapter->unit);
		adapter->hw.pci_cmd_word |=
		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
		pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
	}

	/* Save off the information about this board */
	adapter->hw.vendor_id = pci_get_vendor(dev);
	adapter->hw.device_id = pci_get_device(dev);
	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	/* Identify the MAC */
        if (em_set_mac_type(&adapter->hw))
                printf("em%d: Unknown MAC Type\n", adapter->unit);

	/* These MAC types need the PHY init script run after reset. */
	if(adapter->hw.mac_type == em_82541 ||
	   adapter->hw.mac_type == em_82541_rev_2 ||
	   adapter->hw.mac_type == em_82547 ||
	   adapter->hw.mac_type == em_82547_rev_2)
		adapter->hw.phy_init_script = TRUE;

        return;
}
1800
/*
 * Allocate the memory BAR, the IO BAR (on MACs newer than 82543), and
 * the IRQ, and register the interrupt handler.  Returns 0 or ENXIO;
 * resources already acquired are expected to be released by
 * em_free_pci_resources() on the caller's error path.
 */
static int
em_allocate_pci_resources(struct adapter * adapter)
{
	int             val, rid;
	device_t        dev = adapter->dev;

	rid = PCIR_BAR(0);
	adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						     &rid, RF_ACTIVE);
	if (!(adapter->res_memory)) {
		printf("em%d: Unable to allocate bus resource: memory\n",
		       adapter->unit);
		return(ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
	rman_get_bustag(adapter->res_memory);
	adapter->osdep.mem_bus_space_handle =
	rman_get_bushandle(adapter->res_memory);
	adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;


	if (adapter->hw.mac_type > em_82543) {
		/* Figure our where our IO BAR is ? */
		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
			val = pci_read_config(dev, rid, 4);
			if (E1000_BAR_TYPE(val) == E1000_BAR_TYPE_IO) {
				adapter->io_rid = rid;
				break;
			}
			rid += 4;
			/* check for 64bit BAR */
			if (E1000_BAR_MEM_TYPE(val) == E1000_BAR_MEM_TYPE_64BIT)
				rid += 4;
		}
		if (rid >= PCIR_CIS) {
			printf("em%d: Unable to locate IO BAR\n", adapter->unit);
			return (ENXIO);
		}
		adapter->res_ioport = bus_alloc_resource_any(dev,
							     SYS_RES_IOPORT,
							     &adapter->io_rid,
							     RF_ACTIVE);
		if (!(adapter->res_ioport)) {
			printf("em%d: Unable to allocate bus resource: ioport\n",
			       adapter->unit);
			return(ENXIO);
		}
		adapter->hw.io_base = 0;
		adapter->osdep.io_bus_space_tag =
		    rman_get_bustag(adapter->res_ioport);
		adapter->osdep.io_bus_space_handle =
		    rman_get_bushandle(adapter->res_ioport);
	}

	rid = 0x0;
	adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
						        RF_SHAREABLE |
							RF_ACTIVE);
	if (!(adapter->res_interrupt)) {
		printf("em%d: Unable to allocate bus resource: interrupt\n",
		       adapter->unit);
		return(ENXIO);
	}
	/* MPSAFE handler: em_intr() takes the adapter mutex itself. */
	if (bus_setup_intr(dev, adapter->res_interrupt,
			   INTR_TYPE_NET | INTR_MPSAFE,
			   (void (*)(void *)) em_intr, adapter,
			   &adapter->int_handler_tag)) {
		printf("em%d: Error registering interrupt handler!\n",
		       adapter->unit);
		return(ENXIO);
	}

	adapter->hw.back = &adapter->osdep;

	return(0);
}
1877
1878static void
1879em_free_pci_resources(struct adapter * adapter)
1880{
1881	device_t dev = adapter->dev;
1882
1883	if (adapter->res_interrupt != NULL) {
1884		bus_teardown_intr(dev, adapter->res_interrupt,
1885				  adapter->int_handler_tag);
1886		bus_release_resource(dev, SYS_RES_IRQ, 0,
1887				     adapter->res_interrupt);
1888	}
1889	if (adapter->res_memory != NULL) {
1890		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
1891				     adapter->res_memory);
1892	}
1893
1894	if (adapter->res_ioport != NULL) {
1895		bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
1896				     adapter->res_ioport);
1897	}
1898	return;
1899}
1900
1901/*********************************************************************
1902 *
1903 *  Initialize the hardware to a configuration as specified by the
1904 *  adapter structure. The controller is reset, the EEPROM is
1905 *  verified, the MAC address is set, then the shared initialization
1906 *  routines are called.
1907 *
1908 **********************************************************************/
1909static int
1910em_hardware_init(struct adapter * adapter)
1911{
1912	uint16_t rx_buffer_size;
1913
1914        INIT_DEBUGOUT("em_hardware_init: begin");
1915	/* Issue a global reset */
1916	em_reset_hw(&adapter->hw);
1917
1918	/* When hardware is reset, fifo_head is also reset */
1919	adapter->tx_fifo_head = 0;
1920
1921	/* Make sure we have a good EEPROM before we read from it */
1922	if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
1923		printf("em%d: The EEPROM Checksum Is Not Valid\n",
1924		       adapter->unit);
1925		return(EIO);
1926	}
1927
1928	if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
1929		printf("em%d: EEPROM read error while reading part number\n",
1930		       adapter->unit);
1931		return(EIO);
1932	}
1933
1934	/*
1935	 * These parameters control the automatic generation (Tx) and
1936	 * response (Rx) to Ethernet PAUSE frames.
1937	 * - High water mark should allow for at least two frames to be
1938	 *   received after sending an XOFF.
1939	 * - Low water mark works best when it is very near the high water mark.
1940	 *   This allows the receiver to restart by sending XON when it has drained
1941	 *   a bit.  Here we use an arbitary value of 1500 which will restart after
1942	 *   one full frame is pulled from the buffer.  There could be several smaller
1943	 *   frames in the buffer and if so they will not trigger the XON until their
1944	 *   total number reduces the buffer by 1500.
1945	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
1946	 */
1947	rx_buffer_size = ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff) << 10 );
1948
1949	adapter->hw.fc_high_water = rx_buffer_size -
1950	    roundup2(adapter->hw.max_frame_size, 1024);
1951	adapter->hw.fc_low_water = adapter->hw.fc_high_water - 1500;
1952	adapter->hw.fc_pause_time = 0x1000;
1953	adapter->hw.fc_send_xon = TRUE;
1954	adapter->hw.fc = em_fc_full;
1955
1956	if (em_init_hw(&adapter->hw) < 0) {
1957		printf("em%d: Hardware Initialization Failed",
1958		       adapter->unit);
1959		return(EIO);
1960	}
1961
1962	em_check_for_link(&adapter->hw);
1963	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
1964		adapter->link_active = 1;
1965	else
1966		adapter->link_active = 0;
1967
1968	if (adapter->link_active) {
1969		em_get_speed_and_duplex(&adapter->hw,
1970					&adapter->link_speed,
1971					&adapter->link_duplex);
1972	} else {
1973		adapter->link_speed = 0;
1974		adapter->link_duplex = 0;
1975	}
1976
1977	return(0);
1978}
1979
1980/*********************************************************************
1981 *
1982 *  Setup networking device structure and register an interface.
1983 *
1984 **********************************************************************/
static void
em_setup_interface(device_t dev, struct adapter * adapter)
{
	struct ifnet   *ifp;
	INIT_DEBUGOUT("em_setup_interface: begin");

	/* Allocate the ifnet; failure here is fatal during attach. */
	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = 1000000000;	/* 1 Gb/s */
	ifp->if_init =  em_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = em_ioctl;
	ifp->if_start = em_start;
	ifp->if_watchdog = em_watchdog;
	/* Size the send queue to the TX descriptor ring (one slot spare). */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
	IFQ_SET_READY(&ifp->if_snd);

        ether_ifattach(ifp, adapter->hw.mac_addr);

	/* Advertise capabilities after the ethernet attach. */
	ifp->if_capabilities = ifp->if_capenable = 0;

	/* HW checksum offload is only offered on 82543 and newer MACs. */
	if (adapter->hw.mac_type >= em_82543) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_capenable |= IFCAP_HWCSUM;
	}

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	/* Note: only VLAN_MTU is enabled by default, not HWTAGGING. */
	ifp->if_capenable |= IFCAP_VLAN_MTU;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
		     em_media_status);
	if (adapter->hw.media_type == em_media_type_fiber) {
		/* Fiber parts: 1000BASE-SX only. */
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
			    0, NULL);
	} else {
		/* Copper parts: 10/100/1000BASE-T. */
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	}
	/* Default to autoselect. */
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}
2055
2056
2057/*********************************************************************
2058 *
2059 *  Workaround for SmartSpeed on 82541 and 82547 controllers
2060 *
2061 **********************************************************************/
2062static void
2063em_smartspeed(struct adapter *adapter)
2064{
2065        uint16_t phy_tmp;
2066
2067	if(adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
2068	   !adapter->hw.autoneg || !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
2069		return;
2070
2071        if(adapter->smartspeed == 0) {
2072                /* If Master/Slave config fault is asserted twice,
2073                 * we assume back-to-back */
2074                em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2075                if(!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) return;
2076                em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2077                if(phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2078                        em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
2079					&phy_tmp);
2080                        if(phy_tmp & CR_1000T_MS_ENABLE) {
2081                                phy_tmp &= ~CR_1000T_MS_ENABLE;
2082                                em_write_phy_reg(&adapter->hw,
2083                                                    PHY_1000T_CTRL, phy_tmp);
2084                                adapter->smartspeed++;
2085                                if(adapter->hw.autoneg &&
2086                                   !em_phy_setup_autoneg(&adapter->hw) &&
2087				   !em_read_phy_reg(&adapter->hw, PHY_CTRL,
2088                                                       &phy_tmp)) {
2089                                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
2090                                                    MII_CR_RESTART_AUTO_NEG);
2091                                        em_write_phy_reg(&adapter->hw,
2092							 PHY_CTRL, phy_tmp);
2093                                }
2094                        }
2095                }
2096                return;
2097        } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2098                /* If still no link, perhaps using 2/3 pair cable */
2099                em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2100                phy_tmp |= CR_1000T_MS_ENABLE;
2101                em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2102                if(adapter->hw.autoneg &&
2103                   !em_phy_setup_autoneg(&adapter->hw) &&
2104                   !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
2105                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
2106                                    MII_CR_RESTART_AUTO_NEG);
2107                        em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
2108                }
2109        }
2110        /* Restart process after EM_SMARTSPEED_MAX iterations */
2111        if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2112                adapter->smartspeed = 0;
2113
2114	return;
2115}
2116
2117
2118/*
2119 * Manage DMA'able memory.
2120 */
2121static void
2122em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2123{
2124        if (error)
2125                return;
2126        *(bus_addr_t *) arg = segs[0].ds_addr;
2127}
2128
2129static int
2130em_dma_malloc(struct adapter *adapter, bus_size_t size,
2131        struct em_dma_alloc *dma, int mapflags)
2132{
2133        int r;
2134
2135	r = bus_dma_tag_create(NULL,                    /* parent */
2136                               E1000_DBA_ALIGN, 0,      /* alignment, bounds */
2137                               BUS_SPACE_MAXADDR,       /* lowaddr */
2138                               BUS_SPACE_MAXADDR,       /* highaddr */
2139                               NULL, NULL,              /* filter, filterarg */
2140                               size,                    /* maxsize */
2141                               1,                       /* nsegments */
2142                               size,                    /* maxsegsize */
2143                               BUS_DMA_ALLOCNOW,        /* flags */
2144			       NULL,			/* lockfunc */
2145			       NULL,			/* lockarg */
2146                               &dma->dma_tag);
2147        if (r != 0) {
2148                printf("em%d: em_dma_malloc: bus_dma_tag_create failed; "
2149                        "error %u\n", adapter->unit, r);
2150                goto fail_0;
2151        }
2152
2153        r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2154                             BUS_DMA_NOWAIT, &dma->dma_map);
2155        if (r != 0) {
2156                printf("em%d: em_dma_malloc: bus_dmammem_alloc failed; "
2157                        "size %ju, error %d\n", adapter->unit,
2158			(uintmax_t)size, r);
2159                goto fail_2;
2160        }
2161
2162	dma->dma_paddr = 0;
2163        r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2164                            size,
2165                            em_dmamap_cb,
2166                            &dma->dma_paddr,
2167                            mapflags | BUS_DMA_NOWAIT);
2168        if (r != 0 || dma->dma_paddr == 0) {
2169                printf("em%d: em_dma_malloc: bus_dmamap_load failed; "
2170                        "error %u\n", adapter->unit, r);
2171                goto fail_3;
2172        }
2173
2174        return (0);
2175
2176fail_3:
2177        bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2178fail_2:
2179        bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2180        bus_dma_tag_destroy(dma->dma_tag);
2181fail_0:
2182        dma->dma_map = NULL;
2183        dma->dma_tag = NULL;
2184        return (r);
2185}
2186
2187static void
2188em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2189{
2190	if (dma->dma_tag == NULL)
2191		return;
2192	if (dma->dma_map != NULL) {
2193		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2194		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2195		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2196		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2197		dma->dma_map = NULL;
2198	}
2199        bus_dma_tag_destroy(dma->dma_tag);
2200	dma->dma_tag = NULL;
2201}
2202
2203
2204/*********************************************************************
2205 *
2206 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2207 *  the information needed to transmit a packet on the wire.
2208 *
2209 **********************************************************************/
2210static int
2211em_allocate_transmit_structures(struct adapter * adapter)
2212{
2213	if (!(adapter->tx_buffer_area =
2214	      (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2215					     adapter->num_tx_desc, M_DEVBUF,
2216					     M_NOWAIT))) {
2217		printf("em%d: Unable to allocate tx_buffer memory\n",
2218		       adapter->unit);
2219		return ENOMEM;
2220	}
2221
2222	bzero(adapter->tx_buffer_area,
2223	      sizeof(struct em_buffer) * adapter->num_tx_desc);
2224
2225	return 0;
2226}
2227
2228/*********************************************************************
2229 *
2230 *  Allocate and initialize transmit structures.
2231 *
2232 **********************************************************************/
static int
em_setup_transmit_structures(struct adapter * adapter)
{
	struct em_buffer *tx_buffer;
	bus_size_t size;
	int error, i;

        /*
         * Setup DMA descriptor areas.
         */
	/* Tag sized for a max frame rounded up to whole mbuf clusters. */
	size = roundup2(adapter->hw.max_frame_size, MCLBYTES);
	if ((error = bus_dma_tag_create(NULL,           /* parent */
                               1, 0,                    /* alignment, bounds */
                               BUS_SPACE_MAXADDR,       /* lowaddr */
                               BUS_SPACE_MAXADDR,       /* highaddr */
                               NULL, NULL,              /* filter, filterarg */
                               size,                    /* maxsize */
                               EM_MAX_SCATTER,          /* nsegments */
                               size,                    /* maxsegsize */
                               0,                       /* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockarg */
                               &adapter->txtag)) != 0) {
		printf("em%d: Unable to allocate TX DMA tag\n", adapter->unit);
		goto fail;
        }

	/* Allocate the per-descriptor em_buffer array. */
        if ((error = em_allocate_transmit_structures(adapter)) != 0)
		goto fail;

	/* Clear the descriptor ring and create one DMA map per slot. */
        bzero((void *) adapter->tx_desc_base,
              (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
	tx_buffer = adapter->tx_buffer_area;
	for (i = 0; i < adapter->num_tx_desc; i++) {
		error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
		if (error != 0) {
			printf("em%d: Unable to create TX DMA map\n",
			    adapter->unit);
			goto fail;
		}
		tx_buffer++;
	}

	/* Reset ring state: everything is free, nothing in flight. */
        adapter->next_avail_tx_desc = 0;
        adapter->oldest_used_tx_desc = 0;

        /* Set number of descriptors available */
        adapter->num_tx_desc_avail = adapter->num_tx_desc;

        /* Set checksum context */
        adapter->active_checksum_context = OFFLOAD_NONE;
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        return (0);

fail:
	/* em_free_transmit_structures() tolerates partial allocation. */
	em_free_transmit_structures(adapter);
	return (error);
}
2293
2294/*********************************************************************
2295 *
2296 *  Enable transmit unit.
2297 *
2298 **********************************************************************/
static void
em_initialize_transmit_unit(struct adapter * adapter)
{
	u_int32_t       reg_tctl;
	u_int32_t       reg_tipg = 0;
	u_int64_t	bus_addr;

         INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = adapter->txdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, TDBAL, (u_int32_t)bus_addr);
	E1000_WRITE_REG(&adapter->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, TDLEN,
			adapter->num_tx_desc *
			sizeof(struct em_tx_desc));

	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&adapter->hw, TDH, 0);
	E1000_WRITE_REG(&adapter->hw, TDT, 0);


	HW_DEBUGOUT2("Base = %x, Length = %x\n",
		     E1000_READ_REG(&adapter->hw, TDBAL),
		     E1000_READ_REG(&adapter->hw, TDLEN));

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (adapter->hw.mac_type) {
	case em_82542_rev2_0:
        case em_82542_rev2_1:
                reg_tipg = DEFAULT_82542_TIPG_IPGT;
                reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
                break;
        default:
                /* IPGT differs between fiber and copper on 82543+. */
                if (adapter->hw.media_type == em_media_type_fiber)
                        reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
                else
                        reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
                reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
        }

	E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
	E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
	/* Absolute TX interrupt delay only exists on 82540 and newer. */
	if(adapter->hw.mac_type >= em_82540)
		E1000_WRITE_REG(&adapter->hw, TADV,
		    adapter->tx_abs_int_delay.value);

	/* Program the Transmit Control Register */
	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
	/* Multiple request support on 82571 and newer. */
	if (adapter->hw.mac_type >= em_82571)
		reg_tctl |= E1000_TCTL_MULR;
	/*
	 * NOTE(review): compares link_duplex against the literal 1;
	 * confirm this matches the HALF/FULL_DUPLEX encoding produced
	 * by em_get_speed_and_duplex().
	 */
	if (adapter->link_duplex == 1) {
		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	} else {
		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	}
	E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;

	/* Request descriptor-done interrupt delay when a delay is set. */
	if (adapter->tx_int_delay.value > 0)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	return;
}
2367
2368/*********************************************************************
2369 *
2370 *  Free all transmit related data structures.
2371 *
2372 **********************************************************************/
2373static void
2374em_free_transmit_structures(struct adapter * adapter)
2375{
2376        struct em_buffer   *tx_buffer;
2377        int             i;
2378
2379        INIT_DEBUGOUT("free_transmit_structures: begin");
2380
2381        if (adapter->tx_buffer_area != NULL) {
2382                tx_buffer = adapter->tx_buffer_area;
2383                for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2384                        if (tx_buffer->m_head != NULL) {
2385				bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2386				    BUS_DMASYNC_POSTWRITE);
2387				bus_dmamap_unload(adapter->txtag,
2388				    tx_buffer->map);
2389                                m_freem(tx_buffer->m_head);
2390				tx_buffer->m_head = NULL;
2391                        } else if (tx_buffer->map != NULL)
2392				bus_dmamap_unload(adapter->txtag,
2393				    tx_buffer->map);
2394			if (tx_buffer->map != NULL) {
2395				bus_dmamap_destroy(adapter->txtag,
2396				    tx_buffer->map);
2397				tx_buffer->map = NULL;
2398			}
2399                }
2400        }
2401        if (adapter->tx_buffer_area != NULL) {
2402                free(adapter->tx_buffer_area, M_DEVBUF);
2403                adapter->tx_buffer_area = NULL;
2404        }
2405        if (adapter->txtag != NULL) {
2406                bus_dma_tag_destroy(adapter->txtag);
2407                adapter->txtag = NULL;
2408        }
2409        return;
2410}
2411
2412/*********************************************************************
2413 *
2414 *  The offload context needs to be set when we transfer the first
2415 *  packet of a particular protocol (TCP/UDP). We change the
2416 *  context only if the protocol type changes.
2417 *
2418 **********************************************************************/
static void
em_transmit_checksum_setup(struct adapter * adapter,
			   struct mbuf *mp,
			   u_int32_t *txd_upper,
			   u_int32_t *txd_lower)
{
	struct em_context_desc *TXD;
	struct em_buffer *tx_buffer;
	int curr_txd;

	/* First decide what offload the frame wants and return the
	 * per-packet descriptor bits to the caller via txd_upper/lower. */
	if (mp->m_pkthdr.csum_flags) {

		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			/* Context already programmed for TCP: done. */
			if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
				return;
			else
				adapter->active_checksum_context = OFFLOAD_TCP_IP;

		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			/* Context already programmed for UDP: done. */
			if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
				return;
			else
				adapter->active_checksum_context = OFFLOAD_UDP_IP;
		} else {
			/* Flags we do not offload: no descriptor bits. */
			*txd_upper = 0;
			*txd_lower = 0;
			return;
		}
	} else {
		/* No checksum requested at all. */
		*txd_upper = 0;
		*txd_lower = 0;
		return;
	}

	/* If we reach this point, the checksum offload context
	 * needs to be reset.
	 */
	/* NOTE(review): this consumes one TX descriptor without checking
	 * num_tx_desc_avail; presumably the caller guarantees space. */
	curr_txd = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[curr_txd];
	TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];

	/* IP header starts right after the 14-byte Ethernet header. */
	TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
	TXD->lower_setup.ip_fields.ipcso =
		ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
	TXD->lower_setup.ip_fields.ipcse =
		htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);

	/* L4 payload begins after a fixed-size (no options) IP header. */
	TXD->upper_setup.tcp_fields.tucss =
		ETHER_HDR_LEN + sizeof(struct ip);
	TXD->upper_setup.tcp_fields.tucse = htole16(0);

	/* Point the checksum-offset field at the TCP or UDP checksum. */
	if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
		TXD->upper_setup.tcp_fields.tucso =
			ETHER_HDR_LEN + sizeof(struct ip) +
			offsetof(struct tcphdr, th_sum);
	} else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
		TXD->upper_setup.tcp_fields.tucso =
			ETHER_HDR_LEN + sizeof(struct ip) +
			offsetof(struct udphdr, uh_sum);
	}

	TXD->tcp_seg_setup.data = htole32(0);
	TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);

	/* Context descriptors carry no mbuf. */
	tx_buffer->m_head = NULL;

	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;

	return;
}
2497
2498/**********************************************************************
2499 *
2500 *  Examine each tx_buffer in the used queue. If the hardware is done
2501 *  processing the packet then free associated resources. The
2502 *  tx_buffer is put back on the free queue.
2503 *
2504 **********************************************************************/
static void
em_clean_transmit_interrupts(struct adapter * adapter)
{
        int i, num_avail;
        struct em_buffer *tx_buffer;
        struct em_tx_desc   *tx_desc;
	struct ifnet   *ifp = adapter->ifp;

	mtx_assert(&adapter->mtx, MA_OWNED);

	/* Nothing outstanding: the whole ring is already free. */
        if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
                return;

        num_avail = adapter->num_tx_desc_avail;
        i = adapter->oldest_used_tx_desc;

        tx_buffer = &adapter->tx_buffer_area[i];
        tx_desc = &adapter->tx_desc_base[i];

	/* Pull the hardware's writebacks into view before reading DD. */
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_POSTREAD);
	/* Walk forward while the hardware reports Descriptor Done. */
        while (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {

                tx_desc->upper.data = 0;
                num_avail++;

		/* Only descriptors carrying an mbuf end a packet here. */
                if (tx_buffer->m_head) {
			ifp->if_opackets++;
			bus_dmamap_sync(adapter->txtag, tx_buffer->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(adapter->txtag, tx_buffer->map);

                        m_freem(tx_buffer->m_head);
                        tx_buffer->m_head = NULL;
                }

                if (++i == adapter->num_tx_desc)
                        i = 0;

                tx_buffer = &adapter->tx_buffer_area[i];
                tx_desc = &adapter->tx_desc_base[i];
        }
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        adapter->oldest_used_tx_desc = i;

        /*
         * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack
         * that it is OK to send packets.
         * If there are no pending descriptors, clear the timeout. Otherwise,
         * if some descriptors have been freed, restart the timeout.
         */
        if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                if (num_avail == adapter->num_tx_desc)
                        ifp->if_timer = 0;
                else if (num_avail == adapter->num_tx_desc_avail)
                        ifp->if_timer = EM_TX_TIMEOUT;
        }
        adapter->num_tx_desc_avail = num_avail;
        return;
}
2568
2569/*********************************************************************
2570 *
2571 *  Get a buffer from system mbuf buffer pool.
2572 *
2573 **********************************************************************/
static int
em_get_buf(int i, struct adapter *adapter,
           struct mbuf *nmp)
{
        struct mbuf    *mp = nmp;
        struct em_buffer *rx_buffer;
        struct ifnet   *ifp;
	bus_dma_segment_t segs[1];
	int error, nsegs;

        ifp = adapter->ifp;

        if (mp == NULL) {
		/* No recycled mbuf supplied: allocate a fresh cluster. */
                mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
                if (mp == NULL) {
                        adapter->mbuf_cluster_failed++;
                        return(ENOBUFS);
                }
                mp->m_len = mp->m_pkthdr.len = MCLBYTES;
        } else {
		/* Recycle the caller's mbuf: reset it to a pristine,
		 * full-cluster state. */
                mp->m_len = mp->m_pkthdr.len = MCLBYTES;
                mp->m_data = mp->m_ext.ext_buf;
                mp->m_next = NULL;
        }

	/* For standard MTU, shift the payload so the IP header is
	 * 32-bit aligned after the 14-byte Ethernet header. */
        if (ifp->if_mtu <= ETHERMTU) {
                m_adj(mp, ETHER_ALIGN);
        }

        rx_buffer = &adapter->rx_buffer_area[i];

        /*
         * Using memory from the mbuf cluster pool, invoke the
         * bus_dma machinery to arrange the memory mapping.
         */
        error = bus_dmamap_load_mbuf_sg(adapter->rxtag, rx_buffer->map,
	    mp, segs, &nsegs, 0);
        if (error != 0) {
                m_free(mp);
                return(error);
        }
	/* If nsegs is wrong then the stack is corrupt */
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	/* Hand the buffer's physical address to the RX descriptor. */
        rx_buffer->m_head = mp;
        adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
        bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);

        return(0);
}
2623
2624/*********************************************************************
2625 *
2626 *  Allocate memory for rx_buffer structures. Since we use one
2627 *  rx_buffer per received packet, the maximum number of rx_buffer's
2628 *  that we'll need is equal to the number of receive descriptors
2629 *  that we've allocated.
2630 *
2631 **********************************************************************/
static int
em_allocate_receive_structures(struct adapter * adapter)
{
        int             i, error;
        struct em_buffer *rx_buffer;

	/* One em_buffer per RX descriptor, zero-initialized. */
        if (!(adapter->rx_buffer_area =
              (struct em_buffer *) malloc(sizeof(struct em_buffer) *
                                          adapter->num_rx_desc, M_DEVBUF,
                                          M_NOWAIT))) {
                printf("em%d: Unable to allocate rx_buffer memory\n",
                       adapter->unit);
                return(ENOMEM);
        }

        bzero(adapter->rx_buffer_area,
              sizeof(struct em_buffer) * adapter->num_rx_desc);

	/* Tag for single-segment, cluster-sized receive buffers. */
        error = bus_dma_tag_create(NULL,                /* parent */
                               1, 0,                    /* alignment, bounds */
                               BUS_SPACE_MAXADDR,       /* lowaddr */
                               BUS_SPACE_MAXADDR,       /* highaddr */
                               NULL, NULL,              /* filter, filterarg */
                               MCLBYTES,                /* maxsize */
                               1,                       /* nsegments */
                               MCLBYTES,                /* maxsegsize */
                               BUS_DMA_ALLOCNOW,        /* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockarg */
                               &adapter->rxtag);
        if (error != 0) {
                printf("em%d: em_allocate_receive_structures: "
                        "bus_dma_tag_create failed; error %u\n",
                       adapter->unit, error);
                goto fail;
        }

	/* Create a DMA map for every descriptor slot... */
        rx_buffer = adapter->rx_buffer_area;
        for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
                error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
                                          &rx_buffer->map);
                if (error != 0) {
                        printf("em%d: em_allocate_receive_structures: "
                                "bus_dmamap_create failed; error %u\n",
                                adapter->unit, error);
                        goto fail;
                }
        }

	/* ...then populate every slot with a fresh mbuf cluster. */
        for (i = 0; i < adapter->num_rx_desc; i++) {
                error = em_get_buf(i, adapter, NULL);
		if (error != 0)
			goto fail;
        }
        bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        return(0);

fail:
	/* Tolerates partial allocation; releases whatever was set up. */
	em_free_receive_structures(adapter);
        return (error);
}
2695
2696/*********************************************************************
2697 *
2698 *  Allocate and initialize receive structures.
2699 *
2700 **********************************************************************/
2701static int
2702em_setup_receive_structures(struct adapter * adapter)
2703{
2704	bzero((void *) adapter->rx_desc_base,
2705              (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2706
2707	if (em_allocate_receive_structures(adapter))
2708		return ENOMEM;
2709
2710	/* Setup our descriptor pointers */
2711        adapter->next_rx_desc_to_check = 0;
2712	return(0);
2713}
2714
2715/*********************************************************************
2716 *
2717 *  Enable receive unit.
2718 *
2719 **********************************************************************/
static void
em_initialize_receive_unit(struct adapter * adapter)
{
	u_int32_t       reg_rctl;
	u_int32_t       reg_rxcsum;
	struct ifnet    *ifp;
	u_int64_t	bus_addr;

        INIT_DEBUGOUT("em_initialize_receive_unit: begin");
	ifp = adapter->ifp;

	/* Make sure receives are disabled while setting up the descriptor ring */
	E1000_WRITE_REG(&adapter->hw, RCTL, 0);

	/* Set the Receive Delay Timer Register */
	/* FPDB keeps the chip from holding on to partial descriptor blocks. */
	E1000_WRITE_REG(&adapter->hw, RDTR,
			adapter->rx_int_delay.value | E1000_RDT_FPDB);

	/* Absolute delay and interrupt throttling exist on 82540 and later. */
	if(adapter->hw.mac_type >= em_82540) {
		E1000_WRITE_REG(&adapter->hw, RADV,
		    adapter->rx_abs_int_delay.value);

                /* Set the interrupt throttling rate.  Value is calculated
                 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */
#define MAX_INTS_PER_SEC        8000
#define DEFAULT_ITR             1000000000/(MAX_INTS_PER_SEC * 256)
                E1000_WRITE_REG(&adapter->hw, ITR, DEFAULT_ITR);
        }

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, RDBAL, (u_int32_t)bus_addr);
	E1000_WRITE_REG(&adapter->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
			sizeof(struct em_rx_desc));

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	E1000_WRITE_REG(&adapter->hw, RDH, 0);
	E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);

	/* Setup the Receive Control Register */
	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* TBI compatibility mode needs to see bad packets (SBP). */
	if (adapter->hw.tbi_compatibility_on == TRUE)
		reg_rctl |= E1000_RCTL_SBP;


	/* Hardware only supports power-of-two buffer sizes 2K..16K. */
	switch (adapter->rx_buffer_len) {
	default:
	case EM_RXBUFFER_2048:
		reg_rctl |= E1000_RCTL_SZ_2048;
		break;
	case EM_RXBUFFER_4096:
		reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_8192:
		reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_16384:
		reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	/* Accept long (jumbo) packets when the MTU requires them. */
	if (ifp->if_mtu > ETHERMTU)
		reg_rctl |= E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac_type >= em_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
	}

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);

	return;
}
2801
2802/*********************************************************************
2803 *
2804 *  Free receive related data structures.
2805 *
2806 **********************************************************************/
2807static void
2808em_free_receive_structures(struct adapter *adapter)
2809{
2810        struct em_buffer   *rx_buffer;
2811        int             i;
2812
2813        INIT_DEBUGOUT("free_receive_structures: begin");
2814
2815        if (adapter->rx_buffer_area != NULL) {
2816                rx_buffer = adapter->rx_buffer_area;
2817                for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2818			if (rx_buffer->m_head != NULL) {
2819				bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
2820				    BUS_DMASYNC_POSTREAD);
2821				bus_dmamap_unload(adapter->rxtag,
2822				    rx_buffer->map);
2823				m_freem(rx_buffer->m_head);
2824				rx_buffer->m_head = NULL;
2825			} else if (rx_buffer->map != NULL)
2826				bus_dmamap_unload(adapter->rxtag,
2827				    rx_buffer->map);
2828                        if (rx_buffer->map != NULL) {
2829				bus_dmamap_destroy(adapter->rxtag,
2830				    rx_buffer->map);
2831				rx_buffer->map = NULL;
2832			}
2833                }
2834        }
2835        if (adapter->rx_buffer_area != NULL) {
2836                free(adapter->rx_buffer_area, M_DEVBUF);
2837                adapter->rx_buffer_area = NULL;
2838        }
2839        if (adapter->rxtag != NULL) {
2840                bus_dma_tag_destroy(adapter->rxtag);
2841                adapter->rxtag = NULL;
2842        }
2843        return;
2844}
2845
2846/*********************************************************************
2847 *
2848 *  This routine executes in interrupt context. It replenishes
2849 *  the mbufs in the descriptor and sends data which has been
2850 *  dma'ed into host memory to upper layer.
2851 *
2852 *  We loop at most count times if count is > 0, or until done if
2853 *  count < 0.
2854 *
2855 *********************************************************************/
static void
em_process_receive_interrupts(struct adapter * adapter, int count)
{
	struct ifnet        *ifp;
	struct mbuf         *mp;
	u_int8_t            accept_frame = 0;
 	u_int8_t            eop = 0;
	u_int16_t           len, desc_len, prev_len_adj;
	int                 i;

	/* Pointer to the receive descriptor being examined. */
	struct em_rx_desc   *current_desc;

	/* Caller must hold the adapter lock; it is dropped around if_input(). */
	mtx_assert(&adapter->mtx, MA_OWNED);

	ifp = adapter->ifp;
	i = adapter->next_rx_desc_to_check;
        current_desc = &adapter->rx_desc_base[i];
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	/* Nothing to do unless the hardware has completed a descriptor. */
	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
		return;
	}

	/* count < 0 means "until done"; count only decrements on EOP below. */
	while ((current_desc->status & E1000_RXD_STAT_DD) &&
		    (count != 0) &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		struct mbuf *m = NULL;

		mp = adapter->rx_buffer_area[i].m_head;
		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(adapter->rxtag,
		    adapter->rx_buffer_area[i].map);

		accept_frame = 1;
		prev_len_adj = 0;
                desc_len = le16toh(current_desc->length);
		if (current_desc->status & E1000_RXD_STAT_EOP) {
			/*
			 * Last fragment of a frame: strip the trailing
			 * CRC.  If this descriptor holds fewer than
			 * ETHER_CRC_LEN bytes, the remainder of the CRC
			 * must be trimmed from the previous fragment
			 * (prev_len_adj) when chaining below.
			 */
			count--;
			eop = 1;
			if (desc_len < ETHER_CRC_LEN) {
                                len = 0;
                                prev_len_adj = ETHER_CRC_LEN - desc_len;
                        }
                        else {
                                len = desc_len - ETHER_CRC_LEN;
                        }
		} else {
			eop = 0;
			len = desc_len;
		}

		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			/*
			 * TBI compatibility workaround: frames flagged
			 * with an error may still be acceptable; let the
			 * shared code (TBI_ACCEPT) decide and adjust the
			 * statistics accordingly.
			 */
			u_int8_t            last_byte;
			u_int32_t           pkt_len = desc_len;

			if (adapter->fmp != NULL)
				pkt_len += adapter->fmp->m_pkthdr.len;

			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);

			if (TBI_ACCEPT(&adapter->hw, current_desc->status,
				       current_desc->errors,
				       pkt_len, last_byte)) {
				em_tbi_adjust_stats(&adapter->hw,
						    &adapter->stats,
						    pkt_len,
						    adapter->hw.mac_addr);
				if (len > 0) len--;
			}
			else {
				accept_frame = 0;
			}
		}

		if (accept_frame) {

			/*
			 * Refill this ring slot with a fresh mbuf.  On
			 * failure, recycle the old mbuf back into the
			 * slot and drop the partially assembled frame.
			 */
			if (em_get_buf(i, adapter, NULL) == ENOBUFS) {
				adapter->dropped_pkts++;
				em_get_buf(i, adapter, mp);
				if (adapter->fmp != NULL)
					m_freem(adapter->fmp);
				adapter->fmp = NULL;
				adapter->lmp = NULL;
				break;
			}

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			/* fmp = first mbuf of the frame, lmp = last so far. */
			if (adapter->fmp == NULL) {
				mp->m_pkthdr.len = len;
				adapter->fmp = mp;	 /* Store the first mbuf */
				adapter->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				/*
                                 * Adjust length of previous mbuf in chain if we
                                 * received less than 4 bytes in the last descriptor.
                                 */
				if (prev_len_adj > 0) {
					adapter->lmp->m_len -= prev_len_adj;
					adapter->fmp->m_pkthdr.len -= prev_len_adj;
				}
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

                        if (eop) {
                                adapter->fmp->m_pkthdr.rcvif = ifp;
				ifp->if_ipackets++;
                                em_receive_checksum(adapter, current_desc,
                                                    adapter->fmp);
#ifndef __NO_STRICT_ALIGNMENT
				/* Realign jumbo-frame payload; may drop it. */
				if (ifp->if_mtu > ETHERMTU &&
				    em_fixup_rx(adapter) != 0)
					goto skip;

#endif
                                if (current_desc->status & E1000_RXD_STAT_VP)
					VLAN_INPUT_TAG(ifp, adapter->fmp,
					    (le16toh(current_desc->special) &
					    E1000_RXD_SPC_VLAN_MASK));
#ifndef __NO_STRICT_ALIGNMENT
skip:
#endif
				m = adapter->fmp;
				adapter->fmp = NULL;
				adapter->lmp = NULL;
                        }
		} else {
			/* Bad frame: recycle its buffer and toss any chain. */
			adapter->dropped_pkts++;
			em_get_buf(i, adapter, mp);
			if (adapter->fmp != NULL)
				m_freem(adapter->fmp);
			adapter->fmp = NULL;
			adapter->lmp = NULL;
		}

		/* Zero out the receive descriptors status  */
		current_desc->status = 0;
		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
                E1000_WRITE_REG(&adapter->hw, RDT, i);

                /* Advance our pointers to the next descriptor */
		if (++i == adapter->num_rx_desc)
			i = 0;
		if (m != NULL) {
			/*
			 * Pass the frame up without the lock held; reload
			 * the ring index afterwards since if_input() may
			 * re-enter the driver.
			 */
			adapter->next_rx_desc_to_check = i;
			EM_UNLOCK(adapter);
			(*ifp->if_input)(ifp, m);
			EM_LOCK(adapter);
			i = adapter->next_rx_desc_to_check;
		}
		current_desc = &adapter->rx_desc_base[i];
	}
	adapter->next_rx_desc_to_check = i;
	return;
}
3022
#ifndef __NO_STRICT_ALIGNMENT
/*
 * When jumbo frames are enabled we should realign the entire payload on
 * architectures with strict alignment requirements. This is a serious design
 * mistake of the 8254x, as it nullifies the benefit of DMA operations. The
 * 8254x only allows the RX buffer size to be 2048/4096/8192/16384. What we
 * really want is 2048 - ETHER_ALIGN, to align the payload. On architectures
 * without strict alignment restrictions the 8254x still performs unaligned
 * memory accesses, which reduce performance as well.
 * To avoid copying an entire frame to realign it, we allocate a new mbuf and
 * copy the ethernet header into the new mbuf. The new mbuf is then prepended
 * to the existing mbuf chain.
 *
 * Be aware that the best performance of the 8254x is achieved only when
 * jumbo frames are not used at all on architectures with strict alignment.
 */
static int
em_fixup_rx(struct adapter *adapter)
{
	struct mbuf *m, *n;
	int error;

	error = 0;
	m = adapter->fmp;
	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		/*
		 * Enough room left in the buffer: slide the whole first
		 * fragment forward by ETHER_HDR_LEN so that the payload
		 * behind the ethernet header ends up aligned.
		 */
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
	} else {
		/*
		 * No room to slide: allocate a new header mbuf, copy just
		 * the ethernet header into it, and prepend it to the chain
		 * (moving the pkthdr to the new first mbuf).
		 */
		MGETHDR(n, M_DONTWAIT, MT_DATA);
		if (n != NULL) {
			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
			m->m_data += ETHER_HDR_LEN;
			m->m_len -= ETHER_HDR_LEN;
			n->m_len = ETHER_HDR_LEN;
			M_MOVE_PKTHDR(n, m);
			n->m_next = m;
			adapter->fmp = n;
		} else {
			/* No mbuf available: the frame must be dropped. */
			adapter->dropped_pkts++;
			m_freem(adapter->fmp);
			adapter->fmp = NULL;
			error = ENOMEM;
		}
	}

	return (error);
}
3069#endif
3070
3071/*********************************************************************
3072 *
3073 *  Verify that the hardware indicated that the checksum is valid.
3074 *  Inform the stack about the status of checksum so that stack
3075 *  doesn't spend time verifying the checksum.
3076 *
3077 *********************************************************************/
3078static void
3079em_receive_checksum(struct adapter *adapter,
3080		    struct em_rx_desc *rx_desc,
3081		    struct mbuf *mp)
3082{
3083	/* 82543 or newer only */
3084	if ((adapter->hw.mac_type < em_82543) ||
3085	    /* Ignore Checksum bit is set */
3086	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3087		mp->m_pkthdr.csum_flags = 0;
3088		return;
3089	}
3090
3091	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3092		/* Did it pass? */
3093		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3094			/* IP Checksum Good */
3095			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3096			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3097
3098		} else {
3099			mp->m_pkthdr.csum_flags = 0;
3100		}
3101	}
3102
3103	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3104		/* Did it pass? */
3105		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3106			mp->m_pkthdr.csum_flags |=
3107			(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3108			mp->m_pkthdr.csum_data = htons(0xffff);
3109		}
3110	}
3111
3112	return;
3113}
3114
3115
3116static void
3117em_enable_vlans(struct adapter *adapter)
3118{
3119	uint32_t ctrl;
3120
3121	E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
3122
3123	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3124	ctrl |= E1000_CTRL_VME;
3125	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3126
3127	return;
3128}
3129
3130static void
3131em_disable_vlans(struct adapter *adapter)
3132{
3133	uint32_t ctrl;
3134
3135	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3136	ctrl &= ~E1000_CTRL_VME;
3137	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3138
3139	return;
3140}
3141
3142static void
3143em_enable_intr(struct adapter * adapter)
3144{
3145	E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
3146	return;
3147}
3148
3149static void
3150em_disable_intr(struct adapter *adapter)
3151{
3152	/*
3153	 * The first version of 82542 had an errata where when link was forced it
3154	 * would stay up even up even if the cable was disconnected.  Sequence errors
3155	 * were used to detect the disconnect and then the driver would unforce the link.
3156	 * This code in the in the ISR.  For this to work correctly the Sequence error
3157	 * interrupt had to be enabled all the time.
3158	 */
3159
3160	if (adapter->hw.mac_type == em_82542_rev2_0)
3161	    E1000_WRITE_REG(&adapter->hw, IMC,
3162	        (0xffffffff & ~E1000_IMC_RXSEQ));
3163	else
3164	    E1000_WRITE_REG(&adapter->hw, IMC,
3165	        0xffffffff);
3166	return;
3167}
3168
3169static int
3170em_is_valid_ether_addr(u_int8_t *addr)
3171{
3172        char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3173
3174        if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3175                return (FALSE);
3176        }
3177
3178        return(TRUE);
3179}
3180
3181void
3182em_write_pci_cfg(struct em_hw *hw,
3183		      uint32_t reg,
3184		      uint16_t *value)
3185{
3186	pci_write_config(((struct em_osdep *)hw->back)->dev, reg,
3187			 *value, 2);
3188}
3189
3190void
3191em_read_pci_cfg(struct em_hw *hw, uint32_t reg,
3192		     uint16_t *value)
3193{
3194	*value = pci_read_config(((struct em_osdep *)hw->back)->dev,
3195				 reg, 2);
3196	return;
3197}
3198
3199void
3200em_pci_set_mwi(struct em_hw *hw)
3201{
3202        pci_write_config(((struct em_osdep *)hw->back)->dev,
3203                         PCIR_COMMAND,
3204                         (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
3205        return;
3206}
3207
3208void
3209em_pci_clear_mwi(struct em_hw *hw)
3210{
3211        pci_write_config(((struct em_osdep *)hw->back)->dev,
3212                         PCIR_COMMAND,
3213                         (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
3214        return;
3215}
3216
3217/*********************************************************************
3218* 82544 Coexistence issue workaround.
3219*    There are 2 issues.
3220*       1. Transmit Hang issue.
3221*    To detect this issue, following equation can be used...
3222*          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3223*          If SUM[3:0] is in between 1 to 4, we will have this issue.
3224*
3225*       2. DAC issue.
3226*    To detect this issue, following equation can be used...
3227*          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3228*          If SUM[3:0] is in between 9 to c, we will have this issue.
3229*
3230*
3231*    WORKAROUND:
3232*          Make sure we do not have ending address as 1,2,3,4(Hang) or 9,a,b,c (DAC)
3233*
3234*** *********************************************************************/
3235static u_int32_t
3236em_fill_descriptors (bus_addr_t address,
3237                              u_int32_t length,
3238                              PDESC_ARRAY desc_array)
3239{
3240        /* Since issue is sensitive to length and address.*/
3241        /* Let us first check the address...*/
3242        u_int32_t safe_terminator;
3243        if (length <= 4) {
3244                desc_array->descriptor[0].address = address;
3245                desc_array->descriptor[0].length = length;
3246                desc_array->elements = 1;
3247                return desc_array->elements;
3248        }
3249        safe_terminator = (u_int32_t)((((u_int32_t)address & 0x7) + (length & 0xF)) & 0xF);
3250        /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
3251        if (safe_terminator == 0   ||
3252        (safe_terminator > 4   &&
3253        safe_terminator < 9)   ||
3254        (safe_terminator > 0xC &&
3255        safe_terminator <= 0xF)) {
3256                desc_array->descriptor[0].address = address;
3257                desc_array->descriptor[0].length = length;
3258                desc_array->elements = 1;
3259                return desc_array->elements;
3260        }
3261
3262        desc_array->descriptor[0].address = address;
3263        desc_array->descriptor[0].length = length - 4;
3264        desc_array->descriptor[1].address = address + (length - 4);
3265        desc_array->descriptor[1].length = 4;
3266        desc_array->elements = 2;
3267        return desc_array->elements;
3268}
3269
3270/**********************************************************************
3271 *
3272 *  Update the board statistics counters.
3273 *
3274 **********************************************************************/
static void
em_update_stats_counters(struct adapter *adapter)
{
	struct ifnet   *ifp;

	/*
	 * Symbol/sequence error counters are only sampled on copper
	 * media, or on other media while link is up.
	 */
	if(adapter->hw.media_type == em_media_type_copper ||
	   (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
		adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
	}
	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
	adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);

	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
	adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
	adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
	adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
	adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
	adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);

	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
	adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);

	adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
	adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
	adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
	adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);

	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);

	/* These additional counters only exist on 82543 and later. */
	if (adapter->hw.mac_type >= em_82543) {
		adapter->stats.algnerrc +=
		E1000_READ_REG(&adapter->hw, ALGNERRC);
		adapter->stats.rxerrc +=
		E1000_READ_REG(&adapter->hw, RXERRC);
		adapter->stats.tncrs +=
		E1000_READ_REG(&adapter->hw, TNCRS);
		adapter->stats.cexterr +=
		E1000_READ_REG(&adapter->hw, CEXTERR);
		adapter->stats.tsctc +=
		E1000_READ_REG(&adapter->hw, TSCTC);
		adapter->stats.tsctfc +=
		E1000_READ_REG(&adapter->hw, TSCTFC);
	}
	/* Fold the hardware counters into the interface statistics. */
	ifp = adapter->ifp;

	ifp->if_collisions = adapter->stats.colc;

	/* Rx Errors */
	ifp->if_ierrors =
	adapter->dropped_pkts +
	adapter->stats.rxerrc +
	adapter->stats.crcerrs +
	adapter->stats.algnerrc +
	adapter->stats.rlec +
	adapter->stats.mpc + adapter->stats.cexterr;

	/* Tx Errors */
	ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol +
	    adapter->watchdog_events;

}
3373
3374
3375/**********************************************************************
3376 *
3377 *  This routine is called only when em_display_debug_stats is enabled.
3378 *  This routine provides a way to take a look at important statistics
3379 *  maintained by the driver and hardware.
3380 *
3381 **********************************************************************/
static void
em_print_debug_info(struct adapter *adapter)
{
	/* Triggered via the debug_info sysctl; dumps registers/counters. */
	int unit = adapter->unit;
	uint8_t *hw_addr = adapter->hw.hw_addr;

	printf("em%d: Adapter hardware address = %p \n", unit, hw_addr);
	printf("em%d: CTRL = 0x%x RCTL = 0x%x \n", unit,
	    E1000_READ_REG(&adapter->hw, CTRL),
	    E1000_READ_REG(&adapter->hw, RCTL));
	printf("em%d: Packet buffer = Tx=%dk Rx=%dk \n", unit,
	    ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff0000) >> 16),\
	    (E1000_READ_REG(&adapter->hw, PBA) & 0xffff) );
	printf("em%d: Flow control watermarks high = %d low = %d\n", unit,
	    adapter->hw.fc_high_water,
	    adapter->hw.fc_low_water);
	printf("em%d: tx_int_delay = %d, tx_abs_int_delay = %d\n", unit,
	    E1000_READ_REG(&adapter->hw, TIDV),
	    E1000_READ_REG(&adapter->hw, TADV));
	printf("em%d: rx_int_delay = %d, rx_abs_int_delay = %d\n", unit,
	    E1000_READ_REG(&adapter->hw, RDTR),
	    E1000_READ_REG(&adapter->hw, RADV));
	printf("em%d: fifo workaround = %lld, fifo_reset_count = %lld\n",
	    unit, (long long)adapter->tx_fifo_wrk_cnt,
	    (long long)adapter->tx_fifo_reset_cnt);
	printf("em%d: hw tdh = %d, hw tdt = %d\n", unit,
	    E1000_READ_REG(&adapter->hw, TDH),
	    E1000_READ_REG(&adapter->hw, TDT));
	printf("em%d: Num Tx descriptors avail = %d\n", unit,
	    adapter->num_tx_desc_avail);
	printf("em%d: Tx Descriptors not avail1 = %ld\n", unit,
	    adapter->no_tx_desc_avail1);
	printf("em%d: Tx Descriptors not avail2 = %ld\n", unit,
	    adapter->no_tx_desc_avail2);
	printf("em%d: Std mbuf failed = %ld\n", unit,
	    adapter->mbuf_alloc_failed);
	printf("em%d: Std mbuf cluster failed = %ld\n", unit,
	    adapter->mbuf_cluster_failed);
	printf("em%d: Driver dropped packets = %ld\n", unit,
	    adapter->dropped_pkts);

	return;
}
3425
static void
em_print_hw_stats(struct adapter *adapter)
{
        /* Triggered via the stats sysctl; dumps accumulated hw counters. */
        int unit = adapter->unit;

        printf("em%d: Excessive collisions = %lld\n", unit,
               (long long)adapter->stats.ecol);
        printf("em%d: Symbol errors = %lld\n", unit,
               (long long)adapter->stats.symerrs);
        printf("em%d: Sequence errors = %lld\n", unit,
               (long long)adapter->stats.sec);
        printf("em%d: Defer count = %lld\n", unit,
               (long long)adapter->stats.dc);

        printf("em%d: Missed Packets = %lld\n", unit,
               (long long)adapter->stats.mpc);
        printf("em%d: Receive No Buffers = %lld\n", unit,
               (long long)adapter->stats.rnbc);
        printf("em%d: Receive length errors = %lld\n", unit,
               (long long)adapter->stats.rlec);
        printf("em%d: Receive errors = %lld\n", unit,
               (long long)adapter->stats.rxerrc);
        printf("em%d: Crc errors = %lld\n", unit,
               (long long)adapter->stats.crcerrs);
        printf("em%d: Alignment errors = %lld\n", unit,
               (long long)adapter->stats.algnerrc);
        printf("em%d: Carrier extension errors = %lld\n", unit,
               (long long)adapter->stats.cexterr);
	printf("em%d: RX overruns = %ld\n", unit, adapter->rx_overruns);
	printf("em%d: watchdog timeouts = %ld\n", unit,
		adapter->watchdog_events);

        printf("em%d: XON Rcvd = %lld\n", unit,
               (long long)adapter->stats.xonrxc);
        printf("em%d: XON Xmtd = %lld\n", unit,
               (long long)adapter->stats.xontxc);
        printf("em%d: XOFF Rcvd = %lld\n", unit,
               (long long)adapter->stats.xoffrxc);
        printf("em%d: XOFF Xmtd = %lld\n", unit,
               (long long)adapter->stats.xofftxc);

        printf("em%d: Good Packets Rcvd = %lld\n", unit,
               (long long)adapter->stats.gprc);
        printf("em%d: Good Packets Xmtd = %lld\n", unit,
               (long long)adapter->stats.gptc);

        return;
}
3474
3475static int
3476em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3477{
3478        int error;
3479        int result;
3480        struct adapter *adapter;
3481
3482        result = -1;
3483        error = sysctl_handle_int(oidp, &result, 0, req);
3484
3485        if (error || !req->newptr)
3486                return (error);
3487
3488        if (result == 1) {
3489                adapter = (struct adapter *)arg1;
3490                em_print_debug_info(adapter);
3491        }
3492
3493        return error;
3494}
3495
3496
3497static int
3498em_sysctl_stats(SYSCTL_HANDLER_ARGS)
3499{
3500        int error;
3501        int result;
3502        struct adapter *adapter;
3503
3504        result = -1;
3505        error = sysctl_handle_int(oidp, &result, 0, req);
3506
3507        if (error || !req->newptr)
3508                return (error);
3509
3510        if (result == 1) {
3511                adapter = (struct adapter *)arg1;
3512                em_print_hw_stats(adapter);
3513        }
3514
3515        return error;
3516}
3517
/*
 * Sysctl handler for the interrupt-delay tunables: reports/updates the
 * value in microseconds, converting to device ticks before writing the
 * register, and applies per-register quirks.
 */
static int
em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
{
	struct em_int_delay_info *info;
	struct adapter *adapter;
	u_int32_t regval;
	int error;
	int usecs;
	int ticks;

	info = (struct em_int_delay_info *)arg1;
	usecs = info->value;
	error = sysctl_handle_int(oidp, &usecs, 0, req);
	if (error != 0 || req->newptr == NULL)
		return error;
	/* The delay field is 16 bits wide, in device ticks. */
	if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
		return EINVAL;
	info->value = usecs;
	ticks = E1000_USECS_TO_TICKS(usecs);

	adapter = info->adapter;

	EM_LOCK(adapter);
	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
	regval = (regval & ~0xffff) | (ticks & 0xffff);
	/* Handle a few special cases. */
	switch (info->offset) {
	case E1000_RDTR:
	case E1000_82542_RDTR:
		/* Preserve the FPDB bit, matching the receive-unit init. */
		regval |= E1000_RDT_FPDB;
		break;
	case E1000_TIDV:
	case E1000_82542_TIDV:
		/* A zero tx delay also disables IDE in the tx descriptors. */
		if (ticks == 0) {
			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
			/* Don't write 0 into the TIDV register. */
			regval++;
		} else
			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
		break;
	}
	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
	EM_UNLOCK(adapter);
	return 0;
}
3563
/*
 * Register one interrupt-delay sysctl node: record the target register
 * offset and initial value in *info, then attach em_sysctl_int_delay
 * as the handler under the device's sysctl tree.
 */
static void
em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
    const char *description, struct em_int_delay_info *info,
    int offset, int value)
{
	info->adapter = adapter;
	info->offset = offset;
	info->value = value;
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
	    info, 0, em_sysctl_int_delay, "I", description);
}
3577