/* if_em.c revision 153512 */
1/**************************************************************************
2
3Copyright (c) 2001-2005, Intel Corporation
4All rights reserved.
5
6Redistribution and use in source and binary forms, with or without
7modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10    this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13    notice, this list of conditions and the following disclaimer in the
14    documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17    contributors may be used to endorse or promote products derived from
18    this software without specific prior written permission.
19
20THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30POSSIBILITY OF SUCH DAMAGE.
31
32***************************************************************************/
33
34/*$FreeBSD: head/sys/dev/em/if_em.c 153512 2005-12-18 18:24:27Z glebius $*/
35
36#ifdef HAVE_KERNEL_OPTION_HEADERS
37#include "opt_device_polling.h"
38#endif
39
40#include <dev/em/if_em.h>
41
/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
/* NOTE(review): appears intended to be toggled at runtime (e.g. from a
 * debugger); no code in this chunk writes it -- confirm against the
 * stats-printing path. */
int             em_display_debug_stats = 0;

/*********************************************************************
 *  Driver version
 *********************************************************************/

/* Appended to the branding string by em_probe() to form the device
 * description. */
char em_driver_version[] = "Version - 3.2.18";
52
53
54/*********************************************************************
55 *  PCI Device ID Table
56 *
57 *  Used by probe to select devices to load on
58 *  Last field stores an index into em_strings
59 *  Last entry must be all 0s
60 *
61 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
62 *********************************************************************/
63
static em_vendor_info_t em_vendor_info_array[] =
{
        /* Intel(R) PRO/1000 Network Connection */
        /* 82540 */
        { 0x8086, E1000_DEV_ID_82540EM,             PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82540EM_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82540EP,             PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82540EP_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82540EP_LP,          PCI_ANY_ID, PCI_ANY_ID, 0},

        /* 82541 */
        { 0x8086, E1000_DEV_ID_82541EI,             PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541ER,             PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541ER_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541EI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541GI,             PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541GI_LF,          PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541GI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},

        /* 82542 */
        { 0x8086, E1000_DEV_ID_82542,               PCI_ANY_ID, PCI_ANY_ID, 0},

        /* 82543 */
        { 0x8086, E1000_DEV_ID_82543GC_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82543GC_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},

        /* 82544 */
        { 0x8086, E1000_DEV_ID_82544EI_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82544EI_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82544GC_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82544GC_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},

        /* 82545 */
        { 0x8086, E1000_DEV_ID_82545EM_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82545EM_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82545GM_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82545GM_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82545GM_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},

        /* 82546 */
        { 0x8086, E1000_DEV_ID_82546EB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546EB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_PCIE,        PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},

        /* 82547 (needs the TX FIFO workaround, see em_82547_* helpers) */
        { 0x8086, E1000_DEV_ID_82547EI,             PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82547EI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82547GI,             PCI_ANY_ID, PCI_ANY_ID, 0},

        /* 82571 */
	{ 0x8086, E1000_DEV_ID_82571EB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},

        /* 82572 */
	{ 0x8086, E1000_DEV_ID_82572EI_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},

        /* 82573 */
        { 0x8086, E1000_DEV_ID_82573E,              PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82573E_IAMT,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82573L,              PCI_ANY_ID, PCI_ANY_ID, 0},

        /* required last entry */
        { 0, 0, 0, 0, 0}
};
125
126/*********************************************************************
127 *  Table of branding strings for all supported NICs.
128 *********************************************************************/
129
static char *em_strings[] = {
	/* Index 0: the only branding string; every table entry above uses it. */
	"Intel(R) PRO/1000 Network Connection"
};
133
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
/* Newbus device interface entry points */
static int  em_probe(device_t);
static int  em_attach(device_t);
static int  em_detach(device_t);
static int  em_shutdown(device_t);
/* Interrupt handler and ifnet entry points */
static void em_intr(void *);
static void em_start(struct ifnet *);
static int  em_ioctl(struct ifnet *, u_long, caddr_t);
static void em_watchdog(struct ifnet *);
static void em_init(void *);
static void em_init_locked(struct adapter *);
static void em_stop(void *);
static void em_media_status(struct ifnet *, struct ifmediareq *);
static int  em_media_change(struct ifnet *);
/* Hardware/resource bring-up helpers */
static void em_identify_hardware(struct adapter *);
static int  em_allocate_pci_resources(struct adapter *);
static void em_free_pci_resources(struct adapter *);
static void em_local_timer(void *);
static int  em_hardware_init(struct adapter *);
static void em_setup_interface(device_t, struct adapter *);
/* Transmit/receive ring setup, service and teardown */
static int  em_setup_transmit_structures(struct adapter *);
static void em_initialize_transmit_unit(struct adapter *);
static int  em_setup_receive_structures(struct adapter *);
static void em_initialize_receive_unit(struct adapter *);
static void em_enable_intr(struct adapter *);
static void em_disable_intr(struct adapter *);
static void em_free_transmit_structures(struct adapter *);
static void em_free_receive_structures(struct adapter *);
static void em_update_stats_counters(struct adapter *);
static void em_clean_transmit_interrupts(struct adapter *);
static int  em_allocate_receive_structures(struct adapter *);
static int  em_allocate_transmit_structures(struct adapter *);
static void em_process_receive_interrupts(struct adapter *, int);
#ifndef __NO_STRICT_ALIGNMENT
static int  em_fixup_rx(struct adapter *);
#endif
static void em_receive_checksum(struct adapter *,
				struct em_rx_desc *,
				struct mbuf *);
static void em_transmit_checksum_setup(struct adapter *,
				       struct mbuf *,
				       u_int32_t *,
				       u_int32_t *);
/* Receive filter (promiscuous/multicast/VLAN) management */
static void em_set_promisc(struct adapter *);
static void em_disable_promisc(struct adapter *);
static void em_set_multi(struct adapter *);
static void em_print_hw_stats(struct adapter *);
static void em_print_link_status(struct adapter *);
static int  em_get_buf(int i, struct adapter *,
		       struct mbuf *);
static void em_enable_vlans(struct adapter *);
static void em_disable_vlans(struct adapter *);
static int  em_encap(struct adapter *, struct mbuf **);
static void em_smartspeed(struct adapter *);
/* 82547 TX FIFO workaround helpers */
static int  em_82547_fifo_workaround(struct adapter *, int);
static void em_82547_update_fifo_head(struct adapter *, int);
static int  em_82547_tx_fifo_reset(struct adapter *);
static void em_82547_move_tail(void *arg);
static void em_82547_move_tail_locked(struct adapter *);
/* DMA memory, debug output and sysctl plumbing */
static int  em_dma_malloc(struct adapter *, bus_size_t,
			  struct em_dma_alloc *, int);
static void em_dma_free(struct adapter *, struct em_dma_alloc *);
static void em_print_debug_info(struct adapter *);
static int  em_is_valid_ether_addr(u_int8_t *);
static int  em_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int  em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static u_int32_t em_fill_descriptors (bus_addr_t address,
				      u_int32_t length,
				      PDESC_ARRAY desc_array);
static int  em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static void em_add_int_delay_sysctl(struct adapter *, const char *,
				    const char *, struct em_int_delay_info *,
				    int, int);
#ifdef DEVICE_POLLING
static poll_handler_t em_poll;
#endif
212
213/*********************************************************************
214 *  FreeBSD Device Interface Entry Points
215 *********************************************************************/
216
static device_method_t em_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, em_probe),
	DEVMETHOD(device_attach, em_attach),
	DEVMETHOD(device_detach, em_detach),
	DEVMETHOD(device_shutdown, em_shutdown),
	{0, 0}	/* terminator required by newbus */
};

/* The bus framework allocates one softc of this size per device unit. */
static driver_t em_driver = {
	"em", em_methods, sizeof(struct adapter ),
};

static devclass_t em_devclass;
DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
/* Module ordering: em requires the pci and ether modules. */
MODULE_DEPEND(em, pci, 1, 1, 1);
MODULE_DEPEND(em, ether, 1, 1, 1);
234
/*********************************************************************
 *  Tunable default values.
 *********************************************************************/

/*
 * The interrupt-delay registers count in 1.024 usec ticks; these
 * macros convert between register ticks and microseconds, rounding
 * to nearest (hence the +500 / +512 bias terms).
 */
#define E1000_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
#define E1000_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)

/* Defaults in microseconds, derived from the EM_* register tick values. */
static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
/* Requested ring sizes; validated against hardware limits in em_attach(). */
static int em_rxd = EM_DEFAULT_RXD;
static int em_txd = EM_DEFAULT_TXD;

/* Loader tunables (hw.em.*) overriding the defaults above. */
TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rxd", &em_rxd);
TUNABLE_INT("hw.em.txd", &em_txd);
255
256/*********************************************************************
257 *  Device identification routine
258 *
259 *  em_probe determines if the driver should be loaded on
260 *  adapter based on PCI vendor/device id of the adapter.
261 *
262 *  return BUS_PROBE_DEFAULT on success, positive on failure
263 *********************************************************************/
264
265static int
266em_probe(device_t dev)
267{
268	em_vendor_info_t *ent;
269
270	u_int16_t       pci_vendor_id = 0;
271	u_int16_t       pci_device_id = 0;
272	u_int16_t       pci_subvendor_id = 0;
273	u_int16_t       pci_subdevice_id = 0;
274	char            adapter_name[60];
275
276	INIT_DEBUGOUT("em_probe: begin");
277
278	pci_vendor_id = pci_get_vendor(dev);
279	if (pci_vendor_id != EM_VENDOR_ID)
280		return(ENXIO);
281
282	pci_device_id = pci_get_device(dev);
283	pci_subvendor_id = pci_get_subvendor(dev);
284	pci_subdevice_id = pci_get_subdevice(dev);
285
286	ent = em_vendor_info_array;
287	while (ent->vendor_id != 0) {
288		if ((pci_vendor_id == ent->vendor_id) &&
289		    (pci_device_id == ent->device_id) &&
290
291		    ((pci_subvendor_id == ent->subvendor_id) ||
292		     (ent->subvendor_id == PCI_ANY_ID)) &&
293
294		    ((pci_subdevice_id == ent->subdevice_id) ||
295		     (ent->subdevice_id == PCI_ANY_ID))) {
296			sprintf(adapter_name, "%s %s",
297				em_strings[ent->index],
298				em_driver_version);
299			device_set_desc_copy(dev, adapter_name);
300			return(BUS_PROBE_DEFAULT);
301		}
302		ent++;
303	}
304
305	return(ENXIO);
306}
307
308/*********************************************************************
309 *  Device initialization routine
310 *
311 *  The attach entry point is called when the driver is being loaded.
312 *  This routine identifies the type of hardware, allocates all resources
313 *  and initializes the hardware.
314 *
315 *  return 0 on success, positive on failure
316 *********************************************************************/
317
static int
em_attach(device_t dev)
{
	struct adapter * adapter;
	int             tsize, rsize;
	int		error = 0;

	INIT_DEBUGOUT("em_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	if (!(adapter = device_get_softc(dev))) {
		printf("em: adapter structure allocation failed\n");
		return(ENOMEM);
	}
	bzero(adapter, sizeof(struct adapter ));
	adapter->dev = dev;
	adapter->osdep.dev = dev;
	adapter->unit = device_get_unit(dev);
	EM_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL stuff: per-device debug_info and stats nodes */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
			SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
			OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
			(void *)adapter, 0,
			em_sysctl_debug_info, "I", "Debug Information");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
			SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
			OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
			(void *)adapter, 0,
			em_sysctl_stats, "I", "Statistics");

	/* MPSAFE callouts: their callbacks run without Giant. */
	callout_init(&adapter->timer, CALLOUT_MPSAFE);
	callout_init(&adapter->tx_fifo_timer, CALLOUT_MPSAFE);

	/* Determine hardware revision */
	em_identify_hardware(adapter);

	/* Set up some sysctls for the tunable interrupt delays */
	em_add_int_delay_sysctl(adapter, "rx_int_delay",
	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
	    E1000_REG_OFFSET(&adapter->hw, RDTR), em_rx_int_delay_dflt);
	em_add_int_delay_sysctl(adapter, "tx_int_delay",
	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
	    E1000_REG_OFFSET(&adapter->hw, TIDV), em_tx_int_delay_dflt);
	/* Absolute delay limit registers exist only on 82540 and newer. */
	if (adapter->hw.mac_type >= em_82540) {
		em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
		    "receive interrupt delay limit in usecs",
		    &adapter->rx_abs_int_delay,
		    E1000_REG_OFFSET(&adapter->hw, RADV),
		    em_rx_abs_int_delay_dflt);
		em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
		    "transmit interrupt delay limit in usecs",
		    &adapter->tx_abs_int_delay,
		    E1000_REG_OFFSET(&adapter->hw, TADV),
		    em_tx_abs_int_delay_dflt);
	}

	/*
	 * Validate number of transmit and receive descriptors. It
	 * must not exceed hardware maximum, and must be multiple
	 * of E1000_DBA_ALIGN.  Fall back to the defaults (with a
	 * console warning) rather than failing attach.
	 */
	if (((em_txd * sizeof(struct em_tx_desc)) % E1000_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac_type >= em_82544 && em_txd > EM_MAX_TXD) ||
	    (adapter->hw.mac_type < em_82544 && em_txd > EM_MAX_TXD_82543) ||
	    (em_txd < EM_MIN_TXD)) {
		printf("em%d: Using %d TX descriptors instead of %d!\n",
		    adapter->unit, EM_DEFAULT_TXD, em_txd);
		adapter->num_tx_desc = EM_DEFAULT_TXD;
	} else
		adapter->num_tx_desc = em_txd;
	if (((em_rxd * sizeof(struct em_rx_desc)) % E1000_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac_type >= em_82544 && em_rxd > EM_MAX_RXD) ||
	    (adapter->hw.mac_type < em_82544 && em_rxd > EM_MAX_RXD_82543) ||
	    (em_rxd < EM_MIN_RXD)) {
		printf("em%d: Using %d RX descriptors instead of %d!\n",
		    adapter->unit, EM_DEFAULT_RXD, em_rxd);
		adapter->num_rx_desc = EM_DEFAULT_RXD;
	} else
		adapter->num_rx_desc = em_rxd;

	/* Default link negotiation and receive buffer parameters. */
	adapter->hw.autoneg = DO_AUTO_NEG;
	adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
	adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
	adapter->hw.tbi_compatibility_en = TRUE;
	adapter->rx_buffer_len = EM_RXBUFFER_2048;

	adapter->hw.phy_init_script = 1;
	adapter->hw.phy_reset_disable = FALSE;

#ifndef EM_MASTER_SLAVE
	adapter->hw.master_slave = em_ms_hw_default;
#else
	adapter->hw.master_slave = EM_MASTER_SLAVE;
#endif
	/*
	 * Set the max frame size assuming standard ethernet
	 * sized frames (em_ioctl() adjusts this when the MTU changes).
	 */
	adapter->hw.max_frame_size =
		ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	adapter->hw.min_frame_size =
		MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;

	/*
	 * This controls when hardware reports transmit completion
	 * status.
	 */
	adapter->hw.report_tx_early = 1;


	if (em_allocate_pci_resources(adapter)) {
		printf("em%d: Allocation of PCI resources failed\n",
		       adapter->unit);
		error = ENXIO;
		goto err_pci;
	}


	/* Initialize eeprom parameters */
	em_init_eeprom_params(&adapter->hw);

	/* Ring byte sizes are rounded up to the descriptor alignment. */
	tsize = roundup2(adapter->num_tx_desc * sizeof(struct em_tx_desc),
	    E1000_DBA_ALIGN);

	/* Allocate Transmit Descriptor ring */
	if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
		printf("em%d: Unable to allocate tx_desc memory\n",
		       adapter->unit);
		error = ENOMEM;
		goto err_tx_desc;
	}
	adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;

	rsize = roundup2(adapter->num_rx_desc * sizeof(struct em_rx_desc),
	    E1000_DBA_ALIGN);

	/* Allocate Receive Descriptor ring */
	if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
		printf("em%d: Unable to allocate rx_desc memory\n",
			adapter->unit);
		error = ENOMEM;
		goto err_rx_desc;
	}
	adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		printf("em%d: Unable to initialize the hardware\n",
		       adapter->unit);
		error = EIO;
		goto err_hw_init;
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (em_read_mac_addr(&adapter->hw) < 0) {
		printf("em%d: EEPROM read error while reading mac address\n",
		       adapter->unit);
		error = EIO;
		goto err_mac_addr;
	}

	if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
		printf("em%d: Invalid mac address\n", adapter->unit);
		error = EIO;
		goto err_mac_addr;
	}

	/* Setup OS specific network interface */
	em_setup_interface(dev, adapter);

	/* Initialize statistics */
	em_clear_hw_cntrs(&adapter->hw);
	em_update_stats_counters(adapter);
	adapter->hw.get_link_status = 1;
	em_check_for_link(&adapter->hw);

	if (bootverbose) {
		/* Print the link status */
		if (adapter->link_active == 1) {
			em_get_speed_and_duplex(&adapter->hw,
			    &adapter->link_speed, &adapter->link_duplex);
			printf("em%d:  Speed:%d Mbps  Duplex:%s\n",
			       adapter->unit,
			       adapter->link_speed,
			       adapter->link_duplex == FULL_DUPLEX ? "Full" :
				"Half");
		} else
			printf("em%d:  Speed:N/A  Duplex:N/A\n",
			    adapter->unit);
	}

	/* Identify 82544 on PCIX */
	em_get_bus_info(&adapter->hw);
	if(adapter->hw.bus_type == em_bus_type_pcix &&
	   adapter->hw.mac_type == em_82544) {
		adapter->pcix_82544 = TRUE;
	}
	else {
		adapter->pcix_82544 = FALSE;
	}
	INIT_DEBUGOUT("em_attach: end");
	return(0);

	/*
	 * Error unwinding: resources are released in reverse order of
	 * acquisition; adjacent labels fall through intentionally.
	 */
err_mac_addr:
err_hw_init:
	em_dma_free(adapter, &adapter->rxdma);
err_rx_desc:
	em_dma_free(adapter, &adapter->txdma);
err_tx_desc:
err_pci:
	em_free_pci_resources(adapter);
	EM_LOCK_DESTROY(adapter);
	return(error);

}
537
538/*********************************************************************
539 *  Device removal routine
540 *
541 *  The detach entry point is called when the driver is being removed.
542 *  This routine stops the adapter and deallocates all the resources
543 *  that were allocated for driver operation.
544 *
545 *  return 0 on success, positive on failure
546 *********************************************************************/
547
static int
em_detach(device_t dev)
{
	struct adapter * adapter = device_get_softc(dev);
	struct ifnet   *ifp = adapter->ifp;

	INIT_DEBUGOUT("em_detach: begin");

#ifdef DEVICE_POLLING
	/* Stop polling callbacks before any teardown begins. */
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	EM_LOCK(adapter);
	/* Flag detach-in-progress so em_ioctl() becomes a no-op. */
	adapter->in_detach = 1;
	em_stop(adapter);
	em_phy_hw_reset(&adapter->hw);
	EM_UNLOCK(adapter);
	ether_ifdetach(adapter->ifp);

	em_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(ifp);

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base) {
		em_dma_free(adapter, &adapter->txdma);
		adapter->tx_desc_base = NULL;
	}

	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base) {
		em_dma_free(adapter, &adapter->rxdma);
		adapter->rx_desc_base = NULL;
	}

	/* Safe to destroy: nothing can acquire the lock any more. */
	EM_LOCK_DESTROY(adapter);

	return(0);
}
588
589/*********************************************************************
590 *
591 *  Shutdown entry point
592 *
593 **********************************************************************/
594
595static int
596em_shutdown(device_t dev)
597{
598	struct adapter *adapter = device_get_softc(dev);
599	EM_LOCK(adapter);
600	em_stop(adapter);
601	EM_UNLOCK(adapter);
602	return(0);
603}
604
605
606/*********************************************************************
607 *  Transmit entry point
608 *
609 *  em_start is called by the stack to initiate a transmit.
610 *  The driver will remain in this routine as long as there are
611 *  packets to transmit and transmit resources are available.
612 *  In case resources are not available stack is notified and
613 *  the packet is requeued.
614 **********************************************************************/
615
616static void
617em_start_locked(struct ifnet *ifp)
618{
619        struct mbuf    *m_head;
620        struct adapter *adapter = ifp->if_softc;
621
622	mtx_assert(&adapter->mtx, MA_OWNED);
623
624        if (!adapter->link_active)
625                return;
626
627        while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
628
629                IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
630
631                if (m_head == NULL) break;
632
633		/*
634		 * em_encap() can modify our pointer, and or make it NULL on
635		 * failure.  In that event, we can't requeue.
636		 */
637		if (em_encap(adapter, &m_head)) {
638			if (m_head == NULL)
639				break;
640			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
641			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
642			break;
643                }
644
645		/* Send a copy of the frame to the BPF listener */
646		BPF_MTAP(ifp, m_head);
647
648                /* Set timeout in case hardware has problems transmitting */
649                ifp->if_timer = EM_TX_TIMEOUT;
650
651        }
652        return;
653}
654
655static void
656em_start(struct ifnet *ifp)
657{
658	struct adapter *adapter = ifp->if_softc;
659
660	EM_LOCK(adapter);
661	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
662		em_start_locked(ifp);
663	EM_UNLOCK(adapter);
664	return;
665}
666
667/*********************************************************************
668 *  Ioctl entry point
669 *
670 *  em_ioctl is called when the user wants to configure the
671 *  interface.
672 *
673 *  return 0 on success, positive on failure
674 **********************************************************************/
675
static int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ifreq   *ifr = (struct ifreq *) data;
	struct adapter * adapter = ifp->if_softc;
	int error = 0;

	/* Ignore requests once detach has begun (see em_detach()). */
	if (adapter->in_detach) return(error);

	switch (command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
		ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
	    {
		int max_frame_size;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

		/* The upper MTU bound depends on the MAC type. */
		switch (adapter->hw.mac_type) {
		case em_82571:
		case em_82572:
			max_frame_size = 10500;
			break;
		case em_82573:
			/* 82573 does not support jumbo frames. */
			max_frame_size = ETHER_MAX_LEN;
			break;
		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		/* Apply the new MTU and reinitialize the adapter. */
		EM_LOCK(adapter);
		ifp->if_mtu = ifr->ifr_mtu;
		adapter->hw.max_frame_size =
		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
		em_init_locked(adapter);
		EM_UNLOCK(adapter);
		break;
	    }
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
		EM_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				em_init_locked(adapter);
			}

			/* Re-sync promiscuous mode with the current flags. */
			em_disable_promisc(adapter);
			em_set_promisc(adapter);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				em_stop(adapter);
			}
		}
		EM_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			EM_LOCK(adapter);
			em_disable_intr(adapter);
			em_set_multi(adapter);
			if (adapter->hw.mac_type == em_82542_rev2_0) {
				/* 82542 rev2.0: receive unit is reprogrammed
				 * after a multicast filter change. */
				em_initialize_receive_unit(adapter);
			}
#ifdef DEVICE_POLLING
			/* Leave interrupts masked while polling is active. */
			if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
				em_enable_intr(adapter);
			EM_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask, reinit;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		reinit = 0;
		/* mask holds only the capability bits being toggled. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(em_poll, ifp);
				if (error)
					return(error);
				EM_LOCK(adapter);
				em_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				EM_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				EM_LOCK(adapter);
				em_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				EM_UNLOCK(adapter);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		/* Offload changes take effect via reinitialization. */
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
			em_init(adapter);
		break;
	    }
	default:
		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)", (int)command);
		error = EINVAL;
	}

	return(error);
}
808
809/*********************************************************************
810 *  Watchdog entry point
811 *
812 *  This routine is called whenever hardware quits transmitting.
813 *
814 **********************************************************************/
815
static void
em_watchdog(struct ifnet *ifp)
{
	struct adapter * adapter;
	adapter = ifp->if_softc;

	EM_LOCK(adapter);
	/* If we are in this routine because of pause frames, then
	 * don't reset the hardware: TXOFF means transmission is
	 * legitimately paused, so just rearm the timer.
	 */
	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
		ifp->if_timer = EM_TX_TIMEOUT;
		EM_UNLOCK(adapter);
		return;
	}

	/* NOTE(review): message fires only when em_check_for_link()
	 * returns non-zero -- confirm that is the intended condition. */
	if (em_check_for_link(&adapter->hw))
		printf("em%d: watchdog timeout -- resetting\n", adapter->unit);

	/* Mark down, count the event, and reinitialize the adapter. */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;

	em_init_locked(adapter);
	EM_UNLOCK(adapter);
}
841
842/*********************************************************************
843 *  Init entry point
844 *
845 *  This routine is used in two ways. It is used by the stack as
846 *  init entry point in network interface structure. It is also used
847 *  by the driver as a hw/sw initialization routine to get to a
848 *  consistent state.
849 *
850 *  return 0 on success, positive on failure
851 **********************************************************************/
852
static void
em_init_locked(struct adapter * adapter)
{
	struct ifnet   *ifp;

	uint32_t	pba;
	ifp = adapter->ifp;

	INIT_DEBUGOUT("em_init: begin");

	/* The caller (em_init() or the ioctl path) must hold the lock. */
	mtx_assert(&adapter->mtx, MA_OWNED);

	/* Quiesce the hardware and release any queued buffers first. */
	em_stop(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 */
	switch (adapter->hw.mac_type) {
	case em_82547:
	case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (adapter->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		/* 82547 keeps software state for the Tx FIFO hang workaround. */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	case em_82571: /* 82571: Total Packet Buffer is 48K */
	case em_82572: /* 82572: Total Packet Buffer is 48K */
			pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;
	case em_82573: /* 82573: Total Packet Buffer is 32K */
		/* Jumbo frames not supported */
			pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K.   */
		if(adapter->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}

	INIT_DEBUGOUT1("em_init: pba=%dK",pba);
	E1000_WRITE_REG(&adapter->hw, PBA, pba);

	/* Get the latest mac address, User can use a LAA */
        bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac_addr,
              ETHER_ADDR_LEN);

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		printf("em%d: Unable to initialize the hardware\n",
		       adapter->unit);
		return;
	}

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		em_enable_vlans(adapter);

	/* Prepare transmit descriptors and buffers */
	if (em_setup_transmit_structures(adapter)) {
		printf("em%d: Could not setup transmit structures\n",
		       adapter->unit);
		em_stop(adapter);
		return;
	}
	em_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	em_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (em_setup_receive_structures(adapter)) {
		printf("em%d: Could not setup receive structures\n",
		       adapter->unit);
		em_stop(adapter);
		return;
	}
	em_initialize_receive_unit(adapter);

	/* Don't lose promiscuous settings */
	em_set_promisc(adapter);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Hardware checksum offload only exists on 82543 and newer MACs. */
	if (adapter->hw.mac_type >= em_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist = EM_CHECKSUM_FEATURES;
		else
			ifp->if_hwassist = 0;
	}

	/* Start the one-second link/statistics timer. */
	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
	em_clear_hw_cntrs(&adapter->hw);
#ifdef DEVICE_POLLING
        /*
         * Only enable interrupts if we are not polling, make sure
         * they are off otherwise.
         */
        if (ifp->if_capenable & IFCAP_POLLING)
                em_disable_intr(adapter);
        else
#endif /* DEVICE_POLLING */
		em_enable_intr(adapter);

	/* Don't reset the phy next time init gets called */
	adapter->hw.phy_reset_disable = TRUE;

	return;
}
968
/*
 * Locked wrapper around em_init_locked(); this is the if_init entry
 * point handed to the network stack.
 */
static void
em_init(void *arg)
{
	struct adapter *sc = arg;

	EM_LOCK(sc);
	em_init_locked(sc);
	EM_UNLOCK(sc);
}
979
980
981#ifdef DEVICE_POLLING
/* Do one polling pass: optionally refresh link state, then service RX/TX. */
static void
em_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
        struct adapter *adapter = ifp->if_softc;
        u_int32_t reg_icr;

	mtx_assert(&adapter->mtx, MA_OWNED);

	/* On a status poll, read ICR and pick up link state changes. */
        if (cmd == POLL_AND_CHECK_STATUS) {
                reg_icr = E1000_READ_REG(&adapter->hw, ICR);
                if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
                        adapter->hw.get_link_status = 1;
                        em_check_for_link(&adapter->hw);
                        em_print_link_status(adapter);
			callout_reset(&adapter->timer, hz, em_local_timer, adapter);
                }
        }
	/* Process at most "count" received frames, then reclaim TX slots. */
	em_process_receive_interrupts(adapter, count);
	em_clean_transmit_interrupts(adapter);

	/* Kick the transmitter if the send queue is non-empty. */
        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                em_start_locked(ifp);
}
1006
1007static void
1008em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1009{
1010        struct adapter *adapter = ifp->if_softc;
1011
1012	EM_LOCK(adapter);
1013	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1014		em_poll_locked(ifp, cmd, count);
1015	EM_UNLOCK(adapter);
1016}
1017#endif /* DEVICE_POLLING */
1018
1019/*********************************************************************
1020 *
1021 *  Interrupt Service routine
1022 *
1023 **********************************************************************/
static void
em_intr(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp;
	uint32_t	reg_icr;
	int		wantinit = 0;

	EM_LOCK(adapter);

	ifp = adapter->ifp;

#ifdef DEVICE_POLLING
	/* When polling is active the poll routine does all the work. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		EM_UNLOCK(adapter);
		return;
	}
#endif /* DEVICE_POLLING */

	/* Loop until the device reports no more pending interrupt causes. */
	for (;;) {
		reg_icr = E1000_READ_REG(&adapter->hw, ICR);
		/*
		 * On 82571 and newer, INT_ASSERTED tells us the interrupt
		 * was really raised by this device (shared-IRQ filter);
		 * older MACs only give us ICR == 0 to decide.
		 */
		if (adapter->hw.mac_type >= em_82571 &&
		    (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
			break;
		else if (reg_icr == 0)
			break;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			/* -1 = no frame limit on the receive pass. */
			em_process_receive_interrupts(adapter, -1);
			em_clean_transmit_interrupts(adapter);
		}

		/* Link status change */
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.get_link_status = 1;
			em_check_for_link(&adapter->hw);
			em_print_link_status(adapter);
			callout_reset(&adapter->timer, hz, em_local_timer,
			    adapter);
		}

		/* Count receiver overruns; see the disabled reinit below. */
		if (reg_icr & E1000_ICR_RXO) {
			adapter->rx_overruns++;
			wantinit = 1;
		}
	}
	/*
	 * NOTE(review): the overrun-triggered reinit is compiled out, so
	 * wantinit is currently set but never acted upon.
	 */
#if 0
	if (wantinit)
		em_init_locked(adapter);
#endif
	/* Restart transmission if frames queued up while we were busy. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		em_start_locked(ifp);

	EM_UNLOCK(adapter);
	return;
}
1082
1083
1084
1085/*********************************************************************
1086 *
1087 *  Media Ioctl callback
1088 *
1089 *  This routine is called whenever the user queries the status of
1090 *  the interface using ifconfig.
1091 *
1092 **********************************************************************/
static void
em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter * adapter = ifp->if_softc;

	INIT_DEBUGOUT("em_media_status: begin");

	/* Refresh link_active/link_speed/link_duplex from the hardware. */
	em_check_for_link(&adapter->hw);
	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
		if (adapter->link_active == 0) {
			em_get_speed_and_duplex(&adapter->hw,
						&adapter->link_speed,
						&adapter->link_duplex);
			adapter->link_active = 1;
		}
	} else {
		if (adapter->link_active == 1) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			adapter->link_active = 0;
		}
	}

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report "valid but inactive" and stop here. */
	if (!adapter->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	/* Fiber media is always reported as 1000BASE-SX full duplex. */
	if (adapter->hw.media_type == em_media_type_fiber) {
		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
	} else {
		/* Copper: map the negotiated speed/duplex to ifmedia bits. */
		switch (adapter->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (adapter->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
	return;
}
1145
1146/*********************************************************************
1147 *
1148 *  Media Ioctl callback
1149 *
1150 *  This routine is called when the user changes speed/duplex using
1151 *  media/mediopt option with ifconfig.
1152 *
1153 **********************************************************************/
1154static int
1155em_media_change(struct ifnet *ifp)
1156{
1157	struct adapter * adapter = ifp->if_softc;
1158	struct ifmedia  *ifm = &adapter->media;
1159
1160	INIT_DEBUGOUT("em_media_change: begin");
1161
1162	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1163		return(EINVAL);
1164
1165	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1166	case IFM_AUTO:
1167		adapter->hw.autoneg = DO_AUTO_NEG;
1168		adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1169		break;
1170	case IFM_1000_SX:
1171	case IFM_1000_T:
1172		adapter->hw.autoneg = DO_AUTO_NEG;
1173		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1174		break;
1175	case IFM_100_TX:
1176		adapter->hw.autoneg = FALSE;
1177		adapter->hw.autoneg_advertised = 0;
1178		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1179			adapter->hw.forced_speed_duplex = em_100_full;
1180		else
1181			adapter->hw.forced_speed_duplex	= em_100_half;
1182		break;
1183	case IFM_10_T:
1184		adapter->hw.autoneg = FALSE;
1185		adapter->hw.autoneg_advertised = 0;
1186		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1187			adapter->hw.forced_speed_duplex = em_10_full;
1188		else
1189			adapter->hw.forced_speed_duplex	= em_10_half;
1190		break;
1191	default:
1192		printf("em%d: Unsupported media type\n", adapter->unit);
1193	}
1194
1195	/* As the speed/duplex settings my have changed we need to
1196	 * reset the PHY.
1197	 */
1198	adapter->hw.phy_reset_disable = FALSE;
1199
1200	em_init(adapter);
1201
1202	return(0);
1203}
1204
1205/*********************************************************************
1206 *
1207 *  This routine maps the mbufs to tx descriptors.
1208 *
1209 *  return 0 on success, positive on failure
1210 **********************************************************************/
static int
em_encap(struct adapter *adapter, struct mbuf **m_headp)
{
        u_int32_t       txd_upper;
        u_int32_t       txd_lower, txd_used = 0, txd_saved = 0;
        int             i, j, error = 0;
	bus_dmamap_t	map;

	struct mbuf	*m_head;

	/* For 82544 Workaround */
	DESC_ARRAY              desc_array;
	u_int32_t               array_elements;
	u_int32_t               counter;
        struct m_tag    *mtag;
	bus_dma_segment_t	segs[EM_MAX_SCATTER];
	int			nsegs;
        struct em_buffer   *tx_buffer;
        struct em_tx_desc *current_tx_desc = NULL;
        struct ifnet   *ifp = adapter->ifp;

	m_head = *m_headp;

        /*
         * Force a cleanup if number of TX descriptors
         * available hits the threshold
         */
        if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
                em_clean_transmit_interrupts(adapter);
                if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
                        adapter->no_tx_desc_avail1++;
                        return(ENOBUFS);
                }
        }

        /*
         * Map the packet for DMA.
         */
	tx_buffer = &adapter->tx_buffer_area[adapter->next_avail_tx_desc];
	error = bus_dmamap_load_mbuf_sg(adapter->txtag, tx_buffer->map, m_head,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	map = tx_buffer->map;
        if (error != 0) {
                adapter->no_tx_dma_setup++;
                return (error);
        }
        KASSERT(nsegs != 0, ("em_encap: empty packet"));

	/* Not enough free descriptors for all DMA segments: back out. */
        if (nsegs > adapter->num_tx_desc_avail) {
                adapter->no_tx_desc_avail2++;
		error = ENOBUFS;
		goto encap_fail;
        }


	/* Set up a TX checksum-offload context if any offload is enabled. */
        if (ifp->if_hwassist > 0) {
                em_transmit_checksum_setup(adapter,  m_head,
                                           &txd_upper, &txd_lower);
        } else
                txd_upper = txd_lower = 0;


        /* Find out if we are in vlan mode */
        mtag = VLAN_OUTPUT_TAG(ifp, m_head);

	/*
	 * When operating in promiscuous mode, hardware encapsulation for
	 * packets is disabled.  This means we have to add the vlan
	 * encapsulation in the driver, since it will have come down from the
	 * VLAN layer with a tag instead of a VLAN header.
	 */
	if (mtag != NULL && adapter->em_insert_vlan_header) {
		struct ether_vlan_header *evl;
		struct ether_header eh;

		/* Make the Ethernet header contiguous so we can copy it. */
		m_head = m_pullup(m_head, sizeof(eh));
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		eh = *mtod(m_head, struct ether_header *);
		/* Grow the front of the mbuf by the 4-byte VLAN shim. */
		M_PREPEND(m_head, sizeof(*evl), M_DONTWAIT);
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		m_head = m_pullup(m_head, sizeof(*evl));
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		evl = mtod(m_head, struct ether_vlan_header *);
		/*
		 * NOTE(review): this copies sizeof(*evl) (18) bytes out of
		 * the 14-byte "eh"; the 4 surplus bytes are overwritten
		 * just below, but the read runs past "eh" -- consider
		 * copying sizeof(eh) instead.
		 */
		bcopy(&eh, evl, sizeof(*evl));
		evl->evl_proto = evl->evl_encap_proto;
		evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
		evl->evl_tag = htons(VLAN_TAG_VALUE(mtag));
		m_tag_delete(m_head, mtag);
		mtag = NULL;
		*m_headp = m_head;
	}

        i = adapter->next_avail_tx_desc;
	if (adapter->pcix_82544) {
		/* Remember where we started so a failure can roll back. */
		txd_saved = i;
		txd_used = 0;
	}
	/* Fill one (or, on 82544/PCI-X, several) descriptors per segment. */
        for (j = 0; j < nsegs; j++) {
		/* If adapter is 82544 and on PCIX bus */
		if(adapter->pcix_82544) {
			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
                        array_elements = em_fill_descriptors(segs[j].ds_addr,
			    segs[j].ds_len, &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				if (txd_used == adapter->num_tx_desc_avail) {
					adapter->next_avail_tx_desc = txd_saved;
					adapter->no_tx_desc_avail2++;
					error = ENOBUFS;
					goto encap_fail;
                                }
                                tx_buffer = &adapter->tx_buffer_area[i];
                                current_tx_desc = &adapter->tx_desc_base[i];
                                current_tx_desc->buffer_addr = htole64(
					desc_array.descriptor[counter].address);
                                current_tx_desc->lower.data = htole32(
					(adapter->txd_cmd | txd_lower |
					 (u_int16_t)desc_array.descriptor[counter].length));
                                current_tx_desc->upper.data = htole32((txd_upper));
                                if (++i == adapter->num_tx_desc)
                                         i = 0;

                                tx_buffer->m_head = NULL;
                                txd_used++;
                        }
		} else {
			tx_buffer = &adapter->tx_buffer_area[i];
			current_tx_desc = &adapter->tx_desc_base[i];

			current_tx_desc->buffer_addr = htole64(segs[j].ds_addr);
			current_tx_desc->lower.data = htole32(
				adapter->txd_cmd | txd_lower | segs[j].ds_len);
			current_tx_desc->upper.data = htole32(txd_upper);

			if (++i == adapter->num_tx_desc)
				i = 0;

			/* Only the last descriptor gets the mbuf pointer. */
			tx_buffer->m_head = NULL;
		}
        }

	adapter->next_avail_tx_desc = i;
	if (adapter->pcix_82544) {
		adapter->num_tx_desc_avail -= txd_used;
	}
	else {
		adapter->num_tx_desc_avail -= nsegs;
	}

        if (mtag != NULL) {
                /* Set the vlan id */
                current_tx_desc->upper.fields.special = htole16(VLAN_TAG_VALUE(mtag));

                /* Tell hardware to add tag */
                current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
        }

	/* Attach the mbuf to the final descriptor's buffer for later free. */
        tx_buffer->m_head = m_head;
        bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

        /*
         * Last Descriptor of Packet needs End Of Packet (EOP)
         */
        current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);

        /*
         * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
         * that this frame is available to transmit.
         */
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        if (adapter->hw.mac_type == em_82547 &&
            adapter->link_duplex == HALF_DUPLEX) {
                /* 82547 half-duplex: defer the tail write through the
                 * FIFO-hang workaround path. */
                em_82547_move_tail_locked(adapter);
        } else {
                E1000_WRITE_REG(&adapter->hw, TDT, i);
                if (adapter->hw.mac_type == em_82547) {
                        em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len);
                }
        }

        return(0);

encap_fail:
	/*
	 * NOTE(review): when we arrive here from inside the 82544 loop,
	 * tx_buffer may have been advanced past the buffer whose map was
	 * loaded above; "map" still holds the loaded map and would be the
	 * safer argument -- verify.
	 */
	bus_dmamap_unload(adapter->txtag, tx_buffer->map);
	return (error);
}
1412
1413/*********************************************************************
1414 *
1415 * 82547 workaround to avoid controller hang in half-duplex environment.
1416 * The workaround is to avoid queuing a large packet that would span
1417 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1418 * in this case. We do that only when FIFO is quiescent.
1419 *
1420 **********************************************************************/
static void
em_82547_move_tail_locked(struct adapter *adapter)
{
	uint16_t hw_tdt;
	uint16_t sw_tdt;
	struct em_tx_desc *tx_desc;
	uint16_t length = 0;
	boolean_t eop = 0;

	EM_LOCK_ASSERT(adapter);

	/* Walk descriptors from the hardware tail to the software tail. */
	hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
	sw_tdt = adapter->next_avail_tx_desc;

	while (hw_tdt != sw_tdt) {
		/* Accumulate the byte length of the current frame. */
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if(++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if(eop) {
			/*
			 * Frame boundary: only expose it to the hardware
			 * if it cannot trigger the 82547 FIFO hang;
			 * otherwise retry from a 1-tick callout.
			 */
			if (em_82547_fifo_workaround(adapter, length)) {
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
					em_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
			em_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}
	return;
}
1456
/* Callout wrapper: grab the lock and retry the deferred tail move. */
static void
em_82547_move_tail(void *arg)
{
	struct adapter *sc = arg;

	EM_LOCK(sc);
	em_82547_move_tail_locked(sc);
	EM_UNLOCK(sc);
}
1466
1467static int
1468em_82547_fifo_workaround(struct adapter *adapter, int len)
1469{
1470	int fifo_space, fifo_pkt_len;
1471
1472	fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1473
1474	if (adapter->link_duplex == HALF_DUPLEX) {
1475		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1476
1477		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1478			if (em_82547_tx_fifo_reset(adapter)) {
1479				return(0);
1480			}
1481			else {
1482				return(1);
1483			}
1484		}
1485	}
1486
1487	return(0);
1488}
1489
1490static void
1491em_82547_update_fifo_head(struct adapter *adapter, int len)
1492{
1493	int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1494
1495	/* tx_fifo_head is always 16 byte aligned */
1496	adapter->tx_fifo_head += fifo_pkt_len;
1497	if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1498		adapter->tx_fifo_head -= adapter->tx_fifo_size;
1499	}
1500
1501	return;
1502}
1503
1504
/*
 * Reset the 82547 Tx FIFO pointers, but only while the FIFO is
 * completely quiescent.  Returns TRUE on success, FALSE if the FIFO
 * was still busy.
 */
static int
em_82547_tx_fifo_reset(struct adapter *adapter)
{
	uint32_t tctl;

	/* Quiescent means: no pending descriptors (TDT == TDH), FIFO
	 * read/write pointers caught up, and zero packets in the FIFO. */
	if ( (E1000_READ_REG(&adapter->hw, TDT) ==
	      E1000_READ_REG(&adapter->hw, TDH)) &&
	     (E1000_READ_REG(&adapter->hw, TDFT) ==
	      E1000_READ_REG(&adapter->hw, TDFH)) &&
	     (E1000_READ_REG(&adapter->hw, TDFTS) ==
	      E1000_READ_REG(&adapter->hw, TDFHS)) &&
	     (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {

		/* Disable TX unit */
		tctl = E1000_READ_REG(&adapter->hw, TCTL);
		E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&adapter->hw, TDFT,  adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFH,  adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
		E1000_WRITE_FLUSH(&adapter->hw);

		/* Keep the software FIFO state in sync with the reset. */
		adapter->tx_fifo_head = 0;
		adapter->tx_fifo_reset_cnt++;

		return(TRUE);
	}
	else {
		return(FALSE);
	}
}
1541
1542static void
1543em_set_promisc(struct adapter * adapter)
1544{
1545
1546	u_int32_t       reg_rctl;
1547	struct ifnet   *ifp = adapter->ifp;
1548
1549	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1550
1551	if (ifp->if_flags & IFF_PROMISC) {
1552		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1553		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1554		/* Disable VLAN stripping in promiscous mode
1555		 * This enables bridging of vlan tagged frames to occur
1556		 * and also allows vlan tags to be seen in tcpdump
1557		 */
1558		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1559			em_disable_vlans(adapter);
1560		adapter->em_insert_vlan_header = 1;
1561	} else if (ifp->if_flags & IFF_ALLMULTI) {
1562		reg_rctl |= E1000_RCTL_MPE;
1563		reg_rctl &= ~E1000_RCTL_UPE;
1564		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1565		adapter->em_insert_vlan_header = 0;
1566	} else
1567		adapter->em_insert_vlan_header = 0;
1568
1569	return;
1570}
1571
1572static void
1573em_disable_promisc(struct adapter * adapter)
1574{
1575	u_int32_t       reg_rctl;
1576	struct ifnet   *ifp = adapter->ifp;
1577
1578	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1579
1580	reg_rctl &=  (~E1000_RCTL_UPE);
1581	reg_rctl &=  (~E1000_RCTL_MPE);
1582	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1583
1584	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1585		em_enable_vlans(adapter);
1586	adapter->em_insert_vlan_header = 0;
1587
1588	return;
1589}
1590
1591
1592/*********************************************************************
1593 *  Multicast Update
1594 *
1595 *  This routine is called whenever multicast address list is updated.
1596 *
1597 **********************************************************************/
1598
static void
em_set_multi(struct adapter * adapter)
{
        u_int32_t reg_rctl = 0;
        u_int8_t  mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
        struct ifmultiaddr  *ifma;
        int mcnt = 0;
        struct ifnet   *ifp = adapter->ifp;

        IOCTL_DEBUGOUT("em_set_multi: begin");

	/*
	 * 82542 rev 2.0 workaround: put the receiver in reset (and turn
	 * off MWI) while the multicast table array is rewritten.
	 */
        if (adapter->hw.mac_type == em_82542_rev2_0) {
                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
                if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
                        em_pci_clear_mwi(&adapter->hw);
                }
                reg_rctl |= E1000_RCTL_RST;
                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
                msec_delay(5);
        }

	/* Gather up to MAX_NUM_MULTICAST_ADDRESSES link-layer addresses. */
	IF_ADDR_LOCK(ifp);
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break;

                bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
                      &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
                mcnt++;
        }
	IF_ADDR_UNLOCK(ifp);

	/* Too many groups for the table: fall back to multicast
	 * promiscuous mode; otherwise program the table. */
        if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
                reg_rctl |= E1000_RCTL_MPE;
                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
        } else
                em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);

	/* Take the 82542 rev 2.0 receiver back out of reset. */
        if (adapter->hw.mac_type == em_82542_rev2_0) {
                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
                reg_rctl &= ~E1000_RCTL_RST;
                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
                msec_delay(5);
                if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
                        em_pci_set_mwi(&adapter->hw);
                }
        }

        return;
}
1652
1653
1654/*********************************************************************
1655 *  Timer routine
1656 *
1657 *  This routine checks for link status and updates statistics.
1658 *
1659 **********************************************************************/
1660
1661static void
1662em_local_timer(void *arg)
1663{
1664	struct ifnet   *ifp;
1665	struct adapter * adapter = arg;
1666	ifp = adapter->ifp;
1667
1668	EM_LOCK(adapter);
1669
1670	em_check_for_link(&adapter->hw);
1671	em_print_link_status(adapter);
1672	em_update_stats_counters(adapter);
1673	if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1674		em_print_hw_stats(adapter);
1675	}
1676	em_smartspeed(adapter);
1677
1678	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1679
1680	EM_UNLOCK(adapter);
1681	return;
1682}
1683
1684static void
1685em_print_link_status(struct adapter * adapter)
1686{
1687	struct ifnet *ifp = adapter->ifp;
1688
1689	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1690		if (adapter->link_active == 0) {
1691			em_get_speed_and_duplex(&adapter->hw,
1692						&adapter->link_speed,
1693						&adapter->link_duplex);
1694			if (bootverbose)
1695				printf("em%d: Link is up %d Mbps %s\n",
1696				       adapter->unit,
1697				       adapter->link_speed,
1698				       ((adapter->link_duplex == FULL_DUPLEX) ?
1699					"Full Duplex" : "Half Duplex"));
1700			adapter->link_active = 1;
1701			adapter->smartspeed = 0;
1702			if_link_state_change(ifp, LINK_STATE_UP);
1703		}
1704	} else {
1705		if (adapter->link_active == 1) {
1706			adapter->link_speed = 0;
1707			adapter->link_duplex = 0;
1708			if (bootverbose)
1709				printf("em%d: Link is Down\n", adapter->unit);
1710			adapter->link_active = 0;
1711			if_link_state_change(ifp, LINK_STATE_DOWN);
1712		}
1713	}
1714
1715	return;
1716}
1717
1718/*********************************************************************
1719 *
1720 *  This routine disables all traffic on the adapter by issuing a
1721 *  global reset on the MAC and deallocates TX/RX buffers.
1722 *
1723 **********************************************************************/
1724
1725static void
1726em_stop(void *arg)
1727{
1728	struct ifnet   *ifp;
1729	struct adapter * adapter = arg;
1730	ifp = adapter->ifp;
1731
1732	mtx_assert(&adapter->mtx, MA_OWNED);
1733
1734	INIT_DEBUGOUT("em_stop: begin");
1735
1736	em_disable_intr(adapter);
1737	em_reset_hw(&adapter->hw);
1738	callout_stop(&adapter->timer);
1739	callout_stop(&adapter->tx_fifo_timer);
1740	em_free_transmit_structures(adapter);
1741	em_free_receive_structures(adapter);
1742
1743
1744	/* Tell the stack that the interface is no longer active */
1745	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1746
1747	return;
1748}
1749
1750
1751/*********************************************************************
1752 *
1753 *  Determine hardware revision.
1754 *
1755 **********************************************************************/
static void
em_identify_hardware(struct adapter * adapter)
{
	device_t dev = adapter->dev;

	/* Make sure our PCI config space has the necessary stuff set */
	adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
	      (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
		printf("em%d: Memory Access and/or Bus Master bits were not set!\n",
		       adapter->unit);
		/* Re-enable them ourselves rather than failing attach. */
		adapter->hw.pci_cmd_word |=
		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
		pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
	}

	/* Save off the information about this board */
	adapter->hw.vendor_id = pci_get_vendor(dev);
	adapter->hw.device_id = pci_get_device(dev);
	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	/* Identify the MAC (derives hw.mac_type from the device id). */
        if (em_set_mac_type(&adapter->hw))
                printf("em%d: Unknown MAC Type\n", adapter->unit);

	/* These MACs need the PHY init script run after reset. */
	if(adapter->hw.mac_type == em_82541 ||
	   adapter->hw.mac_type == em_82541_rev_2 ||
	   adapter->hw.mac_type == em_82547 ||
	   adapter->hw.mac_type == em_82547_rev_2)
		adapter->hw.phy_init_script = TRUE;

        return;
}
1791
/*
 * Allocate the memory BAR, the IO BAR (82544 and newer), and the IRQ,
 * and install the interrupt handler.  Returns 0 or ENXIO; resources
 * already allocated on a failure path are released by
 * em_free_pci_resources() from the caller.
 */
static int
em_allocate_pci_resources(struct adapter * adapter)
{
	int             val, rid;
	device_t        dev = adapter->dev;

	/* Memory-mapped register BAR. */
	rid = PCIR_BAR(0);
	adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						     &rid, RF_ACTIVE);
	if (!(adapter->res_memory)) {
		printf("em%d: Unable to allocate bus resource: memory\n",
		       adapter->unit);
		return(ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
	rman_get_bustag(adapter->res_memory);
	adapter->osdep.mem_bus_space_handle =
	rman_get_bushandle(adapter->res_memory);
	adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;


	if (adapter->hw.mac_type > em_82543) {
		/* Figure our where our IO BAR is ? */
		/* Scan the BAR registers between BAR(0) and the CIS
		 * pointer for one of IO type, skipping the second dword
		 * of any 64-bit memory BAR. */
		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
			val = pci_read_config(dev, rid, 4);
			if (E1000_BAR_TYPE(val) == E1000_BAR_TYPE_IO) {
				adapter->io_rid = rid;
				break;
			}
			rid += 4;
			/* check for 64bit BAR */
			if (E1000_BAR_MEM_TYPE(val) == E1000_BAR_MEM_TYPE_64BIT)
				rid += 4;
		}
		if (rid >= PCIR_CIS) {
			printf("em%d: Unable to locate IO BAR\n", adapter->unit);
			return (ENXIO);
		}
		adapter->res_ioport = bus_alloc_resource_any(dev,
							     SYS_RES_IOPORT,
							     &adapter->io_rid,
							     RF_ACTIVE);
		if (!(adapter->res_ioport)) {
			printf("em%d: Unable to allocate bus resource: ioport\n",
			       adapter->unit);
			return(ENXIO);
		}
		adapter->hw.io_base = 0;
		adapter->osdep.io_bus_space_tag =
		    rman_get_bustag(adapter->res_ioport);
		adapter->osdep.io_bus_space_handle =
		    rman_get_bushandle(adapter->res_ioport);
	}

	/* Shared, MP-safe interrupt line. */
	rid = 0x0;
	adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
						        RF_SHAREABLE |
							RF_ACTIVE);
	if (!(adapter->res_interrupt)) {
		printf("em%d: Unable to allocate bus resource: interrupt\n",
		       adapter->unit);
		return(ENXIO);
	}
	if (bus_setup_intr(dev, adapter->res_interrupt,
			   INTR_TYPE_NET | INTR_MPSAFE,
			   (void (*)(void *)) em_intr, adapter,
			   &adapter->int_handler_tag)) {
		printf("em%d: Error registering interrupt handler!\n",
		       adapter->unit);
		return(ENXIO);
	}

	/* Back-pointer used by the shared e1000 register access code. */
	adapter->hw.back = &adapter->osdep;

	return(0);
}
1868
1869static void
1870em_free_pci_resources(struct adapter * adapter)
1871{
1872	device_t dev = adapter->dev;
1873
1874	if (adapter->res_interrupt != NULL) {
1875		bus_teardown_intr(dev, adapter->res_interrupt,
1876				  adapter->int_handler_tag);
1877		bus_release_resource(dev, SYS_RES_IRQ, 0,
1878				     adapter->res_interrupt);
1879	}
1880	if (adapter->res_memory != NULL) {
1881		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
1882				     adapter->res_memory);
1883	}
1884
1885	if (adapter->res_ioport != NULL) {
1886		bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
1887				     adapter->res_ioport);
1888	}
1889	return;
1890}
1891
1892/*********************************************************************
1893 *
1894 *  Initialize the hardware to a configuration as specified by the
1895 *  adapter structure. The controller is reset, the EEPROM is
1896 *  verified, the MAC address is set, then the shared initialization
1897 *  routines are called.
1898 *
1899 **********************************************************************/
1900static int
1901em_hardware_init(struct adapter * adapter)
1902{
1903	uint16_t rx_buffer_size;
1904
1905        INIT_DEBUGOUT("em_hardware_init: begin");
1906	/* Issue a global reset */
1907	em_reset_hw(&adapter->hw);
1908
1909	/* When hardware is reset, fifo_head is also reset */
1910	adapter->tx_fifo_head = 0;
1911
1912	/* Make sure we have a good EEPROM before we read from it */
1913	if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
1914		printf("em%d: The EEPROM Checksum Is Not Valid\n",
1915		       adapter->unit);
1916		return(EIO);
1917	}
1918
1919	if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
1920		printf("em%d: EEPROM read error while reading part number\n",
1921		       adapter->unit);
1922		return(EIO);
1923	}
1924
1925	/*
1926	 * These parameters control the automatic generation (Tx) and
1927	 * response (Rx) to Ethernet PAUSE frames.
1928	 * - High water mark should allow for at least two frames to be
1929	 *   received after sending an XOFF.
1930	 * - Low water mark works best when it is very near the high water mark.
1931	 *   This allows the receiver to restart by sending XON when it has drained
1932	 *   a bit.  Here we use an arbitary value of 1500 which will restart after
1933	 *   one full frame is pulled from the buffer.  There could be several smaller
1934	 *   frames in the buffer and if so they will not trigger the XON until their
1935	 *   total number reduces the buffer by 1500.
1936	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
1937	 */
1938	rx_buffer_size = ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff) << 10 );
1939
1940	adapter->hw.fc_high_water = rx_buffer_size -
1941	    roundup2(adapter->hw.max_frame_size, 1024);
1942	adapter->hw.fc_low_water = adapter->hw.fc_high_water - 1500;
1943	adapter->hw.fc_pause_time = 0x1000;
1944	adapter->hw.fc_send_xon = TRUE;
1945	adapter->hw.fc = em_fc_full;
1946
1947	if (em_init_hw(&adapter->hw) < 0) {
1948		printf("em%d: Hardware Initialization Failed",
1949		       adapter->unit);
1950		return(EIO);
1951	}
1952
1953	em_check_for_link(&adapter->hw);
1954	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
1955		adapter->link_active = 1;
1956	else
1957		adapter->link_active = 0;
1958
1959	if (adapter->link_active) {
1960		em_get_speed_and_duplex(&adapter->hw,
1961					&adapter->link_speed,
1962					&adapter->link_duplex);
1963	} else {
1964		adapter->link_speed = 0;
1965		adapter->link_duplex = 0;
1966	}
1967
1968	return(0);
1969}
1970
1971/*********************************************************************
1972 *
1973 *  Setup networking device structure and register an interface.
1974 *
1975 **********************************************************************/
static void
em_setup_interface(device_t dev, struct adapter * adapter)
{
	struct ifnet   *ifp;
	INIT_DEBUGOUT("em_setup_interface: begin");

	/* Allocate the ifnet and wire up the driver entry points. */
	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = 1000000000;
	ifp->if_init =  em_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = em_ioctl;
	ifp->if_start = em_start;
	ifp->if_watchdog = em_watchdog;
	/* Size the send queue to the TX ring, keeping one slot spare. */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
	IFQ_SET_READY(&ifp->if_snd);

        ether_ifattach(ifp, adapter->hw.mac_addr);

	ifp->if_capabilities = ifp->if_capenable = 0;

	/* 82543 and newer parts do hardware checksum offload. */
	if (adapter->hw.mac_type >= em_82543) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_capenable |= IFCAP_HWCSUM;
	}

	/*
	 * Tell the upper layer(s) we support long frames.
	 * NOTE(review): VLAN_HWTAGGING is advertised in if_capabilities
	 * but only VLAN_MTU is enabled by default here.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
		     em_media_status);
	if (adapter->hw.media_type == em_media_type_fiber) {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
			    0, NULL);
	} else {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	}
	/* Autoselect is always offered, and is the default. */
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}
2046
2047
2048/*********************************************************************
2049 *
2050 *  Workaround for SmartSpeed on 82541 and 82547 controllers
2051 *
2052 **********************************************************************/
static void
em_smartspeed(struct adapter *adapter)
{
        uint16_t phy_tmp;

	/*
	 * Only act while the link is down on an IGP PHY that is
	 * autonegotiating and advertising 1000BASE-T full duplex.
	 */
	if(adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
	   !adapter->hw.autoneg || !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
		return;

        if(adapter->smartspeed == 0) {
                /* If Master/Slave config fault is asserted twice,
                 * we assume back-to-back */
                em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
                if(!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) return;
                em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
                if(phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
                        em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
					&phy_tmp);
                        if(phy_tmp & CR_1000T_MS_ENABLE) {
				/* Stop forcing master/slave and restart
				 * autonegotiation to try to get link. */
                                phy_tmp &= ~CR_1000T_MS_ENABLE;
                                em_write_phy_reg(&adapter->hw,
                                                    PHY_1000T_CTRL, phy_tmp);
                                adapter->smartspeed++;
                                if(adapter->hw.autoneg &&
                                   !em_phy_setup_autoneg(&adapter->hw) &&
				   !em_read_phy_reg(&adapter->hw, PHY_CTRL,
                                                       &phy_tmp)) {
                                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
                                                    MII_CR_RESTART_AUTO_NEG);
                                        em_write_phy_reg(&adapter->hw,
							 PHY_CTRL, phy_tmp);
                                }
                        }
                }
                return;
        } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
                /* If still no link, perhaps using 2/3 pair cable */
                em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
                phy_tmp |= CR_1000T_MS_ENABLE;
                em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
		/* Re-enable master/slave and restart autonegotiation. */
                if(adapter->hw.autoneg &&
                   !em_phy_setup_autoneg(&adapter->hw) &&
                   !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
                                    MII_CR_RESTART_AUTO_NEG);
                        em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
                }
        }
        /* Restart process after EM_SMARTSPEED_MAX iterations */
        if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
                adapter->smartspeed = 0;

	return;
}
2107
2108
2109/*
2110 * Manage DMA'able memory.
2111 */
2112static void
2113em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2114{
2115        if (error)
2116                return;
2117        *(bus_addr_t *) arg = segs[0].ds_addr;
2118}
2119
2120static int
2121em_dma_malloc(struct adapter *adapter, bus_size_t size,
2122        struct em_dma_alloc *dma, int mapflags)
2123{
2124        int r;
2125
2126	r = bus_dma_tag_create(NULL,                    /* parent */
2127                               E1000_DBA_ALIGN, 0,      /* alignment, bounds */
2128                               BUS_SPACE_MAXADDR,       /* lowaddr */
2129                               BUS_SPACE_MAXADDR,       /* highaddr */
2130                               NULL, NULL,              /* filter, filterarg */
2131                               size,                    /* maxsize */
2132                               1,                       /* nsegments */
2133                               size,                    /* maxsegsize */
2134                               BUS_DMA_ALLOCNOW,        /* flags */
2135			       NULL,			/* lockfunc */
2136			       NULL,			/* lockarg */
2137                               &dma->dma_tag);
2138        if (r != 0) {
2139                printf("em%d: em_dma_malloc: bus_dma_tag_create failed; "
2140                        "error %u\n", adapter->unit, r);
2141                goto fail_0;
2142        }
2143
2144        r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2145                             BUS_DMA_NOWAIT, &dma->dma_map);
2146        if (r != 0) {
2147                printf("em%d: em_dma_malloc: bus_dmammem_alloc failed; "
2148                        "size %ju, error %d\n", adapter->unit,
2149			(uintmax_t)size, r);
2150                goto fail_2;
2151        }
2152
2153	dma->dma_paddr = 0;
2154        r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2155                            size,
2156                            em_dmamap_cb,
2157                            &dma->dma_paddr,
2158                            mapflags | BUS_DMA_NOWAIT);
2159        if (r != 0 || dma->dma_paddr == 0) {
2160                printf("em%d: em_dma_malloc: bus_dmamap_load failed; "
2161                        "error %u\n", adapter->unit, r);
2162                goto fail_3;
2163        }
2164
2165        return (0);
2166
2167fail_3:
2168        bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2169fail_2:
2170        bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2171        bus_dma_tag_destroy(dma->dma_tag);
2172fail_0:
2173        dma->dma_map = NULL;
2174        dma->dma_tag = NULL;
2175        return (r);
2176}
2177
/*
 * Undo em_dma_malloc(): sync, unload and free the DMA memory, then
 * destroy the tag.  Idempotent - once dma_tag is NULL (never allocated,
 * or already freed) the call is a no-op.
 */
static void
em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
{
	if (dma->dma_tag == NULL)
		return;
	if (dma->dma_map != NULL) {
		/* Make CPU and device views coherent before tearing down. */
		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
		dma->dma_map = NULL;
	}
        bus_dma_tag_destroy(dma->dma_tag);
	dma->dma_tag = NULL;
}
2193
2194
2195/*********************************************************************
2196 *
2197 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2198 *  the information needed to transmit a packet on the wire.
2199 *
2200 **********************************************************************/
2201static int
2202em_allocate_transmit_structures(struct adapter * adapter)
2203{
2204	if (!(adapter->tx_buffer_area =
2205	      (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2206					     adapter->num_tx_desc, M_DEVBUF,
2207					     M_NOWAIT))) {
2208		printf("em%d: Unable to allocate tx_buffer memory\n",
2209		       adapter->unit);
2210		return ENOMEM;
2211	}
2212
2213	bzero(adapter->tx_buffer_area,
2214	      sizeof(struct em_buffer) * adapter->num_tx_desc);
2215
2216	return 0;
2217}
2218
2219/*********************************************************************
2220 *
2221 *  Allocate and initialize transmit structures.
2222 *
2223 **********************************************************************/
static int
em_setup_transmit_structures(struct adapter * adapter)
{
	struct em_buffer *tx_buffer;
	bus_size_t size;
	int error, i;

        /*
         * Setup DMA descriptor areas.
         */
	/* One mapping may span a whole max-size frame, rounded up to
	 * full mbuf clusters and scattered over EM_MAX_SCATTER segs. */
	size = roundup2(adapter->hw.max_frame_size, MCLBYTES);
	if ((error = bus_dma_tag_create(NULL,           /* parent */
                               1, 0,                    /* alignment, bounds */
                               BUS_SPACE_MAXADDR,       /* lowaddr */
                               BUS_SPACE_MAXADDR,       /* highaddr */
                               NULL, NULL,              /* filter, filterarg */
                               size,                    /* maxsize */
                               EM_MAX_SCATTER,          /* nsegments */
                               size,                    /* maxsegsize */
                               0,                       /* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockarg */
                               &adapter->txtag)) != 0) {
		printf("em%d: Unable to allocate TX DMA tag\n", adapter->unit);
		goto fail;
        }

        if ((error = em_allocate_transmit_structures(adapter)) != 0)
		goto fail;

	/* Clear the descriptor ring and create one DMA map per slot. */
        bzero((void *) adapter->tx_desc_base,
              (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
	tx_buffer = adapter->tx_buffer_area;
	for (i = 0; i < adapter->num_tx_desc; i++) {
		error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
		if (error != 0) {
			printf("em%d: Unable to create TX DMA map\n",
			    adapter->unit);
			goto fail;
		}
		tx_buffer++;
	}

	/* Ring starts empty: next free slot and cleanup pointer at 0. */
        adapter->next_avail_tx_desc = 0;
        adapter->oldest_used_tx_desc = 0;

        /* Set number of descriptors available */
        adapter->num_tx_desc_avail = adapter->num_tx_desc;

        /* Set checksum context */
        adapter->active_checksum_context = OFFLOAD_NONE;
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        return (0);

fail:
	/* em_free_transmit_structures() copes with partial allocation. */
	em_free_transmit_structures(adapter);
	return (error);
}
2284
2285/*********************************************************************
2286 *
2287 *  Enable transmit unit.
2288 *
2289 **********************************************************************/
static void
em_initialize_transmit_unit(struct adapter * adapter)
{
	u_int32_t       reg_tctl;
	u_int32_t       reg_tipg = 0;
	u_int64_t	bus_addr;

         INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = adapter->txdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, TDBAL, (u_int32_t)bus_addr);
	E1000_WRITE_REG(&adapter->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, TDLEN,
			adapter->num_tx_desc *
			sizeof(struct em_tx_desc));

	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&adapter->hw, TDH, 0);
	E1000_WRITE_REG(&adapter->hw, TDT, 0);


	HW_DEBUGOUT2("Base = %x, Length = %x\n",
		     E1000_READ_REG(&adapter->hw, TDBAL),
		     E1000_READ_REG(&adapter->hw, TDLEN));

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (adapter->hw.mac_type) {
	case em_82542_rev2_0:
        case em_82542_rev2_1:
                reg_tipg = DEFAULT_82542_TIPG_IPGT;
                reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
                break;
        default:
		/* 82543 and later use media-specific IPGT values. */
                if (adapter->hw.media_type == em_media_type_fiber)
                        reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
                else
                        reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
                reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
        }

	/* Program inter-packet gap and the TX interrupt delay timers. */
	E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
	E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
	if(adapter->hw.mac_type >= em_82540)
		E1000_WRITE_REG(&adapter->hw, TADV,
		    adapter->tx_abs_int_delay.value);

	/* Program the Transmit Control Register */
	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
	/* Set MULR on 82571 and later parts. */
	if (adapter->hw.mac_type >= em_82571)
		reg_tctl |= E1000_TCTL_MULR;
	/* NOTE(review): tests link_duplex == 1 to select the full-duplex
	 * collision distance; confirm against the duplex values that
	 * em_get_speed_and_duplex() stores in link_duplex. */
	if (adapter->link_duplex == 1) {
		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	} else {
		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	}
	E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;

	/* Request delayed writeback interrupts only when a delay is set. */
	if (adapter->tx_int_delay.value > 0)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	return;
}
2358
2359/*********************************************************************
2360 *
2361 *  Free all transmit related data structures.
2362 *
2363 **********************************************************************/
2364static void
2365em_free_transmit_structures(struct adapter * adapter)
2366{
2367        struct em_buffer   *tx_buffer;
2368        int             i;
2369
2370        INIT_DEBUGOUT("free_transmit_structures: begin");
2371
2372        if (adapter->tx_buffer_area != NULL) {
2373                tx_buffer = adapter->tx_buffer_area;
2374                for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2375                        if (tx_buffer->m_head != NULL) {
2376				bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2377				    BUS_DMASYNC_POSTWRITE);
2378				bus_dmamap_unload(adapter->txtag,
2379				    tx_buffer->map);
2380                                m_freem(tx_buffer->m_head);
2381				tx_buffer->m_head = NULL;
2382                        } else if (tx_buffer->map != NULL)
2383				bus_dmamap_unload(adapter->txtag,
2384				    tx_buffer->map);
2385			if (tx_buffer->map != NULL) {
2386				bus_dmamap_destroy(adapter->txtag,
2387				    tx_buffer->map);
2388				tx_buffer->map = NULL;
2389			}
2390                }
2391        }
2392        if (adapter->tx_buffer_area != NULL) {
2393                free(adapter->tx_buffer_area, M_DEVBUF);
2394                adapter->tx_buffer_area = NULL;
2395        }
2396        if (adapter->txtag != NULL) {
2397                bus_dma_tag_destroy(adapter->txtag);
2398                adapter->txtag = NULL;
2399        }
2400        return;
2401}
2402
2403/*********************************************************************
2404 *
2405 *  The offload context needs to be set when we transfer the first
2406 *  packet of a particular protocol (TCP/UDP). We change the
2407 *  context only if the protocol type changes.
2408 *
2409 **********************************************************************/
2410static void
2411em_transmit_checksum_setup(struct adapter * adapter,
2412			   struct mbuf *mp,
2413			   u_int32_t *txd_upper,
2414			   u_int32_t *txd_lower)
2415{
2416	struct em_context_desc *TXD;
2417	struct em_buffer *tx_buffer;
2418	int curr_txd;
2419
2420	if (mp->m_pkthdr.csum_flags) {
2421
2422		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2423			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2424			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2425			if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
2426				return;
2427			else
2428				adapter->active_checksum_context = OFFLOAD_TCP_IP;
2429
2430		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2431			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2432			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2433			if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
2434				return;
2435			else
2436				adapter->active_checksum_context = OFFLOAD_UDP_IP;
2437		} else {
2438			*txd_upper = 0;
2439			*txd_lower = 0;
2440			return;
2441		}
2442	} else {
2443		*txd_upper = 0;
2444		*txd_lower = 0;
2445		return;
2446	}
2447
2448	/* If we reach this point, the checksum offload context
2449	 * needs to be reset.
2450	 */
2451	curr_txd = adapter->next_avail_tx_desc;
2452	tx_buffer = &adapter->tx_buffer_area[curr_txd];
2453	TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
2454
2455	TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2456	TXD->lower_setup.ip_fields.ipcso =
2457		ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2458	TXD->lower_setup.ip_fields.ipcse =
2459		htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2460
2461	TXD->upper_setup.tcp_fields.tucss =
2462		ETHER_HDR_LEN + sizeof(struct ip);
2463	TXD->upper_setup.tcp_fields.tucse = htole16(0);
2464
2465	if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
2466		TXD->upper_setup.tcp_fields.tucso =
2467			ETHER_HDR_LEN + sizeof(struct ip) +
2468			offsetof(struct tcphdr, th_sum);
2469	} else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
2470		TXD->upper_setup.tcp_fields.tucso =
2471			ETHER_HDR_LEN + sizeof(struct ip) +
2472			offsetof(struct udphdr, uh_sum);
2473	}
2474
2475	TXD->tcp_seg_setup.data = htole32(0);
2476	TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);
2477
2478	tx_buffer->m_head = NULL;
2479
2480	if (++curr_txd == adapter->num_tx_desc)
2481		curr_txd = 0;
2482
2483	adapter->num_tx_desc_avail--;
2484	adapter->next_avail_tx_desc = curr_txd;
2485
2486	return;
2487}
2488
2489/**********************************************************************
2490 *
2491 *  Examine each tx_buffer in the used queue. If the hardware is done
2492 *  processing the packet then free associated resources. The
2493 *  tx_buffer is put back on the free queue.
2494 *
2495 **********************************************************************/
static void
em_clean_transmit_interrupts(struct adapter * adapter)
{
        int i, num_avail;
        struct em_buffer *tx_buffer;
        struct em_tx_desc   *tx_desc;
	struct ifnet   *ifp = adapter->ifp;

	/* Caller must hold the adapter lock. */
	mtx_assert(&adapter->mtx, MA_OWNED);

	/* Entire ring free: nothing to clean. */
        if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
                return;

        num_avail = adapter->num_tx_desc_avail;
        i = adapter->oldest_used_tx_desc;

        tx_buffer = &adapter->tx_buffer_area[i];
        tx_desc = &adapter->tx_desc_base[i];

	/* Pick up the status words the hardware wrote back. */
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_POSTREAD);
        while (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {

                tx_desc->upper.data = 0;
                num_avail++;

		/* Context descriptors have no mbuf attached. */
                if (tx_buffer->m_head) {
			ifp->if_opackets++;
			bus_dmamap_sync(adapter->txtag, tx_buffer->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(adapter->txtag, tx_buffer->map);

                        m_freem(tx_buffer->m_head);
                        tx_buffer->m_head = NULL;
                }

		/* Advance around the ring. */
                if (++i == adapter->num_tx_desc)
                        i = 0;

                tx_buffer = &adapter->tx_buffer_area[i];
                tx_desc = &adapter->tx_desc_base[i];
        }
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        adapter->oldest_used_tx_desc = i;

        /*
         * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack
         * that it is OK to send packets.
         * If there are no pending descriptors, clear the timeout. Otherwise,
         * if some descriptors have been freed, restart the timeout.
         */
        if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                if (num_avail == adapter->num_tx_desc)
                        ifp->if_timer = 0;
                else if (num_avail == adapter->num_tx_desc_avail)
                        ifp->if_timer = EM_TX_TIMEOUT;
        }
        adapter->num_tx_desc_avail = num_avail;
        return;
}
2559
2560/*********************************************************************
2561 *
2562 *  Get a buffer from system mbuf buffer pool.
2563 *
2564 **********************************************************************/
static int
em_get_buf(int i, struct adapter *adapter,
           struct mbuf *nmp)
{
        struct mbuf    *mp = nmp;
        struct em_buffer *rx_buffer;
        struct ifnet   *ifp;
	bus_dma_segment_t segs[1];
	int error, nsegs;

        ifp = adapter->ifp;

        if (mp == NULL) {
		/* No recycled mbuf supplied: allocate a fresh cluster. */
                mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
                if (mp == NULL) {
                        adapter->mbuf_cluster_failed++;
                        return(ENOBUFS);
                }
                mp->m_len = mp->m_pkthdr.len = MCLBYTES;
        } else {
		/* Recycle the caller's cluster: reset length/data/chain. */
                mp->m_len = mp->m_pkthdr.len = MCLBYTES;
                mp->m_data = mp->m_ext.ext_buf;
                mp->m_next = NULL;
        }

	/* For standard-MTU frames, offset the payload so the IP header
	 * lands on a natural alignment boundary. */
        if (ifp->if_mtu <= ETHERMTU) {
                m_adj(mp, ETHER_ALIGN);
        }

        rx_buffer = &adapter->rx_buffer_area[i];

        /*
         * Using memory from the mbuf cluster pool, invoke the
         * bus_dma machinery to arrange the memory mapping.
         */
        error = bus_dmamap_load_mbuf_sg(adapter->rxtag, rx_buffer->map,
	    mp, segs, &nsegs, 0);
        if (error != 0) {
                m_free(mp);
                return(error);
        }
	/* If nsegs is wrong then the stack is corrupt */
	KASSERT(nsegs == 1, ("Too many segments returned!"));
        rx_buffer->m_head = mp;
	/* Hand the cluster's bus address to RX descriptor slot i. */
        adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
        bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);

        return(0);
}
2614
2615/*********************************************************************
2616 *
2617 *  Allocate memory for rx_buffer structures. Since we use one
2618 *  rx_buffer per received packet, the maximum number of rx_buffer's
2619 *  that we'll need is equal to the number of receive descriptors
2620 *  that we've allocated.
2621 *
2622 **********************************************************************/
2623static int
2624em_allocate_receive_structures(struct adapter * adapter)
2625{
2626        int             i, error;
2627        struct em_buffer *rx_buffer;
2628
2629        if (!(adapter->rx_buffer_area =
2630              (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2631                                          adapter->num_rx_desc, M_DEVBUF,
2632                                          M_NOWAIT))) {
2633                printf("em%d: Unable to allocate rx_buffer memory\n",
2634                       adapter->unit);
2635                return(ENOMEM);
2636        }
2637
2638        bzero(adapter->rx_buffer_area,
2639              sizeof(struct em_buffer) * adapter->num_rx_desc);
2640
2641        error = bus_dma_tag_create(NULL,                /* parent */
2642                               1, 0,                    /* alignment, bounds */
2643                               BUS_SPACE_MAXADDR,       /* lowaddr */
2644                               BUS_SPACE_MAXADDR,       /* highaddr */
2645                               NULL, NULL,              /* filter, filterarg */
2646                               MCLBYTES,                /* maxsize */
2647                               1,                       /* nsegments */
2648                               MCLBYTES,                /* maxsegsize */
2649                               BUS_DMA_ALLOCNOW,        /* flags */
2650			       NULL,			/* lockfunc */
2651			       NULL,			/* lockarg */
2652                               &adapter->rxtag);
2653        if (error != 0) {
2654                printf("em%d: em_allocate_receive_structures: "
2655                        "bus_dma_tag_create failed; error %u\n",
2656                       adapter->unit, error);
2657                goto fail;
2658        }
2659
2660        rx_buffer = adapter->rx_buffer_area;
2661        for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2662                error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2663                                          &rx_buffer->map);
2664                if (error != 0) {
2665                        printf("em%d: em_allocate_receive_structures: "
2666                                "bus_dmamap_create failed; error %u\n",
2667                                adapter->unit, error);
2668                        goto fail;
2669                }
2670        }
2671
2672        for (i = 0; i < adapter->num_rx_desc; i++) {
2673                error = em_get_buf(i, adapter, NULL);
2674		if (error != 0)
2675			goto fail;
2676        }
2677        bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2678            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2679
2680        return(0);
2681
2682fail:
2683	em_free_receive_structures(adapter);
2684        return (error);
2685}
2686
2687/*********************************************************************
2688 *
2689 *  Allocate and initialize receive structures.
2690 *
2691 **********************************************************************/
2692static int
2693em_setup_receive_structures(struct adapter * adapter)
2694{
2695	bzero((void *) adapter->rx_desc_base,
2696              (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2697
2698	if (em_allocate_receive_structures(adapter))
2699		return ENOMEM;
2700
2701	/* Setup our descriptor pointers */
2702        adapter->next_rx_desc_to_check = 0;
2703	return(0);
2704}
2705
2706/*********************************************************************
2707 *
2708 *  Enable receive unit.
2709 *
2710 **********************************************************************/
static void
em_initialize_receive_unit(struct adapter * adapter)
{
	u_int32_t       reg_rctl;
	u_int32_t       reg_rxcsum;
	struct ifnet    *ifp;
	u_int64_t	bus_addr;

        INIT_DEBUGOUT("em_initialize_receive_unit: begin");
	ifp = adapter->ifp;

	/* Make sure receives are disabled while setting up the descriptor ring */
	E1000_WRITE_REG(&adapter->hw, RCTL, 0);

	/* Set the Receive Delay Timer Register */
	E1000_WRITE_REG(&adapter->hw, RDTR,
			adapter->rx_int_delay.value | E1000_RDT_FPDB);

	/* Absolute delay and interrupt throttling exist on 82540 and later. */
	if(adapter->hw.mac_type >= em_82540) {
		E1000_WRITE_REG(&adapter->hw, RADV,
		    adapter->rx_abs_int_delay.value);

                /* Set the interrupt throttling rate.  Value is calculated
                 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */
#define MAX_INTS_PER_SEC        8000
#define DEFAULT_ITR             1000000000/(MAX_INTS_PER_SEC * 256)
                E1000_WRITE_REG(&adapter->hw, ITR, DEFAULT_ITR);
        }

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, RDBAL, (u_int32_t)bus_addr);
	E1000_WRITE_REG(&adapter->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
			sizeof(struct em_rx_desc));

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	/* Tail one behind head: the whole ring is available to the NIC. */
	E1000_WRITE_REG(&adapter->hw, RDH, 0);
	E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);

	/* Setup the Receive Control Register */
	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* TBI workaround needs to see bad packets to fix up stats. */
	if (adapter->hw.tbi_compatibility_on == TRUE)
		reg_rctl |= E1000_RCTL_SBP;


	/* Buffer-size bits; sizes above 2048 need the BSEX extension bit. */
	switch (adapter->rx_buffer_len) {
	default:
	case EM_RXBUFFER_2048:
		reg_rctl |= E1000_RCTL_SZ_2048;
		break;
	case EM_RXBUFFER_4096:
		reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_8192:
		reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_16384:
		reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	/* Jumbo frames require Long Packet Enable. */
	if (ifp->if_mtu > ETHERMTU)
		reg_rctl |= E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac_type >= em_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
	}

	/* Enable Receives (RCTL written last, after the ring is set up). */
	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);

	return;
}
2792
2793/*********************************************************************
2794 *
2795 *  Free receive related data structures.
2796 *
2797 **********************************************************************/
2798static void
2799em_free_receive_structures(struct adapter *adapter)
2800{
2801        struct em_buffer   *rx_buffer;
2802        int             i;
2803
2804        INIT_DEBUGOUT("free_receive_structures: begin");
2805
2806        if (adapter->rx_buffer_area != NULL) {
2807                rx_buffer = adapter->rx_buffer_area;
2808                for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2809			if (rx_buffer->m_head != NULL) {
2810				bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
2811				    BUS_DMASYNC_POSTREAD);
2812				bus_dmamap_unload(adapter->rxtag,
2813				    rx_buffer->map);
2814				m_freem(rx_buffer->m_head);
2815				rx_buffer->m_head = NULL;
2816			} else if (rx_buffer->map != NULL)
2817				bus_dmamap_unload(adapter->rxtag,
2818				    rx_buffer->map);
2819                        if (rx_buffer->map != NULL) {
2820				bus_dmamap_destroy(adapter->rxtag,
2821				    rx_buffer->map);
2822				rx_buffer->map = NULL;
2823			}
2824                }
2825        }
2826        if (adapter->rx_buffer_area != NULL) {
2827                free(adapter->rx_buffer_area, M_DEVBUF);
2828                adapter->rx_buffer_area = NULL;
2829        }
2830        if (adapter->rxtag != NULL) {
2831                bus_dma_tag_destroy(adapter->rxtag);
2832                adapter->rxtag = NULL;
2833        }
2834        return;
2835}
2836
2837/*********************************************************************
2838 *
2839 *  This routine executes in interrupt context. It replenishes
2840 *  the mbufs in the descriptor and sends data which has been
2841 *  dma'ed into host memory to upper layer.
2842 *
2843 *  We loop at most count times if count is > 0, or until done if
2844 *  count < 0.
2845 *
2846 *********************************************************************/
static void
em_process_receive_interrupts(struct adapter * adapter, int count)
{
	struct ifnet        *ifp;
	struct mbuf         *mp;
	u_int8_t            accept_frame = 0;
 	u_int8_t            eop = 0;
	u_int16_t           len, desc_len, prev_len_adj;
	int                 i;

	/* Pointer to the receive descriptor being examined. */
	struct em_rx_desc   *current_desc;

	/* Caller must hold the adapter lock (dropped around if_input below). */
	mtx_assert(&adapter->mtx, MA_OWNED);

	ifp = adapter->ifp;
	i = adapter->next_rx_desc_to_check;
        current_desc = &adapter->rx_desc_base[i];
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	/* Nothing completed: Descriptor Done bit not set on the next slot. */
	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
		return;
	}

	/*
	 * Walk completed descriptors.  `count` limits how many full frames
	 * (EOP descriptors) we consume per call; count < 0 means "no limit"
	 * since it never reaches 0 by decrementing.
	 */
	while ((current_desc->status & E1000_RXD_STAT_DD) &&
		    (count != 0) &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		struct mbuf *m = NULL;

		mp = adapter->rx_buffer_area[i].m_head;
		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(adapter->rxtag,
		    adapter->rx_buffer_area[i].map);

		accept_frame = 1;
		prev_len_adj = 0;
                desc_len = le16toh(current_desc->length);
		if (current_desc->status & E1000_RXD_STAT_EOP) {
			/* Last fragment of a frame: strip the 4-byte CRC.
			 * If this fragment is shorter than the CRC, the
			 * remainder must be trimmed off the previous mbuf. */
			count--;
			eop = 1;
			if (desc_len < ETHER_CRC_LEN) {
                                len = 0;
                                prev_len_adj = ETHER_CRC_LEN - desc_len;
                        }
                        else {
                                len = desc_len - ETHER_CRC_LEN;
                        }
		} else {
			eop = 0;
			len = desc_len;
		}

		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			u_int8_t            last_byte;
			u_int32_t           pkt_len = desc_len;

			if (adapter->fmp != NULL)
				pkt_len += adapter->fmp->m_pkthdr.len;

			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);

			/* TBI (fiber) workaround: some "errored" frames that
			 * end in a carrier-extend byte are actually good. */
			if (TBI_ACCEPT(&adapter->hw, current_desc->status,
				       current_desc->errors,
				       pkt_len, last_byte)) {
				em_tbi_adjust_stats(&adapter->hw,
						    &adapter->stats,
						    pkt_len,
						    adapter->hw.mac_addr);
				if (len > 0) len--;
			}
			else {
				accept_frame = 0;
			}
		}

		if (accept_frame) {

			/* Try to replace the mbuf we are consuming.  If that
			 * fails, recycle the old one back into the ring and
			 * drop the whole frame accumulated so far. */
			if (em_get_buf(i, adapter, NULL) == ENOBUFS) {
				adapter->dropped_pkts++;
				em_get_buf(i, adapter, mp);
				if (adapter->fmp != NULL)
					m_freem(adapter->fmp);
				adapter->fmp = NULL;
				adapter->lmp = NULL;
				break;
			}

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (adapter->fmp == NULL) {
				mp->m_pkthdr.len = len;
				adapter->fmp = mp;	 /* Store the first mbuf */
				adapter->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				/*
                                 * Adjust length of previous mbuf in chain if we
                                 * received less than 4 bytes in the last descriptor.
                                 */
				if (prev_len_adj > 0) {
					adapter->lmp->m_len -= prev_len_adj;
					adapter->fmp->m_pkthdr.len -= prev_len_adj;
				}
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

			/* Complete frame: finish the header, record checksum
			 * offload results and hand it to the stack. */
                        if (eop) {
                                adapter->fmp->m_pkthdr.rcvif = ifp;
				ifp->if_ipackets++;
                                em_receive_checksum(adapter, current_desc,
                                                    adapter->fmp);
#ifndef __NO_STRICT_ALIGNMENT
				if (ifp->if_mtu > ETHERMTU &&
				    em_fixup_rx(adapter) != 0)
					goto skip;

#endif
                                if (current_desc->status & E1000_RXD_STAT_VP)
					VLAN_INPUT_TAG(ifp, adapter->fmp,
					    (le16toh(current_desc->special) &
					    E1000_RXD_SPC_VLAN_MASK));
#ifndef __NO_STRICT_ALIGNMENT
skip:
#endif
				m = adapter->fmp;
				adapter->fmp = NULL;
				adapter->lmp = NULL;
                        }
		} else {
			adapter->dropped_pkts++;
			em_get_buf(i, adapter, mp);
			if (adapter->fmp != NULL)
				m_freem(adapter->fmp);
			adapter->fmp = NULL;
			adapter->lmp = NULL;
		}

		/* Zero out the receive descriptors status  */
		current_desc->status = 0;
		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
                E1000_WRITE_REG(&adapter->hw, RDT, i);

                /* Advance our pointers to the next descriptor */
		if (++i == adapter->num_rx_desc)
			i = 0;
		/* Drop the adapter lock around if_input(); the ring index
		 * may have been moved by a reentrant caller, so re-read it. */
		if (m != NULL) {
			adapter->next_rx_desc_to_check = i;
			EM_UNLOCK(adapter);
			(*ifp->if_input)(ifp, m);
			EM_LOCK(adapter);
			i = adapter->next_rx_desc_to_check;
		}
		current_desc = &adapter->rx_desc_base[i];
	}
	adapter->next_rx_desc_to_check = i;
	return;
}
3013
3014#ifndef __NO_STRICT_ALIGNMENT
3015/*
3016 * When jumbo frames are enabled we should realign entire payload on
3017 * architecures with strict alignment. This is serious design mistake of 8254x
3018 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
3019 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
3020 * payload. On architecures without strict alignment restrictions 8254x still
3021 * performs unaligned memory access which would reduce the performance too.
3022 * To avoid copying over an entire frame to align, we allocate a new mbuf and
3023 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
3024 * existing mbuf chain.
3025 *
3026 * Be aware, best performance of the 8254x is achived only when jumbo frame is
3027 * not used at all on architectures with strict alignment.
3028 */
static int
em_fixup_rx(struct adapter *adapter)
{
	struct mbuf *m, *n;
	int error;

	/*
	 * Realign a jumbo-frame payload (see the block comment above).
	 * Returns 0 on success, ENOMEM if a header mbuf could not be
	 * allocated (the frame is dropped and adapter->fmp cleared).
	 */
	error = 0;
	m = adapter->fmp;
	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		/* Room in the cluster: slide the data up by the header
		 * length so the payload lands aligned.  The regions
		 * overlap, which bcopy handles. */
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
	} else {
		/* No slack: copy just the ethernet header into a fresh
		 * mbuf and prepend it, leaving the payload aligned. */
		MGETHDR(n, M_DONTWAIT, MT_DATA);
		if (n != NULL) {
			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
			m->m_data += ETHER_HDR_LEN;
			m->m_len -= ETHER_HDR_LEN;
			n->m_len = ETHER_HDR_LEN;
			/* Transfer the pkthdr (len, rcvif, csum flags). */
			M_MOVE_PKTHDR(n, m);
			n->m_next = m;
			adapter->fmp = n;
		} else {
			adapter->dropped_pkts++;
			m_freem(adapter->fmp);
			adapter->fmp = NULL;
			error = ENOMEM;
		}
	}

	return (error);
}
3060#endif
3061
3062/*********************************************************************
3063 *
3064 *  Verify that the hardware indicated that the checksum is valid.
3065 *  Inform the stack about the status of checksum so that stack
3066 *  doesn't spend time verifying the checksum.
3067 *
3068 *********************************************************************/
3069static void
3070em_receive_checksum(struct adapter *adapter,
3071		    struct em_rx_desc *rx_desc,
3072		    struct mbuf *mp)
3073{
3074	/* 82543 or newer only */
3075	if ((adapter->hw.mac_type < em_82543) ||
3076	    /* Ignore Checksum bit is set */
3077	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3078		mp->m_pkthdr.csum_flags = 0;
3079		return;
3080	}
3081
3082	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3083		/* Did it pass? */
3084		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3085			/* IP Checksum Good */
3086			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3087			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3088
3089		} else {
3090			mp->m_pkthdr.csum_flags = 0;
3091		}
3092	}
3093
3094	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3095		/* Did it pass? */
3096		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3097			mp->m_pkthdr.csum_flags |=
3098			(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3099			mp->m_pkthdr.csum_data = htons(0xffff);
3100		}
3101	}
3102
3103	return;
3104}
3105
3106
3107static void
3108em_enable_vlans(struct adapter *adapter)
3109{
3110	uint32_t ctrl;
3111
3112	E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
3113
3114	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3115	ctrl |= E1000_CTRL_VME;
3116	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3117
3118	return;
3119}
3120
3121static void
3122em_disable_vlans(struct adapter *adapter)
3123{
3124	uint32_t ctrl;
3125
3126	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3127	ctrl &= ~E1000_CTRL_VME;
3128	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3129
3130	return;
3131}
3132
3133static void
3134em_enable_intr(struct adapter * adapter)
3135{
3136	E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
3137	return;
3138}
3139
3140static void
3141em_disable_intr(struct adapter *adapter)
3142{
3143	/*
3144	 * The first version of 82542 had an errata where when link was forced it
3145	 * would stay up even up even if the cable was disconnected.  Sequence errors
3146	 * were used to detect the disconnect and then the driver would unforce the link.
3147	 * This code in the in the ISR.  For this to work correctly the Sequence error
3148	 * interrupt had to be enabled all the time.
3149	 */
3150
3151	if (adapter->hw.mac_type == em_82542_rev2_0)
3152	    E1000_WRITE_REG(&adapter->hw, IMC,
3153	        (0xffffffff & ~E1000_IMC_RXSEQ));
3154	else
3155	    E1000_WRITE_REG(&adapter->hw, IMC,
3156	        0xffffffff);
3157	return;
3158}
3159
3160static int
3161em_is_valid_ether_addr(u_int8_t *addr)
3162{
3163        char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3164
3165        if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3166                return (FALSE);
3167        }
3168
3169        return(TRUE);
3170}
3171
3172void
3173em_write_pci_cfg(struct em_hw *hw,
3174		      uint32_t reg,
3175		      uint16_t *value)
3176{
3177	pci_write_config(((struct em_osdep *)hw->back)->dev, reg,
3178			 *value, 2);
3179}
3180
3181void
3182em_read_pci_cfg(struct em_hw *hw, uint32_t reg,
3183		     uint16_t *value)
3184{
3185	*value = pci_read_config(((struct em_osdep *)hw->back)->dev,
3186				 reg, 2);
3187	return;
3188}
3189
3190void
3191em_pci_set_mwi(struct em_hw *hw)
3192{
3193        pci_write_config(((struct em_osdep *)hw->back)->dev,
3194                         PCIR_COMMAND,
3195                         (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
3196        return;
3197}
3198
3199void
3200em_pci_clear_mwi(struct em_hw *hw)
3201{
3202        pci_write_config(((struct em_osdep *)hw->back)->dev,
3203                         PCIR_COMMAND,
3204                         (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
3205        return;
3206}
3207
3208/*********************************************************************
3209* 82544 Coexistence issue workaround.
3210*    There are 2 issues.
3211*       1. Transmit Hang issue.
3212*    To detect this issue, following equation can be used...
3213*          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3214*          If SUM[3:0] is in between 1 to 4, we will have this issue.
3215*
3216*       2. DAC issue.
3217*    To detect this issue, following equation can be used...
3218*          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3219*          If SUM[3:0] is in between 9 to c, we will have this issue.
3220*
3221*
3222*    WORKAROUND:
3223*          Make sure we do not have ending address as 1,2,3,4(Hang) or 9,a,b,c (DAC)
3224*
3225*** *********************************************************************/
3226static u_int32_t
3227em_fill_descriptors (bus_addr_t address,
3228                              u_int32_t length,
3229                              PDESC_ARRAY desc_array)
3230{
3231        /* Since issue is sensitive to length and address.*/
3232        /* Let us first check the address...*/
3233        u_int32_t safe_terminator;
3234        if (length <= 4) {
3235                desc_array->descriptor[0].address = address;
3236                desc_array->descriptor[0].length = length;
3237                desc_array->elements = 1;
3238                return desc_array->elements;
3239        }
3240        safe_terminator = (u_int32_t)((((u_int32_t)address & 0x7) + (length & 0xF)) & 0xF);
3241        /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
3242        if (safe_terminator == 0   ||
3243        (safe_terminator > 4   &&
3244        safe_terminator < 9)   ||
3245        (safe_terminator > 0xC &&
3246        safe_terminator <= 0xF)) {
3247                desc_array->descriptor[0].address = address;
3248                desc_array->descriptor[0].length = length;
3249                desc_array->elements = 1;
3250                return desc_array->elements;
3251        }
3252
3253        desc_array->descriptor[0].address = address;
3254        desc_array->descriptor[0].length = length - 4;
3255        desc_array->descriptor[1].address = address + (length - 4);
3256        desc_array->descriptor[1].length = 4;
3257        desc_array->elements = 2;
3258        return desc_array->elements;
3259}
3260
3261/**********************************************************************
3262 *
3263 *  Update the board statistics counters.
3264 *
3265 **********************************************************************/
static void
em_update_stats_counters(struct adapter *adapter)
{
	struct ifnet   *ifp;

	/*
	 * Fold the hardware statistics registers into the software
	 * counters and refresh the ifnet error/collision totals.
	 * The E1000 statistics registers are clear-on-read, so each
	 * read is accumulated with "+=".
	 */

	/* Symbol/sequence error counters are only meaningful on copper
	 * or when the fiber link is up. */
	if(adapter->hw.media_type == em_media_type_copper ||
	   (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
		adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
	}
	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
	adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);

	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
	adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
	adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
	adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
	adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
	adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);

	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
	adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);

	adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
	adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
	adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
	adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);

	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);

	/* These counters only exist on 82543 and later parts. */
	if (adapter->hw.mac_type >= em_82543) {
		adapter->stats.algnerrc +=
		E1000_READ_REG(&adapter->hw, ALGNERRC);
		adapter->stats.rxerrc +=
		E1000_READ_REG(&adapter->hw, RXERRC);
		adapter->stats.tncrs +=
		E1000_READ_REG(&adapter->hw, TNCRS);
		adapter->stats.cexterr +=
		E1000_READ_REG(&adapter->hw, CEXTERR);
		adapter->stats.tsctc +=
		E1000_READ_REG(&adapter->hw, TSCTC);
		adapter->stats.tsctfc +=
		E1000_READ_REG(&adapter->hw, TSCTFC);
	}
	ifp = adapter->ifp;

	ifp->if_collisions = adapter->stats.colc;

	/* Rx Errors */
	ifp->if_ierrors =
	adapter->dropped_pkts +
	adapter->stats.rxerrc +
	adapter->stats.crcerrs +
	adapter->stats.algnerrc +
	adapter->stats.rlec +
	adapter->stats.mpc + adapter->stats.cexterr;

	/* Tx Errors */
	ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol +
	    adapter->watchdog_events;

}
3364
3365
3366/**********************************************************************
3367 *
3368 *  This routine is called only when em_display_debug_stats is enabled.
3369 *  This routine provides a way to take a look at important statistics
3370 *  maintained by the driver and hardware.
3371 *
3372 **********************************************************************/
static void
em_print_debug_info(struct adapter *adapter)
{
	int unit = adapter->unit;
	uint8_t *hw_addr = adapter->hw.hw_addr;

	/* Dump key register values and driver-internal counters to the
	 * console; invoked via the debug_info sysctl handler. */
	printf("em%d: Adapter hardware address = %p \n", unit, hw_addr);
	printf("em%d: CTRL = 0x%x RCTL = 0x%x \n", unit,
	    E1000_READ_REG(&adapter->hw, CTRL),
	    E1000_READ_REG(&adapter->hw, RCTL));
	/* PBA register: high word = Tx packet buffer KB, low word = Rx. */
	printf("em%d: Packet buffer = Tx=%dk Rx=%dk \n", unit,
	    ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff0000) >> 16),\
	    (E1000_READ_REG(&adapter->hw, PBA) & 0xffff) );
	printf("em%d: Flow control watermarks high = %d low = %d\n", unit,
	    adapter->hw.fc_high_water,
	    adapter->hw.fc_low_water);
	printf("em%d: tx_int_delay = %d, tx_abs_int_delay = %d\n", unit,
	    E1000_READ_REG(&adapter->hw, TIDV),
	    E1000_READ_REG(&adapter->hw, TADV));
	printf("em%d: rx_int_delay = %d, rx_abs_int_delay = %d\n", unit,
	    E1000_READ_REG(&adapter->hw, RDTR),
	    E1000_READ_REG(&adapter->hw, RADV));
	printf("em%d: fifo workaround = %lld, fifo_reset_count = %lld\n",
	    unit, (long long)adapter->tx_fifo_wrk_cnt,
	    (long long)adapter->tx_fifo_reset_cnt);
	printf("em%d: hw tdh = %d, hw tdt = %d\n", unit,
	    E1000_READ_REG(&adapter->hw, TDH),
	    E1000_READ_REG(&adapter->hw, TDT));
	printf("em%d: Num Tx descriptors avail = %d\n", unit,
	    adapter->num_tx_desc_avail);
	printf("em%d: Tx Descriptors not avail1 = %ld\n", unit,
	    adapter->no_tx_desc_avail1);
	printf("em%d: Tx Descriptors not avail2 = %ld\n", unit,
	    adapter->no_tx_desc_avail2);
	printf("em%d: Std mbuf failed = %ld\n", unit,
	    adapter->mbuf_alloc_failed);
	printf("em%d: Std mbuf cluster failed = %ld\n", unit,
	    adapter->mbuf_cluster_failed);
	printf("em%d: Driver dropped packets = %ld\n", unit,
	    adapter->dropped_pkts);

	return;
}
3416
static void
em_print_hw_stats(struct adapter *adapter)
{
        int unit = adapter->unit;

	/* Dump the accumulated hardware statistics (collected by
	 * em_update_stats_counters) to the console; invoked via the
	 * stats sysctl handler. */
        printf("em%d: Excessive collisions = %lld\n", unit,
               (long long)adapter->stats.ecol);
        printf("em%d: Symbol errors = %lld\n", unit,
               (long long)adapter->stats.symerrs);
        printf("em%d: Sequence errors = %lld\n", unit,
               (long long)adapter->stats.sec);
        printf("em%d: Defer count = %lld\n", unit,
               (long long)adapter->stats.dc);

        printf("em%d: Missed Packets = %lld\n", unit,
               (long long)adapter->stats.mpc);
        printf("em%d: Receive No Buffers = %lld\n", unit,
               (long long)adapter->stats.rnbc);
        printf("em%d: Receive length errors = %lld\n", unit,
               (long long)adapter->stats.rlec);
        printf("em%d: Receive errors = %lld\n", unit,
               (long long)adapter->stats.rxerrc);
        printf("em%d: Crc errors = %lld\n", unit,
               (long long)adapter->stats.crcerrs);
        printf("em%d: Alignment errors = %lld\n", unit,
               (long long)adapter->stats.algnerrc);
        printf("em%d: Carrier extension errors = %lld\n", unit,
               (long long)adapter->stats.cexterr);
	printf("em%d: RX overruns = %ld\n", unit, adapter->rx_overruns);
	printf("em%d: watchdog timeouts = %ld\n", unit,
		adapter->watchdog_events);

        printf("em%d: XON Rcvd = %lld\n", unit,
               (long long)adapter->stats.xonrxc);
        printf("em%d: XON Xmtd = %lld\n", unit,
               (long long)adapter->stats.xontxc);
        printf("em%d: XOFF Rcvd = %lld\n", unit,
               (long long)adapter->stats.xoffrxc);
        printf("em%d: XOFF Xmtd = %lld\n", unit,
               (long long)adapter->stats.xofftxc);

        printf("em%d: Good Packets Rcvd = %lld\n", unit,
               (long long)adapter->stats.gprc);
        printf("em%d: Good Packets Xmtd = %lld\n", unit,
               (long long)adapter->stats.gptc);

        return;
}
3465
3466static int
3467em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3468{
3469        int error;
3470        int result;
3471        struct adapter *adapter;
3472
3473        result = -1;
3474        error = sysctl_handle_int(oidp, &result, 0, req);
3475
3476        if (error || !req->newptr)
3477                return (error);
3478
3479        if (result == 1) {
3480                adapter = (struct adapter *)arg1;
3481                em_print_debug_info(adapter);
3482        }
3483
3484        return error;
3485}
3486
3487
3488static int
3489em_sysctl_stats(SYSCTL_HANDLER_ARGS)
3490{
3491        int error;
3492        int result;
3493        struct adapter *adapter;
3494
3495        result = -1;
3496        error = sysctl_handle_int(oidp, &result, 0, req);
3497
3498        if (error || !req->newptr)
3499                return (error);
3500
3501        if (result == 1) {
3502                adapter = (struct adapter *)arg1;
3503                em_print_hw_stats(adapter);
3504        }
3505
3506        return error;
3507}
3508
static int
em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
{
	struct em_int_delay_info *info;
	struct adapter *adapter;
	u_int32_t regval;
	int error;
	int usecs;
	int ticks;

	/*
	 * Sysctl handler for the interrupt-delay tunables registered by
	 * em_add_int_delay_sysctl().  Values are exposed in microseconds
	 * and converted to the hardware's tick units before being written
	 * into the low 16 bits of the delay register.
	 */
	info = (struct em_int_delay_info *)arg1;
	usecs = info->value;
	error = sysctl_handle_int(oidp, &usecs, 0, req);
	if (error != 0 || req->newptr == NULL)
		return error;
	/* Register field is 16 bits wide; reject out-of-range values. */
	if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
		return EINVAL;
	info->value = usecs;
	ticks = E1000_USECS_TO_TICKS(usecs);

	adapter = info->adapter;

	/* Read-modify-write under the adapter lock. */
	EM_LOCK(adapter);
	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
	regval = (regval & ~0xffff) | (ticks & 0xffff);
	/* Handle a few special cases. */
	switch (info->offset) {
	case E1000_RDTR:
	case E1000_82542_RDTR:
		/* Preserve the flush-partial-descriptor-block bit. */
		regval |= E1000_RDT_FPDB;
		break;
	case E1000_TIDV:
	case E1000_82542_TIDV:
		/* A zero Tx delay also disables the per-descriptor
		 * interrupt-delay-enable command bit. */
		if (ticks == 0) {
			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
			/* Don't write 0 into the TIDV register. */
			regval++;
		} else
			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
		break;
	}
	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
	EM_UNLOCK(adapter);
	return 0;
}
3554
static void
em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
    const char *description, struct em_int_delay_info *info,
    int offset, int value)
{
	/*
	 * Register a read-write integer sysctl under this device's tree
	 * that tunes one interrupt-delay register.  `offset` selects the
	 * hardware register and `value` is the initial setting (usecs);
	 * reads and writes are serviced by em_sysctl_int_delay().
	 */
	info->adapter = adapter;
	info->offset = offset;
	info->value = value;
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
	    info, 0, em_sysctl_int_delay, "I", description);
}
3568