/* if_em.c, revision 154291 */
/**************************************************************************

Copyright (c) 2001-2005, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

/*$FreeBSD: head/sys/dev/em/if_em.c 154291 2006-01-13 08:18:04Z scottl $*/
35
/* opt_device_polling.h provides DEVICE_POLLING when built in a kernel
 * config; only available when kernel option headers are generated. */
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <dev/em/if_em.h>
41
/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
int             em_display_debug_stats = 0;

/*********************************************************************
 *  Driver version (appended to the probe description string)
 *********************************************************************/

char em_driver_version[] = "Version - 3.2.18";
52
53
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into em_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static em_vendor_info_t em_vendor_info_array[] =
{
        /* Intel(R) PRO/1000 Network Connection */
        { 0x8086, E1000_DEV_ID_82540EM,             PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82540EM_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82540EP,             PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82540EP_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82540EP_LP,          PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82541EI,             PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541ER,             PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541ER_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541EI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541GI,             PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541GI_LF,          PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541GI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82542,               PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82543GC_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82543GC_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82544EI_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82544EI_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82544GC_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82544GC_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82545EM_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82545EM_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82545GM_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82545GM_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82545GM_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82546EB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546EB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_PCIE,        PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82547EI,             PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82547EI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82547GI,             PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82571EB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82571EB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82571EB_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82572EI_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82572EI_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82572EI_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82573E,              PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82573E_IAMT,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82573L,              PCI_ANY_ID, PCI_ANY_ID, 0},

        /* required last entry */
        { 0, 0, 0, 0, 0}
};
125
/*********************************************************************
 *  Table of branding strings for all supported NICs.
 *  Indexed by the last (String Index) field of em_vendor_info_array.
 *********************************************************************/

static char *em_strings[] = {
	"Intel(R) PRO/1000 Network Connection"
};
133
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
/* newbus device interface methods */
static int  em_probe(device_t);
static int  em_attach(device_t);
static int  em_detach(device_t);
static int  em_shutdown(device_t);
static int  em_suspend(device_t);
static int  em_resume(device_t);
/* interrupt handlers: legacy and fast (taskqueue-deferred) variants */
static void em_intr(void *);
#ifndef NO_EM_FASTINTR
static void em_intr_fast(void *);
#endif
/* ifnet entry points */
static void em_start(struct ifnet *);
static void em_start_locked(struct ifnet *ifp);
static int  em_ioctl(struct ifnet *, u_long, caddr_t);
static void em_watchdog(struct ifnet *);
static void em_init(void *);
static void em_init_locked(struct adapter *);
static void em_stop(void *);
static void em_media_status(struct ifnet *, struct ifmediareq *);
static int  em_media_change(struct ifnet *);
/* resource allocation / hardware bring-up helpers */
static void em_identify_hardware(struct adapter *);
static int  em_allocate_pci_resources(struct adapter *);
static int  em_allocate_intr(struct adapter *);
static void em_free_intr(struct adapter *);
static void em_free_pci_resources(struct adapter *);
static void em_local_timer(void *);
static int  em_hardware_init(struct adapter *);
static void em_setup_interface(device_t, struct adapter *);
/* TX/RX ring setup and teardown */
static int  em_setup_transmit_structures(struct adapter *);
static void em_initialize_transmit_unit(struct adapter *);
static int  em_setup_receive_structures(struct adapter *);
static void em_initialize_receive_unit(struct adapter *);
static void em_enable_intr(struct adapter *);
static void em_disable_intr(struct adapter *);
static void em_free_transmit_structures(struct adapter *);
static void em_free_receive_structures(struct adapter *);
static void em_update_stats_counters(struct adapter *);
static void em_clean_transmit_interrupts(struct adapter *);
static int  em_allocate_receive_structures(struct adapter *);
static int  em_allocate_transmit_structures(struct adapter *);
static int em_process_receive_interrupts(struct adapter *, int);
#ifndef __NO_STRICT_ALIGNMENT
static int  em_fixup_rx(struct adapter *);
#endif
static void em_receive_checksum(struct adapter *,
				struct em_rx_desc *,
				struct mbuf *);
static void em_transmit_checksum_setup(struct adapter *,
				       struct mbuf *,
				       u_int32_t *,
				       u_int32_t *);
static void em_set_promisc(struct adapter *);
static void em_disable_promisc(struct adapter *);
static void em_set_multi(struct adapter *);
static void em_print_hw_stats(struct adapter *);
static void em_print_link_status(struct adapter *);
static int  em_get_buf(int i, struct adapter *,
		       struct mbuf *);
static void em_enable_vlans(struct adapter *);
static void em_disable_vlans(struct adapter *);
static int  em_encap(struct adapter *, struct mbuf **);
static void em_smartspeed(struct adapter *);
/* 82547 TX FIFO workaround helpers */
static int  em_82547_fifo_workaround(struct adapter *, int);
static void em_82547_update_fifo_head(struct adapter *, int);
static int  em_82547_tx_fifo_reset(struct adapter *);
static void em_82547_move_tail(void *arg);
static void em_82547_move_tail_locked(struct adapter *);
/* busdma helpers and sysctl handlers */
static int  em_dma_malloc(struct adapter *, bus_size_t,
			  struct em_dma_alloc *, int);
static void em_dma_free(struct adapter *, struct em_dma_alloc *);
static void em_print_debug_info(struct adapter *);
static int  em_is_valid_ether_addr(u_int8_t *);
static int  em_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int  em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static u_int32_t em_fill_descriptors (bus_addr_t address,
				      u_int32_t length,
				      PDESC_ARRAY desc_array);
static int  em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static void em_add_int_delay_sysctl(struct adapter *, const char *,
				    const char *, struct em_int_delay_info *,
				    int, int);
#ifndef NO_EM_FASTINTR
static void em_add_int_process_limit(struct adapter *, const char *,
				     const char *, int *, int);
static void em_handle_rxtx(void *context, int pending);
static void em_handle_link(void *context, int pending);
#endif
#ifdef DEVICE_POLLING
static poll_handler_t em_poll;
#endif
226
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t em_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, em_probe),
	DEVMETHOD(device_attach, em_attach),
	DEVMETHOD(device_detach, em_detach),
	DEVMETHOD(device_shutdown, em_shutdown),
	DEVMETHOD(device_suspend, em_suspend),
	DEVMETHOD(device_resume, em_resume),
	{0, 0}	/* required terminator */
};

/* softc is struct adapter; newbus allocates/zeroes it per device */
static driver_t em_driver = {
	"em", em_methods, sizeof(struct adapter ),
};

static devclass_t em_devclass;
DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
MODULE_DEPEND(em, pci, 1, 1, 1);
MODULE_DEPEND(em, ether, 1, 1, 1);
250
/*********************************************************************
 *  Tunable default values.
 *********************************************************************/

/* Interrupt-delay registers count in 1.024 usec units; these macros
 * round-convert between register ticks and microseconds. */
#define E1000_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
#define E1000_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)

static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
static int em_rxd = EM_DEFAULT_RXD;	/* ring sizes, validated in attach */
static int em_txd = EM_DEFAULT_TXD;

/* loader.conf-settable knobs */
TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rxd", &em_rxd);
TUNABLE_INT("hw.em.txd", &em_txd);
#ifndef NO_EM_FASTINTR
/* cap on RX packets handled per taskqueue invocation */
static int em_rx_process_limit = 100;
TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
#endif
275
276/*********************************************************************
277 *  Device identification routine
278 *
279 *  em_probe determines if the driver should be loaded on
280 *  adapter based on PCI vendor/device id of the adapter.
281 *
282 *  return BUS_PROBE_DEFAULT on success, positive on failure
283 *********************************************************************/
284
285static int
286em_probe(device_t dev)
287{
288	em_vendor_info_t *ent;
289
290	u_int16_t       pci_vendor_id = 0;
291	u_int16_t       pci_device_id = 0;
292	u_int16_t       pci_subvendor_id = 0;
293	u_int16_t       pci_subdevice_id = 0;
294	char            adapter_name[60];
295
296	INIT_DEBUGOUT("em_probe: begin");
297
298	pci_vendor_id = pci_get_vendor(dev);
299	if (pci_vendor_id != EM_VENDOR_ID)
300		return(ENXIO);
301
302	pci_device_id = pci_get_device(dev);
303	pci_subvendor_id = pci_get_subvendor(dev);
304	pci_subdevice_id = pci_get_subdevice(dev);
305
306	ent = em_vendor_info_array;
307	while (ent->vendor_id != 0) {
308		if ((pci_vendor_id == ent->vendor_id) &&
309		    (pci_device_id == ent->device_id) &&
310
311		    ((pci_subvendor_id == ent->subvendor_id) ||
312		     (ent->subvendor_id == PCI_ANY_ID)) &&
313
314		    ((pci_subdevice_id == ent->subdevice_id) ||
315		     (ent->subdevice_id == PCI_ANY_ID))) {
316			sprintf(adapter_name, "%s %s",
317				em_strings[ent->index],
318				em_driver_version);
319			device_set_desc_copy(dev, adapter_name);
320			return(BUS_PROBE_DEFAULT);
321		}
322		ent++;
323	}
324
325	return(ENXIO);
326}
327
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
em_attach(device_t dev)
{
	struct adapter * adapter;
	int             tsize, rsize;
	int		error = 0;

	INIT_DEBUGOUT("em_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	if (!(adapter = device_get_softc(dev))) {
		printf("em: adapter structure allocation failed\n");
		return(ENOMEM);
	}
	bzero(adapter, sizeof(struct adapter ));
	adapter->dev = dev;
	adapter->osdep.dev = dev;
	adapter->unit = device_get_unit(dev);
	EM_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL stuff: per-device debug_info and stats trigger nodes */
        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                        OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
                        (void *)adapter, 0,
                        em_sysctl_debug_info, "I", "Debug Information");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                        OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
                        (void *)adapter, 0,
                        em_sysctl_stats, "I", "Statistics");

	/* MPSAFE callouts; handlers take the adapter lock themselves */
	callout_init(&adapter->timer, CALLOUT_MPSAFE);
	callout_init(&adapter->tx_fifo_timer, CALLOUT_MPSAFE);

	/* Determine hardware revision */
	em_identify_hardware(adapter);

	/* Set up some sysctls for the tunable interrupt delays */
	em_add_int_delay_sysctl(adapter, "rx_int_delay",
	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
	    E1000_REG_OFFSET(&adapter->hw, RDTR), em_rx_int_delay_dflt);
	em_add_int_delay_sysctl(adapter, "tx_int_delay",
	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
	    E1000_REG_OFFSET(&adapter->hw, TIDV), em_tx_int_delay_dflt);
	/* Absolute delay limit registers (RADV/TADV) exist on 82540+ only */
	if (adapter->hw.mac_type >= em_82540) {
		em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
		    "receive interrupt delay limit in usecs",
		    &adapter->rx_abs_int_delay,
		    E1000_REG_OFFSET(&adapter->hw, RADV),
		    em_rx_abs_int_delay_dflt);
		em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
		    "transmit interrupt delay limit in usecs",
		    &adapter->tx_abs_int_delay,
		    E1000_REG_OFFSET(&adapter->hw, TADV),
		    em_tx_abs_int_delay_dflt);
	}

	/* Sysctls for limiting the amount of work done in the taskqueue */
#ifndef NO_EM_FASTINTR
	em_add_int_process_limit(adapter, "rx_processing_limit",
	    "max number of rx packets to process", &adapter->rx_process_limit,
	    em_rx_process_limit);
#endif

	/*
	 * Validate number of transmit and receive descriptors. It
	 * must not exceed hardware maximum, and must be multiple
	 * of E1000_DBA_ALIGN.  Invalid tunables fall back to the
	 * defaults with a console warning rather than failing attach.
	 */
	if (((em_txd * sizeof(struct em_tx_desc)) % E1000_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac_type >= em_82544 && em_txd > EM_MAX_TXD) ||
	    (adapter->hw.mac_type < em_82544 && em_txd > EM_MAX_TXD_82543) ||
	    (em_txd < EM_MIN_TXD)) {
		printf("em%d: Using %d TX descriptors instead of %d!\n",
		    adapter->unit, EM_DEFAULT_TXD, em_txd);
		adapter->num_tx_desc = EM_DEFAULT_TXD;
	} else
		adapter->num_tx_desc = em_txd;
	if (((em_rxd * sizeof(struct em_rx_desc)) % E1000_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac_type >= em_82544 && em_rxd > EM_MAX_RXD) ||
	    (adapter->hw.mac_type < em_82544 && em_rxd > EM_MAX_RXD_82543) ||
	    (em_rxd < EM_MIN_RXD)) {
		printf("em%d: Using %d RX descriptors instead of %d!\n",
		    adapter->unit, EM_DEFAULT_RXD, em_rxd);
		adapter->num_rx_desc = EM_DEFAULT_RXD;
	} else
		adapter->num_rx_desc = em_rxd;

	/* Link negotiation and receive buffer defaults */
        adapter->hw.autoneg = DO_AUTO_NEG;
        adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
        adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
        adapter->hw.tbi_compatibility_en = TRUE;
        adapter->rx_buffer_len = EM_RXBUFFER_2048;

	adapter->hw.phy_init_script = 1;
	adapter->hw.phy_reset_disable = FALSE;

#ifndef EM_MASTER_SLAVE
	adapter->hw.master_slave = em_ms_hw_default;
#else
	adapter->hw.master_slave = EM_MASTER_SLAVE;
#endif
	/*
	 * Set the max frame size assuming standard ethernet
	 * sized frames
	 */
	adapter->hw.max_frame_size =
		ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	adapter->hw.min_frame_size =
		MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;

	/*
	 * This controls when hardware reports transmit completion
	 * status.
	 */
	adapter->hw.report_tx_early = 1;

	if (em_allocate_pci_resources(adapter)) {
		printf("em%d: Allocation of PCI resources failed\n",
		       adapter->unit);
                error = ENXIO;
                goto err_pci;
	}


	/* Initialize eeprom parameters */
        em_init_eeprom_params(&adapter->hw);

	/* Descriptor rings must be sized to an E1000_DBA_ALIGN multiple */
	tsize = roundup2(adapter->num_tx_desc * sizeof(struct em_tx_desc),
	    E1000_DBA_ALIGN);

	/* Allocate Transmit Descriptor ring */
        if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
                printf("em%d: Unable to allocate tx_desc memory\n",
                       adapter->unit);
		error = ENOMEM;
                goto err_tx_desc;
        }
        adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;

	rsize = roundup2(adapter->num_rx_desc * sizeof(struct em_rx_desc),
	    E1000_DBA_ALIGN);

	/* Allocate Receive Descriptor ring */
        if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
                printf("em%d: Unable to allocate rx_desc memory\n",
                        adapter->unit);
		error = ENOMEM;
                goto err_rx_desc;
        }
        adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		printf("em%d: Unable to initialize the hardware\n",
		       adapter->unit);
		error = EIO;
                goto err_hw_init;
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (em_read_mac_addr(&adapter->hw) < 0) {
		printf("em%d: EEPROM read error while reading mac address\n",
		       adapter->unit);
		error = EIO;
                goto err_mac_addr;
	}

	if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
                printf("em%d: Invalid mac address\n", adapter->unit);
                error = EIO;
                goto err_mac_addr;
        }

	/* Setup OS specific network interface */
	em_setup_interface(dev, adapter);

	em_allocate_intr(adapter);

	/* Initialize statistics */
	em_clear_hw_cntrs(&adapter->hw);
	em_update_stats_counters(adapter);
	adapter->hw.get_link_status = 1;
	em_check_for_link(&adapter->hw);

	if (bootverbose) {
		/* Print the link status */
		if (adapter->link_active == 1) {
			em_get_speed_and_duplex(&adapter->hw,
			    &adapter->link_speed, &adapter->link_duplex);
			printf("em%d:  Speed:%d Mbps  Duplex:%s\n",
			       adapter->unit,
			       adapter->link_speed,
			       adapter->link_duplex == FULL_DUPLEX ? "Full" :
				"Half");
		} else
			printf("em%d:  Speed:N/A  Duplex:N/A\n",
			    adapter->unit);
	}

	/* Identify 82544 on PCIX (needs special TX handling elsewhere) */
        em_get_bus_info(&adapter->hw);
        if(adapter->hw.bus_type == em_bus_type_pcix &&
           adapter->hw.mac_type == em_82544) {
                adapter->pcix_82544 = TRUE;
        }
        else {
                adapter->pcix_82544 = FALSE;
        }
	INIT_DEBUGOUT("em_attach: end");
	return(0);

	/* Error unwind, in reverse order of acquisition.
	 * NOTE(review): the err_pci label frees the interrupt and PCI
	 * resources even when allocation never happened; presumably
	 * em_free_intr()/em_free_pci_resources() tolerate NULL
	 * resources -- confirm against their definitions. */
err_mac_addr:
err_hw_init:
        em_dma_free(adapter, &adapter->rxdma);
err_rx_desc:
        em_dma_free(adapter, &adapter->txdma);
err_tx_desc:
err_pci:
	em_free_intr(adapter);
        em_free_pci_resources(adapter);
	EM_LOCK_DESTROY(adapter);
        return(error);

}
566
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
em_detach(device_t dev)
{
	struct adapter * adapter = device_get_softc(dev);
	struct ifnet   *ifp = adapter->ifp;

	INIT_DEBUGOUT("em_detach: begin");

#ifdef DEVICE_POLLING
	/* Polling must be deregistered before the ifnet goes away */
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* Release the interrupt first so no handler races the teardown */
	em_free_intr(adapter);
	EM_LOCK(adapter);
	/* in_detach makes em_ioctl() return immediately from here on */
	adapter->in_detach = 1;
	em_stop(adapter);
	em_phy_hw_reset(&adapter->hw);
	EM_UNLOCK(adapter);
        ether_ifdetach(adapter->ifp);

	em_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(ifp);

	/* Free Transmit Descriptor ring */
        if (adapter->tx_desc_base) {
                em_dma_free(adapter, &adapter->txdma);
                adapter->tx_desc_base = NULL;
        }

        /* Free Receive Descriptor ring */
        if (adapter->rx_desc_base) {
                em_dma_free(adapter, &adapter->rxdma);
                adapter->rx_desc_base = NULL;
        }

	EM_LOCK_DESTROY(adapter);

	return(0);
}
618
619/*********************************************************************
620 *
621 *  Shutdown entry point
622 *
623 **********************************************************************/
624
625static int
626em_shutdown(device_t dev)
627{
628	struct adapter *adapter = device_get_softc(dev);
629	EM_LOCK(adapter);
630	em_stop(adapter);
631	EM_UNLOCK(adapter);
632	return(0);
633}
634
635/*
636 * Suspend/resume device methods.
637 */
638static int
639em_suspend(device_t dev)
640{
641	struct adapter *adapter = device_get_softc(dev);
642
643	EM_LOCK(adapter);
644	em_stop(adapter);
645	EM_UNLOCK(adapter);
646
647	return bus_generic_suspend(dev);
648}
649
650static int
651em_resume(device_t dev)
652{
653	struct adapter *adapter = device_get_softc(dev);
654	struct ifnet *ifp = adapter->ifp;
655
656	EM_LOCK(adapter);
657	em_init_locked(adapter);
658	if ((ifp->if_flags & IFF_UP) &&
659	    (ifp->if_drv_flags & IFF_DRV_RUNNING))
660		em_start_locked(ifp);
661	EM_UNLOCK(adapter);
662
663	return bus_generic_resume(dev);
664}
665
666
667/*********************************************************************
668 *  Transmit entry point
669 *
670 *  em_start is called by the stack to initiate a transmit.
671 *  The driver will remain in this routine as long as there are
672 *  packets to transmit and transmit resources are available.
673 *  In case resources are not available stack is notified and
674 *  the packet is requeued.
675 **********************************************************************/
676
677static void
678em_start_locked(struct ifnet *ifp)
679{
680        struct mbuf    *m_head;
681        struct adapter *adapter = ifp->if_softc;
682
683	mtx_assert(&adapter->mtx, MA_OWNED);
684
685        if (!adapter->link_active)
686                return;
687
688        while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
689
690                IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
691
692                if (m_head == NULL) break;
693
694		/*
695		 * em_encap() can modify our pointer, and or make it NULL on
696		 * failure.  In that event, we can't requeue.
697		 */
698		if (em_encap(adapter, &m_head)) {
699			if (m_head == NULL)
700				break;
701			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
702			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
703			break;
704                }
705
706		/* Send a copy of the frame to the BPF listener */
707		BPF_MTAP(ifp, m_head);
708
709                /* Set timeout in case hardware has problems transmitting */
710                ifp->if_timer = EM_TX_TIMEOUT;
711
712        }
713        return;
714}
715
716static void
717em_start(struct ifnet *ifp)
718{
719	struct adapter *adapter = ifp->if_softc;
720
721	EM_LOCK(adapter);
722	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
723		em_start_locked(ifp);
724	EM_UNLOCK(adapter);
725	return;
726}
727
/*********************************************************************
 *  Ioctl entry point
 *
 *  em_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ifreq   *ifr = (struct ifreq *) data;
	struct adapter * adapter = ifp->if_softc;
	int error = 0;

	/* Refuse all requests once detach has started */
	if (adapter->in_detach) return(error);

	switch (command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
		ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
	    {
		int max_frame_size;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

		/* Jumbo frame capability varies with MAC type */
		switch (adapter->hw.mac_type) {
		case em_82571:
		case em_82572:
			max_frame_size = 10500;
			break;
		case em_82573:
			/* 82573 does not support jumbo frames. */
			max_frame_size = ETHER_MAX_LEN;
			break;
		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		EM_LOCK(adapter);
		ifp->if_mtu = ifr->ifr_mtu;
		adapter->hw.max_frame_size =
		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
		/* Reinitialize so the new frame size takes effect */
		em_init_locked(adapter);
		EM_UNLOCK(adapter);
		break;
	    }
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
		EM_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				em_init_locked(adapter);
			}

			/* Re-derive promiscuous state from if_flags */
			em_disable_promisc(adapter);
			em_set_promisc(adapter);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				em_stop(adapter);
			}
		}
		EM_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			EM_LOCK(adapter);
			em_disable_intr(adapter);
			em_set_multi(adapter);
			if (adapter->hw.mac_type == em_82542_rev2_0) {
				/* 82542 rev2.0 gets its receive unit
				 * reinitialized after a filter change --
				 * NOTE(review): presumably a hardware
				 * workaround; confirm in em_set_multi() */
				em_initialize_receive_unit(adapter);
			}
#ifdef DEVICE_POLLING
			/* Keep interrupts masked while polling is active */
                        if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
				em_enable_intr(adapter);
			EM_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask, reinit;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		reinit = 0;
		/* mask holds only the capability bits being toggled */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(em_poll, ifp);
				if (error)
					return(error);
				EM_LOCK(adapter);
				em_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				EM_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				EM_LOCK(adapter);
				em_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				EM_UNLOCK(adapter);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		/* Checksum/VLAN offload changes require a reinit */
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
			em_init(adapter);
		break;
	    }
	default:
		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)", (int)command);
		error = EINVAL;
	}

	return(error);
}
869
/*********************************************************************
 *  Watchdog entry point
 *
 *  This routine is called whenever hardware quits transmitting.
 *
 **********************************************************************/

static void
em_watchdog(struct ifnet *ifp)
{
	struct adapter * adapter;
	adapter = ifp->if_softc;

	EM_LOCK(adapter);
	/* If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
		/* Flow control paused the transmitter; just re-arm */
		ifp->if_timer = EM_TX_TIMEOUT;
		EM_UNLOCK(adapter);
		return;
	}

	/* NOTE(review): message printed only when em_check_for_link()
	 * returns nonzero -- confirm the intended polarity against
	 * that function's return convention */
	if (em_check_for_link(&adapter->hw))
		printf("em%d: watchdog timeout -- resetting\n", adapter->unit);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;

	/* Full reinitialization recovers the stuck transmitter */
	em_init_locked(adapter);
	EM_UNLOCK(adapter);
}
902
903/*********************************************************************
904 *  Init entry point
905 *
906 *  This routine is used in two ways. It is used by the stack as
907 *  init entry point in network interface structure. It is also used
908 *  by the driver as a hw/sw initialization routine to get to a
909 *  consistent state.
910 *
911 *  return 0 on success, positive on failure
912 **********************************************************************/
913
static void
em_init_locked(struct adapter * adapter)
{
	struct ifnet   *ifp;

	uint32_t	pba;
	ifp = adapter->ifp;

	INIT_DEBUGOUT("em_init: begin");

	/* Caller must hold the adapter lock for the whole sequence. */
	mtx_assert(&adapter->mtx, MA_OWNED);

	/* Quiesce the adapter before reprogramming it. */
	em_stop(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 */
	switch (adapter->hw.mac_type) {
	case em_82547:
	case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (adapter->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		/* 82547 also tracks the Tx FIFO in software (hang workaround). */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	case em_82571: /* 82571: Total Packet Buffer is 48K */
	case em_82572: /* 82572: Total Packet Buffer is 48K */
			pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;
	case em_82573: /* 82573: Total Packet Buffer is 32K */
		/* Jumbo frames not supported */
			pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K.   */
		if(adapter->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}

	INIT_DEBUGOUT1("em_init: pba=%dK",pba);
	E1000_WRITE_REG(&adapter->hw, PBA, pba);

	/* Get the latest mac address, User can use a LAA */
        bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac_addr,
              ETHER_ADDR_LEN);

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		printf("em%d: Unable to initialize the hardware\n",
		       adapter->unit);
		return;
	}

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		em_enable_vlans(adapter);

	/* Prepare transmit descriptors and buffers */
	if (em_setup_transmit_structures(adapter)) {
		printf("em%d: Could not setup transmit structures\n",
		       adapter->unit);
		em_stop(adapter);
		return;
	}
	em_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	em_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (em_setup_receive_structures(adapter)) {
		printf("em%d: Could not setup receive structures\n",
		       adapter->unit);
		em_stop(adapter);
		return;
	}
	em_initialize_receive_unit(adapter);

	/* Don't loose promiscuous settings */
	em_set_promisc(adapter);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* 82543 and newer can offload TX checksums when enabled by the admin. */
	if (adapter->hw.mac_type >= em_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist = EM_CHECKSUM_FEATURES;
		else
			ifp->if_hwassist = 0;
	}

	/* Start the one-second link/stats timer and reset HW counters. */
	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
	em_clear_hw_cntrs(&adapter->hw);
#ifdef DEVICE_POLLING
        /*
         * Only enable interrupts if we are not polling, make sure
         * they are off otherwise.
         */
        if (ifp->if_capenable & IFCAP_POLLING)
                em_disable_intr(adapter);
        else
#endif /* DEVICE_POLLING */
		em_enable_intr(adapter);

	/* Don't reset the phy next time init gets called */
	adapter->hw.phy_reset_disable = TRUE;

	return;
}
1029
/*
 * Stack-facing init entry point: take the adapter lock and run the
 * real initialization in em_init_locked().
 */
static void
em_init(void *arg)
{
	struct adapter *sc = arg;

	EM_LOCK(sc);
	em_init_locked(sc);
	EM_UNLOCK(sc);
}
1040
1041
1042#ifdef DEVICE_POLLING
1043static void
1044em_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
1045{
1046        struct adapter *adapter = ifp->if_softc;
1047        u_int32_t reg_icr;
1048
1049	mtx_assert(&adapter->mtx, MA_OWNED);
1050
1051        if (cmd == POLL_AND_CHECK_STATUS) {
1052                reg_icr = E1000_READ_REG(&adapter->hw, ICR);
1053                if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1054			callout_stop(&adapter->timer);
1055                        adapter->hw.get_link_status = 1;
1056                        em_check_for_link(&adapter->hw);
1057                        em_print_link_status(adapter);
1058			callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1059                }
1060        }
1061	em_process_receive_interrupts(adapter, count);
1062	em_clean_transmit_interrupts(adapter);
1063
1064        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1065                em_start_locked(ifp);
1066}
1067
1068static void
1069em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1070{
1071        struct adapter *adapter = ifp->if_softc;
1072
1073	EM_LOCK(adapter);
1074	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1075		em_poll_locked(ifp, cmd, count);
1076	EM_UNLOCK(adapter);
1077}
1078#endif /* DEVICE_POLLING */
1079
1080#ifndef NO_EM_FASTINTR
1081static void
1082em_handle_link(void *context, int pending)
1083{
1084	struct adapter	*adapter = context;
1085	struct ifnet *ifp;
1086
1087	ifp = adapter->ifp;
1088
1089	EM_LOCK(adapter);
1090
1091	callout_stop(&adapter->timer);
1092	adapter->hw.get_link_status = 1;
1093	em_check_for_link(&adapter->hw);
1094	em_print_link_status(adapter);
1095	callout_reset(&adapter->timer, hz, em_local_timer,
1096	    adapter);
1097	EM_UNLOCK(adapter);
1098}
1099
1100static void
1101em_handle_rxtx(void *context, int pending)
1102{
1103	struct adapter	*adapter = context;
1104	struct ifnet	*ifp;
1105
1106	ifp = adapter->ifp;
1107
1108	/*
1109	 * TODO:
1110	 * It should be possible to run the tx clean loop without the lock.
1111	 */
1112	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1113		if (em_process_receive_interrupts(adapter,
1114		    adapter->rx_process_limit) != 0)
1115			taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1116		EM_LOCK(adapter);
1117		em_clean_transmit_interrupts(adapter);
1118
1119		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1120			em_start_locked(ifp);
1121		EM_UNLOCK(adapter);
1122	}
1123
1124	em_enable_intr(adapter);
1125	return;
1126}
1127#endif
1128
1129/*********************************************************************
1130 *
1131 *  Interrupt Service routine
1132 *
1133 **********************************************************************/
1134#ifndef NO_EM_FASTINTR
/*
 * Fast interrupt filter: runs in interrupt context, so it only
 * acknowledges the interrupt, masks further ones, and defers the
 * real work to the rxtx/link taskqueues.
 */
static void
em_intr_fast(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp;
	uint32_t	reg_icr;

	ifp = adapter->ifp;

#ifdef DEVICE_POLLING
	/* Polling owns the device; ignore stray interrupts. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		return;
	}
#endif /* DEVICE_POLLING */

	/* Reading ICR acknowledges (clears) the pending causes. */
	reg_icr = E1000_READ_REG(&adapter->hw, ICR);

	/* Hot eject?  */
	if (reg_icr == 0xffffffff)
		return;

	/* Definitely not our interrupt.  */
	if (reg_icr == 0x0)
		return;

	/*
	 * Starting with the 82571 chip, bit 31 should be used to
	 * determine whether the interrupt belongs to us.
	 */
	if (adapter->hw.mac_type >= em_82571 &&
	    (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
		return;

	/*
	 * Mask interrupts until the taskqueue is finished running.  This is
	 * cheap, just assume that it is needed.  This also works around the
	 * MSI message reordering errata on certain systems.
	 */
	em_disable_intr(adapter);
	taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))
		taskqueue_enqueue(taskqueue_fast, &adapter->link_task);

	if (reg_icr & E1000_ICR_RXO) {
		/* Receive overrun: just count it; the taskqueue recovers. */
		adapter->rx_overruns++;
	}
	return;
}
1185#endif
1186
/*
 * Legacy (non-fast) interrupt handler: loops reading ICR until no
 * cause remains, processing RX/TX and link events under the adapter
 * lock on each pass.
 */
static void
em_intr(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp;
	uint32_t	reg_icr;
	int		wantinit = 0;	/* set on RX overrun; reinit path is #if 0'd below */

	EM_LOCK(adapter);

	ifp = adapter->ifp;

#ifdef DEVICE_POLLING
	/* Polling owns the device; ignore stray interrupts. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		EM_UNLOCK(adapter);
		return;
	}
#endif /* DEVICE_POLLING */

	for (;;) {
		/* Reading ICR acknowledges (clears) the pending causes. */
		reg_icr = E1000_READ_REG(&adapter->hw, ICR);
		if (adapter->hw.mac_type >= em_82571 &&
		    (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
			break;
		else if (reg_icr == 0)
			break;

		/*
		 * XXX: some laptops trigger several spurious interrupts
		 * on em(4) when in the resume cycle. The ICR register
		 * reports all-ones value in this case. Processing such
		 * interrupts would lead to a freeze. I don't know why.
		 */
		if (reg_icr == 0xffffffff)
			break;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			/* -1 == no limit on frames processed per pass. */
			em_process_receive_interrupts(adapter, -1);
			em_clean_transmit_interrupts(adapter);
		}

		/* Link status change */
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.get_link_status = 1;
			em_check_for_link(&adapter->hw);
			em_print_link_status(adapter);
			callout_reset(&adapter->timer, hz, em_local_timer,
			    adapter);
		}

		if (reg_icr & E1000_ICR_RXO) {
			adapter->rx_overruns++;
			wantinit = 1;
		}
	}
	/* NOTE(review): reinit-on-overrun is deliberately disabled here. */
#if 0
	if (wantinit)
		em_init_locked(adapter);
#endif
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		em_start_locked(ifp);

	EM_UNLOCK(adapter);
	return;
}
1254
1255
1256
1257/*********************************************************************
1258 *
1259 *  Media Ioctl callback
1260 *
1261 *  This routine is called whenever the user queries the status of
1262 *  the interface using ifconfig.
1263 *
1264 **********************************************************************/
1265static void
1266em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1267{
1268	struct adapter * adapter = ifp->if_softc;
1269
1270	INIT_DEBUGOUT("em_media_status: begin");
1271
1272	em_check_for_link(&adapter->hw);
1273	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1274		if (adapter->link_active == 0) {
1275			em_get_speed_and_duplex(&adapter->hw,
1276						&adapter->link_speed,
1277						&adapter->link_duplex);
1278			adapter->link_active = 1;
1279		}
1280	} else {
1281		if (adapter->link_active == 1) {
1282			adapter->link_speed = 0;
1283			adapter->link_duplex = 0;
1284			adapter->link_active = 0;
1285		}
1286	}
1287
1288	ifmr->ifm_status = IFM_AVALID;
1289	ifmr->ifm_active = IFM_ETHER;
1290
1291	if (!adapter->link_active)
1292		return;
1293
1294	ifmr->ifm_status |= IFM_ACTIVE;
1295
1296	if (adapter->hw.media_type == em_media_type_fiber) {
1297		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1298	} else {
1299		switch (adapter->link_speed) {
1300		case 10:
1301			ifmr->ifm_active |= IFM_10_T;
1302			break;
1303		case 100:
1304			ifmr->ifm_active |= IFM_100_TX;
1305			break;
1306		case 1000:
1307			ifmr->ifm_active |= IFM_1000_T;
1308			break;
1309		}
1310		if (adapter->link_duplex == FULL_DUPLEX)
1311			ifmr->ifm_active |= IFM_FDX;
1312		else
1313			ifmr->ifm_active |= IFM_HDX;
1314	}
1315	return;
1316}
1317
1318/*********************************************************************
1319 *
1320 *  Media Ioctl callback
1321 *
1322 *  This routine is called when the user changes speed/duplex using
1323 *  media/mediopt option with ifconfig.
1324 *
1325 **********************************************************************/
1326static int
1327em_media_change(struct ifnet *ifp)
1328{
1329	struct adapter * adapter = ifp->if_softc;
1330	struct ifmedia  *ifm = &adapter->media;
1331
1332	INIT_DEBUGOUT("em_media_change: begin");
1333
1334	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1335		return(EINVAL);
1336
1337	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1338	case IFM_AUTO:
1339		adapter->hw.autoneg = DO_AUTO_NEG;
1340		adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1341		break;
1342	case IFM_1000_SX:
1343	case IFM_1000_T:
1344		adapter->hw.autoneg = DO_AUTO_NEG;
1345		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1346		break;
1347	case IFM_100_TX:
1348		adapter->hw.autoneg = FALSE;
1349		adapter->hw.autoneg_advertised = 0;
1350		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1351			adapter->hw.forced_speed_duplex = em_100_full;
1352		else
1353			adapter->hw.forced_speed_duplex	= em_100_half;
1354		break;
1355	case IFM_10_T:
1356		adapter->hw.autoneg = FALSE;
1357		adapter->hw.autoneg_advertised = 0;
1358		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1359			adapter->hw.forced_speed_duplex = em_10_full;
1360		else
1361			adapter->hw.forced_speed_duplex	= em_10_half;
1362		break;
1363	default:
1364		printf("em%d: Unsupported media type\n", adapter->unit);
1365	}
1366
1367	/* As the speed/duplex settings my have changed we need to
1368	 * reset the PHY.
1369	 */
1370	adapter->hw.phy_reset_disable = FALSE;
1371
1372	em_init(adapter);
1373
1374	return(0);
1375}
1376
1377/*********************************************************************
1378 *
1379 *  This routine maps the mbufs to tx descriptors.
1380 *
1381 *  return 0 on success, positive on failure
1382 **********************************************************************/
/*
 * Map *m_headp onto TX descriptors and hand it to the hardware.
 * On success the mbuf is owned by the TX ring (freed at cleanup);
 * on failure the DMA map is unloaded and the caller keeps the mbuf
 * (except when the VLAN-header path NULLs *m_headp after an mbuf
 * allocation failure).  Returns 0 or an errno.
 */
static int
em_encap(struct adapter *adapter, struct mbuf **m_headp)
{
        u_int32_t       txd_upper;
        u_int32_t       txd_lower, txd_used = 0, txd_saved = 0;
        int             i, j, error = 0;
	bus_dmamap_t	map;

	struct mbuf	*m_head;

	/* For 82544 Workaround */
	DESC_ARRAY              desc_array;
	u_int32_t               array_elements;
	u_int32_t               counter;
        struct m_tag    *mtag;
	bus_dma_segment_t	segs[EM_MAX_SCATTER];
	int			nsegs;
        struct em_buffer   *tx_buffer;
        struct em_tx_desc *current_tx_desc = NULL;
        struct ifnet   *ifp = adapter->ifp;

	m_head = *m_headp;

        /*
         * Force a cleanup if number of TX descriptors
         * available hits the threshold
         */
        if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
                em_clean_transmit_interrupts(adapter);
                if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
                        adapter->no_tx_desc_avail1++;
                        return(ENOBUFS);
                }
        }

        /*
         * Map the packet for DMA.
         */
	tx_buffer = &adapter->tx_buffer_area[adapter->next_avail_tx_desc];
	error = bus_dmamap_load_mbuf_sg(adapter->txtag, tx_buffer->map, m_head,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	map = tx_buffer->map;
        if (error != 0) {
                adapter->no_tx_dma_setup++;
                return (error);
        }
        KASSERT(nsegs != 0, ("em_encap: empty packet"));

	/* Not enough free descriptors for all DMA segments. */
        if (nsegs > adapter->num_tx_desc_avail) {
                adapter->no_tx_desc_avail2++;
		error = ENOBUFS;
		goto encap_fail;
        }


	/* Set up a checksum-offload context descriptor if requested. */
        if (ifp->if_hwassist > 0) {
                em_transmit_checksum_setup(adapter,  m_head,
                                           &txd_upper, &txd_lower);
        } else
                txd_upper = txd_lower = 0;


        /* Find out if we are in vlan mode */
        mtag = VLAN_OUTPUT_TAG(ifp, m_head);

	/*
	 * When operating in promiscuous mode, hardware encapsulation for
	 * packets is disabled.  This means we have to add the vlan
	 * encapsulation in the driver, since it will have come down from the
	 * VLAN layer with a tag instead of a VLAN header.
	 */
	if (mtag != NULL && adapter->em_insert_vlan_header) {
		struct ether_vlan_header *evl;
		struct ether_header eh;

		/* Make the Ethernet header contiguous, save a copy of it. */
		m_head = m_pullup(m_head, sizeof(eh));
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		eh = *mtod(m_head, struct ether_header *);
		/* Grow the front of the mbuf to hold the 802.1Q header. */
		M_PREPEND(m_head, sizeof(*evl), M_DONTWAIT);
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		m_head = m_pullup(m_head, sizeof(*evl));
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		/* Rebuild the header with the VLAN tag inserted in-line. */
		evl = mtod(m_head, struct ether_vlan_header *);
		bcopy(&eh, evl, sizeof(*evl));
		evl->evl_proto = evl->evl_encap_proto;
		evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
		evl->evl_tag = htons(VLAN_TAG_VALUE(mtag));
		m_tag_delete(m_head, mtag);
		mtag = NULL;
		*m_headp = m_head;
	}

        i = adapter->next_avail_tx_desc;
	if (adapter->pcix_82544) {
		/* Remember the start index so we can roll back on failure. */
		txd_saved = i;
		txd_used = 0;
	}
        for (j = 0; j < nsegs; j++) {
		/* If adapter is 82544 and on PCIX bus */
		if(adapter->pcix_82544) {
			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
                        array_elements = em_fill_descriptors(segs[j].ds_addr,
			    segs[j].ds_len, &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				if (txd_used == adapter->num_tx_desc_avail) {
					adapter->next_avail_tx_desc = txd_saved;
					adapter->no_tx_desc_avail2++;
					error = ENOBUFS;
					goto encap_fail;
                                }
                                tx_buffer = &adapter->tx_buffer_area[i];
                                current_tx_desc = &adapter->tx_desc_base[i];
                                current_tx_desc->buffer_addr = htole64(
					desc_array.descriptor[counter].address);
                                current_tx_desc->lower.data = htole32(
					(adapter->txd_cmd | txd_lower |
					 (u_int16_t)desc_array.descriptor[counter].length));
                                current_tx_desc->upper.data = htole32((txd_upper));
                                if (++i == adapter->num_tx_desc)
                                         i = 0;

                                tx_buffer->m_head = NULL;
                                txd_used++;
                        }
		} else {
			/* Normal path: one descriptor per DMA segment. */
			tx_buffer = &adapter->tx_buffer_area[i];
			current_tx_desc = &adapter->tx_desc_base[i];

			current_tx_desc->buffer_addr = htole64(segs[j].ds_addr);
			current_tx_desc->lower.data = htole32(
				adapter->txd_cmd | txd_lower | segs[j].ds_len);
			current_tx_desc->upper.data = htole32(txd_upper);

			if (++i == adapter->num_tx_desc)
				i = 0;

			tx_buffer->m_head = NULL;
		}
        }

	adapter->next_avail_tx_desc = i;
	if (adapter->pcix_82544) {
		adapter->num_tx_desc_avail -= txd_used;
	}
	else {
		adapter->num_tx_desc_avail -= nsegs;
	}

        if (mtag != NULL) {
                /* Set the vlan id */
                current_tx_desc->upper.fields.special = htole16(VLAN_TAG_VALUE(mtag));

                /* Tell hardware to add tag */
                current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
        }

	/* The last buffer keeps the mbuf so cleanup can free it. */
        tx_buffer->m_head = m_head;
        bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

        /*
         * Last Descriptor of Packet needs End Of Packet (EOP)
         */
        current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);

        /*
         * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
         * that this frame is available to transmit.
         */
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        if (adapter->hw.mac_type == em_82547 &&
            adapter->link_duplex == HALF_DUPLEX) {
		/* 82547 half-duplex: go through the FIFO-hang workaround. */
                em_82547_move_tail_locked(adapter);
        } else {
                E1000_WRITE_REG(&adapter->hw, TDT, i);
                if (adapter->hw.mac_type == em_82547) {
                        em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len);
                }
        }

        return(0);

encap_fail:
	bus_dmamap_unload(adapter->txtag, tx_buffer->map);
	return (error);
}
1584
1585/*********************************************************************
1586 *
1587 * 82547 workaround to avoid controller hang in half-duplex environment.
1588 * The workaround is to avoid queuing a large packet that would span
1589 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1590 * in this case. We do that only when FIFO is quiescent.
1591 *
1592 **********************************************************************/
/*
 * Advance the hardware TDT one complete frame at a time, checking
 * each frame against the FIFO-wrap workaround.  If a frame would
 * wrap the internal Tx FIFO while it is busy, defer the remainder
 * via the tx_fifo_timer callout and retry later.
 */
static void
em_82547_move_tail_locked(struct adapter *adapter)
{
	uint16_t hw_tdt;
	uint16_t sw_tdt;
	struct em_tx_desc *tx_desc;
	uint16_t length = 0;
	boolean_t eop = 0;

	EM_LOCK_ASSERT(adapter);

	hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
	sw_tdt = adapter->next_avail_tx_desc;

	/* Walk descriptors from the hardware tail to the software tail. */
	while (hw_tdt != sw_tdt) {
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if(++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if(eop) {
			/* Whole frame accumulated; check the wrap condition. */
			if (em_82547_fifo_workaround(adapter, length)) {
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
					em_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
			em_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}
	return;
}
1628
/* Callout wrapper: retry the deferred 82547 tail move under the lock. */
static void
em_82547_move_tail(void *arg)
{
	struct adapter *sc = arg;

	EM_LOCK(sc);
	em_82547_move_tail_locked(sc);
	EM_UNLOCK(sc);
}
1638
1639static int
1640em_82547_fifo_workaround(struct adapter *adapter, int len)
1641{
1642	int fifo_space, fifo_pkt_len;
1643
1644	fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1645
1646	if (adapter->link_duplex == HALF_DUPLEX) {
1647		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1648
1649		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1650			if (em_82547_tx_fifo_reset(adapter)) {
1651				return(0);
1652			}
1653			else {
1654				return(1);
1655			}
1656		}
1657	}
1658
1659	return(0);
1660}
1661
1662static void
1663em_82547_update_fifo_head(struct adapter *adapter, int len)
1664{
1665	int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1666
1667	/* tx_fifo_head is always 16 byte aligned */
1668	adapter->tx_fifo_head += fifo_pkt_len;
1669	if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1670		adapter->tx_fifo_head -= adapter->tx_fifo_size;
1671	}
1672
1673	return;
1674}
1675
1676
/*
 * Reset the 82547 internal Tx FIFO pointers, but only when the
 * transmit path is completely idle (ring empty, FIFO head == tail,
 * no packets buffered).  Returns TRUE on reset, FALSE if busy.
 */
static int
em_82547_tx_fifo_reset(struct adapter *adapter)
{
	uint32_t tctl;

	/* All four conditions together mean the transmitter is quiescent. */
	if ( (E1000_READ_REG(&adapter->hw, TDT) ==
	      E1000_READ_REG(&adapter->hw, TDH)) &&
	     (E1000_READ_REG(&adapter->hw, TDFT) ==
	      E1000_READ_REG(&adapter->hw, TDFH)) &&
	     (E1000_READ_REG(&adapter->hw, TDFTS) ==
	      E1000_READ_REG(&adapter->hw, TDFHS)) &&
	     (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {

		/* Disable TX unit */
		tctl = E1000_READ_REG(&adapter->hw, TCTL);
		E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&adapter->hw, TDFT,  adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFH,  adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
		E1000_WRITE_FLUSH(&adapter->hw);

		adapter->tx_fifo_head = 0;
		adapter->tx_fifo_reset_cnt++;

		return(TRUE);
	}
	else {
		return(FALSE);
	}
}
1713
1714static void
1715em_set_promisc(struct adapter * adapter)
1716{
1717
1718	u_int32_t       reg_rctl;
1719	struct ifnet   *ifp = adapter->ifp;
1720
1721	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1722
1723	if (ifp->if_flags & IFF_PROMISC) {
1724		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1725		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1726		/* Disable VLAN stripping in promiscous mode
1727		 * This enables bridging of vlan tagged frames to occur
1728		 * and also allows vlan tags to be seen in tcpdump
1729		 */
1730		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1731			em_disable_vlans(adapter);
1732		adapter->em_insert_vlan_header = 1;
1733	} else if (ifp->if_flags & IFF_ALLMULTI) {
1734		reg_rctl |= E1000_RCTL_MPE;
1735		reg_rctl &= ~E1000_RCTL_UPE;
1736		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1737		adapter->em_insert_vlan_header = 0;
1738	} else
1739		adapter->em_insert_vlan_header = 0;
1740
1741	return;
1742}
1743
1744static void
1745em_disable_promisc(struct adapter * adapter)
1746{
1747	u_int32_t       reg_rctl;
1748	struct ifnet   *ifp = adapter->ifp;
1749
1750	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1751
1752	reg_rctl &=  (~E1000_RCTL_UPE);
1753	reg_rctl &=  (~E1000_RCTL_MPE);
1754	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1755
1756	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1757		em_enable_vlans(adapter);
1758	adapter->em_insert_vlan_header = 0;
1759
1760	return;
1761}
1762
1763
1764/*********************************************************************
1765 *  Multicast Update
1766 *
1767 *  This routine is called whenever multicast address list is updated.
1768 *
1769 **********************************************************************/
1770
/*
 * Rebuild the hardware multicast filter from the interface's
 * multicast address list.  The 82542 rev2.0 requires the receiver
 * to be held in reset (and MWI off) while the table is rewritten.
 */
static void
em_set_multi(struct adapter * adapter)
{
        u_int32_t reg_rctl = 0;
        u_int8_t  mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
        struct ifmultiaddr  *ifma;
        int mcnt = 0;
        struct ifnet   *ifp = adapter->ifp;

        IOCTL_DEBUGOUT("em_set_multi: begin");

        if (adapter->hw.mac_type == em_82542_rev2_0) {
		/* Put the receiver in reset before touching the table. */
                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
                if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
                        em_pci_clear_mwi(&adapter->hw);
                }
                reg_rctl |= E1000_RCTL_RST;
                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
                msec_delay(5);
        }

	/* Collect up to MAX_NUM_MULTICAST_ADDRESSES link-layer addresses. */
	IF_ADDR_LOCK(ifp);
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break;

                bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
                      &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
                mcnt++;
        }
	IF_ADDR_UNLOCK(ifp);

	/* Too many groups: fall back to receiving all multicast. */
        if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
                reg_rctl |= E1000_RCTL_MPE;
                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
        } else
                em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);

        if (adapter->hw.mac_type == em_82542_rev2_0) {
		/* Take the receiver back out of reset and restore MWI. */
                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
                reg_rctl &= ~E1000_RCTL_RST;
                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
                msec_delay(5);
                if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
                        em_pci_set_mwi(&adapter->hw);
                }
        }

        return;
}
1824
1825
1826/*********************************************************************
1827 *  Timer routine
1828 *
1829 *  This routine checks for link status and updates statistics.
1830 *
1831 **********************************************************************/
1832
1833static void
1834em_local_timer(void *arg)
1835{
1836	struct ifnet   *ifp;
1837	struct adapter * adapter = arg;
1838	ifp = adapter->ifp;
1839
1840	EM_LOCK(adapter);
1841
1842	em_check_for_link(&adapter->hw);
1843	em_print_link_status(adapter);
1844	em_update_stats_counters(adapter);
1845	if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1846		em_print_hw_stats(adapter);
1847	}
1848	em_smartspeed(adapter);
1849
1850	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1851
1852	EM_UNLOCK(adapter);
1853	return;
1854}
1855
1856static void
1857em_print_link_status(struct adapter * adapter)
1858{
1859	struct ifnet *ifp = adapter->ifp;
1860
1861	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1862		if (adapter->link_active == 0) {
1863			em_get_speed_and_duplex(&adapter->hw,
1864						&adapter->link_speed,
1865						&adapter->link_duplex);
1866			if (bootverbose)
1867				printf("em%d: Link is up %d Mbps %s\n",
1868				       adapter->unit,
1869				       adapter->link_speed,
1870				       ((adapter->link_duplex == FULL_DUPLEX) ?
1871					"Full Duplex" : "Half Duplex"));
1872			adapter->link_active = 1;
1873			adapter->smartspeed = 0;
1874			if_link_state_change(ifp, LINK_STATE_UP);
1875		}
1876	} else {
1877		if (adapter->link_active == 1) {
1878			adapter->link_speed = 0;
1879			adapter->link_duplex = 0;
1880			if (bootverbose)
1881				printf("em%d: Link is Down\n", adapter->unit);
1882			adapter->link_active = 0;
1883			if_link_state_change(ifp, LINK_STATE_DOWN);
1884		}
1885	}
1886
1887	return;
1888}
1889
1890/*********************************************************************
1891 *
1892 *  This routine disables all traffic on the adapter by issuing a
1893 *  global reset on the MAC and deallocates TX/RX buffers.
1894 *
1895 **********************************************************************/
1896
1897static void
1898em_stop(void *arg)
1899{
1900	struct ifnet   *ifp;
1901	struct adapter * adapter = arg;
1902	ifp = adapter->ifp;
1903
1904	mtx_assert(&adapter->mtx, MA_OWNED);
1905
1906	INIT_DEBUGOUT("em_stop: begin");
1907
1908	em_disable_intr(adapter);
1909	em_reset_hw(&adapter->hw);
1910	callout_stop(&adapter->timer);
1911	callout_stop(&adapter->tx_fifo_timer);
1912	em_free_transmit_structures(adapter);
1913	em_free_receive_structures(adapter);
1914
1915
1916	/* Tell the stack that the interface is no longer active */
1917	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1918
1919	return;
1920}
1921
1922
1923/*********************************************************************
1924 *
1925 *  Determine hardware revision.
1926 *
1927 **********************************************************************/
1928static void
1929em_identify_hardware(struct adapter * adapter)
1930{
1931	device_t dev = adapter->dev;
1932
1933	/* Make sure our PCI config space has the necessary stuff set */
1934	adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1935	if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1936	      (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1937		printf("em%d: Memory Access and/or Bus Master bits were not set!\n",
1938		       adapter->unit);
1939		adapter->hw.pci_cmd_word |=
1940		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1941		pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
1942	}
1943
1944	/* Save off the information about this board */
1945	adapter->hw.vendor_id = pci_get_vendor(dev);
1946	adapter->hw.device_id = pci_get_device(dev);
1947	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1948	adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
1949	adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
1950
1951	/* Identify the MAC */
1952        if (em_set_mac_type(&adapter->hw))
1953                printf("em%d: Unknown MAC Type\n", adapter->unit);
1954
1955	if(adapter->hw.mac_type == em_82541 ||
1956	   adapter->hw.mac_type == em_82541_rev_2 ||
1957	   adapter->hw.mac_type == em_82547 ||
1958	   adapter->hw.mac_type == em_82547_rev_2)
1959		adapter->hw.phy_init_script = TRUE;
1960
1961        return;
1962}
1963
/*
 * Map the register BAR, locate and map the I/O BAR (post-82543 parts
 * only) and allocate the interrupt line.  Returns 0 on success or
 * ENXIO if any bus resource cannot be obtained; on failure, previously
 * acquired resources are left for em_free_pci_resources() to release.
 */
static int
em_allocate_pci_resources(struct adapter * adapter)
{
	int             val, rid;
	device_t        dev = adapter->dev;

	/* BAR 0 is the memory-mapped register window. */
	rid = PCIR_BAR(0);
	adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						     &rid, RF_ACTIVE);
	if (!(adapter->res_memory)) {
		printf("em%d: Unable to allocate bus resource: memory\n",
		       adapter->unit);
		return(ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
	rman_get_bustag(adapter->res_memory);
	adapter->osdep.mem_bus_space_handle =
	rman_get_bushandle(adapter->res_memory);
	/*
	 * hw_addr points at the stored bus-space handle, not at mapped
	 * registers directly; the E1000_*_REG macros presumably go
	 * through osdep to dereference it — confirm against if_em_hw.h.
	 */
	adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;


	if (adapter->hw.mac_type > em_82543) {
		/* Figure out where the I/O BAR is by scanning the BARs. */
		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
			val = pci_read_config(dev, rid, 4);
			if (E1000_BAR_TYPE(val) == E1000_BAR_TYPE_IO) {
				adapter->io_rid = rid;
				break;
			}
			rid += 4;
			/* check for 64bit BAR: it occupies two dwords */
			if (E1000_BAR_MEM_TYPE(val) == E1000_BAR_MEM_TYPE_64BIT)
				rid += 4;
		}
		/* Loop ran off the end of BAR space without a match. */
		if (rid >= PCIR_CIS) {
			printf("em%d: Unable to locate IO BAR\n", adapter->unit);
			return (ENXIO);
		}
		adapter->res_ioport = bus_alloc_resource_any(dev,
							     SYS_RES_IOPORT,
							     &adapter->io_rid,
							     RF_ACTIVE);
		if (!(adapter->res_ioport)) {
			printf("em%d: Unable to allocate bus resource: ioport\n",
			       adapter->unit);
			return(ENXIO);
		}
		adapter->hw.io_base = 0;
		adapter->osdep.io_bus_space_tag =
		    rman_get_bustag(adapter->res_ioport);
		adapter->osdep.io_bus_space_handle =
		    rman_get_bushandle(adapter->res_ioport);
	}

	/* The IRQ may be shared with other devices on the same line. */
	rid = 0x0;
	adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
						        RF_SHAREABLE |
							RF_ACTIVE);
	if (!(adapter->res_interrupt)) {
		printf("em%d: Unable to allocate bus resource: interrupt\n",
		       adapter->unit);
		return(ENXIO);
	}

	adapter->hw.back = &adapter->osdep;

	return(0);
}
2032
/*
 * Hook up the interrupt handler.  Prefers an INTR_FAST filter with
 * taskqueue-deferred RX/TX and link processing; falls back to a
 * regular MPSAFE ithread handler if the fast path cannot be set up.
 * Returns 0 on success or ENXIO if no handler could be registered.
 */
int
em_allocate_intr(struct adapter *adapter)
{
	device_t        dev = adapter->dev;

	/* Manually turn off all interrupts */
	E1000_WRITE_REG(&adapter->hw, IMC, 0xffffffff);

	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.  If that doesn't work, try just using an
	 * ithread.
	 */
#ifndef NO_EM_FASTINTR
	if (bus_setup_intr(dev, adapter->res_interrupt,
			   INTR_TYPE_NET | INTR_FAST, em_intr_fast, adapter,
			   &adapter->int_handler_tag) == 0) {

		/* Init the deferred processing contexts. */
		TASK_INIT(&adapter->rxtx_task, 0, em_handle_rxtx, adapter);
		TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
		/*
		 * NOTE(review): taskqueue_create_fast() uses M_NOWAIT and
		 * kthread_create() can fail, yet neither return value is
		 * checked here — confirm whether failure is tolerable.
		 */
		adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
			taskqueue_thread_enqueue,
			&adapter->tq, &adapter->tqproc);
		kthread_create(taskqueue_thread_loop,
			&adapter->tq, &adapter->tqproc,
			0, 0, "%s taskq", device_get_nameunit(adapter->dev));
		/* Run the taskqueue thread at network priority. */
		mtx_lock_spin(&sched_lock);
		sched_prio(FIRST_THREAD_IN_PROC(adapter->tqproc), PI_NET);
		mtx_unlock_spin(&sched_lock);
	}
#endif
	/* Fast setup failed (or was compiled out): use a plain ithread. */
	if (adapter->int_handler_tag == NULL) {
		if (bus_setup_intr(dev, adapter->res_interrupt,
				   INTR_TYPE_NET | INTR_MPSAFE,
				   em_intr, adapter,
				   &adapter->int_handler_tag)) {
			printf("em%d: Error registering interrupt handler!\n",
			       adapter->unit);
			return(ENXIO);
		}
	}

	em_enable_intr(adapter);
	return (0);
}
2079
2080static void
2081em_free_intr(struct adapter *adapter)
2082{
2083	device_t dev = adapter->dev;
2084
2085	if (adapter->res_interrupt != NULL) {
2086		bus_teardown_intr(dev, adapter->res_interrupt,
2087				  adapter->int_handler_tag);
2088		adapter->int_handler_tag = NULL;
2089	}
2090	if (adapter->tq != NULL) {
2091		taskqueue_drain(adapter->tq, &adapter->rxtx_task);
2092		taskqueue_drain(taskqueue_fast, &adapter->link_task);
2093		taskqueue_free(adapter->tq);
2094		adapter->tq = NULL;
2095	}
2096}
2097
2098static void
2099em_free_pci_resources(struct adapter * adapter)
2100{
2101	device_t dev = adapter->dev;
2102
2103	if (adapter->res_interrupt != NULL) {
2104		bus_release_resource(dev, SYS_RES_IRQ, 0,
2105				     adapter->res_interrupt);
2106	}
2107	if (adapter->res_memory != NULL) {
2108		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
2109				     adapter->res_memory);
2110	}
2111
2112	if (adapter->res_ioport != NULL) {
2113		bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
2114				     adapter->res_ioport);
2115	}
2116	return;
2117}
2118
2119/*********************************************************************
2120 *
2121 *  Initialize the hardware to a configuration as specified by the
2122 *  adapter structure. The controller is reset, the EEPROM is
2123 *  verified, the MAC address is set, then the shared initialization
2124 *  routines are called.
2125 *
2126 **********************************************************************/
2127static int
2128em_hardware_init(struct adapter * adapter)
2129{
2130	uint16_t rx_buffer_size;
2131
2132        INIT_DEBUGOUT("em_hardware_init: begin");
2133	/* Issue a global reset */
2134	em_reset_hw(&adapter->hw);
2135
2136	/* When hardware is reset, fifo_head is also reset */
2137	adapter->tx_fifo_head = 0;
2138
2139	/* Make sure we have a good EEPROM before we read from it */
2140	if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
2141		printf("em%d: The EEPROM Checksum Is Not Valid\n",
2142		       adapter->unit);
2143		return(EIO);
2144	}
2145
2146	if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
2147		printf("em%d: EEPROM read error while reading part number\n",
2148		       adapter->unit);
2149		return(EIO);
2150	}
2151
2152	/*
2153	 * These parameters control the automatic generation (Tx) and
2154	 * response (Rx) to Ethernet PAUSE frames.
2155	 * - High water mark should allow for at least two frames to be
2156	 *   received after sending an XOFF.
2157	 * - Low water mark works best when it is very near the high water mark.
2158	 *   This allows the receiver to restart by sending XON when it has drained
2159	 *   a bit.  Here we use an arbitary value of 1500 which will restart after
2160	 *   one full frame is pulled from the buffer.  There could be several smaller
2161	 *   frames in the buffer and if so they will not trigger the XON until their
2162	 *   total number reduces the buffer by 1500.
2163	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2164	 */
2165	rx_buffer_size = ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff) << 10 );
2166
2167	adapter->hw.fc_high_water = rx_buffer_size -
2168	    roundup2(adapter->hw.max_frame_size, 1024);
2169	adapter->hw.fc_low_water = adapter->hw.fc_high_water - 1500;
2170	adapter->hw.fc_pause_time = 0x1000;
2171	adapter->hw.fc_send_xon = TRUE;
2172	adapter->hw.fc = em_fc_full;
2173
2174	if (em_init_hw(&adapter->hw) < 0) {
2175		printf("em%d: Hardware Initialization Failed",
2176		       adapter->unit);
2177		return(EIO);
2178	}
2179
2180	em_check_for_link(&adapter->hw);
2181	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
2182		adapter->link_active = 1;
2183	else
2184		adapter->link_active = 0;
2185
2186	if (adapter->link_active) {
2187		em_get_speed_and_duplex(&adapter->hw,
2188					&adapter->link_speed,
2189					&adapter->link_duplex);
2190	} else {
2191		adapter->link_speed = 0;
2192		adapter->link_duplex = 0;
2193	}
2194
2195	return(0);
2196}
2197
2198/*********************************************************************
2199 *
2200 *  Setup networking device structure and register an interface.
2201 *
2202 **********************************************************************/
/*
 * Create and register the network interface: allocate the ifnet,
 * fill in the driver entry points, attach the Ethernet layer, set the
 * offload capabilities and populate the supported media list.
 */
static void
em_setup_interface(device_t dev, struct adapter * adapter)
{
	struct ifnet   *ifp;
	INIT_DEBUGOUT("em_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = 1000000000;
	ifp->if_init =  em_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = em_ioctl;
	ifp->if_start = em_start;
	ifp->if_watchdog = em_watchdog;
	/* Leave one descriptor of slack in the send queue. */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
	IFQ_SET_READY(&ifp->if_snd);

        ether_ifattach(ifp, adapter->hw.mac_addr);

	/* Capabilities are set after attach; start from a clean slate. */
	ifp->if_capabilities = ifp->if_capenable = 0;

	/* Hardware checksum offload exists on 82543 and newer MACs. */
	if (adapter->hw.mac_type >= em_82543) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_capenable |= IFCAP_HWCSUM;
	}

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
		     em_media_status);
	if (adapter->hw.media_type == em_media_type_fiber) {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
			    0, NULL);
	} else {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	}
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}
2273
2274
2275/*********************************************************************
2276 *
2277 *  Workaround for SmartSpeed on 82541 and 82547 controllers
2278 *
2279 **********************************************************************/
2280static void
2281em_smartspeed(struct adapter *adapter)
2282{
2283        uint16_t phy_tmp;
2284
2285	if(adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
2286	   !adapter->hw.autoneg || !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
2287		return;
2288
2289        if(adapter->smartspeed == 0) {
2290                /* If Master/Slave config fault is asserted twice,
2291                 * we assume back-to-back */
2292                em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2293                if(!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) return;
2294                em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2295                if(phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2296                        em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
2297					&phy_tmp);
2298                        if(phy_tmp & CR_1000T_MS_ENABLE) {
2299                                phy_tmp &= ~CR_1000T_MS_ENABLE;
2300                                em_write_phy_reg(&adapter->hw,
2301                                                    PHY_1000T_CTRL, phy_tmp);
2302                                adapter->smartspeed++;
2303                                if(adapter->hw.autoneg &&
2304                                   !em_phy_setup_autoneg(&adapter->hw) &&
2305				   !em_read_phy_reg(&adapter->hw, PHY_CTRL,
2306                                                       &phy_tmp)) {
2307                                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
2308                                                    MII_CR_RESTART_AUTO_NEG);
2309                                        em_write_phy_reg(&adapter->hw,
2310							 PHY_CTRL, phy_tmp);
2311                                }
2312                        }
2313                }
2314                return;
2315        } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2316                /* If still no link, perhaps using 2/3 pair cable */
2317                em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2318                phy_tmp |= CR_1000T_MS_ENABLE;
2319                em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2320                if(adapter->hw.autoneg &&
2321                   !em_phy_setup_autoneg(&adapter->hw) &&
2322                   !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
2323                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
2324                                    MII_CR_RESTART_AUTO_NEG);
2325                        em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
2326                }
2327        }
2328        /* Restart process after EM_SMARTSPEED_MAX iterations */
2329        if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2330                adapter->smartspeed = 0;
2331
2332	return;
2333}
2334
2335
2336/*
2337 * Manage DMA'able memory.
2338 */
2339static void
2340em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2341{
2342        if (error)
2343                return;
2344        *(bus_addr_t *) arg = segs[0].ds_addr;
2345}
2346
2347static int
2348em_dma_malloc(struct adapter *adapter, bus_size_t size,
2349        struct em_dma_alloc *dma, int mapflags)
2350{
2351        int r;
2352
2353	r = bus_dma_tag_create(NULL,                    /* parent */
2354                               E1000_DBA_ALIGN, 0,      /* alignment, bounds */
2355                               BUS_SPACE_MAXADDR,       /* lowaddr */
2356                               BUS_SPACE_MAXADDR,       /* highaddr */
2357                               NULL, NULL,              /* filter, filterarg */
2358                               size,                    /* maxsize */
2359                               1,                       /* nsegments */
2360                               size,                    /* maxsegsize */
2361                               BUS_DMA_ALLOCNOW,        /* flags */
2362			       NULL,			/* lockfunc */
2363			       NULL,			/* lockarg */
2364                               &dma->dma_tag);
2365        if (r != 0) {
2366                printf("em%d: em_dma_malloc: bus_dma_tag_create failed; "
2367                        "error %u\n", adapter->unit, r);
2368                goto fail_0;
2369        }
2370
2371        r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2372                             BUS_DMA_NOWAIT, &dma->dma_map);
2373        if (r != 0) {
2374                printf("em%d: em_dma_malloc: bus_dmammem_alloc failed; "
2375                        "size %ju, error %d\n", adapter->unit,
2376			(uintmax_t)size, r);
2377                goto fail_2;
2378        }
2379
2380	dma->dma_paddr = 0;
2381        r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2382                            size,
2383                            em_dmamap_cb,
2384                            &dma->dma_paddr,
2385                            mapflags | BUS_DMA_NOWAIT);
2386        if (r != 0 || dma->dma_paddr == 0) {
2387                printf("em%d: em_dma_malloc: bus_dmamap_load failed; "
2388                        "error %u\n", adapter->unit, r);
2389                goto fail_3;
2390        }
2391
2392        return (0);
2393
2394fail_3:
2395        bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2396fail_2:
2397        bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2398        bus_dma_tag_destroy(dma->dma_tag);
2399fail_0:
2400        dma->dma_map = NULL;
2401        dma->dma_tag = NULL;
2402        return (r);
2403}
2404
2405static void
2406em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2407{
2408	if (dma->dma_tag == NULL)
2409		return;
2410	if (dma->dma_map != NULL) {
2411		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2412		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2413		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2414		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2415		dma->dma_map = NULL;
2416	}
2417        bus_dma_tag_destroy(dma->dma_tag);
2418	dma->dma_tag = NULL;
2419}
2420
2421
2422/*********************************************************************
2423 *
2424 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2425 *  the information needed to transmit a packet on the wire.
2426 *
2427 **********************************************************************/
2428static int
2429em_allocate_transmit_structures(struct adapter * adapter)
2430{
2431	if (!(adapter->tx_buffer_area =
2432	      (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2433					     adapter->num_tx_desc, M_DEVBUF,
2434					     M_NOWAIT))) {
2435		printf("em%d: Unable to allocate tx_buffer memory\n",
2436		       adapter->unit);
2437		return ENOMEM;
2438	}
2439
2440	bzero(adapter->tx_buffer_area,
2441	      sizeof(struct em_buffer) * adapter->num_tx_desc);
2442
2443	return 0;
2444}
2445
2446/*********************************************************************
2447 *
2448 *  Allocate and initialize transmit structures.
2449 *
2450 **********************************************************************/
/*
 * Allocate and initialize the transmit side: create the TX mbuf DMA
 * tag, allocate the tx_buffer array, clear the descriptor ring and
 * create one DMA map per descriptor.  Returns 0 on success; on any
 * error, everything allocated so far is torn down via
 * em_free_transmit_structures().
 */
static int
em_setup_transmit_structures(struct adapter * adapter)
{
	struct em_buffer *tx_buffer;
	bus_size_t size;
	int error, i;

        /*
         * Setup DMA descriptor areas.
         */
	/* Maps must hold a full (rounded-up) frame across EM_MAX_SCATTER segs. */
	size = roundup2(adapter->hw.max_frame_size, MCLBYTES);
	if ((error = bus_dma_tag_create(NULL,           /* parent */
                               1, 0,                    /* alignment, bounds */
                               BUS_SPACE_MAXADDR,       /* lowaddr */
                               BUS_SPACE_MAXADDR,       /* highaddr */
                               NULL, NULL,              /* filter, filterarg */
                               size,                    /* maxsize */
                               EM_MAX_SCATTER,          /* nsegments */
                               size,                    /* maxsegsize */
                               0,                       /* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockarg */
                               &adapter->txtag)) != 0) {
		printf("em%d: Unable to allocate TX DMA tag\n", adapter->unit);
		goto fail;
        }

        if ((error = em_allocate_transmit_structures(adapter)) != 0)
		goto fail;

	/* Clear the ring; tx_desc_base was allocated elsewhere (txdma). */
        bzero((void *) adapter->tx_desc_base,
              (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
	tx_buffer = adapter->tx_buffer_area;
	for (i = 0; i < adapter->num_tx_desc; i++) {
		error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
		if (error != 0) {
			printf("em%d: Unable to create TX DMA map\n",
			    adapter->unit);
			goto fail;
		}
		tx_buffer++;
	}

	/* Ring starts empty: producer and consumer both at slot 0. */
        adapter->next_avail_tx_desc = 0;
        adapter->oldest_used_tx_desc = 0;

        /* Set number of descriptors available */
        adapter->num_tx_desc_avail = adapter->num_tx_desc;

        /* Set checksum context */
        adapter->active_checksum_context = OFFLOAD_NONE;
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        return (0);

fail:
	em_free_transmit_structures(adapter);
	return (error);
}
2511
2512/*********************************************************************
2513 *
2514 *  Enable transmit unit.
2515 *
2516 **********************************************************************/
/*
 * Program the transmit unit: descriptor ring base/length, head/tail
 * pointers, inter-packet gap, interrupt delays and the transmit
 * control register.
 */
static void
em_initialize_transmit_unit(struct adapter * adapter)
{
	u_int32_t       reg_tctl;
	u_int32_t       reg_tipg = 0;
	u_int64_t	bus_addr;

         INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = adapter->txdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, TDBAL, (u_int32_t)bus_addr);
	E1000_WRITE_REG(&adapter->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, TDLEN,
			adapter->num_tx_desc *
			sizeof(struct em_tx_desc));

	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&adapter->hw, TDH, 0);
	E1000_WRITE_REG(&adapter->hw, TDT, 0);


	HW_DEBUGOUT2("Base = %x, Length = %x\n",
		     E1000_READ_REG(&adapter->hw, TDBAL),
		     E1000_READ_REG(&adapter->hw, TDLEN));

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (adapter->hw.mac_type) {
	case em_82542_rev2_0:
        case em_82542_rev2_1:
                reg_tipg = DEFAULT_82542_TIPG_IPGT;
                reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
                break;
        default:
		/* Fiber and copper need different transmit IPG values. */
                if (adapter->hw.media_type == em_media_type_fiber)
                        reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
                else
                        reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
                reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
        }

	E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
	E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
	/* TADV (absolute interrupt delay) only exists on 82540 and later. */
	if(adapter->hw.mac_type >= em_82540)
		E1000_WRITE_REG(&adapter->hw, TADV,
		    adapter->tx_abs_int_delay.value);

	/* Program the Transmit Control Register */
	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
	/* Multiple-request support on 82571 and newer. */
	if (adapter->hw.mac_type >= em_82571)
		reg_tctl |= E1000_TCTL_MULR;
	/*
	 * NOTE(review): this compares link_duplex against the literal 1
	 * yet selects the FULL-duplex collision distance; elsewhere the
	 * driver compares against FULL_DUPLEX.  Looks suspicious if
	 * HALF_DUPLEX == 1 — confirm against the FULL_DUPLEX definition.
	 */
	if (adapter->link_duplex == 1) {
		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	} else {
		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	}
	E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;

	/* Request a descriptor-done interrupt delay only if one is set. */
	if (adapter->tx_int_delay.value > 0)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	return;
}
2585
2586/*********************************************************************
2587 *
2588 *  Free all transmit related data structures.
2589 *
2590 **********************************************************************/
2591static void
2592em_free_transmit_structures(struct adapter * adapter)
2593{
2594        struct em_buffer   *tx_buffer;
2595        int             i;
2596
2597        INIT_DEBUGOUT("free_transmit_structures: begin");
2598
2599        if (adapter->tx_buffer_area != NULL) {
2600                tx_buffer = adapter->tx_buffer_area;
2601                for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2602                        if (tx_buffer->m_head != NULL) {
2603				bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2604				    BUS_DMASYNC_POSTWRITE);
2605				bus_dmamap_unload(adapter->txtag,
2606				    tx_buffer->map);
2607                                m_freem(tx_buffer->m_head);
2608				tx_buffer->m_head = NULL;
2609                        } else if (tx_buffer->map != NULL)
2610				bus_dmamap_unload(adapter->txtag,
2611				    tx_buffer->map);
2612			if (tx_buffer->map != NULL) {
2613				bus_dmamap_destroy(adapter->txtag,
2614				    tx_buffer->map);
2615				tx_buffer->map = NULL;
2616			}
2617                }
2618        }
2619        if (adapter->tx_buffer_area != NULL) {
2620                free(adapter->tx_buffer_area, M_DEVBUF);
2621                adapter->tx_buffer_area = NULL;
2622        }
2623        if (adapter->txtag != NULL) {
2624                bus_dma_tag_destroy(adapter->txtag);
2625                adapter->txtag = NULL;
2626        }
2627        return;
2628}
2629
2630/*********************************************************************
2631 *
2632 *  The offload context needs to be set when we transfer the first
2633 *  packet of a particular protocol (TCP/UDP). We change the
2634 *  context only if the protocol type changes.
2635 *
2636 **********************************************************************/
2637static void
2638em_transmit_checksum_setup(struct adapter * adapter,
2639			   struct mbuf *mp,
2640			   u_int32_t *txd_upper,
2641			   u_int32_t *txd_lower)
2642{
2643	struct em_context_desc *TXD;
2644	struct em_buffer *tx_buffer;
2645	int curr_txd;
2646
2647	if (mp->m_pkthdr.csum_flags) {
2648
2649		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2650			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2651			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2652			if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
2653				return;
2654			else
2655				adapter->active_checksum_context = OFFLOAD_TCP_IP;
2656
2657		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2658			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2659			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2660			if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
2661				return;
2662			else
2663				adapter->active_checksum_context = OFFLOAD_UDP_IP;
2664		} else {
2665			*txd_upper = 0;
2666			*txd_lower = 0;
2667			return;
2668		}
2669	} else {
2670		*txd_upper = 0;
2671		*txd_lower = 0;
2672		return;
2673	}
2674
2675	/* If we reach this point, the checksum offload context
2676	 * needs to be reset.
2677	 */
2678	curr_txd = adapter->next_avail_tx_desc;
2679	tx_buffer = &adapter->tx_buffer_area[curr_txd];
2680	TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
2681
2682	TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2683	TXD->lower_setup.ip_fields.ipcso =
2684		ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2685	TXD->lower_setup.ip_fields.ipcse =
2686		htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2687
2688	TXD->upper_setup.tcp_fields.tucss =
2689		ETHER_HDR_LEN + sizeof(struct ip);
2690	TXD->upper_setup.tcp_fields.tucse = htole16(0);
2691
2692	if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
2693		TXD->upper_setup.tcp_fields.tucso =
2694			ETHER_HDR_LEN + sizeof(struct ip) +
2695			offsetof(struct tcphdr, th_sum);
2696	} else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
2697		TXD->upper_setup.tcp_fields.tucso =
2698			ETHER_HDR_LEN + sizeof(struct ip) +
2699			offsetof(struct udphdr, uh_sum);
2700	}
2701
2702	TXD->tcp_seg_setup.data = htole32(0);
2703	TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);
2704
2705	tx_buffer->m_head = NULL;
2706
2707	if (++curr_txd == adapter->num_tx_desc)
2708		curr_txd = 0;
2709
2710	adapter->num_tx_desc_avail--;
2711	adapter->next_avail_tx_desc = curr_txd;
2712
2713	return;
2714}
2715
2716/**********************************************************************
2717 *
2718 *  Examine each tx_buffer in the used queue. If the hardware is done
2719 *  processing the packet then free associated resources. The
2720 *  tx_buffer is put back on the free queue.
2721 *
2722 **********************************************************************/
/*
 * Reclaim completed transmit descriptors.  Walks the ring from the
 * oldest used slot, freeing mbufs and DMA maps for every descriptor
 * the hardware has marked done (DD), then updates the available count
 * and the watchdog timer.  Caller must hold the adapter lock.
 */
static void
em_clean_transmit_interrupts(struct adapter * adapter)
{
        int i, num_avail;
        struct em_buffer *tx_buffer;
        struct em_tx_desc   *tx_desc;
	struct ifnet   *ifp = adapter->ifp;

	mtx_assert(&adapter->mtx, MA_OWNED);

	/* Ring is already empty: nothing to reclaim. */
        if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
                return;

        num_avail = adapter->num_tx_desc_avail;
        i = adapter->oldest_used_tx_desc;

        tx_buffer = &adapter->tx_buffer_area[i];
        tx_desc = &adapter->tx_desc_base[i];

	/* Pull in the hardware's status writes before reading DD bits. */
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_POSTREAD);
        while (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {

                tx_desc->upper.data = 0;
                num_avail++;

		/*
		 * Only descriptors that started a packet carry an mbuf;
		 * context/continuation slots have m_head == NULL.
		 */
                if (tx_buffer->m_head) {
			ifp->if_opackets++;
			bus_dmamap_sync(adapter->txtag, tx_buffer->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(adapter->txtag, tx_buffer->map);

                        m_freem(tx_buffer->m_head);
                        tx_buffer->m_head = NULL;
                }

		/* Advance with wrap-around. */
                if (++i == adapter->num_tx_desc)
                        i = 0;

                tx_buffer = &adapter->tx_buffer_area[i];
                tx_desc = &adapter->tx_desc_base[i];
        }
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        adapter->oldest_used_tx_desc = i;

        /*
         * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack
         * that it is OK to send packets.
         * If there are no pending descriptors, clear the timeout. Otherwise,
         * if some descriptors have been freed, restart the timeout.
         */
        if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                if (num_avail == adapter->num_tx_desc)
                        ifp->if_timer = 0;
                else if (num_avail == adapter->num_tx_desc_avail)
                        ifp->if_timer = EM_TX_TIMEOUT;
        }
        adapter->num_tx_desc_avail = num_avail;
        return;
}
2786
2787/*********************************************************************
2788 *
2789 *  Get a buffer from system mbuf buffer pool.
2790 *
2791 **********************************************************************/
2792static int
2793em_get_buf(int i, struct adapter *adapter,
2794           struct mbuf *nmp)
2795{
2796        struct mbuf    *mp = nmp;
2797        struct em_buffer *rx_buffer;
2798        struct ifnet   *ifp;
2799	bus_dma_segment_t segs[1];
2800	int error, nsegs;
2801
2802        ifp = adapter->ifp;
2803
2804        if (mp == NULL) {
2805                mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2806                if (mp == NULL) {
2807                        adapter->mbuf_cluster_failed++;
2808                        return(ENOBUFS);
2809                }
2810                mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2811        } else {
2812                mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2813                mp->m_data = mp->m_ext.ext_buf;
2814                mp->m_next = NULL;
2815        }
2816
2817        if (ifp->if_mtu <= ETHERMTU) {
2818                m_adj(mp, ETHER_ALIGN);
2819        }
2820
2821        rx_buffer = &adapter->rx_buffer_area[i];
2822
2823        /*
2824         * Using memory from the mbuf cluster pool, invoke the
2825         * bus_dma machinery to arrange the memory mapping.
2826         */
2827        error = bus_dmamap_load_mbuf_sg(adapter->rxtag, rx_buffer->map,
2828	    mp, segs, &nsegs, 0);
2829        if (error != 0) {
2830                m_free(mp);
2831                return(error);
2832        }
2833	/* If nsegs is wrong then the stack is corrupt */
2834	KASSERT(nsegs == 1, ("Too many segments returned!"));
2835        rx_buffer->m_head = mp;
2836        adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
2837        bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
2838
2839        return(0);
2840}
2841
2842/*********************************************************************
2843 *
2844 *  Allocate memory for rx_buffer structures. Since we use one
2845 *  rx_buffer per received packet, the maximum number of rx_buffer's
2846 *  that we'll need is equal to the number of receive descriptors
2847 *  that we've allocated.
2848 *
2849 **********************************************************************/
2850static int
2851em_allocate_receive_structures(struct adapter * adapter)
2852{
2853        int             i, error;
2854        struct em_buffer *rx_buffer;
2855
2856        if (!(adapter->rx_buffer_area =
2857              (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2858                                          adapter->num_rx_desc, M_DEVBUF,
2859                                          M_NOWAIT))) {
2860                printf("em%d: Unable to allocate rx_buffer memory\n",
2861                       adapter->unit);
2862                return(ENOMEM);
2863        }
2864
2865        bzero(adapter->rx_buffer_area,
2866              sizeof(struct em_buffer) * adapter->num_rx_desc);
2867
2868        error = bus_dma_tag_create(NULL,                /* parent */
2869                               1, 0,                    /* alignment, bounds */
2870                               BUS_SPACE_MAXADDR,       /* lowaddr */
2871                               BUS_SPACE_MAXADDR,       /* highaddr */
2872                               NULL, NULL,              /* filter, filterarg */
2873                               MCLBYTES,                /* maxsize */
2874                               1,                       /* nsegments */
2875                               MCLBYTES,                /* maxsegsize */
2876                               BUS_DMA_ALLOCNOW,        /* flags */
2877			       NULL,			/* lockfunc */
2878			       NULL,			/* lockarg */
2879                               &adapter->rxtag);
2880        if (error != 0) {
2881                printf("em%d: em_allocate_receive_structures: "
2882                        "bus_dma_tag_create failed; error %u\n",
2883                       adapter->unit, error);
2884                goto fail;
2885        }
2886
2887        rx_buffer = adapter->rx_buffer_area;
2888        for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2889                error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2890                                          &rx_buffer->map);
2891                if (error != 0) {
2892                        printf("em%d: em_allocate_receive_structures: "
2893                                "bus_dmamap_create failed; error %u\n",
2894                                adapter->unit, error);
2895                        goto fail;
2896                }
2897        }
2898
2899        for (i = 0; i < adapter->num_rx_desc; i++) {
2900                error = em_get_buf(i, adapter, NULL);
2901		if (error != 0)
2902			goto fail;
2903        }
2904        bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2905            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2906
2907        return(0);
2908
2909fail:
2910	em_free_receive_structures(adapter);
2911        return (error);
2912}
2913
2914/*********************************************************************
2915 *
2916 *  Allocate and initialize receive structures.
2917 *
2918 **********************************************************************/
2919static int
2920em_setup_receive_structures(struct adapter * adapter)
2921{
2922	bzero((void *) adapter->rx_desc_base,
2923              (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2924
2925	if (em_allocate_receive_structures(adapter))
2926		return ENOMEM;
2927
2928	/* Setup our descriptor pointers */
2929        adapter->next_rx_desc_to_check = 0;
2930	return(0);
2931}
2932
2933/*********************************************************************
2934 *
2935 *  Enable receive unit.
2936 *
2937 **********************************************************************/
2938static void
2939em_initialize_receive_unit(struct adapter * adapter)
2940{
2941	u_int32_t       reg_rctl;
2942	u_int32_t       reg_rxcsum;
2943	struct ifnet    *ifp;
2944	u_int64_t	bus_addr;
2945
2946        INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2947	ifp = adapter->ifp;
2948
2949	/* Make sure receives are disabled while setting up the descriptor ring */
2950	E1000_WRITE_REG(&adapter->hw, RCTL, 0);
2951
2952	/* Set the Receive Delay Timer Register */
2953	E1000_WRITE_REG(&adapter->hw, RDTR,
2954			adapter->rx_int_delay.value | E1000_RDT_FPDB);
2955
2956	if(adapter->hw.mac_type >= em_82540) {
2957		E1000_WRITE_REG(&adapter->hw, RADV,
2958		    adapter->rx_abs_int_delay.value);
2959
2960                /* Set the interrupt throttling rate.  Value is calculated
2961                 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */
2962#define MAX_INTS_PER_SEC        8000
2963#define DEFAULT_ITR             1000000000/(MAX_INTS_PER_SEC * 256)
2964                E1000_WRITE_REG(&adapter->hw, ITR, DEFAULT_ITR);
2965        }
2966
2967	/* Setup the Base and Length of the Rx Descriptor Ring */
2968	bus_addr = adapter->rxdma.dma_paddr;
2969	E1000_WRITE_REG(&adapter->hw, RDBAL, (u_int32_t)bus_addr);
2970	E1000_WRITE_REG(&adapter->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
2971	E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
2972			sizeof(struct em_rx_desc));
2973
2974	/* Setup the HW Rx Head and Tail Descriptor Pointers */
2975	E1000_WRITE_REG(&adapter->hw, RDH, 0);
2976	E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
2977
2978	/* Setup the Receive Control Register */
2979	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2980		   E1000_RCTL_RDMTS_HALF |
2981		   (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
2982
2983	if (adapter->hw.tbi_compatibility_on == TRUE)
2984		reg_rctl |= E1000_RCTL_SBP;
2985
2986
2987	switch (adapter->rx_buffer_len) {
2988	default:
2989	case EM_RXBUFFER_2048:
2990		reg_rctl |= E1000_RCTL_SZ_2048;
2991		break;
2992	case EM_RXBUFFER_4096:
2993		reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2994		break;
2995	case EM_RXBUFFER_8192:
2996		reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2997		break;
2998	case EM_RXBUFFER_16384:
2999		reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
3000		break;
3001	}
3002
3003	if (ifp->if_mtu > ETHERMTU)
3004		reg_rctl |= E1000_RCTL_LPE;
3005
3006	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
3007	if ((adapter->hw.mac_type >= em_82543) &&
3008	    (ifp->if_capenable & IFCAP_RXCSUM)) {
3009		reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
3010		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
3011		E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
3012	}
3013
3014	/* Enable Receives */
3015	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
3016
3017	return;
3018}
3019
3020/*********************************************************************
3021 *
3022 *  Free receive related data structures.
3023 *
3024 **********************************************************************/
3025static void
3026em_free_receive_structures(struct adapter *adapter)
3027{
3028        struct em_buffer   *rx_buffer;
3029        int             i;
3030
3031        INIT_DEBUGOUT("free_receive_structures: begin");
3032
3033        if (adapter->rx_buffer_area != NULL) {
3034                rx_buffer = adapter->rx_buffer_area;
3035                for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3036			if (rx_buffer->m_head != NULL) {
3037				bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3038				    BUS_DMASYNC_POSTREAD);
3039				bus_dmamap_unload(adapter->rxtag,
3040				    rx_buffer->map);
3041				m_freem(rx_buffer->m_head);
3042				rx_buffer->m_head = NULL;
3043			} else if (rx_buffer->map != NULL)
3044				bus_dmamap_unload(adapter->rxtag,
3045				    rx_buffer->map);
3046                        if (rx_buffer->map != NULL) {
3047				bus_dmamap_destroy(adapter->rxtag,
3048				    rx_buffer->map);
3049				rx_buffer->map = NULL;
3050			}
3051                }
3052        }
3053        if (adapter->rx_buffer_area != NULL) {
3054                free(adapter->rx_buffer_area, M_DEVBUF);
3055                adapter->rx_buffer_area = NULL;
3056        }
3057        if (adapter->rxtag != NULL) {
3058                bus_dma_tag_destroy(adapter->rxtag);
3059                adapter->rxtag = NULL;
3060        }
3061        return;
3062}
3063
3064/*********************************************************************
3065 *
3066 *  This routine executes in interrupt context. It replenishes
3067 *  the mbufs in the descriptor and sends data which has been
3068 *  dma'ed into host memory to upper layer.
3069 *
3070 *  We loop at most count times if count is > 0, or until done if
3071 *  count < 0.
3072 *
3073 *********************************************************************/
3074static int
3075em_process_receive_interrupts(struct adapter * adapter, int count)
3076{
3077	struct ifnet        *ifp;
3078	struct mbuf         *mp;
3079	u_int8_t            accept_frame = 0;
3080 	u_int8_t            eop = 0;
3081	u_int16_t           len, desc_len, prev_len_adj;
3082	int                 i;
3083
3084	/* Pointer to the receive descriptor being examined. */
3085	struct em_rx_desc   *current_desc;
3086
3087	ifp = adapter->ifp;
3088	i = adapter->next_rx_desc_to_check;
3089        current_desc = &adapter->rx_desc_base[i];
3090	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3091	    BUS_DMASYNC_POSTREAD);
3092
3093	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
3094		return (0);
3095	}
3096
3097	while ((current_desc->status & E1000_RXD_STAT_DD) &&
3098		    (count != 0) &&
3099		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3100		struct mbuf *m = NULL;
3101
3102		mp = adapter->rx_buffer_area[i].m_head;
3103		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
3104		    BUS_DMASYNC_POSTREAD);
3105		bus_dmamap_unload(adapter->rxtag,
3106		    adapter->rx_buffer_area[i].map);
3107
3108		accept_frame = 1;
3109		prev_len_adj = 0;
3110                desc_len = le16toh(current_desc->length);
3111		if (current_desc->status & E1000_RXD_STAT_EOP) {
3112			count--;
3113			eop = 1;
3114			if (desc_len < ETHER_CRC_LEN) {
3115                                len = 0;
3116                                prev_len_adj = ETHER_CRC_LEN - desc_len;
3117                        }
3118                        else {
3119                                len = desc_len - ETHER_CRC_LEN;
3120                        }
3121		} else {
3122			eop = 0;
3123			len = desc_len;
3124		}
3125
3126		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
3127			u_int8_t            last_byte;
3128			u_int32_t           pkt_len = desc_len;
3129
3130			if (adapter->fmp != NULL)
3131				pkt_len += adapter->fmp->m_pkthdr.len;
3132
3133			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
3134
3135			if (TBI_ACCEPT(&adapter->hw, current_desc->status,
3136				       current_desc->errors,
3137				       pkt_len, last_byte)) {
3138				em_tbi_adjust_stats(&adapter->hw,
3139						    &adapter->stats,
3140						    pkt_len,
3141						    adapter->hw.mac_addr);
3142				if (len > 0) len--;
3143			}
3144			else {
3145				accept_frame = 0;
3146			}
3147		}
3148
3149		if (accept_frame) {
3150
3151			if (em_get_buf(i, adapter, NULL) == ENOBUFS) {
3152				adapter->dropped_pkts++;
3153				em_get_buf(i, adapter, mp);
3154				if (adapter->fmp != NULL)
3155					m_freem(adapter->fmp);
3156				adapter->fmp = NULL;
3157				adapter->lmp = NULL;
3158				break;
3159			}
3160
3161			/* Assign correct length to the current fragment */
3162			mp->m_len = len;
3163
3164			if (adapter->fmp == NULL) {
3165				mp->m_pkthdr.len = len;
3166				adapter->fmp = mp;	 /* Store the first mbuf */
3167				adapter->lmp = mp;
3168			} else {
3169				/* Chain mbuf's together */
3170				mp->m_flags &= ~M_PKTHDR;
3171				/*
3172                                 * Adjust length of previous mbuf in chain if we
3173                                 * received less than 4 bytes in the last descriptor.
3174                                 */
3175				if (prev_len_adj > 0) {
3176					adapter->lmp->m_len -= prev_len_adj;
3177					adapter->fmp->m_pkthdr.len -= prev_len_adj;
3178				}
3179				adapter->lmp->m_next = mp;
3180				adapter->lmp = adapter->lmp->m_next;
3181				adapter->fmp->m_pkthdr.len += len;
3182			}
3183
3184                        if (eop) {
3185                                adapter->fmp->m_pkthdr.rcvif = ifp;
3186				ifp->if_ipackets++;
3187                                em_receive_checksum(adapter, current_desc,
3188                                                    adapter->fmp);
3189#ifndef __NO_STRICT_ALIGNMENT
3190				if (ifp->if_mtu > ETHERMTU &&
3191				    em_fixup_rx(adapter) != 0)
3192					goto skip;
3193
3194#endif
3195                                if (current_desc->status & E1000_RXD_STAT_VP)
3196					VLAN_INPUT_TAG(ifp, adapter->fmp,
3197					    (le16toh(current_desc->special) &
3198					    E1000_RXD_SPC_VLAN_MASK));
3199#ifndef __NO_STRICT_ALIGNMENT
3200skip:
3201#endif
3202				m = adapter->fmp;
3203				adapter->fmp = NULL;
3204				adapter->lmp = NULL;
3205                        }
3206		} else {
3207			adapter->dropped_pkts++;
3208			em_get_buf(i, adapter, mp);
3209			if (adapter->fmp != NULL)
3210				m_freem(adapter->fmp);
3211			adapter->fmp = NULL;
3212			adapter->lmp = NULL;
3213		}
3214
3215		/* Zero out the receive descriptors status  */
3216		current_desc->status = 0;
3217		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3218		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3219
3220                /* Advance our pointers to the next descriptor */
3221		if (++i == adapter->num_rx_desc)
3222			i = 0;
3223		if (m != NULL) {
3224			adapter->next_rx_desc_to_check = i;
3225			(*ifp->if_input)(ifp, m);
3226			i = adapter->next_rx_desc_to_check;
3227		}
3228		current_desc = &adapter->rx_desc_base[i];
3229	}
3230	adapter->next_rx_desc_to_check = i;
3231
3232	/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
3233	if (--i < 0) i = adapter->num_rx_desc - 1;
3234	E1000_WRITE_REG(&adapter->hw, RDT, i);
3235
3236	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
3237		return (0);
3238	}
3239	return (1);
3240}
3241
3242#ifndef __NO_STRICT_ALIGNMENT
3243/*
3244 * When jumbo frames are enabled we should realign entire payload on
3245 * architecures with strict alignment. This is serious design mistake of 8254x
3246 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
3247 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
3248 * payload. On architecures without strict alignment restrictions 8254x still
3249 * performs unaligned memory access which would reduce the performance too.
3250 * To avoid copying over an entire frame to align, we allocate a new mbuf and
3251 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
3252 * existing mbuf chain.
3253 *
3254 * Be aware, best performance of the 8254x is achived only when jumbo frame is
3255 * not used at all on architectures with strict alignment.
3256 */
3257static int
3258em_fixup_rx(struct adapter *adapter)
3259{
3260	struct mbuf *m, *n;
3261	int error;
3262
3263	error = 0;
3264	m = adapter->fmp;
3265	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
3266		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
3267		m->m_data += ETHER_HDR_LEN;
3268	} else {
3269		MGETHDR(n, M_DONTWAIT, MT_DATA);
3270		if (n != NULL) {
3271			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
3272			m->m_data += ETHER_HDR_LEN;
3273			m->m_len -= ETHER_HDR_LEN;
3274			n->m_len = ETHER_HDR_LEN;
3275			M_MOVE_PKTHDR(n, m);
3276			n->m_next = m;
3277			adapter->fmp = n;
3278		} else {
3279			adapter->dropped_pkts++;
3280			m_freem(adapter->fmp);
3281			adapter->fmp = NULL;
3282			error = ENOMEM;
3283		}
3284	}
3285
3286	return (error);
3287}
3288#endif
3289
3290/*********************************************************************
3291 *
3292 *  Verify that the hardware indicated that the checksum is valid.
3293 *  Inform the stack about the status of checksum so that stack
3294 *  doesn't spend time verifying the checksum.
3295 *
3296 *********************************************************************/
3297static void
3298em_receive_checksum(struct adapter *adapter,
3299		    struct em_rx_desc *rx_desc,
3300		    struct mbuf *mp)
3301{
3302	/* 82543 or newer only */
3303	if ((adapter->hw.mac_type < em_82543) ||
3304	    /* Ignore Checksum bit is set */
3305	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3306		mp->m_pkthdr.csum_flags = 0;
3307		return;
3308	}
3309
3310	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3311		/* Did it pass? */
3312		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3313			/* IP Checksum Good */
3314			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3315			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3316
3317		} else {
3318			mp->m_pkthdr.csum_flags = 0;
3319		}
3320	}
3321
3322	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3323		/* Did it pass? */
3324		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3325			mp->m_pkthdr.csum_flags |=
3326			(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3327			mp->m_pkthdr.csum_data = htons(0xffff);
3328		}
3329	}
3330
3331	return;
3332}
3333
3334
3335static void
3336em_enable_vlans(struct adapter *adapter)
3337{
3338	uint32_t ctrl;
3339
3340	E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
3341
3342	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3343	ctrl |= E1000_CTRL_VME;
3344	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3345
3346	return;
3347}
3348
3349static void
3350em_disable_vlans(struct adapter *adapter)
3351{
3352	uint32_t ctrl;
3353
3354	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3355	ctrl &= ~E1000_CTRL_VME;
3356	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3357
3358	return;
3359}
3360
3361static void
3362em_enable_intr(struct adapter * adapter)
3363{
3364	E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
3365	return;
3366}
3367
3368static void
3369em_disable_intr(struct adapter *adapter)
3370{
3371	/*
3372	 * The first version of 82542 had an errata where when link was forced it
3373	 * would stay up even up even if the cable was disconnected.  Sequence errors
3374	 * were used to detect the disconnect and then the driver would unforce the link.
3375	 * This code in the in the ISR.  For this to work correctly the Sequence error
3376	 * interrupt had to be enabled all the time.
3377	 */
3378
3379	if (adapter->hw.mac_type == em_82542_rev2_0)
3380	    E1000_WRITE_REG(&adapter->hw, IMC,
3381	        (0xffffffff & ~E1000_IMC_RXSEQ));
3382	else
3383	    E1000_WRITE_REG(&adapter->hw, IMC,
3384	        0xffffffff);
3385	return;
3386}
3387
3388static int
3389em_is_valid_ether_addr(u_int8_t *addr)
3390{
3391        char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3392
3393        if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3394                return (FALSE);
3395        }
3396
3397        return(TRUE);
3398}
3399
3400void
3401em_write_pci_cfg(struct em_hw *hw,
3402		      uint32_t reg,
3403		      uint16_t *value)
3404{
3405	pci_write_config(((struct em_osdep *)hw->back)->dev, reg,
3406			 *value, 2);
3407}
3408
3409void
3410em_read_pci_cfg(struct em_hw *hw, uint32_t reg,
3411		     uint16_t *value)
3412{
3413	*value = pci_read_config(((struct em_osdep *)hw->back)->dev,
3414				 reg, 2);
3415	return;
3416}
3417
3418void
3419em_pci_set_mwi(struct em_hw *hw)
3420{
3421        pci_write_config(((struct em_osdep *)hw->back)->dev,
3422                         PCIR_COMMAND,
3423                         (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
3424        return;
3425}
3426
3427void
3428em_pci_clear_mwi(struct em_hw *hw)
3429{
3430        pci_write_config(((struct em_osdep *)hw->back)->dev,
3431                         PCIR_COMMAND,
3432                         (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
3433        return;
3434}
3435
3436/*********************************************************************
3437* 82544 Coexistence issue workaround.
3438*    There are 2 issues.
3439*       1. Transmit Hang issue.
3440*    To detect this issue, following equation can be used...
3441*          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3442*          If SUM[3:0] is in between 1 to 4, we will have this issue.
3443*
3444*       2. DAC issue.
3445*    To detect this issue, following equation can be used...
3446*          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3447*          If SUM[3:0] is in between 9 to c, we will have this issue.
3448*
3449*
3450*    WORKAROUND:
3451*          Make sure we do not have ending address as 1,2,3,4(Hang) or 9,a,b,c (DAC)
3452*
3453*** *********************************************************************/
3454static u_int32_t
3455em_fill_descriptors (bus_addr_t address,
3456                              u_int32_t length,
3457                              PDESC_ARRAY desc_array)
3458{
3459        /* Since issue is sensitive to length and address.*/
3460        /* Let us first check the address...*/
3461        u_int32_t safe_terminator;
3462        if (length <= 4) {
3463                desc_array->descriptor[0].address = address;
3464                desc_array->descriptor[0].length = length;
3465                desc_array->elements = 1;
3466                return desc_array->elements;
3467        }
3468        safe_terminator = (u_int32_t)((((u_int32_t)address & 0x7) + (length & 0xF)) & 0xF);
3469        /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
3470        if (safe_terminator == 0   ||
3471        (safe_terminator > 4   &&
3472        safe_terminator < 9)   ||
3473        (safe_terminator > 0xC &&
3474        safe_terminator <= 0xF)) {
3475                desc_array->descriptor[0].address = address;
3476                desc_array->descriptor[0].length = length;
3477                desc_array->elements = 1;
3478                return desc_array->elements;
3479        }
3480
3481        desc_array->descriptor[0].address = address;
3482        desc_array->descriptor[0].length = length - 4;
3483        desc_array->descriptor[1].address = address + (length - 4);
3484        desc_array->descriptor[1].length = 4;
3485        desc_array->elements = 2;
3486        return desc_array->elements;
3487}
3488
3489/**********************************************************************
3490 *
3491 *  Update the board statistics counters.
3492 *
3493 **********************************************************************/
3494static void
3495em_update_stats_counters(struct adapter *adapter)
3496{
3497	struct ifnet   *ifp;
3498
3499	if(adapter->hw.media_type == em_media_type_copper ||
3500	   (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
3501		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
3502		adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
3503	}
3504	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
3505	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
3506	adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
3507	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);
3508
3509	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
3510	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
3511	adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
3512	adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
3513	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
3514	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
3515	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
3516	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
3517	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
3518	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
3519	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
3520	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
3521	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
3522	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
3523	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
3524	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
3525	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
3526	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
3527	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
3528	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);
3529
3530	/* For the 64-bit byte counters the low dword must be read first. */
3531	/* Both registers clear on the read of the high dword */
3532
3533	adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
3534	adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
3535	adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
3536	adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);
3537
3538	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
3539	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
3540	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
3541	adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
3542	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);
3543
3544	adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
3545	adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
3546	adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
3547	adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);
3548
3549	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
3550	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
3551	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
3552	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
3553	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
3554	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
3555	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
3556	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
3557	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
3558	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);
3559
3560	if (adapter->hw.mac_type >= em_82543) {
3561		adapter->stats.algnerrc +=
3562		E1000_READ_REG(&adapter->hw, ALGNERRC);
3563		adapter->stats.rxerrc +=
3564		E1000_READ_REG(&adapter->hw, RXERRC);
3565		adapter->stats.tncrs +=
3566		E1000_READ_REG(&adapter->hw, TNCRS);
3567		adapter->stats.cexterr +=
3568		E1000_READ_REG(&adapter->hw, CEXTERR);
3569		adapter->stats.tsctc +=
3570		E1000_READ_REG(&adapter->hw, TSCTC);
3571		adapter->stats.tsctfc +=
3572		E1000_READ_REG(&adapter->hw, TSCTFC);
3573	}
3574	ifp = adapter->ifp;
3575
3576	ifp->if_collisions = adapter->stats.colc;
3577
3578	/* Rx Errors */
3579	ifp->if_ierrors =
3580	adapter->dropped_pkts +
3581	adapter->stats.rxerrc +
3582	adapter->stats.crcerrs +
3583	adapter->stats.algnerrc +
3584	adapter->stats.rlec +
3585	adapter->stats.mpc + adapter->stats.cexterr;
3586
3587	/* Tx Errors */
3588	ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol +
3589	    adapter->watchdog_events;
3590
3591}
3592
3593
3594/**********************************************************************
3595 *
3596 *  This routine is called only when em_display_debug_stats is enabled.
3597 *  This routine provides a way to take a look at important statistics
3598 *  maintained by the driver and hardware.
3599 *
3600 **********************************************************************/
/* Dump adapter/hardware debug state to the console; triggered via the
 * debug_info sysctl (see em_sysctl_debug_info). */
static void
em_print_debug_info(struct adapter *adapter)
{
	int unit = adapter->unit;
	uint8_t *hw_addr = adapter->hw.hw_addr;

	printf("em%d: Adapter hardware address = %p \n", unit, hw_addr);
	printf("em%d: CTRL = 0x%x RCTL = 0x%x \n", unit,
	    E1000_READ_REG(&adapter->hw, CTRL),
	    E1000_READ_REG(&adapter->hw, RCTL));
	/* PBA register: high word = TX packet buffer KB, low word = RX */
	printf("em%d: Packet buffer = Tx=%dk Rx=%dk \n", unit,
	    ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff0000) >> 16),\
	    (E1000_READ_REG(&adapter->hw, PBA) & 0xffff) );
	printf("em%d: Flow control watermarks high = %d low = %d\n", unit,
	    adapter->hw.fc_high_water,
	    adapter->hw.fc_low_water);
	printf("em%d: tx_int_delay = %d, tx_abs_int_delay = %d\n", unit,
	    E1000_READ_REG(&adapter->hw, TIDV),
	    E1000_READ_REG(&adapter->hw, TADV));
	printf("em%d: rx_int_delay = %d, rx_abs_int_delay = %d\n", unit,
	    E1000_READ_REG(&adapter->hw, RDTR),
	    E1000_READ_REG(&adapter->hw, RADV));
	printf("em%d: fifo workaround = %lld, fifo_reset_count = %lld\n",
	    unit, (long long)adapter->tx_fifo_wrk_cnt,
	    (long long)adapter->tx_fifo_reset_cnt);
	printf("em%d: hw tdh = %d, hw tdt = %d\n", unit,
	    E1000_READ_REG(&adapter->hw, TDH),
	    E1000_READ_REG(&adapter->hw, TDT));
	printf("em%d: Num Tx descriptors avail = %d\n", unit,
	    adapter->num_tx_desc_avail);
	printf("em%d: Tx Descriptors not avail1 = %ld\n", unit,
	    adapter->no_tx_desc_avail1);
	printf("em%d: Tx Descriptors not avail2 = %ld\n", unit,
	    adapter->no_tx_desc_avail2);
	printf("em%d: Std mbuf failed = %ld\n", unit,
	    adapter->mbuf_alloc_failed);
	printf("em%d: Std mbuf cluster failed = %ld\n", unit,
	    adapter->mbuf_cluster_failed);
	printf("em%d: Driver dropped packets = %ld\n", unit,
	    adapter->dropped_pkts);

	return;
}
3644
/* Print the accumulated hardware statistics (adapter->stats, kept
 * current by em_update_stats_counters); triggered via the stats
 * sysctl. */
static void
em_print_hw_stats(struct adapter *adapter)
{
        int unit = adapter->unit;

        printf("em%d: Excessive collisions = %lld\n", unit,
               (long long)adapter->stats.ecol);
        printf("em%d: Symbol errors = %lld\n", unit,
               (long long)adapter->stats.symerrs);
        printf("em%d: Sequence errors = %lld\n", unit,
               (long long)adapter->stats.sec);
        printf("em%d: Defer count = %lld\n", unit,
               (long long)adapter->stats.dc);

        printf("em%d: Missed Packets = %lld\n", unit,
               (long long)adapter->stats.mpc);
        printf("em%d: Receive No Buffers = %lld\n", unit,
               (long long)adapter->stats.rnbc);
        printf("em%d: Receive length errors = %lld\n", unit,
               (long long)adapter->stats.rlec);
        printf("em%d: Receive errors = %lld\n", unit,
               (long long)adapter->stats.rxerrc);
        printf("em%d: Crc errors = %lld\n", unit,
               (long long)adapter->stats.crcerrs);
        printf("em%d: Alignment errors = %lld\n", unit,
               (long long)adapter->stats.algnerrc);
        printf("em%d: Carrier extension errors = %lld\n", unit,
               (long long)adapter->stats.cexterr);
	printf("em%d: RX overruns = %ld\n", unit, adapter->rx_overruns);
	printf("em%d: watchdog timeouts = %ld\n", unit,
		adapter->watchdog_events);

        printf("em%d: XON Rcvd = %lld\n", unit,
               (long long)adapter->stats.xonrxc);
        printf("em%d: XON Xmtd = %lld\n", unit,
               (long long)adapter->stats.xontxc);
        printf("em%d: XOFF Rcvd = %lld\n", unit,
               (long long)adapter->stats.xoffrxc);
        printf("em%d: XOFF Xmtd = %lld\n", unit,
               (long long)adapter->stats.xofftxc);

        printf("em%d: Good Packets Rcvd = %lld\n", unit,
               (long long)adapter->stats.gprc);
        printf("em%d: Good Packets Xmtd = %lld\n", unit,
               (long long)adapter->stats.gptc);

        return;
}
3693
3694static int
3695em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3696{
3697        int error;
3698        int result;
3699        struct adapter *adapter;
3700
3701        result = -1;
3702        error = sysctl_handle_int(oidp, &result, 0, req);
3703
3704        if (error || !req->newptr)
3705                return (error);
3706
3707        if (result == 1) {
3708                adapter = (struct adapter *)arg1;
3709                em_print_debug_info(adapter);
3710        }
3711
3712        return error;
3713}
3714
3715
3716static int
3717em_sysctl_stats(SYSCTL_HANDLER_ARGS)
3718{
3719        int error;
3720        int result;
3721        struct adapter *adapter;
3722
3723        result = -1;
3724        error = sysctl_handle_int(oidp, &result, 0, req);
3725
3726        if (error || !req->newptr)
3727                return (error);
3728
3729        if (result == 1) {
3730                adapter = (struct adapter *)arg1;
3731                em_print_hw_stats(adapter);
3732        }
3733
3734        return error;
3735}
3736
3737static int
3738em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
3739{
3740	struct em_int_delay_info *info;
3741	struct adapter *adapter;
3742	u_int32_t regval;
3743	int error;
3744	int usecs;
3745	int ticks;
3746
3747	info = (struct em_int_delay_info *)arg1;
3748	usecs = info->value;
3749	error = sysctl_handle_int(oidp, &usecs, 0, req);
3750	if (error != 0 || req->newptr == NULL)
3751		return error;
3752	if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
3753		return EINVAL;
3754	info->value = usecs;
3755	ticks = E1000_USECS_TO_TICKS(usecs);
3756
3757	adapter = info->adapter;
3758
3759	EM_LOCK(adapter);
3760	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
3761	regval = (regval & ~0xffff) | (ticks & 0xffff);
3762	/* Handle a few special cases. */
3763	switch (info->offset) {
3764	case E1000_RDTR:
3765	case E1000_82542_RDTR:
3766		regval |= E1000_RDT_FPDB;
3767		break;
3768	case E1000_TIDV:
3769	case E1000_82542_TIDV:
3770		if (ticks == 0) {
3771			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
3772			/* Don't write 0 into the TIDV register. */
3773			regval++;
3774		} else
3775			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3776		break;
3777	}
3778	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
3779	EM_UNLOCK(adapter);
3780	return 0;
3781}
3782
3783static void
3784em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
3785    const char *description, struct em_int_delay_info *info,
3786    int offset, int value)
3787{
3788	info->adapter = adapter;
3789	info->offset = offset;
3790	info->value = value;
3791	SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
3792	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
3793	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
3794	    info, 0, em_sysctl_int_delay, "I", description);
3795}
3796
3797#ifndef NO_EM_FASTINTR
3798static void
3799em_add_int_process_limit(struct adapter *adapter, const char *name,
3800    const char *description, int *limit, int value)
3801{
3802	*limit = value;
3803	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
3804	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
3805	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
3806}
3807#endif
3808