/* if_em.c revision 155426 */
1/**************************************************************************
2
3Copyright (c) 2001-2005, Intel Corporation
4All rights reserved.
5
6Redistribution and use in source and binary forms, with or without
7modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10    this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13    notice, this list of conditions and the following disclaimer in the
14    documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17    contributors may be used to endorse or promote products derived from
18    this software without specific prior written permission.
19
20THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30POSSIBILITY OF SUCH DAMAGE.
31
32***************************************************************************/
33
34/*$FreeBSD: head/sys/dev/em/if_em.c 155426 2006-02-07 13:11:13Z glebius $*/
35
36#ifdef HAVE_KERNEL_OPTION_HEADERS
37#include "opt_device_polling.h"
38#endif
39
40#include <dev/em/if_em.h>
41
42/*********************************************************************
43 *  Set this to one to display debug statistics
44 *********************************************************************/
/* Set to nonzero to have the driver log debug statistics (see the
 * "stats"/"debug_info" sysctl handlers below). */
int             em_display_debug_stats = 0;

/*********************************************************************
 *  Driver version
 *********************************************************************/

/* Appended to the device description string reported by em_probe(). */
char em_driver_version[] = "Version - 3.2.18";
52
53
54/*********************************************************************
55 *  PCI Device ID Table
56 *
57 *  Used by probe to select devices to load on
58 *  Last field stores an index into em_strings
59 *  Last entry must be all 0s
60 *
61 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
62 *********************************************************************/
63
/* PCI ID match table consumed by em_probe().  Every supported device
 * shares string index 0 ("Intel(R) PRO/1000 Network Connection"). */
static em_vendor_info_t em_vendor_info_array[] =
{
        /* Intel(R) PRO/1000 Network Connection */
        { 0x8086, E1000_DEV_ID_82540EM,             PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82540EM_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82540EP,             PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82540EP_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82540EP_LP,          PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82541EI,             PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541ER,             PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541ER_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541EI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541GI,             PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541GI_LF,          PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541GI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82542,               PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82543GC_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82543GC_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82544EI_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82544EI_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82544GC_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82544GC_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82545EM_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82545EM_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82545GM_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82545GM_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82545GM_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82546EB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546EB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_PCIE,        PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82547EI,             PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82547EI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82547GI,             PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82571EB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82572EI_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82573E,              PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82573E_IAMT,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82573L,              PCI_ANY_ID, PCI_ANY_ID, 0},

        /* required last entry */
        { 0, 0, 0, 0, 0}
};
125
126/*********************************************************************
127 *  Table of branding strings for all supported NICs.
128 *********************************************************************/
129
/* Branding strings indexed by the last field of em_vendor_info_array. */
static char *em_strings[] = {
	"Intel(R) PRO/1000 Network Connection"
};
133
134/*********************************************************************
135 *  Function prototypes
136 *********************************************************************/
/* Newbus device methods */
static int  em_probe(device_t);
static int  em_attach(device_t);
static int  em_detach(device_t);
static int  em_shutdown(device_t);
static int  em_suspend(device_t);
static int  em_resume(device_t);
/* Interrupt handlers (legacy and fast/filter variants) */
static void em_intr(void *);
#ifndef NO_EM_FASTINTR
static void em_intr_fast(void *);
#endif
/* ifnet entry points */
static void em_start(struct ifnet *);
static void em_start_locked(struct ifnet *ifp);
static int  em_ioctl(struct ifnet *, u_long, caddr_t);
static void em_watchdog(struct ifnet *);
static void em_init(void *);
static void em_init_locked(struct adapter *);
static void em_stop(void *);
static void em_media_status(struct ifnet *, struct ifmediareq *);
static int  em_media_change(struct ifnet *);
/* Hardware/resource setup and teardown */
static void em_identify_hardware(struct adapter *);
static int  em_allocate_pci_resources(struct adapter *);
static int  em_allocate_intr(struct adapter *);
static void em_free_intr(struct adapter *);
static void em_free_pci_resources(struct adapter *);
static void em_local_timer(void *);
static int  em_hardware_init(struct adapter *);
static void em_setup_interface(device_t, struct adapter *);
static int  em_setup_transmit_structures(struct adapter *);
static void em_initialize_transmit_unit(struct adapter *);
static int  em_setup_receive_structures(struct adapter *);
static void em_initialize_receive_unit(struct adapter *);
static void em_enable_intr(struct adapter *);
static void em_disable_intr(struct adapter *);
static void em_free_transmit_structures(struct adapter *);
static void em_free_receive_structures(struct adapter *);
static void em_update_stats_counters(struct adapter *);
static void em_clean_transmit_interrupts(struct adapter *);
static int  em_allocate_receive_structures(struct adapter *);
static int  em_allocate_transmit_structures(struct adapter *);
static int em_process_receive_interrupts(struct adapter *, int);
#ifndef __NO_STRICT_ALIGNMENT
static int  em_fixup_rx(struct adapter *);
#endif
/* Checksum offload helpers */
static void em_receive_checksum(struct adapter *,
				struct em_rx_desc *,
				struct mbuf *);
static void em_transmit_checksum_setup(struct adapter *,
				       struct mbuf *,
				       u_int32_t *,
				       u_int32_t *);
static void em_set_promisc(struct adapter *);
static void em_disable_promisc(struct adapter *);
static void em_set_multi(struct adapter *);
static void em_print_hw_stats(struct adapter *);
static void em_print_link_status(struct adapter *);
static int  em_get_buf(int i, struct adapter *,
		       struct mbuf *);
static void em_enable_vlans(struct adapter *);
static void em_disable_vlans(struct adapter *);
static int  em_encap(struct adapter *, struct mbuf **);
static void em_smartspeed(struct adapter *);
/* 82547 TX FIFO workaround helpers */
static int  em_82547_fifo_workaround(struct adapter *, int);
static void em_82547_update_fifo_head(struct adapter *, int);
static int  em_82547_tx_fifo_reset(struct adapter *);
static void em_82547_move_tail(void *arg);
static void em_82547_move_tail_locked(struct adapter *);
/* DMA memory helpers */
static int  em_dma_malloc(struct adapter *, bus_size_t,
			  struct em_dma_alloc *, int);
static void em_dma_free(struct adapter *, struct em_dma_alloc *);
static void em_print_debug_info(struct adapter *);
static int  em_is_valid_ether_addr(u_int8_t *);
/* sysctl handlers */
static int  em_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int  em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static u_int32_t em_fill_descriptors (bus_addr_t address,
				      u_int32_t length,
				      PDESC_ARRAY desc_array);
static int  em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static void em_add_int_delay_sysctl(struct adapter *, const char *,
				    const char *, struct em_int_delay_info *,
				    int, int);
#ifndef NO_EM_FASTINTR
/* Taskqueue deferred handlers used by the fast interrupt path */
static void em_add_int_process_limit(struct adapter *, const char *,
				     const char *, int *, int);
static void em_handle_rxtx(void *context, int pending);
static void em_handle_link(void *context, int pending);
#endif
#ifdef DEVICE_POLLING
static poll_handler_t em_poll;
#endif
226
227/*********************************************************************
228 *  FreeBSD Device Interface Entry Points
229 *********************************************************************/
230
/* Newbus method dispatch table; terminated by the {0, 0} sentinel. */
static device_method_t em_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, em_probe),
	DEVMETHOD(device_attach, em_attach),
	DEVMETHOD(device_detach, em_detach),
	DEVMETHOD(device_shutdown, em_shutdown),
	DEVMETHOD(device_suspend, em_suspend),
	DEVMETHOD(device_resume, em_resume),
	{0, 0}
};

/* softc size lets newbus allocate struct adapter per device instance */
static driver_t em_driver = {
	"em", em_methods, sizeof(struct adapter ),
};

static devclass_t em_devclass;
DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
MODULE_DEPEND(em, pci, 1, 1, 1);
MODULE_DEPEND(em, ether, 1, 1, 1);
250
251/*********************************************************************
252 *  Tunable default values.
253 *********************************************************************/
254
/* The interrupt-delay registers count in 1.024 us ticks; these macros
 * convert between ticks and microseconds with rounding to nearest. */
#define E1000_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
#define E1000_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)

/* Defaults (in usecs) for the interrupt-delay sysctls added in em_attach() */
static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
/* Descriptor-ring sizes; validated against hardware limits in em_attach() */
static int em_rxd = EM_DEFAULT_RXD;
static int em_txd = EM_DEFAULT_TXD;

TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rxd", &em_rxd);
TUNABLE_INT("hw.em.txd", &em_txd);
#ifndef NO_EM_FASTINTR
/* Cap on RX packets handled per taskqueue invocation (fast-intr path) */
static int em_rx_process_limit = 100;
TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
#endif
275
276/*********************************************************************
277 *  Device identification routine
278 *
279 *  em_probe determines if the driver should be loaded on
280 *  adapter based on PCI vendor/device id of the adapter.
281 *
282 *  return BUS_PROBE_DEFAULT on success, positive on failure
283 *********************************************************************/
284
285static int
286em_probe(device_t dev)
287{
288	em_vendor_info_t *ent;
289
290	u_int16_t       pci_vendor_id = 0;
291	u_int16_t       pci_device_id = 0;
292	u_int16_t       pci_subvendor_id = 0;
293	u_int16_t       pci_subdevice_id = 0;
294	char            adapter_name[60];
295
296	INIT_DEBUGOUT("em_probe: begin");
297
298	pci_vendor_id = pci_get_vendor(dev);
299	if (pci_vendor_id != EM_VENDOR_ID)
300		return(ENXIO);
301
302	pci_device_id = pci_get_device(dev);
303	pci_subvendor_id = pci_get_subvendor(dev);
304	pci_subdevice_id = pci_get_subdevice(dev);
305
306	ent = em_vendor_info_array;
307	while (ent->vendor_id != 0) {
308		if ((pci_vendor_id == ent->vendor_id) &&
309		    (pci_device_id == ent->device_id) &&
310
311		    ((pci_subvendor_id == ent->subvendor_id) ||
312		     (ent->subvendor_id == PCI_ANY_ID)) &&
313
314		    ((pci_subdevice_id == ent->subdevice_id) ||
315		     (ent->subdevice_id == PCI_ANY_ID))) {
316			sprintf(adapter_name, "%s %s",
317				em_strings[ent->index],
318				em_driver_version);
319			device_set_desc_copy(dev, adapter_name);
320			return(BUS_PROBE_DEFAULT);
321		}
322		ent++;
323	}
324
325	return(ENXIO);
326}
327
328/*********************************************************************
329 *  Device initialization routine
330 *
331 *  The attach entry point is called when the driver is being loaded.
332 *  This routine identifies the type of hardware, allocates all resources
333 *  and initializes the hardware.
334 *
335 *  return 0 on success, positive on failure
336 *********************************************************************/
337
static int
em_attach(device_t dev)
{
	struct adapter * adapter;
	int             tsize, rsize;	/* TX/RX descriptor ring sizes, bytes */
	int		error = 0;

	INIT_DEBUGOUT("em_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	if (!(adapter = device_get_softc(dev))) {
		printf("em: adapter structure allocation failed\n");
		return(ENOMEM);
	}
	bzero(adapter, sizeof(struct adapter ));
	adapter->dev = dev;
	adapter->osdep.dev = dev;
	adapter->unit = device_get_unit(dev);
	EM_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL stuff */
        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                        OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
                        (void *)adapter, 0,
                        em_sysctl_debug_info, "I", "Debug Information");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                        OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
                        (void *)adapter, 0,
                        em_sysctl_stats, "I", "Statistics");

	/* MPSAFE callouts: local_timer and the 82547 TX FIFO workaround */
	callout_init(&adapter->timer, CALLOUT_MPSAFE);
	callout_init(&adapter->tx_fifo_timer, CALLOUT_MPSAFE);

	/* Determine hardware revision */
	em_identify_hardware(adapter);

	/* Set up some sysctls for the tunable interrupt delays */
	em_add_int_delay_sysctl(adapter, "rx_int_delay",
	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
	    E1000_REG_OFFSET(&adapter->hw, RDTR), em_rx_int_delay_dflt);
	em_add_int_delay_sysctl(adapter, "tx_int_delay",
	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
	    E1000_REG_OFFSET(&adapter->hw, TIDV), em_tx_int_delay_dflt);
	/* Absolute-delay registers (RADV/TADV) only exist on 82540 and up */
	if (adapter->hw.mac_type >= em_82540) {
		em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
		    "receive interrupt delay limit in usecs",
		    &adapter->rx_abs_int_delay,
		    E1000_REG_OFFSET(&adapter->hw, RADV),
		    em_rx_abs_int_delay_dflt);
		em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
		    "transmit interrupt delay limit in usecs",
		    &adapter->tx_abs_int_delay,
		    E1000_REG_OFFSET(&adapter->hw, TADV),
		    em_tx_abs_int_delay_dflt);
	}

	/* Sysctls for limiting the amount of work done in the taskqueue */
#ifndef NO_EM_FASTINTR
	em_add_int_process_limit(adapter, "rx_processing_limit",
	    "max number of rx packets to process", &adapter->rx_process_limit,
	    em_rx_process_limit);
#endif

	/*
	 * Validate number of transmit and receive descriptors. It
	 * must not exceed hardware maximum, and must be multiple
	 * of E1000_DBA_ALIGN.
	 */
	if (((em_txd * sizeof(struct em_tx_desc)) % E1000_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac_type >= em_82544 && em_txd > EM_MAX_TXD) ||
	    (adapter->hw.mac_type < em_82544 && em_txd > EM_MAX_TXD_82543) ||
	    (em_txd < EM_MIN_TXD)) {
		printf("em%d: Using %d TX descriptors instead of %d!\n",
		    adapter->unit, EM_DEFAULT_TXD, em_txd);
		adapter->num_tx_desc = EM_DEFAULT_TXD;
	} else
		adapter->num_tx_desc = em_txd;
	if (((em_rxd * sizeof(struct em_rx_desc)) % E1000_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac_type >= em_82544 && em_rxd > EM_MAX_RXD) ||
	    (adapter->hw.mac_type < em_82544 && em_rxd > EM_MAX_RXD_82543) ||
	    (em_rxd < EM_MIN_RXD)) {
		printf("em%d: Using %d RX descriptors instead of %d!\n",
		    adapter->unit, EM_DEFAULT_RXD, em_rxd);
		adapter->num_rx_desc = EM_DEFAULT_RXD;
	} else
		adapter->num_rx_desc = em_rxd;

	/* Shared-code (em_hw) defaults before hardware init */
        adapter->hw.autoneg = DO_AUTO_NEG;
        adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
        adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
        adapter->hw.tbi_compatibility_en = TRUE;
        adapter->rx_buffer_len = EM_RXBUFFER_2048;

	adapter->hw.phy_init_script = 1;
	adapter->hw.phy_reset_disable = FALSE;

#ifndef EM_MASTER_SLAVE
	adapter->hw.master_slave = em_ms_hw_default;
#else
	adapter->hw.master_slave = EM_MASTER_SLAVE;
#endif
	/*
	 * Set the max frame size assuming standard ethernet
	 * sized frames.
	 */
	adapter->hw.max_frame_size =
		ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	adapter->hw.min_frame_size =
		MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;

	/*
	 * This controls when hardware reports transmit completion
	 * status.
	 */
	adapter->hw.report_tx_early = 1;

	if (em_allocate_pci_resources(adapter)) {
		printf("em%d: Allocation of PCI resources failed\n",
		       adapter->unit);
                error = ENXIO;
                goto err_pci;
	}


	/* Initialize eeprom parameters */
        em_init_eeprom_params(&adapter->hw);

	tsize = roundup2(adapter->num_tx_desc * sizeof(struct em_tx_desc),
	    E1000_DBA_ALIGN);

	/* Allocate Transmit Descriptor ring */
        if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
                printf("em%d: Unable to allocate tx_desc memory\n",
                       adapter->unit);
		error = ENOMEM;
                goto err_tx_desc;
        }
        adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;

	rsize = roundup2(adapter->num_rx_desc * sizeof(struct em_rx_desc),
	    E1000_DBA_ALIGN);

	/* Allocate Receive Descriptor ring */
        if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
                printf("em%d: Unable to allocate rx_desc memory\n",
                        adapter->unit);
		error = ENOMEM;
                goto err_rx_desc;
        }
        adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		printf("em%d: Unable to initialize the hardware\n",
		       adapter->unit);
		error = EIO;
                goto err_hw_init;
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (em_read_mac_addr(&adapter->hw) < 0) {
		printf("em%d: EEPROM read error while reading mac address\n",
		       adapter->unit);
		error = EIO;
                goto err_mac_addr;
	}

	if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
                printf("em%d: Invalid mac address\n", adapter->unit);
                error = EIO;
                goto err_mac_addr;
        }

	/* Setup OS specific network interface */
	em_setup_interface(dev, adapter);

	em_allocate_intr(adapter);

	/* Initialize statistics */
	em_clear_hw_cntrs(&adapter->hw);
	em_update_stats_counters(adapter);
	adapter->hw.get_link_status = 1;
	em_check_for_link(&adapter->hw);

	if (bootverbose) {
		/* Print the link status */
		if (adapter->link_active == 1) {
			em_get_speed_and_duplex(&adapter->hw,
			    &adapter->link_speed, &adapter->link_duplex);
			printf("em%d:  Speed:%d Mbps  Duplex:%s\n",
			       adapter->unit,
			       adapter->link_speed,
			       adapter->link_duplex == FULL_DUPLEX ? "Full" :
				"Half");
		} else
			printf("em%d:  Speed:N/A  Duplex:N/A\n",
			    adapter->unit);
	}

	/* Identify 82544 on PCIX */
        em_get_bus_info(&adapter->hw);
        if(adapter->hw.bus_type == em_bus_type_pcix &&
           adapter->hw.mac_type == em_82544) {
                adapter->pcix_82544 = TRUE;
        }
        else {
                adapter->pcix_82544 = FALSE;
        }
	INIT_DEBUGOUT("em_attach: end");
	return(0);

	/*
	 * Error unwinding: labels fall through in reverse allocation order.
	 * NOTE(review): err_pci also runs em_free_intr()/em_free_pci_resources()
	 * even when the interrupt was never allocated — presumably these
	 * tolerate NULL resources; verify against their implementations.
	 */
err_mac_addr:
err_hw_init:
        em_dma_free(adapter, &adapter->rxdma);
err_rx_desc:
        em_dma_free(adapter, &adapter->txdma);
err_tx_desc:
err_pci:
	em_free_intr(adapter);
        em_free_pci_resources(adapter);
	EM_LOCK_DESTROY(adapter);
        return(error);

}
566
567/*********************************************************************
568 *  Device removal routine
569 *
570 *  The detach entry point is called when the driver is being removed.
571 *  This routine stops the adapter and deallocates all the resources
572 *  that were allocated for driver operation.
573 *
574 *  return 0 on success, positive on failure
575 *********************************************************************/
576
static int
em_detach(device_t dev)
{
	struct adapter * adapter = device_get_softc(dev);
	struct ifnet   *ifp = adapter->ifp;

	INIT_DEBUGOUT("em_detach: begin");

#ifdef DEVICE_POLLING
	/* Stop polling before tearing anything down */
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* Interrupt is released before taking the lock so no handler
	 * can race with the stop/teardown below. */
	em_free_intr(adapter);
	EM_LOCK(adapter);
	adapter->in_detach = 1;	/* makes em_ioctl() a no-op from here on */
	em_stop(adapter);
	em_phy_hw_reset(&adapter->hw);
	EM_UNLOCK(adapter);
        ether_ifdetach(adapter->ifp);

	em_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(ifp);

	/* Free Transmit Descriptor ring */
        if (adapter->tx_desc_base) {
                em_dma_free(adapter, &adapter->txdma);
                adapter->tx_desc_base = NULL;
        }

        /* Free Receive Descriptor ring */
        if (adapter->rx_desc_base) {
                em_dma_free(adapter, &adapter->rxdma);
                adapter->rx_desc_base = NULL;
        }

	EM_LOCK_DESTROY(adapter);

	return(0);
}
618
619/*********************************************************************
620 *
621 *  Shutdown entry point
622 *
623 **********************************************************************/
624
625static int
626em_shutdown(device_t dev)
627{
628	struct adapter *adapter = device_get_softc(dev);
629	EM_LOCK(adapter);
630	em_stop(adapter);
631	EM_UNLOCK(adapter);
632	return(0);
633}
634
635/*
636 * Suspend/resume device methods.
637 */
638static int
639em_suspend(device_t dev)
640{
641	struct adapter *adapter = device_get_softc(dev);
642
643	EM_LOCK(adapter);
644	em_stop(adapter);
645	EM_UNLOCK(adapter);
646
647	return bus_generic_suspend(dev);
648}
649
650static int
651em_resume(device_t dev)
652{
653	struct adapter *adapter = device_get_softc(dev);
654	struct ifnet *ifp = adapter->ifp;
655
656	EM_LOCK(adapter);
657	em_init_locked(adapter);
658	if ((ifp->if_flags & IFF_UP) &&
659	    (ifp->if_drv_flags & IFF_DRV_RUNNING))
660		em_start_locked(ifp);
661	EM_UNLOCK(adapter);
662
663	return bus_generic_resume(dev);
664}
665
666
667/*********************************************************************
668 *  Transmit entry point
669 *
670 *  em_start is called by the stack to initiate a transmit.
671 *  The driver will remain in this routine as long as there are
672 *  packets to transmit and transmit resources are available.
673 *  In case resources are not available stack is notified and
674 *  the packet is requeued.
675 **********************************************************************/
676
static void
em_start_locked(struct ifnet *ifp)
{
	struct adapter	*adapter = ifp->if_softc;
	struct mbuf	*m_head;

	mtx_assert(&adapter->mtx, MA_OWNED);

	/* Nothing can be sent without link */
	if (!adapter->link_active)
		return;

	/* Drain the interface send queue into the hardware TX ring */
	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * em_encap() can modify our pointer, and or make it NULL on
		 * failure.  In that event, we can't requeue.
		 */
		if (em_encap(adapter, &m_head)) {
			if (m_head == NULL)
				break;
			/* Ring full: mark busy and push the mbuf back */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}

		/* Send a copy of the frame to the BPF listener */
		BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		ifp->if_timer = EM_TX_TIMEOUT;
	}
}
712
713static void
714em_start(struct ifnet *ifp)
715{
716	struct adapter *adapter = ifp->if_softc;
717
718	EM_LOCK(adapter);
719	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
720		em_start_locked(ifp);
721	EM_UNLOCK(adapter);
722}
723
724/*********************************************************************
725 *  Ioctl entry point
726 *
727 *  em_ioctl is called when the user wants to configure the
728 *  interface.
729 *
730 *  return 0 on success, positive on failure
731 **********************************************************************/
732
static int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *)data;
	int error = 0;

	/* Refuse everything once em_detach() has begun tearing us down */
	if (adapter->in_detach)
		return(error);

	switch (command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
		ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
	    {
		int max_frame_size;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

		/* Jumbo-frame ceiling varies by MAC generation */
		switch (adapter->hw.mac_type) {
		case em_82571:
		case em_82572:
			max_frame_size = 10500;
			break;
		case em_82573:
			/* 82573 does not support jumbo frames. */
			max_frame_size = ETHER_MAX_LEN;
			break;
		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		/* MTU change requires a full reinit of the hardware */
		EM_LOCK(adapter);
		ifp->if_mtu = ifr->ifr_mtu;
		adapter->hw.max_frame_size =
		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
		em_init_locked(adapter);
		EM_UNLOCK(adapter);
		break;
	    }
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
		EM_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				em_init_locked(adapter);
			}

			/* Re-resolve promiscuous/allmulti from the flags */
			em_disable_promisc(adapter);
			em_set_promisc(adapter);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				em_stop(adapter);
			}
		}
		EM_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			EM_LOCK(adapter);
			em_disable_intr(adapter);
			em_set_multi(adapter);
			/* 82542 rev 2.0 needs its RX unit reprogrammed after
			 * a multicast filter change */
			if (adapter->hw.mac_type == em_82542_rev2_0) {
				em_initialize_receive_unit(adapter);
			}
#ifdef DEVICE_POLLING
			/* Leave interrupts masked while polling is active */
                        if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
				em_enable_intr(adapter);
			EM_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask, reinit;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		reinit = 0;
		/* mask = set of capability bits the caller wants toggled */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(em_poll, ifp);
				if (error)
					return(error);
				EM_LOCK(adapter);
				em_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				EM_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				EM_LOCK(adapter);
				em_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				EM_UNLOCK(adapter);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		/* Offload changes only take effect after a reinit */
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
			em_init(adapter);
		VLAN_CAPABILITIES(ifp);
		break;
	    }
	default:
		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)", (int)command);
		error = EINVAL;
	}

	return(error);
}
867
868/*********************************************************************
869 *  Watchdog entry point
870 *
871 *  This routine is called whenever hardware quits transmitting.
872 *
873 **********************************************************************/
874
static void
em_watchdog(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;

	EM_LOCK(adapter);
	/* If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
		/* Flow-control paused: re-arm the timer and wait */
		ifp->if_timer = EM_TX_TIMEOUT;
		EM_UNLOCK(adapter);
		return;
	}

	/* Only announce the reset if the link itself looks bad */
	if (em_check_for_link(&adapter->hw))
		printf("em%d: watchdog timeout -- resetting\n", adapter->unit);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;

	/* Full reinit recovers the stuck transmitter */
	em_init_locked(adapter);
	EM_UNLOCK(adapter);
}
899
900/*********************************************************************
901 *  Init entry point
902 *
903 *  This routine is used in two ways. It is used by the stack as
904 *  init entry point in network interface structure. It is also used
905 *  by the driver as a hw/sw initialization routine to get to a
906 *  consistent state.
907 *
908 *  return 0 on success, positive on failure
909 **********************************************************************/
910
static void
em_init_locked(struct adapter * adapter)
{
	struct ifnet   *ifp;

	uint32_t	pba;
	ifp = adapter->ifp;

	INIT_DEBUGOUT("em_init: begin");

	/* Caller must hold the adapter lock. */
	mtx_assert(&adapter->mtx, MA_OWNED);

	/* Quiesce the hardware and release existing TX/RX structures first. */
	em_stop(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 */
	switch (adapter->hw.mac_type) {
	case em_82547:
	case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (adapter->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		/* 82547 tracks software FIFO state for the half-duplex
		 * hang workaround (see em_82547_move_tail_locked()). */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	case em_82571: /* 82571: Total Packet Buffer is 48K */
	case em_82572: /* 82572: Total Packet Buffer is 48K */
			pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;
	case em_82573: /* 82573: Total Packet Buffer is 32K */
		/* Jumbo frames not supported */
			pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K.   */
		if(adapter->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}

	INIT_DEBUGOUT1("em_init: pba=%dK",pba);
	E1000_WRITE_REG(&adapter->hw, PBA, pba);

	/* Get the latest mac address, User can use a LAA */
        bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac_addr,
              ETHER_ADDR_LEN);

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		printf("em%d: Unable to initialize the hardware\n",
		       adapter->unit);
		return;
	}

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		em_enable_vlans(adapter);

	/* Prepare transmit descriptors and buffers */
	if (em_setup_transmit_structures(adapter)) {
		printf("em%d: Could not setup transmit structures\n",
		       adapter->unit);
		em_stop(adapter);
		return;
	}
	em_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	em_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (em_setup_receive_structures(adapter)) {
		printf("em%d: Could not setup receive structures\n",
		       adapter->unit);
		em_stop(adapter);
		return;
	}
	em_initialize_receive_unit(adapter);

	/* Don't lose promiscuous settings */
	em_set_promisc(adapter);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* 82543 and newer can offload TX checksums. */
	if (adapter->hw.mac_type >= em_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist = EM_CHECKSUM_FEATURES;
		else
			ifp->if_hwassist = 0;
	}

	/* Restart the one-second stats/link timer. */
	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
	em_clear_hw_cntrs(&adapter->hw);
#ifdef DEVICE_POLLING
        /*
         * Only enable interrupts if we are not polling, make sure
         * they are off otherwise.
         */
        if (ifp->if_capenable & IFCAP_POLLING)
                em_disable_intr(adapter);
        else
#endif /* DEVICE_POLLING */
		em_enable_intr(adapter);

	/* Don't reset the phy next time init gets called */
	adapter->hw.phy_reset_disable = TRUE;

	return;
}
1026
static void
em_init(void *arg)
{
	struct adapter *adapter = arg;

	/* Serialized front end to em_init_locked(). */
	EM_LOCK(adapter);
	em_init_locked(adapter);
	EM_UNLOCK(adapter);
}
1037
1038
1039#ifdef DEVICE_POLLING
static void
em_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	uint32_t reg_icr;

	mtx_assert(&adapter->mtx, MA_OWNED);

	/* On a status poll, also pick up link-state causes from the ICR. */
	if (cmd == POLL_AND_CHECK_STATUS) {
		reg_icr = E1000_READ_REG(&adapter->hw, ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.get_link_status = 1;
			em_check_for_link(&adapter->hw);
			em_print_link_status(adapter);
			callout_reset(&adapter->timer, hz, em_local_timer,
			    adapter);
		}
        }
	/* Service at most 'count' received frames, then reclaim TX descriptors. */
	em_process_receive_interrupts(adapter, count);
	em_clean_transmit_interrupts(adapter);

	/* Kick the transmitter if packets are waiting in the send queue. */
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		em_start_locked(ifp);
}
1065
1066static void
1067em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1068{
1069	struct adapter *adapter = ifp->if_softc;
1070
1071	EM_LOCK(adapter);
1072	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1073		em_poll_locked(ifp, cmd, count);
1074	EM_UNLOCK(adapter);
1075}
1076#endif /* DEVICE_POLLING */
1077
1078#ifndef NO_EM_FASTINTR
1079static void
1080em_handle_link(void *context, int pending)
1081{
1082	struct adapter	*adapter = context;
1083	struct ifnet *ifp;
1084
1085	ifp = adapter->ifp;
1086
1087	EM_LOCK(adapter);
1088
1089	callout_stop(&adapter->timer);
1090	adapter->hw.get_link_status = 1;
1091	em_check_for_link(&adapter->hw);
1092	em_print_link_status(adapter);
1093	callout_reset(&adapter->timer, hz, em_local_timer,
1094	    adapter);
1095	EM_UNLOCK(adapter);
1096}
1097
static void
em_handle_rxtx(void *context, int pending)
{
	struct adapter	*adapter = context;
	struct ifnet	*ifp;

	NET_LOCK_GIANT();
	ifp = adapter->ifp;

	/*
	 * TODO:
	 * It should be possible to run the tx clean loop without the lock.
	 */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * A non-zero return means the RX limit was reached and more
		 * work may be pending: requeue ourselves instead of looping.
		 */
		if (em_process_receive_interrupts(adapter,
		    adapter->rx_process_limit) != 0)
			taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
		EM_LOCK(adapter);
		em_clean_transmit_interrupts(adapter);

		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			em_start_locked(ifp);
		EM_UNLOCK(adapter);
	}

	/* Interrupts were masked in em_intr_fast(); unmask on the way out. */
	em_enable_intr(adapter);
	NET_UNLOCK_GIANT();
}
1126#endif
1127
1128/*********************************************************************
1129 *
1130 *  Interrupt Service routine
1131 *
1132 **********************************************************************/
1133#ifndef NO_EM_FASTINTR
static void
em_intr_fast(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp;
	uint32_t	reg_icr;

	ifp = adapter->ifp;

#ifdef DEVICE_POLLING
	/* All work is done from em_poll() in polling mode. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		return;
	}
#endif /* DEVICE_POLLING */

	/* Reading ICR acknowledges the pending interrupt causes. */
	reg_icr = E1000_READ_REG(&adapter->hw, ICR);

	/* Hot eject?  */
	if (reg_icr == 0xffffffff)
		return;

	/* Definitely not our interrupt.  */
	if (reg_icr == 0x0)
		return;

	/*
	 * Starting with the 82571 chip, bit 31 should be used to
	 * determine whether the interrupt belongs to us.
	 */
	if (adapter->hw.mac_type >= em_82571 &&
	    (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
		return;

	/*
	 * Mask interrupts until the taskqueue is finished running.  This is
	 * cheap, just assume that it is needed.  This also works around the
	 * MSI message reordering errata on certain systems.
	 */
	em_disable_intr(adapter);
	taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))
		taskqueue_enqueue(taskqueue_fast, &adapter->link_task);

	/* RX overrun: just count it here; the taskqueue refills the ring. */
	if (reg_icr & E1000_ICR_RXO) {
		adapter->rx_overruns++;
	}
	return;
}
1184#endif
1185
static void
em_intr(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp;
	uint32_t	reg_icr;
	int		wantinit = 0;

	EM_LOCK(adapter);

	ifp = adapter->ifp;

#ifdef DEVICE_POLLING
	/* All work is done from em_poll() in polling mode. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		EM_UNLOCK(adapter);
		return;
	}
#endif

	/* Loop until the read-to-clear ICR reports no more causes. */
	for (;;) {
		reg_icr = E1000_READ_REG(&adapter->hw, ICR);
		if (adapter->hw.mac_type >= em_82571 &&
		    (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
			break;
		else if (reg_icr == 0)
			break;

		/*
		 * XXX: some laptops trigger several spurious interrupts
		 * on em(4) when in the resume cycle. The ICR register
		 * reports all-ones value in this case. Processing such
		 * interrupts would lead to a freeze. I don't know why.
		 */
		if (reg_icr == 0xffffffff)
			break;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			em_process_receive_interrupts(adapter, -1);
			em_clean_transmit_interrupts(adapter);
		}

		/* Link status change */
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.get_link_status = 1;
			em_check_for_link(&adapter->hw);
			em_print_link_status(adapter);
			callout_reset(&adapter->timer, hz, em_local_timer,
			    adapter);
		}

		/* NOTE(review): wantinit is only consumed by the
		 * disabled #if 0 block below. */
		if (reg_icr & E1000_ICR_RXO) {
			adapter->rx_overruns++;
			wantinit = 1;
		}
	}
#if 0
	if (wantinit)
		em_init_locked(adapter);
#endif
	/* Restart transmission if packets are queued and we are running. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		em_start_locked(ifp);

	EM_UNLOCK(adapter);
}
1252
1253/*********************************************************************
1254 *
1255 *  Media Ioctl callback
1256 *
1257 *  This routine is called whenever the user queries the status of
1258 *  the interface using ifconfig.
1259 *
1260 **********************************************************************/
static void
em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter * adapter = ifp->if_softc;

	INIT_DEBUGOUT("em_media_status: begin");

	/* Refresh the cached link state before reporting it. */
	em_check_for_link(&adapter->hw);
	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
		if (adapter->link_active == 0) {
			em_get_speed_and_duplex(&adapter->hw,
						&adapter->link_speed,
						&adapter->link_duplex);
			adapter->link_active = 1;
		}
	} else {
		if (adapter->link_active == 1) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			adapter->link_active = 0;
		}
	}

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report only "status valid", no active media. */
	if (!adapter->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	/* Fiber is always reported as 1000_SX full duplex. */
	if (adapter->hw.media_type == em_media_type_fiber) {
		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
	} else {
		switch (adapter->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (adapter->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
	return;
}
1313
1314/*********************************************************************
1315 *
1316 *  Media Ioctl callback
1317 *
1318 *  This routine is called when the user changes speed/duplex using
 *  media/mediaopt option with ifconfig.
1320 *
1321 **********************************************************************/
static int
em_media_change(struct ifnet *ifp)
{
	struct adapter * adapter = ifp->if_softc;
	struct ifmedia  *ifm = &adapter->media;

	INIT_DEBUGOUT("em_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return(EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		adapter->hw.autoneg = DO_AUTO_NEG;
		adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;
	case IFM_1000_SX:
	case IFM_1000_T:
		/* 1000 Mbps is only configured via autonegotiation here. */
		adapter->hw.autoneg = DO_AUTO_NEG;
		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case IFM_100_TX:
		adapter->hw.autoneg = FALSE;
		adapter->hw.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.forced_speed_duplex = em_100_full;
		else
			adapter->hw.forced_speed_duplex	= em_100_half;
		break;
	case IFM_10_T:
		adapter->hw.autoneg = FALSE;
		adapter->hw.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.forced_speed_duplex = em_10_full;
		else
			adapter->hw.forced_speed_duplex	= em_10_half;
		break;
	default:
		/*
		 * NOTE(review): falls through and reinitializes anyway;
		 * confirm whether EINVAL should be returned here instead.
		 */
		printf("em%d: Unsupported media type\n", adapter->unit);
	}

	/* As the speed/duplex settings may have changed we need to
	 * reset the PHY.
	 */
	adapter->hw.phy_reset_disable = FALSE;

	em_init(adapter);

	return(0);
}
1372
1373/*********************************************************************
1374 *
1375 *  This routine maps the mbufs to tx descriptors.
1376 *
1377 *  return 0 on success, positive on failure
1378 **********************************************************************/
static int
em_encap(struct adapter *adapter, struct mbuf **m_headp)
{
        struct ifnet		*ifp = adapter->ifp;
	bus_dma_segment_t	segs[EM_MAX_SCATTER];
	bus_dmamap_t		map;
        struct em_buffer	*tx_buffer;
        struct em_tx_desc	*current_tx_desc;
	struct mbuf		*m_head;
        struct m_tag		*mtag;
	uint32_t		txd_upper, txd_lower, txd_used, txd_saved;
	int			nsegs, i, j;
	int			error = 0;

	/* Caller must hold the adapter lock: em_82547_move_tail_locked()
	 * below asserts it. */
	m_head = *m_headp;
	current_tx_desc = NULL;
	txd_used = txd_saved = 0;

	/*
	 * Force a cleanup if number of TX descriptors
	 * available hits the threshold.
	 */
	if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
		em_clean_transmit_interrupts(adapter);
		if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
			adapter->no_tx_desc_avail1++;
			return(ENOBUFS);
		}
	}

	/*
	 * Map the packet for DMA.
	 */
	tx_buffer = &adapter->tx_buffer_area[adapter->next_avail_tx_desc];
	error = bus_dmamap_load_mbuf_sg(adapter->txtag, tx_buffer->map, m_head,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	map = tx_buffer->map;
	if (error != 0) {
		adapter->no_tx_dma_setup++;
		return (error);
	}
	KASSERT(nsegs != 0, ("em_encap: empty packet"));

	if (nsegs > adapter->num_tx_desc_avail) {
		adapter->no_tx_desc_avail2++;
		error = ENOBUFS;
		goto encap_fail;
	}

	/* Set up a checksum-offload context descriptor if requested. */
	if (ifp->if_hwassist > 0) {
		em_transmit_checksum_setup(adapter,  m_head, &txd_upper,
		    &txd_lower);
	} else
		txd_upper = txd_lower = 0;

	/* Find out if we are in vlan mode. */
	mtag = VLAN_OUTPUT_TAG(ifp, m_head);

	/*
	 * When operating in promiscuous mode, hardware encapsulation for
	 * packets is disabled.  This means we have to add the vlan
	 * encapsulation in the driver, since it will have come down from the
	 * VLAN layer with a tag instead of a VLAN header.
	 */
	if (mtag != NULL && adapter->em_insert_vlan_header) {
		struct ether_vlan_header *evl;
		struct ether_header eh;

		/* Save the Ethernet header, prepend room for the VLAN
		 * header, then rebuild it with the 802.1Q fields inline. */
		m_head = m_pullup(m_head, sizeof(eh));
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		eh = *mtod(m_head, struct ether_header *);
		M_PREPEND(m_head, sizeof(*evl), M_DONTWAIT);
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		m_head = m_pullup(m_head, sizeof(*evl));
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		evl = mtod(m_head, struct ether_vlan_header *);
		bcopy(&eh, evl, sizeof(*evl));
		evl->evl_proto = evl->evl_encap_proto;
		evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
		evl->evl_tag = htons(VLAN_TAG_VALUE(mtag));
		m_tag_delete(m_head, mtag);
		mtag = NULL;
		*m_headp = m_head;
	}

	i = adapter->next_avail_tx_desc;
	if (adapter->pcix_82544) {
		txd_saved = i;
		txd_used = 0;
	}
	/* Fill one TX descriptor per DMA segment (more on 82544/PCI-X). */
	for (j = 0; j < nsegs; j++) {
		/* If adapter is 82544 and on PCIX bus. */
		if(adapter->pcix_82544) {
			DESC_ARRAY	desc_array;
			uint32_t	array_elements, counter;

			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
			array_elements = em_fill_descriptors(segs[j].ds_addr,
			    segs[j].ds_len, &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				if (txd_used == adapter->num_tx_desc_avail) {
					/* Ring exhausted: roll back to the
					 * descriptor index saved above. */
					adapter->next_avail_tx_desc = txd_saved;
					adapter->no_tx_desc_avail2++;
					error = ENOBUFS;
					goto encap_fail;
				}
				tx_buffer = &adapter->tx_buffer_area[i];
				current_tx_desc = &adapter->tx_desc_base[i];
				current_tx_desc->buffer_addr = htole64(
					desc_array.descriptor[counter].address);
				current_tx_desc->lower.data = htole32(
					(adapter->txd_cmd | txd_lower |
					(uint16_t)desc_array.descriptor[counter].length));
				current_tx_desc->upper.data = htole32((txd_upper));
				if (++i == adapter->num_tx_desc)
					i = 0;

				tx_buffer->m_head = NULL;
				txd_used++;
			}
		} else {
			tx_buffer = &adapter->tx_buffer_area[i];
			current_tx_desc = &adapter->tx_desc_base[i];

			current_tx_desc->buffer_addr = htole64(segs[j].ds_addr);
			current_tx_desc->lower.data = htole32(
				adapter->txd_cmd | txd_lower | segs[j].ds_len);
			current_tx_desc->upper.data = htole32(txd_upper);

			if (++i == adapter->num_tx_desc)
				i = 0;

			tx_buffer->m_head = NULL;
		}
	}

	adapter->next_avail_tx_desc = i;
	if (adapter->pcix_82544)
		adapter->num_tx_desc_avail -= txd_used;
	else
		adapter->num_tx_desc_avail -= nsegs;

	if (mtag != NULL) {
		/* Set the vlan id. */
		current_tx_desc->upper.fields.special =
		    htole16(VLAN_TAG_VALUE(mtag));

		/* Tell hardware to add tag. */
		current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
	}

	/* The mbuf chain is owned by the last descriptor's buffer slot. */
	tx_buffer->m_head = m_head;
	bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP).
	 */
	current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);

	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
	 * that this frame is available to transmit.
	 */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	if (adapter->hw.mac_type == em_82547 &&
	    adapter->link_duplex == HALF_DUPLEX) {
		/* 82547 half duplex: tail advance goes through the FIFO
		 * hang workaround instead of a direct TDT write. */
		em_82547_move_tail_locked(adapter);
	} else {
		E1000_WRITE_REG(&adapter->hw, TDT, i);
		if (adapter->hw.mac_type == em_82547) {
			em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len);
		}
	}

	return(0);

encap_fail:
	bus_dmamap_unload(adapter->txtag, tx_buffer->map);
	return (error);
}
1575
1576/*********************************************************************
1577 *
1578 * 82547 workaround to avoid controller hang in half-duplex environment.
1579 * The workaround is to avoid queuing a large packet that would span
1580 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1581 * in this case. We do that only when FIFO is quiescent.
1582 *
1583 **********************************************************************/
static void
em_82547_move_tail_locked(struct adapter *adapter)
{
	uint16_t hw_tdt;
	uint16_t sw_tdt;
	struct em_tx_desc *tx_desc;
	uint16_t length = 0;
	boolean_t eop = 0;

	EM_LOCK_ASSERT(adapter);

	hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
	sw_tdt = adapter->next_avail_tx_desc;

	/*
	 * Walk the descriptors the hardware has not seen yet, advancing
	 * the hardware tail one whole packet (up to EOP) at a time.
	 */
	while (hw_tdt != sw_tdt) {
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if(++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if(eop) {
			/*
			 * If queuing this packet now would risk the FIFO
			 * wrap hang, defer and retry from a 1-tick callout.
			 */
			if (em_82547_fifo_workaround(adapter, length)) {
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
					em_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
			em_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}
	return;
}
1619
static void
em_82547_move_tail(void *arg)
{
	struct adapter *adapter = arg;

	/* Callout front end: take the lock and run the real routine. */
	EM_LOCK(adapter);
	em_82547_move_tail_locked(adapter);
	EM_UNLOCK(adapter);
}
1629
1630static int
1631em_82547_fifo_workaround(struct adapter *adapter, int len)
1632{
1633	int fifo_space, fifo_pkt_len;
1634
1635	fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1636
1637	if (adapter->link_duplex == HALF_DUPLEX) {
1638		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1639
1640		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1641			if (em_82547_tx_fifo_reset(adapter))
1642				return(0);
1643			else
1644				return(1);
1645		}
1646	}
1647
1648	return(0);
1649}
1650
1651static void
1652em_82547_update_fifo_head(struct adapter *adapter, int len)
1653{
1654	int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1655
1656	/* tx_fifo_head is always 16 byte aligned */
1657	adapter->tx_fifo_head += fifo_pkt_len;
1658	if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1659		adapter->tx_fifo_head -= adapter->tx_fifo_size;
1660	}
1661
1662	return;
1663}
1664
1665
static int
em_82547_tx_fifo_reset(struct adapter *adapter)
{
	uint32_t tctl;

	/* The reset is only safe once the TX ring and the internal FIFO
	 * are completely drained (tail == head and packet count zero). */
	if ( (E1000_READ_REG(&adapter->hw, TDT) ==
	      E1000_READ_REG(&adapter->hw, TDH)) &&
	     (E1000_READ_REG(&adapter->hw, TDFT) ==
	      E1000_READ_REG(&adapter->hw, TDFH)) &&
	     (E1000_READ_REG(&adapter->hw, TDFTS) ==
	      E1000_READ_REG(&adapter->hw, TDFHS)) &&
	     (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {

		/* Disable TX unit */
		tctl = E1000_READ_REG(&adapter->hw, TCTL);
		E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&adapter->hw, TDFT,  adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFH,  adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
		E1000_WRITE_FLUSH(&adapter->hw);

		adapter->tx_fifo_head = 0;
		adapter->tx_fifo_reset_cnt++;

		/* TRUE: FIFO was reset, caller may queue the packet now. */
		return(TRUE);
	}
	else {
		return(FALSE);
	}
}
1702
static void
em_set_promisc(struct adapter * adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	uint32_t	reg_rctl;

	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
		/* Disable VLAN stripping in promiscuous mode
		 * This enables bridging of vlan tagged frames to occur
		 * and also allows vlan tags to be seen in tcpdump
		 */
		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
			em_disable_vlans(adapter);
		/* With stripping off, the driver inserts tags itself
		 * (see em_encap()). */
		adapter->em_insert_vlan_header = 1;
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= E1000_RCTL_MPE;
		reg_rctl &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
		adapter->em_insert_vlan_header = 0;
	} else
		/* NOTE(review): this path leaves RCTL untouched; UPE/MPE
		 * are cleared by em_disable_promisc(), not here. */
		adapter->em_insert_vlan_header = 0;
}
1729
1730static void
1731em_disable_promisc(struct adapter * adapter)
1732{
1733	struct ifnet	*ifp = adapter->ifp;
1734	uint32_t	reg_rctl;
1735
1736	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1737
1738	reg_rctl &=  (~E1000_RCTL_UPE);
1739	reg_rctl &=  (~E1000_RCTL_MPE);
1740	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1741
1742	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1743		em_enable_vlans(adapter);
1744	adapter->em_insert_vlan_header = 0;
1745}
1746
1747
1748/*********************************************************************
1749 *  Multicast Update
1750 *
1751 *  This routine is called whenever multicast address list is updated.
1752 *
1753 **********************************************************************/
1754
static void
em_set_multi(struct adapter * adapter)
{
        struct ifnet	*ifp = adapter->ifp;
        struct ifmultiaddr *ifma;
        uint32_t reg_rctl = 0;
        uint8_t  mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
        int mcnt = 0;

	IOCTL_DEBUGOUT("em_set_multi: begin");

	/* 82542 rev2.0 needs the receiver held in reset (and MWI off)
	 * while the multicast table is rewritten. */
	if (adapter->hw.mac_type == em_82542_rev2_0) {
		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
		if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
                        em_pci_clear_mwi(&adapter->hw);
		reg_rctl |= E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
		msec_delay(5);
	}

	/* Collect up to MAX_NUM_MULTICAST_ADDRESSES link-level addresses. */
	IF_ADDR_LOCK(ifp);
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}
	IF_ADDR_UNLOCK(ifp);

	/* Filter table too small: fall back to multicast promiscuous. */
	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
	} else
		em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);

	/* Take the 82542 rev2.0 receiver back out of reset. */
	if (adapter->hw.mac_type == em_82542_rev2_0) {
		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
		reg_rctl &= ~E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
		msec_delay(5);
		if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			em_pci_set_mwi(&adapter->hw);
	}
}
1805
1806
1807/*********************************************************************
1808 *  Timer routine
1809 *
1810 *  This routine checks for link status and updates statistics.
1811 *
1812 **********************************************************************/
1813
static void
em_local_timer(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp = adapter->ifp;

	EM_LOCK(adapter);

	/* Periodic work: refresh link state and statistics. */
	em_check_for_link(&adapter->hw);
	em_print_link_status(adapter);
	em_update_stats_counters(adapter);
	if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING)
		em_print_hw_stats(adapter);
	em_smartspeed(adapter);

	/* Reschedule ourselves for one second (hz ticks) from now. */
	callout_reset(&adapter->timer, hz, em_local_timer, adapter);

	EM_UNLOCK(adapter);
}
1833
static void
em_print_link_status(struct adapter * adapter)
{
	struct ifnet *ifp = adapter->ifp;

	/* Compare the hardware link-up bit against our cached state and
	 * notify the stack only on transitions. */
	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
		if (adapter->link_active == 0) {
			em_get_speed_and_duplex(&adapter->hw,
			    &adapter->link_speed,
			    &adapter->link_duplex);
			if (bootverbose)
				printf("em%d: Link is up %d Mbps %s\n",
				    adapter->unit,
				    adapter->link_speed,
				    ((adapter->link_duplex == FULL_DUPLEX) ?
				    "Full Duplex" : "Half Duplex"));
			adapter->link_active = 1;
			adapter->smartspeed = 0;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else {
		if (adapter->link_active == 1) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			if (bootverbose)
				printf("em%d: Link is Down\n", adapter->unit);
			adapter->link_active = 0;
			if_link_state_change(ifp, LINK_STATE_DOWN);
		}
	}
}
1865
1866/*********************************************************************
1867 *
1868 *  This routine disables all traffic on the adapter by issuing a
1869 *  global reset on the MAC and deallocates TX/RX buffers.
1870 *
1871 **********************************************************************/
1872
static void
em_stop(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp = adapter->ifp;

	EM_LOCK_ASSERT(adapter);

	INIT_DEBUGOUT("em_stop: begin");

	/* Mask interrupts and reset the MAC before tearing down the
	 * timers and the TX/RX rings. */
	em_disable_intr(adapter);
	em_reset_hw(&adapter->hw);
	callout_stop(&adapter->timer);
	callout_stop(&adapter->tx_fifo_timer);
	em_free_transmit_structures(adapter);
	em_free_receive_structures(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}
1893
1894
1895/*********************************************************************
1896 *
1897 *  Determine hardware revision.
1898 *
1899 **********************************************************************/
1900static void
1901em_identify_hardware(struct adapter * adapter)
1902{
1903	device_t dev = adapter->dev;
1904
1905	/* Make sure our PCI config space has the necessary stuff set */
1906	adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1907	if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1908	      (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1909		printf("em%d: Memory Access and/or Bus Master bits were not set!\n",
1910		       adapter->unit);
1911		adapter->hw.pci_cmd_word |=
1912		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1913		pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
1914	}
1915
1916	/* Save off the information about this board */
1917	adapter->hw.vendor_id = pci_get_vendor(dev);
1918	adapter->hw.device_id = pci_get_device(dev);
1919	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1920	adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
1921	adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
1922
1923	/* Identify the MAC */
1924        if (em_set_mac_type(&adapter->hw))
1925                printf("em%d: Unknown MAC Type\n", adapter->unit);
1926
1927	if(adapter->hw.mac_type == em_82541 ||
1928	   adapter->hw.mac_type == em_82541_rev_2 ||
1929	   adapter->hw.mac_type == em_82547 ||
1930	   adapter->hw.mac_type == em_82547_rev_2)
1931		adapter->hw.phy_init_script = TRUE;
1932
1933        return;
1934}
1935
static int
em_allocate_pci_resources(struct adapter * adapter)
{
	int             val, rid;
	device_t        dev = adapter->dev;

	/* Map the register BAR (BAR 0). */
	rid = PCIR_BAR(0);
	adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						     &rid, RF_ACTIVE);
	if (!(adapter->res_memory)) {
		printf("em%d: Unable to allocate bus resource: memory\n",
		       adapter->unit);
		return(ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
	rman_get_bustag(adapter->res_memory);
	adapter->osdep.mem_bus_space_handle =
	rman_get_bushandle(adapter->res_memory);
	adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;


	if (adapter->hw.mac_type > em_82543) {
		/* Figure out which BAR is the I/O BAR by scanning config
		 * space up to the CIS pointer. */
		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
			val = pci_read_config(dev, rid, 4);
			if (E1000_BAR_TYPE(val) == E1000_BAR_TYPE_IO) {
				adapter->io_rid = rid;
				break;
			}
			rid += 4;
			/* check for 64bit BAR */
			if (E1000_BAR_MEM_TYPE(val) == E1000_BAR_MEM_TYPE_64BIT)
				rid += 4;
		}
		if (rid >= PCIR_CIS) {
			printf("em%d: Unable to locate IO BAR\n", adapter->unit);
			return (ENXIO);
		}
		adapter->res_ioport = bus_alloc_resource_any(dev,
							     SYS_RES_IOPORT,
							     &adapter->io_rid,
							     RF_ACTIVE);
		if (!(adapter->res_ioport)) {
			printf("em%d: Unable to allocate bus resource: ioport\n",
			       adapter->unit);
			return(ENXIO);
		}
		adapter->hw.io_base = 0;
		adapter->osdep.io_bus_space_tag =
		    rman_get_bustag(adapter->res_ioport);
		adapter->osdep.io_bus_space_handle =
		    rman_get_bushandle(adapter->res_ioport);
	}

	/* Allocate the (shareable) interrupt line. */
	rid = 0x0;
	adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
						        RF_SHAREABLE |
							RF_ACTIVE);
	if (!(adapter->res_interrupt)) {
		printf("em%d: Unable to allocate bus resource: interrupt\n",
		       adapter->unit);
		return(ENXIO);
	}

	adapter->hw.back = &adapter->osdep;

	return(0);
}
2004
2005int
2006em_allocate_intr(struct adapter *adapter)
2007{
2008	device_t        dev = adapter->dev;
2009
2010	/* Manually turn off all interrupts */
2011	E1000_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
2012
2013	/*
2014	 * Try allocating a fast interrupt and the associated deferred
2015	 * processing contexts.  If that doesn't work, try just using an
2016	 * ithread.
2017	 */
2018#ifndef NO_EM_FASTINTR
2019	/* Init the deferred processing contexts. */
2020	TASK_INIT(&adapter->rxtx_task, 0, em_handle_rxtx, adapter);
2021	TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
2022	adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
2023		taskqueue_thread_enqueue, &adapter->tq);
2024	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2025	    device_get_nameunit(adapter->dev));
2026	if (bus_setup_intr(dev, adapter->res_interrupt,
2027			   INTR_TYPE_NET | INTR_FAST, em_intr_fast, adapter,
2028			   &adapter->int_handler_tag) != 0) {
2029		taskqueue_free(adapter->tq);
2030		adapter->tq = NULL;
2031	}
2032#endif
2033	if (adapter->int_handler_tag == NULL) {
2034		if (bus_setup_intr(dev, adapter->res_interrupt,
2035				   INTR_TYPE_NET | INTR_MPSAFE,
2036				   em_intr, adapter,
2037				   &adapter->int_handler_tag)) {
2038			printf("em%d: Error registering interrupt handler!\n",
2039			       adapter->unit);
2040			return(ENXIO);
2041		}
2042	}
2043
2044	em_enable_intr(adapter);
2045	return (0);
2046}
2047
/*
 * Tear down the interrupt handler and drain/free the deferred
 * processing taskqueue.  Safe to call when only part of
 * em_allocate_intr() succeeded (NULL checks throughout).
 */
static void
em_free_intr(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	/* Disconnect the handler first so the ISR can no longer
	 * enqueue new tasks while we drain below. */
	if (adapter->res_interrupt != NULL) {
		bus_teardown_intr(dev, adapter->res_interrupt,
				  adapter->int_handler_tag);
		adapter->int_handler_tag = NULL;
	}
	if (adapter->tq != NULL) {
		taskqueue_drain(adapter->tq, &adapter->rxtx_task);
		/* NOTE(review): link_task is drained from the global
		 * taskqueue_fast queue while rxtx_task is drained from
		 * the private queue -- confirm link_task really is
		 * enqueued on taskqueue_fast by the ISR, otherwise this
		 * drains the wrong queue. */
		taskqueue_drain(taskqueue_fast, &adapter->link_task);
		taskqueue_free(adapter->tq);
		adapter->tq = NULL;
	}
}
2065
2066static void
2067em_free_pci_resources(struct adapter * adapter)
2068{
2069	device_t dev = adapter->dev;
2070
2071	if (adapter->res_interrupt != NULL) {
2072		bus_release_resource(dev, SYS_RES_IRQ, 0,
2073				     adapter->res_interrupt);
2074	}
2075	if (adapter->res_memory != NULL) {
2076		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
2077				     adapter->res_memory);
2078	}
2079
2080	if (adapter->res_ioport != NULL) {
2081		bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
2082				     adapter->res_ioport);
2083	}
2084	return;
2085}
2086
2087/*********************************************************************
2088 *
2089 *  Initialize the hardware to a configuration as specified by the
2090 *  adapter structure. The controller is reset, the EEPROM is
2091 *  verified, the MAC address is set, then the shared initialization
2092 *  routines are called.
2093 *
2094 **********************************************************************/
static int
em_hardware_init(struct adapter * adapter)
{
	uint16_t rx_buffer_size;

        INIT_DEBUGOUT("em_hardware_init: begin");
	/* Issue a global reset */
	em_reset_hw(&adapter->hw);

	/* When hardware is reset, fifo_head is also reset */
	adapter->tx_fifo_head = 0;

	/* Make sure we have a good EEPROM before we read from it */
	if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
		printf("em%d: The EEPROM Checksum Is Not Valid\n",
		       adapter->unit);
		return(EIO);
	}

	if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
		printf("em%d: EEPROM read error while reading part number\n",
		       adapter->unit);
		return(EIO);
	}

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has drained
	 *   a bit.  Here we use an arbitary value of 1500 which will restart after
	 *   one full frame is pulled from the buffer.  There could be several smaller
	 *   frames in the buffer and if so they will not trigger the XON until their
	 *   total number reduces the buffer by 1500.
	 * - NOTE(review): fc_pause_time is 0x1000 = 4096 decimal, giving a
	 *   pause time of 4096 x 512ns ~= 2.1 msec, not the "1000 x 512ns
	 *   = 512 usec" this comment originally claimed -- confirm which
	 *   value is intended.
	 */
	/* PBA[15:0] holds the Rx packet buffer size in KB; convert to
	 * bytes.  NOTE(review): rx_buffer_size is uint16_t, so a 64KB or
	 * larger Rx buffer would overflow to 0 -- confirm the hardware
	 * never reports >= 64KB here. */
	rx_buffer_size = ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff) << 10 );

	adapter->hw.fc_high_water = rx_buffer_size -
	    roundup2(adapter->hw.max_frame_size, 1024);
	adapter->hw.fc_low_water = adapter->hw.fc_high_water - 1500;
	adapter->hw.fc_pause_time = 0x1000;
	adapter->hw.fc_send_xon = TRUE;
	adapter->hw.fc = em_fc_full;

	if (em_init_hw(&adapter->hw) < 0) {
		printf("em%d: Hardware Initialization Failed",
		       adapter->unit);
		return(EIO);
	}

	/* Snapshot the current link state for the rest of the driver. */
	em_check_for_link(&adapter->hw);
	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
		adapter->link_active = 1;
	else
		adapter->link_active = 0;

	if (adapter->link_active) {
		em_get_speed_and_duplex(&adapter->hw,
					&adapter->link_speed,
					&adapter->link_duplex);
	} else {
		adapter->link_speed = 0;
		adapter->link_duplex = 0;
	}

	return(0);
}
2165
2166/*********************************************************************
2167 *
2168 *  Setup networking device structure and register an interface.
2169 *
2170 **********************************************************************/
2171static void
2172em_setup_interface(device_t dev, struct adapter * adapter)
2173{
2174	struct ifnet   *ifp;
2175	INIT_DEBUGOUT("em_setup_interface: begin");
2176
2177	ifp = adapter->ifp = if_alloc(IFT_ETHER);
2178	if (ifp == NULL)
2179		panic("%s: can not if_alloc()", device_get_nameunit(dev));
2180	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2181	ifp->if_mtu = ETHERMTU;
2182	ifp->if_baudrate = 1000000000;
2183	ifp->if_init =  em_init;
2184	ifp->if_softc = adapter;
2185	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2186	ifp->if_ioctl = em_ioctl;
2187	ifp->if_start = em_start;
2188	ifp->if_watchdog = em_watchdog;
2189	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
2190	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
2191	IFQ_SET_READY(&ifp->if_snd);
2192
2193        ether_ifattach(ifp, adapter->hw.mac_addr);
2194
2195	ifp->if_capabilities = ifp->if_capenable = 0;
2196
2197	if (adapter->hw.mac_type >= em_82543) {
2198		ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2199		ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2200	}
2201
2202	/*
2203	 * Tell the upper layer(s) we support long frames.
2204	 */
2205	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2206	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2207	ifp->if_capenable |= IFCAP_VLAN_MTU;
2208
2209#ifdef DEVICE_POLLING
2210	ifp->if_capabilities |= IFCAP_POLLING;
2211#endif
2212
2213	/*
2214	 * Specify the media types supported by this adapter and register
2215	 * callbacks to update media and link information
2216	 */
2217	ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
2218		     em_media_status);
2219	if (adapter->hw.media_type == em_media_type_fiber) {
2220		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
2221			    0, NULL);
2222		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
2223			    0, NULL);
2224	} else {
2225		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2226		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2227			    0, NULL);
2228		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
2229			    0, NULL);
2230		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
2231			    0, NULL);
2232		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
2233			    0, NULL);
2234		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2235	}
2236	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2237	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2238
2239	return;
2240}
2241
2242
2243/*********************************************************************
2244 *
2245 *  Workaround for SmartSpeed on 82541 and 82547 controllers
2246 *
2247 **********************************************************************/
/*
 * SmartSpeed workaround for 82541/82547: when gigabit autonegotiation
 * repeatedly fails with a Master/Slave configuration fault, toggle
 * manual Master/Slave configuration to coax the link up.  Called
 * periodically while the link is down; adapter->smartspeed counts the
 * iterations and drives the phases below.
 */
static void
em_smartspeed(struct adapter *adapter)
{
        uint16_t phy_tmp;

	/* Only relevant for IGP PHYs autonegotiating 1000BASE-T full
	 * duplex while the link is still down. */
	if(adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
	   !adapter->hw.autoneg || !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
		return;

        if(adapter->smartspeed == 0) {
                /* If Master/Slave config fault is asserted twice,
                 * we assume back-to-back */
                em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
                if(!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) return;
                em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
                if(phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
                        em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
					&phy_tmp);
                        /* Phase 1: disable manual Master/Slave config
                         * and restart autonegotiation. */
                        if(phy_tmp & CR_1000T_MS_ENABLE) {
                                phy_tmp &= ~CR_1000T_MS_ENABLE;
                                em_write_phy_reg(&adapter->hw,
                                                    PHY_1000T_CTRL, phy_tmp);
                                adapter->smartspeed++;
                                if(adapter->hw.autoneg &&
                                   !em_phy_setup_autoneg(&adapter->hw) &&
				   !em_read_phy_reg(&adapter->hw, PHY_CTRL,
                                                       &phy_tmp)) {
                                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
                                                    MII_CR_RESTART_AUTO_NEG);
                                        em_write_phy_reg(&adapter->hw,
							 PHY_CTRL, phy_tmp);
                                }
                        }
                }
                return;
        } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
                /* If still no link, perhaps using 2/3 pair cable */
                /* Phase 2: re-enable manual Master/Slave config and
                 * restart autonegotiation once more. */
                em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
                phy_tmp |= CR_1000T_MS_ENABLE;
                em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
                if(adapter->hw.autoneg &&
                   !em_phy_setup_autoneg(&adapter->hw) &&
                   !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
                                    MII_CR_RESTART_AUTO_NEG);
                        em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
                }
        }
        /* Restart process after EM_SMARTSPEED_MAX iterations */
        if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
                adapter->smartspeed = 0;

	return;
}
2302
2303
2304/*
2305 * Manage DMA'able memory.
2306 */
2307static void
2308em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2309{
2310        if (error)
2311                return;
2312        *(bus_addr_t *) arg = segs[0].ds_addr;
2313}
2314
2315static int
2316em_dma_malloc(struct adapter *adapter, bus_size_t size,
2317        struct em_dma_alloc *dma, int mapflags)
2318{
2319        int r;
2320
2321	r = bus_dma_tag_create(NULL,                    /* parent */
2322                               E1000_DBA_ALIGN, 0,      /* alignment, bounds */
2323                               BUS_SPACE_MAXADDR,       /* lowaddr */
2324                               BUS_SPACE_MAXADDR,       /* highaddr */
2325                               NULL, NULL,              /* filter, filterarg */
2326                               size,                    /* maxsize */
2327                               1,                       /* nsegments */
2328                               size,                    /* maxsegsize */
2329                               0,		        /* flags */
2330			       NULL,			/* lockfunc */
2331			       NULL,			/* lockarg */
2332                               &dma->dma_tag);
2333        if (r != 0) {
2334                printf("em%d: em_dma_malloc: bus_dma_tag_create failed; "
2335                        "error %u\n", adapter->unit, r);
2336                goto fail_0;
2337        }
2338
2339        r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2340                             BUS_DMA_NOWAIT, &dma->dma_map);
2341        if (r != 0) {
2342                printf("em%d: em_dma_malloc: bus_dmammem_alloc failed; "
2343                        "size %ju, error %d\n", adapter->unit,
2344			(uintmax_t)size, r);
2345                goto fail_2;
2346        }
2347
2348	dma->dma_paddr = 0;
2349        r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2350                            size,
2351                            em_dmamap_cb,
2352                            &dma->dma_paddr,
2353                            mapflags | BUS_DMA_NOWAIT);
2354        if (r != 0 || dma->dma_paddr == 0) {
2355                printf("em%d: em_dma_malloc: bus_dmamap_load failed; "
2356                        "error %u\n", adapter->unit, r);
2357                goto fail_3;
2358        }
2359
2360        return (0);
2361
2362fail_3:
2363        bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2364fail_2:
2365        bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2366        bus_dma_tag_destroy(dma->dma_tag);
2367fail_0:
2368        dma->dma_map = NULL;
2369        dma->dma_tag = NULL;
2370        return (r);
2371}
2372
2373static void
2374em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2375{
2376	if (dma->dma_tag == NULL)
2377		return;
2378	if (dma->dma_map != NULL) {
2379		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2380		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2381		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2382		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2383		dma->dma_map = NULL;
2384	}
2385        bus_dma_tag_destroy(dma->dma_tag);
2386	dma->dma_tag = NULL;
2387}
2388
2389
2390/*********************************************************************
2391 *
2392 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2393 *  the information needed to transmit a packet on the wire.
2394 *
2395 **********************************************************************/
2396static int
2397em_allocate_transmit_structures(struct adapter * adapter)
2398{
2399	if (!(adapter->tx_buffer_area =
2400	      (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2401					     adapter->num_tx_desc, M_DEVBUF,
2402					     M_NOWAIT))) {
2403		printf("em%d: Unable to allocate tx_buffer memory\n",
2404		       adapter->unit);
2405		return ENOMEM;
2406	}
2407
2408	bzero(adapter->tx_buffer_area,
2409	      sizeof(struct em_buffer) * adapter->num_tx_desc);
2410
2411	return 0;
2412}
2413
2414/*********************************************************************
2415 *
2416 *  Allocate and initialize transmit structures.
2417 *
2418 **********************************************************************/
/*
 * Create the transmit DMA tag, allocate the per-descriptor buffer
 * bookkeeping and DMA maps, and reset the transmit ring state.
 * On any failure everything acquired so far is released via
 * em_free_transmit_structures().  Returns 0 or a bus_dma/ENOMEM error.
 */
static int
em_setup_transmit_structures(struct adapter * adapter)
{
	struct em_buffer *tx_buffer;
	bus_size_t size;
	int error, i;

        /*
         * Setup DMA descriptor areas.
         */
	/* Tag sized to one maximum frame, scatterable across up to
	 * EM_MAX_SCATTER segments. */
	size = roundup2(adapter->hw.max_frame_size, MCLBYTES);
	if ((error = bus_dma_tag_create(NULL,           /* parent */
                               1, 0,                    /* alignment, bounds */
                               BUS_SPACE_MAXADDR,       /* lowaddr */
                               BUS_SPACE_MAXADDR,       /* highaddr */
                               NULL, NULL,              /* filter, filterarg */
                               size,                    /* maxsize */
                               EM_MAX_SCATTER,          /* nsegments */
                               size,                    /* maxsegsize */
                               0,                       /* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockarg */
                               &adapter->txtag)) != 0) {
		printf("em%d: Unable to allocate TX DMA tag\n", adapter->unit);
		goto fail;
        }

        if ((error = em_allocate_transmit_structures(adapter)) != 0)
		goto fail;

	/* Clear the descriptor ring and create one DMA map per slot. */
        bzero((void *) adapter->tx_desc_base,
              (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
	tx_buffer = adapter->tx_buffer_area;
	for (i = 0; i < adapter->num_tx_desc; i++) {
		error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
		if (error != 0) {
			printf("em%d: Unable to create TX DMA map\n",
			    adapter->unit);
			goto fail;
		}
		tx_buffer++;
	}

	/* Reset ring indices: empty ring, all descriptors available. */
        adapter->next_avail_tx_desc = 0;
        adapter->oldest_used_tx_desc = 0;

        /* Set number of descriptors available */
        adapter->num_tx_desc_avail = adapter->num_tx_desc;

        /* Set checksum context */
        adapter->active_checksum_context = OFFLOAD_NONE;
	/* Push the cleared ring out to the device. */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        return (0);

fail:
	/* Releases the tag, buffer area and any maps created above. */
	em_free_transmit_structures(adapter);
	return (error);
}
2479
2480/*********************************************************************
2481 *
2482 *  Enable transmit unit.
2483 *
2484 **********************************************************************/
/*
 * Program the hardware transmit unit: descriptor ring base/length,
 * head/tail pointers, inter-packet gap, interrupt delays and the
 * transmit control register.
 */
static void
em_initialize_transmit_unit(struct adapter * adapter)
{
	u_int32_t       reg_tctl;
	u_int32_t       reg_tipg = 0;
	u_int64_t	bus_addr;

         INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = adapter->txdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, TDBAL, (u_int32_t)bus_addr);
	E1000_WRITE_REG(&adapter->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, TDLEN,
			adapter->num_tx_desc *
			sizeof(struct em_tx_desc));

	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&adapter->hw, TDH, 0);
	E1000_WRITE_REG(&adapter->hw, TDT, 0);


	HW_DEBUGOUT2("Base = %x, Length = %x\n",
		     E1000_READ_REG(&adapter->hw, TDBAL),
		     E1000_READ_REG(&adapter->hw, TDLEN));

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (adapter->hw.mac_type) {
	case em_82542_rev2_0:
        case em_82542_rev2_1:
                reg_tipg = DEFAULT_82542_TIPG_IPGT;
                reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
                break;
        default:
		/* Fiber and copper need different IPGT values. */
                if (adapter->hw.media_type == em_media_type_fiber)
                        reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
                else
                        reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
                reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
        }

	E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
	/* Transmit interrupt delay; absolute delay only on 82540+. */
	E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
	if(adapter->hw.mac_type >= em_82540)
		E1000_WRITE_REG(&adapter->hw, TADV,
		    adapter->tx_abs_int_delay.value);

	/* Program the Transmit Control Register */
	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
	/* 82571+ supports multiple transmit request. */
	if (adapter->hw.mac_type >= em_82571)
		reg_tctl |= E1000_TCTL_MULR;
	/* NOTE(review): this compares link_duplex against the magic
	 * number 1 yet selects the FULL-duplex collision distance --
	 * confirm against the FULL_DUPLEX/HALF_DUPLEX constants in the
	 * shared code that 1 really means full duplex here. */
	if (adapter->link_duplex == 1) {
		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	} else {
		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	}
	E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;

	/* Request a delayed interrupt only when a Tx delay is set. */
	if (adapter->tx_int_delay.value > 0)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	return;
}
2553
2554/*********************************************************************
2555 *
2556 *  Free all transmit related data structures.
2557 *
2558 **********************************************************************/
2559static void
2560em_free_transmit_structures(struct adapter * adapter)
2561{
2562        struct em_buffer   *tx_buffer;
2563        int             i;
2564
2565        INIT_DEBUGOUT("free_transmit_structures: begin");
2566
2567        if (adapter->tx_buffer_area != NULL) {
2568                tx_buffer = adapter->tx_buffer_area;
2569                for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2570                        if (tx_buffer->m_head != NULL) {
2571				bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2572				    BUS_DMASYNC_POSTWRITE);
2573				bus_dmamap_unload(adapter->txtag,
2574				    tx_buffer->map);
2575                                m_freem(tx_buffer->m_head);
2576				tx_buffer->m_head = NULL;
2577                        } else if (tx_buffer->map != NULL)
2578				bus_dmamap_unload(adapter->txtag,
2579				    tx_buffer->map);
2580			if (tx_buffer->map != NULL) {
2581				bus_dmamap_destroy(adapter->txtag,
2582				    tx_buffer->map);
2583				tx_buffer->map = NULL;
2584			}
2585                }
2586        }
2587        if (adapter->tx_buffer_area != NULL) {
2588                free(adapter->tx_buffer_area, M_DEVBUF);
2589                adapter->tx_buffer_area = NULL;
2590        }
2591        if (adapter->txtag != NULL) {
2592                bus_dma_tag_destroy(adapter->txtag);
2593                adapter->txtag = NULL;
2594        }
2595        return;
2596}
2597
2598/*********************************************************************
2599 *
2600 *  The offload context needs to be set when we transfer the first
2601 *  packet of a particular protocol (TCP/UDP). We change the
2602 *  context only if the protocol type changes.
2603 *
2604 **********************************************************************/
static void
em_transmit_checksum_setup(struct adapter * adapter,
			   struct mbuf *mp,
			   u_int32_t *txd_upper,
			   u_int32_t *txd_lower)
{
	struct em_context_desc *TXD;
	struct em_buffer *tx_buffer;
	int curr_txd;

	/* Decide which offload (if any) this packet wants and whether
	 * the currently-loaded hardware context already matches it. */
	if (mp->m_pkthdr.csum_flags) {

		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
				return;
			else
				adapter->active_checksum_context = OFFLOAD_TCP_IP;

		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
				return;
			else
				adapter->active_checksum_context = OFFLOAD_UDP_IP;
		} else {
			/* Unsupported offload flag: no offload options. */
			*txd_upper = 0;
			*txd_lower = 0;
			return;
		}
	} else {
		*txd_upper = 0;
		*txd_lower = 0;
		return;
	}

	/* If we reach this point, the checksum offload context
	 * needs to be reset.
	 */
	/* Consume one descriptor slot for the context descriptor.
	 * NOTE(review): offsets below assume an untagged Ethernet + IPv4
	 * header with no IP options -- confirm VLAN/options frames are
	 * excluded by the callers. */
	curr_txd = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[curr_txd];
	TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];

	/* IP checksum: start of IP header, offset of ip_sum, end of header. */
	TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
	TXD->lower_setup.ip_fields.ipcso =
		ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
	TXD->lower_setup.ip_fields.ipcse =
		htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);

	/* TCP/UDP checksum: start of transport header; tucse 0 = to end
	 * of packet. */
	TXD->upper_setup.tcp_fields.tucss =
		ETHER_HDR_LEN + sizeof(struct ip);
	TXD->upper_setup.tcp_fields.tucse = htole16(0);

	if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
		TXD->upper_setup.tcp_fields.tucso =
			ETHER_HDR_LEN + sizeof(struct ip) +
			offsetof(struct tcphdr, th_sum);
	} else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
		TXD->upper_setup.tcp_fields.tucso =
			ETHER_HDR_LEN + sizeof(struct ip) +
			offsetof(struct udphdr, uh_sum);
	}

	TXD->tcp_seg_setup.data = htole32(0);
	TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);

	/* Context descriptors carry no mbuf. */
	tx_buffer->m_head = NULL;

	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;

	return;
}
2683
2684/**********************************************************************
2685 *
2686 *  Examine each tx_buffer in the used queue. If the hardware is done
2687 *  processing the packet then free associated resources. The
2688 *  tx_buffer is put back on the free queue.
2689 *
2690 **********************************************************************/
static void
em_clean_transmit_interrupts(struct adapter * adapter)
{
        int i, num_avail;
        struct em_buffer *tx_buffer;
        struct em_tx_desc   *tx_desc;
	struct ifnet   *ifp = adapter->ifp;

	/* Caller must hold the adapter lock: we mutate ring indices. */
	mtx_assert(&adapter->mtx, MA_OWNED);

	/* Nothing outstanding: the ring is already fully available. */
        if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
                return;

        num_avail = adapter->num_tx_desc_avail;
        i = adapter->oldest_used_tx_desc;

        tx_buffer = &adapter->tx_buffer_area[i];
        tx_desc = &adapter->tx_desc_base[i];

	/* Pull the latest descriptor status written by the device. */
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_POSTREAD);
	/* Walk forward from the oldest in-use slot, reclaiming every
	 * descriptor the hardware has marked done (DD). */
        while (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {

                tx_desc->upper.data = 0;
                num_avail++;

		/* Context descriptors have no mbuf (m_head == NULL). */
                if (tx_buffer->m_head) {
			ifp->if_opackets++;
			bus_dmamap_sync(adapter->txtag, tx_buffer->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(adapter->txtag, tx_buffer->map);

                        m_freem(tx_buffer->m_head);
                        tx_buffer->m_head = NULL;
                }

                if (++i == adapter->num_tx_desc)
                        i = 0;

                tx_buffer = &adapter->tx_buffer_area[i];
                tx_desc = &adapter->tx_desc_base[i];
        }
	/* Write the cleared status words back for the device. */
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        adapter->oldest_used_tx_desc = i;

        /*
         * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack
         * that it is OK to send packets.
         * If there are no pending descriptors, clear the timeout. Otherwise,
         * if some descriptors have been freed, restart the timeout.
         */
        if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                if (num_avail == adapter->num_tx_desc)
                        ifp->if_timer = 0;
                else if (num_avail == adapter->num_tx_desc_avail)
                        ifp->if_timer = EM_TX_TIMEOUT;
        }
        adapter->num_tx_desc_avail = num_avail;
        return;
}
2754
2755/*********************************************************************
2756 *
2757 *  Get a buffer from system mbuf buffer pool.
2758 *
2759 **********************************************************************/
/*
 * Attach an mbuf cluster to receive ring slot i: allocate a fresh
 * cluster when mp is NULL, otherwise recycle the caller's mbuf, then
 * DMA-map it and store the bus address in the receive descriptor.
 * Returns 0, ENOBUFS (allocation failure) or a bus_dma error.
 *
 * NOTE(review): on a mapping failure a caller-supplied mp is freed
 * here -- callers must not reuse it afterwards; confirm they don't.
 */
static int
em_get_buf(int i, struct adapter *adapter, struct mbuf *mp)
{
	struct ifnet		*ifp = adapter->ifp;
	bus_dma_segment_t	segs[1];
	struct em_buffer	*rx_buffer;
	int			error, nsegs;

	if (mp == NULL) {
		mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (mp == NULL) {
			adapter->mbuf_cluster_failed++;
			return(ENOBUFS);
		}
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
	} else {
		/* Recycle: reset length and data pointer to the full
		 * cluster and detach any chained mbufs. */
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

	/* For standard MTU, shift the payload so the IP header lands on
	 * an aligned boundary (ETHER_ALIGN = 2 bytes). */
	if (ifp->if_mtu <= ETHERMTU)
                m_adj(mp, ETHER_ALIGN);

	rx_buffer = &adapter->rx_buffer_area[i];

	/*
	 * Using memory from the mbuf cluster pool, invoke the
	 * bus_dma machinery to arrange the memory mapping.
	 */
	error = bus_dmamap_load_mbuf_sg(adapter->rxtag, rx_buffer->map,
	    mp, segs, &nsegs, 0);
	if (error != 0) {
		m_free(mp);
		return (error);
	}
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	rx_buffer->m_head = mp;
	adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);

	return(0);
}
2804
2805/*********************************************************************
2806 *
2807 *  Allocate memory for rx_buffer structures. Since we use one
2808 *  rx_buffer per received packet, the maximum number of rx_buffer's
2809 *  that we'll need is equal to the number of receive descriptors
2810 *  that we've allocated.
2811 *
2812 **********************************************************************/
2813static int
2814em_allocate_receive_structures(struct adapter * adapter)
2815{
2816        int             i, error;
2817        struct em_buffer *rx_buffer;
2818
2819        if (!(adapter->rx_buffer_area =
2820              (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2821                                          adapter->num_rx_desc, M_DEVBUF,
2822                                          M_NOWAIT))) {
2823                printf("em%d: Unable to allocate rx_buffer memory\n",
2824                       adapter->unit);
2825                return(ENOMEM);
2826        }
2827
2828        bzero(adapter->rx_buffer_area,
2829              sizeof(struct em_buffer) * adapter->num_rx_desc);
2830
2831        error = bus_dma_tag_create(NULL,                /* parent */
2832                               1, 0,                    /* alignment, bounds */
2833                               BUS_SPACE_MAXADDR,       /* lowaddr */
2834                               BUS_SPACE_MAXADDR,       /* highaddr */
2835                               NULL, NULL,              /* filter, filterarg */
2836                               MCLBYTES,                /* maxsize */
2837                               1,                       /* nsegments */
2838                               MCLBYTES,                /* maxsegsize */
2839                               BUS_DMA_ALLOCNOW,        /* flags */
2840			       NULL,			/* lockfunc */
2841			       NULL,			/* lockarg */
2842                               &adapter->rxtag);
2843        if (error != 0) {
2844                printf("em%d: em_allocate_receive_structures: "
2845                        "bus_dma_tag_create failed; error %u\n",
2846                       adapter->unit, error);
2847                goto fail;
2848        }
2849
2850        rx_buffer = adapter->rx_buffer_area;
2851        for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2852                error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2853                                          &rx_buffer->map);
2854                if (error != 0) {
2855                        printf("em%d: em_allocate_receive_structures: "
2856                                "bus_dmamap_create failed; error %u\n",
2857                                adapter->unit, error);
2858                        goto fail;
2859                }
2860        }
2861
2862        for (i = 0; i < adapter->num_rx_desc; i++) {
2863                error = em_get_buf(i, adapter, NULL);
2864		if (error != 0)
2865			goto fail;
2866        }
2867        bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2868            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2869
2870        return(0);
2871
2872fail:
2873	em_free_receive_structures(adapter);
2874        return (error);
2875}
2876
2877/*********************************************************************
2878 *
2879 *  Allocate and initialize receive structures.
2880 *
2881 **********************************************************************/
2882static int
2883em_setup_receive_structures(struct adapter * adapter)
2884{
2885	bzero((void *) adapter->rx_desc_base,
2886              (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2887
2888	if (em_allocate_receive_structures(adapter))
2889		return ENOMEM;
2890
2891	/* Setup our descriptor pointers */
2892        adapter->next_rx_desc_to_check = 0;
2893	return(0);
2894}
2895
2896/*********************************************************************
2897 *
2898 *  Enable receive unit.
2899 *
2900 **********************************************************************/
static void
em_initialize_receive_unit(struct adapter * adapter)
{
	u_int32_t       reg_rctl;
	u_int32_t       reg_rxcsum;
	struct ifnet    *ifp;
	u_int64_t	bus_addr;

        INIT_DEBUGOUT("em_initialize_receive_unit: begin");
	ifp = adapter->ifp;

	/* Make sure receives are disabled while setting up the descriptor ring */
	E1000_WRITE_REG(&adapter->hw, RCTL, 0);

	/* Set the Receive Delay Timer Register */
	E1000_WRITE_REG(&adapter->hw, RDTR,
			adapter->rx_int_delay.value | E1000_RDT_FPDB);

	/* Absolute delay (RADV) and throttling (ITR) exist on 82540+. */
	if(adapter->hw.mac_type >= em_82540) {
		E1000_WRITE_REG(&adapter->hw, RADV,
		    adapter->rx_abs_int_delay.value);

                /* Set the interrupt throttling rate.  Value is calculated
                 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */
#define MAX_INTS_PER_SEC        8000
#define DEFAULT_ITR             1000000000/(MAX_INTS_PER_SEC * 256)
                E1000_WRITE_REG(&adapter->hw, ITR, DEFAULT_ITR);
        }

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, RDBAL, (u_int32_t)bus_addr);
	E1000_WRITE_REG(&adapter->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
			sizeof(struct em_rx_desc));

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	E1000_WRITE_REG(&adapter->hw, RDH, 0);
	E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);

	/* Setup the Receive Control Register */
	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* When the TBI link workaround is active, accept bad packets so
	 * the driver can inspect them (SBP). */
	if (adapter->hw.tbi_compatibility_on == TRUE)
		reg_rctl |= E1000_RCTL_SBP;


	/* Translate the configured buffer size into RCTL size bits;
	 * sizes above 2048 need the buffer-size-extension bit (BSEX). */
	switch (adapter->rx_buffer_len) {
	default:
	case EM_RXBUFFER_2048:
		reg_rctl |= E1000_RCTL_SZ_2048;
		break;
	case EM_RXBUFFER_4096:
		reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_8192:
		reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_16384:
		reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	/* Long-packet enable for jumbo MTUs. */
	if (ifp->if_mtu > ETHERMTU)
		reg_rctl |= E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac_type >= em_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
	}

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);

	return;
}
2982
2983/*********************************************************************
2984 *
2985 *  Free receive related data structures.
2986 *
2987 **********************************************************************/
2988static void
2989em_free_receive_structures(struct adapter *adapter)
2990{
2991        struct em_buffer   *rx_buffer;
2992        int             i;
2993
2994        INIT_DEBUGOUT("free_receive_structures: begin");
2995
2996        if (adapter->rx_buffer_area != NULL) {
2997                rx_buffer = adapter->rx_buffer_area;
2998                for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2999			if (rx_buffer->m_head != NULL) {
3000				bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3001				    BUS_DMASYNC_POSTREAD);
3002				bus_dmamap_unload(adapter->rxtag,
3003				    rx_buffer->map);
3004				m_freem(rx_buffer->m_head);
3005				rx_buffer->m_head = NULL;
3006			} else if (rx_buffer->map != NULL)
3007				bus_dmamap_unload(adapter->rxtag,
3008				    rx_buffer->map);
3009                        if (rx_buffer->map != NULL) {
3010				bus_dmamap_destroy(adapter->rxtag,
3011				    rx_buffer->map);
3012				rx_buffer->map = NULL;
3013			}
3014                }
3015        }
3016        if (adapter->rx_buffer_area != NULL) {
3017                free(adapter->rx_buffer_area, M_DEVBUF);
3018                adapter->rx_buffer_area = NULL;
3019        }
3020        if (adapter->rxtag != NULL) {
3021                bus_dma_tag_destroy(adapter->rxtag);
3022                adapter->rxtag = NULL;
3023        }
3024        return;
3025}
3026
3027/*********************************************************************
3028 *
3029 *  This routine executes in interrupt context. It replenishes
3030 *  the mbufs in the descriptor and sends data which has been
3031 *  dma'ed into host memory to upper layer.
3032 *
3033 *  We loop at most count times if count is > 0, or until done if
3034 *  count < 0.
3035 *
3036 *********************************************************************/
static int
em_process_receive_interrupts(struct adapter * adapter, int count)
{
	struct ifnet	*ifp;
	struct mbuf	*mp;
	uint8_t		accept_frame = 0;
	uint8_t		eop = 0;
	uint16_t 	len, desc_len, prev_len_adj;
	int		i;

	/* Pointer to the receive descriptor being examined. */
	struct em_rx_desc   *current_desc;

	ifp = adapter->ifp;
	i = adapter->next_rx_desc_to_check;
	current_desc = &adapter->rx_desc_base[i];
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	/* Nothing to do if the next descriptor has not been written back. */
	if (!((current_desc->status) & E1000_RXD_STAT_DD))
		return (0);

	while ((current_desc->status & E1000_RXD_STAT_DD) &&
	    (count != 0) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		struct mbuf *m = NULL;

		/* Device is done with this buffer; release its DMA map. */
		mp = adapter->rx_buffer_area[i].m_head;
		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(adapter->rxtag,
		    adapter->rx_buffer_area[i].map);

		accept_frame = 1;
		prev_len_adj = 0;
		desc_len = le16toh(current_desc->length);
		if (current_desc->status & E1000_RXD_STAT_EOP) {
			/*
			 * Last fragment of a frame: strip the 4-byte CRC.
			 * If the CRC straddled descriptors, record how much
			 * must be trimmed from the previous fragment.
			 */
			count--;
			eop = 1;
			if (desc_len < ETHER_CRC_LEN) {
				len = 0;
				prev_len_adj = ETHER_CRC_LEN - desc_len;
			} else
				len = desc_len - ETHER_CRC_LEN;
		} else {
			eop = 0;
			len = desc_len;
		}

		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			uint8_t		last_byte;
			uint32_t	pkt_len = desc_len;

			if (adapter->fmp != NULL)
				pkt_len += adapter->fmp->m_pkthdr.len;

			/*
			 * TBI workaround: TBI_ACCEPT() may decide an
			 * errored frame is still acceptable based on the
			 * total length and last byte; if so, adjust the
			 * stats and shorten the frame by one byte.
			 */
			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
			if (TBI_ACCEPT(&adapter->hw, current_desc->status,
			    current_desc->errors,
			    pkt_len, last_byte)) {
				em_tbi_adjust_stats(&adapter->hw,
				    &adapter->stats, pkt_len,
				    adapter->hw.mac_addr);
				if (len > 0)
					len--;
			} else
				accept_frame = 0;
		}

		if (accept_frame) {
			/*
			 * Refill this ring slot with a new mbuf; on failure
			 * recycle the just-received mbuf back into the slot
			 * and drop the frame (and any accumulated chain).
			 */
			if (em_get_buf(i, adapter, NULL) == ENOBUFS) {
				adapter->dropped_pkts++;
				em_get_buf(i, adapter, mp);
				if (adapter->fmp != NULL)
					m_freem(adapter->fmp);
				adapter->fmp = NULL;
				adapter->lmp = NULL;
				break;
			}

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (adapter->fmp == NULL) {
				mp->m_pkthdr.len = len;
				adapter->fmp = mp; /* Store the first mbuf */
				adapter->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				/*
				 * Adjust length of previous mbuf in chain if
				 * we received less than 4 bytes in the last
				 * descriptor.
				 */
				if (prev_len_adj > 0) {
					adapter->lmp->m_len -= prev_len_adj;
					adapter->fmp->m_pkthdr.len -=
					    prev_len_adj;
				}
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				/* Frame complete: set csum flags, fix
				 * alignment if needed, attach VLAN tag. */
				adapter->fmp->m_pkthdr.rcvif = ifp;
				ifp->if_ipackets++;
				em_receive_checksum(adapter, current_desc,
				    adapter->fmp);
#ifndef __NO_STRICT_ALIGNMENT
				if (ifp->if_mtu > ETHERMTU &&
				    em_fixup_rx(adapter) != 0)
					goto skip;
#endif
				if (current_desc->status & E1000_RXD_STAT_VP)
					VLAN_INPUT_TAG(ifp, adapter->fmp,
					    (le16toh(current_desc->special) &
					    E1000_RXD_SPC_VLAN_MASK));
#ifndef __NO_STRICT_ALIGNMENT
skip:
#endif
				m = adapter->fmp;
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
		} else {
			/* Unacceptable frame: recycle mbuf, drop chain. */
			adapter->dropped_pkts++;
			em_get_buf(i, adapter, mp);
			if (adapter->fmp != NULL)
				m_freem(adapter->fmp);
			adapter->fmp = NULL;
			adapter->lmp = NULL;
		}

		/* Zero out the receive descriptors status. */
		current_desc->status = 0;
		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor. */
		if (++i == adapter->num_rx_desc)
			i = 0;
		if (m != NULL) {
			/*
			 * Publish our position before calling up the stack
			 * (if_input may re-enter the driver), then re-read
			 * it afterwards in case it moved underneath us.
			 */
			adapter->next_rx_desc_to_check = i;
			(*ifp->if_input)(ifp, m);
			i = adapter->next_rx_desc_to_check;
		}
		current_desc = &adapter->rx_desc_base[i];
	}
	adapter->next_rx_desc_to_check = i;

	/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
	if (--i < 0)
		i = adapter->num_rx_desc - 1;
	E1000_WRITE_REG(&adapter->hw, RDT, i);
	if (!((current_desc->status) & E1000_RXD_STAT_DD))
		return (0);

	/* More descriptors are ready; tell the caller to come back. */
	return (1);
}
3198
3199#ifndef __NO_STRICT_ALIGNMENT
3200/*
3201 * When jumbo frames are enabled we should realign entire payload on
3202 * architecures with strict alignment. This is serious design mistake of 8254x
3203 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
3204 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
3205 * payload. On architecures without strict alignment restrictions 8254x still
3206 * performs unaligned memory access which would reduce the performance too.
3207 * To avoid copying over an entire frame to align, we allocate a new mbuf and
3208 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
3209 * existing mbuf chain.
3210 *
3211 * Be aware, best performance of the 8254x is achived only when jumbo frame is
3212 * not used at all on architectures with strict alignment.
3213 */
static int
em_fixup_rx(struct adapter *adapter)
{
	struct mbuf *m, *n;
	int error;

	error = 0;
	m = adapter->fmp;
	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		/*
		 * Room left in the cluster: slide the whole frame up by
		 * ETHER_HDR_LEN (overlapping copy, bcopy handles it) so
		 * the payload after the 14-byte header ends up aligned.
		 */
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
	} else {
		/*
		 * Cluster is full: allocate a new header mbuf, copy just
		 * the ethernet header into it, and prepend it so the
		 * payload in the original mbuf starts on a new boundary.
		 */
		MGETHDR(n, M_DONTWAIT, MT_DATA);
		if (n != NULL) {
			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
			m->m_data += ETHER_HDR_LEN;
			m->m_len -= ETHER_HDR_LEN;
			n->m_len = ETHER_HDR_LEN;
			/* Move the pkthdr to the new leading mbuf. */
			M_MOVE_PKTHDR(n, m);
			n->m_next = m;
			adapter->fmp = n;
		} else {
			/* No mbuf available: drop the whole frame. */
			adapter->dropped_pkts++;
			m_freem(adapter->fmp);
			adapter->fmp = NULL;
			error = ENOMEM;
		}
	}

	return (error);
}
3245#endif
3246
3247/*********************************************************************
3248 *
3249 *  Verify that the hardware indicated that the checksum is valid.
3250 *  Inform the stack about the status of checksum so that stack
3251 *  doesn't spend time verifying the checksum.
3252 *
3253 *********************************************************************/
3254static void
3255em_receive_checksum(struct adapter *adapter,
3256		    struct em_rx_desc *rx_desc,
3257		    struct mbuf *mp)
3258{
3259	/* 82543 or newer only */
3260	if ((adapter->hw.mac_type < em_82543) ||
3261	    /* Ignore Checksum bit is set */
3262	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3263		mp->m_pkthdr.csum_flags = 0;
3264		return;
3265	}
3266
3267	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3268		/* Did it pass? */
3269		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3270			/* IP Checksum Good */
3271			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3272			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3273
3274		} else {
3275			mp->m_pkthdr.csum_flags = 0;
3276		}
3277	}
3278
3279	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3280		/* Did it pass? */
3281		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3282			mp->m_pkthdr.csum_flags |=
3283			(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3284			mp->m_pkthdr.csum_data = htons(0xffff);
3285		}
3286	}
3287
3288	return;
3289}
3290
3291
3292static void
3293em_enable_vlans(struct adapter *adapter)
3294{
3295	uint32_t ctrl;
3296
3297	E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
3298
3299	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3300	ctrl |= E1000_CTRL_VME;
3301	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3302
3303	return;
3304}
3305
3306static void
3307em_disable_vlans(struct adapter *adapter)
3308{
3309	uint32_t ctrl;
3310
3311	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3312	ctrl &= ~E1000_CTRL_VME;
3313	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3314
3315	return;
3316}
3317
3318static void
3319em_enable_intr(struct adapter * adapter)
3320{
3321	E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
3322	return;
3323}
3324
3325static void
3326em_disable_intr(struct adapter *adapter)
3327{
3328	/*
3329	 * The first version of 82542 had an errata where when link was forced
3330	 * it would stay up even up even if the cable was disconnected.
3331	 * Sequence errors were used to detect the disconnect and then the
3332	 * driver would unforce the link. This code in the in the ISR. For this
3333	 * to work correctly the Sequence error interrupt had to be enabled
3334	 * all the time.
3335	 */
3336
3337	if (adapter->hw.mac_type == em_82542_rev2_0)
3338	    E1000_WRITE_REG(&adapter->hw, IMC,
3339	        (0xffffffff & ~E1000_IMC_RXSEQ));
3340	else
3341	    E1000_WRITE_REG(&adapter->hw, IMC,
3342	        0xffffffff);
3343	return;
3344}
3345
3346static int
3347em_is_valid_ether_addr(u_int8_t *addr)
3348{
3349        char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3350
3351        if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3352                return (FALSE);
3353        }
3354
3355        return(TRUE);
3356}
3357
3358void
3359em_write_pci_cfg(struct em_hw *hw,
3360		      uint32_t reg,
3361		      uint16_t *value)
3362{
3363	pci_write_config(((struct em_osdep *)hw->back)->dev, reg,
3364			 *value, 2);
3365}
3366
3367void
3368em_read_pci_cfg(struct em_hw *hw, uint32_t reg,
3369		     uint16_t *value)
3370{
3371	*value = pci_read_config(((struct em_osdep *)hw->back)->dev,
3372				 reg, 2);
3373	return;
3374}
3375
3376void
3377em_pci_set_mwi(struct em_hw *hw)
3378{
3379        pci_write_config(((struct em_osdep *)hw->back)->dev,
3380                         PCIR_COMMAND,
3381                         (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
3382        return;
3383}
3384
3385void
3386em_pci_clear_mwi(struct em_hw *hw)
3387{
3388        pci_write_config(((struct em_osdep *)hw->back)->dev,
3389                         PCIR_COMMAND,
3390                         (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
3391        return;
3392}
3393
3394/*********************************************************************
3395* 82544 Coexistence issue workaround.
3396*    There are 2 issues.
3397*       1. Transmit Hang issue.
3398*    To detect this issue, following equation can be used...
3399*          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3400*          If SUM[3:0] is in between 1 to 4, we will have this issue.
3401*
3402*       2. DAC issue.
3403*    To detect this issue, following equation can be used...
3404*          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3405*          If SUM[3:0] is in between 9 to c, we will have this issue.
3406*
3407*
3408*    WORKAROUND:
3409*          Make sure we do not have ending address as 1,2,3,4(Hang) or 9,a,b,c (DAC)
3410*
3411*** *********************************************************************/
3412static u_int32_t
3413em_fill_descriptors (bus_addr_t address,
3414                              u_int32_t length,
3415                              PDESC_ARRAY desc_array)
3416{
3417        /* Since issue is sensitive to length and address.*/
3418        /* Let us first check the address...*/
3419        u_int32_t safe_terminator;
3420        if (length <= 4) {
3421                desc_array->descriptor[0].address = address;
3422                desc_array->descriptor[0].length = length;
3423                desc_array->elements = 1;
3424                return desc_array->elements;
3425        }
3426        safe_terminator = (u_int32_t)((((u_int32_t)address & 0x7) + (length & 0xF)) & 0xF);
3427        /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
3428        if (safe_terminator == 0   ||
3429        (safe_terminator > 4   &&
3430        safe_terminator < 9)   ||
3431        (safe_terminator > 0xC &&
3432        safe_terminator <= 0xF)) {
3433                desc_array->descriptor[0].address = address;
3434                desc_array->descriptor[0].length = length;
3435                desc_array->elements = 1;
3436                return desc_array->elements;
3437        }
3438
3439        desc_array->descriptor[0].address = address;
3440        desc_array->descriptor[0].length = length - 4;
3441        desc_array->descriptor[1].address = address + (length - 4);
3442        desc_array->descriptor[1].length = 4;
3443        desc_array->elements = 2;
3444        return desc_array->elements;
3445}
3446
3447/**********************************************************************
3448 *
3449 *  Update the board statistics counters.
3450 *
3451 **********************************************************************/
static void
em_update_stats_counters(struct adapter *adapter)
{
	struct ifnet   *ifp;

	/* Read SYMERRS/SEC only on copper media or when link is up. */
	if(adapter->hw.media_type == em_media_type_copper ||
	   (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
		adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
	}
	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
	adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);

	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
	adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
	adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
	adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
	adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
	adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);

	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
	adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);

	adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
	adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
	adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
	adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);

	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);

	/* These statistics registers only exist on the 82543 and newer. */
	if (adapter->hw.mac_type >= em_82543) {
		adapter->stats.algnerrc +=
		E1000_READ_REG(&adapter->hw, ALGNERRC);
		adapter->stats.rxerrc +=
		E1000_READ_REG(&adapter->hw, RXERRC);
		adapter->stats.tncrs +=
		E1000_READ_REG(&adapter->hw, TNCRS);
		adapter->stats.cexterr +=
		E1000_READ_REG(&adapter->hw, CEXTERR);
		adapter->stats.tsctc +=
		E1000_READ_REG(&adapter->hw, TSCTC);
		adapter->stats.tsctfc +=
		E1000_READ_REG(&adapter->hw, TSCTFC);
	}
	ifp = adapter->ifp;

	/* Export the aggregate counters through the ifnet. */
	ifp->if_collisions = adapter->stats.colc;

	/* Rx Errors */
	ifp->if_ierrors =
	adapter->dropped_pkts +
	adapter->stats.rxerrc +
	adapter->stats.crcerrs +
	adapter->stats.algnerrc +
	adapter->stats.rlec +
	adapter->stats.mpc + adapter->stats.cexterr;

	/* Tx Errors */
	ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol +
	    adapter->watchdog_events;

}
3550
3551
3552/**********************************************************************
3553 *
3554 *  This routine is called only when em_display_debug_stats is enabled.
3555 *  This routine provides a way to take a look at important statistics
3556 *  maintained by the driver and hardware.
3557 *
3558 **********************************************************************/
3559static void
3560em_print_debug_info(struct adapter *adapter)
3561{
3562	int unit = adapter->unit;
3563	uint8_t *hw_addr = adapter->hw.hw_addr;
3564
3565	printf("em%d: Adapter hardware address = %p \n", unit, hw_addr);
3566	printf("em%d: CTRL = 0x%x RCTL = 0x%x \n", unit,
3567	    E1000_READ_REG(&adapter->hw, CTRL),
3568	    E1000_READ_REG(&adapter->hw, RCTL));
3569	printf("em%d: Packet buffer = Tx=%dk Rx=%dk \n", unit,
3570	    ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff0000) >> 16),\
3571	    (E1000_READ_REG(&adapter->hw, PBA) & 0xffff) );
3572	printf("em%d: Flow control watermarks high = %d low = %d\n", unit,
3573	    adapter->hw.fc_high_water,
3574	    adapter->hw.fc_low_water);
3575	printf("em%d: tx_int_delay = %d, tx_abs_int_delay = %d\n", unit,
3576	    E1000_READ_REG(&adapter->hw, TIDV),
3577	    E1000_READ_REG(&adapter->hw, TADV));
3578	printf("em%d: rx_int_delay = %d, rx_abs_int_delay = %d\n", unit,
3579	    E1000_READ_REG(&adapter->hw, RDTR),
3580	    E1000_READ_REG(&adapter->hw, RADV));
3581	printf("em%d: fifo workaround = %lld, fifo_reset_count = %lld\n",
3582	    unit, (long long)adapter->tx_fifo_wrk_cnt,
3583	    (long long)adapter->tx_fifo_reset_cnt);
3584	printf("em%d: hw tdh = %d, hw tdt = %d\n", unit,
3585	    E1000_READ_REG(&adapter->hw, TDH),
3586	    E1000_READ_REG(&adapter->hw, TDT));
3587	printf("em%d: Num Tx descriptors avail = %d\n", unit,
3588	    adapter->num_tx_desc_avail);
3589	printf("em%d: Tx Descriptors not avail1 = %ld\n", unit,
3590	    adapter->no_tx_desc_avail1);
3591	printf("em%d: Tx Descriptors not avail2 = %ld\n", unit,
3592	    adapter->no_tx_desc_avail2);
3593	printf("em%d: Std mbuf failed = %ld\n", unit,
3594	    adapter->mbuf_alloc_failed);
3595	printf("em%d: Std mbuf cluster failed = %ld\n", unit,
3596	    adapter->mbuf_cluster_failed);
3597	printf("em%d: Driver dropped packets = %ld\n", unit,
3598	    adapter->dropped_pkts);
3599
3600	return;
3601}
3602
3603static void
3604em_print_hw_stats(struct adapter *adapter)
3605{
3606        int unit = adapter->unit;
3607
3608        printf("em%d: Excessive collisions = %lld\n", unit,
3609               (long long)adapter->stats.ecol);
3610        printf("em%d: Symbol errors = %lld\n", unit,
3611               (long long)adapter->stats.symerrs);
3612        printf("em%d: Sequence errors = %lld\n", unit,
3613               (long long)adapter->stats.sec);
3614        printf("em%d: Defer count = %lld\n", unit,
3615               (long long)adapter->stats.dc);
3616
3617        printf("em%d: Missed Packets = %lld\n", unit,
3618               (long long)adapter->stats.mpc);
3619        printf("em%d: Receive No Buffers = %lld\n", unit,
3620               (long long)adapter->stats.rnbc);
3621        printf("em%d: Receive length errors = %lld\n", unit,
3622               (long long)adapter->stats.rlec);
3623        printf("em%d: Receive errors = %lld\n", unit,
3624               (long long)adapter->stats.rxerrc);
3625        printf("em%d: Crc errors = %lld\n", unit,
3626               (long long)adapter->stats.crcerrs);
3627        printf("em%d: Alignment errors = %lld\n", unit,
3628               (long long)adapter->stats.algnerrc);
3629        printf("em%d: Carrier extension errors = %lld\n", unit,
3630               (long long)adapter->stats.cexterr);
3631	printf("em%d: RX overruns = %ld\n", unit, adapter->rx_overruns);
3632	printf("em%d: watchdog timeouts = %ld\n", unit,
3633		adapter->watchdog_events);
3634
3635        printf("em%d: XON Rcvd = %lld\n", unit,
3636               (long long)adapter->stats.xonrxc);
3637        printf("em%d: XON Xmtd = %lld\n", unit,
3638               (long long)adapter->stats.xontxc);
3639        printf("em%d: XOFF Rcvd = %lld\n", unit,
3640               (long long)adapter->stats.xoffrxc);
3641        printf("em%d: XOFF Xmtd = %lld\n", unit,
3642               (long long)adapter->stats.xofftxc);
3643
3644        printf("em%d: Good Packets Rcvd = %lld\n", unit,
3645               (long long)adapter->stats.gprc);
3646        printf("em%d: Good Packets Xmtd = %lld\n", unit,
3647               (long long)adapter->stats.gptc);
3648
3649        return;
3650}
3651
3652static int
3653em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3654{
3655        int error;
3656        int result;
3657        struct adapter *adapter;
3658
3659        result = -1;
3660        error = sysctl_handle_int(oidp, &result, 0, req);
3661
3662        if (error || !req->newptr)
3663                return (error);
3664
3665        if (result == 1) {
3666                adapter = (struct adapter *)arg1;
3667                em_print_debug_info(adapter);
3668        }
3669
3670        return error;
3671}
3672
3673
3674static int
3675em_sysctl_stats(SYSCTL_HANDLER_ARGS)
3676{
3677        int error;
3678        int result;
3679        struct adapter *adapter;
3680
3681        result = -1;
3682        error = sysctl_handle_int(oidp, &result, 0, req);
3683
3684        if (error || !req->newptr)
3685                return (error);
3686
3687        if (result == 1) {
3688                adapter = (struct adapter *)arg1;
3689                em_print_hw_stats(adapter);
3690        }
3691
3692        return error;
3693}
3694
static int
em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
{
	struct em_int_delay_info *info;
	struct adapter *adapter;
	u_int32_t regval;
	int error;
	int usecs;
	int ticks;

	/* Report the current delay in microseconds; on a write, convert
	 * the new value to device ticks and reprogram the register. */
	info = (struct em_int_delay_info *)arg1;
	usecs = info->value;
	error = sysctl_handle_int(oidp, &usecs, 0, req);
	if (error != 0 || req->newptr == NULL)
		return error;
	/* The delay field is 16 bits wide; reject values that overflow. */
	if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
		return EINVAL;
	info->value = usecs;
	ticks = E1000_USECS_TO_TICKS(usecs);

	adapter = info->adapter;

	EM_LOCK(adapter);
	/* Read-modify-write: only the low 16 bits carry the delay. */
	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
	regval = (regval & ~0xffff) | (ticks & 0xffff);
	/* Handle a few special cases. */
	switch (info->offset) {
	case E1000_RDTR:
	case E1000_82542_RDTR:
		/* Keep the flush-partial-descriptor-block bit set. */
		regval |= E1000_RDT_FPDB;
		break;
	case E1000_TIDV:
	case E1000_82542_TIDV:
		/* A zero tx delay also disables the IDE bit in the
		 * descriptor command word. */
		if (ticks == 0) {
			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
			/* Don't write 0 into the TIDV register. */
			regval++;
		} else
			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
		break;
	}
	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
	EM_UNLOCK(adapter);
	return 0;
}
3740
static void
em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
    const char *description, struct em_int_delay_info *info,
    int offset, int value)
{
	/* Record which register this knob controls and its initial value,
	 * then expose it as a read-write integer sysctl handled by
	 * em_sysctl_int_delay(). */
	info->adapter = adapter;
	info->offset = offset;
	info->value = value;
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
	    info, 0, em_sysctl_int_delay, "I", description);
}
3754
3755#ifndef NO_EM_FASTINTR
static void
em_add_int_process_limit(struct adapter *adapter, const char *name,
    const char *description, int *limit, int value)
{
	/* Initialize the per-interrupt processing limit and expose it as
	 * a read-write integer sysctl under the device's tree. */
	*limit = value;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
}
3765#endif
3766