/* if_em.c revision 155052 */
1/**************************************************************************
2
3Copyright (c) 2001-2005, Intel Corporation
4All rights reserved.
5
6Redistribution and use in source and binary forms, with or without
7modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10    this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13    notice, this list of conditions and the following disclaimer in the
14    documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17    contributors may be used to endorse or promote products derived from
18    this software without specific prior written permission.
19
20THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30POSSIBILITY OF SUCH DAMAGE.
31
32***************************************************************************/
33
34/*$FreeBSD: head/sys/dev/em/if_em.c 155052 2006-01-30 13:45:55Z glebius $*/
35
36#ifdef HAVE_KERNEL_OPTION_HEADERS
37#include "opt_device_polling.h"
38#endif
39
40#include <dev/em/if_em.h>
41
42/*********************************************************************
43 *  Set this to one to display debug statistics
44 *********************************************************************/
45int             em_display_debug_stats = 0;
46
47/*********************************************************************
48 *  Driver version
49 *********************************************************************/
50
51char em_driver_version[] = "Version - 3.2.18";
52
53
54/*********************************************************************
55 *  PCI Device ID Table
56 *
57 *  Used by probe to select devices to load on
58 *  Last field stores an index into em_strings
59 *  Last entry must be all 0s
60 *
61 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
62 *********************************************************************/
63
64static em_vendor_info_t em_vendor_info_array[] =
65{
66        /* Intel(R) PRO/1000 Network Connection */
67        { 0x8086, E1000_DEV_ID_82540EM,             PCI_ANY_ID, PCI_ANY_ID, 0},
68        { 0x8086, E1000_DEV_ID_82540EM_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
69        { 0x8086, E1000_DEV_ID_82540EP,             PCI_ANY_ID, PCI_ANY_ID, 0},
70        { 0x8086, E1000_DEV_ID_82540EP_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
71        { 0x8086, E1000_DEV_ID_82540EP_LP,          PCI_ANY_ID, PCI_ANY_ID, 0},
72
73        { 0x8086, E1000_DEV_ID_82541EI,             PCI_ANY_ID, PCI_ANY_ID, 0},
74        { 0x8086, E1000_DEV_ID_82541ER,             PCI_ANY_ID, PCI_ANY_ID, 0},
75        { 0x8086, E1000_DEV_ID_82541ER_LOM,             PCI_ANY_ID, PCI_ANY_ID, 0},
76        { 0x8086, E1000_DEV_ID_82541EI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
77        { 0x8086, E1000_DEV_ID_82541GI,             PCI_ANY_ID, PCI_ANY_ID, 0},
78        { 0x8086, E1000_DEV_ID_82541GI_LF,          PCI_ANY_ID, PCI_ANY_ID, 0},
79        { 0x8086, E1000_DEV_ID_82541GI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
80
81        { 0x8086, E1000_DEV_ID_82542,               PCI_ANY_ID, PCI_ANY_ID, 0},
82
83        { 0x8086, E1000_DEV_ID_82543GC_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
84        { 0x8086, E1000_DEV_ID_82543GC_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
85
86        { 0x8086, E1000_DEV_ID_82544EI_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
87        { 0x8086, E1000_DEV_ID_82544EI_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
88        { 0x8086, E1000_DEV_ID_82544GC_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
89        { 0x8086, E1000_DEV_ID_82544GC_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
90
91        { 0x8086, E1000_DEV_ID_82545EM_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
92        { 0x8086, E1000_DEV_ID_82545EM_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
93        { 0x8086, E1000_DEV_ID_82545GM_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
94        { 0x8086, E1000_DEV_ID_82545GM_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
95        { 0x8086, E1000_DEV_ID_82545GM_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
96
97        { 0x8086, E1000_DEV_ID_82546EB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
98        { 0x8086, E1000_DEV_ID_82546EB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
99        { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
100        { 0x8086, E1000_DEV_ID_82546GB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
101        { 0x8086, E1000_DEV_ID_82546GB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
102        { 0x8086, E1000_DEV_ID_82546GB_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
103        { 0x8086, E1000_DEV_ID_82546GB_PCIE,        PCI_ANY_ID, PCI_ANY_ID, 0},
104        { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
105
106        { 0x8086, E1000_DEV_ID_82547EI,             PCI_ANY_ID, PCI_ANY_ID, 0},
107        { 0x8086, E1000_DEV_ID_82547EI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
108        { 0x8086, E1000_DEV_ID_82547GI,             PCI_ANY_ID, PCI_ANY_ID, 0},
109
110	{ 0x8086, E1000_DEV_ID_82571EB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
111	{ 0x8086, E1000_DEV_ID_82571EB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
112	{ 0x8086, E1000_DEV_ID_82571EB_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
113
114	{ 0x8086, E1000_DEV_ID_82572EI_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
115	{ 0x8086, E1000_DEV_ID_82572EI_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
116	{ 0x8086, E1000_DEV_ID_82572EI_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
117
118        { 0x8086, E1000_DEV_ID_82573E,              PCI_ANY_ID, PCI_ANY_ID, 0},
119        { 0x8086, E1000_DEV_ID_82573E_IAMT,         PCI_ANY_ID, PCI_ANY_ID, 0},
120        { 0x8086, E1000_DEV_ID_82573L,              PCI_ANY_ID, PCI_ANY_ID, 0},
121
122        /* required last entry */
123        { 0, 0, 0, 0, 0}
124};
125
126/*********************************************************************
127 *  Table of branding strings for all supported NICs.
128 *********************************************************************/
129
130static char *em_strings[] = {
131	"Intel(R) PRO/1000 Network Connection"
132};
133
134/*********************************************************************
135 *  Function prototypes
136 *********************************************************************/
137static int  em_probe(device_t);
138static int  em_attach(device_t);
139static int  em_detach(device_t);
140static int  em_shutdown(device_t);
141static int  em_suspend(device_t);
142static int  em_resume(device_t);
143static void em_intr(void *);
144#ifndef NO_EM_FASTINTR
145static void em_intr_fast(void *);
146#endif
147static void em_start(struct ifnet *);
148static void em_start_locked(struct ifnet *ifp);
149static int  em_ioctl(struct ifnet *, u_long, caddr_t);
150static void em_watchdog(struct ifnet *);
151static void em_init(void *);
152static void em_init_locked(struct adapter *);
153static void em_stop(void *);
154static void em_media_status(struct ifnet *, struct ifmediareq *);
155static int  em_media_change(struct ifnet *);
156static void em_identify_hardware(struct adapter *);
157static int  em_allocate_pci_resources(struct adapter *);
158static int  em_allocate_intr(struct adapter *);
159static void em_free_intr(struct adapter *);
160static void em_free_pci_resources(struct adapter *);
161static void em_local_timer(void *);
162static int  em_hardware_init(struct adapter *);
163static void em_setup_interface(device_t, struct adapter *);
164static int  em_setup_transmit_structures(struct adapter *);
165static void em_initialize_transmit_unit(struct adapter *);
166static int  em_setup_receive_structures(struct adapter *);
167static void em_initialize_receive_unit(struct adapter *);
168static void em_enable_intr(struct adapter *);
169static void em_disable_intr(struct adapter *);
170static void em_free_transmit_structures(struct adapter *);
171static void em_free_receive_structures(struct adapter *);
172static void em_update_stats_counters(struct adapter *);
173static void em_clean_transmit_interrupts(struct adapter *);
174static int  em_allocate_receive_structures(struct adapter *);
175static int  em_allocate_transmit_structures(struct adapter *);
176static int em_process_receive_interrupts(struct adapter *, int);
177#ifndef __NO_STRICT_ALIGNMENT
178static int  em_fixup_rx(struct adapter *);
179#endif
180static void em_receive_checksum(struct adapter *,
181				struct em_rx_desc *,
182				struct mbuf *);
183static void em_transmit_checksum_setup(struct adapter *,
184				       struct mbuf *,
185				       u_int32_t *,
186				       u_int32_t *);
187static void em_set_promisc(struct adapter *);
188static void em_disable_promisc(struct adapter *);
189static void em_set_multi(struct adapter *);
190static void em_print_hw_stats(struct adapter *);
191static void em_print_link_status(struct adapter *);
192static int  em_get_buf(int i, struct adapter *,
193		       struct mbuf *);
194static void em_enable_vlans(struct adapter *);
195static void em_disable_vlans(struct adapter *);
196static int  em_encap(struct adapter *, struct mbuf **);
197static void em_smartspeed(struct adapter *);
198static int  em_82547_fifo_workaround(struct adapter *, int);
199static void em_82547_update_fifo_head(struct adapter *, int);
200static int  em_82547_tx_fifo_reset(struct adapter *);
201static void em_82547_move_tail(void *arg);
202static void em_82547_move_tail_locked(struct adapter *);
203static int  em_dma_malloc(struct adapter *, bus_size_t,
204			  struct em_dma_alloc *, int);
205static void em_dma_free(struct adapter *, struct em_dma_alloc *);
206static void em_print_debug_info(struct adapter *);
207static int  em_is_valid_ether_addr(u_int8_t *);
208static int  em_sysctl_stats(SYSCTL_HANDLER_ARGS);
209static int  em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
210static u_int32_t em_fill_descriptors (bus_addr_t address,
211				      u_int32_t length,
212				      PDESC_ARRAY desc_array);
213static int  em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
214static void em_add_int_delay_sysctl(struct adapter *, const char *,
215				    const char *, struct em_int_delay_info *,
216				    int, int);
217#ifndef NO_EM_FASTINTR
218static void em_add_int_process_limit(struct adapter *, const char *,
219				     const char *, int *, int);
220static void em_handle_rxtx(void *context, int pending);
221static void em_handle_link(void *context, int pending);
222#endif
223#ifdef DEVICE_POLLING
224static poll_handler_t em_poll;
225#endif
226
227/*********************************************************************
228 *  FreeBSD Device Interface Entry Points
229 *********************************************************************/
230
231static device_method_t em_methods[] = {
232	/* Device interface */
233	DEVMETHOD(device_probe, em_probe),
234	DEVMETHOD(device_attach, em_attach),
235	DEVMETHOD(device_detach, em_detach),
236	DEVMETHOD(device_shutdown, em_shutdown),
237	DEVMETHOD(device_suspend, em_suspend),
238	DEVMETHOD(device_resume, em_resume),
239	{0, 0}
240};
241
242static driver_t em_driver = {
243	"em", em_methods, sizeof(struct adapter ),
244};
245
246static devclass_t em_devclass;
247DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
248MODULE_DEPEND(em, pci, 1, 1, 1);
249MODULE_DEPEND(em, ether, 1, 1, 1);
250
251/*********************************************************************
252 *  Tunable default values.
253 *********************************************************************/
254
255#define E1000_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
256#define E1000_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)
257
258static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
259static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
260static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
261static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
262static int em_rxd = EM_DEFAULT_RXD;
263static int em_txd = EM_DEFAULT_TXD;
264
265TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
266TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
267TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
268TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
269TUNABLE_INT("hw.em.rxd", &em_rxd);
270TUNABLE_INT("hw.em.txd", &em_txd);
271#ifndef NO_EM_FASTINTR
272static int em_rx_process_limit = 100;
273TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
274#endif
275
276/*********************************************************************
277 *  Device identification routine
278 *
279 *  em_probe determines if the driver should be loaded on
280 *  adapter based on PCI vendor/device id of the adapter.
281 *
282 *  return BUS_PROBE_DEFAULT on success, positive on failure
283 *********************************************************************/
284
285static int
286em_probe(device_t dev)
287{
288	em_vendor_info_t *ent;
289
290	u_int16_t       pci_vendor_id = 0;
291	u_int16_t       pci_device_id = 0;
292	u_int16_t       pci_subvendor_id = 0;
293	u_int16_t       pci_subdevice_id = 0;
294	char            adapter_name[60];
295
296	INIT_DEBUGOUT("em_probe: begin");
297
298	pci_vendor_id = pci_get_vendor(dev);
299	if (pci_vendor_id != EM_VENDOR_ID)
300		return(ENXIO);
301
302	pci_device_id = pci_get_device(dev);
303	pci_subvendor_id = pci_get_subvendor(dev);
304	pci_subdevice_id = pci_get_subdevice(dev);
305
306	ent = em_vendor_info_array;
307	while (ent->vendor_id != 0) {
308		if ((pci_vendor_id == ent->vendor_id) &&
309		    (pci_device_id == ent->device_id) &&
310
311		    ((pci_subvendor_id == ent->subvendor_id) ||
312		     (ent->subvendor_id == PCI_ANY_ID)) &&
313
314		    ((pci_subdevice_id == ent->subdevice_id) ||
315		     (ent->subdevice_id == PCI_ANY_ID))) {
316			sprintf(adapter_name, "%s %s",
317				em_strings[ent->index],
318				em_driver_version);
319			device_set_desc_copy(dev, adapter_name);
320			return(BUS_PROBE_DEFAULT);
321		}
322		ent++;
323	}
324
325	return(ENXIO);
326}
327
328/*********************************************************************
329 *  Device initialization routine
330 *
331 *  The attach entry point is called when the driver is being loaded.
332 *  This routine identifies the type of hardware, allocates all resources
333 *  and initializes the hardware.
334 *
335 *  return 0 on success, positive on failure
336 *********************************************************************/
337
338static int
339em_attach(device_t dev)
340{
341	struct adapter * adapter;
342	int             tsize, rsize;
343	int		error = 0;
344
345	INIT_DEBUGOUT("em_attach: begin");
346
347	/* Allocate, clear, and link in our adapter structure */
348	if (!(adapter = device_get_softc(dev))) {
349		printf("em: adapter structure allocation failed\n");
350		return(ENOMEM);
351	}
352	bzero(adapter, sizeof(struct adapter ));
353	adapter->dev = dev;
354	adapter->osdep.dev = dev;
355	adapter->unit = device_get_unit(dev);
356	EM_LOCK_INIT(adapter, device_get_nameunit(dev));
357
358	/* SYSCTL stuff */
359        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
360                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
361                        OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
362                        (void *)adapter, 0,
363                        em_sysctl_debug_info, "I", "Debug Information");
364
365        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
366                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
367                        OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
368                        (void *)adapter, 0,
369                        em_sysctl_stats, "I", "Statistics");
370
371	callout_init(&adapter->timer, CALLOUT_MPSAFE);
372	callout_init(&adapter->tx_fifo_timer, CALLOUT_MPSAFE);
373
374	/* Determine hardware revision */
375	em_identify_hardware(adapter);
376
377	/* Set up some sysctls for the tunable interrupt delays */
378	em_add_int_delay_sysctl(adapter, "rx_int_delay",
379	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
380	    E1000_REG_OFFSET(&adapter->hw, RDTR), em_rx_int_delay_dflt);
381	em_add_int_delay_sysctl(adapter, "tx_int_delay",
382	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
383	    E1000_REG_OFFSET(&adapter->hw, TIDV), em_tx_int_delay_dflt);
384	if (adapter->hw.mac_type >= em_82540) {
385		em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
386		    "receive interrupt delay limit in usecs",
387		    &adapter->rx_abs_int_delay,
388		    E1000_REG_OFFSET(&adapter->hw, RADV),
389		    em_rx_abs_int_delay_dflt);
390		em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
391		    "transmit interrupt delay limit in usecs",
392		    &adapter->tx_abs_int_delay,
393		    E1000_REG_OFFSET(&adapter->hw, TADV),
394		    em_tx_abs_int_delay_dflt);
395	}
396
397	/* Sysctls for limiting the amount of work done in the taskqueue */
398#ifndef NO_EM_FASTINTR
399	em_add_int_process_limit(adapter, "rx_processing_limit",
400	    "max number of rx packets to process", &adapter->rx_process_limit,
401	    em_rx_process_limit);
402#endif
403
404	/*
405	 * Validate number of transmit and receive descriptors. It
406	 * must not exceed hardware maximum, and must be multiple
407	 * of E1000_DBA_ALIGN.
408	 */
409	if (((em_txd * sizeof(struct em_tx_desc)) % E1000_DBA_ALIGN) != 0 ||
410	    (adapter->hw.mac_type >= em_82544 && em_txd > EM_MAX_TXD) ||
411	    (adapter->hw.mac_type < em_82544 && em_txd > EM_MAX_TXD_82543) ||
412	    (em_txd < EM_MIN_TXD)) {
413		printf("em%d: Using %d TX descriptors instead of %d!\n",
414		    adapter->unit, EM_DEFAULT_TXD, em_txd);
415		adapter->num_tx_desc = EM_DEFAULT_TXD;
416	} else
417		adapter->num_tx_desc = em_txd;
418	if (((em_rxd * sizeof(struct em_rx_desc)) % E1000_DBA_ALIGN) != 0 ||
419	    (adapter->hw.mac_type >= em_82544 && em_rxd > EM_MAX_RXD) ||
420	    (adapter->hw.mac_type < em_82544 && em_rxd > EM_MAX_RXD_82543) ||
421	    (em_rxd < EM_MIN_RXD)) {
422		printf("em%d: Using %d RX descriptors instead of %d!\n",
423		    adapter->unit, EM_DEFAULT_RXD, em_rxd);
424		adapter->num_rx_desc = EM_DEFAULT_RXD;
425	} else
426		adapter->num_rx_desc = em_rxd;
427
428        adapter->hw.autoneg = DO_AUTO_NEG;
429        adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
430        adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
431        adapter->hw.tbi_compatibility_en = TRUE;
432        adapter->rx_buffer_len = EM_RXBUFFER_2048;
433
434	adapter->hw.phy_init_script = 1;
435	adapter->hw.phy_reset_disable = FALSE;
436
437#ifndef EM_MASTER_SLAVE
438	adapter->hw.master_slave = em_ms_hw_default;
439#else
440	adapter->hw.master_slave = EM_MASTER_SLAVE;
441#endif
442	/*
443	 * Set the max frame size assuming standard ethernet
444	 * sized frames.
445	 */
446	adapter->hw.max_frame_size =
447		ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
448
449	adapter->hw.min_frame_size =
450		MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
451
452	/*
453	 * This controls when hardware reports transmit completion
454	 * status.
455	 */
456	adapter->hw.report_tx_early = 1;
457
458	if (em_allocate_pci_resources(adapter)) {
459		printf("em%d: Allocation of PCI resources failed\n",
460		       adapter->unit);
461                error = ENXIO;
462                goto err_pci;
463	}
464
465
466	/* Initialize eeprom parameters */
467        em_init_eeprom_params(&adapter->hw);
468
469	tsize = roundup2(adapter->num_tx_desc * sizeof(struct em_tx_desc),
470	    E1000_DBA_ALIGN);
471
472	/* Allocate Transmit Descriptor ring */
473        if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
474                printf("em%d: Unable to allocate tx_desc memory\n",
475                       adapter->unit);
476		error = ENOMEM;
477                goto err_tx_desc;
478        }
479        adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;
480
481	rsize = roundup2(adapter->num_rx_desc * sizeof(struct em_rx_desc),
482	    E1000_DBA_ALIGN);
483
484	/* Allocate Receive Descriptor ring */
485        if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
486                printf("em%d: Unable to allocate rx_desc memory\n",
487                        adapter->unit);
488		error = ENOMEM;
489                goto err_rx_desc;
490        }
491        adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;
492
493	/* Initialize the hardware */
494	if (em_hardware_init(adapter)) {
495		printf("em%d: Unable to initialize the hardware\n",
496		       adapter->unit);
497		error = EIO;
498                goto err_hw_init;
499	}
500
501	/* Copy the permanent MAC address out of the EEPROM */
502	if (em_read_mac_addr(&adapter->hw) < 0) {
503		printf("em%d: EEPROM read error while reading mac address\n",
504		       adapter->unit);
505		error = EIO;
506                goto err_mac_addr;
507	}
508
509	if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
510                printf("em%d: Invalid mac address\n", adapter->unit);
511                error = EIO;
512                goto err_mac_addr;
513        }
514
515	/* Setup OS specific network interface */
516	em_setup_interface(dev, adapter);
517
518	em_allocate_intr(adapter);
519
520	/* Initialize statistics */
521	em_clear_hw_cntrs(&adapter->hw);
522	em_update_stats_counters(adapter);
523	adapter->hw.get_link_status = 1;
524	em_check_for_link(&adapter->hw);
525
526	if (bootverbose) {
527		/* Print the link status */
528		if (adapter->link_active == 1) {
529			em_get_speed_and_duplex(&adapter->hw,
530			    &adapter->link_speed, &adapter->link_duplex);
531			printf("em%d:  Speed:%d Mbps  Duplex:%s\n",
532			       adapter->unit,
533			       adapter->link_speed,
534			       adapter->link_duplex == FULL_DUPLEX ? "Full" :
535				"Half");
536		} else
537			printf("em%d:  Speed:N/A  Duplex:N/A\n",
538			    adapter->unit);
539	}
540
541	/* Identify 82544 on PCIX */
542        em_get_bus_info(&adapter->hw);
543        if(adapter->hw.bus_type == em_bus_type_pcix &&
544           adapter->hw.mac_type == em_82544) {
545                adapter->pcix_82544 = TRUE;
546        }
547        else {
548                adapter->pcix_82544 = FALSE;
549        }
550	INIT_DEBUGOUT("em_attach: end");
551	return(0);
552
553err_mac_addr:
554err_hw_init:
555        em_dma_free(adapter, &adapter->rxdma);
556err_rx_desc:
557        em_dma_free(adapter, &adapter->txdma);
558err_tx_desc:
559err_pci:
560	em_free_intr(adapter);
561        em_free_pci_resources(adapter);
562	EM_LOCK_DESTROY(adapter);
563        return(error);
564
565}
566
567/*********************************************************************
568 *  Device removal routine
569 *
570 *  The detach entry point is called when the driver is being removed.
571 *  This routine stops the adapter and deallocates all the resources
572 *  that were allocated for driver operation.
573 *
574 *  return 0 on success, positive on failure
575 *********************************************************************/
576
577static int
578em_detach(device_t dev)
579{
580	struct adapter * adapter = device_get_softc(dev);
581	struct ifnet   *ifp = adapter->ifp;
582
583	INIT_DEBUGOUT("em_detach: begin");
584
585#ifdef DEVICE_POLLING
586	if (ifp->if_capenable & IFCAP_POLLING)
587		ether_poll_deregister(ifp);
588#endif
589
590	em_free_intr(adapter);
591	EM_LOCK(adapter);
592	adapter->in_detach = 1;
593	em_stop(adapter);
594	em_phy_hw_reset(&adapter->hw);
595	EM_UNLOCK(adapter);
596        ether_ifdetach(adapter->ifp);
597
598	em_free_pci_resources(adapter);
599	bus_generic_detach(dev);
600	if_free(ifp);
601
602	/* Free Transmit Descriptor ring */
603        if (adapter->tx_desc_base) {
604                em_dma_free(adapter, &adapter->txdma);
605                adapter->tx_desc_base = NULL;
606        }
607
608        /* Free Receive Descriptor ring */
609        if (adapter->rx_desc_base) {
610                em_dma_free(adapter, &adapter->rxdma);
611                adapter->rx_desc_base = NULL;
612        }
613
614	EM_LOCK_DESTROY(adapter);
615
616	return(0);
617}
618
619/*********************************************************************
620 *
621 *  Shutdown entry point
622 *
623 **********************************************************************/
624
625static int
626em_shutdown(device_t dev)
627{
628	struct adapter *adapter = device_get_softc(dev);
629	EM_LOCK(adapter);
630	em_stop(adapter);
631	EM_UNLOCK(adapter);
632	return(0);
633}
634
635/*
636 * Suspend/resume device methods.
637 */
638static int
639em_suspend(device_t dev)
640{
641	struct adapter *adapter = device_get_softc(dev);
642
643	EM_LOCK(adapter);
644	em_stop(adapter);
645	EM_UNLOCK(adapter);
646
647	return bus_generic_suspend(dev);
648}
649
650static int
651em_resume(device_t dev)
652{
653	struct adapter *adapter = device_get_softc(dev);
654	struct ifnet *ifp = adapter->ifp;
655
656	EM_LOCK(adapter);
657	em_init_locked(adapter);
658	if ((ifp->if_flags & IFF_UP) &&
659	    (ifp->if_drv_flags & IFF_DRV_RUNNING))
660		em_start_locked(ifp);
661	EM_UNLOCK(adapter);
662
663	return bus_generic_resume(dev);
664}
665
666
667/*********************************************************************
668 *  Transmit entry point
669 *
670 *  em_start is called by the stack to initiate a transmit.
671 *  The driver will remain in this routine as long as there are
672 *  packets to transmit and transmit resources are available.
673 *  In case resources are not available stack is notified and
674 *  the packet is requeued.
675 **********************************************************************/
676
677static void
678em_start_locked(struct ifnet *ifp)
679{
680	struct adapter	*adapter = ifp->if_softc;
681	struct mbuf	*m_head;
682
683	mtx_assert(&adapter->mtx, MA_OWNED);
684
685	if (!adapter->link_active)
686		return;
687
688	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
689
690		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
691		if (m_head == NULL)
692			break;
693		/*
694		 * em_encap() can modify our pointer, and or make it NULL on
695		 * failure.  In that event, we can't requeue.
696		 */
697		if (em_encap(adapter, &m_head)) {
698			if (m_head == NULL)
699				break;
700			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
701			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
702			break;
703		}
704
705		/* Send a copy of the frame to the BPF listener */
706		BPF_MTAP(ifp, m_head);
707
708		/* Set timeout in case hardware has problems transmitting. */
709		ifp->if_timer = EM_TX_TIMEOUT;
710	}
711}
712
713static void
714em_start(struct ifnet *ifp)
715{
716	struct adapter *adapter = ifp->if_softc;
717
718	EM_LOCK(adapter);
719	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
720		em_start_locked(ifp);
721	EM_UNLOCK(adapter);
722}
723
724/*********************************************************************
725 *  Ioctl entry point
726 *
727 *  em_ioctl is called when the user wants to configure the
728 *  interface.
729 *
730 *  return 0 on success, positive on failure
731 **********************************************************************/
732
733static int
734em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
735{
736	struct adapter	*adapter = ifp->if_softc;
737	struct ifreq	*ifr = (struct ifreq *)data;
738	int error = 0;
739
740	if (adapter->in_detach)
741		return(error);
742
743	switch (command) {
744	case SIOCSIFADDR:
745	case SIOCGIFADDR:
746		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
747		ether_ioctl(ifp, command, data);
748		break;
749	case SIOCSIFMTU:
750	    {
751		int max_frame_size;
752
753		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
754
755		switch (adapter->hw.mac_type) {
756		case em_82571:
757		case em_82572:
758			max_frame_size = 10500;
759			break;
760		case em_82573:
761			/* 82573 does not support jumbo frames. */
762			max_frame_size = ETHER_MAX_LEN;
763			break;
764		default:
765			max_frame_size = MAX_JUMBO_FRAME_SIZE;
766		}
767		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
768		    ETHER_CRC_LEN) {
769			error = EINVAL;
770			break;
771		}
772
773		EM_LOCK(adapter);
774		ifp->if_mtu = ifr->ifr_mtu;
775		adapter->hw.max_frame_size =
776		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
777		em_init_locked(adapter);
778		EM_UNLOCK(adapter);
779		break;
780	    }
781	case SIOCSIFFLAGS:
782		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
783		EM_LOCK(adapter);
784		if (ifp->if_flags & IFF_UP) {
785			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
786				em_init_locked(adapter);
787			}
788
789			em_disable_promisc(adapter);
790			em_set_promisc(adapter);
791		} else {
792			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
793				em_stop(adapter);
794			}
795		}
796		EM_UNLOCK(adapter);
797		break;
798	case SIOCADDMULTI:
799	case SIOCDELMULTI:
800		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
801		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
802			EM_LOCK(adapter);
803			em_disable_intr(adapter);
804			em_set_multi(adapter);
805			if (adapter->hw.mac_type == em_82542_rev2_0) {
806				em_initialize_receive_unit(adapter);
807			}
808#ifdef DEVICE_POLLING
809                        if (!(ifp->if_capenable & IFCAP_POLLING))
810#endif
811				em_enable_intr(adapter);
812			EM_UNLOCK(adapter);
813		}
814		break;
815	case SIOCSIFMEDIA:
816	case SIOCGIFMEDIA:
817		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
818		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
819		break;
820	case SIOCSIFCAP:
821	    {
822		int mask, reinit;
823
824		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
825		reinit = 0;
826		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
827#ifdef DEVICE_POLLING
828		if (mask & IFCAP_POLLING) {
829			if (ifr->ifr_reqcap & IFCAP_POLLING) {
830				error = ether_poll_register(em_poll, ifp);
831				if (error)
832					return(error);
833				EM_LOCK(adapter);
834				em_disable_intr(adapter);
835				ifp->if_capenable |= IFCAP_POLLING;
836				EM_UNLOCK(adapter);
837			} else {
838				error = ether_poll_deregister(ifp);
839				/* Enable interrupt even in error case */
840				EM_LOCK(adapter);
841				em_enable_intr(adapter);
842				ifp->if_capenable &= ~IFCAP_POLLING;
843				EM_UNLOCK(adapter);
844			}
845		}
846#endif
847		if (mask & IFCAP_HWCSUM) {
848			ifp->if_capenable ^= IFCAP_HWCSUM;
849			reinit = 1;
850		}
851		if (mask & IFCAP_VLAN_HWTAGGING) {
852			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
853			reinit = 1;
854		}
855		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
856			em_init(adapter);
857		VLAN_CAPABILITIES(ifp);
858		break;
859	    }
860	default:
861		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)", (int)command);
862		error = EINVAL;
863	}
864
865	return(error);
866}
867
868/*********************************************************************
869 *  Watchdog entry point
870 *
871 *  This routine is called whenever hardware quits transmitting.
872 *
873 **********************************************************************/
874
875static void
876em_watchdog(struct ifnet *ifp)
877{
878	struct adapter *adapter = ifp->if_softc;
879
880	EM_LOCK(adapter);
881	/* If we are in this routine because of pause frames, then
882	 * don't reset the hardware.
883	 */
884	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
885		ifp->if_timer = EM_TX_TIMEOUT;
886		EM_UNLOCK(adapter);
887		return;
888	}
889
890	if (em_check_for_link(&adapter->hw))
891		printf("em%d: watchdog timeout -- resetting\n", adapter->unit);
892
893	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
894	adapter->watchdog_events++;
895
896	em_init_locked(adapter);
897	EM_UNLOCK(adapter);
898}
899
900/*********************************************************************
901 *  Init entry point
902 *
903 *  This routine is used in two ways. It is used by the stack as
904 *  init entry point in network interface structure. It is also used
905 *  by the driver as a hw/sw initialization routine to get to a
906 *  consistent state.
907 *
908 *  return 0 on success, positive on failure
909 **********************************************************************/
910
static void
em_init_locked(struct adapter * adapter)
{
	struct ifnet   *ifp;

	uint32_t	pba;
	ifp = adapter->ifp;

	INIT_DEBUGOUT("em_init: begin");

	/* Caller must hold the adapter lock. */
	mtx_assert(&adapter->mtx, MA_OWNED);

	/* Quiesce the hardware and free rings before reprogramming. */
	em_stop(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 */
	switch (adapter->hw.mac_type) {
	case em_82547:
	case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (adapter->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		/* 82547 half-duplex FIFO workaround state (see em_82547_*) */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	case em_82571: /* 82571: Total Packet Buffer is 48K */
	case em_82572: /* 82572: Total Packet Buffer is 48K */
			pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;
	case em_82573: /* 82573: Total Packet Buffer is 32K */
		/* Jumbo frames not supported */
			pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K.   */
		if(adapter->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}

	INIT_DEBUGOUT1("em_init: pba=%dK",pba);
	E1000_WRITE_REG(&adapter->hw, PBA, pba);

	/* Get the latest mac address, User can use a LAA */
        bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac_addr,
              ETHER_ADDR_LEN);

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		printf("em%d: Unable to initialize the hardware\n",
		       adapter->unit);
		return;
	}

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		em_enable_vlans(adapter);

	/* Prepare transmit descriptors and buffers */
	if (em_setup_transmit_structures(adapter)) {
		printf("em%d: Could not setup transmit structures\n",
		       adapter->unit);
		em_stop(adapter);
		return;
	}
	em_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	em_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (em_setup_receive_structures(adapter)) {
		printf("em%d: Could not setup receive structures\n",
		       adapter->unit);
		em_stop(adapter);
		return;
	}
	em_initialize_receive_unit(adapter);

	/* Don't loose promiscuous settings */
	em_set_promisc(adapter);

	/* Hardware is up; mark the interface as running. */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* TX checksum offload is only available on 82543 and newer. */
	if (adapter->hw.mac_type >= em_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist = EM_CHECKSUM_FEATURES;
		else
			ifp->if_hwassist = 0;
	}

	/* Start the one-second stats/link timer. */
	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
	em_clear_hw_cntrs(&adapter->hw);
#ifdef DEVICE_POLLING
        /*
         * Only enable interrupts if we are not polling, make sure
         * they are off otherwise.
         */
        if (ifp->if_capenable & IFCAP_POLLING)
                em_disable_intr(adapter);
        else
#endif /* DEVICE_POLLING */
		em_enable_intr(adapter);

	/* Don't reset the phy next time init gets called */
	adapter->hw.phy_reset_disable = TRUE;

	return;
}
1026
static void
em_init(void *arg)
{
	struct adapter *adapter = arg;

	/* Serialized wrapper around em_init_locked(). */
	EM_LOCK(adapter);
	em_init_locked(adapter);
	EM_UNLOCK(adapter);
}
1037
1038
1039#ifdef DEVICE_POLLING
1040static void
1041em_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
1042{
1043	struct adapter *adapter = ifp->if_softc;
1044	uint32_t reg_icr;
1045
1046	mtx_assert(&adapter->mtx, MA_OWNED);
1047
1048	if (cmd == POLL_AND_CHECK_STATUS) {
1049		reg_icr = E1000_READ_REG(&adapter->hw, ICR);
1050		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1051			callout_stop(&adapter->timer);
1052			adapter->hw.get_link_status = 1;
1053			em_check_for_link(&adapter->hw);
1054			em_print_link_status(adapter);
1055			callout_reset(&adapter->timer, hz, em_local_timer,
1056			    adapter);
1057		}
1058        }
1059	em_process_receive_interrupts(adapter, count);
1060	em_clean_transmit_interrupts(adapter);
1061
1062	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1063		em_start_locked(ifp);
1064}
1065
1066static void
1067em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1068{
1069	struct adapter *adapter = ifp->if_softc;
1070
1071	EM_LOCK(adapter);
1072	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1073		em_poll_locked(ifp, cmd, count);
1074	EM_UNLOCK(adapter);
1075}
1076#endif /* DEVICE_POLLING */
1077
1078#ifndef NO_EM_FASTINTR
1079static void
1080em_handle_link(void *context, int pending)
1081{
1082	struct adapter	*adapter = context;
1083	struct ifnet *ifp;
1084
1085	ifp = adapter->ifp;
1086
1087	EM_LOCK(adapter);
1088
1089	callout_stop(&adapter->timer);
1090	adapter->hw.get_link_status = 1;
1091	em_check_for_link(&adapter->hw);
1092	em_print_link_status(adapter);
1093	callout_reset(&adapter->timer, hz, em_local_timer,
1094	    adapter);
1095	EM_UNLOCK(adapter);
1096}
1097
static void
em_handle_rxtx(void *context, int pending)
{
	struct adapter	*adapter = context;
	struct ifnet	*ifp;

	ifp = adapter->ifp;

	/*
	 * TODO:
	 * It should be possible to run the tx clean loop without the lock.
	 */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/* If RX work remains (limit hit), requeue ourselves so the
		 * remainder is processed on the next taskqueue run. */
		if (em_process_receive_interrupts(adapter,
		    adapter->rx_process_limit) != 0)
			taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
		EM_LOCK(adapter);
		em_clean_transmit_interrupts(adapter);

		/* Restart transmission if packets are waiting. */
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			em_start_locked(ifp);
		EM_UNLOCK(adapter);
	}

	/* Re-enable interrupts masked by em_intr_fast().  Done even when
	 * the interface is not running so interrupts are not left off. */
	em_enable_intr(adapter);
	return;
}
1125#endif
1126
1127/*********************************************************************
1128 *
1129 *  Interrupt Service routine
1130 *
1131 **********************************************************************/
1132#ifndef NO_EM_FASTINTR
/*
 * Fast interrupt filter: acknowledge/triage the interrupt cause and
 * defer all real work to taskqueues (rxtx_task / link_task).
 */
static void
em_intr_fast(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp;
	uint32_t	reg_icr;

	ifp = adapter->ifp;

#ifdef DEVICE_POLLING
	/* Polling mode owns the device; ignore stray interrupts. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		return;
	}
#endif /* DEVICE_POLLING */

	/* Reading ICR acknowledges pending causes. */
	reg_icr = E1000_READ_REG(&adapter->hw, ICR);

	/* Hot eject?  */
	if (reg_icr == 0xffffffff)
		return;

	/* Definitely not our interrupt.  */
	if (reg_icr == 0x0)
		return;

	/*
	 * Starting with the 82571 chip, bit 31 should be used to
	 * determine whether the interrupt belongs to us.
	 */
	if (adapter->hw.mac_type >= em_82571 &&
	    (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
		return;

	/*
	 * Mask interrupts until the taskqueue is finished running.  This is
	 * cheap, just assume that it is needed.  This also works around the
	 * MSI message reordering errata on certain systems.
	 */
	em_disable_intr(adapter);
	taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))
		taskqueue_enqueue(taskqueue_fast, &adapter->link_task);

	/* Count receiver overruns for statistics. */
	if (reg_icr & E1000_ICR_RXO) {
		adapter->rx_overruns++;
	}
	return;
}
1183#endif
1184
/*
 * Legacy (ithread) interrupt handler: loop reading ICR until no causes
 * remain, servicing RX/TX and link events inline under the adapter lock.
 */
static void
em_intr(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp;
	uint32_t	reg_icr;
	/* NOTE(review): wantinit is set below but its consumer is #if 0'd
	 * out, so it is currently write-only. */
	int		wantinit = 0;

	EM_LOCK(adapter);

	ifp = adapter->ifp;

#ifdef DEVICE_POLLING
	/* Polling mode owns the device; ignore stray interrupts. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		EM_UNLOCK(adapter);
		return;
	}
#endif

	for (;;) {
		/* Reading ICR acknowledges the pending causes. */
		reg_icr = E1000_READ_REG(&adapter->hw, ICR);
		if (adapter->hw.mac_type >= em_82571 &&
		    (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
			break;
		else if (reg_icr == 0)
			break;

		/*
		 * XXX: some laptops trigger several spurious interrupts
		 * on em(4) when in the resume cycle. The ICR register
		 * reports all-ones value in this case. Processing such
		 * interrupts would lead to a freeze. I don't know why.
		 */
		if (reg_icr == 0xffffffff)
			break;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			/* -1 = no RX packet limit for this pass. */
			em_process_receive_interrupts(adapter, -1);
			em_clean_transmit_interrupts(adapter);
		}

		/* Link status change */
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.get_link_status = 1;
			em_check_for_link(&adapter->hw);
			em_print_link_status(adapter);
			callout_reset(&adapter->timer, hz, em_local_timer,
			    adapter);
		}

		if (reg_icr & E1000_ICR_RXO) {
			adapter->rx_overruns++;
			wantinit = 1;
		}
	}
#if 0
	if (wantinit)
		em_init_locked(adapter);
#endif
	/* Restart transmission if packets are queued. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		em_start_locked(ifp);

	EM_UNLOCK(adapter);
}
1251
1252/*********************************************************************
1253 *
1254 *  Media Ioctl callback
1255 *
1256 *  This routine is called whenever the user queries the status of
1257 *  the interface using ifconfig.
1258 *
1259 **********************************************************************/
1260static void
1261em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1262{
1263	struct adapter * adapter = ifp->if_softc;
1264
1265	INIT_DEBUGOUT("em_media_status: begin");
1266
1267	em_check_for_link(&adapter->hw);
1268	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1269		if (adapter->link_active == 0) {
1270			em_get_speed_and_duplex(&adapter->hw,
1271						&adapter->link_speed,
1272						&adapter->link_duplex);
1273			adapter->link_active = 1;
1274		}
1275	} else {
1276		if (adapter->link_active == 1) {
1277			adapter->link_speed = 0;
1278			adapter->link_duplex = 0;
1279			adapter->link_active = 0;
1280		}
1281	}
1282
1283	ifmr->ifm_status = IFM_AVALID;
1284	ifmr->ifm_active = IFM_ETHER;
1285
1286	if (!adapter->link_active)
1287		return;
1288
1289	ifmr->ifm_status |= IFM_ACTIVE;
1290
1291	if (adapter->hw.media_type == em_media_type_fiber) {
1292		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1293	} else {
1294		switch (adapter->link_speed) {
1295		case 10:
1296			ifmr->ifm_active |= IFM_10_T;
1297			break;
1298		case 100:
1299			ifmr->ifm_active |= IFM_100_TX;
1300			break;
1301		case 1000:
1302			ifmr->ifm_active |= IFM_1000_T;
1303			break;
1304		}
1305		if (adapter->link_duplex == FULL_DUPLEX)
1306			ifmr->ifm_active |= IFM_FDX;
1307		else
1308			ifmr->ifm_active |= IFM_HDX;
1309	}
1310	return;
1311}
1312
1313/*********************************************************************
1314 *
1315 *  Media Ioctl callback
1316 *
1317 *  This routine is called when the user changes speed/duplex using
1318 *  media/mediopt option with ifconfig.
1319 *
1320 **********************************************************************/
1321static int
1322em_media_change(struct ifnet *ifp)
1323{
1324	struct adapter * adapter = ifp->if_softc;
1325	struct ifmedia  *ifm = &adapter->media;
1326
1327	INIT_DEBUGOUT("em_media_change: begin");
1328
1329	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1330		return(EINVAL);
1331
1332	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1333	case IFM_AUTO:
1334		adapter->hw.autoneg = DO_AUTO_NEG;
1335		adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1336		break;
1337	case IFM_1000_SX:
1338	case IFM_1000_T:
1339		adapter->hw.autoneg = DO_AUTO_NEG;
1340		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1341		break;
1342	case IFM_100_TX:
1343		adapter->hw.autoneg = FALSE;
1344		adapter->hw.autoneg_advertised = 0;
1345		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1346			adapter->hw.forced_speed_duplex = em_100_full;
1347		else
1348			adapter->hw.forced_speed_duplex	= em_100_half;
1349		break;
1350	case IFM_10_T:
1351		adapter->hw.autoneg = FALSE;
1352		adapter->hw.autoneg_advertised = 0;
1353		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1354			adapter->hw.forced_speed_duplex = em_10_full;
1355		else
1356			adapter->hw.forced_speed_duplex	= em_10_half;
1357		break;
1358	default:
1359		printf("em%d: Unsupported media type\n", adapter->unit);
1360	}
1361
1362	/* As the speed/duplex settings my have changed we need to
1363	 * reset the PHY.
1364	 */
1365	adapter->hw.phy_reset_disable = FALSE;
1366
1367	em_init(adapter);
1368
1369	return(0);
1370}
1371
1372/*********************************************************************
1373 *
1374 *  This routine maps the mbufs to tx descriptors.
1375 *
1376 *  return 0 on success, positive on failure
1377 **********************************************************************/
static int
em_encap(struct adapter *adapter, struct mbuf **m_headp)
{
        struct ifnet		*ifp = adapter->ifp;
	bus_dma_segment_t	segs[EM_MAX_SCATTER];
	bus_dmamap_t		map;
        struct em_buffer	*tx_buffer;
        struct em_tx_desc	*current_tx_desc;
	struct mbuf		*m_head;
        struct m_tag		*mtag;
	uint32_t		txd_upper, txd_lower, txd_used, txd_saved;
	int			nsegs, i, j;
	int			error = 0;

	m_head = *m_headp;
	current_tx_desc = NULL;
	txd_used = txd_saved = 0;

	/*
	 * Force a cleanup if number of TX descriptors
	 * available hits the threshold.
	 */
	if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
		em_clean_transmit_interrupts(adapter);
		/* Still too few after reclaim: tell the caller to back off. */
		if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
			adapter->no_tx_desc_avail1++;
			return(ENOBUFS);
		}
	}

	/*
	 * Map the packet for DMA.
	 */
	tx_buffer = &adapter->tx_buffer_area[adapter->next_avail_tx_desc];
	error = bus_dmamap_load_mbuf_sg(adapter->txtag, tx_buffer->map, m_head,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	map = tx_buffer->map;
	if (error != 0) {
		adapter->no_tx_dma_setup++;
		return (error);
	}
	KASSERT(nsegs != 0, ("em_encap: empty packet"));

	/* Not enough free descriptors for all DMA segments. */
	if (nsegs > adapter->num_tx_desc_avail) {
		adapter->no_tx_desc_avail2++;
		error = ENOBUFS;
		goto encap_fail;
	}

	/* Set up checksum-offload context bits when offload is enabled. */
	if (ifp->if_hwassist > 0) {
		em_transmit_checksum_setup(adapter,  m_head, &txd_upper,
		    &txd_lower);
	} else
		txd_upper = txd_lower = 0;

	/* Find out if we are in vlan mode. */
	mtag = VLAN_OUTPUT_TAG(ifp, m_head);

	/*
	 * When operating in promiscuous mode, hardware encapsulation for
	 * packets is disabled.  This means we have to add the vlan
	 * encapsulation in the driver, since it will have come down from the
	 * VLAN layer with a tag instead of a VLAN header.
	 */
	if (mtag != NULL && adapter->em_insert_vlan_header) {
		struct ether_vlan_header *evl;
		struct ether_header eh;

		/* Make the Ethernet header contiguous, save a copy of it. */
		m_head = m_pullup(m_head, sizeof(eh));
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		eh = *mtod(m_head, struct ether_header *);
		/* Grow the mbuf by the size of the VLAN header. */
		M_PREPEND(m_head, sizeof(*evl), M_DONTWAIT);
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		m_head = m_pullup(m_head, sizeof(*evl));
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		/* Rewrite the header with the 802.1Q encapsulation and
		 * move the tag from the m_tag into the frame itself. */
		evl = mtod(m_head, struct ether_vlan_header *);
		bcopy(&eh, evl, sizeof(*evl));
		evl->evl_proto = evl->evl_encap_proto;
		evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
		evl->evl_tag = htons(VLAN_TAG_VALUE(mtag));
		m_tag_delete(m_head, mtag);
		mtag = NULL;
		*m_headp = m_head;
	}

	i = adapter->next_avail_tx_desc;
	if (adapter->pcix_82544) {
		/* Remember the starting slot so we can roll back on failure. */
		txd_saved = i;
		txd_used = 0;
	}
	for (j = 0; j < nsegs; j++) {
		/* If adapter is 82544 and on PCIX bus. */
		if(adapter->pcix_82544) {
			DESC_ARRAY	desc_array;
			uint32_t	array_elements, counter;

			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
			array_elements = em_fill_descriptors(segs[j].ds_addr,
			    segs[j].ds_len, &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				if (txd_used == adapter->num_tx_desc_avail) {
					/* Out of descriptors: roll back. */
					adapter->next_avail_tx_desc = txd_saved;
					adapter->no_tx_desc_avail2++;
					error = ENOBUFS;
					goto encap_fail;
				}
				tx_buffer = &adapter->tx_buffer_area[i];
				current_tx_desc = &adapter->tx_desc_base[i];
				current_tx_desc->buffer_addr = htole64(
					desc_array.descriptor[counter].address);
				current_tx_desc->lower.data = htole32(
					(adapter->txd_cmd | txd_lower |
					(uint16_t)desc_array.descriptor[counter].length));
				current_tx_desc->upper.data = htole32((txd_upper));
				if (++i == adapter->num_tx_desc)
					i = 0;

				tx_buffer->m_head = NULL;
				txd_used++;
			}
		} else {
			/* One descriptor per DMA segment. */
			tx_buffer = &adapter->tx_buffer_area[i];
			current_tx_desc = &adapter->tx_desc_base[i];

			current_tx_desc->buffer_addr = htole64(segs[j].ds_addr);
			current_tx_desc->lower.data = htole32(
				adapter->txd_cmd | txd_lower | segs[j].ds_len);
			current_tx_desc->upper.data = htole32(txd_upper);

			if (++i == adapter->num_tx_desc)
				i = 0;

			tx_buffer->m_head = NULL;
		}
	}

	adapter->next_avail_tx_desc = i;
	if (adapter->pcix_82544)
		adapter->num_tx_desc_avail -= txd_used;
	else
		adapter->num_tx_desc_avail -= nsegs;

	if (mtag != NULL) {
		/* Set the vlan id. */
		current_tx_desc->upper.fields.special =
		    htole16(VLAN_TAG_VALUE(mtag));

		/* Tell hardware to add tag. */
		current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
	}

	/* The mbuf chain is owned by the last descriptor's buffer slot. */
	tx_buffer->m_head = m_head;
	bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP).
	 */
	current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);

	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
	 * that this frame is available to transmit.
	 */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* 82547 in half duplex uses the FIFO-hang workaround path. */
	if (adapter->hw.mac_type == em_82547 &&
	    adapter->link_duplex == HALF_DUPLEX) {
		em_82547_move_tail_locked(adapter);
	} else {
		E1000_WRITE_REG(&adapter->hw, TDT, i);
		if (adapter->hw.mac_type == em_82547) {
			em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len);
		}
	}

	return(0);

encap_fail:
	bus_dmamap_unload(adapter->txtag, tx_buffer->map);
	return (error);
}
1574
1575/*********************************************************************
1576 *
1577 * 82547 workaround to avoid controller hang in half-duplex environment.
1578 * The workaround is to avoid queuing a large packet that would span
1579 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1580 * in this case. We do that only when FIFO is quiescent.
1581 *
1582 **********************************************************************/
static void
em_82547_move_tail_locked(struct adapter *adapter)
{
	uint16_t hw_tdt;
	uint16_t sw_tdt;
	struct em_tx_desc *tx_desc;
	uint16_t length = 0;
	boolean_t eop = 0;

	EM_LOCK_ASSERT(adapter);

	/* Walk descriptors from the hardware tail to the software tail,
	 * advancing the hardware tail one full packet (EOP) at a time. */
	hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
	sw_tdt = adapter->next_avail_tx_desc;

	while (hw_tdt != sw_tdt) {
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if(++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if(eop) {
			/* If the packet would span the FIFO boundary and
			 * the FIFO cannot be reset yet, retry via callout. */
			if (em_82547_fifo_workaround(adapter, length)) {
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
					em_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
			em_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}
	return;
}
1618
static void
em_82547_move_tail(void *arg)
{
	struct adapter *adapter;

	/* Callout wrapper: acquire the lock and retry the tail move. */
	adapter = arg;
	EM_LOCK(adapter);
	em_82547_move_tail_locked(adapter);
	EM_UNLOCK(adapter);
}
1628
1629static int
1630em_82547_fifo_workaround(struct adapter *adapter, int len)
1631{
1632	int fifo_space, fifo_pkt_len;
1633
1634	fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1635
1636	if (adapter->link_duplex == HALF_DUPLEX) {
1637		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1638
1639		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1640			if (em_82547_tx_fifo_reset(adapter))
1641				return(0);
1642			else
1643				return(1);
1644		}
1645	}
1646
1647	return(0);
1648}
1649
1650static void
1651em_82547_update_fifo_head(struct adapter *adapter, int len)
1652{
1653	int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1654
1655	/* tx_fifo_head is always 16 byte aligned */
1656	adapter->tx_fifo_head += fifo_pkt_len;
1657	if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1658		adapter->tx_fifo_head -= adapter->tx_fifo_size;
1659	}
1660
1661	return;
1662}
1663
1664
static int
em_82547_tx_fifo_reset(struct adapter *adapter)
{
	uint32_t tctl;

	/* Only reset when the FIFO is quiescent: descriptor ring empty
	 * (TDT==TDH), FIFO pointers coincide, and no packets buffered. */
	if ( (E1000_READ_REG(&adapter->hw, TDT) ==
	      E1000_READ_REG(&adapter->hw, TDH)) &&
	     (E1000_READ_REG(&adapter->hw, TDFT) ==
	      E1000_READ_REG(&adapter->hw, TDFH)) &&
	     (E1000_READ_REG(&adapter->hw, TDFTS) ==
	      E1000_READ_REG(&adapter->hw, TDFHS)) &&
	     (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {

		/* Disable TX unit */
		tctl = E1000_READ_REG(&adapter->hw, TCTL);
		E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&adapter->hw, TDFT,  adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFH,  adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
		E1000_WRITE_FLUSH(&adapter->hw);

		adapter->tx_fifo_head = 0;
		adapter->tx_fifo_reset_cnt++;

		return(TRUE);
	}
	else {
		/* FIFO still busy; caller must retry later. */
		return(FALSE);
	}
}
1701
1702static void
1703em_set_promisc(struct adapter * adapter)
1704{
1705	struct ifnet	*ifp = adapter->ifp;
1706	uint32_t	reg_rctl;
1707
1708	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1709
1710	if (ifp->if_flags & IFF_PROMISC) {
1711		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1712		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1713		/* Disable VLAN stripping in promiscous mode
1714		 * This enables bridging of vlan tagged frames to occur
1715		 * and also allows vlan tags to be seen in tcpdump
1716		 */
1717		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1718			em_disable_vlans(adapter);
1719		adapter->em_insert_vlan_header = 1;
1720	} else if (ifp->if_flags & IFF_ALLMULTI) {
1721		reg_rctl |= E1000_RCTL_MPE;
1722		reg_rctl &= ~E1000_RCTL_UPE;
1723		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1724		adapter->em_insert_vlan_header = 0;
1725	} else
1726		adapter->em_insert_vlan_header = 0;
1727}
1728
1729static void
1730em_disable_promisc(struct adapter * adapter)
1731{
1732	struct ifnet	*ifp = adapter->ifp;
1733	uint32_t	reg_rctl;
1734
1735	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1736
1737	reg_rctl &=  (~E1000_RCTL_UPE);
1738	reg_rctl &=  (~E1000_RCTL_MPE);
1739	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1740
1741	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1742		em_enable_vlans(adapter);
1743	adapter->em_insert_vlan_header = 0;
1744}
1745
1746
1747/*********************************************************************
1748 *  Multicast Update
1749 *
1750 *  This routine is called whenever multicast address list is updated.
1751 *
1752 **********************************************************************/
1753
static void
em_set_multi(struct adapter * adapter)
{
        struct ifnet	*ifp = adapter->ifp;
        struct ifmultiaddr *ifma;
        uint32_t reg_rctl = 0;
        uint8_t  mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
        int mcnt = 0;

	IOCTL_DEBUGOUT("em_set_multi: begin");

	/* 82542 rev 2.0 requires the receiver be held in reset (and MWI
	 * disabled) while the multicast table is updated. */
	if (adapter->hw.mac_type == em_82542_rev2_0) {
		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
		if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
                        em_pci_clear_mwi(&adapter->hw);
		reg_rctl |= E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
		msec_delay(5);
	}

	/* Collect up to MAX_NUM_MULTICAST_ADDRESSES link-layer addresses. */
	IF_ADDR_LOCK(ifp);
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}
	IF_ADDR_UNLOCK(ifp);

	/* Too many groups for the filter table: fall back to accepting
	 * all multicast; otherwise program the hardware table. */
	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
	} else
		em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);

	/* Take 82542 rev 2.0 back out of reset and restore MWI. */
	if (adapter->hw.mac_type == em_82542_rev2_0) {
		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
		reg_rctl &= ~E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
		msec_delay(5);
		if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			em_pci_set_mwi(&adapter->hw);
	}
}
1804
1805
1806/*********************************************************************
1807 *  Timer routine
1808 *
1809 *  This routine checks for link status and updates statistics.
1810 *
1811 **********************************************************************/
1812
1813static void
1814em_local_timer(void *arg)
1815{
1816	struct adapter	*adapter = arg;
1817	struct ifnet	*ifp = adapter->ifp;
1818
1819	EM_LOCK(adapter);
1820
1821	em_check_for_link(&adapter->hw);
1822	em_print_link_status(adapter);
1823	em_update_stats_counters(adapter);
1824	if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING)
1825		em_print_hw_stats(adapter);
1826	em_smartspeed(adapter);
1827
1828	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1829
1830	EM_UNLOCK(adapter);
1831}
1832
1833static void
1834em_print_link_status(struct adapter * adapter)
1835{
1836	struct ifnet *ifp = adapter->ifp;
1837
1838	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1839		if (adapter->link_active == 0) {
1840			em_get_speed_and_duplex(&adapter->hw,
1841			    &adapter->link_speed,
1842			    &adapter->link_duplex);
1843			if (bootverbose)
1844				printf("em%d: Link is up %d Mbps %s\n",
1845				    adapter->unit,
1846				    adapter->link_speed,
1847				    ((adapter->link_duplex == FULL_DUPLEX) ?
1848				    "Full Duplex" : "Half Duplex"));
1849			adapter->link_active = 1;
1850			adapter->smartspeed = 0;
1851			if_link_state_change(ifp, LINK_STATE_UP);
1852		}
1853	} else {
1854		if (adapter->link_active == 1) {
1855			adapter->link_speed = 0;
1856			adapter->link_duplex = 0;
1857			if (bootverbose)
1858				printf("em%d: Link is Down\n", adapter->unit);
1859			adapter->link_active = 0;
1860			if_link_state_change(ifp, LINK_STATE_DOWN);
1861		}
1862	}
1863}
1864
1865/*********************************************************************
1866 *
1867 *  This routine disables all traffic on the adapter by issuing a
1868 *  global reset on the MAC and deallocates TX/RX buffers.
1869 *
1870 **********************************************************************/
1871
1872static void
1873em_stop(void *arg)
1874{
1875	struct adapter	*adapter = arg;
1876	struct ifnet	*ifp = adapter->ifp;
1877
1878	EM_LOCK_ASSERT(adapter);
1879
1880	INIT_DEBUGOUT("em_stop: begin");
1881
1882	em_disable_intr(adapter);
1883	em_reset_hw(&adapter->hw);
1884	callout_stop(&adapter->timer);
1885	callout_stop(&adapter->tx_fifo_timer);
1886	em_free_transmit_structures(adapter);
1887	em_free_receive_structures(adapter);
1888
1889	/* Tell the stack that the interface is no longer active */
1890	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1891}
1892
1893
1894/*********************************************************************
1895 *
1896 *  Determine hardware revision.
1897 *
1898 **********************************************************************/
1899static void
1900em_identify_hardware(struct adapter * adapter)
1901{
1902	device_t dev = adapter->dev;
1903
1904	/* Make sure our PCI config space has the necessary stuff set */
1905	adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1906	if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1907	      (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1908		printf("em%d: Memory Access and/or Bus Master bits were not set!\n",
1909		       adapter->unit);
1910		adapter->hw.pci_cmd_word |=
1911		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1912		pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
1913	}
1914
1915	/* Save off the information about this board */
1916	adapter->hw.vendor_id = pci_get_vendor(dev);
1917	adapter->hw.device_id = pci_get_device(dev);
1918	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1919	adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
1920	adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
1921
1922	/* Identify the MAC */
1923        if (em_set_mac_type(&adapter->hw))
1924                printf("em%d: Unknown MAC Type\n", adapter->unit);
1925
1926	if(adapter->hw.mac_type == em_82541 ||
1927	   adapter->hw.mac_type == em_82541_rev_2 ||
1928	   adapter->hw.mac_type == em_82547 ||
1929	   adapter->hw.mac_type == em_82547_rev_2)
1930		adapter->hw.phy_init_script = TRUE;
1931
1932        return;
1933}
1934
static int
em_allocate_pci_resources(struct adapter * adapter)
{
	int             val, rid;
	device_t        dev = adapter->dev;

	/* BAR 0 is the memory-mapped register window. */
	rid = PCIR_BAR(0);
	adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						     &rid, RF_ACTIVE);
	if (!(adapter->res_memory)) {
		printf("em%d: Unable to allocate bus resource: memory\n",
		       adapter->unit);
		/* NOTE(review): error paths return without releasing
		 * resources already allocated here — presumably the caller
		 * frees them on attach failure; confirm against the caller. */
		return(ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
	rman_get_bustag(adapter->res_memory);
	adapter->osdep.mem_bus_space_handle =
	rman_get_bushandle(adapter->res_memory);
	adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;


	if (adapter->hw.mac_type > em_82543) {
		/* Figure our where our IO BAR is ? */
		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
			val = pci_read_config(dev, rid, 4);
			if (E1000_BAR_TYPE(val) == E1000_BAR_TYPE_IO) {
				adapter->io_rid = rid;
				break;
			}
			rid += 4;
			/* check for 64bit BAR */
			if (E1000_BAR_MEM_TYPE(val) == E1000_BAR_MEM_TYPE_64BIT)
				rid += 4;
		}
		if (rid >= PCIR_CIS) {
			printf("em%d: Unable to locate IO BAR\n", adapter->unit);
			return (ENXIO);
		}
		adapter->res_ioport = bus_alloc_resource_any(dev,
							     SYS_RES_IOPORT,
							     &adapter->io_rid,
							     RF_ACTIVE);
		if (!(adapter->res_ioport)) {
			printf("em%d: Unable to allocate bus resource: ioport\n",
			       adapter->unit);
			return(ENXIO);
		}
		adapter->hw.io_base = 0;
		adapter->osdep.io_bus_space_tag =
		    rman_get_bustag(adapter->res_ioport);
		adapter->osdep.io_bus_space_handle =
		    rman_get_bushandle(adapter->res_ioport);
	}

	/* Allocate the (shareable) interrupt line. */
	rid = 0x0;
	adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
						        RF_SHAREABLE |
							RF_ACTIVE);
	if (!(adapter->res_interrupt)) {
		printf("em%d: Unable to allocate bus resource: interrupt\n",
		       adapter->unit);
		return(ENXIO);
	}

	/* Back-pointer used by the shared em_osdep register accessors. */
	adapter->hw.back = &adapter->osdep;

	return(0);
}
2003
2004int
2005em_allocate_intr(struct adapter *adapter)
2006{
2007	device_t        dev = adapter->dev;
2008
2009	/* Manually turn off all interrupts */
2010	E1000_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
2011
2012	/*
2013	 * Try allocating a fast interrupt and the associated deferred
2014	 * processing contexts.  If that doesn't work, try just using an
2015	 * ithread.
2016	 */
2017#ifndef NO_EM_FASTINTR
2018	/* Init the deferred processing contexts. */
2019	TASK_INIT(&adapter->rxtx_task, 0, em_handle_rxtx, adapter);
2020	TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
2021	adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
2022		taskqueue_thread_enqueue, &adapter->tq);
2023	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2024	    device_get_nameunit(adapter->dev));
2025	if (bus_setup_intr(dev, adapter->res_interrupt,
2026			   INTR_TYPE_NET | INTR_FAST, em_intr_fast, adapter,
2027			   &adapter->int_handler_tag) != 0) {
2028		taskqueue_free(adapter->tq);
2029		adapter->tq = NULL;
2030	}
2031#endif
2032	if (adapter->int_handler_tag == NULL) {
2033		if (bus_setup_intr(dev, adapter->res_interrupt,
2034				   INTR_TYPE_NET | INTR_MPSAFE,
2035				   em_intr, adapter,
2036				   &adapter->int_handler_tag)) {
2037			printf("em%d: Error registering interrupt handler!\n",
2038			       adapter->unit);
2039			return(ENXIO);
2040		}
2041	}
2042
2043	em_enable_intr(adapter);
2044	return (0);
2045}
2046
/*
 * Tear down the interrupt handler and drain/destroy the deferred
 * processing taskqueue created by em_allocate_intr().
 */
static void
em_free_intr(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	/* Detach the handler first so no new tasks get enqueued */
	if (adapter->res_interrupt != NULL) {
		bus_teardown_intr(dev, adapter->res_interrupt,
				  adapter->int_handler_tag);
		adapter->int_handler_tag = NULL;
	}
	if (adapter->tq != NULL) {
		/*
		 * NOTE(review): rxtx_task is drained from the private
		 * queue but link_task is drained from the global
		 * taskqueue_fast; verify this matches the queue each
		 * task is actually enqueued on in the interrupt path.
		 */
		taskqueue_drain(adapter->tq, &adapter->rxtx_task);
		taskqueue_drain(taskqueue_fast, &adapter->link_task);
		taskqueue_free(adapter->tq);
		adapter->tq = NULL;
	}
}
2064
2065static void
2066em_free_pci_resources(struct adapter * adapter)
2067{
2068	device_t dev = adapter->dev;
2069
2070	if (adapter->res_interrupt != NULL) {
2071		bus_release_resource(dev, SYS_RES_IRQ, 0,
2072				     adapter->res_interrupt);
2073	}
2074	if (adapter->res_memory != NULL) {
2075		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
2076				     adapter->res_memory);
2077	}
2078
2079	if (adapter->res_ioport != NULL) {
2080		bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
2081				     adapter->res_ioport);
2082	}
2083	return;
2084}
2085
2086/*********************************************************************
2087 *
2088 *  Initialize the hardware to a configuration as specified by the
2089 *  adapter structure. The controller is reset, the EEPROM is
2090 *  verified, the MAC address is set, then the shared initialization
2091 *  routines are called.
2092 *
2093 **********************************************************************/
2094static int
2095em_hardware_init(struct adapter * adapter)
2096{
2097	uint16_t rx_buffer_size;
2098
2099        INIT_DEBUGOUT("em_hardware_init: begin");
2100	/* Issue a global reset */
2101	em_reset_hw(&adapter->hw);
2102
2103	/* When hardware is reset, fifo_head is also reset */
2104	adapter->tx_fifo_head = 0;
2105
2106	/* Make sure we have a good EEPROM before we read from it */
2107	if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
2108		printf("em%d: The EEPROM Checksum Is Not Valid\n",
2109		       adapter->unit);
2110		return(EIO);
2111	}
2112
2113	if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
2114		printf("em%d: EEPROM read error while reading part number\n",
2115		       adapter->unit);
2116		return(EIO);
2117	}
2118
2119	/*
2120	 * These parameters control the automatic generation (Tx) and
2121	 * response (Rx) to Ethernet PAUSE frames.
2122	 * - High water mark should allow for at least two frames to be
2123	 *   received after sending an XOFF.
2124	 * - Low water mark works best when it is very near the high water mark.
2125	 *   This allows the receiver to restart by sending XON when it has drained
2126	 *   a bit.  Here we use an arbitary value of 1500 which will restart after
2127	 *   one full frame is pulled from the buffer.  There could be several smaller
2128	 *   frames in the buffer and if so they will not trigger the XON until their
2129	 *   total number reduces the buffer by 1500.
2130	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2131	 */
2132	rx_buffer_size = ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff) << 10 );
2133
2134	adapter->hw.fc_high_water = rx_buffer_size -
2135	    roundup2(adapter->hw.max_frame_size, 1024);
2136	adapter->hw.fc_low_water = adapter->hw.fc_high_water - 1500;
2137	adapter->hw.fc_pause_time = 0x1000;
2138	adapter->hw.fc_send_xon = TRUE;
2139	adapter->hw.fc = em_fc_full;
2140
2141	if (em_init_hw(&adapter->hw) < 0) {
2142		printf("em%d: Hardware Initialization Failed",
2143		       adapter->unit);
2144		return(EIO);
2145	}
2146
2147	em_check_for_link(&adapter->hw);
2148	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
2149		adapter->link_active = 1;
2150	else
2151		adapter->link_active = 0;
2152
2153	if (adapter->link_active) {
2154		em_get_speed_and_duplex(&adapter->hw,
2155					&adapter->link_speed,
2156					&adapter->link_duplex);
2157	} else {
2158		adapter->link_speed = 0;
2159		adapter->link_duplex = 0;
2160	}
2161
2162	return(0);
2163}
2164
2165/*********************************************************************
2166 *
2167 *  Setup networking device structure and register an interface.
2168 *
2169 **********************************************************************/
2170static void
2171em_setup_interface(device_t dev, struct adapter * adapter)
2172{
2173	struct ifnet   *ifp;
2174	INIT_DEBUGOUT("em_setup_interface: begin");
2175
2176	ifp = adapter->ifp = if_alloc(IFT_ETHER);
2177	if (ifp == NULL)
2178		panic("%s: can not if_alloc()", device_get_nameunit(dev));
2179	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2180	ifp->if_mtu = ETHERMTU;
2181	ifp->if_baudrate = 1000000000;
2182	ifp->if_init =  em_init;
2183	ifp->if_softc = adapter;
2184	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2185	ifp->if_ioctl = em_ioctl;
2186	ifp->if_start = em_start;
2187	ifp->if_watchdog = em_watchdog;
2188	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
2189	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
2190	IFQ_SET_READY(&ifp->if_snd);
2191
2192        ether_ifattach(ifp, adapter->hw.mac_addr);
2193
2194	ifp->if_capabilities = ifp->if_capenable = 0;
2195
2196	if (adapter->hw.mac_type >= em_82543) {
2197		ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2198		ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2199	}
2200
2201	/*
2202	 * Tell the upper layer(s) we support long frames.
2203	 */
2204	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2205	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2206	ifp->if_capenable |= IFCAP_VLAN_MTU;
2207
2208#ifdef DEVICE_POLLING
2209	ifp->if_capabilities |= IFCAP_POLLING;
2210#endif
2211
2212	/*
2213	 * Specify the media types supported by this adapter and register
2214	 * callbacks to update media and link information
2215	 */
2216	ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
2217		     em_media_status);
2218	if (adapter->hw.media_type == em_media_type_fiber) {
2219		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
2220			    0, NULL);
2221		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
2222			    0, NULL);
2223	} else {
2224		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2225		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2226			    0, NULL);
2227		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
2228			    0, NULL);
2229		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
2230			    0, NULL);
2231		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
2232			    0, NULL);
2233		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2234	}
2235	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2236	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2237
2238	return;
2239}
2240
2241
2242/*********************************************************************
2243 *
2244 *  Workaround for SmartSpeed on 82541 and 82547 controllers
2245 *
2246 **********************************************************************/
/*
 * SmartSpeed workaround state machine, driven from the timer.
 * Only active while link is down on an IGP PHY that is
 * autonegotiating and advertising 1000BASE-T full duplex.
 */
static void
em_smartspeed(struct adapter *adapter)
{
        uint16_t phy_tmp;

	if(adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
	   !adapter->hw.autoneg || !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
		return;

        if(adapter->smartspeed == 0) {
                /* If Master/Slave config fault is asserted twice,
                 * we assume back-to-back */
                em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
                if(!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) return;
                em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
                if(phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
                        em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
					&phy_tmp);
                        /* Disable manual master/slave config and renegotiate */
                        if(phy_tmp & CR_1000T_MS_ENABLE) {
                                phy_tmp &= ~CR_1000T_MS_ENABLE;
                                em_write_phy_reg(&adapter->hw,
                                                    PHY_1000T_CTRL, phy_tmp);
                                adapter->smartspeed++;
                                if(adapter->hw.autoneg &&
                                   !em_phy_setup_autoneg(&adapter->hw) &&
				   !em_read_phy_reg(&adapter->hw, PHY_CTRL,
                                                       &phy_tmp)) {
                                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
                                                    MII_CR_RESTART_AUTO_NEG);
                                        em_write_phy_reg(&adapter->hw,
							 PHY_CTRL, phy_tmp);
                                }
                        }
                }
                return;
        } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
                /* If still no link, perhaps using 2/3 pair cable */
                em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
                phy_tmp |= CR_1000T_MS_ENABLE;
                em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
                /* Re-enable manual master/slave config, restart autoneg */
                if(adapter->hw.autoneg &&
                   !em_phy_setup_autoneg(&adapter->hw) &&
                   !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
                                    MII_CR_RESTART_AUTO_NEG);
                        em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
                }
        }
        /* Restart process after EM_SMARTSPEED_MAX iterations */
        if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
                adapter->smartspeed = 0;

	return;
}
2301
2302
2303/*
2304 * Manage DMA'able memory.
2305 */
2306static void
2307em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2308{
2309        if (error)
2310                return;
2311        *(bus_addr_t *) arg = segs[0].ds_addr;
2312}
2313
2314static int
2315em_dma_malloc(struct adapter *adapter, bus_size_t size,
2316        struct em_dma_alloc *dma, int mapflags)
2317{
2318        int r;
2319
2320	r = bus_dma_tag_create(NULL,                    /* parent */
2321                               E1000_DBA_ALIGN, 0,      /* alignment, bounds */
2322                               BUS_SPACE_MAXADDR,       /* lowaddr */
2323                               BUS_SPACE_MAXADDR,       /* highaddr */
2324                               NULL, NULL,              /* filter, filterarg */
2325                               size,                    /* maxsize */
2326                               1,                       /* nsegments */
2327                               size,                    /* maxsegsize */
2328                               0,		        /* flags */
2329			       NULL,			/* lockfunc */
2330			       NULL,			/* lockarg */
2331                               &dma->dma_tag);
2332        if (r != 0) {
2333                printf("em%d: em_dma_malloc: bus_dma_tag_create failed; "
2334                        "error %u\n", adapter->unit, r);
2335                goto fail_0;
2336        }
2337
2338        r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2339                             BUS_DMA_NOWAIT, &dma->dma_map);
2340        if (r != 0) {
2341                printf("em%d: em_dma_malloc: bus_dmammem_alloc failed; "
2342                        "size %ju, error %d\n", adapter->unit,
2343			(uintmax_t)size, r);
2344                goto fail_2;
2345        }
2346
2347	dma->dma_paddr = 0;
2348        r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2349                            size,
2350                            em_dmamap_cb,
2351                            &dma->dma_paddr,
2352                            mapflags | BUS_DMA_NOWAIT);
2353        if (r != 0 || dma->dma_paddr == 0) {
2354                printf("em%d: em_dma_malloc: bus_dmamap_load failed; "
2355                        "error %u\n", adapter->unit, r);
2356                goto fail_3;
2357        }
2358
2359        return (0);
2360
2361fail_3:
2362        bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2363fail_2:
2364        bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2365        bus_dma_tag_destroy(dma->dma_tag);
2366fail_0:
2367        dma->dma_map = NULL;
2368        dma->dma_tag = NULL;
2369        return (r);
2370}
2371
2372static void
2373em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2374{
2375	if (dma->dma_tag == NULL)
2376		return;
2377	if (dma->dma_map != NULL) {
2378		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2379		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2380		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2381		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2382		dma->dma_map = NULL;
2383	}
2384        bus_dma_tag_destroy(dma->dma_tag);
2385	dma->dma_tag = NULL;
2386}
2387
2388
2389/*********************************************************************
2390 *
2391 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2392 *  the information needed to transmit a packet on the wire.
2393 *
2394 **********************************************************************/
2395static int
2396em_allocate_transmit_structures(struct adapter * adapter)
2397{
2398	if (!(adapter->tx_buffer_area =
2399	      (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2400					     adapter->num_tx_desc, M_DEVBUF,
2401					     M_NOWAIT))) {
2402		printf("em%d: Unable to allocate tx_buffer memory\n",
2403		       adapter->unit);
2404		return ENOMEM;
2405	}
2406
2407	bzero(adapter->tx_buffer_area,
2408	      sizeof(struct em_buffer) * adapter->num_tx_desc);
2409
2410	return 0;
2411}
2412
2413/*********************************************************************
2414 *
2415 *  Allocate and initialize transmit structures.
2416 *
2417 **********************************************************************/
/*
 * Allocate and initialize everything needed to transmit: a TX DMA
 * tag sized for one maximum frame split over EM_MAX_SCATTER segments,
 * the tx_buffer array, one DMA map per descriptor, and the ring
 * bookkeeping.  On any failure the partially created state is torn
 * down via em_free_transmit_structures() and the error is returned.
 */
static int
em_setup_transmit_structures(struct adapter * adapter)
{
	struct em_buffer *tx_buffer;
	bus_size_t size;
	int error, i;

        /*
         * Setup DMA descriptor areas.
         */
	size = roundup2(adapter->hw.max_frame_size, MCLBYTES);
	if ((error = bus_dma_tag_create(NULL,           /* parent */
                               1, 0,                    /* alignment, bounds */
                               BUS_SPACE_MAXADDR,       /* lowaddr */
                               BUS_SPACE_MAXADDR,       /* highaddr */
                               NULL, NULL,              /* filter, filterarg */
                               size,                    /* maxsize */
                               EM_MAX_SCATTER,          /* nsegments */
                               size,                    /* maxsegsize */
                               0,                       /* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockarg */
                               &adapter->txtag)) != 0) {
		printf("em%d: Unable to allocate TX DMA tag\n", adapter->unit);
		goto fail;
        }

        if ((error = em_allocate_transmit_structures(adapter)) != 0)
		goto fail;

	/*
	 * Clear the descriptor ring (tx_desc_base is presumably the VA
	 * of the txdma allocation set up elsewhere) and create one DMA
	 * map per software buffer.
	 */
        bzero((void *) adapter->tx_desc_base,
              (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
	tx_buffer = adapter->tx_buffer_area;
	for (i = 0; i < adapter->num_tx_desc; i++) {
		error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
		if (error != 0) {
			printf("em%d: Unable to create TX DMA map\n",
			    adapter->unit);
			goto fail;
		}
		tx_buffer++;
	}

	/* Ring starts empty with all descriptors available */
        adapter->next_avail_tx_desc = 0;
        adapter->oldest_used_tx_desc = 0;

        /* Set number of descriptors available */
        adapter->num_tx_desc_avail = adapter->num_tx_desc;

        /* Set checksum context */
        adapter->active_checksum_context = OFFLOAD_NONE;
	/* Make the cleared ring visible to the hardware */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        return (0);

fail:
	em_free_transmit_structures(adapter);
	return (error);
}
2478
2479/*********************************************************************
2480 *
2481 *  Enable transmit unit.
2482 *
2483 **********************************************************************/
/*
 * Program the hardware transmit unit: descriptor ring base/length,
 * head/tail pointers, inter-packet gap, interrupt delays, and the
 * transmit control register.
 */
static void
em_initialize_transmit_unit(struct adapter * adapter)
{
	u_int32_t       reg_tctl;
	u_int32_t       reg_tipg = 0;
	u_int64_t	bus_addr;

         INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = adapter->txdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, TDBAL, (u_int32_t)bus_addr);
	E1000_WRITE_REG(&adapter->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, TDLEN,
			adapter->num_tx_desc *
			sizeof(struct em_tx_desc));

	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&adapter->hw, TDH, 0);
	E1000_WRITE_REG(&adapter->hw, TDT, 0);


	HW_DEBUGOUT2("Base = %x, Length = %x\n",
		     E1000_READ_REG(&adapter->hw, TDBAL),
		     E1000_READ_REG(&adapter->hw, TDLEN));

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (adapter->hw.mac_type) {
	case em_82542_rev2_0:
        case em_82542_rev2_1:
                reg_tipg = DEFAULT_82542_TIPG_IPGT;
                reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
                break;
        default:
		/* Fiber and copper parts use different IPGT defaults */
                if (adapter->hw.media_type == em_media_type_fiber)
                        reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
                else
                        reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
                reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
        }

	E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
	E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
	/* Absolute TX interrupt delay exists only on 82540 and newer */
	if(adapter->hw.mac_type >= em_82540)
		E1000_WRITE_REG(&adapter->hw, TADV,
		    adapter->tx_abs_int_delay.value);

	/* Program the Transmit Control Register */
	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
	/* 82571 and newer support multiple TX request */
	if (adapter->hw.mac_type >= em_82571)
		reg_tctl |= E1000_TCTL_MULR;
	/*
	 * NOTE(review): link_duplex is compared against the literal 1;
	 * verify this against the duplex encoding returned by
	 * em_get_speed_and_duplex() (HALF_DUPLEX/FULL_DUPLEX constants).
	 */
	if (adapter->link_duplex == 1) {
		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	} else {
		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	}
	E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;

	/* Request a delayed interrupt per descriptor when a delay is set */
	if (adapter->tx_int_delay.value > 0)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	return;
}
2552
2553/*********************************************************************
2554 *
2555 *  Free all transmit related data structures.
2556 *
2557 **********************************************************************/
2558static void
2559em_free_transmit_structures(struct adapter * adapter)
2560{
2561        struct em_buffer   *tx_buffer;
2562        int             i;
2563
2564        INIT_DEBUGOUT("free_transmit_structures: begin");
2565
2566        if (adapter->tx_buffer_area != NULL) {
2567                tx_buffer = adapter->tx_buffer_area;
2568                for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2569                        if (tx_buffer->m_head != NULL) {
2570				bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2571				    BUS_DMASYNC_POSTWRITE);
2572				bus_dmamap_unload(adapter->txtag,
2573				    tx_buffer->map);
2574                                m_freem(tx_buffer->m_head);
2575				tx_buffer->m_head = NULL;
2576                        } else if (tx_buffer->map != NULL)
2577				bus_dmamap_unload(adapter->txtag,
2578				    tx_buffer->map);
2579			if (tx_buffer->map != NULL) {
2580				bus_dmamap_destroy(adapter->txtag,
2581				    tx_buffer->map);
2582				tx_buffer->map = NULL;
2583			}
2584                }
2585        }
2586        if (adapter->tx_buffer_area != NULL) {
2587                free(adapter->tx_buffer_area, M_DEVBUF);
2588                adapter->tx_buffer_area = NULL;
2589        }
2590        if (adapter->txtag != NULL) {
2591                bus_dma_tag_destroy(adapter->txtag);
2592                adapter->txtag = NULL;
2593        }
2594        return;
2595}
2596
2597/*********************************************************************
2598 *
2599 *  The offload context needs to be set when we transfer the first
2600 *  packet of a particular protocol (TCP/UDP). We change the
2601 *  context only if the protocol type changes.
2602 *
2603 **********************************************************************/
/*
 * Configure hardware TX checksum offload for the given mbuf.  Sets
 * *txd_upper/*txd_lower for the data descriptors and, when the
 * offload protocol (TCP vs UDP) differs from the currently loaded
 * context, consumes one descriptor to write a new context descriptor.
 */
static void
em_transmit_checksum_setup(struct adapter * adapter,
			   struct mbuf *mp,
			   u_int32_t *txd_upper,
			   u_int32_t *txd_lower)
{
	struct em_context_desc *TXD;
	struct em_buffer *tx_buffer;
	int curr_txd;

	if (mp->m_pkthdr.csum_flags) {

		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			/* Context already loaded for TCP: nothing to do */
			if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
				return;
			else
				adapter->active_checksum_context = OFFLOAD_TCP_IP;

		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			/* Context already loaded for UDP: nothing to do */
			if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
				return;
			else
				adapter->active_checksum_context = OFFLOAD_UDP_IP;
		} else {
			/* Unsupported csum flags: no offload requested */
			*txd_upper = 0;
			*txd_lower = 0;
			return;
		}
	} else {
		*txd_upper = 0;
		*txd_lower = 0;
		return;
	}

	/* If we reach this point, the checksum offload context
	 * needs to be reset.
	 */
	curr_txd = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[curr_txd];
	TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];

	/*
	 * IP checksum section: header starts right after the ethernet
	 * header; assumes an IPv4 header without options (fixed
	 * sizeof(struct ip) bounds).
	 */
	TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
	TXD->lower_setup.ip_fields.ipcso =
		ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
	TXD->lower_setup.ip_fields.ipcse =
		htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);

	/* TCP/UDP section: tucse == 0 means "to end of packet" */
	TXD->upper_setup.tcp_fields.tucss =
		ETHER_HDR_LEN + sizeof(struct ip);
	TXD->upper_setup.tcp_fields.tucse = htole16(0);

	/* Checksum field offset differs between TCP and UDP headers */
	if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
		TXD->upper_setup.tcp_fields.tucso =
			ETHER_HDR_LEN + sizeof(struct ip) +
			offsetof(struct tcphdr, th_sum);
	} else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
		TXD->upper_setup.tcp_fields.tucso =
			ETHER_HDR_LEN + sizeof(struct ip) +
			offsetof(struct udphdr, uh_sum);
	}

	TXD->tcp_seg_setup.data = htole32(0);
	TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);

	/* Context descriptor carries no mbuf */
	tx_buffer->m_head = NULL;

	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	/* The context descriptor consumed one ring slot */
	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;

	return;
}
2682
2683/**********************************************************************
2684 *
2685 *  Examine each tx_buffer in the used queue. If the hardware is done
2686 *  processing the packet then free associated resources. The
2687 *  tx_buffer is put back on the free queue.
2688 *
2689 **********************************************************************/
/*
 * Reclaim completed TX descriptors: walk the ring from the oldest
 * used descriptor while the DD (descriptor done) status bit is set,
 * unmapping and freeing the associated mbufs, then update the
 * available count and the interface OACTIVE/watchdog state.
 * Caller must hold the adapter lock.
 */
static void
em_clean_transmit_interrupts(struct adapter * adapter)
{
        int i, num_avail;
        struct em_buffer *tx_buffer;
        struct em_tx_desc   *tx_desc;
	struct ifnet   *ifp = adapter->ifp;

	mtx_assert(&adapter->mtx, MA_OWNED);

	/* Nothing outstanding: ring is already fully reclaimed */
        if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
                return;

        num_avail = adapter->num_tx_desc_avail;
        i = adapter->oldest_used_tx_desc;

        tx_buffer = &adapter->tx_buffer_area[i];
        tx_desc = &adapter->tx_desc_base[i];

	/* Pull in the hardware's status writebacks before reading them */
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_POSTREAD);
        while (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {

		/* Clear status so this slot is not reclaimed twice */
                tx_desc->upper.data = 0;
                num_avail++;

		/* Slots without an mbuf (e.g. context descriptors) skip this */
                if (tx_buffer->m_head) {
			ifp->if_opackets++;
			bus_dmamap_sync(adapter->txtag, tx_buffer->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(adapter->txtag, tx_buffer->map);

                        m_freem(tx_buffer->m_head);
                        tx_buffer->m_head = NULL;
                }

                if (++i == adapter->num_tx_desc)
                        i = 0;

                tx_buffer = &adapter->tx_buffer_area[i];
                tx_desc = &adapter->tx_desc_base[i];
        }
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        adapter->oldest_used_tx_desc = i;

        /*
         * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack
         * that it is OK to send packets.
         * If there are no pending descriptors, clear the timeout. Otherwise,
         * if some descriptors have been freed, restart the timeout.
         */
        if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                if (num_avail == adapter->num_tx_desc)
                        ifp->if_timer = 0;
                else if (num_avail == adapter->num_tx_desc_avail)
                        ifp->if_timer = EM_TX_TIMEOUT;
        }
        adapter->num_tx_desc_avail = num_avail;
        return;
}
2753
2754/*********************************************************************
2755 *
2756 *  Get a buffer from system mbuf buffer pool.
2757 *
2758 **********************************************************************/
/*
 * Attach a receive mbuf cluster to RX descriptor slot `i'.  If `mp'
 * is NULL a fresh cluster is allocated; otherwise the caller's mbuf
 * is recycled (data pointer and lengths reset).  The cluster is DMA
 * mapped and its bus address written into the descriptor.
 * Returns 0 on success, ENOBUFS if no cluster is available, or the
 * bus_dma error code on mapping failure.
 */
static int
em_get_buf(int i, struct adapter *adapter, struct mbuf *mp)
{
	struct ifnet		*ifp = adapter->ifp;
	bus_dma_segment_t	segs[1];
	struct em_buffer	*rx_buffer;
	int			error, nsegs;

	if (mp == NULL) {
		mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (mp == NULL) {
			adapter->mbuf_cluster_failed++;
			return(ENOBUFS);
		}
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
	} else {
		/* Recycle the caller's mbuf: reset to a full, single cluster */
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

	/* Align the IP header on standard-MTU interfaces */
	if (ifp->if_mtu <= ETHERMTU)
                m_adj(mp, ETHER_ALIGN);

	rx_buffer = &adapter->rx_buffer_area[i];

	/*
	 * Using memory from the mbuf cluster pool, invoke the
	 * bus_dma machinery to arrange the memory mapping.
	 */
	error = bus_dmamap_load_mbuf_sg(adapter->rxtag, rx_buffer->map,
	    mp, segs, &nsegs, 0);
	if (error != 0) {
		/*
		 * NOTE(review): the mbuf is freed here even when it was
		 * supplied by the caller -- verify callers do not reuse
		 * `mp' after an error return.
		 */
		m_free(mp);
		return (error);
	}
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	rx_buffer->m_head = mp;
	adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);

	return(0);
}
2803
2804/*********************************************************************
2805 *
2806 *  Allocate memory for rx_buffer structures. Since we use one
2807 *  rx_buffer per received packet, the maximum number of rx_buffer's
2808 *  that we'll need is equal to the number of receive descriptors
2809 *  that we've allocated.
2810 *
2811 **********************************************************************/
2812static int
2813em_allocate_receive_structures(struct adapter * adapter)
2814{
2815        int             i, error;
2816        struct em_buffer *rx_buffer;
2817
2818        if (!(adapter->rx_buffer_area =
2819              (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2820                                          adapter->num_rx_desc, M_DEVBUF,
2821                                          M_NOWAIT))) {
2822                printf("em%d: Unable to allocate rx_buffer memory\n",
2823                       adapter->unit);
2824                return(ENOMEM);
2825        }
2826
2827        bzero(adapter->rx_buffer_area,
2828              sizeof(struct em_buffer) * adapter->num_rx_desc);
2829
2830        error = bus_dma_tag_create(NULL,                /* parent */
2831                               1, 0,                    /* alignment, bounds */
2832                               BUS_SPACE_MAXADDR,       /* lowaddr */
2833                               BUS_SPACE_MAXADDR,       /* highaddr */
2834                               NULL, NULL,              /* filter, filterarg */
2835                               MCLBYTES,                /* maxsize */
2836                               1,                       /* nsegments */
2837                               MCLBYTES,                /* maxsegsize */
2838                               BUS_DMA_ALLOCNOW,        /* flags */
2839			       NULL,			/* lockfunc */
2840			       NULL,			/* lockarg */
2841                               &adapter->rxtag);
2842        if (error != 0) {
2843                printf("em%d: em_allocate_receive_structures: "
2844                        "bus_dma_tag_create failed; error %u\n",
2845                       adapter->unit, error);
2846                goto fail;
2847        }
2848
2849        rx_buffer = adapter->rx_buffer_area;
2850        for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2851                error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2852                                          &rx_buffer->map);
2853                if (error != 0) {
2854                        printf("em%d: em_allocate_receive_structures: "
2855                                "bus_dmamap_create failed; error %u\n",
2856                                adapter->unit, error);
2857                        goto fail;
2858                }
2859        }
2860
2861        for (i = 0; i < adapter->num_rx_desc; i++) {
2862                error = em_get_buf(i, adapter, NULL);
2863		if (error != 0)
2864			goto fail;
2865        }
2866        bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2867            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2868
2869        return(0);
2870
2871fail:
2872	em_free_receive_structures(adapter);
2873        return (error);
2874}
2875
2876/*********************************************************************
2877 *
2878 *  Allocate and initialize receive structures.
2879 *
2880 **********************************************************************/
2881static int
2882em_setup_receive_structures(struct adapter * adapter)
2883{
2884	bzero((void *) adapter->rx_desc_base,
2885              (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2886
2887	if (em_allocate_receive_structures(adapter))
2888		return ENOMEM;
2889
2890	/* Setup our descriptor pointers */
2891        adapter->next_rx_desc_to_check = 0;
2892	return(0);
2893}
2894
2895/*********************************************************************
2896 *
2897 *  Enable receive unit.
2898 *
2899 **********************************************************************/
static void
em_initialize_receive_unit(struct adapter * adapter)
{
	u_int32_t       reg_rctl;
	u_int32_t       reg_rxcsum;
	struct ifnet    *ifp;
	u_int64_t	bus_addr;

        INIT_DEBUGOUT("em_initialize_receive_unit: begin");
	ifp = adapter->ifp;

	/* Make sure receives are disabled while setting up the descriptor ring */
	E1000_WRITE_REG(&adapter->hw, RCTL, 0);

	/* Set the Receive Delay Timer Register */
	E1000_WRITE_REG(&adapter->hw, RDTR,
			adapter->rx_int_delay.value | E1000_RDT_FPDB);

	if(adapter->hw.mac_type >= em_82540) {
		/* Absolute RX interrupt delay exists on 82540 and later. */
		E1000_WRITE_REG(&adapter->hw, RADV,
		    adapter->rx_abs_int_delay.value);

                /* Set the interrupt throttling rate.  Value is calculated
                 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */
#define MAX_INTS_PER_SEC        8000
#define DEFAULT_ITR             1000000000/(MAX_INTS_PER_SEC * 256)
                E1000_WRITE_REG(&adapter->hw, ITR, DEFAULT_ITR);
        }

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, RDBAL, (u_int32_t)bus_addr);
	E1000_WRITE_REG(&adapter->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
			sizeof(struct em_rx_desc));

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	E1000_WRITE_REG(&adapter->hw, RDH, 0);
	E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);

	/* Setup the Receive Control Register */
	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* TBI (fiber) compatibility needs bad packets passed up the stack. */
	if (adapter->hw.tbi_compatibility_on == TRUE)
		reg_rctl |= E1000_RCTL_SBP;


	/* Encode the configured buffer size; sizes above 2048 need BSEX. */
	switch (adapter->rx_buffer_len) {
	default:
	case EM_RXBUFFER_2048:
		reg_rctl |= E1000_RCTL_SZ_2048;
		break;
	case EM_RXBUFFER_4096:
		reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_8192:
		reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_16384:
		reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	/* Long Packet Enable for jumbo MTUs. */
	if (ifp->if_mtu > ETHERMTU)
		reg_rctl |= E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac_type >= em_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
	}

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);

	return;
}
2981
2982/*********************************************************************
2983 *
2984 *  Free receive related data structures.
2985 *
2986 **********************************************************************/
2987static void
2988em_free_receive_structures(struct adapter *adapter)
2989{
2990        struct em_buffer   *rx_buffer;
2991        int             i;
2992
2993        INIT_DEBUGOUT("free_receive_structures: begin");
2994
2995        if (adapter->rx_buffer_area != NULL) {
2996                rx_buffer = adapter->rx_buffer_area;
2997                for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2998			if (rx_buffer->m_head != NULL) {
2999				bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3000				    BUS_DMASYNC_POSTREAD);
3001				bus_dmamap_unload(adapter->rxtag,
3002				    rx_buffer->map);
3003				m_freem(rx_buffer->m_head);
3004				rx_buffer->m_head = NULL;
3005			} else if (rx_buffer->map != NULL)
3006				bus_dmamap_unload(adapter->rxtag,
3007				    rx_buffer->map);
3008                        if (rx_buffer->map != NULL) {
3009				bus_dmamap_destroy(adapter->rxtag,
3010				    rx_buffer->map);
3011				rx_buffer->map = NULL;
3012			}
3013                }
3014        }
3015        if (adapter->rx_buffer_area != NULL) {
3016                free(adapter->rx_buffer_area, M_DEVBUF);
3017                adapter->rx_buffer_area = NULL;
3018        }
3019        if (adapter->rxtag != NULL) {
3020                bus_dma_tag_destroy(adapter->rxtag);
3021                adapter->rxtag = NULL;
3022        }
3023        return;
3024}
3025
3026/*********************************************************************
3027 *
3028 *  This routine executes in interrupt context. It replenishes
3029 *  the mbufs in the descriptor and sends data which has been
3030 *  dma'ed into host memory to upper layer.
3031 *
3032 *  We loop at most count times if count is > 0, or until done if
3033 *  count < 0.
3034 *
3035 *********************************************************************/
static int
em_process_receive_interrupts(struct adapter * adapter, int count)
{
	struct ifnet	*ifp;
	struct mbuf	*mp;
	uint8_t		accept_frame = 0;
	uint8_t		eop = 0;
	uint16_t 	len, desc_len, prev_len_adj;
	int		i;

	/* Pointer to the receive descriptor being examined. */
	struct em_rx_desc   *current_desc;

	ifp = adapter->ifp;
	i = adapter->next_rx_desc_to_check;
	current_desc = &adapter->rx_desc_base[i];
	/* Make the device's descriptor writes visible to the CPU. */
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	/* Nothing to do if the current descriptor isn't done (DD clear). */
	if (!((current_desc->status) & E1000_RXD_STAT_DD))
		return (0);

	/*
	 * Process completed descriptors.  count limits the number of
	 * whole frames handled per call (negative count means no limit;
	 * it is only decremented on EOP descriptors below).
	 */
	while ((current_desc->status & E1000_RXD_STAT_DD) &&
	    (count != 0) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		struct mbuf *m = NULL;

		mp = adapter->rx_buffer_area[i].m_head;
		/* Sync and unload so the CPU owns the buffer contents. */
		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(adapter->rxtag,
		    adapter->rx_buffer_area[i].map);

		accept_frame = 1;
		prev_len_adj = 0;
		desc_len = le16toh(current_desc->length);
		if (current_desc->status & E1000_RXD_STAT_EOP) {
			count--;
			eop = 1;
			/*
			 * Strip the CRC from the frame length.  If this
			 * final fragment is shorter than the CRC itself,
			 * the remainder must come off the previous mbuf
			 * in the chain (prev_len_adj, applied below).
			 */
			if (desc_len < ETHER_CRC_LEN) {
				len = 0;
				prev_len_adj = ETHER_CRC_LEN - desc_len;
			} else
				len = desc_len - ETHER_CRC_LEN;
		} else {
			eop = 0;
			len = desc_len;
		}

		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			uint8_t		last_byte;
			uint32_t	pkt_len = desc_len;

			if (adapter->fmp != NULL)
				pkt_len += adapter->fmp->m_pkthdr.len;

			/*
			 * TBI (fiber) workaround: frames ending in a
			 * carrier-extend byte may still be acceptable;
			 * adjust stats and trim the extend byte.
			 */
			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
			if (TBI_ACCEPT(&adapter->hw, current_desc->status,
			    current_desc->errors,
			    pkt_len, last_byte)) {
				em_tbi_adjust_stats(&adapter->hw,
				    &adapter->stats, pkt_len,
				    adapter->hw.mac_addr);
				if (len > 0)
					len--;
			} else
				accept_frame = 0;
		}

		if (accept_frame) {
			/*
			 * Refill the slot with a fresh cluster; on failure
			 * recycle the just-received mbuf back into the slot
			 * and drop the partially-assembled frame.
			 */
			if (em_get_buf(i, adapter, NULL) == ENOBUFS) {
				adapter->dropped_pkts++;
				em_get_buf(i, adapter, mp);
				if (adapter->fmp != NULL)
					m_freem(adapter->fmp);
				adapter->fmp = NULL;
				adapter->lmp = NULL;
				break;
			}

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (adapter->fmp == NULL) {
				mp->m_pkthdr.len = len;
				adapter->fmp = mp; /* Store the first mbuf */
				adapter->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				/*
				 * Adjust length of previous mbuf in chain if
				 * we received less than 4 bytes in the last
				 * descriptor.
				 */
				if (prev_len_adj > 0) {
					adapter->lmp->m_len -= prev_len_adj;
					adapter->fmp->m_pkthdr.len -=
					    prev_len_adj;
				}
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				/* Frame complete: finish it and hand up. */
				adapter->fmp->m_pkthdr.rcvif = ifp;
				ifp->if_ipackets++;
				em_receive_checksum(adapter, current_desc,
				    adapter->fmp);
#ifndef __NO_STRICT_ALIGNMENT
				if (ifp->if_mtu > ETHERMTU &&
				    em_fixup_rx(adapter) != 0)
					goto skip;
#endif
				if (current_desc->status & E1000_RXD_STAT_VP)
					VLAN_INPUT_TAG(ifp, adapter->fmp,
					    (le16toh(current_desc->special) &
					    E1000_RXD_SPC_VLAN_MASK));
#ifndef __NO_STRICT_ALIGNMENT
skip:
#endif
				m = adapter->fmp;
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
		} else {
			/* Bad frame: recycle the mbuf, drop any chain. */
			adapter->dropped_pkts++;
			em_get_buf(i, adapter, mp);
			if (adapter->fmp != NULL)
				m_freem(adapter->fmp);
			adapter->fmp = NULL;
			adapter->lmp = NULL;
		}

		/* Zero out the receive descriptors status. */
		current_desc->status = 0;
		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor. */
		if (++i == adapter->num_rx_desc)
			i = 0;
		if (m != NULL) {
			/*
			 * Save/restore the ring index around if_input():
			 * the stack may re-enter the driver (e.g. polling)
			 * and move next_rx_desc_to_check under us.
			 */
			adapter->next_rx_desc_to_check = i;
			(*ifp->if_input)(ifp, m);
			i = adapter->next_rx_desc_to_check;
		}
		current_desc = &adapter->rx_desc_base[i];
	}
	adapter->next_rx_desc_to_check = i;

	/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
	if (--i < 0)
		i = adapter->num_rx_desc - 1;
	E1000_WRITE_REG(&adapter->hw, RDT, i);
	/* Report whether more completed descriptors remain. */
	if (!((current_desc->status) & E1000_RXD_STAT_DD))
		return (0);

	return (1);
}
3197
3198#ifndef __NO_STRICT_ALIGNMENT
3199/*
3200 * When jumbo frames are enabled we should realign entire payload on
3201 * architecures with strict alignment. This is serious design mistake of 8254x
3202 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
3203 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
3204 * payload. On architecures without strict alignment restrictions 8254x still
3205 * performs unaligned memory access which would reduce the performance too.
3206 * To avoid copying over an entire frame to align, we allocate a new mbuf and
3207 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
3208 * existing mbuf chain.
3209 *
3210 * Be aware, best performance of the 8254x is achived only when jumbo frame is
3211 * not used at all on architectures with strict alignment.
3212 */
3213static int
3214em_fixup_rx(struct adapter *adapter)
3215{
3216	struct mbuf *m, *n;
3217	int error;
3218
3219	error = 0;
3220	m = adapter->fmp;
3221	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
3222		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
3223		m->m_data += ETHER_HDR_LEN;
3224	} else {
3225		MGETHDR(n, M_DONTWAIT, MT_DATA);
3226		if (n != NULL) {
3227			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
3228			m->m_data += ETHER_HDR_LEN;
3229			m->m_len -= ETHER_HDR_LEN;
3230			n->m_len = ETHER_HDR_LEN;
3231			M_MOVE_PKTHDR(n, m);
3232			n->m_next = m;
3233			adapter->fmp = n;
3234		} else {
3235			adapter->dropped_pkts++;
3236			m_freem(adapter->fmp);
3237			adapter->fmp = NULL;
3238			error = ENOMEM;
3239		}
3240	}
3241
3242	return (error);
3243}
3244#endif
3245
3246/*********************************************************************
3247 *
3248 *  Verify that the hardware indicated that the checksum is valid.
3249 *  Inform the stack about the status of checksum so that stack
3250 *  doesn't spend time verifying the checksum.
3251 *
3252 *********************************************************************/
3253static void
3254em_receive_checksum(struct adapter *adapter,
3255		    struct em_rx_desc *rx_desc,
3256		    struct mbuf *mp)
3257{
3258	/* 82543 or newer only */
3259	if ((adapter->hw.mac_type < em_82543) ||
3260	    /* Ignore Checksum bit is set */
3261	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3262		mp->m_pkthdr.csum_flags = 0;
3263		return;
3264	}
3265
3266	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3267		/* Did it pass? */
3268		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3269			/* IP Checksum Good */
3270			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3271			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3272
3273		} else {
3274			mp->m_pkthdr.csum_flags = 0;
3275		}
3276	}
3277
3278	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3279		/* Did it pass? */
3280		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3281			mp->m_pkthdr.csum_flags |=
3282			(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3283			mp->m_pkthdr.csum_data = htons(0xffff);
3284		}
3285	}
3286
3287	return;
3288}
3289
3290
3291static void
3292em_enable_vlans(struct adapter *adapter)
3293{
3294	uint32_t ctrl;
3295
3296	E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
3297
3298	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3299	ctrl |= E1000_CTRL_VME;
3300	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3301
3302	return;
3303}
3304
3305static void
3306em_disable_vlans(struct adapter *adapter)
3307{
3308	uint32_t ctrl;
3309
3310	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3311	ctrl &= ~E1000_CTRL_VME;
3312	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3313
3314	return;
3315}
3316
3317static void
3318em_enable_intr(struct adapter * adapter)
3319{
3320	E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
3321	return;
3322}
3323
3324static void
3325em_disable_intr(struct adapter *adapter)
3326{
3327	/*
3328	 * The first version of 82542 had an errata where when link was forced
3329	 * it would stay up even up even if the cable was disconnected.
3330	 * Sequence errors were used to detect the disconnect and then the
3331	 * driver would unforce the link. This code in the in the ISR. For this
3332	 * to work correctly the Sequence error interrupt had to be enabled
3333	 * all the time.
3334	 */
3335
3336	if (adapter->hw.mac_type == em_82542_rev2_0)
3337	    E1000_WRITE_REG(&adapter->hw, IMC,
3338	        (0xffffffff & ~E1000_IMC_RXSEQ));
3339	else
3340	    E1000_WRITE_REG(&adapter->hw, IMC,
3341	        0xffffffff);
3342	return;
3343}
3344
3345static int
3346em_is_valid_ether_addr(u_int8_t *addr)
3347{
3348        char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3349
3350        if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3351                return (FALSE);
3352        }
3353
3354        return(TRUE);
3355}
3356
3357void
3358em_write_pci_cfg(struct em_hw *hw,
3359		      uint32_t reg,
3360		      uint16_t *value)
3361{
3362	pci_write_config(((struct em_osdep *)hw->back)->dev, reg,
3363			 *value, 2);
3364}
3365
3366void
3367em_read_pci_cfg(struct em_hw *hw, uint32_t reg,
3368		     uint16_t *value)
3369{
3370	*value = pci_read_config(((struct em_osdep *)hw->back)->dev,
3371				 reg, 2);
3372	return;
3373}
3374
3375void
3376em_pci_set_mwi(struct em_hw *hw)
3377{
3378        pci_write_config(((struct em_osdep *)hw->back)->dev,
3379                         PCIR_COMMAND,
3380                         (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
3381        return;
3382}
3383
3384void
3385em_pci_clear_mwi(struct em_hw *hw)
3386{
3387        pci_write_config(((struct em_osdep *)hw->back)->dev,
3388                         PCIR_COMMAND,
3389                         (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
3390        return;
3391}
3392
3393/*********************************************************************
3394* 82544 Coexistence issue workaround.
3395*    There are 2 issues.
3396*       1. Transmit Hang issue.
3397*    To detect this issue, following equation can be used...
3398*          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3399*          If SUM[3:0] is in between 1 to 4, we will have this issue.
3400*
3401*       2. DAC issue.
3402*    To detect this issue, following equation can be used...
3403*          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3404*          If SUM[3:0] is in between 9 to c, we will have this issue.
3405*
3406*
3407*    WORKAROUND:
3408*          Make sure we do not have ending address as 1,2,3,4(Hang) or 9,a,b,c (DAC)
3409*
3410*** *********************************************************************/
3411static u_int32_t
3412em_fill_descriptors (bus_addr_t address,
3413                              u_int32_t length,
3414                              PDESC_ARRAY desc_array)
3415{
3416        /* Since issue is sensitive to length and address.*/
3417        /* Let us first check the address...*/
3418        u_int32_t safe_terminator;
3419        if (length <= 4) {
3420                desc_array->descriptor[0].address = address;
3421                desc_array->descriptor[0].length = length;
3422                desc_array->elements = 1;
3423                return desc_array->elements;
3424        }
3425        safe_terminator = (u_int32_t)((((u_int32_t)address & 0x7) + (length & 0xF)) & 0xF);
3426        /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
3427        if (safe_terminator == 0   ||
3428        (safe_terminator > 4   &&
3429        safe_terminator < 9)   ||
3430        (safe_terminator > 0xC &&
3431        safe_terminator <= 0xF)) {
3432                desc_array->descriptor[0].address = address;
3433                desc_array->descriptor[0].length = length;
3434                desc_array->elements = 1;
3435                return desc_array->elements;
3436        }
3437
3438        desc_array->descriptor[0].address = address;
3439        desc_array->descriptor[0].length = length - 4;
3440        desc_array->descriptor[1].address = address + (length - 4);
3441        desc_array->descriptor[1].length = 4;
3442        desc_array->elements = 2;
3443        return desc_array->elements;
3444}
3445
3446/**********************************************************************
3447 *
3448 *  Update the board statistics counters.
3449 *
3450 **********************************************************************/
static void
em_update_stats_counters(struct adapter *adapter)
{
	struct ifnet   *ifp;

	/*
	 * Harvest the hardware statistics registers into the softc.
	 * These registers clear on read, so each read is accumulated.
	 * Symbol/sequence error counters are only meaningful on copper
	 * or when the link is up.
	 */
	if(adapter->hw.media_type == em_media_type_copper ||
	   (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
		adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
	}
	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
	adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);

	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
	adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
	adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
	adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
	adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
	adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);

	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
	adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);

	adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
	adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
	adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
	adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);

	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);

	/* These counters only exist on 82543 and later MACs. */
	if (adapter->hw.mac_type >= em_82543) {
		adapter->stats.algnerrc +=
		E1000_READ_REG(&adapter->hw, ALGNERRC);
		adapter->stats.rxerrc +=
		E1000_READ_REG(&adapter->hw, RXERRC);
		adapter->stats.tncrs +=
		E1000_READ_REG(&adapter->hw, TNCRS);
		adapter->stats.cexterr +=
		E1000_READ_REG(&adapter->hw, CEXTERR);
		adapter->stats.tsctc +=
		E1000_READ_REG(&adapter->hw, TSCTC);
		adapter->stats.tsctfc +=
		E1000_READ_REG(&adapter->hw, TSCTFC);
	}
	ifp = adapter->ifp;

	/* Derive the ifnet counters from the accumulated statistics. */
	ifp->if_collisions = adapter->stats.colc;

	/* Rx Errors */
	ifp->if_ierrors =
	adapter->dropped_pkts +
	adapter->stats.rxerrc +
	adapter->stats.crcerrs +
	adapter->stats.algnerrc +
	adapter->stats.rlec +
	adapter->stats.mpc + adapter->stats.cexterr;

	/* Tx Errors */
	ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol +
	    adapter->watchdog_events;

}
3549
3550
3551/**********************************************************************
3552 *
3553 *  This routine is called only when em_display_debug_stats is enabled.
3554 *  This routine provides a way to take a look at important statistics
3555 *  maintained by the driver and hardware.
3556 *
3557 **********************************************************************/
3558static void
3559em_print_debug_info(struct adapter *adapter)
3560{
3561	int unit = adapter->unit;
3562	uint8_t *hw_addr = adapter->hw.hw_addr;
3563
3564	printf("em%d: Adapter hardware address = %p \n", unit, hw_addr);
3565	printf("em%d: CTRL = 0x%x RCTL = 0x%x \n", unit,
3566	    E1000_READ_REG(&adapter->hw, CTRL),
3567	    E1000_READ_REG(&adapter->hw, RCTL));
3568	printf("em%d: Packet buffer = Tx=%dk Rx=%dk \n", unit,
3569	    ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff0000) >> 16),\
3570	    (E1000_READ_REG(&adapter->hw, PBA) & 0xffff) );
3571	printf("em%d: Flow control watermarks high = %d low = %d\n", unit,
3572	    adapter->hw.fc_high_water,
3573	    adapter->hw.fc_low_water);
3574	printf("em%d: tx_int_delay = %d, tx_abs_int_delay = %d\n", unit,
3575	    E1000_READ_REG(&adapter->hw, TIDV),
3576	    E1000_READ_REG(&adapter->hw, TADV));
3577	printf("em%d: rx_int_delay = %d, rx_abs_int_delay = %d\n", unit,
3578	    E1000_READ_REG(&adapter->hw, RDTR),
3579	    E1000_READ_REG(&adapter->hw, RADV));
3580	printf("em%d: fifo workaround = %lld, fifo_reset_count = %lld\n",
3581	    unit, (long long)adapter->tx_fifo_wrk_cnt,
3582	    (long long)adapter->tx_fifo_reset_cnt);
3583	printf("em%d: hw tdh = %d, hw tdt = %d\n", unit,
3584	    E1000_READ_REG(&adapter->hw, TDH),
3585	    E1000_READ_REG(&adapter->hw, TDT));
3586	printf("em%d: Num Tx descriptors avail = %d\n", unit,
3587	    adapter->num_tx_desc_avail);
3588	printf("em%d: Tx Descriptors not avail1 = %ld\n", unit,
3589	    adapter->no_tx_desc_avail1);
3590	printf("em%d: Tx Descriptors not avail2 = %ld\n", unit,
3591	    adapter->no_tx_desc_avail2);
3592	printf("em%d: Std mbuf failed = %ld\n", unit,
3593	    adapter->mbuf_alloc_failed);
3594	printf("em%d: Std mbuf cluster failed = %ld\n", unit,
3595	    adapter->mbuf_cluster_failed);
3596	printf("em%d: Driver dropped packets = %ld\n", unit,
3597	    adapter->dropped_pkts);
3598
3599	return;
3600}
3601
3602static void
3603em_print_hw_stats(struct adapter *adapter)
3604{
3605        int unit = adapter->unit;
3606
3607        printf("em%d: Excessive collisions = %lld\n", unit,
3608               (long long)adapter->stats.ecol);
3609        printf("em%d: Symbol errors = %lld\n", unit,
3610               (long long)adapter->stats.symerrs);
3611        printf("em%d: Sequence errors = %lld\n", unit,
3612               (long long)adapter->stats.sec);
3613        printf("em%d: Defer count = %lld\n", unit,
3614               (long long)adapter->stats.dc);
3615
3616        printf("em%d: Missed Packets = %lld\n", unit,
3617               (long long)adapter->stats.mpc);
3618        printf("em%d: Receive No Buffers = %lld\n", unit,
3619               (long long)adapter->stats.rnbc);
3620        printf("em%d: Receive length errors = %lld\n", unit,
3621               (long long)adapter->stats.rlec);
3622        printf("em%d: Receive errors = %lld\n", unit,
3623               (long long)adapter->stats.rxerrc);
3624        printf("em%d: Crc errors = %lld\n", unit,
3625               (long long)adapter->stats.crcerrs);
3626        printf("em%d: Alignment errors = %lld\n", unit,
3627               (long long)adapter->stats.algnerrc);
3628        printf("em%d: Carrier extension errors = %lld\n", unit,
3629               (long long)adapter->stats.cexterr);
3630	printf("em%d: RX overruns = %ld\n", unit, adapter->rx_overruns);
3631	printf("em%d: watchdog timeouts = %ld\n", unit,
3632		adapter->watchdog_events);
3633
3634        printf("em%d: XON Rcvd = %lld\n", unit,
3635               (long long)adapter->stats.xonrxc);
3636        printf("em%d: XON Xmtd = %lld\n", unit,
3637               (long long)adapter->stats.xontxc);
3638        printf("em%d: XOFF Rcvd = %lld\n", unit,
3639               (long long)adapter->stats.xoffrxc);
3640        printf("em%d: XOFF Xmtd = %lld\n", unit,
3641               (long long)adapter->stats.xofftxc);
3642
3643        printf("em%d: Good Packets Rcvd = %lld\n", unit,
3644               (long long)adapter->stats.gprc);
3645        printf("em%d: Good Packets Xmtd = %lld\n", unit,
3646               (long long)adapter->stats.gptc);
3647
3648        return;
3649}
3650
3651static int
3652em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3653{
3654        int error;
3655        int result;
3656        struct adapter *adapter;
3657
3658        result = -1;
3659        error = sysctl_handle_int(oidp, &result, 0, req);
3660
3661        if (error || !req->newptr)
3662                return (error);
3663
3664        if (result == 1) {
3665                adapter = (struct adapter *)arg1;
3666                em_print_debug_info(adapter);
3667        }
3668
3669        return error;
3670}
3671
3672
3673static int
3674em_sysctl_stats(SYSCTL_HANDLER_ARGS)
3675{
3676        int error;
3677        int result;
3678        struct adapter *adapter;
3679
3680        result = -1;
3681        error = sysctl_handle_int(oidp, &result, 0, req);
3682
3683        if (error || !req->newptr)
3684                return (error);
3685
3686        if (result == 1) {
3687                adapter = (struct adapter *)arg1;
3688                em_print_hw_stats(adapter);
3689        }
3690
3691        return error;
3692}
3693
static int
em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
{
	struct em_int_delay_info *info;
	struct adapter *adapter;
	u_int32_t regval;
	int error;
	int usecs;
	int ticks;

	/*
	 * Sysctl handler: converts a user-supplied interrupt delay in
	 * microseconds to hardware ticks and programs the delay register
	 * recorded in info->offset.
	 */
	info = (struct em_int_delay_info *)arg1;
	usecs = info->value;
	error = sysctl_handle_int(oidp, &usecs, 0, req);
	if (error != 0 || req->newptr == NULL)
		return error;
	/* The delay field is 16 bits wide; reject out-of-range values. */
	if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
		return EINVAL;
	info->value = usecs;
	ticks = E1000_USECS_TO_TICKS(usecs);

	adapter = info->adapter;

	EM_LOCK(adapter);
	/* Read-modify-write: only the low 16 bits carry the delay. */
	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
	regval = (regval & ~0xffff) | (ticks & 0xffff);
	/* Handle a few special cases. */
	switch (info->offset) {
	case E1000_RDTR:
	case E1000_82542_RDTR:
		regval |= E1000_RDT_FPDB;
		break;
	case E1000_TIDV:
	case E1000_82542_TIDV:
		if (ticks == 0) {
			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
			/* Don't write 0 into the TIDV register. */
			regval++;
		} else
			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
		break;
	}
	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
	EM_UNLOCK(adapter);
	return 0;
}
3739
3740static void
3741em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
3742    const char *description, struct em_int_delay_info *info,
3743    int offset, int value)
3744{
3745	info->adapter = adapter;
3746	info->offset = offset;
3747	info->value = value;
3748	SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
3749	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
3750	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
3751	    info, 0, em_sysctl_int_delay, "I", description);
3752}
3753
3754#ifndef NO_EM_FASTINTR
3755static void
3756em_add_int_process_limit(struct adapter *adapter, const char *name,
3757    const char *description, int *limit, int value)
3758{
3759	*limit = value;
3760	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
3761	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
3762	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
3763}
3764#endif
3765