if_em.c revision 154204
1/**************************************************************************
2
3Copyright (c) 2001-2005, Intel Corporation
4All rights reserved.
5
6Redistribution and use in source and binary forms, with or without
7modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10    this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13    notice, this list of conditions and the following disclaimer in the
14    documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17    contributors may be used to endorse or promote products derived from
18    this software without specific prior written permission.
19
20THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30POSSIBILITY OF SUCH DAMAGE.
31
32***************************************************************************/
33
34/*$FreeBSD: head/sys/dev/em/if_em.c 154204 2006-01-11 00:30:25Z scottl $*/
35
36#ifdef HAVE_KERNEL_OPTION_HEADERS
37#include "opt_device_polling.h"
38#endif
39
40#include <dev/em/if_em.h>
41
42/*********************************************************************
43 *  Set this to one to display debug statistics
44 *********************************************************************/
int             em_display_debug_stats = 0;	/* non-zero enables debug statistics display */
46
47/*********************************************************************
48 *  Driver version
49 *********************************************************************/
50
char em_driver_version[] = "Version - 3.2.18";	/* appended to the device description by em_probe() */
52
53
54/*********************************************************************
55 *  PCI Device ID Table
56 *
57 *  Used by probe to select devices to load on
58 *  Last field stores an index into em_strings
59 *  Last entry must be all 0s
60 *
61 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
62 *********************************************************************/
63
64static em_vendor_info_t em_vendor_info_array[] =
65{
66        /* Intel(R) PRO/1000 Network Connection */
67        { 0x8086, E1000_DEV_ID_82540EM,             PCI_ANY_ID, PCI_ANY_ID, 0},
68        { 0x8086, E1000_DEV_ID_82540EM_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
69        { 0x8086, E1000_DEV_ID_82540EP,             PCI_ANY_ID, PCI_ANY_ID, 0},
70        { 0x8086, E1000_DEV_ID_82540EP_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
71        { 0x8086, E1000_DEV_ID_82540EP_LP,          PCI_ANY_ID, PCI_ANY_ID, 0},
72
73        { 0x8086, E1000_DEV_ID_82541EI,             PCI_ANY_ID, PCI_ANY_ID, 0},
74        { 0x8086, E1000_DEV_ID_82541ER,             PCI_ANY_ID, PCI_ANY_ID, 0},
75        { 0x8086, E1000_DEV_ID_82541ER_LOM,             PCI_ANY_ID, PCI_ANY_ID, 0},
76        { 0x8086, E1000_DEV_ID_82541EI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
77        { 0x8086, E1000_DEV_ID_82541GI,             PCI_ANY_ID, PCI_ANY_ID, 0},
78        { 0x8086, E1000_DEV_ID_82541GI_LF,          PCI_ANY_ID, PCI_ANY_ID, 0},
79        { 0x8086, E1000_DEV_ID_82541GI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
80
81        { 0x8086, E1000_DEV_ID_82542,               PCI_ANY_ID, PCI_ANY_ID, 0},
82
83        { 0x8086, E1000_DEV_ID_82543GC_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
84        { 0x8086, E1000_DEV_ID_82543GC_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
85
86        { 0x8086, E1000_DEV_ID_82544EI_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
87        { 0x8086, E1000_DEV_ID_82544EI_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
88        { 0x8086, E1000_DEV_ID_82544GC_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
89        { 0x8086, E1000_DEV_ID_82544GC_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
90
91        { 0x8086, E1000_DEV_ID_82545EM_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
92        { 0x8086, E1000_DEV_ID_82545EM_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
93        { 0x8086, E1000_DEV_ID_82545GM_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
94        { 0x8086, E1000_DEV_ID_82545GM_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
95        { 0x8086, E1000_DEV_ID_82545GM_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
96
97        { 0x8086, E1000_DEV_ID_82546EB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
98        { 0x8086, E1000_DEV_ID_82546EB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
99        { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
100        { 0x8086, E1000_DEV_ID_82546GB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
101        { 0x8086, E1000_DEV_ID_82546GB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
102        { 0x8086, E1000_DEV_ID_82546GB_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
103        { 0x8086, E1000_DEV_ID_82546GB_PCIE,        PCI_ANY_ID, PCI_ANY_ID, 0},
104        { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
105
106        { 0x8086, E1000_DEV_ID_82547EI,             PCI_ANY_ID, PCI_ANY_ID, 0},
107        { 0x8086, E1000_DEV_ID_82547EI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
108        { 0x8086, E1000_DEV_ID_82547GI,             PCI_ANY_ID, PCI_ANY_ID, 0},
109
110	{ 0x8086, E1000_DEV_ID_82571EB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
111	{ 0x8086, E1000_DEV_ID_82571EB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
112	{ 0x8086, E1000_DEV_ID_82571EB_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
113
114	{ 0x8086, E1000_DEV_ID_82572EI_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
115	{ 0x8086, E1000_DEV_ID_82572EI_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
116	{ 0x8086, E1000_DEV_ID_82572EI_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
117
118        { 0x8086, E1000_DEV_ID_82573E,              PCI_ANY_ID, PCI_ANY_ID, 0},
119        { 0x8086, E1000_DEV_ID_82573E_IAMT,         PCI_ANY_ID, PCI_ANY_ID, 0},
120        { 0x8086, E1000_DEV_ID_82573L,              PCI_ANY_ID, PCI_ANY_ID, 0},
121
122        /* required last entry */
123        { 0, 0, 0, 0, 0}
124};
125
126/*********************************************************************
127 *  Table of branding strings for all supported NICs.
128 *********************************************************************/
129
static char *em_strings[] = {
	"Intel(R) PRO/1000 Network Connection"	/* index 0; referenced by every em_vendor_info_array entry */
};
133
134/*********************************************************************
135 *  Function prototypes
136 *********************************************************************/
137static int  em_probe(device_t);
138static int  em_attach(device_t);
139static int  em_detach(device_t);
140static int  em_shutdown(device_t);
141static int  em_suspend(device_t);
142static int  em_resume(device_t);
143static void em_intr(void *);
144#ifndef NO_EM_FASTINTR
145static void em_intr_fast(void *);
146#endif
147static void em_start(struct ifnet *);
148static void em_start_locked(struct ifnet *ifp);
149static int  em_ioctl(struct ifnet *, u_long, caddr_t);
150static void em_watchdog(struct ifnet *);
151static void em_init(void *);
152static void em_init_locked(struct adapter *);
153static void em_stop(void *);
154static void em_media_status(struct ifnet *, struct ifmediareq *);
155static int  em_media_change(struct ifnet *);
156static void em_identify_hardware(struct adapter *);
157static int  em_allocate_pci_resources(struct adapter *);
158static void em_free_pci_resources(struct adapter *);
159static void em_local_timer(void *);
160static int  em_hardware_init(struct adapter *);
161static void em_setup_interface(device_t, struct adapter *);
162static int  em_setup_transmit_structures(struct adapter *);
163static void em_initialize_transmit_unit(struct adapter *);
164static int  em_setup_receive_structures(struct adapter *);
165static void em_initialize_receive_unit(struct adapter *);
166static void em_enable_intr(struct adapter *);
167static void em_disable_intr(struct adapter *);
168static void em_free_transmit_structures(struct adapter *);
169static void em_free_receive_structures(struct adapter *);
170static void em_update_stats_counters(struct adapter *);
171static void em_clean_transmit_interrupts(struct adapter *);
172static int  em_allocate_receive_structures(struct adapter *);
173static int  em_allocate_transmit_structures(struct adapter *);
174static int em_process_receive_interrupts(struct adapter *, int);
175#ifndef __NO_STRICT_ALIGNMENT
176static int  em_fixup_rx(struct adapter *);
177#endif
178static void em_receive_checksum(struct adapter *,
179				struct em_rx_desc *,
180				struct mbuf *);
181static void em_transmit_checksum_setup(struct adapter *,
182				       struct mbuf *,
183				       u_int32_t *,
184				       u_int32_t *);
185static void em_set_promisc(struct adapter *);
186static void em_disable_promisc(struct adapter *);
187static void em_set_multi(struct adapter *);
188static void em_print_hw_stats(struct adapter *);
189static void em_print_link_status(struct adapter *);
190static int  em_get_buf(int i, struct adapter *,
191		       struct mbuf *);
192static void em_enable_vlans(struct adapter *);
193static void em_disable_vlans(struct adapter *);
194static int  em_encap(struct adapter *, struct mbuf **);
195static void em_smartspeed(struct adapter *);
196static int  em_82547_fifo_workaround(struct adapter *, int);
197static void em_82547_update_fifo_head(struct adapter *, int);
198static int  em_82547_tx_fifo_reset(struct adapter *);
199static void em_82547_move_tail(void *arg);
200static void em_82547_move_tail_locked(struct adapter *);
201static int  em_dma_malloc(struct adapter *, bus_size_t,
202			  struct em_dma_alloc *, int);
203static void em_dma_free(struct adapter *, struct em_dma_alloc *);
204static void em_print_debug_info(struct adapter *);
205static int  em_is_valid_ether_addr(u_int8_t *);
206static int  em_sysctl_stats(SYSCTL_HANDLER_ARGS);
207static int  em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
208static u_int32_t em_fill_descriptors (bus_addr_t address,
209				      u_int32_t length,
210				      PDESC_ARRAY desc_array);
211static int  em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
212static void em_add_int_delay_sysctl(struct adapter *, const char *,
213				    const char *, struct em_int_delay_info *,
214				    int, int);
215#ifndef NO_EM_FASTINTR
216static void em_add_int_process_limit(struct adapter *, const char *,
217				     const char *, int *, int);
218static void em_handle_rxtx(void *context, int pending);
219static void em_handle_link(void *context, int pending);
220#endif
221#ifdef DEVICE_POLLING
222static poll_handler_t em_poll;
223#endif
224
225/*********************************************************************
226 *  FreeBSD Device Interface Entry Points
227 *********************************************************************/
228
/* newbus method table wiring this driver's entry points to the device interface */
static device_method_t em_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, em_probe),
	DEVMETHOD(device_attach, em_attach),
	DEVMETHOD(device_detach, em_detach),
	DEVMETHOD(device_shutdown, em_shutdown),
	DEVMETHOD(device_suspend, em_suspend),
	DEVMETHOD(device_resume, em_resume),
	{0, 0}	/* required terminator */
};
239
/* Driver declaration: name, methods, and per-device softc size. */
static driver_t em_driver = {
	"em", em_methods, sizeof(struct adapter ),
};

static devclass_t em_devclass;
/* Register on the pci bus and declare module dependencies. */
DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
MODULE_DEPEND(em, pci, 1, 1, 1);
MODULE_DEPEND(em, ether, 1, 1, 1);
248
249/*********************************************************************
250 *  Tunable default values.
251 *********************************************************************/
252
253#define E1000_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
254#define E1000_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)
255
256static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
257static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
258static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
259static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
260static int em_rxd = EM_DEFAULT_RXD;
261static int em_txd = EM_DEFAULT_TXD;
262
263TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
264TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
265TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
266TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
267TUNABLE_INT("hw.em.rxd", &em_rxd);
268TUNABLE_INT("hw.em.txd", &em_txd);
269#ifndef NO_EM_FASTINTR
270static int em_rx_process_limit = 100;
271TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
272#endif
273
274/*********************************************************************
275 *  Device identification routine
276 *
277 *  em_probe determines if the driver should be loaded on
278 *  adapter based on PCI vendor/device id of the adapter.
279 *
280 *  return BUS_PROBE_DEFAULT on success, positive on failure
281 *********************************************************************/
282
283static int
284em_probe(device_t dev)
285{
286	em_vendor_info_t *ent;
287
288	u_int16_t       pci_vendor_id = 0;
289	u_int16_t       pci_device_id = 0;
290	u_int16_t       pci_subvendor_id = 0;
291	u_int16_t       pci_subdevice_id = 0;
292	char            adapter_name[60];
293
294	INIT_DEBUGOUT("em_probe: begin");
295
296	pci_vendor_id = pci_get_vendor(dev);
297	if (pci_vendor_id != EM_VENDOR_ID)
298		return(ENXIO);
299
300	pci_device_id = pci_get_device(dev);
301	pci_subvendor_id = pci_get_subvendor(dev);
302	pci_subdevice_id = pci_get_subdevice(dev);
303
304	ent = em_vendor_info_array;
305	while (ent->vendor_id != 0) {
306		if ((pci_vendor_id == ent->vendor_id) &&
307		    (pci_device_id == ent->device_id) &&
308
309		    ((pci_subvendor_id == ent->subvendor_id) ||
310		     (ent->subvendor_id == PCI_ANY_ID)) &&
311
312		    ((pci_subdevice_id == ent->subdevice_id) ||
313		     (ent->subdevice_id == PCI_ANY_ID))) {
314			sprintf(adapter_name, "%s %s",
315				em_strings[ent->index],
316				em_driver_version);
317			device_set_desc_copy(dev, adapter_name);
318			return(BUS_PROBE_DEFAULT);
319		}
320		ent++;
321	}
322
323	return(ENXIO);
324}
325
326/*********************************************************************
327 *  Device initialization routine
328 *
329 *  The attach entry point is called when the driver is being loaded.
330 *  This routine identifies the type of hardware, allocates all resources
331 *  and initializes the hardware.
332 *
333 *  return 0 on success, positive on failure
334 *********************************************************************/
335
static int
em_attach(device_t dev)
{
	struct adapter * adapter;
	int             tsize, rsize;	/* descriptor ring sizes, in bytes */
	int		error = 0;

	INIT_DEBUGOUT("em_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	if (!(adapter = device_get_softc(dev))) {
		printf("em: adapter structure allocation failed\n");
		return(ENOMEM);
	}
	bzero(adapter, sizeof(struct adapter ));
	adapter->dev = dev;
	adapter->osdep.dev = dev;
	adapter->unit = device_get_unit(dev);
	EM_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL stuff */
        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                        OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
                        (void *)adapter, 0,
                        em_sysctl_debug_info, "I", "Debug Information");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                        OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
                        (void *)adapter, 0,
                        em_sysctl_stats, "I", "Statistics");

	callout_init(&adapter->timer, CALLOUT_MPSAFE);
	callout_init(&adapter->tx_fifo_timer, CALLOUT_MPSAFE);

	/* Determine hardware revision */
	em_identify_hardware(adapter);

	/* Set up some sysctls for the tunable interrupt delays */
	em_add_int_delay_sysctl(adapter, "rx_int_delay",
	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
	    E1000_REG_OFFSET(&adapter->hw, RDTR), em_rx_int_delay_dflt);
	em_add_int_delay_sysctl(adapter, "tx_int_delay",
	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
	    E1000_REG_OFFSET(&adapter->hw, TIDV), em_tx_int_delay_dflt);
	/* Absolute delay limit registers are only set up for 82540+. */
	if (adapter->hw.mac_type >= em_82540) {
		em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
		    "receive interrupt delay limit in usecs",
		    &adapter->rx_abs_int_delay,
		    E1000_REG_OFFSET(&adapter->hw, RADV),
		    em_rx_abs_int_delay_dflt);
		em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
		    "transmit interrupt delay limit in usecs",
		    &adapter->tx_abs_int_delay,
		    E1000_REG_OFFSET(&adapter->hw, TADV),
		    em_tx_abs_int_delay_dflt);
	}

	/* Sysctls for limiting the amount of work done in the taskqueue */
#ifndef NO_EM_FASTINTR
	em_add_int_process_limit(adapter, "rx_processing_limit",
	    "max number of rx packets to process", &adapter->rx_process_limit,
	    em_rx_process_limit);
#endif

	/*
	 * Validate number of transmit and receive descriptors. It
	 * must not exceed hardware maximum, and must be multiple
	 * of E1000_DBA_ALIGN.  Invalid tunables fall back to the
	 * defaults with a console warning rather than failing attach.
	 */
	if (((em_txd * sizeof(struct em_tx_desc)) % E1000_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac_type >= em_82544 && em_txd > EM_MAX_TXD) ||
	    (adapter->hw.mac_type < em_82544 && em_txd > EM_MAX_TXD_82543) ||
	    (em_txd < EM_MIN_TXD)) {
		printf("em%d: Using %d TX descriptors instead of %d!\n",
		    adapter->unit, EM_DEFAULT_TXD, em_txd);
		adapter->num_tx_desc = EM_DEFAULT_TXD;
	} else
		adapter->num_tx_desc = em_txd;
	if (((em_rxd * sizeof(struct em_rx_desc)) % E1000_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac_type >= em_82544 && em_rxd > EM_MAX_RXD) ||
	    (adapter->hw.mac_type < em_82544 && em_rxd > EM_MAX_RXD_82543) ||
	    (em_rxd < EM_MIN_RXD)) {
		printf("em%d: Using %d RX descriptors instead of %d!\n",
		    adapter->unit, EM_DEFAULT_RXD, em_rxd);
		adapter->num_rx_desc = EM_DEFAULT_RXD;
	} else
		adapter->num_rx_desc = em_rxd;

	/* Default link/PHY policy for the shared e1000 code. */
        adapter->hw.autoneg = DO_AUTO_NEG;
        adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
        adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
        adapter->hw.tbi_compatibility_en = TRUE;
        adapter->rx_buffer_len = EM_RXBUFFER_2048;

	adapter->hw.phy_init_script = 1;
	adapter->hw.phy_reset_disable = FALSE;

#ifndef EM_MASTER_SLAVE
	adapter->hw.master_slave = em_ms_hw_default;
#else
	adapter->hw.master_slave = EM_MASTER_SLAVE;
#endif
	/*
	 * Set the max frame size assuming standard ethernet
	 * sized frames
	 */
	adapter->hw.max_frame_size =
		ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	adapter->hw.min_frame_size =
		MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;

	/*
	 * This controls when hardware reports transmit completion
	 * status.
	 */
	adapter->hw.report_tx_early = 1;

	if (em_allocate_pci_resources(adapter)) {
		printf("em%d: Allocation of PCI resources failed\n",
		       adapter->unit);
                error = ENXIO;
                goto err_pci;
	}


	/* Initialize eeprom parameters */
        em_init_eeprom_params(&adapter->hw);

	tsize = roundup2(adapter->num_tx_desc * sizeof(struct em_tx_desc),
	    E1000_DBA_ALIGN);

	/* Allocate Transmit Descriptor ring */
        if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
                printf("em%d: Unable to allocate tx_desc memory\n",
                       adapter->unit);
		error = ENOMEM;
                goto err_tx_desc;
        }
        adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;

	rsize = roundup2(adapter->num_rx_desc * sizeof(struct em_rx_desc),
	    E1000_DBA_ALIGN);

	/* Allocate Receive Descriptor ring */
        if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
                printf("em%d: Unable to allocate rx_desc memory\n",
                        adapter->unit);
		error = ENOMEM;
                goto err_rx_desc;
        }
        adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		printf("em%d: Unable to initialize the hardware\n",
		       adapter->unit);
		error = EIO;
                goto err_hw_init;
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (em_read_mac_addr(&adapter->hw) < 0) {
		printf("em%d: EEPROM read error while reading mac address\n",
		       adapter->unit);
		error = EIO;
                goto err_mac_addr;
	}

	if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
                printf("em%d: Invalid mac address\n", adapter->unit);
                error = EIO;
                goto err_mac_addr;
        }

	/* Setup OS specific network interface */
	em_setup_interface(dev, adapter);

	/* Initialize statistics */
	em_clear_hw_cntrs(&adapter->hw);
	em_update_stats_counters(adapter);
	adapter->hw.get_link_status = 1;
	em_check_for_link(&adapter->hw);

	if (bootverbose) {
		/* Print the link status */
		if (adapter->link_active == 1) {
			em_get_speed_and_duplex(&adapter->hw,
			    &adapter->link_speed, &adapter->link_duplex);
			printf("em%d:  Speed:%d Mbps  Duplex:%s\n",
			       adapter->unit,
			       adapter->link_speed,
			       adapter->link_duplex == FULL_DUPLEX ? "Full" :
				"Half");
		} else
			printf("em%d:  Speed:N/A  Duplex:N/A\n",
			    adapter->unit);
	}

	/* Identify 82544 on PCIX */
        em_get_bus_info(&adapter->hw);
        if(adapter->hw.bus_type == em_bus_type_pcix &&
           adapter->hw.mac_type == em_82544) {
                adapter->pcix_82544 = TRUE;
        }
        else {
                adapter->pcix_82544 = FALSE;
        }
	INIT_DEBUGOUT("em_attach: end");
	return(0);

	/* Error unwind: labels release resources in reverse order of
	 * acquisition; em_free_pci_resources() runs on every path. */
err_mac_addr:
err_hw_init:
        em_dma_free(adapter, &adapter->rxdma);
err_rx_desc:
        em_dma_free(adapter, &adapter->txdma);
err_tx_desc:
err_pci:
        em_free_pci_resources(adapter);
	EM_LOCK_DESTROY(adapter);
        return(error);

}
561
562/*********************************************************************
563 *  Device removal routine
564 *
565 *  The detach entry point is called when the driver is being removed.
566 *  This routine stops the adapter and deallocates all the resources
567 *  that were allocated for driver operation.
568 *
569 *  return 0 on success, positive on failure
570 *********************************************************************/
571
static int
em_detach(device_t dev)
{
	struct adapter * adapter = device_get_softc(dev);
	struct ifnet   *ifp = adapter->ifp;

	INIT_DEBUGOUT("em_detach: begin");

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* Tear down the interrupt and drain its taskqueue tasks before
	 * stopping the hardware, so no handler runs mid-teardown. */
	if (adapter->res_interrupt != NULL) {
		bus_teardown_intr(dev, adapter->res_interrupt,
				  adapter->int_handler_tag);
		bus_release_resource(dev, SYS_RES_IRQ, 0,
				     adapter->res_interrupt);
		adapter->res_interrupt = NULL;
		if (adapter->tq != NULL) {
			taskqueue_drain(adapter->tq, &adapter->rxtx_task);
			taskqueue_drain(taskqueue_fast, &adapter->link_task);
		}
	}
	EM_LOCK(adapter);
	adapter->in_detach = 1;		/* makes em_ioctl() a no-op from here on */
	em_stop(adapter);
	em_phy_hw_reset(&adapter->hw);
	EM_UNLOCK(adapter);
        ether_ifdetach(adapter->ifp);

	em_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(ifp);

	/* Free Transmit Descriptor ring */
        if (adapter->tx_desc_base) {
                em_dma_free(adapter, &adapter->txdma);
                adapter->tx_desc_base = NULL;
        }

        /* Free Receive Descriptor ring */
        if (adapter->rx_desc_base) {
                em_dma_free(adapter, &adapter->rxdma);
                adapter->rx_desc_base = NULL;
        }

	EM_LOCK_DESTROY(adapter);

	return(0);
}
623
624/*********************************************************************
625 *
626 *  Shutdown entry point
627 *
628 **********************************************************************/
629
630static int
631em_shutdown(device_t dev)
632{
633	struct adapter *adapter = device_get_softc(dev);
634	EM_LOCK(adapter);
635	em_stop(adapter);
636	EM_UNLOCK(adapter);
637	return(0);
638}
639
640/*
641 * Suspend/resume device methods.
642 */
643static int
644em_suspend(device_t dev)
645{
646	struct adapter *adapter = device_get_softc(dev);
647
648	EM_LOCK(adapter);
649	em_stop(adapter);
650	EM_UNLOCK(adapter);
651
652	return bus_generic_suspend(dev);
653}
654
655static int
656em_resume(device_t dev)
657{
658	struct adapter *adapter = device_get_softc(dev);
659	struct ifnet *ifp = adapter->ifp;
660
661	EM_LOCK(adapter);
662	em_init_locked(adapter);
663	if ((ifp->if_flags & IFF_UP) &&
664	    (ifp->if_drv_flags & IFF_DRV_RUNNING))
665		em_start_locked(ifp);
666	EM_UNLOCK(adapter);
667
668	return bus_generic_resume(dev);
669}
670
671
672/*********************************************************************
673 *  Transmit entry point
674 *
675 *  em_start is called by the stack to initiate a transmit.
676 *  The driver will remain in this routine as long as there are
677 *  packets to transmit and transmit resources are available.
678 *  In case resources are not available stack is notified and
679 *  the packet is requeued.
680 **********************************************************************/
681
static void
em_start_locked(struct ifnet *ifp)
{
        struct mbuf    *m_head;
        struct adapter *adapter = ifp->if_softc;

	/* Caller must hold the adapter lock. */
	mtx_assert(&adapter->mtx, MA_OWNED);

        if (!adapter->link_active)
                return;

	/* Drain the send queue until it is empty or encapsulation fails. */
        while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {

                IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);

                if (m_head == NULL) break;

		/*
		 * em_encap() can modify our pointer, and or make it NULL on
		 * failure.  In that event, we can't requeue.
		 */
		if (em_encap(adapter, &m_head)) {
			if (m_head == NULL)
				break;
			/* Out of resources: mark active and requeue intact. */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
                }

		/* Send a copy of the frame to the BPF listener */
		BPF_MTAP(ifp, m_head);

                /* Set timeout in case hardware has problems transmitting */
                ifp->if_timer = EM_TX_TIMEOUT;

        }
        return;
}
720
721static void
722em_start(struct ifnet *ifp)
723{
724	struct adapter *adapter = ifp->if_softc;
725
726	EM_LOCK(adapter);
727	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
728		em_start_locked(ifp);
729	EM_UNLOCK(adapter);
730	return;
731}
732
733/*********************************************************************
734 *  Ioctl entry point
735 *
736 *  em_ioctl is called when the user wants to configure the
737 *  interface.
738 *
739 *  return 0 on success, positive on failure
740 **********************************************************************/
741
static int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ifreq   *ifr = (struct ifreq *) data;
	struct adapter * adapter = ifp->if_softc;
	int error = 0;

	/* Reject everything once em_detach() has started tearing down. */
	if (adapter->in_detach) return(error);

	switch (command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
		ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
	    {
		int max_frame_size;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

		/* Per-MAC jumbo frame ceiling. */
		switch (adapter->hw.mac_type) {
		case em_82571:
		case em_82572:
			max_frame_size = 10500;
			break;
		case em_82573:
			/* 82573 does not support jumbo frames. */
			max_frame_size = ETHER_MAX_LEN;
			break;
		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		/* New MTU takes effect via a full reinit. */
		EM_LOCK(adapter);
		ifp->if_mtu = ifr->ifr_mtu;
		adapter->hw.max_frame_size =
		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
		em_init_locked(adapter);
		EM_UNLOCK(adapter);
		break;
	    }
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
		EM_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				em_init_locked(adapter);
			}

			/* Re-sync promiscuous state with the new flags. */
			em_disable_promisc(adapter);
			em_set_promisc(adapter);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				em_stop(adapter);
			}
		}
		EM_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			EM_LOCK(adapter);
			em_disable_intr(adapter);
			em_set_multi(adapter);
			if (adapter->hw.mac_type == em_82542_rev2_0) {
				em_initialize_receive_unit(adapter);
			}
#ifdef DEVICE_POLLING
			/* Leave interrupts off while polling is active. */
                        if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
				em_enable_intr(adapter);
			EM_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask, reinit;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;	/* bits being toggled */
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(em_poll, ifp);
				if (error)
					return(error);
				EM_LOCK(adapter);
				em_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				EM_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				EM_LOCK(adapter);
				em_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				EM_UNLOCK(adapter);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		/* Offload changes only take effect via reinit when running. */
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
			em_init(adapter);
		break;
	    }
	default:
		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)", (int)command);
		error = EINVAL;
	}

	return(error);
}
874
875/*********************************************************************
876 *  Watchdog entry point
877 *
878 *  This routine is called whenever hardware quits transmitting.
879 *
880 **********************************************************************/
881
882static void
883em_watchdog(struct ifnet *ifp)
884{
885	struct adapter * adapter;
886	adapter = ifp->if_softc;
887
888	EM_LOCK(adapter);
889	/* If we are in this routine because of pause frames, then
890	 * don't reset the hardware.
891	 */
892	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
893		ifp->if_timer = EM_TX_TIMEOUT;
894		EM_UNLOCK(adapter);
895		return;
896	}
897
898	if (em_check_for_link(&adapter->hw))
899		printf("em%d: watchdog timeout -- resetting\n", adapter->unit);
900
901	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
902	adapter->watchdog_events++;
903
904	em_init_locked(adapter);
905	EM_UNLOCK(adapter);
906}
907
908/*********************************************************************
909 *  Init entry point
910 *
911 *  This routine is used in two ways. It is used by the stack as
912 *  init entry point in network interface structure. It is also used
913 *  by the driver as a hw/sw initialization routine to get to a
914 *  consistent state.
915 *
 *  No return value; failures are logged and leave the adapter stopped.
917 **********************************************************************/
918
static void
em_init_locked(struct adapter * adapter)
{
	struct ifnet   *ifp;

	uint32_t	pba;
	ifp = adapter->ifp;

	INIT_DEBUGOUT("em_init: begin");

	/* Caller must already hold the adapter lock. */
	mtx_assert(&adapter->mtx, MA_OWNED);

	/* Quiesce the hardware before reprogramming it. */
	em_stop(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 */
	switch (adapter->hw.mac_type) {
	case em_82547:
	case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (adapter->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		/* 82547 keeps software FIFO state for the half-duplex
		 * hang workaround (see em_82547_move_tail_locked). */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	case em_82571: /* 82571: Total Packet Buffer is 48K */
	case em_82572: /* 82572: Total Packet Buffer is 48K */
			pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;
	case em_82573: /* 82573: Total Packet Buffer is 32K */
		/* Jumbo frames not supported */
			pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K.   */
		if(adapter->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}

	INIT_DEBUGOUT1("em_init: pba=%dK",pba);
	E1000_WRITE_REG(&adapter->hw, PBA, pba);

	/* Get the latest mac address, User can use a LAA */
        bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac_addr,
              ETHER_ADDR_LEN);

	/* Initialize the hardware; on failure the adapter stays stopped. */
	if (em_hardware_init(adapter)) {
		printf("em%d: Unable to initialize the hardware\n",
		       adapter->unit);
		return;
	}

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		em_enable_vlans(adapter);

	/* Prepare transmit descriptors and buffers */
	if (em_setup_transmit_structures(adapter)) {
		printf("em%d: Could not setup transmit structures\n",
		       adapter->unit);
		em_stop(adapter);
		return;
	}
	em_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	em_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (em_setup_receive_structures(adapter)) {
		printf("em%d: Could not setup receive structures\n",
		       adapter->unit);
		em_stop(adapter);
		return;
	}
	em_initialize_receive_unit(adapter);

	/* Don't lose promiscuous settings */
	em_set_promisc(adapter);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Advertise hardware checksum offload only where supported. */
	if (adapter->hw.mac_type >= em_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist = EM_CHECKSUM_FEATURES;
		else
			ifp->if_hwassist = 0;
	}

	/* Start the once-a-second link/stats timer. */
	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
	em_clear_hw_cntrs(&adapter->hw);
#ifdef DEVICE_POLLING
        /*
         * Only enable interrupts if we are not polling, make sure
         * they are off otherwise.
         */
        if (ifp->if_capenable & IFCAP_POLLING)
                em_disable_intr(adapter);
        else
#endif /* DEVICE_POLLING */
		em_enable_intr(adapter);

	/* Don't reset the phy next time init gets called */
	adapter->hw.phy_reset_disable = TRUE;

	return;
}
1034
static void
em_init(void *arg)
{
	struct adapter *sc = arg;

	/* Locked wrapper around em_init_locked() for external callers. */
	EM_LOCK(sc);
	em_init_locked(sc);
	EM_UNLOCK(sc);
}
1045
1046
1047#ifdef DEVICE_POLLING
1048static void
1049em_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
1050{
1051        struct adapter *adapter = ifp->if_softc;
1052        u_int32_t reg_icr;
1053
1054	mtx_assert(&adapter->mtx, MA_OWNED);
1055
1056        if (cmd == POLL_AND_CHECK_STATUS) {
1057                reg_icr = E1000_READ_REG(&adapter->hw, ICR);
1058                if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1059			callout_stop(&adapter->timer);
1060                        adapter->hw.get_link_status = 1;
1061                        em_check_for_link(&adapter->hw);
1062                        em_print_link_status(adapter);
1063			callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1064                }
1065        }
1066	em_process_receive_interrupts(adapter, count);
1067	em_clean_transmit_interrupts(adapter);
1068
1069        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1070                em_start_locked(ifp);
1071}
1072
1073static void
1074em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1075{
1076        struct adapter *adapter = ifp->if_softc;
1077
1078	EM_LOCK(adapter);
1079	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1080		em_poll_locked(ifp, cmd, count);
1081	EM_UNLOCK(adapter);
1082}
1083#endif /* DEVICE_POLLING */
1084
1085#ifndef NO_EM_FASTINTR
1086static void
1087em_handle_link(void *context, int pending)
1088{
1089	struct adapter	*adapter = context;
1090	struct ifnet *ifp;
1091
1092	ifp = adapter->ifp;
1093
1094	EM_LOCK(adapter);
1095
1096	callout_stop(&adapter->timer);
1097	adapter->hw.get_link_status = 1;
1098	em_check_for_link(&adapter->hw);
1099	em_print_link_status(adapter);
1100	callout_reset(&adapter->timer, hz, em_local_timer,
1101	    adapter);
1102	EM_UNLOCK(adapter);
1103}
1104
static void
em_handle_rxtx(void *context, int pending)
{
	struct adapter	*adapter = context;
	struct ifnet	*ifp;

	ifp = adapter->ifp;

	/*
	 * TODO:
	 * It should be possible to run the tx clean loop without the lock.
	 */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * RX processing runs before the lock is taken; a nonzero
		 * return requeues this task (presumably more packets are
		 * pending beyond rx_process_limit — TODO confirm contract).
		 */
		if (em_process_receive_interrupts(adapter,
		    adapter->rx_process_limit) != 0)
			taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
		EM_LOCK(adapter);
		em_clean_transmit_interrupts(adapter);

		/* Restart transmission if packets queued up meanwhile. */
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			em_start_locked(ifp);
		EM_UNLOCK(adapter);
	}

	/* Re-enable the interrupts masked off in em_intr_fast(). */
	em_enable_intr(adapter);
	return;
}
1132#endif
1133
1134/*********************************************************************
1135 *
1136 *  Interrupt Service routine
1137 *
1138 **********************************************************************/
1139#ifndef NO_EM_FASTINTR
static void
em_intr_fast(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp;
	uint32_t	reg_icr;

	ifp = adapter->ifp;

#ifdef DEVICE_POLLING
	/* In polling mode all work is driven from em_poll(). */
	if (ifp->if_capenable & IFCAP_POLLING) {
		return;
	}
#endif /* DEVICE_POLLING */

	reg_icr = E1000_READ_REG(&adapter->hw, ICR);

	/* Hot eject?  */
	if (reg_icr == 0xffffffff)
		return;

	/* Definitely not our interrupt.  */
	if (reg_icr == 0x0)
		return;

	/*
	 * Starting with the 82571 chip, bit 31 should be used to
	 * determine whether the interrupt belongs to us.
	 */
	if (adapter->hw.mac_type >= em_82571 &&
	    (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
		return;

	/*
	 * Mask interrupts until the taskqueue is finished running.  This is
	 * cheap, just assume that it is needed.  This also works around the
	 * MSI message reordering errata on certain systems.
	 */
	em_disable_intr(adapter);
	/* Defer rx/tx work to em_handle_rxtx(), which re-enables intrs. */
	taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))
		taskqueue_enqueue(taskqueue_fast, &adapter->link_task);

	/* Count receiver overruns; recovery happens via normal rx path. */
	if (reg_icr & E1000_ICR_RXO) {
		adapter->rx_overruns++;
	}
	return;
}
1190#endif
1191
static void
em_intr(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp;
	uint32_t	reg_icr;
	/* NOTE(review): wantinit is only consumed by the #if 0 block below,
	 * so it is currently write-only. */
	int		wantinit = 0;

	EM_LOCK(adapter);

	ifp = adapter->ifp;

#ifdef DEVICE_POLLING
	/* In polling mode all work is driven from em_poll(). */
	if (ifp->if_capenable & IFCAP_POLLING) {
		EM_UNLOCK(adapter);
		return;
	}
#endif /* DEVICE_POLLING */

	/* Loop until the hardware reports no further pending causes. */
	for (;;) {
		reg_icr = E1000_READ_REG(&adapter->hw, ICR);
		if (adapter->hw.mac_type >= em_82571 &&
		    (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
			break;
		else if (reg_icr == 0)
			break;

		/*
		 * XXX: some laptops trigger several spurious interrupts
		 * on em(4) when in the resume cycle. The ICR register
		 * reports all-ones value in this case. Processing such
		 * interrupts would lead to a freeze. I don't know why.
		 */
		if (reg_icr == 0xffffffff)
			break;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			/* -1 means no limit on rx packets processed. */
			em_process_receive_interrupts(adapter, -1);
			em_clean_transmit_interrupts(adapter);
		}

		/* Link status change */
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.get_link_status = 1;
			em_check_for_link(&adapter->hw);
			em_print_link_status(adapter);
			callout_reset(&adapter->timer, hz, em_local_timer,
			    adapter);
		}

		if (reg_icr & E1000_ICR_RXO) {
			adapter->rx_overruns++;
			wantinit = 1;
		}
	}
#if 0
	if (wantinit)
		em_init_locked(adapter);
#endif
	/* Restart transmission if packets queued up during processing. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		em_start_locked(ifp);

	EM_UNLOCK(adapter);
	return;
}
1259
1260
1261
1262/*********************************************************************
1263 *
1264 *  Media Ioctl callback
1265 *
1266 *  This routine is called whenever the user queries the status of
1267 *  the interface using ifconfig.
1268 *
1269 **********************************************************************/
1270static void
1271em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1272{
1273	struct adapter * adapter = ifp->if_softc;
1274
1275	INIT_DEBUGOUT("em_media_status: begin");
1276
1277	em_check_for_link(&adapter->hw);
1278	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1279		if (adapter->link_active == 0) {
1280			em_get_speed_and_duplex(&adapter->hw,
1281						&adapter->link_speed,
1282						&adapter->link_duplex);
1283			adapter->link_active = 1;
1284		}
1285	} else {
1286		if (adapter->link_active == 1) {
1287			adapter->link_speed = 0;
1288			adapter->link_duplex = 0;
1289			adapter->link_active = 0;
1290		}
1291	}
1292
1293	ifmr->ifm_status = IFM_AVALID;
1294	ifmr->ifm_active = IFM_ETHER;
1295
1296	if (!adapter->link_active)
1297		return;
1298
1299	ifmr->ifm_status |= IFM_ACTIVE;
1300
1301	if (adapter->hw.media_type == em_media_type_fiber) {
1302		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1303	} else {
1304		switch (adapter->link_speed) {
1305		case 10:
1306			ifmr->ifm_active |= IFM_10_T;
1307			break;
1308		case 100:
1309			ifmr->ifm_active |= IFM_100_TX;
1310			break;
1311		case 1000:
1312			ifmr->ifm_active |= IFM_1000_T;
1313			break;
1314		}
1315		if (adapter->link_duplex == FULL_DUPLEX)
1316			ifmr->ifm_active |= IFM_FDX;
1317		else
1318			ifmr->ifm_active |= IFM_HDX;
1319	}
1320	return;
1321}
1322
1323/*********************************************************************
1324 *
1325 *  Media Ioctl callback
1326 *
1327 *  This routine is called when the user changes speed/duplex using
 *  media/mediaopt option with ifconfig.
1329 *
1330 **********************************************************************/
1331static int
1332em_media_change(struct ifnet *ifp)
1333{
1334	struct adapter * adapter = ifp->if_softc;
1335	struct ifmedia  *ifm = &adapter->media;
1336
1337	INIT_DEBUGOUT("em_media_change: begin");
1338
1339	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1340		return(EINVAL);
1341
1342	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1343	case IFM_AUTO:
1344		adapter->hw.autoneg = DO_AUTO_NEG;
1345		adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1346		break;
1347	case IFM_1000_SX:
1348	case IFM_1000_T:
1349		adapter->hw.autoneg = DO_AUTO_NEG;
1350		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1351		break;
1352	case IFM_100_TX:
1353		adapter->hw.autoneg = FALSE;
1354		adapter->hw.autoneg_advertised = 0;
1355		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1356			adapter->hw.forced_speed_duplex = em_100_full;
1357		else
1358			adapter->hw.forced_speed_duplex	= em_100_half;
1359		break;
1360	case IFM_10_T:
1361		adapter->hw.autoneg = FALSE;
1362		adapter->hw.autoneg_advertised = 0;
1363		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1364			adapter->hw.forced_speed_duplex = em_10_full;
1365		else
1366			adapter->hw.forced_speed_duplex	= em_10_half;
1367		break;
1368	default:
1369		printf("em%d: Unsupported media type\n", adapter->unit);
1370	}
1371
1372	/* As the speed/duplex settings my have changed we need to
1373	 * reset the PHY.
1374	 */
1375	adapter->hw.phy_reset_disable = FALSE;
1376
1377	em_init(adapter);
1378
1379	return(0);
1380}
1381
1382/*********************************************************************
1383 *
1384 *  This routine maps the mbufs to tx descriptors.
1385 *
1386 *  return 0 on success, positive on failure
1387 **********************************************************************/
static int
em_encap(struct adapter *adapter, struct mbuf **m_headp)
{
        u_int32_t       txd_upper;
        u_int32_t       txd_lower, txd_used = 0, txd_saved = 0;
        int             i, j, error = 0;
	bus_dmamap_t	map;

	struct mbuf	*m_head;

	/* For 82544 Workaround */
	DESC_ARRAY              desc_array;
	u_int32_t               array_elements;
	u_int32_t               counter;
        struct m_tag    *mtag;
	bus_dma_segment_t	segs[EM_MAX_SCATTER];
	int			nsegs;
        struct em_buffer   *tx_buffer;
        struct em_tx_desc *current_tx_desc = NULL;
        struct ifnet   *ifp = adapter->ifp;

	/* NOTE(review): presumably called with the adapter lock held —
	 * em_82547_move_tail_locked() below asserts it. */
	m_head = *m_headp;

        /*
         * Force a cleanup if number of TX descriptors
         * available hits the threshold
         */
        if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
                em_clean_transmit_interrupts(adapter);
                if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
                        adapter->no_tx_desc_avail1++;
                        return(ENOBUFS);
                }
        }

        /*
         * Map the packet for DMA.
         */
	tx_buffer = &adapter->tx_buffer_area[adapter->next_avail_tx_desc];
	error = bus_dmamap_load_mbuf_sg(adapter->txtag, tx_buffer->map, m_head,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	map = tx_buffer->map;
        if (error != 0) {
                adapter->no_tx_dma_setup++;
                return (error);
        }
        KASSERT(nsegs != 0, ("em_encap: empty packet"));

        if (nsegs > adapter->num_tx_desc_avail) {
                adapter->no_tx_desc_avail2++;
		error = ENOBUFS;
		goto encap_fail;
        }


        if (ifp->if_hwassist > 0) {
                em_transmit_checksum_setup(adapter,  m_head,
                                           &txd_upper, &txd_lower);
        } else
                txd_upper = txd_lower = 0;


        /* Find out if we are in vlan mode */
        mtag = VLAN_OUTPUT_TAG(ifp, m_head);

	/*
	 * When operating in promiscuous mode, hardware encapsulation for
	 * packets is disabled.  This means we have to add the vlan
	 * encapsulation in the driver, since it will have come down from the
	 * VLAN layer with a tag instead of a VLAN header.
	 *
	 * NOTE(review): segs[]/nsegs were obtained from the DMA load above,
	 * i.e. BEFORE this path prepends the 4-byte VLAN header; verify the
	 * prepended bytes are actually covered by the loaded segments.
	 */
	if (mtag != NULL && adapter->em_insert_vlan_header) {
		struct ether_vlan_header *evl;
		struct ether_header eh;

		m_head = m_pullup(m_head, sizeof(eh));
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		eh = *mtod(m_head, struct ether_header *);
		M_PREPEND(m_head, sizeof(*evl), M_DONTWAIT);
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		m_head = m_pullup(m_head, sizeof(*evl));
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		/* Rewrite the Ethernet header with an inline 802.1Q tag. */
		evl = mtod(m_head, struct ether_vlan_header *);
		bcopy(&eh, evl, sizeof(*evl));
		evl->evl_proto = evl->evl_encap_proto;
		evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
		evl->evl_tag = htons(VLAN_TAG_VALUE(mtag));
		m_tag_delete(m_head, mtag);
		mtag = NULL;
		*m_headp = m_head;
	}

        i = adapter->next_avail_tx_desc;
	if (adapter->pcix_82544) {
		/* Remember the start so we can roll back on failure. */
		txd_saved = i;
		txd_used = 0;
	}
        for (j = 0; j < nsegs; j++) {
		/* If adapter is 82544 and on PCIX bus */
		if(adapter->pcix_82544) {
			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
                        array_elements = em_fill_descriptors(segs[j].ds_addr,
			    segs[j].ds_len, &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				if (txd_used == adapter->num_tx_desc_avail) {
					adapter->next_avail_tx_desc = txd_saved;
					adapter->no_tx_desc_avail2++;
					error = ENOBUFS;
					goto encap_fail;
                                }
                                tx_buffer = &adapter->tx_buffer_area[i];
                                current_tx_desc = &adapter->tx_desc_base[i];
                                current_tx_desc->buffer_addr = htole64(
					desc_array.descriptor[counter].address);
                                current_tx_desc->lower.data = htole32(
					(adapter->txd_cmd | txd_lower |
					 (u_int16_t)desc_array.descriptor[counter].length));
                                current_tx_desc->upper.data = htole32((txd_upper));
                                if (++i == adapter->num_tx_desc)
                                         i = 0;

                                tx_buffer->m_head = NULL;
                                txd_used++;
                        }
		} else {
			/* Normal path: one descriptor per DMA segment. */
			tx_buffer = &adapter->tx_buffer_area[i];
			current_tx_desc = &adapter->tx_desc_base[i];

			current_tx_desc->buffer_addr = htole64(segs[j].ds_addr);
			current_tx_desc->lower.data = htole32(
				adapter->txd_cmd | txd_lower | segs[j].ds_len);
			current_tx_desc->upper.data = htole32(txd_upper);

			if (++i == adapter->num_tx_desc)
				i = 0;

			tx_buffer->m_head = NULL;
		}
        }

	adapter->next_avail_tx_desc = i;
	if (adapter->pcix_82544) {
		adapter->num_tx_desc_avail -= txd_used;
	}
	else {
		adapter->num_tx_desc_avail -= nsegs;
	}

        if (mtag != NULL) {
                /* Set the vlan id */
                current_tx_desc->upper.fields.special = htole16(VLAN_TAG_VALUE(mtag));

                /* Tell hardware to add tag */
                current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
        }

	/* The mbuf is owned by the last descriptor's buffer slot. */
        tx_buffer->m_head = m_head;
        bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

        /*
         * Last Descriptor of Packet needs End Of Packet (EOP)
         */
        current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);

        /*
         * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
         * that this frame is available to transmit.
         */
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        if (adapter->hw.mac_type == em_82547 &&
            adapter->link_duplex == HALF_DUPLEX) {
		/* 82547 half-duplex: route through the FIFO workaround. */
                em_82547_move_tail_locked(adapter);
        } else {
                E1000_WRITE_REG(&adapter->hw, TDT, i);
                if (adapter->hw.mac_type == em_82547) {
                        em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len);
                }
        }

        return(0);

encap_fail:
	/* Undo the DMA mapping; the caller still owns the mbuf. */
	bus_dmamap_unload(adapter->txtag, tx_buffer->map);
	return (error);
}
1589
1590/*********************************************************************
1591 *
1592 * 82547 workaround to avoid controller hang in half-duplex environment.
1593 * The workaround is to avoid queuing a large packet that would span
1594 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1595 * in this case. We do that only when FIFO is quiescent.
1596 *
1597 **********************************************************************/
static void
em_82547_move_tail_locked(struct adapter *adapter)
{
	uint16_t hw_tdt;
	uint16_t sw_tdt;
	struct em_tx_desc *tx_desc;
	uint16_t length = 0;
	boolean_t eop = 0;

	EM_LOCK_ASSERT(adapter);

	/* Walk descriptors queued in software but not yet handed to HW. */
	hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
	sw_tdt = adapter->next_avail_tx_desc;

	while (hw_tdt != sw_tdt) {
		/* Accumulate the frame length up to the EOP descriptor. */
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if(++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if(eop) {
			/*
			 * Complete frame: if it would span the FIFO wrap
			 * boundary, defer and retry from the 1-tick callout.
			 */
			if (em_82547_fifo_workaround(adapter, length)) {
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
					em_82547_move_tail, adapter);
				break;
			}
			/* Safe to hand this frame to the hardware. */
			E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
			em_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}
	return;
}
1633
static void
em_82547_move_tail(void *arg)
{
	struct adapter *sc = arg;

	/* Callout wrapper: take the lock and retry the tail move. */
	EM_LOCK(sc);
	em_82547_move_tail_locked(sc);
	EM_UNLOCK(sc);
}
1643
1644static int
1645em_82547_fifo_workaround(struct adapter *adapter, int len)
1646{
1647	int fifo_space, fifo_pkt_len;
1648
1649	fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1650
1651	if (adapter->link_duplex == HALF_DUPLEX) {
1652		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1653
1654		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1655			if (em_82547_tx_fifo_reset(adapter)) {
1656				return(0);
1657			}
1658			else {
1659				return(1);
1660			}
1661		}
1662	}
1663
1664	return(0);
1665}
1666
1667static void
1668em_82547_update_fifo_head(struct adapter *adapter, int len)
1669{
1670	int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1671
1672	/* tx_fifo_head is always 16 byte aligned */
1673	adapter->tx_fifo_head += fifo_pkt_len;
1674	if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1675		adapter->tx_fifo_head -= adapter->tx_fifo_size;
1676	}
1677
1678	return;
1679}
1680
1681
1682static int
1683em_82547_tx_fifo_reset(struct adapter *adapter)
1684{
1685	uint32_t tctl;
1686
1687	if ( (E1000_READ_REG(&adapter->hw, TDT) ==
1688	      E1000_READ_REG(&adapter->hw, TDH)) &&
1689	     (E1000_READ_REG(&adapter->hw, TDFT) ==
1690	      E1000_READ_REG(&adapter->hw, TDFH)) &&
1691	     (E1000_READ_REG(&adapter->hw, TDFTS) ==
1692	      E1000_READ_REG(&adapter->hw, TDFHS)) &&
1693	     (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {
1694
1695		/* Disable TX unit */
1696		tctl = E1000_READ_REG(&adapter->hw, TCTL);
1697		E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);
1698
1699		/* Reset FIFO pointers */
1700		E1000_WRITE_REG(&adapter->hw, TDFT,  adapter->tx_head_addr);
1701		E1000_WRITE_REG(&adapter->hw, TDFH,  adapter->tx_head_addr);
1702		E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
1703		E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);
1704
1705		/* Re-enable TX unit */
1706		E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1707		E1000_WRITE_FLUSH(&adapter->hw);
1708
1709		adapter->tx_fifo_head = 0;
1710		adapter->tx_fifo_reset_cnt++;
1711
1712		return(TRUE);
1713	}
1714	else {
1715		return(FALSE);
1716	}
1717}
1718
1719static void
1720em_set_promisc(struct adapter * adapter)
1721{
1722
1723	u_int32_t       reg_rctl;
1724	struct ifnet   *ifp = adapter->ifp;
1725
1726	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1727
1728	if (ifp->if_flags & IFF_PROMISC) {
1729		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1730		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1731		/* Disable VLAN stripping in promiscous mode
1732		 * This enables bridging of vlan tagged frames to occur
1733		 * and also allows vlan tags to be seen in tcpdump
1734		 */
1735		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1736			em_disable_vlans(adapter);
1737		adapter->em_insert_vlan_header = 1;
1738	} else if (ifp->if_flags & IFF_ALLMULTI) {
1739		reg_rctl |= E1000_RCTL_MPE;
1740		reg_rctl &= ~E1000_RCTL_UPE;
1741		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1742		adapter->em_insert_vlan_header = 0;
1743	} else
1744		adapter->em_insert_vlan_header = 0;
1745
1746	return;
1747}
1748
1749static void
1750em_disable_promisc(struct adapter * adapter)
1751{
1752	u_int32_t       reg_rctl;
1753	struct ifnet   *ifp = adapter->ifp;
1754
1755	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1756
1757	reg_rctl &=  (~E1000_RCTL_UPE);
1758	reg_rctl &=  (~E1000_RCTL_MPE);
1759	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1760
1761	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1762		em_enable_vlans(adapter);
1763	adapter->em_insert_vlan_header = 0;
1764
1765	return;
1766}
1767
1768
1769/*********************************************************************
1770 *  Multicast Update
1771 *
1772 *  This routine is called whenever multicast address list is updated.
1773 *
1774 **********************************************************************/
1775
static void
em_set_multi(struct adapter * adapter)
{
        u_int32_t reg_rctl = 0;
        u_int8_t  mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
        struct ifmultiaddr  *ifma;
        int mcnt = 0;
        struct ifnet   *ifp = adapter->ifp;

        IOCTL_DEBUGOUT("em_set_multi: begin");

        /*
         * 82542 rev 2.0: the receiver must be held in reset (and MWI
         * turned off) while the multicast table is rewritten; it is
         * restored symmetrically at the end of this function.
         */
        if (adapter->hw.mac_type == em_82542_rev2_0) {
                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
                if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
                        em_pci_clear_mwi(&adapter->hw);
                }
                reg_rctl |= E1000_RCTL_RST;
                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
                msec_delay(5);
        }

	/* Collect up to MAX_NUM_MULTICAST_ADDRESSES link-layer addresses. */
	IF_ADDR_LOCK(ifp);
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break;

                bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
                      &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
                mcnt++;
        }
	IF_ADDR_UNLOCK(ifp);

	/* Too many groups for the filter table: accept all multicast. */
        if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
                reg_rctl |= E1000_RCTL_MPE;
                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
        } else
                em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);

        /* Take the 82542 rev 2.0 receiver back out of reset. */
        if (adapter->hw.mac_type == em_82542_rev2_0) {
                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
                reg_rctl &= ~E1000_RCTL_RST;
                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
                msec_delay(5);
                if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
                        em_pci_set_mwi(&adapter->hw);
                }
        }

        return;
}
1829
1830
1831/*********************************************************************
1832 *  Timer routine
1833 *
1834 *  This routine checks for link status and updates statistics.
1835 *
1836 **********************************************************************/
1837
1838static void
1839em_local_timer(void *arg)
1840{
1841	struct ifnet   *ifp;
1842	struct adapter * adapter = arg;
1843	ifp = adapter->ifp;
1844
1845	EM_LOCK(adapter);
1846
1847	em_check_for_link(&adapter->hw);
1848	em_print_link_status(adapter);
1849	em_update_stats_counters(adapter);
1850	if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1851		em_print_hw_stats(adapter);
1852	}
1853	em_smartspeed(adapter);
1854
1855	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1856
1857	EM_UNLOCK(adapter);
1858	return;
1859}
1860
1861static void
1862em_print_link_status(struct adapter * adapter)
1863{
1864	struct ifnet *ifp = adapter->ifp;
1865
1866	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1867		if (adapter->link_active == 0) {
1868			em_get_speed_and_duplex(&adapter->hw,
1869						&adapter->link_speed,
1870						&adapter->link_duplex);
1871			if (bootverbose)
1872				printf("em%d: Link is up %d Mbps %s\n",
1873				       adapter->unit,
1874				       adapter->link_speed,
1875				       ((adapter->link_duplex == FULL_DUPLEX) ?
1876					"Full Duplex" : "Half Duplex"));
1877			adapter->link_active = 1;
1878			adapter->smartspeed = 0;
1879			if_link_state_change(ifp, LINK_STATE_UP);
1880		}
1881	} else {
1882		if (adapter->link_active == 1) {
1883			adapter->link_speed = 0;
1884			adapter->link_duplex = 0;
1885			if (bootverbose)
1886				printf("em%d: Link is Down\n", adapter->unit);
1887			adapter->link_active = 0;
1888			if_link_state_change(ifp, LINK_STATE_DOWN);
1889		}
1890	}
1891
1892	return;
1893}
1894
1895/*********************************************************************
1896 *
1897 *  This routine disables all traffic on the adapter by issuing a
1898 *  global reset on the MAC and deallocates TX/RX buffers.
1899 *
1900 **********************************************************************/
1901
1902static void
1903em_stop(void *arg)
1904{
1905	struct ifnet   *ifp;
1906	struct adapter * adapter = arg;
1907	ifp = adapter->ifp;
1908
1909	mtx_assert(&adapter->mtx, MA_OWNED);
1910
1911	INIT_DEBUGOUT("em_stop: begin");
1912
1913	em_disable_intr(adapter);
1914	em_reset_hw(&adapter->hw);
1915	callout_stop(&adapter->timer);
1916	callout_stop(&adapter->tx_fifo_timer);
1917	em_free_transmit_structures(adapter);
1918	em_free_receive_structures(adapter);
1919
1920
1921	/* Tell the stack that the interface is no longer active */
1922	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1923
1924	return;
1925}
1926
1927
1928/*********************************************************************
1929 *
1930 *  Determine hardware revision.
1931 *
1932 **********************************************************************/
1933static void
1934em_identify_hardware(struct adapter * adapter)
1935{
1936	device_t dev = adapter->dev;
1937
1938	/* Make sure our PCI config space has the necessary stuff set */
1939	adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1940	if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1941	      (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1942		printf("em%d: Memory Access and/or Bus Master bits were not set!\n",
1943		       adapter->unit);
1944		adapter->hw.pci_cmd_word |=
1945		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1946		pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
1947	}
1948
1949	/* Save off the information about this board */
1950	adapter->hw.vendor_id = pci_get_vendor(dev);
1951	adapter->hw.device_id = pci_get_device(dev);
1952	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1953	adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
1954	adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
1955
1956	/* Identify the MAC */
1957        if (em_set_mac_type(&adapter->hw))
1958                printf("em%d: Unknown MAC Type\n", adapter->unit);
1959
1960	if(adapter->hw.mac_type == em_82541 ||
1961	   adapter->hw.mac_type == em_82541_rev_2 ||
1962	   adapter->hw.mac_type == em_82547 ||
1963	   adapter->hw.mac_type == em_82547_rev_2)
1964		adapter->hw.phy_init_script = TRUE;
1965
1966        return;
1967}
1968
/*
 * Allocate and wire up all PCI resources: the register BAR, an
 * optional I/O BAR (MAC types newer than 82543), and the interrupt,
 * preferring a fast-interrupt handler plus a deferred-processing
 * taskqueue, falling back to a plain MPSAFE ithread.  Returns 0 or
 * ENXIO.  Partial allocations are released by em_free_pci_resources().
 */
static int
em_allocate_pci_resources(struct adapter * adapter)
{
	int             val, rid;
	device_t        dev = adapter->dev;

	/* Map the register window (BAR 0); required for all MAC types. */
	rid = PCIR_BAR(0);
	adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						     &rid, RF_ACTIVE);
	if (!(adapter->res_memory)) {
		printf("em%d: Unable to allocate bus resource: memory\n",
		       adapter->unit);
		return(ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
	rman_get_bustag(adapter->res_memory);
	adapter->osdep.mem_bus_space_handle =
	rman_get_bushandle(adapter->res_memory);
	/*
	 * hw_addr points at the bus-space handle itself; register access
	 * goes through the osdep tag/handle pair, not a direct mapping.
	 */
	adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;


	if (adapter->hw.mac_type > em_82543) {
		/* Figure our where our IO BAR is ? */
		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
			val = pci_read_config(dev, rid, 4);
			if (E1000_BAR_TYPE(val) == E1000_BAR_TYPE_IO) {
				adapter->io_rid = rid;
				break;
			}
			rid += 4;
			/* check for 64bit BAR */
			if (E1000_BAR_MEM_TYPE(val) == E1000_BAR_MEM_TYPE_64BIT)
				rid += 4;
		}
		if (rid >= PCIR_CIS) {
			printf("em%d: Unable to locate IO BAR\n", adapter->unit);
			return (ENXIO);
		}
		adapter->res_ioport = bus_alloc_resource_any(dev,
							     SYS_RES_IOPORT,
							     &adapter->io_rid,
							     RF_ACTIVE);
		if (!(adapter->res_ioport)) {
			printf("em%d: Unable to allocate bus resource: ioport\n",
			       adapter->unit);
			return(ENXIO);
		}
		adapter->hw.io_base = 0;
		adapter->osdep.io_bus_space_tag =
		    rman_get_bustag(adapter->res_ioport);
		adapter->osdep.io_bus_space_handle =
		    rman_get_bushandle(adapter->res_ioport);
	}

	rid = 0x0;
	adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
						        RF_SHAREABLE |
							RF_ACTIVE);
	if (!(adapter->res_interrupt)) {
		printf("em%d: Unable to allocate bus resource: interrupt\n",
		       adapter->unit);
		return(ENXIO);
	}

	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.  If that doesn't work, try just using an
	 * ithread.
	 */
#ifndef NO_EM_FASTINTR
	if (bus_setup_intr(dev, adapter->res_interrupt,
			   INTR_TYPE_NET | INTR_FAST, em_intr_fast, adapter,
			   &adapter->int_handler_tag) == 0) {

		/* Init the deferred processing contexts. */
		TASK_INIT(&adapter->rxtx_task, 0, em_handle_rxtx, adapter);
		TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
		/*
		 * NOTE(review): the taskqueue_create_fast() and
		 * kthread_create() return values are not checked; a
		 * failure here would leave adapter->tq NULL or the
		 * queue without a service thread — confirm intent.
		 */
		adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
			taskqueue_thread_enqueue,
			&adapter->tq, &adapter->tqproc);
		kthread_create(taskqueue_thread_loop,
			&adapter->tq, &adapter->tqproc,
			0, 0, "%s taskq", device_get_nameunit(adapter->dev));
		/* Bump the taskqueue thread to network priority. */
		mtx_lock_spin(&sched_lock);
		sched_prio(FIRST_THREAD_IN_PROC(adapter->tqproc), PI_NET);
		mtx_unlock_spin(&sched_lock);
	}
#endif
	/* Fall back to a conventional handler if fast setup didn't run. */
	if (adapter->int_handler_tag == NULL) {
		if (bus_setup_intr(dev, adapter->res_interrupt,
				   INTR_TYPE_NET | INTR_MPSAFE,
				   em_intr, adapter,
				   &adapter->int_handler_tag)) {
			printf("em%d: Error registering interrupt handler!\n",
			       adapter->unit);
			return(ENXIO);
		}
	}

	adapter->hw.back = &adapter->osdep;

	return(0);
}
2072
2073static void
2074em_free_pci_resources(struct adapter * adapter)
2075{
2076	device_t dev = adapter->dev;
2077
2078	if (adapter->tq != NULL) {
2079		taskqueue_free(adapter->tq);
2080	}
2081	if (adapter->res_interrupt != NULL) {
2082		bus_teardown_intr(dev, adapter->res_interrupt,
2083				  adapter->int_handler_tag);
2084		bus_release_resource(dev, SYS_RES_IRQ, 0,
2085				     adapter->res_interrupt);
2086	}
2087	if (adapter->res_memory != NULL) {
2088		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
2089				     adapter->res_memory);
2090	}
2091
2092	if (adapter->res_ioport != NULL) {
2093		bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
2094				     adapter->res_ioport);
2095	}
2096	return;
2097}
2098
2099/*********************************************************************
2100 *
2101 *  Initialize the hardware to a configuration as specified by the
2102 *  adapter structure. The controller is reset, the EEPROM is
2103 *  verified, the MAC address is set, then the shared initialization
2104 *  routines are called.
2105 *
2106 **********************************************************************/
2107static int
2108em_hardware_init(struct adapter * adapter)
2109{
2110	uint16_t rx_buffer_size;
2111
2112        INIT_DEBUGOUT("em_hardware_init: begin");
2113	/* Issue a global reset */
2114	em_reset_hw(&adapter->hw);
2115
2116	/* When hardware is reset, fifo_head is also reset */
2117	adapter->tx_fifo_head = 0;
2118
2119	/* Make sure we have a good EEPROM before we read from it */
2120	if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
2121		printf("em%d: The EEPROM Checksum Is Not Valid\n",
2122		       adapter->unit);
2123		return(EIO);
2124	}
2125
2126	if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
2127		printf("em%d: EEPROM read error while reading part number\n",
2128		       adapter->unit);
2129		return(EIO);
2130	}
2131
2132	/*
2133	 * These parameters control the automatic generation (Tx) and
2134	 * response (Rx) to Ethernet PAUSE frames.
2135	 * - High water mark should allow for at least two frames to be
2136	 *   received after sending an XOFF.
2137	 * - Low water mark works best when it is very near the high water mark.
2138	 *   This allows the receiver to restart by sending XON when it has drained
2139	 *   a bit.  Here we use an arbitary value of 1500 which will restart after
2140	 *   one full frame is pulled from the buffer.  There could be several smaller
2141	 *   frames in the buffer and if so they will not trigger the XON until their
2142	 *   total number reduces the buffer by 1500.
2143	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2144	 */
2145	rx_buffer_size = ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff) << 10 );
2146
2147	adapter->hw.fc_high_water = rx_buffer_size -
2148	    roundup2(adapter->hw.max_frame_size, 1024);
2149	adapter->hw.fc_low_water = adapter->hw.fc_high_water - 1500;
2150	adapter->hw.fc_pause_time = 0x1000;
2151	adapter->hw.fc_send_xon = TRUE;
2152	adapter->hw.fc = em_fc_full;
2153
2154	if (em_init_hw(&adapter->hw) < 0) {
2155		printf("em%d: Hardware Initialization Failed",
2156		       adapter->unit);
2157		return(EIO);
2158	}
2159
2160	em_check_for_link(&adapter->hw);
2161	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
2162		adapter->link_active = 1;
2163	else
2164		adapter->link_active = 0;
2165
2166	if (adapter->link_active) {
2167		em_get_speed_and_duplex(&adapter->hw,
2168					&adapter->link_speed,
2169					&adapter->link_duplex);
2170	} else {
2171		adapter->link_speed = 0;
2172		adapter->link_duplex = 0;
2173	}
2174
2175	return(0);
2176}
2177
2178/*********************************************************************
2179 *
2180 *  Setup networking device structure and register an interface.
2181 *
2182 **********************************************************************/
2183static void
2184em_setup_interface(device_t dev, struct adapter * adapter)
2185{
2186	struct ifnet   *ifp;
2187	INIT_DEBUGOUT("em_setup_interface: begin");
2188
2189	ifp = adapter->ifp = if_alloc(IFT_ETHER);
2190	if (ifp == NULL)
2191		panic("%s: can not if_alloc()", device_get_nameunit(dev));
2192	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2193	ifp->if_mtu = ETHERMTU;
2194	ifp->if_baudrate = 1000000000;
2195	ifp->if_init =  em_init;
2196	ifp->if_softc = adapter;
2197	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2198	ifp->if_ioctl = em_ioctl;
2199	ifp->if_start = em_start;
2200	ifp->if_watchdog = em_watchdog;
2201	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
2202	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
2203	IFQ_SET_READY(&ifp->if_snd);
2204
2205        ether_ifattach(ifp, adapter->hw.mac_addr);
2206
2207	ifp->if_capabilities = ifp->if_capenable = 0;
2208
2209	if (adapter->hw.mac_type >= em_82543) {
2210		ifp->if_capabilities |= IFCAP_HWCSUM;
2211		ifp->if_capenable |= IFCAP_HWCSUM;
2212	}
2213
2214	/*
2215	 * Tell the upper layer(s) we support long frames.
2216	 */
2217	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2218	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2219	ifp->if_capenable |= IFCAP_VLAN_MTU;
2220
2221#ifdef DEVICE_POLLING
2222	ifp->if_capabilities |= IFCAP_POLLING;
2223#endif
2224
2225	/*
2226	 * Specify the media types supported by this adapter and register
2227	 * callbacks to update media and link information
2228	 */
2229	ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
2230		     em_media_status);
2231	if (adapter->hw.media_type == em_media_type_fiber) {
2232		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
2233			    0, NULL);
2234		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
2235			    0, NULL);
2236	} else {
2237		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2238		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2239			    0, NULL);
2240		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
2241			    0, NULL);
2242		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
2243			    0, NULL);
2244		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
2245			    0, NULL);
2246		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2247	}
2248	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2249	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2250
2251	return;
2252}
2253
2254
2255/*********************************************************************
2256 *
2257 *  Workaround for SmartSpeed on 82541 and 82547 controllers
2258 *
2259 **********************************************************************/
2260static void
2261em_smartspeed(struct adapter *adapter)
2262{
2263        uint16_t phy_tmp;
2264
2265	if(adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
2266	   !adapter->hw.autoneg || !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
2267		return;
2268
2269        if(adapter->smartspeed == 0) {
2270                /* If Master/Slave config fault is asserted twice,
2271                 * we assume back-to-back */
2272                em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2273                if(!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) return;
2274                em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2275                if(phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2276                        em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
2277					&phy_tmp);
2278                        if(phy_tmp & CR_1000T_MS_ENABLE) {
2279                                phy_tmp &= ~CR_1000T_MS_ENABLE;
2280                                em_write_phy_reg(&adapter->hw,
2281                                                    PHY_1000T_CTRL, phy_tmp);
2282                                adapter->smartspeed++;
2283                                if(adapter->hw.autoneg &&
2284                                   !em_phy_setup_autoneg(&adapter->hw) &&
2285				   !em_read_phy_reg(&adapter->hw, PHY_CTRL,
2286                                                       &phy_tmp)) {
2287                                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
2288                                                    MII_CR_RESTART_AUTO_NEG);
2289                                        em_write_phy_reg(&adapter->hw,
2290							 PHY_CTRL, phy_tmp);
2291                                }
2292                        }
2293                }
2294                return;
2295        } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2296                /* If still no link, perhaps using 2/3 pair cable */
2297                em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2298                phy_tmp |= CR_1000T_MS_ENABLE;
2299                em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2300                if(adapter->hw.autoneg &&
2301                   !em_phy_setup_autoneg(&adapter->hw) &&
2302                   !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
2303                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
2304                                    MII_CR_RESTART_AUTO_NEG);
2305                        em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
2306                }
2307        }
2308        /* Restart process after EM_SMARTSPEED_MAX iterations */
2309        if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2310                adapter->smartspeed = 0;
2311
2312	return;
2313}
2314
2315
2316/*
2317 * Manage DMA'able memory.
2318 */
2319static void
2320em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2321{
2322        if (error)
2323                return;
2324        *(bus_addr_t *) arg = segs[0].ds_addr;
2325}
2326
2327static int
2328em_dma_malloc(struct adapter *adapter, bus_size_t size,
2329        struct em_dma_alloc *dma, int mapflags)
2330{
2331        int r;
2332
2333	r = bus_dma_tag_create(NULL,                    /* parent */
2334                               E1000_DBA_ALIGN, 0,      /* alignment, bounds */
2335                               BUS_SPACE_MAXADDR,       /* lowaddr */
2336                               BUS_SPACE_MAXADDR,       /* highaddr */
2337                               NULL, NULL,              /* filter, filterarg */
2338                               size,                    /* maxsize */
2339                               1,                       /* nsegments */
2340                               size,                    /* maxsegsize */
2341                               BUS_DMA_ALLOCNOW,        /* flags */
2342			       NULL,			/* lockfunc */
2343			       NULL,			/* lockarg */
2344                               &dma->dma_tag);
2345        if (r != 0) {
2346                printf("em%d: em_dma_malloc: bus_dma_tag_create failed; "
2347                        "error %u\n", adapter->unit, r);
2348                goto fail_0;
2349        }
2350
2351        r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2352                             BUS_DMA_NOWAIT, &dma->dma_map);
2353        if (r != 0) {
2354                printf("em%d: em_dma_malloc: bus_dmammem_alloc failed; "
2355                        "size %ju, error %d\n", adapter->unit,
2356			(uintmax_t)size, r);
2357                goto fail_2;
2358        }
2359
2360	dma->dma_paddr = 0;
2361        r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2362                            size,
2363                            em_dmamap_cb,
2364                            &dma->dma_paddr,
2365                            mapflags | BUS_DMA_NOWAIT);
2366        if (r != 0 || dma->dma_paddr == 0) {
2367                printf("em%d: em_dma_malloc: bus_dmamap_load failed; "
2368                        "error %u\n", adapter->unit, r);
2369                goto fail_3;
2370        }
2371
2372        return (0);
2373
2374fail_3:
2375        bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2376fail_2:
2377        bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2378        bus_dma_tag_destroy(dma->dma_tag);
2379fail_0:
2380        dma->dma_map = NULL;
2381        dma->dma_tag = NULL;
2382        return (r);
2383}
2384
2385static void
2386em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2387{
2388	if (dma->dma_tag == NULL)
2389		return;
2390	if (dma->dma_map != NULL) {
2391		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2392		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2393		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2394		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2395		dma->dma_map = NULL;
2396	}
2397        bus_dma_tag_destroy(dma->dma_tag);
2398	dma->dma_tag = NULL;
2399}
2400
2401
2402/*********************************************************************
2403 *
2404 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2405 *  the information needed to transmit a packet on the wire.
2406 *
2407 **********************************************************************/
2408static int
2409em_allocate_transmit_structures(struct adapter * adapter)
2410{
2411	if (!(adapter->tx_buffer_area =
2412	      (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2413					     adapter->num_tx_desc, M_DEVBUF,
2414					     M_NOWAIT))) {
2415		printf("em%d: Unable to allocate tx_buffer memory\n",
2416		       adapter->unit);
2417		return ENOMEM;
2418	}
2419
2420	bzero(adapter->tx_buffer_area,
2421	      sizeof(struct em_buffer) * adapter->num_tx_desc);
2422
2423	return 0;
2424}
2425
2426/*********************************************************************
2427 *
2428 *  Allocate and initialize transmit structures.
2429 *
2430 **********************************************************************/
/*
 * Create the TX DMA tag, allocate the per-descriptor bookkeeping and
 * one DMA map per ring slot, then reset the ring indices.  On any
 * failure the partially-built TX state is torn down via
 * em_free_transmit_structures().  Returns 0 or a bus_dma/ENOMEM error.
 */
static int
em_setup_transmit_structures(struct adapter * adapter)
{
	struct em_buffer *tx_buffer;
	bus_size_t size;
	int error, i;

        /*
         * Setup DMA descriptor areas.
         */
	size = roundup2(adapter->hw.max_frame_size, MCLBYTES);
	if ((error = bus_dma_tag_create(NULL,           /* parent */
                               1, 0,                    /* alignment, bounds */
                               BUS_SPACE_MAXADDR,       /* lowaddr */
                               BUS_SPACE_MAXADDR,       /* highaddr */
                               NULL, NULL,              /* filter, filterarg */
                               size,                    /* maxsize */
                               EM_MAX_SCATTER,          /* nsegments */
                               size,                    /* maxsegsize */
                               0,                       /* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockarg */
                               &adapter->txtag)) != 0) {
		printf("em%d: Unable to allocate TX DMA tag\n", adapter->unit);
		goto fail;
        }

        if ((error = em_allocate_transmit_structures(adapter)) != 0)
		goto fail;

	/* Clear the descriptor ring and create a DMA map per slot. */
        bzero((void *) adapter->tx_desc_base,
              (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
	tx_buffer = adapter->tx_buffer_area;
	for (i = 0; i < adapter->num_tx_desc; i++) {
		error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
		if (error != 0) {
			printf("em%d: Unable to create TX DMA map\n",
			    adapter->unit);
			goto fail;
		}
		tx_buffer++;
	}

	/* Ring starts empty, with producer and consumer at slot 0. */
        adapter->next_avail_tx_desc = 0;
        adapter->oldest_used_tx_desc = 0;

        /* Set number of descriptors available */
        adapter->num_tx_desc_avail = adapter->num_tx_desc;

        /* Set checksum context */
        adapter->active_checksum_context = OFFLOAD_NONE;
	/* Flush the cleared ring out to memory for the hardware. */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        return (0);

fail:
	em_free_transmit_structures(adapter);
	return (error);
}
2491
2492/*********************************************************************
2493 *
2494 *  Enable transmit unit.
2495 *
2496 **********************************************************************/
/*
 * Program the TX side of the MAC: descriptor ring base/length, head
 * and tail pointers, inter-packet gap, interrupt delay timers and the
 * transmit control register.  Requires the TX ring to be allocated.
 */
static void
em_initialize_transmit_unit(struct adapter * adapter)
{
	u_int32_t       reg_tctl;
	u_int32_t       reg_tipg = 0;
	u_int64_t	bus_addr;

         INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = adapter->txdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, TDBAL, (u_int32_t)bus_addr);
	E1000_WRITE_REG(&adapter->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, TDLEN,
			adapter->num_tx_desc *
			sizeof(struct em_tx_desc));

	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&adapter->hw, TDH, 0);
	E1000_WRITE_REG(&adapter->hw, TDT, 0);


	HW_DEBUGOUT2("Base = %x, Length = %x\n",
		     E1000_READ_REG(&adapter->hw, TDBAL),
		     E1000_READ_REG(&adapter->hw, TDLEN));

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (adapter->hw.mac_type) {
	case em_82542_rev2_0:
        case em_82542_rev2_1:
                reg_tipg = DEFAULT_82542_TIPG_IPGT;
                reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
                break;
        default:
		/* Fiber and copper media use different IPGT defaults. */
                if (adapter->hw.media_type == em_media_type_fiber)
                        reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
                else
                        reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
                reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
        }

	E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
	E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
	/* The absolute TX delay register is written on 82540 and newer. */
	if(adapter->hw.mac_type >= em_82540)
		E1000_WRITE_REG(&adapter->hw, TADV,
		    adapter->tx_abs_int_delay.value);

	/* Program the Transmit Control Register */
	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
	/* 82571 and newer also get the MULR bit. */
	if (adapter->hw.mac_type >= em_82571)
		reg_tctl |= E1000_TCTL_MULR;
	/*
	 * NOTE(review): link_duplex is compared against the literal 1
	 * here, while the rest of this file tests duplex against
	 * FULL_DUPLEX.  Verify which duplex value 1 denotes before
	 * assuming the FDX/HDX collision-distance choice is correct.
	 */
	if (adapter->link_duplex == 1) {
		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	} else {
		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	}
	E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;

	/* Only request delayed TX interrupts if a delay is configured. */
	if (adapter->tx_int_delay.value > 0)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	return;
}
2565
2566/*********************************************************************
2567 *
2568 *  Free all transmit related data structures.
2569 *
2570 **********************************************************************/
2571static void
2572em_free_transmit_structures(struct adapter * adapter)
2573{
2574        struct em_buffer   *tx_buffer;
2575        int             i;
2576
2577        INIT_DEBUGOUT("free_transmit_structures: begin");
2578
2579        if (adapter->tx_buffer_area != NULL) {
2580                tx_buffer = adapter->tx_buffer_area;
2581                for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2582                        if (tx_buffer->m_head != NULL) {
2583				bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2584				    BUS_DMASYNC_POSTWRITE);
2585				bus_dmamap_unload(adapter->txtag,
2586				    tx_buffer->map);
2587                                m_freem(tx_buffer->m_head);
2588				tx_buffer->m_head = NULL;
2589                        } else if (tx_buffer->map != NULL)
2590				bus_dmamap_unload(adapter->txtag,
2591				    tx_buffer->map);
2592			if (tx_buffer->map != NULL) {
2593				bus_dmamap_destroy(adapter->txtag,
2594				    tx_buffer->map);
2595				tx_buffer->map = NULL;
2596			}
2597                }
2598        }
2599        if (adapter->tx_buffer_area != NULL) {
2600                free(adapter->tx_buffer_area, M_DEVBUF);
2601                adapter->tx_buffer_area = NULL;
2602        }
2603        if (adapter->txtag != NULL) {
2604                bus_dma_tag_destroy(adapter->txtag);
2605                adapter->txtag = NULL;
2606        }
2607        return;
2608}
2609
2610/*********************************************************************
2611 *
2612 *  The offload context needs to be set when we transfer the first
2613 *  packet of a particular protocol (TCP/UDP). We change the
2614 *  context only if the protocol type changes.
2615 *
2616 **********************************************************************/
2617static void
2618em_transmit_checksum_setup(struct adapter * adapter,
2619			   struct mbuf *mp,
2620			   u_int32_t *txd_upper,
2621			   u_int32_t *txd_lower)
2622{
2623	struct em_context_desc *TXD;
2624	struct em_buffer *tx_buffer;
2625	int curr_txd;
2626
2627	if (mp->m_pkthdr.csum_flags) {
2628
2629		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2630			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2631			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2632			if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
2633				return;
2634			else
2635				adapter->active_checksum_context = OFFLOAD_TCP_IP;
2636
2637		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2638			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2639			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2640			if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
2641				return;
2642			else
2643				adapter->active_checksum_context = OFFLOAD_UDP_IP;
2644		} else {
2645			*txd_upper = 0;
2646			*txd_lower = 0;
2647			return;
2648		}
2649	} else {
2650		*txd_upper = 0;
2651		*txd_lower = 0;
2652		return;
2653	}
2654
2655	/* If we reach this point, the checksum offload context
2656	 * needs to be reset.
2657	 */
2658	curr_txd = adapter->next_avail_tx_desc;
2659	tx_buffer = &adapter->tx_buffer_area[curr_txd];
2660	TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
2661
2662	TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2663	TXD->lower_setup.ip_fields.ipcso =
2664		ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2665	TXD->lower_setup.ip_fields.ipcse =
2666		htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2667
2668	TXD->upper_setup.tcp_fields.tucss =
2669		ETHER_HDR_LEN + sizeof(struct ip);
2670	TXD->upper_setup.tcp_fields.tucse = htole16(0);
2671
2672	if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
2673		TXD->upper_setup.tcp_fields.tucso =
2674			ETHER_HDR_LEN + sizeof(struct ip) +
2675			offsetof(struct tcphdr, th_sum);
2676	} else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
2677		TXD->upper_setup.tcp_fields.tucso =
2678			ETHER_HDR_LEN + sizeof(struct ip) +
2679			offsetof(struct udphdr, uh_sum);
2680	}
2681
2682	TXD->tcp_seg_setup.data = htole32(0);
2683	TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);
2684
2685	tx_buffer->m_head = NULL;
2686
2687	if (++curr_txd == adapter->num_tx_desc)
2688		curr_txd = 0;
2689
2690	adapter->num_tx_desc_avail--;
2691	adapter->next_avail_tx_desc = curr_txd;
2692
2693	return;
2694}
2695
2696/**********************************************************************
2697 *
2698 *  Examine each tx_buffer in the used queue. If the hardware is done
2699 *  processing the packet then free associated resources. The
2700 *  tx_buffer is put back on the free queue.
2701 *
2702 **********************************************************************/
/*
 * Walk the TX ring from the oldest in-flight descriptor, reclaiming
 * every slot the hardware has marked done (DD set), freeing mbufs and
 * unloading DMA maps.  Updates num_tx_desc_avail and the watchdog
 * timer.  Must be called with the adapter mutex held.
 */
static void
em_clean_transmit_interrupts(struct adapter * adapter)
{
        int i, num_avail;
        struct em_buffer *tx_buffer;
        struct em_tx_desc   *tx_desc;
	struct ifnet   *ifp = adapter->ifp;

	mtx_assert(&adapter->mtx, MA_OWNED);

	/* Fast path: nothing is outstanding. */
        if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
                return;

        num_avail = adapter->num_tx_desc_avail;
        i = adapter->oldest_used_tx_desc;

        tx_buffer = &adapter->tx_buffer_area[i];
        tx_desc = &adapter->tx_desc_base[i];

	/* Pick up the hardware's latest descriptor status writebacks. */
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_POSTREAD);
        while (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {

		/* Clear DD so this slot isn't reclaimed twice. */
                tx_desc->upper.data = 0;
                num_avail++;

                if (tx_buffer->m_head) {
			ifp->if_opackets++;
			bus_dmamap_sync(adapter->txtag, tx_buffer->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(adapter->txtag, tx_buffer->map);

                        m_freem(tx_buffer->m_head);
                        tx_buffer->m_head = NULL;
                }

		/* Advance around the ring. */
                if (++i == adapter->num_tx_desc)
                        i = 0;

                tx_buffer = &adapter->tx_buffer_area[i];
                tx_desc = &adapter->tx_desc_base[i];
        }
	/* Push our status-clearing writes back out for the hardware. */
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        adapter->oldest_used_tx_desc = i;

        /*
         * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack
         * that it is OK to send packets.
         * If there are no pending descriptors, clear the timeout. Otherwise,
         * if some descriptors have been freed, restart the timeout.
         */
        if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                if (num_avail == adapter->num_tx_desc)
                        ifp->if_timer = 0;
                else if (num_avail == adapter->num_tx_desc_avail)
                        ifp->if_timer = EM_TX_TIMEOUT;
        }
        adapter->num_tx_desc_avail = num_avail;
        return;
}
2766
2767/*********************************************************************
2768 *
2769 *  Get a buffer from system mbuf buffer pool.
2770 *
2771 **********************************************************************/
/*
 * Attach a receive cluster to RX ring slot `i' and program the
 * descriptor's buffer address.  If `nmp' is NULL a fresh cluster is
 * allocated; otherwise the caller's mbuf is reset and reused.
 * Returns 0, ENOBUFS (cluster allocation failed — the mbuf, if
 * supplied, is freed on a DMA load error) or a bus_dma error.
 */
static int
em_get_buf(int i, struct adapter *adapter,
           struct mbuf *nmp)
{
        struct mbuf    *mp = nmp;
        struct em_buffer *rx_buffer;
        struct ifnet   *ifp;
	bus_dma_segment_t segs[1];
	int error, nsegs;

        ifp = adapter->ifp;

        if (mp == NULL) {
                mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
                if (mp == NULL) {
                        adapter->mbuf_cluster_failed++;
                        return(ENOBUFS);
                }
                mp->m_len = mp->m_pkthdr.len = MCLBYTES;
        } else {
		/* Recycle the caller's cluster: reset length/data/chain. */
                mp->m_len = mp->m_pkthdr.len = MCLBYTES;
                mp->m_data = mp->m_ext.ext_buf;
                mp->m_next = NULL;
        }

	/* Align the payload's IP header for standard-MTU frames. */
        if (ifp->if_mtu <= ETHERMTU) {
                m_adj(mp, ETHER_ALIGN);
        }

        rx_buffer = &adapter->rx_buffer_area[i];

        /*
         * Using memory from the mbuf cluster pool, invoke the
         * bus_dma machinery to arrange the memory mapping.
         */
        error = bus_dmamap_load_mbuf_sg(adapter->rxtag, rx_buffer->map,
	    mp, segs, &nsegs, 0);
        if (error != 0) {
                m_free(mp);
                return(error);
        }
	/* If nsegs is wrong then the stack is corrupt */
	KASSERT(nsegs == 1, ("Too many segments returned!"));
        rx_buffer->m_head = mp;
        adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
        bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);

        return(0);
}
2821
2822/*********************************************************************
2823 *
2824 *  Allocate memory for rx_buffer structures. Since we use one
2825 *  rx_buffer per received packet, the maximum number of rx_buffer's
2826 *  that we'll need is equal to the number of receive descriptors
2827 *  that we've allocated.
2828 *
2829 **********************************************************************/
2830static int
2831em_allocate_receive_structures(struct adapter * adapter)
2832{
2833        int             i, error;
2834        struct em_buffer *rx_buffer;
2835
2836        if (!(adapter->rx_buffer_area =
2837              (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2838                                          adapter->num_rx_desc, M_DEVBUF,
2839                                          M_NOWAIT))) {
2840                printf("em%d: Unable to allocate rx_buffer memory\n",
2841                       adapter->unit);
2842                return(ENOMEM);
2843        }
2844
2845        bzero(adapter->rx_buffer_area,
2846              sizeof(struct em_buffer) * adapter->num_rx_desc);
2847
2848        error = bus_dma_tag_create(NULL,                /* parent */
2849                               1, 0,                    /* alignment, bounds */
2850                               BUS_SPACE_MAXADDR,       /* lowaddr */
2851                               BUS_SPACE_MAXADDR,       /* highaddr */
2852                               NULL, NULL,              /* filter, filterarg */
2853                               MCLBYTES,                /* maxsize */
2854                               1,                       /* nsegments */
2855                               MCLBYTES,                /* maxsegsize */
2856                               BUS_DMA_ALLOCNOW,        /* flags */
2857			       NULL,			/* lockfunc */
2858			       NULL,			/* lockarg */
2859                               &adapter->rxtag);
2860        if (error != 0) {
2861                printf("em%d: em_allocate_receive_structures: "
2862                        "bus_dma_tag_create failed; error %u\n",
2863                       adapter->unit, error);
2864                goto fail;
2865        }
2866
2867        rx_buffer = adapter->rx_buffer_area;
2868        for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2869                error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2870                                          &rx_buffer->map);
2871                if (error != 0) {
2872                        printf("em%d: em_allocate_receive_structures: "
2873                                "bus_dmamap_create failed; error %u\n",
2874                                adapter->unit, error);
2875                        goto fail;
2876                }
2877        }
2878
2879        for (i = 0; i < adapter->num_rx_desc; i++) {
2880                error = em_get_buf(i, adapter, NULL);
2881		if (error != 0)
2882			goto fail;
2883        }
2884        bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2885            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2886
2887        return(0);
2888
2889fail:
2890	em_free_receive_structures(adapter);
2891        return (error);
2892}
2893
2894/*********************************************************************
2895 *
2896 *  Allocate and initialize receive structures.
2897 *
2898 **********************************************************************/
2899static int
2900em_setup_receive_structures(struct adapter * adapter)
2901{
2902	bzero((void *) adapter->rx_desc_base,
2903              (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2904
2905	if (em_allocate_receive_structures(adapter))
2906		return ENOMEM;
2907
2908	/* Setup our descriptor pointers */
2909        adapter->next_rx_desc_to_check = 0;
2910	return(0);
2911}
2912
2913/*********************************************************************
2914 *
2915 *  Enable receive unit.
2916 *
2917 **********************************************************************/
static void
em_initialize_receive_unit(struct adapter * adapter)
{
	u_int32_t       reg_rctl;
	u_int32_t       reg_rxcsum;
	struct ifnet    *ifp;
	u_int64_t	bus_addr;

        INIT_DEBUGOUT("em_initialize_receive_unit: begin");
	ifp = adapter->ifp;

	/* Make sure receives are disabled while setting up the descriptor ring */
	E1000_WRITE_REG(&adapter->hw, RCTL, 0);

	/* Set the Receive Delay Timer Register */
	E1000_WRITE_REG(&adapter->hw, RDTR,
			adapter->rx_int_delay.value | E1000_RDT_FPDB);

	if(adapter->hw.mac_type >= em_82540) {
		/* 82540 and later also have an absolute RX delay timer. */
		E1000_WRITE_REG(&adapter->hw, RADV,
		    adapter->rx_abs_int_delay.value);

                /* Set the interrupt throttling rate.  Value is calculated
                 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */
#define MAX_INTS_PER_SEC        8000
#define DEFAULT_ITR             1000000000/(MAX_INTS_PER_SEC * 256)
                E1000_WRITE_REG(&adapter->hw, ITR, DEFAULT_ITR);
        }

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, RDBAL, (u_int32_t)bus_addr);
	E1000_WRITE_REG(&adapter->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
			sizeof(struct em_rx_desc));

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	E1000_WRITE_REG(&adapter->hw, RDH, 0);
	E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);

	/* Setup the Receive Control Register */
	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Store bad packets when TBI compatibility is on, so the RX path
	 * can apply the TBI_ACCEPT workaround to frames the MAC flagged. */
	if (adapter->hw.tbi_compatibility_on == TRUE)
		reg_rctl |= E1000_RCTL_SBP;


	/* Encode the configured receive buffer size into RCTL; sizes above
	 * 2048 require the buffer-size-extension (BSEX) bit. */
	switch (adapter->rx_buffer_len) {
	default:
	case EM_RXBUFFER_2048:
		reg_rctl |= E1000_RCTL_SZ_2048;
		break;
	case EM_RXBUFFER_4096:
		reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_8192:
		reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_16384:
		reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	/* Long-packet enable for jumbo frames. */
	if (ifp->if_mtu > ETHERMTU)
		reg_rctl |= E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac_type >= em_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
	}

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);

	return;
}
2999
3000/*********************************************************************
3001 *
3002 *  Free receive related data structures.
3003 *
3004 **********************************************************************/
static void
em_free_receive_structures(struct adapter *adapter)
{
        struct em_buffer   *rx_buffer;
        int             i;

        INIT_DEBUGOUT("free_receive_structures: begin");

        if (adapter->rx_buffer_area != NULL) {
                rx_buffer = adapter->rx_buffer_area;
                for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
			/* If an mbuf is attached, sync/unload its DMA map
			 * and free it; otherwise just unload the map. */
			if (rx_buffer->m_head != NULL) {
				bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(adapter->rxtag,
				    rx_buffer->map);
				m_freem(rx_buffer->m_head);
				rx_buffer->m_head = NULL;
			} else if (rx_buffer->map != NULL)
				bus_dmamap_unload(adapter->rxtag,
				    rx_buffer->map);
			/* The map itself is destroyed in either case. */
                        if (rx_buffer->map != NULL) {
				bus_dmamap_destroy(adapter->rxtag,
				    rx_buffer->map);
				rx_buffer->map = NULL;
			}
                }
        }
        /* Release the bookkeeping array and the DMA tag last; this
         * routine is also the error-path cleanup for allocation, so
         * each piece may or may not exist. */
        if (adapter->rx_buffer_area != NULL) {
                free(adapter->rx_buffer_area, M_DEVBUF);
                adapter->rx_buffer_area = NULL;
        }
        if (adapter->rxtag != NULL) {
                bus_dma_tag_destroy(adapter->rxtag);
                adapter->rxtag = NULL;
        }
        return;
}
3043
3044/*********************************************************************
3045 *
3046 *  This routine executes in interrupt context. It replenishes
3047 *  the mbufs in the descriptor and sends data which has been
3048 *  dma'ed into host memory to upper layer.
3049 *
3050 *  We loop at most count times if count is > 0, or until done if
3051 *  count < 0.
3052 *
3053 *********************************************************************/
static int
em_process_receive_interrupts(struct adapter * adapter, int count)
{
	struct ifnet        *ifp;
	struct mbuf         *mp;
	u_int8_t            accept_frame = 0;
 	u_int8_t            eop = 0;
	u_int16_t           len, desc_len, prev_len_adj;
	int                 i;

	/* Pointer to the receive descriptor being examined. */
	struct em_rx_desc   *current_desc;

	ifp = adapter->ifp;
	i = adapter->next_rx_desc_to_check;
        current_desc = &adapter->rx_desc_base[i];
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	/* Nothing ready: hardware has not marked the next descriptor done. */
	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
		return (0);
	}

	while ((current_desc->status & E1000_RXD_STAT_DD) &&
		    (count != 0) &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		struct mbuf *m = NULL;

		mp = adapter->rx_buffer_area[i].m_head;
		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(adapter->rxtag,
		    adapter->rx_buffer_area[i].map);

		accept_frame = 1;
		prev_len_adj = 0;
                desc_len = le16toh(current_desc->length);
		if (current_desc->status & E1000_RXD_STAT_EOP) {
			/* Last descriptor of a frame: count it against the
			 * budget and strip the CRC.  If this fragment is
			 * shorter than the CRC, the remainder must later be
			 * trimmed from the previous mbuf in the chain. */
			count--;
			eop = 1;
			if (desc_len < ETHER_CRC_LEN) {
                                len = 0;
                                prev_len_adj = ETHER_CRC_LEN - desc_len;
                        }
                        else {
                                len = desc_len - ETHER_CRC_LEN;
                        }
		} else {
			eop = 0;
			len = desc_len;
		}

		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			u_int8_t            last_byte;
			u_int32_t           pkt_len = desc_len;

			if (adapter->fmp != NULL)
				pkt_len += adapter->fmp->m_pkthdr.len;

			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);

			/* TBI compatibility workaround: a frame flagged in
			 * error may still be acceptable if the shared code's
			 * TBI_ACCEPT check passes; in that case drop the
			 * trailing byte and adjust the statistics. */
			if (TBI_ACCEPT(&adapter->hw, current_desc->status,
				       current_desc->errors,
				       pkt_len, last_byte)) {
				em_tbi_adjust_stats(&adapter->hw,
						    &adapter->stats,
						    pkt_len,
						    adapter->hw.mac_addr);
				if (len > 0) len--;
			}
			else {
				accept_frame = 0;
			}
		}

		if (accept_frame) {

			/* Replenish this ring slot with a fresh cluster.
			 * If none is available, recycle the mbuf we just
			 * received and drop the partially built frame. */
			if (em_get_buf(i, adapter, NULL) == ENOBUFS) {
				adapter->dropped_pkts++;
				em_get_buf(i, adapter, mp);
				if (adapter->fmp != NULL)
					m_freem(adapter->fmp);
				adapter->fmp = NULL;
				adapter->lmp = NULL;
				break;
			}

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (adapter->fmp == NULL) {
				mp->m_pkthdr.len = len;
				adapter->fmp = mp;	 /* Store the first mbuf */
				adapter->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				/*
                                 * Adjust length of previous mbuf in chain if we
                                 * received less than 4 bytes in the last descriptor.
                                 */
				if (prev_len_adj > 0) {
					adapter->lmp->m_len -= prev_len_adj;
					adapter->fmp->m_pkthdr.len -= prev_len_adj;
				}
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

                        if (eop) {
                                adapter->fmp->m_pkthdr.rcvif = ifp;
				ifp->if_ipackets++;
                                em_receive_checksum(adapter, current_desc,
                                                    adapter->fmp);
#ifndef __NO_STRICT_ALIGNMENT
				/* Realign jumbo-frame payload; on failure
				 * em_fixup_rx() has already dropped the
				 * frame and cleared adapter->fmp. */
				if (ifp->if_mtu > ETHERMTU &&
				    em_fixup_rx(adapter) != 0)
					goto skip;

#endif
                                if (current_desc->status & E1000_RXD_STAT_VP)
					VLAN_INPUT_TAG(ifp, adapter->fmp,
					    (le16toh(current_desc->special) &
					    E1000_RXD_SPC_VLAN_MASK));
#ifndef __NO_STRICT_ALIGNMENT
skip:
#endif
				m = adapter->fmp;
				adapter->fmp = NULL;
				adapter->lmp = NULL;
                        }
		} else {
			/* Frame rejected: recycle the mbuf into the ring
			 * slot and discard any accumulated chain. */
			adapter->dropped_pkts++;
			em_get_buf(i, adapter, mp);
			if (adapter->fmp != NULL)
				m_freem(adapter->fmp);
			adapter->fmp = NULL;
			adapter->lmp = NULL;
		}

		/* Zero out the receive descriptors status  */
		current_desc->status = 0;
		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

                /* Advance our pointers to the next descriptor */
		if (++i == adapter->num_rx_desc)
			i = 0;
		if (m != NULL) {
			/* Publish the ring index before handing the frame to
			 * the stack, then reload it afterwards. */
			adapter->next_rx_desc_to_check = i;
			(*ifp->if_input)(ifp, m);
			i = adapter->next_rx_desc_to_check;
		}
		current_desc = &adapter->rx_desc_base[i];
	}
	adapter->next_rx_desc_to_check = i;

	/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
	if (--i < 0) i = adapter->num_rx_desc - 1;
	E1000_WRITE_REG(&adapter->hw, RDT, i);

	/* Return 1 if more completed descriptors remain, 0 otherwise. */
	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
		return (0);
	}
	return (1);
}
3221
3222#ifndef __NO_STRICT_ALIGNMENT
3223/*
3224 * When jumbo frames are enabled we should realign entire payload on
3225 * architecures with strict alignment. This is serious design mistake of 8254x
3226 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
3227 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
3228 * payload. On architecures without strict alignment restrictions 8254x still
3229 * performs unaligned memory access which would reduce the performance too.
3230 * To avoid copying over an entire frame to align, we allocate a new mbuf and
3231 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
3232 * existing mbuf chain.
3233 *
3234 * Be aware, best performance of the 8254x is achived only when jumbo frame is
3235 * not used at all on architectures with strict alignment.
3236 */
static int
em_fixup_rx(struct adapter *adapter)
{
	struct mbuf *m, *n;
	int error;

	error = 0;
	m = adapter->fmp;
	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		/* Room left in the cluster: shift the whole fragment
		 * forward by the Ethernet header size (see the block
		 * comment above this function for the rationale). */
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
	} else {
		/* Cluster is full: instead of copying the payload, move the
		 * Ethernet header into a new mbuf prepended to the chain. */
		MGETHDR(n, M_DONTWAIT, MT_DATA);
		if (n != NULL) {
			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
			m->m_data += ETHER_HDR_LEN;
			m->m_len -= ETHER_HDR_LEN;
			n->m_len = ETHER_HDR_LEN;
			/* Transfer the packet header (and rcvif, flags)
			 * from the old head mbuf to the new one. */
			M_MOVE_PKTHDR(n, m);
			n->m_next = m;
			adapter->fmp = n;
		} else {
			/* No mbuf available: drop the frame entirely. */
			adapter->dropped_pkts++;
			m_freem(adapter->fmp);
			adapter->fmp = NULL;
			error = ENOMEM;
		}
	}

	return (error);
}
3268#endif
3269
3270/*********************************************************************
3271 *
3272 *  Verify that the hardware indicated that the checksum is valid.
3273 *  Inform the stack about the status of checksum so that stack
3274 *  doesn't spend time verifying the checksum.
3275 *
3276 *********************************************************************/
3277static void
3278em_receive_checksum(struct adapter *adapter,
3279		    struct em_rx_desc *rx_desc,
3280		    struct mbuf *mp)
3281{
3282	/* 82543 or newer only */
3283	if ((adapter->hw.mac_type < em_82543) ||
3284	    /* Ignore Checksum bit is set */
3285	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3286		mp->m_pkthdr.csum_flags = 0;
3287		return;
3288	}
3289
3290	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3291		/* Did it pass? */
3292		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3293			/* IP Checksum Good */
3294			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3295			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3296
3297		} else {
3298			mp->m_pkthdr.csum_flags = 0;
3299		}
3300	}
3301
3302	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3303		/* Did it pass? */
3304		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3305			mp->m_pkthdr.csum_flags |=
3306			(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3307			mp->m_pkthdr.csum_data = htons(0xffff);
3308		}
3309	}
3310
3311	return;
3312}
3313
3314
3315static void
3316em_enable_vlans(struct adapter *adapter)
3317{
3318	uint32_t ctrl;
3319
3320	E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
3321
3322	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3323	ctrl |= E1000_CTRL_VME;
3324	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3325
3326	return;
3327}
3328
3329static void
3330em_disable_vlans(struct adapter *adapter)
3331{
3332	uint32_t ctrl;
3333
3334	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3335	ctrl &= ~E1000_CTRL_VME;
3336	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3337
3338	return;
3339}
3340
3341static void
3342em_enable_intr(struct adapter * adapter)
3343{
3344	E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
3345	return;
3346}
3347
3348static void
3349em_disable_intr(struct adapter *adapter)
3350{
3351	/*
3352	 * The first version of 82542 had an errata where when link was forced it
3353	 * would stay up even up even if the cable was disconnected.  Sequence errors
3354	 * were used to detect the disconnect and then the driver would unforce the link.
3355	 * This code in the in the ISR.  For this to work correctly the Sequence error
3356	 * interrupt had to be enabled all the time.
3357	 */
3358
3359	if (adapter->hw.mac_type == em_82542_rev2_0)
3360	    E1000_WRITE_REG(&adapter->hw, IMC,
3361	        (0xffffffff & ~E1000_IMC_RXSEQ));
3362	else
3363	    E1000_WRITE_REG(&adapter->hw, IMC,
3364	        0xffffffff);
3365	return;
3366}
3367
3368static int
3369em_is_valid_ether_addr(u_int8_t *addr)
3370{
3371        char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3372
3373        if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3374                return (FALSE);
3375        }
3376
3377        return(TRUE);
3378}
3379
3380void
3381em_write_pci_cfg(struct em_hw *hw,
3382		      uint32_t reg,
3383		      uint16_t *value)
3384{
3385	pci_write_config(((struct em_osdep *)hw->back)->dev, reg,
3386			 *value, 2);
3387}
3388
3389void
3390em_read_pci_cfg(struct em_hw *hw, uint32_t reg,
3391		     uint16_t *value)
3392{
3393	*value = pci_read_config(((struct em_osdep *)hw->back)->dev,
3394				 reg, 2);
3395	return;
3396}
3397
3398void
3399em_pci_set_mwi(struct em_hw *hw)
3400{
3401        pci_write_config(((struct em_osdep *)hw->back)->dev,
3402                         PCIR_COMMAND,
3403                         (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
3404        return;
3405}
3406
3407void
3408em_pci_clear_mwi(struct em_hw *hw)
3409{
3410        pci_write_config(((struct em_osdep *)hw->back)->dev,
3411                         PCIR_COMMAND,
3412                         (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
3413        return;
3414}
3415
3416/*********************************************************************
3417* 82544 Coexistence issue workaround.
3418*    There are 2 issues.
3419*       1. Transmit Hang issue.
3420*    To detect this issue, following equation can be used...
3421*          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3422*          If SUM[3:0] is in between 1 to 4, we will have this issue.
3423*
3424*       2. DAC issue.
3425*    To detect this issue, following equation can be used...
3426*          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3427*          If SUM[3:0] is in between 9 to c, we will have this issue.
3428*
3429*
3430*    WORKAROUND:
3431*          Make sure we do not have ending address as 1,2,3,4(Hang) or 9,a,b,c (DAC)
3432*
3433*** *********************************************************************/
3434static u_int32_t
3435em_fill_descriptors (bus_addr_t address,
3436                              u_int32_t length,
3437                              PDESC_ARRAY desc_array)
3438{
3439        /* Since issue is sensitive to length and address.*/
3440        /* Let us first check the address...*/
3441        u_int32_t safe_terminator;
3442        if (length <= 4) {
3443                desc_array->descriptor[0].address = address;
3444                desc_array->descriptor[0].length = length;
3445                desc_array->elements = 1;
3446                return desc_array->elements;
3447        }
3448        safe_terminator = (u_int32_t)((((u_int32_t)address & 0x7) + (length & 0xF)) & 0xF);
3449        /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
3450        if (safe_terminator == 0   ||
3451        (safe_terminator > 4   &&
3452        safe_terminator < 9)   ||
3453        (safe_terminator > 0xC &&
3454        safe_terminator <= 0xF)) {
3455                desc_array->descriptor[0].address = address;
3456                desc_array->descriptor[0].length = length;
3457                desc_array->elements = 1;
3458                return desc_array->elements;
3459        }
3460
3461        desc_array->descriptor[0].address = address;
3462        desc_array->descriptor[0].length = length - 4;
3463        desc_array->descriptor[1].address = address + (length - 4);
3464        desc_array->descriptor[1].length = 4;
3465        desc_array->elements = 2;
3466        return desc_array->elements;
3467}
3468
3469/**********************************************************************
3470 *
3471 *  Update the board statistics counters.
3472 *
3473 **********************************************************************/
static void
em_update_stats_counters(struct adapter *adapter)
{
	struct ifnet   *ifp;

	/* Symbol/sequence error counters are read only on copper media,
	 * or on other media when STATUS reports link-up. */
	if(adapter->hw.media_type == em_media_type_copper ||
	   (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
		adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
	}
	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
	adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);

	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
	adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
	adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
	adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
	adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
	adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);

	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
	adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);

	adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
	adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
	adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
	adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);

	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);

	/* These counters are only present on 82543 and newer parts. */
	if (adapter->hw.mac_type >= em_82543) {
		adapter->stats.algnerrc +=
		E1000_READ_REG(&adapter->hw, ALGNERRC);
		adapter->stats.rxerrc +=
		E1000_READ_REG(&adapter->hw, RXERRC);
		adapter->stats.tncrs +=
		E1000_READ_REG(&adapter->hw, TNCRS);
		adapter->stats.cexterr +=
		E1000_READ_REG(&adapter->hw, CEXTERR);
		adapter->stats.tsctc +=
		E1000_READ_REG(&adapter->hw, TSCTC);
		adapter->stats.tsctfc +=
		E1000_READ_REG(&adapter->hw, TSCTFC);
	}
	ifp = adapter->ifp;

	/* Fold the accumulated hardware counters into ifnet statistics. */
	ifp->if_collisions = adapter->stats.colc;

	/* Rx Errors */
	ifp->if_ierrors =
	adapter->dropped_pkts +
	adapter->stats.rxerrc +
	adapter->stats.crcerrs +
	adapter->stats.algnerrc +
	adapter->stats.rlec +
	adapter->stats.mpc + adapter->stats.cexterr;

	/* Tx Errors */
	ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol +
	    adapter->watchdog_events;

}
3572
3573
3574/**********************************************************************
3575 *
3576 *  This routine is called only when em_display_debug_stats is enabled.
3577 *  This routine provides a way to take a look at important statistics
3578 *  maintained by the driver and hardware.
3579 *
3580 **********************************************************************/
static void
em_print_debug_info(struct adapter *adapter)
{
	/* Dump register and driver state to the console; invoked from the
	 * em_sysctl_debug_info handler when 1 is written to its node. */
	int unit = adapter->unit;
	uint8_t *hw_addr = adapter->hw.hw_addr;

	printf("em%d: Adapter hardware address = %p \n", unit, hw_addr);
	printf("em%d: CTRL = 0x%x RCTL = 0x%x \n", unit,
	    E1000_READ_REG(&adapter->hw, CTRL),
	    E1000_READ_REG(&adapter->hw, RCTL));
	printf("em%d: Packet buffer = Tx=%dk Rx=%dk \n", unit,
	    ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff0000) >> 16),\
	    (E1000_READ_REG(&adapter->hw, PBA) & 0xffff) );
	printf("em%d: Flow control watermarks high = %d low = %d\n", unit,
	    adapter->hw.fc_high_water,
	    adapter->hw.fc_low_water);
	printf("em%d: tx_int_delay = %d, tx_abs_int_delay = %d\n", unit,
	    E1000_READ_REG(&adapter->hw, TIDV),
	    E1000_READ_REG(&adapter->hw, TADV));
	printf("em%d: rx_int_delay = %d, rx_abs_int_delay = %d\n", unit,
	    E1000_READ_REG(&adapter->hw, RDTR),
	    E1000_READ_REG(&adapter->hw, RADV));
	printf("em%d: fifo workaround = %lld, fifo_reset_count = %lld\n",
	    unit, (long long)adapter->tx_fifo_wrk_cnt,
	    (long long)adapter->tx_fifo_reset_cnt);
	printf("em%d: hw tdh = %d, hw tdt = %d\n", unit,
	    E1000_READ_REG(&adapter->hw, TDH),
	    E1000_READ_REG(&adapter->hw, TDT));
	printf("em%d: Num Tx descriptors avail = %d\n", unit,
	    adapter->num_tx_desc_avail);
	printf("em%d: Tx Descriptors not avail1 = %ld\n", unit,
	    adapter->no_tx_desc_avail1);
	printf("em%d: Tx Descriptors not avail2 = %ld\n", unit,
	    adapter->no_tx_desc_avail2);
	printf("em%d: Std mbuf failed = %ld\n", unit,
	    adapter->mbuf_alloc_failed);
	printf("em%d: Std mbuf cluster failed = %ld\n", unit,
	    adapter->mbuf_cluster_failed);
	printf("em%d: Driver dropped packets = %ld\n", unit,
	    adapter->dropped_pkts);

	return;
}
3624
static void
em_print_hw_stats(struct adapter *adapter)
{
        /* Dump the accumulated MAC statistics counters to the console;
         * invoked from the em_sysctl_stats handler. */
        int unit = adapter->unit;

        printf("em%d: Excessive collisions = %lld\n", unit,
               (long long)adapter->stats.ecol);
        printf("em%d: Symbol errors = %lld\n", unit,
               (long long)adapter->stats.symerrs);
        printf("em%d: Sequence errors = %lld\n", unit,
               (long long)adapter->stats.sec);
        printf("em%d: Defer count = %lld\n", unit,
               (long long)adapter->stats.dc);

        printf("em%d: Missed Packets = %lld\n", unit,
               (long long)adapter->stats.mpc);
        printf("em%d: Receive No Buffers = %lld\n", unit,
               (long long)adapter->stats.rnbc);
        printf("em%d: Receive length errors = %lld\n", unit,
               (long long)adapter->stats.rlec);
        printf("em%d: Receive errors = %lld\n", unit,
               (long long)adapter->stats.rxerrc);
        printf("em%d: Crc errors = %lld\n", unit,
               (long long)adapter->stats.crcerrs);
        printf("em%d: Alignment errors = %lld\n", unit,
               (long long)adapter->stats.algnerrc);
        printf("em%d: Carrier extension errors = %lld\n", unit,
               (long long)adapter->stats.cexterr);
	printf("em%d: RX overruns = %ld\n", unit, adapter->rx_overruns);
	printf("em%d: watchdog timeouts = %ld\n", unit,
		adapter->watchdog_events);

        printf("em%d: XON Rcvd = %lld\n", unit,
               (long long)adapter->stats.xonrxc);
        printf("em%d: XON Xmtd = %lld\n", unit,
               (long long)adapter->stats.xontxc);
        printf("em%d: XOFF Rcvd = %lld\n", unit,
               (long long)adapter->stats.xoffrxc);
        printf("em%d: XOFF Xmtd = %lld\n", unit,
               (long long)adapter->stats.xofftxc);

        printf("em%d: Good Packets Rcvd = %lld\n", unit,
               (long long)adapter->stats.gprc);
        printf("em%d: Good Packets Xmtd = %lld\n", unit,
               (long long)adapter->stats.gptc);

        return;
}
3673
3674static int
3675em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3676{
3677        int error;
3678        int result;
3679        struct adapter *adapter;
3680
3681        result = -1;
3682        error = sysctl_handle_int(oidp, &result, 0, req);
3683
3684        if (error || !req->newptr)
3685                return (error);
3686
3687        if (result == 1) {
3688                adapter = (struct adapter *)arg1;
3689                em_print_debug_info(adapter);
3690        }
3691
3692        return error;
3693}
3694
3695
3696static int
3697em_sysctl_stats(SYSCTL_HANDLER_ARGS)
3698{
3699        int error;
3700        int result;
3701        struct adapter *adapter;
3702
3703        result = -1;
3704        error = sysctl_handle_int(oidp, &result, 0, req);
3705
3706        if (error || !req->newptr)
3707                return (error);
3708
3709        if (result == 1) {
3710                adapter = (struct adapter *)arg1;
3711                em_print_hw_stats(adapter);
3712        }
3713
3714        return error;
3715}
3716
/*
 * Sysctl handler for the interrupt-delay tunables (TIDV/TADV/RDTR/RADV).
 * arg1 is a struct em_int_delay_info that records which register this
 * node controls (offset) and the current value in microseconds (value).
 * A write converts the new microsecond value to hardware ticks and
 * performs a locked read-modify-write of the low 16 bits of the register.
 */
static int
em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
{
	struct em_int_delay_info *info;
	struct adapter *adapter;
	u_int32_t regval;
	int error;
	int usecs;
	int ticks;

	info = (struct em_int_delay_info *)arg1;
	usecs = info->value;
	error = sysctl_handle_int(oidp, &usecs, 0, req);
	/* A plain read, or a failed userland copy: nothing to update. */
	if (error != 0 || req->newptr == NULL)
		return error;
	/* The delay field is 16 bits wide; reject values that won't fit. */
	if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
		return EINVAL;
	info->value = usecs;
	ticks = E1000_USECS_TO_TICKS(usecs);

	adapter = info->adapter;

	/* Read-modify-write only the low 16 delay bits under the lock. */
	EM_LOCK(adapter);
	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
	regval = (regval & ~0xffff) | (ticks & 0xffff);
	/* Handle a few special cases. */
	switch (info->offset) {
	case E1000_RDTR:
	case E1000_82542_RDTR:
		/* Preserve the flush-partial-descriptor-block behavior. */
		regval |= E1000_RDT_FPDB;
		break;
	case E1000_TIDV:
	case E1000_82542_TIDV:
		if (ticks == 0) {
			/*
			 * A zero delay disables the IDE bit in transmit
			 * descriptors instead of disabling delay outright.
			 */
			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
			/* Don't write 0 into the TIDV register. */
			regval++;
		} else
			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
		break;
	}
	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
	EM_UNLOCK(adapter);
	return 0;
}
3762
/*
 * Register one interrupt-delay sysctl node under the device's tree.
 * Fills in the em_int_delay_info cookie (adapter back-pointer, register
 * offset, initial value in microseconds) and attaches it as arg1 of a
 * CTLTYPE_INT proc node serviced by em_sysctl_int_delay().
 */
static void
em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
    const char *description, struct em_int_delay_info *info,
    int offset, int value)
{
	info->adapter = adapter;
	info->offset = offset;
	info->value = value;
	/* SYSCTL_ADD_PROC requires the CTLTYPE in the access argument. */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
	    info, 0, em_sysctl_int_delay, "I", description);
}
3776
3777#ifndef NO_EM_FASTINTR
3778static void
3779em_add_int_process_limit(struct adapter *adapter, const char *name,
3780    const char *description, int *limit, int value)
3781{
3782	*limit = value;
3783	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
3784	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
3785	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
3786}
3787#endif
3788