if_em.c revision 152645
1/**************************************************************************
2
3Copyright (c) 2001-2005, Intel Corporation
4All rights reserved.
5
6Redistribution and use in source and binary forms, with or without
7modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10    this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13    notice, this list of conditions and the following disclaimer in the
14    documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17    contributors may be used to endorse or promote products derived from
18    this software without specific prior written permission.
19
20THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30POSSIBILITY OF SUCH DAMAGE.
31
32***************************************************************************/
33
34/*$FreeBSD: head/sys/dev/em/if_em.c 152645 2005-11-21 04:17:43Z yongari $*/
35
36#ifdef HAVE_KERNEL_OPTION_HEADERS
37#include "opt_device_polling.h"
38#endif
39
40#include <dev/em/if_em.h>
41
42/*********************************************************************
43 *  Set this to one to display debug statistics
44 *********************************************************************/
int             em_display_debug_stats = 0;	/* non-zero enables debug statistics output (see banner above) */
46
47/*********************************************************************
48 *  Driver version
49 *********************************************************************/
50
/* Version string appended to the device description in em_probe(). */
char em_driver_version[] = "2.1.7";
52
53
54/*********************************************************************
55 *  PCI Device ID Table
56 *
57 *  Used by probe to select devices to load on
58 *  Last field stores an index into em_strings
59 *  Last entry must be all 0s
60 *
61 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
62 *********************************************************************/
63
64static em_vendor_info_t em_vendor_info_array[] =
65{
66        /* Intel(R) PRO/1000 Network Connection */
67        { 0x8086, E1000_DEV_ID_82540EM,             PCI_ANY_ID, PCI_ANY_ID, 0},
68        { 0x8086, E1000_DEV_ID_82540EM_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
69        { 0x8086, E1000_DEV_ID_82540EP,             PCI_ANY_ID, PCI_ANY_ID, 0},
70        { 0x8086, E1000_DEV_ID_82540EP_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
71        { 0x8086, E1000_DEV_ID_82540EP_LP,          PCI_ANY_ID, PCI_ANY_ID, 0},
72
73        { 0x8086, E1000_DEV_ID_82541EI,             PCI_ANY_ID, PCI_ANY_ID, 0},
74        { 0x8086, E1000_DEV_ID_82541ER,             PCI_ANY_ID, PCI_ANY_ID, 0},
75        { 0x8086, E1000_DEV_ID_82541ER_LOM,             PCI_ANY_ID, PCI_ANY_ID, 0},
76        { 0x8086, E1000_DEV_ID_82541EI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
77        { 0x8086, E1000_DEV_ID_82541GI,             PCI_ANY_ID, PCI_ANY_ID, 0},
78        { 0x8086, E1000_DEV_ID_82541GI_LF,          PCI_ANY_ID, PCI_ANY_ID, 0},
79        { 0x8086, E1000_DEV_ID_82541GI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
80
81        { 0x8086, E1000_DEV_ID_82542,               PCI_ANY_ID, PCI_ANY_ID, 0},
82
83        { 0x8086, E1000_DEV_ID_82543GC_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
84        { 0x8086, E1000_DEV_ID_82543GC_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
85
86        { 0x8086, E1000_DEV_ID_82544EI_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
87        { 0x8086, E1000_DEV_ID_82544EI_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
88        { 0x8086, E1000_DEV_ID_82544GC_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
89        { 0x8086, E1000_DEV_ID_82544GC_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
90
91        { 0x8086, E1000_DEV_ID_82545EM_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
92        { 0x8086, E1000_DEV_ID_82545EM_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
93        { 0x8086, E1000_DEV_ID_82545GM_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
94        { 0x8086, E1000_DEV_ID_82545GM_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
95        { 0x8086, E1000_DEV_ID_82545GM_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
96
97        { 0x8086, E1000_DEV_ID_82546EB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
98        { 0x8086, E1000_DEV_ID_82546EB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
99        { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
100        { 0x8086, E1000_DEV_ID_82546GB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
101        { 0x8086, E1000_DEV_ID_82546GB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
102        { 0x8086, E1000_DEV_ID_82546GB_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
103        { 0x8086, E1000_DEV_ID_82546GB_PCIE,        PCI_ANY_ID, PCI_ANY_ID, 0},
104        { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
105
106        { 0x8086, E1000_DEV_ID_82547EI,             PCI_ANY_ID, PCI_ANY_ID, 0},
107        { 0x8086, E1000_DEV_ID_82547EI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
108        { 0x8086, E1000_DEV_ID_82547GI,             PCI_ANY_ID, PCI_ANY_ID, 0},
109
110        { 0x8086, E1000_DEV_ID_82573E,              PCI_ANY_ID, PCI_ANY_ID, 0},
111        { 0x8086, E1000_DEV_ID_82573E_IAMT,         PCI_ANY_ID, PCI_ANY_ID, 0},
112
113        /* required last entry */
114        { 0, 0, 0, 0, 0}
115};
116
117/*********************************************************************
118 *  Table of branding strings for all supported NICs.
119 *********************************************************************/
120
static char *em_strings[] = {
	/* index 0 — referenced by the last field of em_vendor_info_array */
	"Intel(R) PRO/1000 Network Connection"
};
124
125/*********************************************************************
126 *  Function prototypes
127 *********************************************************************/
/* newbus device entry points */
static int  em_probe(device_t);
static int  em_attach(device_t);
static int  em_detach(device_t);
static int  em_shutdown(device_t);
/* interrupt / ifnet entry points */
static void em_intr(void *);
static void em_start(struct ifnet *);
static int  em_ioctl(struct ifnet *, u_long, caddr_t);
static void em_watchdog(struct ifnet *);
static void em_init(void *);
static void em_init_locked(struct adapter *);
static void em_stop(void *);
static void em_media_status(struct ifnet *, struct ifmediareq *);
static int  em_media_change(struct ifnet *);
/* hardware bring-up and resource management */
static void em_identify_hardware(struct adapter *);
static int  em_allocate_pci_resources(struct adapter *);
static void em_free_pci_resources(struct adapter *);
static void em_local_timer(void *);
static int  em_hardware_init(struct adapter *);
static void em_setup_interface(device_t, struct adapter *);
/* descriptor ring setup/teardown */
static int  em_setup_transmit_structures(struct adapter *);
static void em_initialize_transmit_unit(struct adapter *);
static int  em_setup_receive_structures(struct adapter *);
static void em_initialize_receive_unit(struct adapter *);
static void em_enable_intr(struct adapter *);
static void em_disable_intr(struct adapter *);
static void em_free_transmit_structures(struct adapter *);
static void em_free_receive_structures(struct adapter *);
static void em_update_stats_counters(struct adapter *);
static void em_clean_transmit_interrupts(struct adapter *);
static int  em_allocate_receive_structures(struct adapter *);
static int  em_allocate_transmit_structures(struct adapter *);
/* rx/tx datapath helpers */
static void em_process_receive_interrupts(struct adapter *, int);
static void em_receive_checksum(struct adapter *,
				struct em_rx_desc *,
				struct mbuf *);
static void em_transmit_checksum_setup(struct adapter *,
				       struct mbuf *,
				       u_int32_t *,
				       u_int32_t *);
static void em_set_promisc(struct adapter *);
static void em_disable_promisc(struct adapter *);
static void em_set_multi(struct adapter *);
static void em_print_hw_stats(struct adapter *);
static void em_print_link_status(struct adapter *);
static int  em_get_buf(int i, struct adapter *,
		       struct mbuf *);
static void em_enable_vlans(struct adapter *);
static void em_disable_vlans(struct adapter *);
static int  em_encap(struct adapter *, struct mbuf **);
static void em_smartspeed(struct adapter *);
/* 82547 TX FIFO workaround (see em_init_locked fifo setup) */
static int  em_82547_fifo_workaround(struct adapter *, int);
static void em_82547_update_fifo_head(struct adapter *, int);
static int  em_82547_tx_fifo_reset(struct adapter *);
static void em_82547_move_tail(void *arg);
static void em_82547_move_tail_locked(struct adapter *);
/* DMA memory helpers used for the descriptor rings */
static int  em_dma_malloc(struct adapter *, bus_size_t,
			  struct em_dma_alloc *, int);
static void em_dma_free(struct adapter *, struct em_dma_alloc *);
static void em_print_debug_info(struct adapter *);
static int  em_is_valid_ether_addr(u_int8_t *);
/* sysctl handlers registered in em_attach() */
static int  em_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int  em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static u_int32_t em_fill_descriptors (bus_addr_t address,
				      u_int32_t length,
				      PDESC_ARRAY desc_array);
static int  em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static void em_add_int_delay_sysctl(struct adapter *, const char *,
				    const char *, struct em_int_delay_info *,
				    int, int);
#ifdef DEVICE_POLLING
static poll_handler_t em_poll;
#endif
200
201/*********************************************************************
202 *  FreeBSD Device Interface Entry Points
203 *********************************************************************/
204
static device_method_t em_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, em_probe),
	DEVMETHOD(device_attach, em_attach),
	DEVMETHOD(device_detach, em_detach),
	DEVMETHOD(device_shutdown, em_shutdown),
	{0, 0}	/* required terminator */
};

/* newbus asks for sizeof(struct adapter) so the softc is allocated for us */
static driver_t em_driver = {
	"em", em_methods, sizeof(struct adapter ),
};

static devclass_t em_devclass;
/* Register on the PCI bus; depend on pci and ether so load order is right. */
DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
MODULE_DEPEND(em, pci, 1, 1, 1);
MODULE_DEPEND(em, ether, 1, 1, 1);
222
223/*********************************************************************
224 *  Tunable default values.
225 *********************************************************************/
226
/*
 * The interrupt-delay registers count in units of 1.024 usec; these
 * macros convert between hardware ticks and microseconds, rounding
 * to nearest (+500 / +512 before the divide).
 */
#define E1000_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
#define E1000_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)

/* Defaults (in usecs / descriptor counts); overridable via loader tunables. */
static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
static int em_rxd = EM_DEFAULT_RXD;
static int em_txd = EM_DEFAULT_TXD;

TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rxd", &em_rxd);
TUNABLE_INT("hw.em.txd", &em_txd);
243
244/*********************************************************************
245 *  Device identification routine
246 *
247 *  em_probe determines if the driver should be loaded on
248 *  adapter based on PCI vendor/device id of the adapter.
249 *
250 *  return BUS_PROBE_DEFAULT on success, positive on failure
251 *********************************************************************/
252
253static int
254em_probe(device_t dev)
255{
256	em_vendor_info_t *ent;
257
258	u_int16_t       pci_vendor_id = 0;
259	u_int16_t       pci_device_id = 0;
260	u_int16_t       pci_subvendor_id = 0;
261	u_int16_t       pci_subdevice_id = 0;
262	char            adapter_name[60];
263
264	INIT_DEBUGOUT("em_probe: begin");
265
266	pci_vendor_id = pci_get_vendor(dev);
267	if (pci_vendor_id != EM_VENDOR_ID)
268		return(ENXIO);
269
270	pci_device_id = pci_get_device(dev);
271	pci_subvendor_id = pci_get_subvendor(dev);
272	pci_subdevice_id = pci_get_subdevice(dev);
273
274	ent = em_vendor_info_array;
275	while (ent->vendor_id != 0) {
276		if ((pci_vendor_id == ent->vendor_id) &&
277		    (pci_device_id == ent->device_id) &&
278
279		    ((pci_subvendor_id == ent->subvendor_id) ||
280		     (ent->subvendor_id == PCI_ANY_ID)) &&
281
282		    ((pci_subdevice_id == ent->subdevice_id) ||
283		     (ent->subdevice_id == PCI_ANY_ID))) {
284			sprintf(adapter_name, "%s, Version - %s",
285				em_strings[ent->index],
286				em_driver_version);
287			device_set_desc_copy(dev, adapter_name);
288			return(BUS_PROBE_DEFAULT);
289		}
290		ent++;
291	}
292
293	return(ENXIO);
294}
295
296/*********************************************************************
297 *  Device initialization routine
298 *
299 *  The attach entry point is called when the driver is being loaded.
300 *  This routine identifies the type of hardware, allocates all resources
301 *  and initializes the hardware.
302 *
303 *  return 0 on success, positive on failure
304 *********************************************************************/
305
static int
em_attach(device_t dev)
{
	struct adapter * adapter;
	int             tsize, rsize;
	int		error = 0;

	INIT_DEBUGOUT("em_attach: begin");

	/*
	 * Allocate, clear, and link in our adapter structure.
	 * (The softc is allocated by newbus per the sizeof in em_driver;
	 * device_get_softc() just fetches it, so failure is unexpected.)
	 */
	if (!(adapter = device_get_softc(dev))) {
		printf("em: adapter structure allocation failed\n");
		return(ENOMEM);
	}
	bzero(adapter, sizeof(struct adapter ));
	adapter->dev = dev;
	adapter->osdep.dev = dev;
	adapter->unit = device_get_unit(dev);
	EM_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL stuff: dev.em.N.debug_info and dev.em.N.stats handlers */
        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                        OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
                        (void *)adapter, 0,
                        em_sysctl_debug_info, "I", "Debug Information");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                        OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
                        (void *)adapter, 0,
                        em_sysctl_stats, "I", "Statistics");

	/* MPSAFE callouts: em_local_timer and the 82547 tx fifo workaround */
	callout_init(&adapter->timer, CALLOUT_MPSAFE);
	callout_init(&adapter->tx_fifo_timer, CALLOUT_MPSAFE);

	/* Determine hardware revision (fills adapter->hw.mac_type) */
	em_identify_hardware(adapter);

	/*
	 * Set up some sysctls for the tunable interrupt delays.
	 * The absolute-delay registers (RADV/TADV) only exist on
	 * 82540 and newer, hence the mac_type gate below.
	 */
	em_add_int_delay_sysctl(adapter, "rx_int_delay",
	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
	    E1000_REG_OFFSET(&adapter->hw, RDTR), em_rx_int_delay_dflt);
	em_add_int_delay_sysctl(adapter, "tx_int_delay",
	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
	    E1000_REG_OFFSET(&adapter->hw, TIDV), em_tx_int_delay_dflt);
	if (adapter->hw.mac_type >= em_82540) {
		em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
		    "receive interrupt delay limit in usecs",
		    &adapter->rx_abs_int_delay,
		    E1000_REG_OFFSET(&adapter->hw, RADV),
		    em_rx_abs_int_delay_dflt);
		em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
		    "transmit interrupt delay limit in usecs",
		    &adapter->tx_abs_int_delay,
		    E1000_REG_OFFSET(&adapter->hw, TADV),
		    em_tx_abs_int_delay_dflt);
	}

	/*
	 * Validate number of transmit and receive descriptors. It
	 * must not exceed hardware maximum, and must be multiple
	 * of E1000_DBA_ALIGN.  Out-of-range tunables fall back to the
	 * compile-time default with a console warning.
	 */
	if (((em_txd * sizeof(struct em_tx_desc)) % E1000_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac_type >= em_82544 && em_txd > EM_MAX_TXD) ||
	    (adapter->hw.mac_type < em_82544 && em_txd > EM_MAX_TXD_82543) ||
	    (em_txd < EM_MIN_TXD)) {
		printf("em%d: Using %d TX descriptors instead of %d!\n",
		    adapter->unit, EM_DEFAULT_TXD, em_txd);
		adapter->num_tx_desc = EM_DEFAULT_TXD;
	} else
		adapter->num_tx_desc = em_txd;
	if (((em_rxd * sizeof(struct em_rx_desc)) % E1000_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac_type >= em_82544 && em_rxd > EM_MAX_RXD) ||
	    (adapter->hw.mac_type < em_82544 && em_rxd > EM_MAX_RXD_82543) ||
	    (em_rxd < EM_MIN_RXD)) {
		printf("em%d: Using %d RX descriptors instead of %d!\n",
		    adapter->unit, EM_DEFAULT_RXD, em_rxd);
		adapter->num_rx_desc = EM_DEFAULT_RXD;
	} else
		adapter->num_rx_desc = em_rxd;

	/* Link negotiation / RX buffer defaults */
        adapter->hw.autoneg = DO_AUTO_NEG;
        adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
        adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
        adapter->hw.tbi_compatibility_en = TRUE;
        adapter->rx_buffer_len = EM_RXBUFFER_2048;

	/*
         * These parameters control the automatic generation(Tx) and
         * response(Rx) to Ethernet PAUSE frames.
         */
        adapter->hw.fc_high_water = FC_DEFAULT_HI_THRESH;
        adapter->hw.fc_low_water  = FC_DEFAULT_LO_THRESH;
        adapter->hw.fc_pause_time = FC_DEFAULT_TX_TIMER;
        adapter->hw.fc_send_xon   = TRUE;
        adapter->hw.fc = em_fc_full;

	adapter->hw.phy_init_script = 1;
	adapter->hw.phy_reset_disable = FALSE;

#ifndef EM_MASTER_SLAVE
	adapter->hw.master_slave = em_ms_hw_default;
#else
	adapter->hw.master_slave = EM_MASTER_SLAVE;
#endif
	/*
	 * Set the max frame size assuming standard ethernet
	 * sized frames (adjusted later if the MTU changes via ioctl)
	 */
	adapter->hw.max_frame_size =
		ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	adapter->hw.min_frame_size =
		MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;

	/*
	 * This controls when hardware reports transmit completion
	 * status.
	 */
	adapter->hw.report_tx_early = 1;


	if (em_allocate_pci_resources(adapter)) {
		printf("em%d: Allocation of PCI resources failed\n",
		       adapter->unit);
                error = ENXIO;
                goto err_pci;
	}


	/* Initialize eeprom parameters */
        em_init_eeprom_params(&adapter->hw);

	/* Ring sizes must be E1000_DBA_ALIGN-aligned for the hardware */
	tsize = roundup2(adapter->num_tx_desc * sizeof(struct em_tx_desc),
	    E1000_DBA_ALIGN);

	/* Allocate Transmit Descriptor ring */
        if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
                printf("em%d: Unable to allocate tx_desc memory\n",
                       adapter->unit);
		error = ENOMEM;
                goto err_tx_desc;
        }
        adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;

	rsize = roundup2(adapter->num_rx_desc * sizeof(struct em_rx_desc),
	    E1000_DBA_ALIGN);

	/* Allocate Receive Descriptor ring */
        if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
                printf("em%d: Unable to allocate rx_desc memory\n",
                        adapter->unit);
		error = ENOMEM;
                goto err_rx_desc;
        }
        adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		printf("em%d: Unable to initialize the hardware\n",
		       adapter->unit);
		error = EIO;
                goto err_hw_init;
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (em_read_mac_addr(&adapter->hw) < 0) {
		printf("em%d: EEPROM read error while reading mac address\n",
		       adapter->unit);
		error = EIO;
                goto err_mac_addr;
	}

	if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
                printf("em%d: Invalid mac address\n", adapter->unit);
                error = EIO;
                goto err_mac_addr;
        }

	/* Setup OS specific network interface */
	em_setup_interface(dev, adapter);

	/* Initialize statistics */
	em_clear_hw_cntrs(&adapter->hw);
	em_update_stats_counters(adapter);
	adapter->hw.get_link_status = 1;
	em_check_for_link(&adapter->hw);

	if (bootverbose) {
		/* Print the link status */
		if (adapter->link_active == 1) {
			em_get_speed_and_duplex(&adapter->hw,
			    &adapter->link_speed, &adapter->link_duplex);
			printf("em%d:  Speed:%d Mbps  Duplex:%s\n",
			       adapter->unit,
			       adapter->link_speed,
			       adapter->link_duplex == FULL_DUPLEX ? "Full" :
				"Half");
		} else
			printf("em%d:  Speed:N/A  Duplex:N/A\n",
			    adapter->unit);
	}

	/* Identify 82544 on PCIX (needs special tx handling elsewhere) */
        em_get_bus_info(&adapter->hw);
        if(adapter->hw.bus_type == em_bus_type_pcix &&
           adapter->hw.mac_type == em_82544) {
                adapter->pcix_82544 = TRUE;
        }
        else {
                adapter->pcix_82544 = FALSE;
        }
	INIT_DEBUGOUT("em_attach: end");
	return(0);

	/*
	 * Error unwind: labels run in reverse order of acquisition;
	 * each label frees everything acquired after the failure point.
	 */
err_mac_addr:
err_hw_init:
        em_dma_free(adapter, &adapter->rxdma);
err_rx_desc:
        em_dma_free(adapter, &adapter->txdma);
err_tx_desc:
err_pci:
        em_free_pci_resources(adapter);
	EM_LOCK_DESTROY(adapter);
        return(error);

}
535
536/*********************************************************************
537 *  Device removal routine
538 *
539 *  The detach entry point is called when the driver is being removed.
540 *  This routine stops the adapter and deallocates all the resources
541 *  that were allocated for driver operation.
542 *
543 *  return 0 on success, positive on failure
544 *********************************************************************/
545
static int
em_detach(device_t dev)
{
	struct adapter * adapter = device_get_softc(dev);
	struct ifnet   *ifp = adapter->ifp;

	INIT_DEBUGOUT("em_detach: begin");

#ifdef DEVICE_POLLING
	/* Stop polling callbacks before tearing anything down */
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	EM_LOCK(adapter);
	/* in_detach makes em_ioctl() bail out early during teardown */
	adapter->in_detach = 1;
	em_stop(adapter);
	em_phy_hw_reset(&adapter->hw);
	EM_UNLOCK(adapter);
	/* Detach from the network stack before freeing resources */
        ether_ifdetach(adapter->ifp);

	em_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(ifp);

	/* Free Transmit Descriptor ring */
        if (adapter->tx_desc_base) {
                em_dma_free(adapter, &adapter->txdma);
                adapter->tx_desc_base = NULL;
        }

        /* Free Receive Descriptor ring */
        if (adapter->rx_desc_base) {
                em_dma_free(adapter, &adapter->rxdma);
                adapter->rx_desc_base = NULL;
        }

	EM_LOCK_DESTROY(adapter);

	return(0);
}
586
587/*********************************************************************
588 *
589 *  Shutdown entry point
590 *
591 **********************************************************************/
592
593static int
594em_shutdown(device_t dev)
595{
596	struct adapter *adapter = device_get_softc(dev);
597	EM_LOCK(adapter);
598	em_stop(adapter);
599	EM_UNLOCK(adapter);
600	return(0);
601}
602
603
604/*********************************************************************
605 *  Transmit entry point
606 *
607 *  em_start is called by the stack to initiate a transmit.
608 *  The driver will remain in this routine as long as there are
609 *  packets to transmit and transmit resources are available.
610 *  In case resources are not available stack is notified and
611 *  the packet is requeued.
612 **********************************************************************/
613
614static void
615em_start_locked(struct ifnet *ifp)
616{
617        struct mbuf    *m_head;
618        struct adapter *adapter = ifp->if_softc;
619
620	mtx_assert(&adapter->mtx, MA_OWNED);
621
622        if (!adapter->link_active)
623                return;
624
625        while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
626
627                IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
628
629                if (m_head == NULL) break;
630
631		/*
632		 * em_encap() can modify our pointer, and or make it NULL on
633		 * failure.  In that event, we can't requeue.
634		 */
635		if (em_encap(adapter, &m_head)) {
636			if (m_head == NULL)
637				break;
638			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
639			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
640			break;
641                }
642
643		/* Send a copy of the frame to the BPF listener */
644		BPF_MTAP(ifp, m_head);
645
646                /* Set timeout in case hardware has problems transmitting */
647                ifp->if_timer = EM_TX_TIMEOUT;
648
649        }
650        return;
651}
652
653static void
654em_start(struct ifnet *ifp)
655{
656	struct adapter *adapter = ifp->if_softc;
657
658	EM_LOCK(adapter);
659	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
660		em_start_locked(ifp);
661	EM_UNLOCK(adapter);
662	return;
663}
664
665/*********************************************************************
666 *  Ioctl entry point
667 *
668 *  em_ioctl is called when the user wants to configure the
669 *  interface.
670 *
671 *  return 0 on success, positive on failure
672 **********************************************************************/
673
static int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	int             mask, reinit, error = 0;
	struct ifreq   *ifr = (struct ifreq *) data;
	struct adapter * adapter = ifp->if_softc;

	/* Refuse everything once em_detach() has started tearing down */
	if (adapter->in_detach) return(error);

	switch (command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
		ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
#ifndef __NO_STRICT_ALIGNMENT
		if (ifr->ifr_mtu > ETHERMTU) {
			/*
			 * XXX
			 * Due to the limitation of DMA engine, it needs fix-up
			 * code for strict alignment architectures. Disable
			 * jumbo frame until we have better solutions.
			 */
			error = EINVAL;
		} else
#endif
		if (ifr->ifr_mtu > MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN || \
			/* 82573 does not support jumbo frames */
			(adapter->hw.mac_type == em_82573 && ifr->ifr_mtu > ETHERMTU) ) {
			error = EINVAL;
		} else {
			/* New MTU requires a full reinit to resize buffers */
			EM_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->hw.max_frame_size =
			ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
			em_init_locked(adapter);
			EM_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
		EM_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				em_init_locked(adapter);
			}

			/* Re-evaluate promiscuous mode from current flags */
			em_disable_promisc(adapter);
			em_set_promisc(adapter);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				em_stop(adapter);
			}
		}
		EM_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			EM_LOCK(adapter);
			em_disable_intr(adapter);
			em_set_multi(adapter);
			/* 82542 rev2.0 needs the RX unit reprogrammed too */
			if (adapter->hw.mac_type == em_82542_rev2_0) {
				em_initialize_receive_unit(adapter);
			}
#ifdef DEVICE_POLLING
			/* Leave interrupts off while polling is active */
                        if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
				em_enable_intr(adapter);
			EM_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		reinit = 0;
		/* mask holds only the capability bits the caller is toggling */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(em_poll, ifp);
				if (error)
					return(error);
				EM_LOCK(adapter);
				em_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				EM_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				EM_LOCK(adapter);
				em_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				EM_UNLOCK(adapter);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		/* Checksum/VLAN offload changes take effect on reinit */
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
			em_init(adapter);
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)", (int)command);
		error = EINVAL;
	}

	return(error);
}
796
797/*********************************************************************
798 *  Watchdog entry point
799 *
800 *  This routine is called whenever hardware quits transmitting.
801 *
802 **********************************************************************/
803
804static void
805em_watchdog(struct ifnet *ifp)
806{
807	struct adapter * adapter;
808	adapter = ifp->if_softc;
809
810	EM_LOCK(adapter);
811	/* If we are in this routine because of pause frames, then
812	 * don't reset the hardware.
813	 */
814	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
815		ifp->if_timer = EM_TX_TIMEOUT;
816		EM_UNLOCK(adapter);
817		return;
818	}
819
820	if (em_check_for_link(&adapter->hw))
821		printf("em%d: watchdog timeout -- resetting\n", adapter->unit);
822
823	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
824	adapter->watchdog_events++;
825
826	em_init_locked(adapter);
827	EM_UNLOCK(adapter);
828}
829
830/*********************************************************************
831 *  Init entry point
832 *
833 *  This routine is used in two ways. It is used by the stack as
834 *  init entry point in network interface structure. It is also used
835 *  by the driver as a hw/sw initialization routine to get to a
836 *  consistent state.
837 *
838 *  return 0 on success, positive on failure
839 **********************************************************************/
840
static void
em_init_locked(struct adapter * adapter)
{
	struct ifnet   *ifp;

	uint32_t	pba;
	ifp = adapter->ifp;

	INIT_DEBUGOUT("em_init: begin");

	/* The adapter lock must be held across the whole re-init. */
	mtx_assert(&adapter->mtx, MA_OWNED);

	/* Quiesce the chip and free the old rings before reprogramming. */
	em_stop(adapter);

	/* Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 *   Note: default does not leave enough room for Jumbo Frame >10k.
	 */
	if(adapter->hw.mac_type < em_82547) {
		/* Total FIFO is 64K */
		if(adapter->rx_buffer_len > EM_RXBUFFER_8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	} else {
		/* Total FIFO is 40K */
		if(adapter->hw.max_frame_size > EM_RXBUFFER_8192) {
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		} else {
		        pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		}
		/* Record the on-chip Tx FIFO region (follows the Rx
		 * portion) for the 82547 half-duplex hang workaround. */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
	}
	INIT_DEBUGOUT1("em_init: pba=%dK",pba);
	E1000_WRITE_REG(&adapter->hw, PBA, pba);

	/* Get the latest mac address, User can use a LAA */
        bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac_addr,
              ETHER_ADDR_LEN);

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		printf("em%d: Unable to initialize the hardware\n",
		       adapter->unit);
		/* em_stop() above already cleared IFF_DRV_RUNNING. */
		return;
	}

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		em_enable_vlans(adapter);

	/* Prepare transmit descriptors and buffers */
	if (em_setup_transmit_structures(adapter)) {
		printf("em%d: Could not setup transmit structures\n",
		       adapter->unit);
		em_stop(adapter);
		return;
	}
	em_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	em_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (em_setup_receive_structures(adapter)) {
		printf("em%d: Could not setup receive structures\n",
		       adapter->unit);
		em_stop(adapter);
		return;
	}
	em_initialize_receive_unit(adapter);

	/* Don't loose promiscuous settings */
	em_set_promisc(adapter);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Tx checksum offload exists only on 82543 and newer parts. */
	if (adapter->hw.mac_type >= em_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist = EM_CHECKSUM_FEATURES;
		else
			ifp->if_hwassist = 0;
	}

	/* Start the one-second link/statistics timer. */
	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
	em_clear_hw_cntrs(&adapter->hw);
#ifdef DEVICE_POLLING
        /*
         * Only enable interrupts if we are not polling, make sure
         * they are off otherwise.
         */
        if (ifp->if_capenable & IFCAP_POLLING)
                em_disable_intr(adapter);
        else
#endif /* DEVICE_POLLING */
		em_enable_intr(adapter);

	/* Don't reset the phy next time init gets called */
	adapter->hw.phy_reset_disable = TRUE;

	return;
}
951
/*
 * Stack-facing init entry point: take the driver lock and run the
 * locked initialization path.
 */
static void
em_init(void *arg)
{
	struct adapter *sc = arg;

	EM_LOCK(sc);
	em_init_locked(sc);
	EM_UNLOCK(sc);
}
962
963
964#ifdef DEVICE_POLLING
static void
em_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
        struct adapter *adapter = ifp->if_softc;
        u_int32_t reg_icr;

	/* Caller (em_poll) holds the adapter lock. */
	mtx_assert(&adapter->mtx, MA_OWNED);

        if (cmd == POLL_AND_CHECK_STATUS) {
		/* Reading ICR acknowledges the pending causes. */
                reg_icr = E1000_READ_REG(&adapter->hw, ICR);
		/* Handle a link state change noticed while polling. */
                if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
                        adapter->hw.get_link_status = 1;
                        em_check_for_link(&adapter->hw);
                        em_print_link_status(adapter);
			callout_reset(&adapter->timer, hz, em_local_timer, adapter);
                }
        }
	/* Service up to "count" received frames, then reap Tx descriptors. */
	em_process_receive_interrupts(adapter, count);
	em_clean_transmit_interrupts(adapter);

	/* Kick the transmitter if the send queue has work pending. */
        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                em_start_locked(ifp);
}
989
990static void
991em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
992{
993        struct adapter *adapter = ifp->if_softc;
994
995	EM_LOCK(adapter);
996	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
997		em_poll_locked(ifp, cmd, count);
998	EM_UNLOCK(adapter);
999}
1000#endif /* DEVICE_POLLING */
1001
1002/*********************************************************************
1003 *
1004 *  Interrupt Service routine
1005 *
1006 **********************************************************************/
static void
em_intr(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp;
	uint32_t	reg_icr;
	int		wantinit = 0;

	EM_LOCK(adapter);

	ifp = adapter->ifp;

#ifdef DEVICE_POLLING
	/* While polling is active, the interrupt path does nothing. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		EM_UNLOCK(adapter);
		return;
	}
#endif /* DEVICE_POLLING */

	/* Reading ICR acknowledges (clears) the reported causes; loop
	 * until the chip has nothing more pending. */
	for (;;) {
		reg_icr = E1000_READ_REG(&adapter->hw, ICR);
		if (reg_icr == 0)
			break;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			/* -1 count == no per-pass receive limit. */
			em_process_receive_interrupts(adapter, -1);
			em_clean_transmit_interrupts(adapter);
		}

		/* Link status change */
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.get_link_status = 1;
			em_check_for_link(&adapter->hw);
			em_print_link_status(adapter);
			callout_reset(&adapter->timer, hz, em_local_timer,
			    adapter);
		}

		if (reg_icr & E1000_ICR_RXO) {
			adapter->rx_overruns++;
			wantinit = 1;
		}
	}
	/* NOTE(review): reinitializing on Rx overrun is deliberately
	 * compiled out below; "wantinit" is only consumed by that
	 * disabled block. */
#if 0
	if (wantinit)
		em_init_locked(adapter);
#endif
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		em_start_locked(ifp);

	EM_UNLOCK(adapter);
	return;
}
1062
1063
1064
1065/*********************************************************************
1066 *
1067 *  Media Ioctl callback
1068 *
1069 *  This routine is called whenever the user queries the status of
1070 *  the interface using ifconfig.
1071 *
1072 **********************************************************************/
1073static void
1074em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1075{
1076	struct adapter * adapter = ifp->if_softc;
1077
1078	INIT_DEBUGOUT("em_media_status: begin");
1079
1080	em_check_for_link(&adapter->hw);
1081	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1082		if (adapter->link_active == 0) {
1083			em_get_speed_and_duplex(&adapter->hw,
1084						&adapter->link_speed,
1085						&adapter->link_duplex);
1086			adapter->link_active = 1;
1087		}
1088	} else {
1089		if (adapter->link_active == 1) {
1090			adapter->link_speed = 0;
1091			adapter->link_duplex = 0;
1092			adapter->link_active = 0;
1093		}
1094	}
1095
1096	ifmr->ifm_status = IFM_AVALID;
1097	ifmr->ifm_active = IFM_ETHER;
1098
1099	if (!adapter->link_active)
1100		return;
1101
1102	ifmr->ifm_status |= IFM_ACTIVE;
1103
1104	if (adapter->hw.media_type == em_media_type_fiber) {
1105		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1106	} else {
1107		switch (adapter->link_speed) {
1108		case 10:
1109			ifmr->ifm_active |= IFM_10_T;
1110			break;
1111		case 100:
1112			ifmr->ifm_active |= IFM_100_TX;
1113			break;
1114		case 1000:
1115			ifmr->ifm_active |= IFM_1000_T;
1116			break;
1117		}
1118		if (adapter->link_duplex == FULL_DUPLEX)
1119			ifmr->ifm_active |= IFM_FDX;
1120		else
1121			ifmr->ifm_active |= IFM_HDX;
1122	}
1123	return;
1124}
1125
1126/*********************************************************************
1127 *
1128 *  Media Ioctl callback
1129 *
1130 *  This routine is called when the user changes speed/duplex using
1131 *  media/mediopt option with ifconfig.
1132 *
1133 **********************************************************************/
1134static int
1135em_media_change(struct ifnet *ifp)
1136{
1137	struct adapter * adapter = ifp->if_softc;
1138	struct ifmedia  *ifm = &adapter->media;
1139
1140	INIT_DEBUGOUT("em_media_change: begin");
1141
1142	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1143		return(EINVAL);
1144
1145	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1146	case IFM_AUTO:
1147		adapter->hw.autoneg = DO_AUTO_NEG;
1148		adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1149		break;
1150	case IFM_1000_SX:
1151	case IFM_1000_T:
1152		adapter->hw.autoneg = DO_AUTO_NEG;
1153		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1154		break;
1155	case IFM_100_TX:
1156		adapter->hw.autoneg = FALSE;
1157		adapter->hw.autoneg_advertised = 0;
1158		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1159			adapter->hw.forced_speed_duplex = em_100_full;
1160		else
1161			adapter->hw.forced_speed_duplex	= em_100_half;
1162		break;
1163	case IFM_10_T:
1164		adapter->hw.autoneg = FALSE;
1165		adapter->hw.autoneg_advertised = 0;
1166		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1167			adapter->hw.forced_speed_duplex = em_10_full;
1168		else
1169			adapter->hw.forced_speed_duplex	= em_10_half;
1170		break;
1171	default:
1172		printf("em%d: Unsupported media type\n", adapter->unit);
1173	}
1174
1175	/* As the speed/duplex settings my have changed we need to
1176	 * reset the PHY.
1177	 */
1178	adapter->hw.phy_reset_disable = FALSE;
1179
1180	em_init(adapter);
1181
1182	return(0);
1183}
1184
1185/*********************************************************************
1186 *
1187 *  This routine maps the mbufs to tx descriptors.
1188 *
1189 *  return 0 on success, positive on failure
1190 **********************************************************************/
static int
em_encap(struct adapter *adapter, struct mbuf **m_headp)
{
        u_int32_t       txd_upper;
        u_int32_t       txd_lower, txd_used = 0, txd_saved = 0;
        int             i, j, error = 0;

	struct mbuf	*m_head;

	/* For 82544 Workaround */
	DESC_ARRAY              desc_array;
	u_int32_t               array_elements;
	u_int32_t               counter;
        struct m_tag    *mtag;
	bus_dma_segment_t	segs[EM_MAX_SCATTER];
	int			nsegs;
        struct em_buffer   *tx_buffer;
        struct em_tx_desc *current_tx_desc = NULL;
        struct ifnet   *ifp = adapter->ifp;

	m_head = *m_headp;

        /*
         * Force a cleanup if number of TX descriptors
         * available hits the threshold
         */
        if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
                em_clean_transmit_interrupts(adapter);
		/* Still starved after the cleanup: tell caller to back off. */
                if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
                        adapter->no_tx_desc_avail1++;
                        return(ENOBUFS);
                }
        }

        /*
         * Map the packet for DMA.
         */
	tx_buffer = &adapter->tx_buffer_area[adapter->next_avail_tx_desc];
	error = bus_dmamap_load_mbuf_sg(adapter->txtag, tx_buffer->map, m_head,
	    segs, &nsegs, BUS_DMA_NOWAIT);
        if (error != 0) {
                adapter->no_tx_dma_setup++;
                return (error);
        }
        KASSERT(nsegs != 0, ("em_encap: empty packet"));

        if (nsegs > adapter->num_tx_desc_avail) {
                adapter->no_tx_desc_avail2++;
		error = ENOBUFS;
		goto encap_fail;
        }


        if (ifp->if_hwassist > 0) {
		/* Emit a context descriptor first if checksum offload is on. */
                em_transmit_checksum_setup(adapter,  m_head,
                                           &txd_upper, &txd_lower);
        } else
                txd_upper = txd_lower = 0;


        /* Find out if we are in vlan mode */
        mtag = VLAN_OUTPUT_TAG(ifp, m_head);

	/*
	 * When operating in promiscuous mode, hardware encapsulation for
	 * packets is disabled.  This means we have to add the vlan
	 * encapsulation in the driver, since it will have come down from the
	 * VLAN layer with a tag instead of a VLAN header.
	 */
	/*
	 * NOTE(review): the DMA map above was loaded from the chain as it
	 * was before this prepend; segs[] may not describe the VLAN header
	 * inserted below -- confirm whether this path can transmit a frame
	 * that omits the header.
	 */
	if (mtag != NULL && adapter->em_insert_vlan_header) {
		struct ether_vlan_header *evl;
		struct ether_header eh;

		m_head = m_pullup(m_head, sizeof(eh));
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		eh = *mtod(m_head, struct ether_header *);
		M_PREPEND(m_head, sizeof(*evl), M_DONTWAIT);
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		m_head = m_pullup(m_head, sizeof(*evl));
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		evl = mtod(m_head, struct ether_vlan_header *);
		bcopy(&eh, evl, sizeof(*evl));
		evl->evl_proto = evl->evl_encap_proto;
		evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
		evl->evl_tag = htons(VLAN_TAG_VALUE(mtag));
		m_tag_delete(m_head, mtag);
		mtag = NULL;
		*m_headp = m_head;
	}

        i = adapter->next_avail_tx_desc;
	if (adapter->pcix_82544) {
		/* Remember where we started so we can roll back on failure. */
		txd_saved = i;
		txd_used = 0;
	}
	/* Build one (or, for 82544/PCI-X, possibly several) descriptors
	 * per DMA segment. */
        for (j = 0; j < nsegs; j++) {
		/* If adapter is 82544 and on PCIX bus */
		if(adapter->pcix_82544) {
			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
                        array_elements = em_fill_descriptors(segs[j].ds_addr,
			    segs[j].ds_len, &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				if (txd_used == adapter->num_tx_desc_avail) {
					adapter->next_avail_tx_desc = txd_saved;
					adapter->no_tx_desc_avail2++;
					error = ENOBUFS;
					goto encap_fail;
                                }
                                tx_buffer = &adapter->tx_buffer_area[i];
                                current_tx_desc = &adapter->tx_desc_base[i];
                                current_tx_desc->buffer_addr = htole64(
					desc_array.descriptor[counter].address);
                                current_tx_desc->lower.data = htole32(
					(adapter->txd_cmd | txd_lower |
					 (u_int16_t)desc_array.descriptor[counter].length));
                                current_tx_desc->upper.data = htole32((txd_upper));
                                if (++i == adapter->num_tx_desc)
                                         i = 0;

                                tx_buffer->m_head = NULL;
                                txd_used++;
                        }
		} else {
			tx_buffer = &adapter->tx_buffer_area[i];
			current_tx_desc = &adapter->tx_desc_base[i];

			current_tx_desc->buffer_addr = htole64(segs[j].ds_addr);
			current_tx_desc->lower.data = htole32(
				adapter->txd_cmd | txd_lower | segs[j].ds_len);
			current_tx_desc->upper.data = htole32(txd_upper);

			if (++i == adapter->num_tx_desc)
				i = 0;

			tx_buffer->m_head = NULL;
		}
        }

	adapter->next_avail_tx_desc = i;
	if (adapter->pcix_82544) {
		adapter->num_tx_desc_avail -= txd_used;
	}
	else {
		adapter->num_tx_desc_avail -= nsegs;
	}

        if (mtag != NULL) {
                /* Set the vlan id */
                current_tx_desc->upper.fields.special = htole16(VLAN_TAG_VALUE(mtag));

                /* Tell hardware to add tag */
                current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
        }

	/* Only the last descriptor's buffer entry keeps the mbuf; it is
	 * freed when that descriptor is reaped. */
        tx_buffer->m_head = m_head;
        bus_dmamap_sync(adapter->txtag, tx_buffer->map, BUS_DMASYNC_PREWRITE);

        /*
         * Last Descriptor of Packet needs End Of Packet (EOP)
         */
        current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);

        /*
         * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
         * that this frame is available to transmit.
         */
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        if (adapter->hw.mac_type == em_82547 &&
            adapter->link_duplex == HALF_DUPLEX) {
		/* 82547 half-duplex: defer the tail write through the
		 * FIFO-hang workaround path. */
                em_82547_move_tail_locked(adapter);
        } else {
                E1000_WRITE_REG(&adapter->hw, TDT, i);
                if (adapter->hw.mac_type == em_82547) {
                        em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len);
                }
        }

        return(0);

encap_fail:
	bus_dmamap_unload(adapter->txtag, tx_buffer->map);
	return (error);
}
1390
1391/*********************************************************************
1392 *
1393 * 82547 workaround to avoid controller hang in half-duplex environment.
1394 * The workaround is to avoid queuing a large packet that would span
1395 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1396 * in this case. We do that only when FIFO is quiescent.
1397 *
1398 **********************************************************************/
static void
em_82547_move_tail_locked(struct adapter *adapter)
{
	uint16_t hw_tdt;
	uint16_t sw_tdt;
	struct em_tx_desc *tx_desc;
	uint16_t length = 0;
	boolean_t eop = 0;

	EM_LOCK_ASSERT(adapter);

	/* Walk descriptors from the hardware tail toward the software
	 * tail, publishing them to the chip one whole packet at a time
	 * so a packet that would wrap the Tx FIFO can be held back. */
	hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
	sw_tdt = adapter->next_avail_tx_desc;

	while (hw_tdt != sw_tdt) {
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		/* Accumulate the packet length up to its EOP descriptor. */
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if(++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if(eop) {
			if (em_82547_fifo_workaround(adapter, length)) {
				/* FIFO not quiescent yet; retry from a
				 * one-tick callout and stop for now. */
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
					em_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
			em_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}
	return;
}
1434
/*
 * Callout wrapper for em_82547_move_tail_locked(): acquire the adapter
 * lock around the deferred FIFO-workaround tail update.
 */
static void
em_82547_move_tail(void *arg)
{
	struct adapter *sc = arg;

	EM_LOCK(sc);
	em_82547_move_tail_locked(sc);
	EM_UNLOCK(sc);
}
1444
1445static int
1446em_82547_fifo_workaround(struct adapter *adapter, int len)
1447{
1448	int fifo_space, fifo_pkt_len;
1449
1450	fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1451
1452	if (adapter->link_duplex == HALF_DUPLEX) {
1453		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1454
1455		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1456			if (em_82547_tx_fifo_reset(adapter)) {
1457				return(0);
1458			}
1459			else {
1460				return(1);
1461			}
1462		}
1463	}
1464
1465	return(0);
1466}
1467
1468static void
1469em_82547_update_fifo_head(struct adapter *adapter, int len)
1470{
1471	int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1472
1473	/* tx_fifo_head is always 16 byte aligned */
1474	adapter->tx_fifo_head += fifo_pkt_len;
1475	if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1476		adapter->tx_fifo_head -= adapter->tx_fifo_size;
1477	}
1478
1479	return;
1480}
1481
1482
static int
em_82547_tx_fifo_reset(struct adapter *adapter)
{
	uint32_t tctl;

	/* The reset is only safe when the Tx unit is completely idle:
	 * descriptor ring drained (TDT == TDH), FIFO read/write
	 * pointers caught up, and no packet bytes left in the FIFO
	 * (TDFPC == 0). */
	if ( (E1000_READ_REG(&adapter->hw, TDT) ==
	      E1000_READ_REG(&adapter->hw, TDH)) &&
	     (E1000_READ_REG(&adapter->hw, TDFT) ==
	      E1000_READ_REG(&adapter->hw, TDFH)) &&
	     (E1000_READ_REG(&adapter->hw, TDFTS) ==
	      E1000_READ_REG(&adapter->hw, TDFHS)) &&
	     (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {

		/* Disable TX unit */
		tctl = E1000_READ_REG(&adapter->hw, TCTL);
		E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&adapter->hw, TDFT,  adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFH,  adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
		E1000_WRITE_FLUSH(&adapter->hw);

		/* Keep the software shadow in sync with the hardware. */
		adapter->tx_fifo_head = 0;
		adapter->tx_fifo_reset_cnt++;

		return(TRUE);
	}
	else {
		return(FALSE);
	}
}
1519
1520static void
1521em_set_promisc(struct adapter * adapter)
1522{
1523
1524	u_int32_t       reg_rctl;
1525	struct ifnet   *ifp = adapter->ifp;
1526
1527	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1528
1529	if (ifp->if_flags & IFF_PROMISC) {
1530		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1531		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1532		/* Disable VLAN stripping in promiscous mode
1533		 * This enables bridging of vlan tagged frames to occur
1534		 * and also allows vlan tags to be seen in tcpdump
1535		 */
1536		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1537			em_disable_vlans(adapter);
1538		adapter->em_insert_vlan_header = 1;
1539	} else if (ifp->if_flags & IFF_ALLMULTI) {
1540		reg_rctl |= E1000_RCTL_MPE;
1541		reg_rctl &= ~E1000_RCTL_UPE;
1542		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1543		adapter->em_insert_vlan_header = 0;
1544	} else
1545		adapter->em_insert_vlan_header = 0;
1546
1547	return;
1548}
1549
1550static void
1551em_disable_promisc(struct adapter * adapter)
1552{
1553	u_int32_t       reg_rctl;
1554	struct ifnet   *ifp = adapter->ifp;
1555
1556	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1557
1558	reg_rctl &=  (~E1000_RCTL_UPE);
1559	reg_rctl &=  (~E1000_RCTL_MPE);
1560	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1561
1562	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1563		em_enable_vlans(adapter);
1564	adapter->em_insert_vlan_header = 0;
1565
1566	return;
1567}
1568
1569
1570/*********************************************************************
1571 *  Multicast Update
1572 *
1573 *  This routine is called whenever multicast address list is updated.
1574 *
1575 **********************************************************************/
1576
static void
em_set_multi(struct adapter * adapter)
{
        u_int32_t reg_rctl = 0;
        u_int8_t  mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
        struct ifmultiaddr  *ifma;
        int mcnt = 0;
        struct ifnet   *ifp = adapter->ifp;

        IOCTL_DEBUGOUT("em_set_multi: begin");

	/* 82542 rev2.0 workaround: the receiver must be placed in
	 * reset (RCTL_RST, with MWI off) while the multicast table
	 * array is rewritten. */
        if (adapter->hw.mac_type == em_82542_rev2_0) {
                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
                if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
                        em_pci_clear_mwi(&adapter->hw);
                }
                reg_rctl |= E1000_RCTL_RST;
                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
                msec_delay(5);
        }

	/* Gather link-layer multicast addresses under the address lock. */
	IF_ADDR_LOCK(ifp);
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break;

                bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
                      &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
                mcnt++;
        }
	IF_ADDR_UNLOCK(ifp);

	/* Too many groups for the table: fall back to accepting all
	 * multicast frames instead. */
        if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
                reg_rctl |= E1000_RCTL_MPE;
                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
        } else
                em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);

	/* Take the 82542 rev2.0 receiver back out of reset. */
        if (adapter->hw.mac_type == em_82542_rev2_0) {
                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
                reg_rctl &= ~E1000_RCTL_RST;
                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
                msec_delay(5);
                if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
                        em_pci_set_mwi(&adapter->hw);
                }
        }

        return;
}
1630
1631
1632/*********************************************************************
1633 *  Timer routine
1634 *
1635 *  This routine checks for link status and updates statistics.
1636 *
1637 **********************************************************************/
1638
1639static void
1640em_local_timer(void *arg)
1641{
1642	struct ifnet   *ifp;
1643	struct adapter * adapter = arg;
1644	ifp = adapter->ifp;
1645
1646	EM_LOCK(adapter);
1647
1648	em_check_for_link(&adapter->hw);
1649	em_print_link_status(adapter);
1650	em_update_stats_counters(adapter);
1651	if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1652		em_print_hw_stats(adapter);
1653	}
1654	em_smartspeed(adapter);
1655
1656	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1657
1658	EM_UNLOCK(adapter);
1659	return;
1660}
1661
1662static void
1663em_print_link_status(struct adapter * adapter)
1664{
1665	struct ifnet *ifp = adapter->ifp;
1666
1667	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1668		if (adapter->link_active == 0) {
1669			em_get_speed_and_duplex(&adapter->hw,
1670						&adapter->link_speed,
1671						&adapter->link_duplex);
1672			if (bootverbose)
1673				printf("em%d: Link is up %d Mbps %s\n",
1674				       adapter->unit,
1675				       adapter->link_speed,
1676				       ((adapter->link_duplex == FULL_DUPLEX) ?
1677					"Full Duplex" : "Half Duplex"));
1678			adapter->link_active = 1;
1679			adapter->smartspeed = 0;
1680			if_link_state_change(ifp, LINK_STATE_UP);
1681		}
1682	} else {
1683		if (adapter->link_active == 1) {
1684			adapter->link_speed = 0;
1685			adapter->link_duplex = 0;
1686			if (bootverbose)
1687				printf("em%d: Link is Down\n", adapter->unit);
1688			adapter->link_active = 0;
1689			if_link_state_change(ifp, LINK_STATE_DOWN);
1690		}
1691	}
1692
1693	return;
1694}
1695
1696/*********************************************************************
1697 *
1698 *  This routine disables all traffic on the adapter by issuing a
1699 *  global reset on the MAC and deallocates TX/RX buffers.
1700 *
1701 **********************************************************************/
1702
1703static void
1704em_stop(void *arg)
1705{
1706	struct ifnet   *ifp;
1707	struct adapter * adapter = arg;
1708	ifp = adapter->ifp;
1709
1710	mtx_assert(&adapter->mtx, MA_OWNED);
1711
1712	INIT_DEBUGOUT("em_stop: begin");
1713
1714	em_disable_intr(adapter);
1715	em_reset_hw(&adapter->hw);
1716	callout_stop(&adapter->timer);
1717	callout_stop(&adapter->tx_fifo_timer);
1718	em_free_transmit_structures(adapter);
1719	em_free_receive_structures(adapter);
1720
1721
1722	/* Tell the stack that the interface is no longer active */
1723	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1724
1725	return;
1726}
1727
1728
1729/*********************************************************************
1730 *
1731 *  Determine hardware revision.
1732 *
1733 **********************************************************************/
static void
em_identify_hardware(struct adapter * adapter)
{
	device_t dev = adapter->dev;

	/* Make sure our PCI config space has the necessary stuff set */
	adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
	      (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
		printf("em%d: Memory Access and/or Bus Master bits were not set!\n",
		       adapter->unit);
		/* Force bus mastering and memory-space decoding on. */
		adapter->hw.pci_cmd_word |=
		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
		pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
	}

	/* Save off the information about this board */
	adapter->hw.vendor_id = pci_get_vendor(dev);
	adapter->hw.device_id = pci_get_device(dev);
	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	/* Identify the MAC */
        if (em_set_mac_type(&adapter->hw))
                printf("em%d: Unknown MAC Type\n", adapter->unit);

	/* These parts need the shared-code PHY init script run. */
	if(adapter->hw.mac_type == em_82541 ||
	   adapter->hw.mac_type == em_82541_rev_2 ||
	   adapter->hw.mac_type == em_82547 ||
	   adapter->hw.mac_type == em_82547_rev_2)
		adapter->hw.phy_init_script = TRUE;

        return;
}
1769
static int
em_allocate_pci_resources(struct adapter * adapter)
{
	int             val, rid;
	device_t        dev = adapter->dev;

	/* Map the register BAR (BAR 0). */
	rid = PCIR_BAR(0);
	adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						     &rid, RF_ACTIVE);
	if (!(adapter->res_memory)) {
		printf("em%d: Unable to allocate bus resource: memory\n",
		       adapter->unit);
		return(ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
	rman_get_bustag(adapter->res_memory);
	adapter->osdep.mem_bus_space_handle =
	rman_get_bushandle(adapter->res_memory);
	adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;

	/* NOTE(review): on the error returns below, resources already
	 * allocated here are not released locally -- presumably the
	 * attach path calls em_free_pci_resources() on failure; verify
	 * against the caller. */
	if (adapter->hw.mac_type > em_82543) {
		/* Figure our where our IO BAR is ? */
		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
			val = pci_read_config(dev, rid, 4);
			if (E1000_BAR_TYPE(val) == E1000_BAR_TYPE_IO) {
				adapter->io_rid = rid;
				break;
			}
			rid += 4;
			/* check for 64bit BAR */
			if (E1000_BAR_MEM_TYPE(val) == E1000_BAR_MEM_TYPE_64BIT)
				rid += 4;
		}
		if (rid >= PCIR_CIS) {
			printf("em%d: Unable to locate IO BAR\n", adapter->unit);
			return (ENXIO);
		}
		adapter->res_ioport = bus_alloc_resource_any(dev,
							     SYS_RES_IOPORT,
							     &adapter->io_rid,
							     RF_ACTIVE);
		if (!(adapter->res_ioport)) {
			printf("em%d: Unable to allocate bus resource: ioport\n",
			       adapter->unit);
			return(ENXIO);
		}
		adapter->hw.io_base = 0;
		adapter->osdep.io_bus_space_tag =
		    rman_get_bustag(adapter->res_ioport);
		adapter->osdep.io_bus_space_handle =
		    rman_get_bushandle(adapter->res_ioport);
	}

	/* Allocate the IRQ and hook up the MPSAFE interrupt handler. */
	rid = 0x0;
	adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
						        RF_SHAREABLE |
							RF_ACTIVE);
	if (!(adapter->res_interrupt)) {
		printf("em%d: Unable to allocate bus resource: interrupt\n",
		       adapter->unit);
		return(ENXIO);
	}
	if (bus_setup_intr(dev, adapter->res_interrupt,
			   INTR_TYPE_NET | INTR_MPSAFE,
			   (void (*)(void *)) em_intr, adapter,
			   &adapter->int_handler_tag)) {
		printf("em%d: Error registering interrupt handler!\n",
		       adapter->unit);
		return(ENXIO);
	}

	/* Back-pointer used by the shared register-access code. */
	adapter->hw.back = &adapter->osdep;

	return(0);
}
1846
1847static void
1848em_free_pci_resources(struct adapter * adapter)
1849{
1850	device_t dev = adapter->dev;
1851
1852	if (adapter->res_interrupt != NULL) {
1853		bus_teardown_intr(dev, adapter->res_interrupt,
1854				  adapter->int_handler_tag);
1855		bus_release_resource(dev, SYS_RES_IRQ, 0,
1856				     adapter->res_interrupt);
1857	}
1858	if (adapter->res_memory != NULL) {
1859		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
1860				     adapter->res_memory);
1861	}
1862
1863	if (adapter->res_ioport != NULL) {
1864		bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
1865				     adapter->res_ioport);
1866	}
1867	return;
1868}
1869
1870/*********************************************************************
1871 *
1872 *  Initialize the hardware to a configuration as specified by the
1873 *  adapter structure. The controller is reset, the EEPROM is
1874 *  verified, the MAC address is set, then the shared initialization
1875 *  routines are called.
1876 *
1877 **********************************************************************/
static int
em_hardware_init(struct adapter * adapter)
{
        INIT_DEBUGOUT("em_hardware_init: begin");
	/* Issue a global reset */
	em_reset_hw(&adapter->hw);

	/* When hardware is reset, fifo_head is also reset */
	adapter->tx_fifo_head = 0;

	/* Make sure we have a good EEPROM before we read from it */
	if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
		printf("em%d: The EEPROM Checksum Is Not Valid\n",
		       adapter->unit);
		return(EIO);
	}

	if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
		printf("em%d: EEPROM read error while reading part number\n",
		       adapter->unit);
		return(EIO);
	}

	/* Run the shared-code initialization (MAC, PHY, flow control). */
	if (em_init_hw(&adapter->hw) < 0) {
		printf("em%d: Hardware Initialization Failed",
		       adapter->unit);
		return(EIO);
	}

	/* Seed the cached link state from the STATUS register. */
	em_check_for_link(&adapter->hw);
	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
		adapter->link_active = 1;
	else
		adapter->link_active = 0;

	if (adapter->link_active) {
		em_get_speed_and_duplex(&adapter->hw,
					&adapter->link_speed,
					&adapter->link_duplex);
	} else {
		adapter->link_speed = 0;
		adapter->link_duplex = 0;
	}

	return(0);
}
1924
1925/*********************************************************************
1926 *
1927 *  Setup networking device structure and register an interface.
1928 *
1929 **********************************************************************/
static void
em_setup_interface(device_t dev, struct adapter * adapter)
{
	struct ifnet   *ifp;
	INIT_DEBUGOUT("em_setup_interface: begin");

	/* Failure to allocate the ifnet at attach time is unrecoverable. */
	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	/* 1 Gb/s: the maximum rate of any adapter this driver handles. */
	ifp->if_baudrate = 1000000000;
	ifp->if_init =  em_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = em_ioctl;
	ifp->if_start = em_start;
	ifp->if_watchdog = em_watchdog;
	/* Leave one descriptor unused so head/tail never fully wrap. */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
	IFQ_SET_READY(&ifp->if_snd);

	/* Register with the ethernet layer; MAC address comes from EEPROM. */
        ether_ifattach(ifp, adapter->hw.mac_addr);

	ifp->if_capabilities = ifp->if_capenable = 0;

	/* Checksum offload is supported from the 82543 generation on. */
	if (adapter->hw.mac_type >= em_82543) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_capenable |= IFCAP_HWCSUM;
	}

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	/* VLAN hardware tagging is advertised but not enabled by default. */
	ifp->if_capenable |= IFCAP_VLAN_MTU;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
		     em_media_status);
	if (adapter->hw.media_type == em_media_type_fiber) {
		/* Fiber: 1000BASE-SX only, with and without full duplex. */
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
			    0, NULL);
	} else {
		/* Copper: 10/100/1000BASE-T in all duplex combinations. */
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	}
	/* Default to autoselect. */
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}
2000
2001
2002/*********************************************************************
2003 *
2004 *  Workaround for SmartSpeed on 82541 and 82547 controllers
2005 *
2006 **********************************************************************/
/*
 * SmartSpeed workaround for IGP PHYs (82541/82547): when 1000BASE-T
 * autonegotiation repeatedly fails with a master/slave configuration
 * fault, temporarily drop manual master/slave configuration and restart
 * autonegotiation so the link can come up (possibly downshifted).
 * Called periodically; adapter->smartspeed counts the attempts.
 */
static void
em_smartspeed(struct adapter *adapter)
{
        uint16_t phy_tmp;

	/* Only relevant while the link is down, on IGP PHYs negotiating
	 * 1000FD; otherwise there is nothing to work around. */
	if(adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
	   !adapter->hw.autoneg || !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
		return;

        if(adapter->smartspeed == 0) {
                /* If Master/Slave config fault is asserted twice,
                 * we assume back-to-back */
                em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
                if(!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) return;
                em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
                if(phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
                        em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
					&phy_tmp);
                        if(phy_tmp & CR_1000T_MS_ENABLE) {
                                /* Drop manual master/slave selection and
                                 * restart autonegotiation. */
                                phy_tmp &= ~CR_1000T_MS_ENABLE;
                                em_write_phy_reg(&adapter->hw,
                                                    PHY_1000T_CTRL, phy_tmp);
                                adapter->smartspeed++;
                                if(adapter->hw.autoneg &&
                                   !em_phy_setup_autoneg(&adapter->hw) &&
				   !em_read_phy_reg(&adapter->hw, PHY_CTRL,
                                                       &phy_tmp)) {
                                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
                                                    MII_CR_RESTART_AUTO_NEG);
                                        em_write_phy_reg(&adapter->hw,
							 PHY_CTRL, phy_tmp);
                                }
                        }
                }
                return;
        } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
                /* If still no link, perhaps using 2/3 pair cable */
                em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
                phy_tmp |= CR_1000T_MS_ENABLE;
                em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
                if(adapter->hw.autoneg &&
                   !em_phy_setup_autoneg(&adapter->hw) &&
                   !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
                                    MII_CR_RESTART_AUTO_NEG);
                        em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
                }
        }
        /* Restart process after EM_SMARTSPEED_MAX iterations */
        if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
                adapter->smartspeed = 0;

	return;
}
2061
2062
2063/*
2064 * Manage DMA'able memory.
2065 */
2066static void
2067em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2068{
2069        if (error)
2070                return;
2071        *(bus_addr_t *) arg = segs[0].ds_addr;
2072}
2073
2074static int
2075em_dma_malloc(struct adapter *adapter, bus_size_t size,
2076        struct em_dma_alloc *dma, int mapflags)
2077{
2078        int r;
2079
2080	r = bus_dma_tag_create(NULL,                    /* parent */
2081                               E1000_DBA_ALIGN, 0,      /* alignment, bounds */
2082                               BUS_SPACE_MAXADDR,       /* lowaddr */
2083                               BUS_SPACE_MAXADDR,       /* highaddr */
2084                               NULL, NULL,              /* filter, filterarg */
2085                               size,                    /* maxsize */
2086                               1,                       /* nsegments */
2087                               size,                    /* maxsegsize */
2088                               BUS_DMA_ALLOCNOW,        /* flags */
2089			       NULL,			/* lockfunc */
2090			       NULL,			/* lockarg */
2091                               &dma->dma_tag);
2092        if (r != 0) {
2093                printf("em%d: em_dma_malloc: bus_dma_tag_create failed; "
2094                        "error %u\n", adapter->unit, r);
2095                goto fail_0;
2096        }
2097
2098        r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2099                             BUS_DMA_NOWAIT, &dma->dma_map);
2100        if (r != 0) {
2101                printf("em%d: em_dma_malloc: bus_dmammem_alloc failed; "
2102                        "size %ju, error %d\n", adapter->unit,
2103			(uintmax_t)size, r);
2104                goto fail_2;
2105        }
2106
2107	dma->dma_paddr = 0;
2108        r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2109                            size,
2110                            em_dmamap_cb,
2111                            &dma->dma_paddr,
2112                            mapflags | BUS_DMA_NOWAIT);
2113        if (r != 0 || dma->dma_paddr == 0) {
2114                printf("em%d: em_dma_malloc: bus_dmamap_load failed; "
2115                        "error %u\n", adapter->unit, r);
2116                goto fail_3;
2117        }
2118
2119        return (0);
2120
2121fail_3:
2122        bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2123fail_2:
2124        bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2125        bus_dma_tag_destroy(dma->dma_tag);
2126fail_0:
2127        dma->dma_map = NULL;
2128        dma->dma_tag = NULL;
2129        return (r);
2130}
2131
/*
 * Release a DMA area set up by em_dma_malloc(): sync outstanding DMA,
 * unload and free the mapped memory, then destroy the tag.  Idempotent:
 * a NULL dma_tag means nothing was allocated (or it was already freed).
 */
static void
em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
{
	if (dma->dma_tag == NULL)
		return;
	if (dma->dma_map != NULL) {
		/* Let any in-flight DMA complete before freeing the memory. */
		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
		dma->dma_map = NULL;
	}
        bus_dma_tag_destroy(dma->dma_tag);
	dma->dma_tag = NULL;
}
2147
2148
2149/*********************************************************************
2150 *
2151 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2152 *  the information needed to transmit a packet on the wire.
2153 *
2154 **********************************************************************/
2155static int
2156em_allocate_transmit_structures(struct adapter * adapter)
2157{
2158	if (!(adapter->tx_buffer_area =
2159	      (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2160					     adapter->num_tx_desc, M_DEVBUF,
2161					     M_NOWAIT))) {
2162		printf("em%d: Unable to allocate tx_buffer memory\n",
2163		       adapter->unit);
2164		return ENOMEM;
2165	}
2166
2167	bzero(adapter->tx_buffer_area,
2168	      sizeof(struct em_buffer) * adapter->num_tx_desc);
2169
2170	return 0;
2171}
2172
2173/*********************************************************************
2174 *
2175 *  Allocate and initialize transmit structures.
2176 *
2177 **********************************************************************/
/*
 * Build everything the transmit path needs: the TX DMA tag (sized for
 * a full scatter/gather frame), the per-descriptor buffer array and
 * DMA maps, and the ring state (head/tail indices, available count,
 * checksum context).  Returns 0 or an errno; on failure all partially
 * constructed state is torn down via em_free_transmit_structures().
 */
static int
em_setup_transmit_structures(struct adapter * adapter)
{
	struct em_buffer *tx_buffer;
	bus_size_t size;
	int error, i;

        /*
         * Setup DMA descriptor areas.
         */
	/* Tag must cover the largest frame, rounded to cluster size. */
	size = roundup2(adapter->hw.max_frame_size, MCLBYTES);
	if ((error = bus_dma_tag_create(NULL,           /* parent */
                               1, 0,                    /* alignment, bounds */
                               BUS_SPACE_MAXADDR,       /* lowaddr */
                               BUS_SPACE_MAXADDR,       /* highaddr */
                               NULL, NULL,              /* filter, filterarg */
                               size,                    /* maxsize */
                               EM_MAX_SCATTER,          /* nsegments */
                               size,                    /* maxsegsize */
                               0,                       /* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockarg */
                               &adapter->txtag)) != 0) {
		printf("em%d: Unable to allocate TX DMA tag\n", adapter->unit);
		goto fail;
        }

        if ((error = em_allocate_transmit_structures(adapter)) != 0)
		goto fail;

	/* Clear the descriptor ring and create one DMA map per slot. */
        bzero((void *) adapter->tx_desc_base,
              (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
	tx_buffer = adapter->tx_buffer_area;
	for (i = 0; i < adapter->num_tx_desc; i++) {
		error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
		if (error != 0) {
			printf("em%d: Unable to create TX DMA map\n",
			    adapter->unit);
			goto fail;
		}
		tx_buffer++;
	}

	/* Ring starts empty: producer and consumer both at slot 0. */
        adapter->next_avail_tx_desc = 0;
        adapter->oldest_used_tx_desc = 0;

        /* Set number of descriptors available */
        adapter->num_tx_desc_avail = adapter->num_tx_desc;

        /* Set checksum context */
        adapter->active_checksum_context = OFFLOAD_NONE;
	/* Hand the (now cleared) ring to the hardware. */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        return (0);

fail:
	em_free_transmit_structures(adapter);
	return (error);
}
2238
2239/*********************************************************************
2240 *
2241 *  Enable transmit unit.
2242 *
2243 **********************************************************************/
/*
 * Program the hardware transmit unit: descriptor ring base/length,
 * head/tail pointers, inter-packet gap, interrupt delay timers and the
 * transmit control register, then record the default per-descriptor
 * command bits in adapter->txd_cmd.
 */
static void
em_initialize_transmit_unit(struct adapter * adapter)
{
	u_int32_t       reg_tctl;
	u_int32_t       reg_tipg = 0;
	u_int64_t	bus_addr;

         INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = adapter->txdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, TDBAL, (u_int32_t)bus_addr);
	E1000_WRITE_REG(&adapter->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, TDLEN,
			adapter->num_tx_desc *
			sizeof(struct em_tx_desc));

	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&adapter->hw, TDH, 0);
	E1000_WRITE_REG(&adapter->hw, TDT, 0);


	HW_DEBUGOUT2("Base = %x, Length = %x\n",
		     E1000_READ_REG(&adapter->hw, TDBAL),
		     E1000_READ_REG(&adapter->hw, TDLEN));

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (adapter->hw.mac_type) {
	case em_82542_rev2_0:
        case em_82542_rev2_1:
		/* The 82542 generation uses its own IPG timings. */
                reg_tipg = DEFAULT_82542_TIPG_IPGT;
                reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
                break;
        default:
		/* 82543 and later: IPGT differs for fiber vs. copper. */
                if (adapter->hw.media_type == em_media_type_fiber)
                        reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
                else
                        reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
                reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
        }

	E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
	E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
	/* Absolute TX interrupt delay exists only on 82540 and newer. */
	if(adapter->hw.mac_type >= em_82540)
		E1000_WRITE_REG(&adapter->hw, TADV,
		    adapter->tx_abs_int_delay.value);

	/* Program the Transmit Control Register */
	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
	/* 82573 and newer support multiple simultaneous TX requests. */
	if (adapter->hw.mac_type >= em_82573)
		reg_tctl |= E1000_TCTL_MULR;
	/* NOTE(review): link_duplex is compared against 1 here; confirm
	 * which value em_get_speed_and_duplex() reports for full duplex. */
	if (adapter->link_duplex == 1) {
		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	} else {
		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	}
	E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;

	/* Only ask for a delayed interrupt if a delay is configured. */
	if (adapter->tx_int_delay.value > 0)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	return;
}
2312
2313/*********************************************************************
2314 *
2315 *  Free all transmit related data structures.
2316 *
2317 **********************************************************************/
2318static void
2319em_free_transmit_structures(struct adapter * adapter)
2320{
2321        struct em_buffer   *tx_buffer;
2322        int             i;
2323
2324        INIT_DEBUGOUT("free_transmit_structures: begin");
2325
2326        if (adapter->tx_buffer_area != NULL) {
2327                tx_buffer = adapter->tx_buffer_area;
2328                for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2329                        if (tx_buffer->m_head != NULL) {
2330				bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2331				    BUS_DMASYNC_POSTWRITE);
2332				bus_dmamap_unload(adapter->txtag,
2333				    tx_buffer->map);
2334                                m_freem(tx_buffer->m_head);
2335				tx_buffer->m_head = NULL;
2336                        } else if (tx_buffer->map != NULL)
2337				bus_dmamap_unload(adapter->txtag,
2338				    tx_buffer->map);
2339			if (tx_buffer->map != NULL) {
2340				bus_dmamap_destroy(adapter->txtag,
2341				    tx_buffer->map);
2342				tx_buffer->map = NULL;
2343			}
2344                }
2345        }
2346        if (adapter->tx_buffer_area != NULL) {
2347                free(adapter->tx_buffer_area, M_DEVBUF);
2348                adapter->tx_buffer_area = NULL;
2349        }
2350        if (adapter->txtag != NULL) {
2351                bus_dma_tag_destroy(adapter->txtag);
2352                adapter->txtag = NULL;
2353        }
2354        return;
2355}
2356
2357/*********************************************************************
2358 *
2359 *  The offload context needs to be set when we transfer the first
2360 *  packet of a particular protocol (TCP/UDP). We change the
2361 *  context only if the protocol type changes.
2362 *
2363 **********************************************************************/
/*
 * Lazily (re)program the hardware checksum-offload context for `mp'.
 * Always sets *txd_upper / *txd_lower to the option bits the data
 * descriptors need (zero when no offload is requested).  Only when the
 * protocol (TCP vs. UDP) differs from the currently active context does
 * it consume one ring slot to write a new context descriptor; that slot
 * decrements num_tx_desc_avail and advances next_avail_tx_desc.
 */
static void
em_transmit_checksum_setup(struct adapter * adapter,
			   struct mbuf *mp,
			   u_int32_t *txd_upper,
			   u_int32_t *txd_lower)
{
	struct em_context_desc *TXD;
	struct em_buffer *tx_buffer;
	int curr_txd;

	if (mp->m_pkthdr.csum_flags) {

		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			/* Context already programmed for TCP: nothing to do. */
			if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
				return;
			else
				adapter->active_checksum_context = OFFLOAD_TCP_IP;

		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			/* Context already programmed for UDP: nothing to do. */
			if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
				return;
			else
				adapter->active_checksum_context = OFFLOAD_UDP_IP;
		} else {
			/* Unsupported csum request: send without offload. */
			*txd_upper = 0;
			*txd_lower = 0;
			return;
		}
	} else {
		*txd_upper = 0;
		*txd_lower = 0;
		return;
	}

	/* If we reach this point, the checksum offload context
	 * needs to be reset.
	 */
	curr_txd = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[curr_txd];
	TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];

	/* IP header starts right after the 14-byte Ethernet header. */
	TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
	TXD->lower_setup.ip_fields.ipcso =
		ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
	TXD->lower_setup.ip_fields.ipcse =
		htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);

	/* TCP/UDP checksum covers from the end of the IP header onward. */
	TXD->upper_setup.tcp_fields.tucss =
		ETHER_HDR_LEN + sizeof(struct ip);
	TXD->upper_setup.tcp_fields.tucse = htole16(0);

	if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
		TXD->upper_setup.tcp_fields.tucso =
			ETHER_HDR_LEN + sizeof(struct ip) +
			offsetof(struct tcphdr, th_sum);
	} else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
		TXD->upper_setup.tcp_fields.tucso =
			ETHER_HDR_LEN + sizeof(struct ip) +
			offsetof(struct udphdr, uh_sum);
	}

	TXD->tcp_seg_setup.data = htole32(0);
	TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);

	/* Context descriptors carry no mbuf of their own. */
	tx_buffer->m_head = NULL;

	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;

	return;
}
2442
2443/**********************************************************************
2444 *
2445 *  Examine each tx_buffer in the used queue. If the hardware is done
2446 *  processing the packet then free associated resources. The
2447 *  tx_buffer is put back on the free queue.
2448 *
2449 **********************************************************************/
/*
 * Reclaim TX descriptors the hardware has completed.  Walks the ring
 * from oldest_used_tx_desc while the descriptor-done (DD) status bit is
 * set, unmapping and freeing transmitted mbufs, then updates
 * num_tx_desc_avail and the IFF_DRV_OACTIVE flag / watchdog timer.
 * Must be called with the adapter mutex held.
 */
static void
em_clean_transmit_interrupts(struct adapter * adapter)
{
        int i, num_avail;
        struct em_buffer *tx_buffer;
        struct em_tx_desc   *tx_desc;
	struct ifnet   *ifp = adapter->ifp;

	mtx_assert(&adapter->mtx, MA_OWNED);

	/* Fast path: ring already empty, nothing to reclaim. */
        if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
                return;

        num_avail = adapter->num_tx_desc_avail;
        i = adapter->oldest_used_tx_desc;

        tx_buffer = &adapter->tx_buffer_area[i];
        tx_desc = &adapter->tx_desc_base[i];

	/* Pick up the status bits the hardware wrote back. */
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_POSTREAD);
        while (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {

		/* Clear the status so this slot reads as not-done
		 * when it is reused. */
                tx_desc->upper.data = 0;
                num_avail++;

		/* Slots used for context descriptors have no mbuf. */
                if (tx_buffer->m_head) {
			ifp->if_opackets++;
			bus_dmamap_sync(adapter->txtag, tx_buffer->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(adapter->txtag, tx_buffer->map);

                        m_freem(tx_buffer->m_head);
                        tx_buffer->m_head = NULL;
                }

                if (++i == adapter->num_tx_desc)
                        i = 0;

                tx_buffer = &adapter->tx_buffer_area[i];
                tx_desc = &adapter->tx_desc_base[i];
        }
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        adapter->oldest_used_tx_desc = i;

        /*
         * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack
         * that it is OK to send packets.
         * If there are no pending descriptors, clear the timeout. Otherwise,
         * if some descriptors have been freed, restart the timeout.
         */
        if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                if (num_avail == adapter->num_tx_desc)
                        ifp->if_timer = 0;
                else if (num_avail == adapter->num_tx_desc_avail)
                        ifp->if_timer = EM_TX_TIMEOUT;
        }
        adapter->num_tx_desc_avail = num_avail;
        return;
}
2513
2514/*********************************************************************
2515 *
2516 *  Get a buffer from system mbuf buffer pool.
2517 *
2518 **********************************************************************/
/*
 * Attach a receive buffer (one mbuf cluster) to RX descriptor `i'.
 * If `nmp' is NULL a fresh cluster is allocated; otherwise the caller's
 * mbuf is recycled in place.  The cluster is DMA-mapped and its bus
 * address written into the descriptor.  Returns 0 on success, ENOBUFS
 * on cluster exhaustion, or a bus_dma error (in which case the mbuf is
 * freed here).
 */
static int
em_get_buf(int i, struct adapter *adapter,
           struct mbuf *nmp)
{
        struct mbuf    *mp = nmp;
        struct em_buffer *rx_buffer;
        struct ifnet   *ifp;
	bus_dma_segment_t segs[1];
	int error, nsegs;

        ifp = adapter->ifp;

        if (mp == NULL) {
                mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
                if (mp == NULL) {
                        adapter->mbuf_cluster_failed++;
                        return(ENOBUFS);
                }
                mp->m_len = mp->m_pkthdr.len = MCLBYTES;
        } else {
		/* Recycle: reset the mbuf to span the whole cluster. */
                mp->m_len = mp->m_pkthdr.len = MCLBYTES;
                mp->m_data = mp->m_ext.ext_buf;
                mp->m_next = NULL;
        }

	/* For standard MTUs, shift the payload by ETHER_ALIGN so the
	 * IP header ends up aligned after the 14-byte Ethernet header. */
        if (ifp->if_mtu <= ETHERMTU) {
                m_adj(mp, ETHER_ALIGN);
        }

        rx_buffer = &adapter->rx_buffer_area[i];

        /*
         * Using memory from the mbuf cluster pool, invoke the
         * bus_dma machinery to arrange the memory mapping.
         */
        error = bus_dmamap_load_mbuf_sg(adapter->rxtag, rx_buffer->map,
	    mp, segs, &nsegs, 0);
        if (error != 0) {
                m_free(mp);
                return(error);
        }
	/* If nsegs is wrong then the stack is corrupt */
	KASSERT(nsegs == 1, ("Too many segments returned!"));
        rx_buffer->m_head = mp;
        adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
        bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);

        return(0);
}
2568
2569/*********************************************************************
2570 *
2571 *  Allocate memory for rx_buffer structures. Since we use one
2572 *  rx_buffer per received packet, the maximum number of rx_buffer's
2573 *  that we'll need is equal to the number of receive descriptors
2574 *  that we've allocated.
2575 *
2576 **********************************************************************/
2577static int
2578em_allocate_receive_structures(struct adapter * adapter)
2579{
2580        int             i, error;
2581        struct em_buffer *rx_buffer;
2582
2583        if (!(adapter->rx_buffer_area =
2584              (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2585                                          adapter->num_rx_desc, M_DEVBUF,
2586                                          M_NOWAIT))) {
2587                printf("em%d: Unable to allocate rx_buffer memory\n",
2588                       adapter->unit);
2589                return(ENOMEM);
2590        }
2591
2592        bzero(adapter->rx_buffer_area,
2593              sizeof(struct em_buffer) * adapter->num_rx_desc);
2594
2595        error = bus_dma_tag_create(NULL,                /* parent */
2596                               1, 0,                    /* alignment, bounds */
2597                               BUS_SPACE_MAXADDR,       /* lowaddr */
2598                               BUS_SPACE_MAXADDR,       /* highaddr */
2599                               NULL, NULL,              /* filter, filterarg */
2600                               MCLBYTES,                /* maxsize */
2601                               1,                       /* nsegments */
2602                               MCLBYTES,                /* maxsegsize */
2603                               BUS_DMA_ALLOCNOW,        /* flags */
2604			       NULL,			/* lockfunc */
2605			       NULL,			/* lockarg */
2606                               &adapter->rxtag);
2607        if (error != 0) {
2608                printf("em%d: em_allocate_receive_structures: "
2609                        "bus_dma_tag_create failed; error %u\n",
2610                       adapter->unit, error);
2611                goto fail;
2612        }
2613
2614        rx_buffer = adapter->rx_buffer_area;
2615        for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2616                error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2617                                          &rx_buffer->map);
2618                if (error != 0) {
2619                        printf("em%d: em_allocate_receive_structures: "
2620                                "bus_dmamap_create failed; error %u\n",
2621                                adapter->unit, error);
2622                        goto fail;
2623                }
2624        }
2625
2626        for (i = 0; i < adapter->num_rx_desc; i++) {
2627                error = em_get_buf(i, adapter, NULL);
2628		if (error != 0)
2629			goto fail;
2630        }
2631        bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2632            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2633
2634        return(0);
2635
2636fail:
2637	em_free_receive_structures(adapter);
2638        return (error);
2639}
2640
2641/*********************************************************************
2642 *
2643 *  Allocate and initialize receive structures.
2644 *
2645 **********************************************************************/
2646static int
2647em_setup_receive_structures(struct adapter * adapter)
2648{
2649	bzero((void *) adapter->rx_desc_base,
2650              (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2651
2652	if (em_allocate_receive_structures(adapter))
2653		return ENOMEM;
2654
2655	/* Setup our descriptor pointers */
2656        adapter->next_rx_desc_to_check = 0;
2657	return(0);
2658}
2659
2660/*********************************************************************
2661 *
2662 *  Enable receive unit.
2663 *
2664 **********************************************************************/
/*
 * Program the hardware receive unit: interrupt delay/throttling timers,
 * descriptor ring base/length, head/tail pointers, buffer sizing and
 * the receive control register, plus hardware RX checksum offload when
 * enabled.  Receives are disabled during setup and re-enabled at the
 * end by the final RCTL write.
 */
static void
em_initialize_receive_unit(struct adapter * adapter)
{
	u_int32_t       reg_rctl;
	u_int32_t       reg_rxcsum;
	struct ifnet    *ifp;
	u_int64_t	bus_addr;

        INIT_DEBUGOUT("em_initialize_receive_unit: begin");
	ifp = adapter->ifp;

	/* Make sure receives are disabled while setting up the descriptor ring */
	E1000_WRITE_REG(&adapter->hw, RCTL, 0);

	/* Set the Receive Delay Timer Register */
	E1000_WRITE_REG(&adapter->hw, RDTR,
			adapter->rx_int_delay.value | E1000_RDT_FPDB);

	if(adapter->hw.mac_type >= em_82540) {
		E1000_WRITE_REG(&adapter->hw, RADV,
		    adapter->rx_abs_int_delay.value);

                /* Set the interrupt throttling rate.  Value is calculated
                 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */
#define MAX_INTS_PER_SEC        8000
#define DEFAULT_ITR             1000000000/(MAX_INTS_PER_SEC * 256)
                E1000_WRITE_REG(&adapter->hw, ITR, DEFAULT_ITR);
        }

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, RDBAL, (u_int32_t)bus_addr);
	E1000_WRITE_REG(&adapter->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
			sizeof(struct em_rx_desc));

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	/* Tail at the last descriptor: the whole ring is ready for RX. */
	E1000_WRITE_REG(&adapter->hw, RDH, 0);
	E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);

	/* Setup the Receive Control Register */
	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* TBI workaround needs to see bad packets to patch up the tail. */
	if (adapter->hw.tbi_compatibility_on == TRUE)
		reg_rctl |= E1000_RCTL_SBP;


	/* Buffer-size bits; sizes above 2048 need the BSEX extension. */
	switch (adapter->rx_buffer_len) {
	default:
	case EM_RXBUFFER_2048:
		reg_rctl |= E1000_RCTL_SZ_2048;
		break;
	case EM_RXBUFFER_4096:
		reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_8192:
		reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_16384:
		reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	/* Jumbo MTU requires long-packet enable regardless of buffer size. */
	if (ifp->if_mtu > ETHERMTU)
		reg_rctl |= E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac_type >= em_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
	}

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);

	return;
}
2746
2747/*********************************************************************
2748 *
2749 *  Free receive related data structures.
2750 *
2751 **********************************************************************/
2752static void
2753em_free_receive_structures(struct adapter *adapter)
2754{
2755        struct em_buffer   *rx_buffer;
2756        int             i;
2757
2758        INIT_DEBUGOUT("free_receive_structures: begin");
2759
2760        if (adapter->rx_buffer_area != NULL) {
2761                rx_buffer = adapter->rx_buffer_area;
2762                for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2763			if (rx_buffer->m_head != NULL) {
2764				bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
2765				    BUS_DMASYNC_POSTREAD);
2766				bus_dmamap_unload(adapter->rxtag,
2767				    rx_buffer->map);
2768				m_freem(rx_buffer->m_head);
2769				rx_buffer->m_head = NULL;
2770			} else if (rx_buffer->map != NULL)
2771				bus_dmamap_unload(adapter->rxtag,
2772				    rx_buffer->map);
2773                        if (rx_buffer->map != NULL) {
2774				bus_dmamap_destroy(adapter->rxtag,
2775				    rx_buffer->map);
2776				rx_buffer->map = NULL;
2777			}
2778                }
2779        }
2780        if (adapter->rx_buffer_area != NULL) {
2781                free(adapter->rx_buffer_area, M_DEVBUF);
2782                adapter->rx_buffer_area = NULL;
2783        }
2784        if (adapter->rxtag != NULL) {
2785                bus_dma_tag_destroy(adapter->rxtag);
2786                adapter->rxtag = NULL;
2787        }
2788        return;
2789}
2790
2791/*********************************************************************
2792 *
2793 *  This routine executes in interrupt context. It replenishes
2794 *  the mbufs in the descriptor and sends data which has been
2795 *  dma'ed into host memory to upper layer.
2796 *
2797 *  We loop at most count times if count is > 0, or until done if
2798 *  count < 0.
2799 *
2800 *********************************************************************/
2801static void
2802em_process_receive_interrupts(struct adapter * adapter, int count)
2803{
2804	struct ifnet        *ifp;
2805	struct mbuf         *mp;
2806	u_int8_t            accept_frame = 0;
2807 	u_int8_t            eop = 0;
2808	u_int16_t           len, desc_len, prev_len_adj;
2809	int                 i;
2810
2811	/* Pointer to the receive descriptor being examined. */
2812	struct em_rx_desc   *current_desc;
2813
2814	mtx_assert(&adapter->mtx, MA_OWNED);
2815
2816	ifp = adapter->ifp;
2817	i = adapter->next_rx_desc_to_check;
2818        current_desc = &adapter->rx_desc_base[i];
2819	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2820	    BUS_DMASYNC_POSTREAD);
2821
2822	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
2823		return;
2824	}
2825
2826	while ((current_desc->status & E1000_RXD_STAT_DD) &&
2827		    (count != 0) &&
2828		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2829		struct mbuf *m = NULL;
2830
2831		mp = adapter->rx_buffer_area[i].m_head;
2832		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2833		    BUS_DMASYNC_POSTREAD);
2834		bus_dmamap_unload(adapter->rxtag,
2835		    adapter->rx_buffer_area[i].map);
2836
2837		accept_frame = 1;
2838		prev_len_adj = 0;
2839                desc_len = le16toh(current_desc->length);
2840		if (current_desc->status & E1000_RXD_STAT_EOP) {
2841			count--;
2842			eop = 1;
2843			if (desc_len < ETHER_CRC_LEN) {
2844                                len = 0;
2845                                prev_len_adj = ETHER_CRC_LEN - desc_len;
2846                        }
2847                        else {
2848                                len = desc_len - ETHER_CRC_LEN;
2849                        }
2850		} else {
2851			eop = 0;
2852			len = desc_len;
2853		}
2854
2855		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2856			u_int8_t            last_byte;
2857			u_int32_t           pkt_len = desc_len;
2858
2859			if (adapter->fmp != NULL)
2860				pkt_len += adapter->fmp->m_pkthdr.len;
2861
2862			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
2863
2864			if (TBI_ACCEPT(&adapter->hw, current_desc->status,
2865				       current_desc->errors,
2866				       pkt_len, last_byte)) {
2867				em_tbi_adjust_stats(&adapter->hw,
2868						    &adapter->stats,
2869						    pkt_len,
2870						    adapter->hw.mac_addr);
2871				if (len > 0) len--;
2872			}
2873			else {
2874				accept_frame = 0;
2875			}
2876		}
2877
2878		if (accept_frame) {
2879
2880			if (em_get_buf(i, adapter, NULL) == ENOBUFS) {
2881				adapter->dropped_pkts++;
2882				em_get_buf(i, adapter, mp);
2883				if (adapter->fmp != NULL)
2884					m_freem(adapter->fmp);
2885				adapter->fmp = NULL;
2886				adapter->lmp = NULL;
2887				break;
2888			}
2889
2890			/* Assign correct length to the current fragment */
2891			mp->m_len = len;
2892
2893			if (adapter->fmp == NULL) {
2894				mp->m_pkthdr.len = len;
2895				adapter->fmp = mp;	 /* Store the first mbuf */
2896				adapter->lmp = mp;
2897			} else {
2898				/* Chain mbuf's together */
2899				mp->m_flags &= ~M_PKTHDR;
2900				/*
2901                                 * Adjust length of previous mbuf in chain if we
2902                                 * received less than 4 bytes in the last descriptor.
2903                                 */
2904				if (prev_len_adj > 0) {
2905					adapter->lmp->m_len -= prev_len_adj;
2906					adapter->fmp->m_pkthdr.len -= prev_len_adj;
2907				}
2908				adapter->lmp->m_next = mp;
2909				adapter->lmp = adapter->lmp->m_next;
2910				adapter->fmp->m_pkthdr.len += len;
2911			}
2912
2913                        if (eop) {
2914                                adapter->fmp->m_pkthdr.rcvif = ifp;
2915				ifp->if_ipackets++;
2916                                em_receive_checksum(adapter, current_desc,
2917                                                    adapter->fmp);
2918                                if (current_desc->status & E1000_RXD_STAT_VP)
2919					VLAN_INPUT_TAG(ifp, adapter->fmp,
2920					    (le16toh(current_desc->special) &
2921					    E1000_RXD_SPC_VLAN_MASK),
2922					    adapter->fmp = NULL);
2923
2924				m = adapter->fmp;
2925				adapter->fmp = NULL;
2926				adapter->lmp = NULL;
2927                        }
2928		} else {
2929			adapter->dropped_pkts++;
2930			em_get_buf(i, adapter, mp);
2931			if (adapter->fmp != NULL)
2932				m_freem(adapter->fmp);
2933			adapter->fmp = NULL;
2934			adapter->lmp = NULL;
2935		}
2936
2937		/* Zero out the receive descriptors status  */
2938		current_desc->status = 0;
2939		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2940		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2941
2942		/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
2943                E1000_WRITE_REG(&adapter->hw, RDT, i);
2944
2945                /* Advance our pointers to the next descriptor */
2946		if (++i == adapter->num_rx_desc)
2947			i = 0;
2948		if (m != NULL) {
2949			adapter->next_rx_desc_to_check = i;
2950			EM_UNLOCK(adapter);
2951			(*ifp->if_input)(ifp, m);
2952			EM_LOCK(adapter);
2953			i = adapter->next_rx_desc_to_check;
2954		}
2955		current_desc = &adapter->rx_desc_base[i];
2956	}
2957	adapter->next_rx_desc_to_check = i;
2958	return;
2959}
2960
2961/*********************************************************************
2962 *
2963 *  Verify that the hardware indicated that the checksum is valid.
2964 *  Inform the stack about the status of checksum so that stack
2965 *  doesn't spend time verifying the checksum.
2966 *
2967 *********************************************************************/
2968static void
2969em_receive_checksum(struct adapter *adapter,
2970		    struct em_rx_desc *rx_desc,
2971		    struct mbuf *mp)
2972{
2973	/* 82543 or newer only */
2974	if ((adapter->hw.mac_type < em_82543) ||
2975	    /* Ignore Checksum bit is set */
2976	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
2977		mp->m_pkthdr.csum_flags = 0;
2978		return;
2979	}
2980
2981	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
2982		/* Did it pass? */
2983		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
2984			/* IP Checksum Good */
2985			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2986			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2987
2988		} else {
2989			mp->m_pkthdr.csum_flags = 0;
2990		}
2991	}
2992
2993	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
2994		/* Did it pass? */
2995		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
2996			mp->m_pkthdr.csum_flags |=
2997			(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2998			mp->m_pkthdr.csum_data = htons(0xffff);
2999		}
3000	}
3001
3002	return;
3003}
3004
3005
3006static void
3007em_enable_vlans(struct adapter *adapter)
3008{
3009	uint32_t ctrl;
3010
3011	E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
3012
3013	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3014	ctrl |= E1000_CTRL_VME;
3015	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3016
3017	return;
3018}
3019
3020static void
3021em_disable_vlans(struct adapter *adapter)
3022{
3023	uint32_t ctrl;
3024
3025	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3026	ctrl &= ~E1000_CTRL_VME;
3027	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3028
3029	return;
3030}
3031
3032static void
3033em_enable_intr(struct adapter * adapter)
3034{
3035	E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
3036	return;
3037}
3038
/* Mask (almost) all interrupt causes. */
static void
em_disable_intr(struct adapter *adapter)
{
	/*
	 * The first version of 82542 had an errata where, when link was
	 * forced, it would stay up even if the cable was disconnected.
	 * Sequence errors were used to detect the disconnect and then the
	 * driver would unforce the link.  That code is in the ISR, so for it
	 * to work correctly the Sequence error interrupt has to be enabled
	 * all the time.
	 */

	if (adapter->hw.mac_type == em_82542_rev2_0)
	    E1000_WRITE_REG(&adapter->hw, IMC,
	        (0xffffffff & ~E1000_IMC_RXSEQ));
	else
	    E1000_WRITE_REG(&adapter->hw, IMC,
	        0xffffffff);
	return;
}
3058
3059static int
3060em_is_valid_ether_addr(u_int8_t *addr)
3061{
3062        char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3063
3064        if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3065                return (FALSE);
3066        }
3067
3068        return(TRUE);
3069}
3070
3071void
3072em_write_pci_cfg(struct em_hw *hw,
3073		      uint32_t reg,
3074		      uint16_t *value)
3075{
3076	pci_write_config(((struct em_osdep *)hw->back)->dev, reg,
3077			 *value, 2);
3078}
3079
3080void
3081em_read_pci_cfg(struct em_hw *hw, uint32_t reg,
3082		     uint16_t *value)
3083{
3084	*value = pci_read_config(((struct em_osdep *)hw->back)->dev,
3085				 reg, 2);
3086	return;
3087}
3088
3089void
3090em_pci_set_mwi(struct em_hw *hw)
3091{
3092        pci_write_config(((struct em_osdep *)hw->back)->dev,
3093                         PCIR_COMMAND,
3094                         (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
3095        return;
3096}
3097
3098void
3099em_pci_clear_mwi(struct em_hw *hw)
3100{
3101        pci_write_config(((struct em_osdep *)hw->back)->dev,
3102                         PCIR_COMMAND,
3103                         (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
3104        return;
3105}
3106
3107/*********************************************************************
3108* 82544 Coexistence issue workaround.
3109*    There are 2 issues.
3110*       1. Transmit Hang issue.
3111*    To detect this issue, following equation can be used...
3112*          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3113*          If SUM[3:0] is in between 1 to 4, we will have this issue.
3114*
3115*       2. DAC issue.
3116*    To detect this issue, following equation can be used...
3117*          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3118*          If SUM[3:0] is in between 9 to c, we will have this issue.
3119*
3120*
3121*    WORKAROUND:
3122*          Make sure we do not have ending address as 1,2,3,4(Hang) or 9,a,b,c (DAC)
3123*
3124*** *********************************************************************/
3125static u_int32_t
3126em_fill_descriptors (bus_addr_t address,
3127                              u_int32_t length,
3128                              PDESC_ARRAY desc_array)
3129{
3130        /* Since issue is sensitive to length and address.*/
3131        /* Let us first check the address...*/
3132        u_int32_t safe_terminator;
3133        if (length <= 4) {
3134                desc_array->descriptor[0].address = address;
3135                desc_array->descriptor[0].length = length;
3136                desc_array->elements = 1;
3137                return desc_array->elements;
3138        }
3139        safe_terminator = (u_int32_t)((((u_int32_t)address & 0x7) + (length & 0xF)) & 0xF);
3140        /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
3141        if (safe_terminator == 0   ||
3142        (safe_terminator > 4   &&
3143        safe_terminator < 9)   ||
3144        (safe_terminator > 0xC &&
3145        safe_terminator <= 0xF)) {
3146                desc_array->descriptor[0].address = address;
3147                desc_array->descriptor[0].length = length;
3148                desc_array->elements = 1;
3149                return desc_array->elements;
3150        }
3151
3152        desc_array->descriptor[0].address = address;
3153        desc_array->descriptor[0].length = length - 4;
3154        desc_array->descriptor[1].address = address + (length - 4);
3155        desc_array->descriptor[1].length = 4;
3156        desc_array->elements = 2;
3157        return desc_array->elements;
3158}
3159
3160/**********************************************************************
3161 *
3162 *  Update the board statistics counters.
3163 *
3164 **********************************************************************/
3165static void
3166em_update_stats_counters(struct adapter *adapter)
3167{
3168	struct ifnet   *ifp;
3169
3170	if(adapter->hw.media_type == em_media_type_copper ||
3171	   (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
3172		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
3173		adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
3174	}
3175	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
3176	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
3177	adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
3178	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);
3179
3180	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
3181	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
3182	adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
3183	adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
3184	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
3185	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
3186	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
3187	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
3188	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
3189	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
3190	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
3191	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
3192	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
3193	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
3194	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
3195	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
3196	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
3197	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
3198	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
3199	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);
3200
3201	/* For the 64-bit byte counters the low dword must be read first. */
3202	/* Both registers clear on the read of the high dword */
3203
3204	adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
3205	adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
3206	adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
3207	adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);
3208
3209	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
3210	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
3211	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
3212	adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
3213	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);
3214
3215	adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
3216	adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
3217	adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
3218	adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);
3219
3220	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
3221	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
3222	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
3223	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
3224	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
3225	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
3226	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
3227	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
3228	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
3229	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);
3230
3231	if (adapter->hw.mac_type >= em_82543) {
3232		adapter->stats.algnerrc +=
3233		E1000_READ_REG(&adapter->hw, ALGNERRC);
3234		adapter->stats.rxerrc +=
3235		E1000_READ_REG(&adapter->hw, RXERRC);
3236		adapter->stats.tncrs +=
3237		E1000_READ_REG(&adapter->hw, TNCRS);
3238		adapter->stats.cexterr +=
3239		E1000_READ_REG(&adapter->hw, CEXTERR);
3240		adapter->stats.tsctc +=
3241		E1000_READ_REG(&adapter->hw, TSCTC);
3242		adapter->stats.tsctfc +=
3243		E1000_READ_REG(&adapter->hw, TSCTFC);
3244	}
3245	ifp = adapter->ifp;
3246
3247	ifp->if_collisions = adapter->stats.colc;
3248
3249	/* Rx Errors */
3250	ifp->if_ierrors =
3251	adapter->dropped_pkts +
3252	adapter->stats.rxerrc +
3253	adapter->stats.crcerrs +
3254	adapter->stats.algnerrc +
3255	adapter->stats.rlec +
3256	adapter->stats.mpc + adapter->stats.cexterr;
3257
3258	/* Tx Errors */
3259	ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol +
3260	    adapter->watchdog_events;
3261
3262}
3263
3264
3265/**********************************************************************
3266 *
3267 *  This routine is called only when em_display_debug_stats is enabled.
3268 *  This routine provides a way to take a look at important statistics
3269 *  maintained by the driver and hardware.
3270 *
3271 **********************************************************************/
3272static void
3273em_print_debug_info(struct adapter *adapter)
3274{
3275        int unit = adapter->unit;
3276	uint8_t *hw_addr = adapter->hw.hw_addr;
3277
3278	printf("em%d: Adapter hardware address = %p \n", unit, hw_addr);
3279	printf("em%d:CTRL  = 0x%x\n", unit,
3280		E1000_READ_REG(&adapter->hw, CTRL));
3281	printf("em%d:RCTL  = 0x%x PS=(0x8402)\n", unit,
3282		E1000_READ_REG(&adapter->hw, RCTL));
3283	printf("em%d:tx_int_delay = %d, tx_abs_int_delay = %d\n", unit,
3284              E1000_READ_REG(&adapter->hw, TIDV),
3285	      E1000_READ_REG(&adapter->hw, TADV));
3286	printf("em%d:rx_int_delay = %d, rx_abs_int_delay = %d\n", unit,
3287              E1000_READ_REG(&adapter->hw, RDTR),
3288	      E1000_READ_REG(&adapter->hw, RADV));
3289        printf("em%d: fifo workaround = %lld, fifo_reset = %lld\n", unit,
3290               (long long)adapter->tx_fifo_wrk_cnt,
3291               (long long)adapter->tx_fifo_reset_cnt);
3292        printf("em%d: hw tdh = %d, hw tdt = %d\n", unit,
3293               E1000_READ_REG(&adapter->hw, TDH),
3294               E1000_READ_REG(&adapter->hw, TDT));
3295        printf("em%d: Num Tx descriptors avail = %d\n", unit,
3296               adapter->num_tx_desc_avail);
3297        printf("em%d: Tx Descriptors not avail1 = %ld\n", unit,
3298               adapter->no_tx_desc_avail1);
3299        printf("em%d: Tx Descriptors not avail2 = %ld\n", unit,
3300               adapter->no_tx_desc_avail2);
3301        printf("em%d: Std mbuf failed = %ld\n", unit,
3302               adapter->mbuf_alloc_failed);
3303        printf("em%d: Std mbuf cluster failed = %ld\n", unit,
3304               adapter->mbuf_cluster_failed);
3305        printf("em%d: Driver dropped packets = %ld\n", unit,
3306               adapter->dropped_pkts);
3307
3308        return;
3309}
3310
3311static void
3312em_print_hw_stats(struct adapter *adapter)
3313{
3314        int unit = adapter->unit;
3315
3316        printf("em%d: Excessive collisions = %lld\n", unit,
3317               (long long)adapter->stats.ecol);
3318        printf("em%d: Symbol errors = %lld\n", unit,
3319               (long long)adapter->stats.symerrs);
3320        printf("em%d: Sequence errors = %lld\n", unit,
3321               (long long)adapter->stats.sec);
3322        printf("em%d: Defer count = %lld\n", unit,
3323               (long long)adapter->stats.dc);
3324
3325        printf("em%d: Missed Packets = %lld\n", unit,
3326               (long long)adapter->stats.mpc);
3327        printf("em%d: Receive No Buffers = %lld\n", unit,
3328               (long long)adapter->stats.rnbc);
3329        printf("em%d: Receive length errors = %lld\n", unit,
3330               (long long)adapter->stats.rlec);
3331        printf("em%d: Receive errors = %lld\n", unit,
3332               (long long)adapter->stats.rxerrc);
3333        printf("em%d: Crc errors = %lld\n", unit,
3334               (long long)adapter->stats.crcerrs);
3335        printf("em%d: Alignment errors = %lld\n", unit,
3336               (long long)adapter->stats.algnerrc);
3337        printf("em%d: Carrier extension errors = %lld\n", unit,
3338               (long long)adapter->stats.cexterr);
3339	printf("em%d: RX overruns = %ld\n", unit, adapter->rx_overruns);
3340	printf("em%d: watchdog timeouts = %ld\n", unit,
3341		adapter->watchdog_events);
3342
3343        printf("em%d: XON Rcvd = %lld\n", unit,
3344               (long long)adapter->stats.xonrxc);
3345        printf("em%d: XON Xmtd = %lld\n", unit,
3346               (long long)adapter->stats.xontxc);
3347        printf("em%d: XOFF Rcvd = %lld\n", unit,
3348               (long long)adapter->stats.xoffrxc);
3349        printf("em%d: XOFF Xmtd = %lld\n", unit,
3350               (long long)adapter->stats.xofftxc);
3351
3352        printf("em%d: Good Packets Rcvd = %lld\n", unit,
3353               (long long)adapter->stats.gprc);
3354        printf("em%d: Good Packets Xmtd = %lld\n", unit,
3355               (long long)adapter->stats.gptc);
3356
3357        return;
3358}
3359
3360static int
3361em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3362{
3363        int error;
3364        int result;
3365        struct adapter *adapter;
3366
3367        result = -1;
3368        error = sysctl_handle_int(oidp, &result, 0, req);
3369
3370        if (error || !req->newptr)
3371                return (error);
3372
3373        if (result == 1) {
3374                adapter = (struct adapter *)arg1;
3375                em_print_debug_info(adapter);
3376        }
3377
3378        return error;
3379}
3380
3381
3382static int
3383em_sysctl_stats(SYSCTL_HANDLER_ARGS)
3384{
3385        int error;
3386        int result;
3387        struct adapter *adapter;
3388
3389        result = -1;
3390        error = sysctl_handle_int(oidp, &result, 0, req);
3391
3392        if (error || !req->newptr)
3393                return (error);
3394
3395        if (result == 1) {
3396                adapter = (struct adapter *)arg1;
3397                em_print_hw_stats(adapter);
3398        }
3399
3400        return error;
3401}
3402
/*
 * Sysctl handler for the interrupt-delay knobs.  Exposes the delay in
 * microseconds to userland; converts to hardware ticks and writes the
 * associated delay register, with register-specific fixups.
 */
static int
em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
{
	struct em_int_delay_info *info;
	struct adapter *adapter;
	u_int32_t regval;
	int error;
	int usecs;
	int ticks;

	info = (struct em_int_delay_info *)arg1;
	usecs = info->value;
	error = sysctl_handle_int(oidp, &usecs, 0, req);
	if (error != 0 || req->newptr == NULL)
		return error;
	/* The delay field in the register is only 16 bits wide. */
	if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
		return EINVAL;
	info->value = usecs;
	ticks = E1000_USECS_TO_TICKS(usecs);

	adapter = info->adapter;

	EM_LOCK(adapter);
	/* Read-modify-write: preserve everything above the low 16 bits. */
	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
	regval = (regval & ~0xffff) | (ticks & 0xffff);
	/* Handle a few special cases. */
	switch (info->offset) {
	case E1000_RDTR:
	case E1000_82542_RDTR:
		/* Keep the flush-partial-descriptor-block bit set. */
		regval |= E1000_RDT_FPDB;
		break;
	case E1000_TIDV:
	case E1000_82542_TIDV:
		if (ticks == 0) {
			/* A zero delay disables IDE on transmit descriptors. */
			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
			/* Don't write 0 into the TIDV register. */
			regval++;
		} else
			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
		break;
	}
	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
	EM_UNLOCK(adapter);
	return 0;
}
3448
3449static void
3450em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
3451    const char *description, struct em_int_delay_info *info,
3452    int offset, int value)
3453{
3454	info->adapter = adapter;
3455	info->offset = offset;
3456	info->value = value;
3457	SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
3458	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
3459	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
3460	    info, 0, em_sysctl_int_delay, "I", description);
3461}
3462