if_em.c revision 152315
1/**************************************************************************
2
3Copyright (c) 2001-2005, Intel Corporation
4All rights reserved.
5
6Redistribution and use in source and binary forms, with or without
7modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10    this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13    notice, this list of conditions and the following disclaimer in the
14    documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17    contributors may be used to endorse or promote products derived from
18    this software without specific prior written permission.
19
20THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30POSSIBILITY OF SUCH DAMAGE.
31
32***************************************************************************/
33
34/*$FreeBSD: head/sys/dev/em/if_em.c 152315 2005-11-11 16:04:59Z ru $*/
35
36#ifdef HAVE_KERNEL_OPTION_HEADERS
37#include "opt_device_polling.h"
38#endif
39
40#include <dev/em/if_em.h>
41
42/*********************************************************************
43 *  Set this to one to display debug statistics
44 *********************************************************************/
45int             em_display_debug_stats = 0;
46
47/*********************************************************************
48 *  Driver version
49 *********************************************************************/
50
51char em_driver_version[] = "2.1.7";
52
53
54/*********************************************************************
55 *  PCI Device ID Table
56 *
57 *  Used by probe to select the devices the driver will attach to
58 *  Last field stores an index into em_strings
59 *  Last entry must be all 0s
60 *
61 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
62 *********************************************************************/
63
64static em_vendor_info_t em_vendor_info_array[] =
65{
66        /* Intel(R) PRO/1000 Network Connection */
67        { 0x8086, E1000_DEV_ID_82540EM,             PCI_ANY_ID, PCI_ANY_ID, 0},
68        { 0x8086, E1000_DEV_ID_82540EM_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
69        { 0x8086, E1000_DEV_ID_82540EP,             PCI_ANY_ID, PCI_ANY_ID, 0},
70        { 0x8086, E1000_DEV_ID_82540EP_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
71        { 0x8086, E1000_DEV_ID_82540EP_LP,          PCI_ANY_ID, PCI_ANY_ID, 0},
72
73        { 0x8086, E1000_DEV_ID_82541EI,             PCI_ANY_ID, PCI_ANY_ID, 0},
74        { 0x8086, E1000_DEV_ID_82541ER,             PCI_ANY_ID, PCI_ANY_ID, 0},
75        { 0x8086, E1000_DEV_ID_82541ER_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
76        { 0x8086, E1000_DEV_ID_82541EI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
77        { 0x8086, E1000_DEV_ID_82541GI,             PCI_ANY_ID, PCI_ANY_ID, 0},
78        { 0x8086, E1000_DEV_ID_82541GI_LF,          PCI_ANY_ID, PCI_ANY_ID, 0},
79        { 0x8086, E1000_DEV_ID_82541GI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
80
81        { 0x8086, E1000_DEV_ID_82542,               PCI_ANY_ID, PCI_ANY_ID, 0},
82
83        { 0x8086, E1000_DEV_ID_82543GC_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
84        { 0x8086, E1000_DEV_ID_82543GC_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
85
86        { 0x8086, E1000_DEV_ID_82544EI_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
87        { 0x8086, E1000_DEV_ID_82544EI_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
88        { 0x8086, E1000_DEV_ID_82544GC_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
89        { 0x8086, E1000_DEV_ID_82544GC_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
90
91        { 0x8086, E1000_DEV_ID_82545EM_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
92        { 0x8086, E1000_DEV_ID_82545EM_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
93        { 0x8086, E1000_DEV_ID_82545GM_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
94        { 0x8086, E1000_DEV_ID_82545GM_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
95        { 0x8086, E1000_DEV_ID_82545GM_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
96
97        { 0x8086, E1000_DEV_ID_82546EB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
98        { 0x8086, E1000_DEV_ID_82546EB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
99        { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
100        { 0x8086, E1000_DEV_ID_82546GB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
101        { 0x8086, E1000_DEV_ID_82546GB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
102        { 0x8086, E1000_DEV_ID_82546GB_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
103        { 0x8086, E1000_DEV_ID_82546GB_PCIE,        PCI_ANY_ID, PCI_ANY_ID, 0},
104        { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
105
106        { 0x8086, E1000_DEV_ID_82547EI,             PCI_ANY_ID, PCI_ANY_ID, 0},
107        { 0x8086, E1000_DEV_ID_82547EI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
108        { 0x8086, E1000_DEV_ID_82547GI,             PCI_ANY_ID, PCI_ANY_ID, 0},
109
110        { 0x8086, E1000_DEV_ID_82573E,              PCI_ANY_ID, PCI_ANY_ID, 0},
111        { 0x8086, E1000_DEV_ID_82573E_IAMT,         PCI_ANY_ID, PCI_ANY_ID, 0},
112
113        /* required last entry */
114        { 0, 0, 0, 0, 0}
115};
116
117/*********************************************************************
118 *  Table of branding strings for all supported NICs.
119 *********************************************************************/
120
121static char *em_strings[] = {
122	"Intel(R) PRO/1000 Network Connection"
123};
124
125/*********************************************************************
126 *  Function prototypes
127 *********************************************************************/
128static int  em_probe(device_t);
129static int  em_attach(device_t);
130static int  em_detach(device_t);
131static int  em_shutdown(device_t);
132static void em_intr(void *);
133static void em_start(struct ifnet *);
134static int  em_ioctl(struct ifnet *, u_long, caddr_t);
135static void em_watchdog(struct ifnet *);
136static void em_init(void *);
137static void em_init_locked(struct adapter *);
138static void em_stop(void *);
139static void em_media_status(struct ifnet *, struct ifmediareq *);
140static int  em_media_change(struct ifnet *);
141static void em_identify_hardware(struct adapter *);
142static int  em_allocate_pci_resources(struct adapter *);
143static void em_free_pci_resources(struct adapter *);
144static void em_local_timer(void *);
145static int  em_hardware_init(struct adapter *);
146static void em_setup_interface(device_t, struct adapter *);
147static int  em_setup_transmit_structures(struct adapter *);
148static void em_initialize_transmit_unit(struct adapter *);
149static int  em_setup_receive_structures(struct adapter *);
150static void em_initialize_receive_unit(struct adapter *);
151static void em_enable_intr(struct adapter *);
152static void em_disable_intr(struct adapter *);
153static void em_free_transmit_structures(struct adapter *);
154static void em_free_receive_structures(struct adapter *);
155static void em_update_stats_counters(struct adapter *);
156static void em_clean_transmit_interrupts(struct adapter *);
157static int  em_allocate_receive_structures(struct adapter *);
158static int  em_allocate_transmit_structures(struct adapter *);
159static void em_process_receive_interrupts(struct adapter *, int);
160static void em_receive_checksum(struct adapter *,
161				struct em_rx_desc *,
162				struct mbuf *);
163static void em_transmit_checksum_setup(struct adapter *,
164				       struct mbuf *,
165				       u_int32_t *,
166				       u_int32_t *);
167static void em_set_promisc(struct adapter *);
168static void em_disable_promisc(struct adapter *);
169static void em_set_multi(struct adapter *);
170static void em_print_hw_stats(struct adapter *);
171static void em_print_link_status(struct adapter *);
172static int  em_get_buf(int i, struct adapter *,
173		       struct mbuf *);
174static void em_enable_vlans(struct adapter *);
175static void em_disable_vlans(struct adapter *);
176static int  em_encap(struct adapter *, struct mbuf **);
177static void em_smartspeed(struct adapter *);
178static int  em_82547_fifo_workaround(struct adapter *, int);
179static void em_82547_update_fifo_head(struct adapter *, int);
180static int  em_82547_tx_fifo_reset(struct adapter *);
181static void em_82547_move_tail(void *arg);
182static void em_82547_move_tail_locked(struct adapter *);
183static int  em_dma_malloc(struct adapter *, bus_size_t,
184			  struct em_dma_alloc *, int);
185static void em_dma_free(struct adapter *, struct em_dma_alloc *);
186static void em_print_debug_info(struct adapter *);
187static int  em_is_valid_ether_addr(u_int8_t *);
188static int  em_sysctl_stats(SYSCTL_HANDLER_ARGS);
189static int  em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
190static u_int32_t em_fill_descriptors (bus_addr_t address,
191				      u_int32_t length,
192				      PDESC_ARRAY desc_array);
193static int  em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
194static void em_add_int_delay_sysctl(struct adapter *, const char *,
195				    const char *, struct em_int_delay_info *,
196				    int, int);
197#ifdef DEVICE_POLLING
198static poll_handler_t em_poll;
199#endif
200
201/*********************************************************************
202 *  FreeBSD Device Interface Entry Points
203 *********************************************************************/
204
205static device_method_t em_methods[] = {
206	/* Device interface */
207	DEVMETHOD(device_probe, em_probe),
208	DEVMETHOD(device_attach, em_attach),
209	DEVMETHOD(device_detach, em_detach),
210	DEVMETHOD(device_shutdown, em_shutdown),
211	{0, 0}
212};
213
214static driver_t em_driver = {
215	"em", em_methods, sizeof(struct adapter ),
216};
217
218static devclass_t em_devclass;
219DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
220MODULE_DEPEND(em, pci, 1, 1, 1);
221MODULE_DEPEND(em, ether, 1, 1, 1);
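/*
 * DRIVER_MODULE() hooks the driver into the PCI bus code via newbus, and the
 * MODULE_DEPEND() entries record version dependencies so the kernel linker
 * loads (or verifies) the pci and ether code before this module.
 */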
222
223/*********************************************************************
224 *  Tunable default values.
225 *********************************************************************/
226
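/*
 * The e1000 interrupt delay registers count in units of 1.024 microseconds,
 * so the macros below convert between register ticks and microseconds with
 * rounding, e.g. E1000_USECS_TO_TICKS(66) == 64 and E1000_TICKS_TO_USECS(64) == 66.
 */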
227#define E1000_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
228#define E1000_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)
229
230static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
231static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
232static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
233static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
234
235TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
236TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
237TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
238TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
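/*
 * These defaults can be overridden by setting the hw.em.* tunables in
 * loader.conf; a value of 0 effectively disables the corresponding delay timer.
 */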
239
240/*********************************************************************
241 *  Device identification routine
242 *
243 *  em_probe determines if the driver should be loaded on the
244 *  adapter based on the adapter's PCI vendor/device ID.
245 *
246 *  return BUS_PROBE_DEFAULT on success, positive on failure
247 *********************************************************************/
248
249static int
250em_probe(device_t dev)
251{
252	em_vendor_info_t *ent;
253
254	u_int16_t       pci_vendor_id = 0;
255	u_int16_t       pci_device_id = 0;
256	u_int16_t       pci_subvendor_id = 0;
257	u_int16_t       pci_subdevice_id = 0;
258	char            adapter_name[60];
259
260	INIT_DEBUGOUT("em_probe: begin");
261
262	pci_vendor_id = pci_get_vendor(dev);
263	if (pci_vendor_id != EM_VENDOR_ID)
264		return(ENXIO);
265
266	pci_device_id = pci_get_device(dev);
267	pci_subvendor_id = pci_get_subvendor(dev);
268	pci_subdevice_id = pci_get_subdevice(dev);
269
270	ent = em_vendor_info_array;
271	while (ent->vendor_id != 0) {
272		if ((pci_vendor_id == ent->vendor_id) &&
273		    (pci_device_id == ent->device_id) &&
274
275		    ((pci_subvendor_id == ent->subvendor_id) ||
276		     (ent->subvendor_id == PCI_ANY_ID)) &&
277
278		    ((pci_subdevice_id == ent->subdevice_id) ||
279		     (ent->subdevice_id == PCI_ANY_ID))) {
280			sprintf(adapter_name, "%s, Version - %s",
281				em_strings[ent->index],
282				em_driver_version);
283			device_set_desc_copy(dev, adapter_name);
284			return(BUS_PROBE_DEFAULT);
285		}
286		ent++;
287	}
288
289	return(ENXIO);
290}
291
292/*********************************************************************
293 *  Device initialization routine
294 *
295 *  The attach entry point is called when the driver is being loaded.
296 *  This routine identifies the type of hardware, allocates all resources
297 *  and initializes the hardware.
298 *
299 *  return 0 on success, positive on failure
300 *********************************************************************/
301
302static int
303em_attach(device_t dev)
304{
305	struct adapter * adapter;
306	int             tsize, rsize;
307	int		error = 0;
308
309	INIT_DEBUGOUT("em_attach: begin");
310
311	/* Allocate, clear, and link in our adapter structure */
312	if (!(adapter = device_get_softc(dev))) {
313		printf("em: adapter structure allocation failed\n");
314		return(ENOMEM);
315	}
316	bzero(adapter, sizeof(struct adapter ));
317	adapter->dev = dev;
318	adapter->osdep.dev = dev;
319	adapter->unit = device_get_unit(dev);
320	EM_LOCK_INIT(adapter, device_get_nameunit(dev));
321
322	/* SYSCTL stuff */
323        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
324                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
325                        OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
326                        (void *)adapter, 0,
327                        em_sysctl_debug_info, "I", "Debug Information");
328
329        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
330                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
331                        OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
332                        (void *)adapter, 0,
333                        em_sysctl_stats, "I", "Statistics");
334
335	callout_init(&adapter->timer, CALLOUT_MPSAFE);
336	callout_init(&adapter->tx_fifo_timer, CALLOUT_MPSAFE);
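	/*
	 * CALLOUT_MPSAFE callouts run without Giant; the handlers
	 * (em_local_timer and em_82547_move_tail) take the adapter
	 * mutex themselves.
	 */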
337
338	/* Determine hardware revision */
339	em_identify_hardware(adapter);
340
341	/* Set up some sysctls for the tunable interrupt delays */
342	em_add_int_delay_sysctl(adapter, "rx_int_delay",
343	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
344	    E1000_REG_OFFSET(&adapter->hw, RDTR), em_rx_int_delay_dflt);
345	em_add_int_delay_sysctl(adapter, "tx_int_delay",
346	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
347	    E1000_REG_OFFSET(&adapter->hw, TIDV), em_tx_int_delay_dflt);
348	if (adapter->hw.mac_type >= em_82540) {
349		em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
350		    "receive interrupt delay limit in usecs",
351		    &adapter->rx_abs_int_delay,
352		    E1000_REG_OFFSET(&adapter->hw, RADV),
353		    em_rx_abs_int_delay_dflt);
354		em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
355		    "transmit interrupt delay limit in usecs",
356		    &adapter->tx_abs_int_delay,
357		    E1000_REG_OFFSET(&adapter->hw, TADV),
358		    em_tx_abs_int_delay_dflt);
359	}
360
361	/* Parameters (to be read from user) */
362	if (adapter->hw.mac_type >= em_82544) {
363        	adapter->num_tx_desc = EM_TXD_82544;
364        	adapter->num_rx_desc = EM_RXD_82544;
365	} else {
366        	adapter->num_tx_desc = EM_TXD;
367        	adapter->num_rx_desc = EM_RXD;
368	}
369        adapter->hw.autoneg = DO_AUTO_NEG;
370        adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
371        adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
372        adapter->hw.tbi_compatibility_en = TRUE;
373        adapter->rx_buffer_len = EM_RXBUFFER_2048;
374
375	/*
376         * These parameters control the automatic generation(Tx) and
377         * response(Rx) to Ethernet PAUSE frames.
378         */
379        adapter->hw.fc_high_water = FC_DEFAULT_HI_THRESH;
380        adapter->hw.fc_low_water  = FC_DEFAULT_LO_THRESH;
381        adapter->hw.fc_pause_time = FC_DEFAULT_TX_TIMER;
382        adapter->hw.fc_send_xon   = TRUE;
383        adapter->hw.fc = em_fc_full;
384
385	adapter->hw.phy_init_script = 1;
386	adapter->hw.phy_reset_disable = FALSE;
387
388#ifndef EM_MASTER_SLAVE
389	adapter->hw.master_slave = em_ms_hw_default;
390#else
391	adapter->hw.master_slave = EM_MASTER_SLAVE;
392#endif
393	/*
394	 * Set the max frame size assuming standard ethernet
395	 * sized frames
396	 */
397	adapter->hw.max_frame_size =
398		ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
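	/* With the standard MTU this is 1500 + 14 + 4 = 1518 bytes. */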
399
400	adapter->hw.min_frame_size =
401		MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
402
403	/*
404	 * This controls when hardware reports transmit completion
405	 * status.
406	 */
407	adapter->hw.report_tx_early = 1;
408
409
410	if (em_allocate_pci_resources(adapter)) {
411		printf("em%d: Allocation of PCI resources failed\n",
412		       adapter->unit);
413                error = ENXIO;
414                goto err_pci;
415	}
416
417
418	/* Initialize eeprom parameters */
419        em_init_eeprom_params(&adapter->hw);
420
421	tsize = EM_ROUNDUP(adapter->num_tx_desc *
422			   sizeof(struct em_tx_desc), 4096);
423
424	/* Allocate Transmit Descriptor ring */
425        if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
426                printf("em%d: Unable to allocate tx_desc memory\n",
427                       adapter->unit);
428		error = ENOMEM;
429                goto err_tx_desc;
430        }
431        adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;
432
433	rsize = EM_ROUNDUP(adapter->num_rx_desc *
434			   sizeof(struct em_rx_desc), 4096);
435
436	/* Allocate Receive Descriptor ring */
437        if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
438                printf("em%d: Unable to allocate rx_desc memory\n",
439                        adapter->unit);
440		error = ENOMEM;
441                goto err_rx_desc;
442        }
443        adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;
444
445	/* Initialize the hardware */
446	if (em_hardware_init(adapter)) {
447		printf("em%d: Unable to initialize the hardware\n",
448		       adapter->unit);
449		error = EIO;
450                goto err_hw_init;
451	}
452
453	/* Copy the permanent MAC address out of the EEPROM */
454	if (em_read_mac_addr(&adapter->hw) < 0) {
455		printf("em%d: EEPROM read error while reading mac address\n",
456		       adapter->unit);
457		error = EIO;
458                goto err_mac_addr;
459	}
460
461	if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
462                printf("em%d: Invalid mac address\n", adapter->unit);
463                error = EIO;
464                goto err_mac_addr;
465        }
466
467	/* Setup OS specific network interface */
468	em_setup_interface(dev, adapter);
469
470	/* Initialize statistics */
471	em_clear_hw_cntrs(&adapter->hw);
472	em_update_stats_counters(adapter);
473	adapter->hw.get_link_status = 1;
474	em_check_for_link(&adapter->hw);
475
476	if (bootverbose) {
477		/* Print the link status */
478		if (adapter->link_active == 1) {
479			em_get_speed_and_duplex(&adapter->hw,
480			    &adapter->link_speed, &adapter->link_duplex);
481			printf("em%d:  Speed:%d Mbps  Duplex:%s\n",
482			       adapter->unit,
483			       adapter->link_speed,
484			       adapter->link_duplex == FULL_DUPLEX ? "Full" :
485				"Half");
486		} else
487			printf("em%d:  Speed:N/A  Duplex:N/A\n",
488			    adapter->unit);
489	}
490
491	/* Identify 82544 on PCIX */
492        em_get_bus_info(&adapter->hw);
493        if(adapter->hw.bus_type == em_bus_type_pcix &&
494           adapter->hw.mac_type == em_82544) {
495                adapter->pcix_82544 = TRUE;
496        }
497        else {
498                adapter->pcix_82544 = FALSE;
499        }
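        /*
         * pcix_82544 selects the descriptor-splitting workaround that
         * em_encap() applies for 82544 controllers on a PCI-X bus.
         */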
500	INIT_DEBUGOUT("em_attach: end");
501	return(0);
502
503err_mac_addr:
504err_hw_init:
505        em_dma_free(adapter, &adapter->rxdma);
506err_rx_desc:
507        em_dma_free(adapter, &adapter->txdma);
508err_tx_desc:
509err_pci:
510        em_free_pci_resources(adapter);
511	EM_LOCK_DESTROY(adapter);
512        return(error);
513
514}
515
516/*********************************************************************
517 *  Device removal routine
518 *
519 *  The detach entry point is called when the driver is being removed.
520 *  This routine stops the adapter and deallocates all the resources
521 *  that were allocated for driver operation.
522 *
523 *  return 0 on success, positive on failure
524 *********************************************************************/
525
526static int
527em_detach(device_t dev)
528{
529	struct adapter * adapter = device_get_softc(dev);
530	struct ifnet   *ifp = adapter->ifp;
531
532	INIT_DEBUGOUT("em_detach: begin");
533
534#ifdef DEVICE_POLLING
535	if (ifp->if_capenable & IFCAP_POLLING)
536		ether_poll_deregister(ifp);
537#endif
538
539	EM_LOCK(adapter);
540	adapter->in_detach = 1;
541	em_stop(adapter);
542	em_phy_hw_reset(&adapter->hw);
543	EM_UNLOCK(adapter);
544        ether_ifdetach(adapter->ifp);
545
546	em_free_pci_resources(adapter);
547	bus_generic_detach(dev);
548	if_free(ifp);
549
550	/* Free Transmit Descriptor ring */
551        if (adapter->tx_desc_base) {
552                em_dma_free(adapter, &adapter->txdma);
553                adapter->tx_desc_base = NULL;
554        }
555
556        /* Free Receive Descriptor ring */
557        if (adapter->rx_desc_base) {
558                em_dma_free(adapter, &adapter->rxdma);
559                adapter->rx_desc_base = NULL;
560        }
561
562	EM_LOCK_DESTROY(adapter);
563
564	return(0);
565}
566
567/*********************************************************************
568 *
569 *  Shutdown entry point
570 *
571 **********************************************************************/
572
573static int
574em_shutdown(device_t dev)
575{
576	struct adapter *adapter = device_get_softc(dev);
577	EM_LOCK(adapter);
578	em_stop(adapter);
579	EM_UNLOCK(adapter);
580	return(0);
581}
582
583
584/*********************************************************************
585 *  Transmit entry point
586 *
587 *  em_start is called by the stack to initiate a transmit.
588 *  The driver will remain in this routine as long as there are
589 *  packets to transmit and transmit resources are available.
590 *  If resources are not available, the stack is notified and
591 *  the packet is requeued.
592 **********************************************************************/
593
594static void
595em_start_locked(struct ifnet *ifp)
596{
597        struct mbuf    *m_head;
598        struct adapter *adapter = ifp->if_softc;
599
600	mtx_assert(&adapter->mtx, MA_OWNED);
601
602        if (!adapter->link_active)
603                return;
604
605        while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
606
607                IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
608
609                if (m_head == NULL) break;
610
611		/*
612		 * em_encap() can modify our pointer, and/or make it NULL on
613		 * failure.  In that event, we can't requeue.
614		 */
615		if (em_encap(adapter, &m_head)) {
616			if (m_head == NULL)
617				break;
618			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
619			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
620			break;
621                }
622
623		/* Send a copy of the frame to the BPF listener */
624		BPF_MTAP(ifp, m_head);
625
626                /* Set timeout in case hardware has problems transmitting */
627                ifp->if_timer = EM_TX_TIMEOUT;
628
629        }
630        return;
631}
632
633static void
634em_start(struct ifnet *ifp)
635{
636	struct adapter *adapter = ifp->if_softc;
637
638	EM_LOCK(adapter);
639	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
640		em_start_locked(ifp);
641	EM_UNLOCK(adapter);
642	return;
643}
644
645/*********************************************************************
646 *  Ioctl entry point
647 *
648 *  em_ioctl is called when the user wants to configure the
649 *  interface.
650 *
651 *  return 0 on success, positive on failure
652 **********************************************************************/
653
654static int
655em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
656{
657	int             mask, reinit, error = 0;
658	struct ifreq   *ifr = (struct ifreq *) data;
659	struct adapter * adapter = ifp->if_softc;
660
661	if (adapter->in_detach) return(error);
662
663	switch (command) {
664	case SIOCSIFADDR:
665	case SIOCGIFADDR:
666		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
667		ether_ioctl(ifp, command, data);
668		break;
669	case SIOCSIFMTU:
670		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
671#ifndef __NO_STRICT_ALIGNMENT
672		if (ifr->ifr_mtu > ETHERMTU) {
673			/*
674			 * XXX
675			 * Due to a limitation of the DMA engine, fix-up code is
676			 * needed on strict-alignment architectures. Disable
677			 * jumbo frames until we have a better solution.
678			 */
679			error = EINVAL;
680		} else
681#endif
682		if (ifr->ifr_mtu > MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN || \
683			/* 82573 does not support jumbo frames */
684			(adapter->hw.mac_type == em_82573 && ifr->ifr_mtu > ETHERMTU) ) {
685			error = EINVAL;
686		} else {
687			EM_LOCK(adapter);
688			ifp->if_mtu = ifr->ifr_mtu;
689			adapter->hw.max_frame_size =
690			ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
691			em_init_locked(adapter);
692			EM_UNLOCK(adapter);
693		}
694		break;
695	case SIOCSIFFLAGS:
696		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
697		EM_LOCK(adapter);
698		if (ifp->if_flags & IFF_UP) {
699			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
700				em_init_locked(adapter);
701			}
702
703			em_disable_promisc(adapter);
704			em_set_promisc(adapter);
705		} else {
706			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
707				em_stop(adapter);
708			}
709		}
710		EM_UNLOCK(adapter);
711		break;
712	case SIOCADDMULTI:
713	case SIOCDELMULTI:
714		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
715		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
716			EM_LOCK(adapter);
717			em_disable_intr(adapter);
718			em_set_multi(adapter);
719			if (adapter->hw.mac_type == em_82542_rev2_0) {
720				em_initialize_receive_unit(adapter);
721			}
722#ifdef DEVICE_POLLING
723                        if (!(ifp->if_capenable & IFCAP_POLLING))
724#endif
725				em_enable_intr(adapter);
726			EM_UNLOCK(adapter);
727		}
728		break;
729	case SIOCSIFMEDIA:
730	case SIOCGIFMEDIA:
731		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
732		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
733		break;
734	case SIOCSIFCAP:
735		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
736		reinit = 0;
737		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
738#ifdef DEVICE_POLLING
739		if (mask & IFCAP_POLLING) {
740			if (ifr->ifr_reqcap & IFCAP_POLLING) {
741				error = ether_poll_register(em_poll, ifp);
742				if (error)
743					return(error);
744				EM_LOCK(adapter);
745				em_disable_intr(adapter);
746				ifp->if_capenable |= IFCAP_POLLING;
747				EM_UNLOCK(adapter);
748			} else {
749				error = ether_poll_deregister(ifp);
750				/* Enable interrupt even in error case */
751				EM_LOCK(adapter);
752				em_enable_intr(adapter);
753				ifp->if_capenable &= ~IFCAP_POLLING;
754				EM_UNLOCK(adapter);
755			}
756		}
757#endif
758		if (mask & IFCAP_HWCSUM) {
759			ifp->if_capenable ^= IFCAP_HWCSUM;
760			reinit = 1;
761		}
762		if (mask & IFCAP_VLAN_HWTAGGING) {
763			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
764			reinit = 1;
765		}
766		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
767			em_init(adapter);
768		break;
769	default:
770		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)", (int)command);
771		error = EINVAL;
772	}
773
774	return(error);
775}
776
777/*********************************************************************
778 *  Watchdog entry point
779 *
780 *  This routine is called whenever hardware quits transmitting.
781 *
782 **********************************************************************/
783
784static void
785em_watchdog(struct ifnet *ifp)
786{
787	struct adapter * adapter;
788	adapter = ifp->if_softc;
789
790	EM_LOCK(adapter);
791	/* If we are in this routine because of pause frames, then
792	 * don't reset the hardware.
793	 */
794	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
795		ifp->if_timer = EM_TX_TIMEOUT;
796		EM_UNLOCK(adapter);
797		return;
798	}
799
800	if (em_check_for_link(&adapter->hw))
801		printf("em%d: watchdog timeout -- resetting\n", adapter->unit);
802
803	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
804	adapter->watchdog_events++;
805
806	em_init_locked(adapter);
807	EM_UNLOCK(adapter);
808}
809
810/*********************************************************************
811 *  Init entry point
812 *
813 *  This routine is used in two ways. It is used by the stack as
814 *  init entry point in network interface structure. It is also used
815 *  by the driver as a hw/sw initialization routine to get to a
816 *  consistent state.
817 *
818 *
819 **********************************************************************/
820
821static void
822em_init_locked(struct adapter * adapter)
823{
824	struct ifnet   *ifp;
825
826	uint32_t	pba;
827	ifp = adapter->ifp;
828
829	INIT_DEBUGOUT("em_init: begin");
830
831	mtx_assert(&adapter->mtx, MA_OWNED);
832
833	em_stop(adapter);
834
835	/* Packet Buffer Allocation (PBA)
836	 * Writing PBA sets the receive portion of the buffer;
837	 * the remainder is used for the transmit buffer.
838	 *
839	 * Devices before the 82547 had a Packet Buffer of 64K.
840	 *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
841	 * After the 82547 the buffer was reduced to 40K.
842	 *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
843	 *   Note: default does not leave enough room for Jumbo Frame >10k.
844	 */
845	if(adapter->hw.mac_type < em_82547) {
846		/* Total FIFO is 64K */
847		if(adapter->rx_buffer_len > EM_RXBUFFER_8192)
848			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
849		else
850			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
851	} else {
852		/* Total FIFO is 40K */
853		if(adapter->hw.max_frame_size > EM_RXBUFFER_8192) {
854			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
855		} else {
856		        pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
857		}
858		adapter->tx_fifo_head = 0;
859		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
860		adapter->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
861	}
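	/*
	 * On 82547 parts, tx_fifo_head, tx_head_addr and tx_fifo_size computed
	 * above describe the on-chip Tx FIFO area left after the receive
	 * allocation; the 82547 half-duplex workaround routines use them to
	 * keep a packet from wrapping the FIFO.
	 */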
862	INIT_DEBUGOUT1("em_init: pba=%dK",pba);
863	E1000_WRITE_REG(&adapter->hw, PBA, pba);
864
865	/* Get the latest MAC address; the user may have configured an LAA */
866        bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac_addr,
867              ETHER_ADDR_LEN);
868
869	/* Initialize the hardware */
870	if (em_hardware_init(adapter)) {
871		printf("em%d: Unable to initialize the hardware\n",
872		       adapter->unit);
873		return;
874	}
875
876	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
877		em_enable_vlans(adapter);
878
879	/* Prepare transmit descriptors and buffers */
880	if (em_setup_transmit_structures(adapter)) {
881		printf("em%d: Could not setup transmit structures\n",
882		       adapter->unit);
883		em_stop(adapter);
884		return;
885	}
886	em_initialize_transmit_unit(adapter);
887
888	/* Setup Multicast table */
889	em_set_multi(adapter);
890
891	/* Prepare receive descriptors and buffers */
892	if (em_setup_receive_structures(adapter)) {
893		printf("em%d: Could not setup receive structures\n",
894		       adapter->unit);
895		em_stop(adapter);
896		return;
897	}
898	em_initialize_receive_unit(adapter);
899
900	/* Don't lose promiscuous settings */
901	em_set_promisc(adapter);
902
903	ifp->if_drv_flags |= IFF_DRV_RUNNING;
904	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
905
906	if (adapter->hw.mac_type >= em_82543) {
907		if (ifp->if_capenable & IFCAP_TXCSUM)
908			ifp->if_hwassist = EM_CHECKSUM_FEATURES;
909		else
910			ifp->if_hwassist = 0;
911	}
912
913	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
914	em_clear_hw_cntrs(&adapter->hw);
915#ifdef DEVICE_POLLING
916        /*
917         * Only enable interrupts if we are not polling; make sure
918         * they are off otherwise.
919         */
920        if (ifp->if_capenable & IFCAP_POLLING)
921                em_disable_intr(adapter);
922        else
923#endif /* DEVICE_POLLING */
924		em_enable_intr(adapter);
925
926	/* Don't reset the phy next time init gets called */
927	adapter->hw.phy_reset_disable = TRUE;
928
929	return;
930}
931
932static void
933em_init(void *arg)
934{
935	struct adapter * adapter = arg;
936
937	EM_LOCK(adapter);
938	em_init_locked(adapter);
939	EM_UNLOCK(adapter);
940	return;
941}
942
943
944#ifdef DEVICE_POLLING
945static void
946em_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
947{
948        struct adapter *adapter = ifp->if_softc;
949        u_int32_t reg_icr;
950
951	mtx_assert(&adapter->mtx, MA_OWNED);
952
953        if (cmd == POLL_AND_CHECK_STATUS) {
954                reg_icr = E1000_READ_REG(&adapter->hw, ICR);
955                if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
956			callout_stop(&adapter->timer);
957                        adapter->hw.get_link_status = 1;
958                        em_check_for_link(&adapter->hw);
959                        em_print_link_status(adapter);
960			callout_reset(&adapter->timer, hz, em_local_timer, adapter);
961                }
962        }
963	em_process_receive_interrupts(adapter, count);
964	em_clean_transmit_interrupts(adapter);
965
966        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
967                em_start_locked(ifp);
968}
969
970static void
971em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
972{
973        struct adapter *adapter = ifp->if_softc;
974
975	EM_LOCK(adapter);
976	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
977		em_poll_locked(ifp, cmd, count);
978	EM_UNLOCK(adapter);
979}
980#endif /* DEVICE_POLLING */
981
982/*********************************************************************
983 *
984 *  Interrupt Service routine
985 *
986 **********************************************************************/
987static void
988em_intr(void *arg)
989{
990	struct adapter	*adapter = arg;
991	struct ifnet	*ifp;
992	uint32_t	reg_icr;
993	int		wantinit = 0;
994
995	EM_LOCK(adapter);
996
997	ifp = adapter->ifp;
998
999#ifdef DEVICE_POLLING
1000	if (ifp->if_capenable & IFCAP_POLLING) {
1001		EM_UNLOCK(adapter);
1002		return;
1003	}
1004#endif /* DEVICE_POLLING */
1005
1006	for (;;) {
1007		reg_icr = E1000_READ_REG(&adapter->hw, ICR);
1008		if (reg_icr == 0)
1009			break;
1010
1011		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1012			em_process_receive_interrupts(adapter, -1);
1013			em_clean_transmit_interrupts(adapter);
1014		}
1015
1016		/* Link status change */
1017		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1018			callout_stop(&adapter->timer);
1019			adapter->hw.get_link_status = 1;
1020			em_check_for_link(&adapter->hw);
1021			em_print_link_status(adapter);
1022			callout_reset(&adapter->timer, hz, em_local_timer,
1023			    adapter);
1024		}
1025
1026		if (reg_icr & E1000_ICR_RXO) {
1027			adapter->rx_overruns++;
1028			wantinit = 1;
1029		}
1030	}
1031#if 0
1032	if (wantinit)
1033		em_init_locked(adapter);
1034#endif
1035	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1036	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1037		em_start_locked(ifp);
1038
1039	EM_UNLOCK(adapter);
1040	return;
1041}
1042
1043
1044
1045/*********************************************************************
1046 *
1047 *  Media Ioctl callback
1048 *
1049 *  This routine is called whenever the user queries the status of
1050 *  the interface using ifconfig.
1051 *
1052 **********************************************************************/
1053static void
1054em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1055{
1056	struct adapter * adapter = ifp->if_softc;
1057
1058	INIT_DEBUGOUT("em_media_status: begin");
1059
1060	em_check_for_link(&adapter->hw);
1061	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1062		if (adapter->link_active == 0) {
1063			em_get_speed_and_duplex(&adapter->hw,
1064						&adapter->link_speed,
1065						&adapter->link_duplex);
1066			adapter->link_active = 1;
1067		}
1068	} else {
1069		if (adapter->link_active == 1) {
1070			adapter->link_speed = 0;
1071			adapter->link_duplex = 0;
1072			adapter->link_active = 0;
1073		}
1074	}
1075
1076	ifmr->ifm_status = IFM_AVALID;
1077	ifmr->ifm_active = IFM_ETHER;
1078
1079	if (!adapter->link_active)
1080		return;
1081
1082	ifmr->ifm_status |= IFM_ACTIVE;
1083
1084	if (adapter->hw.media_type == em_media_type_fiber) {
1085		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1086	} else {
1087		switch (adapter->link_speed) {
1088		case 10:
1089			ifmr->ifm_active |= IFM_10_T;
1090			break;
1091		case 100:
1092			ifmr->ifm_active |= IFM_100_TX;
1093			break;
1094		case 1000:
1095			ifmr->ifm_active |= IFM_1000_T;
1096			break;
1097		}
1098		if (adapter->link_duplex == FULL_DUPLEX)
1099			ifmr->ifm_active |= IFM_FDX;
1100		else
1101			ifmr->ifm_active |= IFM_HDX;
1102	}
1103	return;
1104}
1105
1106/*********************************************************************
1107 *
1108 *  Media Ioctl callback
1109 *
1110 *  This routine is called when the user changes speed/duplex using
1111 *  the media/mediaopt options of ifconfig.
1112 *
1113 **********************************************************************/
1114static int
1115em_media_change(struct ifnet *ifp)
1116{
1117	struct adapter * adapter = ifp->if_softc;
1118	struct ifmedia  *ifm = &adapter->media;
1119
1120	INIT_DEBUGOUT("em_media_change: begin");
1121
1122	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1123		return(EINVAL);
1124
1125	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1126	case IFM_AUTO:
1127		adapter->hw.autoneg = DO_AUTO_NEG;
1128		adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1129		break;
1130	case IFM_1000_SX:
1131	case IFM_1000_T:
1132		adapter->hw.autoneg = DO_AUTO_NEG;
1133		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1134		break;
1135	case IFM_100_TX:
1136		adapter->hw.autoneg = FALSE;
1137		adapter->hw.autoneg_advertised = 0;
1138		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1139			adapter->hw.forced_speed_duplex = em_100_full;
1140		else
1141			adapter->hw.forced_speed_duplex	= em_100_half;
1142		break;
1143	case IFM_10_T:
1144		adapter->hw.autoneg = FALSE;
1145		adapter->hw.autoneg_advertised = 0;
1146		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1147			adapter->hw.forced_speed_duplex = em_10_full;
1148		else
1149			adapter->hw.forced_speed_duplex	= em_10_half;
1150		break;
1151	default:
1152		printf("em%d: Unsupported media type\n", adapter->unit);
1153	}
1154
1155	/* As the speed/duplex settings may have changed, we need to
1156	 * reset the PHY.
1157	 */
1158	adapter->hw.phy_reset_disable = FALSE;
1159
1160	em_init(adapter);
1161
1162	return(0);
1163}
1164
1165/*********************************************************************
1166 *
1167 *  This routine maps the mbufs to tx descriptors.
1168 *
1169 *  return 0 on success, positive on failure
1170 **********************************************************************/
1171static int
1172em_encap(struct adapter *adapter, struct mbuf **m_headp)
1173{
1174        u_int32_t       txd_upper;
1175        u_int32_t       txd_lower, txd_used = 0, txd_saved = 0;
1176        int             i, j, error;
1177
1178	struct mbuf	*m_head;
1179
1180	/* For 82544 Workaround */
1181	DESC_ARRAY              desc_array;
1182	u_int32_t               array_elements;
1183	u_int32_t               counter;
1184        struct m_tag    *mtag;
1185	bus_dma_segment_t	segs[EM_MAX_SCATTER];
1186	bus_dmamap_t		map;
1187	int			nsegs;
1188        struct em_buffer   *tx_buffer = NULL;
1189        struct em_tx_desc *current_tx_desc = NULL;
1190        struct ifnet   *ifp = adapter->ifp;
1191
1192	m_head = *m_headp;
1193
1194        /*
1195         * Force a cleanup if number of TX descriptors
1196         * available hits the threshold
1197         */
1198        if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1199                em_clean_transmit_interrupts(adapter);
1200                if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1201                        adapter->no_tx_desc_avail1++;
1202                        return(ENOBUFS);
1203                }
1204        }
1205
1206        /*
1207         * Map the packet for DMA.
1208         */
1209        if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) {
1210                adapter->no_tx_map_avail++;
1211                return (ENOMEM);
1212        }
1213        error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs,
1214					&nsegs, BUS_DMA_NOWAIT);
1215        if (error != 0) {
1216                adapter->no_tx_dma_setup++;
1217                bus_dmamap_destroy(adapter->txtag, map);
1218                return (error);
1219        }
1220        KASSERT(nsegs != 0, ("em_encap: empty packet"));
1221
1222        if (nsegs > adapter->num_tx_desc_avail) {
1223                adapter->no_tx_desc_avail2++;
1224                bus_dmamap_destroy(adapter->txtag, map);
1225                return (ENOBUFS);
1226        }
1227
1228
1229        if (ifp->if_hwassist > 0) {
1230                em_transmit_checksum_setup(adapter,  m_head,
1231                                           &txd_upper, &txd_lower);
1232        } else
1233                txd_upper = txd_lower = 0;
1234
1235
1236        /* Find out if we are in vlan mode */
1237        mtag = VLAN_OUTPUT_TAG(ifp, m_head);
1238
1239	/*
1240	 * When operating in promiscuous mode, hardware encapsulation for
1241	 * packets is disabled.  This means we have to add the vlan
1242	 * encapsulation in the driver, since it will have come down from the
1243	 * VLAN layer with a tag instead of a VLAN header.
1244	 */
1245	if (mtag != NULL && adapter->em_insert_vlan_header) {
1246		struct ether_vlan_header *evl;
1247		struct ether_header eh;
1248
1249		m_head = m_pullup(m_head, sizeof(eh));
1250		if (m_head == NULL) {
1251			*m_headp = NULL;
1252                	bus_dmamap_destroy(adapter->txtag, map);
1253			return (ENOBUFS);
1254		}
1255		eh = *mtod(m_head, struct ether_header *);
1256		M_PREPEND(m_head, sizeof(*evl), M_DONTWAIT);
1257		if (m_head == NULL) {
1258			*m_headp = NULL;
1259                	bus_dmamap_destroy(adapter->txtag, map);
1260			return (ENOBUFS);
1261		}
1262		m_head = m_pullup(m_head, sizeof(*evl));
1263		if (m_head == NULL) {
1264			*m_headp = NULL;
1265                	bus_dmamap_destroy(adapter->txtag, map);
1266			return (ENOBUFS);
1267		}
1268		evl = mtod(m_head, struct ether_vlan_header *);
1269		bcopy(&eh, evl, sizeof(*evl));
1270		evl->evl_proto = evl->evl_encap_proto;
1271		evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
1272		evl->evl_tag = htons(VLAN_TAG_VALUE(mtag));
1273		m_tag_delete(m_head, mtag);
1274		mtag = NULL;
1275		*m_headp = m_head;
1276	}
1277
1278        i = adapter->next_avail_tx_desc;
1279	if (adapter->pcix_82544) {
1280		txd_saved = i;
1281		txd_used = 0;
1282	}
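        /*
         * Build one Tx descriptor per DMA segment.  For an 82544 on a PCI-X
         * bus, certain address/length combinations trip a hardware erratum,
         * so em_fill_descriptors() may split a segment across several smaller
         * descriptors.
         */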
1283        for (j = 0; j < nsegs; j++) {
1284		/* If adapter is 82544 and on PCIX bus */
1285		if(adapter->pcix_82544) {
1286			/*
1287			 * Check the Address and Length combination and
1288			 * split the data accordingly
1289			 */
1290                        array_elements = em_fill_descriptors(segs[j].ds_addr,
1291			    segs[j].ds_len, &desc_array);
1292			for (counter = 0; counter < array_elements; counter++) {
1293                                if (txd_used == adapter->num_tx_desc_avail) {
1294                                        adapter->next_avail_tx_desc = txd_saved;
1295                                        adapter->no_tx_desc_avail2++;
1296                                        bus_dmamap_destroy(adapter->txtag, map);
1297                                        return (ENOBUFS);
1298                                }
1299                                tx_buffer = &adapter->tx_buffer_area[i];
1300                                current_tx_desc = &adapter->tx_desc_base[i];
1301                                current_tx_desc->buffer_addr = htole64(
1302					desc_array.descriptor[counter].address);
1303                                current_tx_desc->lower.data = htole32(
1304					(adapter->txd_cmd | txd_lower |
1305					 (u_int16_t)desc_array.descriptor[counter].length));
1306                                current_tx_desc->upper.data = htole32((txd_upper));
1307                                if (++i == adapter->num_tx_desc)
1308                                         i = 0;
1309
1310                                tx_buffer->m_head = NULL;
1311                                txd_used++;
1312                        }
1313		} else {
1314			tx_buffer = &adapter->tx_buffer_area[i];
1315			current_tx_desc = &adapter->tx_desc_base[i];
1316
1317			current_tx_desc->buffer_addr = htole64(segs[j].ds_addr);
1318			current_tx_desc->lower.data = htole32(
1319				adapter->txd_cmd | txd_lower | segs[j].ds_len);
1320			current_tx_desc->upper.data = htole32(txd_upper);
1321
1322			if (++i == adapter->num_tx_desc)
1323				i = 0;
1324
1325			tx_buffer->m_head = NULL;
1326		}
1327        }
1328
1329	adapter->next_avail_tx_desc = i;
1330	if (adapter->pcix_82544) {
1331		adapter->num_tx_desc_avail -= txd_used;
1332	}
1333	else {
1334		adapter->num_tx_desc_avail -= nsegs;
1335	}
1336
1337        if (mtag != NULL) {
1338                /* Set the vlan id */
1339                current_tx_desc->upper.fields.special = htole16(VLAN_TAG_VALUE(mtag));
1340
1341                /* Tell hardware to add tag */
1342                current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
1343        }
1344
1345        tx_buffer->m_head = m_head;
1346        tx_buffer->map = map;
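        /*
         * Make sure the CPU's writes to the mbuf data reach memory before
         * the hardware reads it.
         */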
1347        bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1348
1349        /*
1350         * Last Descriptor of Packet needs End Of Packet (EOP)
1351         */
1352        current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);
1353
1354        /*
1355         * Advance the Transmit Descriptor Tail (TDT); this tells the E1000
1356         * that this frame is available to transmit.
1357         */
1358        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1359            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1360        if (adapter->hw.mac_type == em_82547 &&
1361            adapter->link_duplex == HALF_DUPLEX) {
1362                em_82547_move_tail_locked(adapter);
1363        } else {
1364                E1000_WRITE_REG(&adapter->hw, TDT, i);
1365                if (adapter->hw.mac_type == em_82547) {
1366                        em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len);
1367                }
1368        }
1369
1370        return(0);
1371}
1372
1373/*********************************************************************
1374 *
1375 * 82547 workaround to avoid a controller hang in half-duplex operation.
1376 * The workaround is to avoid queuing a large packet that would span
1377 * the internal Tx FIFO ring boundary; the FIFO pointers need to be reset
1378 * in that case, and we do that only while the FIFO is quiescent.
1379 *
1380 **********************************************************************/
1381static void
1382em_82547_move_tail_locked(struct adapter *adapter)
1383{
1384	uint16_t hw_tdt;
1385	uint16_t sw_tdt;
1386	struct em_tx_desc *tx_desc;
1387	uint16_t length = 0;
1388	boolean_t eop = 0;
1389
1390	EM_LOCK_ASSERT(adapter);
1391
1392	hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
1393	sw_tdt = adapter->next_avail_tx_desc;
1394
1395	while (hw_tdt != sw_tdt) {
1396		tx_desc = &adapter->tx_desc_base[hw_tdt];
1397		length += tx_desc->lower.flags.length;
1398		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1399		if(++hw_tdt == adapter->num_tx_desc)
1400			hw_tdt = 0;
1401
1402		if(eop) {
1403			if (em_82547_fifo_workaround(adapter, length)) {
1404				adapter->tx_fifo_wrk_cnt++;
1405				callout_reset(&adapter->tx_fifo_timer, 1,
1406					em_82547_move_tail, adapter);
1407				break;
1408			}
1409			E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
1410			em_82547_update_fifo_head(adapter, length);
1411			length = 0;
1412		}
1413	}
1414	return;
1415}
1416
1417static void
1418em_82547_move_tail(void *arg)
1419{
1420        struct adapter *adapter = arg;
1421
1422        EM_LOCK(adapter);
1423        em_82547_move_tail_locked(adapter);
1424        EM_UNLOCK(adapter);
1425}
1426
1427static int
1428em_82547_fifo_workaround(struct adapter *adapter, int len)
1429{
1430	int fifo_space, fifo_pkt_len;
1431
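	/*
	 * Account for the FIFO header the hardware prepends and round up to
	 * the FIFO's granularity (EM_FIFO_HDR is assumed to be 16 bytes here,
	 * matching the 16-byte alignment noted in em_82547_update_fifo_head()).
	 */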
1432	fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1433
1434	if (adapter->link_duplex == HALF_DUPLEX) {
1435		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1436
1437		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1438			if (em_82547_tx_fifo_reset(adapter)) {
1439				return(0);
1440			}
1441			else {
1442				return(1);
1443			}
1444		}
1445	}
1446
1447	return(0);
1448}
1449
1450static void
1451em_82547_update_fifo_head(struct adapter *adapter, int len)
1452{
1453	int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1454
1455	/* tx_fifo_head is always 16 byte aligned */
1456	adapter->tx_fifo_head += fifo_pkt_len;
1457	if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1458		adapter->tx_fifo_head -= adapter->tx_fifo_size;
1459	}
1460
1461	return;
1462}
1463
1464
1465static int
1466em_82547_tx_fifo_reset(struct adapter *adapter)
1467{
1468	uint32_t tctl;
1469
1470	if ( (E1000_READ_REG(&adapter->hw, TDT) ==
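	/*
	 * Only reset the FIFO when the transmit path is completely idle:
	 * the descriptor head equals the tail, the internal FIFO head/tail
	 * and saved pointers match, and no packets are pending (TDFPC == 0).
	 */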
1471	      E1000_READ_REG(&adapter->hw, TDH)) &&
1472	     (E1000_READ_REG(&adapter->hw, TDFT) ==
1473	      E1000_READ_REG(&adapter->hw, TDFH)) &&
1474	     (E1000_READ_REG(&adapter->hw, TDFTS) ==
1475	      E1000_READ_REG(&adapter->hw, TDFHS)) &&
1476	     (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {
1477
1478		/* Disable TX unit */
1479		tctl = E1000_READ_REG(&adapter->hw, TCTL);
1480		E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);
1481
1482		/* Reset FIFO pointers */
1483		E1000_WRITE_REG(&adapter->hw, TDFT,  adapter->tx_head_addr);
1484		E1000_WRITE_REG(&adapter->hw, TDFH,  adapter->tx_head_addr);
1485		E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
1486		E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);
1487
1488		/* Re-enable TX unit */
1489		E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1490		E1000_WRITE_FLUSH(&adapter->hw);
1491
1492		adapter->tx_fifo_head = 0;
1493		adapter->tx_fifo_reset_cnt++;
1494
1495		return(TRUE);
1496	}
1497	else {
1498		return(FALSE);
1499	}
1500}
1501
1502static void
1503em_set_promisc(struct adapter * adapter)
1504{
1505
1506	u_int32_t       reg_rctl;
1507	struct ifnet   *ifp = adapter->ifp;
1508
1509	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1510
1511	if (ifp->if_flags & IFF_PROMISC) {
1512		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1513		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1514		/* Disable VLAN stripping in promiscuous mode.
1515		 * This allows bridging of VLAN-tagged frames
1516		 * and also lets VLAN tags be seen in tcpdump.
1517		 */
1518		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1519			em_disable_vlans(adapter);
1520		adapter->em_insert_vlan_header = 1;
1521	} else if (ifp->if_flags & IFF_ALLMULTI) {
1522		reg_rctl |= E1000_RCTL_MPE;
1523		reg_rctl &= ~E1000_RCTL_UPE;
1524		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1525		adapter->em_insert_vlan_header = 0;
1526	} else
1527		adapter->em_insert_vlan_header = 0;
1528
1529	return;
1530}
1531
1532static void
1533em_disable_promisc(struct adapter * adapter)
1534{
1535	u_int32_t       reg_rctl;
1536	struct ifnet   *ifp = adapter->ifp;
1537
1538	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1539
1540	reg_rctl &=  (~E1000_RCTL_UPE);
1541	reg_rctl &=  (~E1000_RCTL_MPE);
1542	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1543
1544	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1545		em_enable_vlans(adapter);
1546	adapter->em_insert_vlan_header = 0;
1547
1548	return;
1549}
1550
1551
1552/*********************************************************************
1553 *  Multicast Update
1554 *
1555 *  This routine is called whenever the multicast address list is updated.
1556 *
1557 **********************************************************************/
1558
1559static void
1560em_set_multi(struct adapter * adapter)
1561{
1562        u_int32_t reg_rctl = 0;
1563        u_int8_t  mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
1564        struct ifmultiaddr  *ifma;
1565        int mcnt = 0;
1566        struct ifnet   *ifp = adapter->ifp;
1567
1568        IOCTL_DEBUGOUT("em_set_multi: begin");
1569
1570        if (adapter->hw.mac_type == em_82542_rev2_0) {
1571                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1572                if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
1573                        em_pci_clear_mwi(&adapter->hw);
1574                }
1575                reg_rctl |= E1000_RCTL_RST;
1576                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1577                msec_delay(5);
1578        }
1579
1580	IF_ADDR_LOCK(ifp);
1581        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1582                if (ifma->ifma_addr->sa_family != AF_LINK)
1583                        continue;
1584
1585		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break;
1586
1587                bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1588                      &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
1589                mcnt++;
1590        }
1591	IF_ADDR_UNLOCK(ifp);
1592
1593        if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1594                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1595                reg_rctl |= E1000_RCTL_MPE;
1596                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1597        } else
1598                em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);
1599
1600        if (adapter->hw.mac_type == em_82542_rev2_0) {
1601                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1602                reg_rctl &= ~E1000_RCTL_RST;
1603                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1604                msec_delay(5);
1605                if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
1606                        em_pci_set_mwi(&adapter->hw);
1607                }
1608        }
1609
1610        return;
1611}
1612
1613
1614/*********************************************************************
1615 *  Timer routine
1616 *
1617 *  This routine checks for link status and updates statistics.
1618 *
1619 **********************************************************************/
1620
1621static void
1622em_local_timer(void *arg)
1623{
1624	struct ifnet   *ifp;
1625	struct adapter * adapter = arg;
1626	ifp = adapter->ifp;
1627
1628	EM_LOCK(adapter);
1629
1630	em_check_for_link(&adapter->hw);
1631	em_print_link_status(adapter);
1632	em_update_stats_counters(adapter);
1633	if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1634		em_print_hw_stats(adapter);
1635	}
1636	em_smartspeed(adapter);
1637
1638	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1639
1640	EM_UNLOCK(adapter);
1641	return;
1642}
1643
1644static void
1645em_print_link_status(struct adapter * adapter)
1646{
1647	struct ifnet *ifp = adapter->ifp;
1648
1649	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1650		if (adapter->link_active == 0) {
1651			em_get_speed_and_duplex(&adapter->hw,
1652						&adapter->link_speed,
1653						&adapter->link_duplex);
1654			if (bootverbose)
1655				printf("em%d: Link is up %d Mbps %s\n",
1656				       adapter->unit,
1657				       adapter->link_speed,
1658				       ((adapter->link_duplex == FULL_DUPLEX) ?
1659					"Full Duplex" : "Half Duplex"));
1660			adapter->link_active = 1;
1661			adapter->smartspeed = 0;
1662			if_link_state_change(ifp, LINK_STATE_UP);
1663		}
1664	} else {
1665		if (adapter->link_active == 1) {
1666			adapter->link_speed = 0;
1667			adapter->link_duplex = 0;
1668			if (bootverbose)
1669				printf("em%d: Link is Down\n", adapter->unit);
1670			adapter->link_active = 0;
1671			if_link_state_change(ifp, LINK_STATE_DOWN);
1672		}
1673	}
1674
1675	return;
1676}
1677
1678/*********************************************************************
1679 *
1680 *  This routine disables all traffic on the adapter by issuing a
1681 *  global reset on the MAC and deallocates TX/RX buffers.
1682 *
1683 **********************************************************************/
1684
1685static void
1686em_stop(void *arg)
1687{
1688	struct ifnet   *ifp;
1689	struct adapter * adapter = arg;
1690	ifp = adapter->ifp;
1691
1692	mtx_assert(&adapter->mtx, MA_OWNED);
1693
1694	INIT_DEBUGOUT("em_stop: begin");
1695
1696	em_disable_intr(adapter);
1697	em_reset_hw(&adapter->hw);
1698	callout_stop(&adapter->timer);
1699	callout_stop(&adapter->tx_fifo_timer);
1700	em_free_transmit_structures(adapter);
1701	em_free_receive_structures(adapter);
1702
1703
1704	/* Tell the stack that the interface is no longer active */
1705	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1706
1707	return;
1708}
1709
1710
1711/*********************************************************************
1712 *
1713 *  Determine hardware revision.
1714 *
1715 **********************************************************************/
1716static void
1717em_identify_hardware(struct adapter * adapter)
1718{
1719	device_t dev = adapter->dev;
1720
1721	/* Make sure bus mastering and memory access are enabled in PCI config space */
1722	adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1723	if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1724	      (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1725		printf("em%d: Memory Access and/or Bus Master bits were not set!\n",
1726		       adapter->unit);
1727		adapter->hw.pci_cmd_word |=
1728		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1729		pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
1730	}
1731
1732	/* Save off the information about this board */
1733	adapter->hw.vendor_id = pci_get_vendor(dev);
1734	adapter->hw.device_id = pci_get_device(dev);
1735	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1736	adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
1737	adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
1738
1739	/* Identify the MAC */
1740        if (em_set_mac_type(&adapter->hw))
1741                printf("em%d: Unknown MAC Type\n", adapter->unit);
1742
1743	if(adapter->hw.mac_type == em_82541 ||
1744	   adapter->hw.mac_type == em_82541_rev_2 ||
1745	   adapter->hw.mac_type == em_82547 ||
1746	   adapter->hw.mac_type == em_82547_rev_2)
1747		adapter->hw.phy_init_script = TRUE;
1748
1749        return;
1750}
1751
1752static int
1753em_allocate_pci_resources(struct adapter * adapter)
1754{
1755	int             val, rid;
1756	device_t        dev = adapter->dev;
1757
1758	rid = PCIR_BAR(0);
1759	adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1760						     &rid, RF_ACTIVE);
1761	if (!(adapter->res_memory)) {
1762		printf("em%d: Unable to allocate bus resource: memory\n",
1763		       adapter->unit);
1764		return(ENXIO);
1765	}
1766	adapter->osdep.mem_bus_space_tag =
1767	rman_get_bustag(adapter->res_memory);
1768	adapter->osdep.mem_bus_space_handle =
1769	rman_get_bushandle(adapter->res_memory);
1770	adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
1771
1772
1773	if (adapter->hw.mac_type > em_82543) {
1774		/* Figure out where our I/O BAR is. */
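		/*
		 * Note: each 32-bit BAR occupies 4 bytes of config space and a
		 * 64-bit memory BAR occupies 8, which is why an extra rid += 4
		 * is done when a 64-bit BAR is detected in the loop below.
		 */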
1775		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
1776			val = pci_read_config(dev, rid, 4);
1777			if (E1000_BAR_TYPE(val) == E1000_BAR_TYPE_IO) {
1778				adapter->io_rid = rid;
1779				break;
1780			}
1781			rid += 4;
1782			/* check for 64bit BAR */
1783			if (E1000_BAR_MEM_TYPE(val) == E1000_BAR_MEM_TYPE_64BIT)
1784				rid += 4;
1785		}
1786		if (rid >= PCIR_CIS) {
1787			printf("em%d: Unable to locate IO BAR\n", adapter->unit);
1788			return (ENXIO);
1789		}
1790		adapter->res_ioport = bus_alloc_resource_any(dev,
1791							     SYS_RES_IOPORT,
1792							     &adapter->io_rid,
1793							     RF_ACTIVE);
1794		if (!(adapter->res_ioport)) {
1795			printf("em%d: Unable to allocate bus resource: ioport\n",
1796			       adapter->unit);
1797			return(ENXIO);
1798		}
1799		adapter->hw.io_base = 0;
1800		adapter->osdep.io_bus_space_tag =
1801		    rman_get_bustag(adapter->res_ioport);
1802		adapter->osdep.io_bus_space_handle =
1803		    rman_get_bushandle(adapter->res_ioport);
1804	}
1805
1806	rid = 0x0;
1807	adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1808						        RF_SHAREABLE |
1809							RF_ACTIVE);
1810	if (!(adapter->res_interrupt)) {
1811		printf("em%d: Unable to allocate bus resource: interrupt\n",
1812		       adapter->unit);
1813		return(ENXIO);
1814	}
1815	if (bus_setup_intr(dev, adapter->res_interrupt,
1816			   INTR_TYPE_NET | INTR_MPSAFE,
1817			   (void (*)(void *)) em_intr, adapter,
1818			   &adapter->int_handler_tag)) {
1819		printf("em%d: Error registering interrupt handler!\n",
1820		       adapter->unit);
1821		return(ENXIO);
1822	}
1823
1824	adapter->hw.back = &adapter->osdep;
1825
1826	return(0);
1827}
1828
1829static void
1830em_free_pci_resources(struct adapter * adapter)
1831{
1832	device_t dev = adapter->dev;
1833
1834	if (adapter->res_interrupt != NULL) {
1835		bus_teardown_intr(dev, adapter->res_interrupt,
1836				  adapter->int_handler_tag);
1837		bus_release_resource(dev, SYS_RES_IRQ, 0,
1838				     adapter->res_interrupt);
1839	}
1840	if (adapter->res_memory != NULL) {
1841		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
1842				     adapter->res_memory);
1843	}
1844
1845	if (adapter->res_ioport != NULL) {
1846		bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
1847				     adapter->res_ioport);
1848	}
1849	return;
1850}
1851
1852/*********************************************************************
1853 *
1854 *  Initialize the hardware to a configuration as specified by the
1855 *  adapter structure. The controller is reset, the EEPROM is
1856 *  verified, the MAC address is set, then the shared initialization
1857 *  routines are called.
1858 *
1859 **********************************************************************/
1860static int
1861em_hardware_init(struct adapter * adapter)
1862{
1863        INIT_DEBUGOUT("em_hardware_init: begin");
1864	/* Issue a global reset */
1865	em_reset_hw(&adapter->hw);
1866
1867	/* When hardware is reset, fifo_head is also reset */
1868	adapter->tx_fifo_head = 0;
1869
1870	/* Make sure we have a good EEPROM before we read from it */
1871	if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
1872		printf("em%d: The EEPROM Checksum Is Not Valid\n",
1873		       adapter->unit);
1874		return(EIO);
1875	}
1876
1877	if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
1878		printf("em%d: EEPROM read error while reading part number\n",
1879		       adapter->unit);
1880		return(EIO);
1881	}
1882
1883	if (em_init_hw(&adapter->hw) < 0) {
1884		printf("em%d: Hardware Initialization Failed\n",
1885		       adapter->unit);
1886		return(EIO);
1887	}
1888
1889	em_check_for_link(&adapter->hw);
1890	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
1891		adapter->link_active = 1;
1892	else
1893		adapter->link_active = 0;
1894
1895	if (adapter->link_active) {
1896		em_get_speed_and_duplex(&adapter->hw,
1897					&adapter->link_speed,
1898					&adapter->link_duplex);
1899	} else {
1900		adapter->link_speed = 0;
1901		adapter->link_duplex = 0;
1902	}
1903
1904	return(0);
1905}
1906
1907/*********************************************************************
1908 *
1909 *  Setup networking device structure and register an interface.
1910 *
1911 **********************************************************************/
1912static void
1913em_setup_interface(device_t dev, struct adapter * adapter)
1914{
1915	struct ifnet   *ifp;
1916	INIT_DEBUGOUT("em_setup_interface: begin");
1917
1918	ifp = adapter->ifp = if_alloc(IFT_ETHER);
1919	if (ifp == NULL)
1920		panic("%s: can not if_alloc()", device_get_nameunit(dev));
1921	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1922	ifp->if_mtu = ETHERMTU;
1923	ifp->if_baudrate = 1000000000;
1924	ifp->if_init =  em_init;
1925	ifp->if_softc = adapter;
1926	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1927	ifp->if_ioctl = em_ioctl;
1928	ifp->if_start = em_start;
1929	ifp->if_watchdog = em_watchdog;
1930	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
1931	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
1932	IFQ_SET_READY(&ifp->if_snd);
1933
1934        ether_ifattach(ifp, adapter->hw.mac_addr);
1935
1936	ifp->if_capabilities = ifp->if_capenable = 0;
1937
1938	if (adapter->hw.mac_type >= em_82543) {
1939		ifp->if_capabilities |= IFCAP_HWCSUM;
1940		ifp->if_capenable |= IFCAP_HWCSUM;
1941	}
1942
1943	/*
1944	 * Tell the upper layer(s) we support long frames.
1945	 */
1946	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1947	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1948	ifp->if_capenable |= IFCAP_VLAN_MTU;
1949
1950#ifdef DEVICE_POLLING
1951	ifp->if_capabilities |= IFCAP_POLLING;
1952#endif
1953
1954	/*
1955	 * Specify the media types supported by this adapter and register
1956	 * callbacks to update media and link information
1957	 */
1958	ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
1959		     em_media_status);
1960	if (adapter->hw.media_type == em_media_type_fiber) {
1961		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
1962			    0, NULL);
1963		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
1964			    0, NULL);
1965	} else {
1966		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1967		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
1968			    0, NULL);
1969		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
1970			    0, NULL);
1971		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
1972			    0, NULL);
1973		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1974			    0, NULL);
1975		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1976	}
1977	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1978	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1979
1980	return;
1981}
1982
1983
1984/*********************************************************************
1985 *
1986 *  Workaround for SmartSpeed on 82541 and 82547 controllers
1987 *
1988 **********************************************************************/
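/*
 * Summary of the flow implemented below: while the link is down on an IGP PHY
 * whose autonegotiation advertises 1000BASE-T full duplex, the routine first
 * looks for a Master/Slave configuration fault on two consecutive PHY status
 * reads and, if found, clears the manually forced Master/Slave setting and
 * restarts autonegotiation.  If the link is still down after
 * EM_SMARTSPEED_DOWNSHIFT timer ticks it re-enables the manual Master/Slave
 * setting, and after EM_SMARTSPEED_MAX ticks the counter wraps so the whole
 * process starts over.
 */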
1989static void
1990em_smartspeed(struct adapter *adapter)
1991{
1992        uint16_t phy_tmp;
1993
1994	if(adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
1995	   !adapter->hw.autoneg || !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
1996		return;
1997
1998        if(adapter->smartspeed == 0) {
1999                /* If the Master/Slave config fault is asserted on two
2000                 * back-to-back reads, treat the fault as real */
2001                em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2002                if(!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) return;
2003                em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2004                if(phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2005                        em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
2006					&phy_tmp);
2007                        if(phy_tmp & CR_1000T_MS_ENABLE) {
2008                                phy_tmp &= ~CR_1000T_MS_ENABLE;
2009                                em_write_phy_reg(&adapter->hw,
2010                                                    PHY_1000T_CTRL, phy_tmp);
2011                                adapter->smartspeed++;
2012                                if(adapter->hw.autoneg &&
2013                                   !em_phy_setup_autoneg(&adapter->hw) &&
2014				   !em_read_phy_reg(&adapter->hw, PHY_CTRL,
2015                                                       &phy_tmp)) {
2016                                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
2017                                                    MII_CR_RESTART_AUTO_NEG);
2018                                        em_write_phy_reg(&adapter->hw,
2019							 PHY_CTRL, phy_tmp);
2020                                }
2021                        }
2022                }
2023                return;
2024        } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2025                /* If there is still no link, perhaps we are on a 2/3 pair cable */
2026                em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2027                phy_tmp |= CR_1000T_MS_ENABLE;
2028                em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2029                if(adapter->hw.autoneg &&
2030                   !em_phy_setup_autoneg(&adapter->hw) &&
2031                   !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
2032                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
2033                                    MII_CR_RESTART_AUTO_NEG);
2034                        em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
2035                }
2036        }
2037        /* Restart process after EM_SMARTSPEED_MAX iterations */
2038        if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2039                adapter->smartspeed = 0;
2040
2041	return;
2042}
2043
2044
2045/*
2046 * Manage DMA'able memory.
2047 */
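/*
 * Note: em_dmamap_cb() is the callback handed to bus_dmamap_load().  The
 * loads shown here map a single contiguous segment, so the callback simply
 * records that segment's bus address through the opaque argument; callers
 * then check the returned address for validity.
 */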
2048static void
2049em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2050{
2051        if (error)
2052                return;
2053        *(bus_addr_t*) arg = segs->ds_addr;
2054        return;
2055}
2056
2057static int
2058em_dma_malloc(struct adapter *adapter, bus_size_t size,
2059        struct em_dma_alloc *dma, int mapflags)
2060{
2061        int r;
2062
2063        r = bus_dma_tag_create(NULL,                    /* parent */
2064                               PAGE_SIZE, 0,            /* alignment, bounds */
2065                               BUS_SPACE_MAXADDR,       /* lowaddr */
2066                               BUS_SPACE_MAXADDR,       /* highaddr */
2067                               NULL, NULL,              /* filter, filterarg */
2068                               size,                    /* maxsize */
2069                               1,                       /* nsegments */
2070                               size,                    /* maxsegsize */
2071                               BUS_DMA_ALLOCNOW,        /* flags */
2072			       NULL,			/* lockfunc */
2073			       NULL,			/* lockarg */
2074                               &dma->dma_tag);
2075        if (r != 0) {
2076                printf("em%d: em_dma_malloc: bus_dma_tag_create failed; "
2077                        "error %u\n", adapter->unit, r);
2078                goto fail_0;
2079        }
2080
2081        r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2082                             BUS_DMA_NOWAIT, &dma->dma_map);
2083        if (r != 0) {
2084                printf("em%d: em_dma_malloc: bus_dmamem_alloc failed; "
2085                        "size %ju, error %d\n", adapter->unit,
2086			(uintmax_t)size, r);
2087                goto fail_2;
2088        }
2089
2090	dma->dma_paddr = 0;
2091        r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2092                            size,
2093                            em_dmamap_cb,
2094                            &dma->dma_paddr,
2095                            mapflags | BUS_DMA_NOWAIT);
2096        if (r != 0 || dma->dma_paddr == 0) {
2097                printf("em%d: em_dma_malloc: bus_dmamap_load failed; "
2098                        "error %u\n", adapter->unit, r);
2099                goto fail_3;
2100        }
2101
2102        return (0);
2103
2104fail_3:
2105        bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2106fail_2:
2107        bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2108        bus_dma_tag_destroy(dma->dma_tag);
2109fail_0:
2110        dma->dma_map = NULL;
2111        dma->dma_tag = NULL;
2112        return (r);
2113}
2114
2115static void
2116em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2117{
2118        bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2119        bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2120        bus_dma_tag_destroy(dma->dma_tag);
2121}
2122
2123
2124/*********************************************************************
2125 *
2126 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2127 *  the information needed to transmit a packet on the wire.
2128 *
2129 **********************************************************************/
2130static int
2131em_allocate_transmit_structures(struct adapter * adapter)
2132{
2133	if (!(adapter->tx_buffer_area =
2134	      (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2135					     adapter->num_tx_desc, M_DEVBUF,
2136					     M_NOWAIT))) {
2137		printf("em%d: Unable to allocate tx_buffer memory\n",
2138		       adapter->unit);
2139		return ENOMEM;
2140	}
2141
2142	bzero(adapter->tx_buffer_area,
2143	      sizeof(struct em_buffer) * adapter->num_tx_desc);
2144
2145	return 0;
2146}
2147
2148/*********************************************************************
2149 *
2150 *  Allocate and initialize transmit structures.
2151 *
2152 **********************************************************************/
2153static int
2154em_setup_transmit_structures(struct adapter * adapter)
2155{
2156        /*
2157         * Setup DMA descriptor areas.
2158         */
2159        if (bus_dma_tag_create(NULL,                    /* parent */
2160                               1, 0,                    /* alignment, bounds */
2161                               BUS_SPACE_MAXADDR,       /* lowaddr */
2162                               BUS_SPACE_MAXADDR,       /* highaddr */
2163                               NULL, NULL,              /* filter, filterarg */
2164                               MCLBYTES * 8,            /* maxsize */
2165                               EM_MAX_SCATTER,          /* nsegments */
2166                               MCLBYTES * 8,            /* maxsegsize */
2167                               BUS_DMA_ALLOCNOW,        /* flags */
2168			       NULL,			/* lockfunc */
2169			       NULL,			/* lockarg */
2170                               &adapter->txtag)) {
2171                printf("em%d: Unable to allocate TX DMA tag\n", adapter->unit);
2172                return (ENOMEM);
2173        }
2174
2175        if (em_allocate_transmit_structures(adapter))
2176                return (ENOMEM);
2177
2178        bzero((void *) adapter->tx_desc_base,
2179              (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
2180
2181        adapter->next_avail_tx_desc = 0;
2182        adapter->oldest_used_tx_desc = 0;
2183
2184        /* Set number of descriptors available */
2185        adapter->num_tx_desc_avail = adapter->num_tx_desc;
2186
2187        /* Set checksum context */
2188        adapter->active_checksum_context = OFFLOAD_NONE;
2189
2190        return (0);
2191}
2192
2193/*********************************************************************
2194 *
2195 *  Enable transmit unit.
2196 *
2197 **********************************************************************/
2198static void
2199em_initialize_transmit_unit(struct adapter * adapter)
2200{
2201	u_int32_t       reg_tctl;
2202	u_int32_t       reg_tipg = 0;
2203	u_int64_t	bus_addr;
2204
2205         INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
2206	/* Setup the Base and Length of the Tx Descriptor Ring */
2207	bus_addr = adapter->txdma.dma_paddr;
2208	E1000_WRITE_REG(&adapter->hw, TDBAL, (u_int32_t)bus_addr);
2209	E1000_WRITE_REG(&adapter->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
2210	E1000_WRITE_REG(&adapter->hw, TDLEN,
2211			adapter->num_tx_desc *
2212			sizeof(struct em_tx_desc));
2213
2214	/* Setup the HW Tx Head and Tail descriptor pointers */
2215	E1000_WRITE_REG(&adapter->hw, TDH, 0);
2216	E1000_WRITE_REG(&adapter->hw, TDT, 0);
2217
2218
2219	HW_DEBUGOUT2("Base = %x, Length = %x\n",
2220		     E1000_READ_REG(&adapter->hw, TDBAL),
2221		     E1000_READ_REG(&adapter->hw, TDLEN));
2222
2223	/* Set the default values for the Tx Inter Packet Gap timer */
2224	switch (adapter->hw.mac_type) {
2225	case em_82542_rev2_0:
2226        case em_82542_rev2_1:
2227                reg_tipg = DEFAULT_82542_TIPG_IPGT;
2228                reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2229                reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2230                break;
2231        default:
2232                if (adapter->hw.media_type == em_media_type_fiber)
2233                        reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2234                else
2235                        reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2236                reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2237                reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2238        }
2239
2240	E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
2241	E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
2242	if(adapter->hw.mac_type >= em_82540)
2243		E1000_WRITE_REG(&adapter->hw, TADV,
2244		    adapter->tx_abs_int_delay.value);
2245
2246	/* Program the Transmit Control Register */
2247	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
2248		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2249	if (adapter->hw.mac_type >= em_82573)
2250		reg_tctl |= E1000_TCTL_MULR;
2251	if (adapter->link_duplex == 1) {
2252		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2253	} else {
2254		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2255	}
2256	E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
2257
2258	/* Setup Transmit Descriptor Settings for this adapter */
2259	adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;
2260
2261	if (adapter->tx_int_delay.value > 0)
2262		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2263
2264	return;
2265}
2266
2267/*********************************************************************
2268 *
2269 *  Free all transmit related data structures.
2270 *
2271 **********************************************************************/
2272static void
2273em_free_transmit_structures(struct adapter * adapter)
2274{
2275        struct em_buffer   *tx_buffer;
2276        int             i;
2277
2278        INIT_DEBUGOUT("free_transmit_structures: begin");
2279
2280        if (adapter->tx_buffer_area != NULL) {
2281                tx_buffer = adapter->tx_buffer_area;
2282                for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2283                        if (tx_buffer->m_head != NULL) {
2284                                bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2285                                bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2286                                m_freem(tx_buffer->m_head);
2287                        }
2288                        tx_buffer->m_head = NULL;
2289                }
2290        }
2291        if (adapter->tx_buffer_area != NULL) {
2292                free(adapter->tx_buffer_area, M_DEVBUF);
2293                adapter->tx_buffer_area = NULL;
2294        }
2295        if (adapter->txtag != NULL) {
2296                bus_dma_tag_destroy(adapter->txtag);
2297                adapter->txtag = NULL;
2298        }
2299        return;
2300}
2301
2302/*********************************************************************
2303 *
2304 *  The offload context needs to be set when we transfer the first
2305 *  packet of a particular protocol (TCP/UDP). We change the
2306 *  context only if the protocol type changes.
2307 *
2308 **********************************************************************/
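/*
 * Note: programming a fresh offload context consumes one transmit descriptor
 * (see the tail of this routine), which is why the active context type is
 * cached in adapter->active_checksum_context and only reloaded when the
 * protocol actually changes.
 */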
2309static void
2310em_transmit_checksum_setup(struct adapter * adapter,
2311			   struct mbuf *mp,
2312			   u_int32_t *txd_upper,
2313			   u_int32_t *txd_lower)
2314{
2315	struct em_context_desc *TXD;
2316	struct em_buffer *tx_buffer;
2317	int curr_txd;
2318
2319	if (mp->m_pkthdr.csum_flags) {
2320
2321		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2322			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2323			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2324			if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
2325				return;
2326			else
2327				adapter->active_checksum_context = OFFLOAD_TCP_IP;
2328
2329		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2330			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2331			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2332			if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
2333				return;
2334			else
2335				adapter->active_checksum_context = OFFLOAD_UDP_IP;
2336		} else {
2337			*txd_upper = 0;
2338			*txd_lower = 0;
2339			return;
2340		}
2341	} else {
2342		*txd_upper = 0;
2343		*txd_lower = 0;
2344		return;
2345	}
2346
2347	/* If we reach this point, the checksum offload context
2348	 * needs to be reset.
2349	 */
2350	curr_txd = adapter->next_avail_tx_desc;
2351	tx_buffer = &adapter->tx_buffer_area[curr_txd];
2352	TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
2353
2354	TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2355	TXD->lower_setup.ip_fields.ipcso =
2356		ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2357	TXD->lower_setup.ip_fields.ipcse =
2358		htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2359
2360	TXD->upper_setup.tcp_fields.tucss =
2361		ETHER_HDR_LEN + sizeof(struct ip);
2362	TXD->upper_setup.tcp_fields.tucse = htole16(0);
2363
2364	if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
2365		TXD->upper_setup.tcp_fields.tucso =
2366			ETHER_HDR_LEN + sizeof(struct ip) +
2367			offsetof(struct tcphdr, th_sum);
2368	} else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
2369		TXD->upper_setup.tcp_fields.tucso =
2370			ETHER_HDR_LEN + sizeof(struct ip) +
2371			offsetof(struct udphdr, uh_sum);
2372	}
2373
2374	TXD->tcp_seg_setup.data = htole32(0);
2375	TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);
2376
2377	tx_buffer->m_head = NULL;
2378
2379	if (++curr_txd == adapter->num_tx_desc)
2380		curr_txd = 0;
2381
2382	adapter->num_tx_desc_avail--;
2383	adapter->next_avail_tx_desc = curr_txd;
2384
2385	return;
2386}
2387
2388/**********************************************************************
2389 *
2390 *  Examine each tx_buffer in the used queue. If the hardware is done
2391 *  processing the packet then free associated resources. The
2392 *  tx_buffer is put back on the free queue.
2393 *
2394 **********************************************************************/
2395static void
2396em_clean_transmit_interrupts(struct adapter * adapter)
2397{
2398        int i, num_avail;
2399        struct em_buffer *tx_buffer;
2400        struct em_tx_desc   *tx_desc;
2401	struct ifnet   *ifp = adapter->ifp;
2402
2403	mtx_assert(&adapter->mtx, MA_OWNED);
2404
2405        if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2406                return;
2407
2408        num_avail = adapter->num_tx_desc_avail;
2409        i = adapter->oldest_used_tx_desc;
2410
2411        tx_buffer = &adapter->tx_buffer_area[i];
2412        tx_desc = &adapter->tx_desc_base[i];
2413
2414        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2415            BUS_DMASYNC_POSTREAD);
2416        while (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2417
2418                tx_desc->upper.data = 0;
2419                num_avail++;
2420
2421                if (tx_buffer->m_head) {
2422			ifp->if_opackets++;
2423                        bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2424                        bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2425
2426                        m_freem(tx_buffer->m_head);
2427                        tx_buffer->m_head = NULL;
2428                }
2429
2430                if (++i == adapter->num_tx_desc)
2431                        i = 0;
2432
2433                tx_buffer = &adapter->tx_buffer_area[i];
2434                tx_desc = &adapter->tx_desc_base[i];
2435        }
2436        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2437            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2438
2439        adapter->oldest_used_tx_desc = i;
2440
2441        /*
2442         * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack
2443         * that it is OK to send packets.
2444         * If there are no pending descriptors, clear the timeout. Otherwise,
2445         * if some descriptors have been freed, restart the timeout.
2446         */
2447        if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
2448                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2449                if (num_avail == adapter->num_tx_desc)
2450                        ifp->if_timer = 0;
2451                else if (num_avail == adapter->num_tx_desc_avail)
2452                        ifp->if_timer = EM_TX_TIMEOUT;
2453        }
2454        adapter->num_tx_desc_avail = num_avail;
2455        return;
2456}
2457
2458/*********************************************************************
2459 *
2460 *  Get a buffer from system mbuf buffer pool.
2461 *
2462 **********************************************************************/
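/*
 * Note: for standard-MTU interfaces the code below trims ETHER_ALIGN (2)
 * bytes from the front of the cluster so that the IP header following the
 * 14-byte Ethernet header lands on a 32-bit boundary.
 */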
2463static int
2464em_get_buf(int i, struct adapter *adapter,
2465           struct mbuf *nmp)
2466{
2467        register struct mbuf    *mp = nmp;
2468        struct em_buffer *rx_buffer;
2469        struct ifnet   *ifp;
2470        bus_addr_t paddr;
2471        int error;
2472
2473        ifp = adapter->ifp;
2474
2475        if (mp == NULL) {
2476                mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2477                if (mp == NULL) {
2478                        adapter->mbuf_cluster_failed++;
2479                        return(ENOBUFS);
2480                }
2481                mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2482        } else {
2483                mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2484                mp->m_data = mp->m_ext.ext_buf;
2485                mp->m_next = NULL;
2486        }
2487
2488        if (ifp->if_mtu <= ETHERMTU) {
2489                m_adj(mp, ETHER_ALIGN);
2490        }
2491
2492        rx_buffer = &adapter->rx_buffer_area[i];
2493
2494        /*
2495         * Using memory from the mbuf cluster pool, invoke the
2496         * bus_dma machinery to arrange the memory mapping.
2497         */
2498	paddr = 0;
2499        error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
2500                                mtod(mp, void *), mp->m_len,
2501                                em_dmamap_cb, &paddr, 0);
2502        if (error || paddr == 0) {
2503                m_free(mp);
2504                return(error);
2505        }
2506        rx_buffer->m_head = mp;
2507        adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
2508        bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD |
2509	    BUS_DMASYNC_PREWRITE);
2510
2511        return(0);
2512}
2513
2514/*********************************************************************
2515 *
2516 *  Allocate memory for rx_buffer structures. Since we use one
2517 *  rx_buffer per received packet, the maximum number of rx_buffer's
2518 *  that we'll need is equal to the number of receive descriptors
2519 *  that we've allocated.
2520 *
2521 **********************************************************************/
2522static int
2523em_allocate_receive_structures(struct adapter * adapter)
2524{
2525        int             i, error;
2526        struct em_buffer *rx_buffer;
2527
2528        if (!(adapter->rx_buffer_area =
2529              (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2530                                          adapter->num_rx_desc, M_DEVBUF,
2531                                          M_NOWAIT))) {
2532                printf("em%d: Unable to allocate rx_buffer memory\n",
2533                       adapter->unit);
2534                return(ENOMEM);
2535        }
2536
2537        bzero(adapter->rx_buffer_area,
2538              sizeof(struct em_buffer) * adapter->num_rx_desc);
2539
2540        error = bus_dma_tag_create(NULL,                /* parent */
2541                               1, 0,                    /* alignment, bounds */
2542                               BUS_SPACE_MAXADDR,       /* lowaddr */
2543                               BUS_SPACE_MAXADDR,       /* highaddr */
2544                               NULL, NULL,              /* filter, filterarg */
2545                               MCLBYTES,                /* maxsize */
2546                               1,                       /* nsegments */
2547                               MCLBYTES,                /* maxsegsize */
2548                               BUS_DMA_ALLOCNOW,        /* flags */
2549			       NULL,			/* lockfunc */
2550			       NULL,			/* lockarg */
2551                               &adapter->rxtag);
2552        if (error != 0) {
2553                printf("em%d: em_allocate_receive_structures: "
2554                        "bus_dma_tag_create failed; error %u\n",
2555                       adapter->unit, error);
2556                goto fail_0;
2557        }
2558
2559        rx_buffer = adapter->rx_buffer_area;
2560        for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2561                error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2562                                          &rx_buffer->map);
2563                if (error != 0) {
2564                        printf("em%d: em_allocate_receive_structures: "
2565                                "bus_dmamap_create failed; error %u\n",
2566                                adapter->unit, error);
2567                        goto fail_1;
2568                }
2569        }
2570
2571        for (i = 0; i < adapter->num_rx_desc; i++) {
2572                error = em_get_buf(i, adapter, NULL);
2573                if (error != 0) {
2574                        adapter->rx_buffer_area[i].m_head = NULL;
2575                        adapter->rx_desc_base[i].buffer_addr = 0;
2576                        return(error);
2577                }
2578        }
2579        bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2580            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2581
2582        return(0);
2583
2584fail_1:
2585        bus_dma_tag_destroy(adapter->rxtag);
2586fail_0:
2587        adapter->rxtag = NULL;
2588        free(adapter->rx_buffer_area, M_DEVBUF);
2589        adapter->rx_buffer_area = NULL;
2590        return (error);
2591}
2592
2593/*********************************************************************
2594 *
2595 *  Allocate and initialize receive structures.
2596 *
2597 **********************************************************************/
2598static int
2599em_setup_receive_structures(struct adapter * adapter)
2600{
2601	bzero((void *) adapter->rx_desc_base,
2602              (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2603
2604	if (em_allocate_receive_structures(adapter))
2605		return ENOMEM;
2606
2607	/* Setup our descriptor pointers */
2608        adapter->next_rx_desc_to_check = 0;
2609	return(0);
2610}
2611
2612/*********************************************************************
2613 *
2614 *  Enable receive unit.
2615 *
2616 **********************************************************************/
2617static void
2618em_initialize_receive_unit(struct adapter * adapter)
2619{
2620	u_int32_t       reg_rctl;
2621	u_int32_t       reg_rxcsum;
2622	struct ifnet    *ifp;
2623	u_int64_t	bus_addr;
2624
2625        INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2626	ifp = adapter->ifp;
2627
2628	/* Make sure receives are disabled while setting up the descriptor ring */
2629	E1000_WRITE_REG(&adapter->hw, RCTL, 0);
2630
2631	/* Set the Receive Delay Timer Register */
2632	E1000_WRITE_REG(&adapter->hw, RDTR,
2633			adapter->rx_int_delay.value | E1000_RDT_FPDB);
2634
2635	if(adapter->hw.mac_type >= em_82540) {
2636		E1000_WRITE_REG(&adapter->hw, RADV,
2637		    adapter->rx_abs_int_delay.value);
2638
2639                /* Set the interrupt throttling rate.  Value is calculated
2640                 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */
2641#define MAX_INTS_PER_SEC        8000
2642#define DEFAULT_ITR             1000000000/(MAX_INTS_PER_SEC * 256)
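                /*
                 * For illustration: with MAX_INTS_PER_SEC = 8000 the define
                 * above works out to 1000000000 / 2048000 = 488 ITR units of
                 * 256ns each, i.e. interrupts are spaced at least ~125us
                 * apart (roughly 8000 interrupts per second).
                 */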
2643                E1000_WRITE_REG(&adapter->hw, ITR, DEFAULT_ITR);
2644        }
2645
2646	/* Setup the Base and Length of the Rx Descriptor Ring */
2647	bus_addr = adapter->rxdma.dma_paddr;
2648	E1000_WRITE_REG(&adapter->hw, RDBAL, (u_int32_t)bus_addr);
2649	E1000_WRITE_REG(&adapter->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
2650	E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
2651			sizeof(struct em_rx_desc));
2652
2653	/* Setup the HW Rx Head and Tail Descriptor Pointers */
2654	E1000_WRITE_REG(&adapter->hw, RDH, 0);
2655	E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
2656
2657	/* Setup the Receive Control Register */
2658	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2659		   E1000_RCTL_RDMTS_HALF |
2660		   (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
2661
2662	if (adapter->hw.tbi_compatibility_on == TRUE)
2663		reg_rctl |= E1000_RCTL_SBP;
2664
2665
2666	switch (adapter->rx_buffer_len) {
2667	default:
2668	case EM_RXBUFFER_2048:
2669		reg_rctl |= E1000_RCTL_SZ_2048;
2670		break;
2671	case EM_RXBUFFER_4096:
2672		reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2673		break;
2674	case EM_RXBUFFER_8192:
2675		reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2676		break;
2677	case EM_RXBUFFER_16384:
2678		reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2679		break;
2680	}
2681
2682	if (ifp->if_mtu > ETHERMTU)
2683		reg_rctl |= E1000_RCTL_LPE;
2684
2685	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
2686	if ((adapter->hw.mac_type >= em_82543) &&
2687	    (ifp->if_capenable & IFCAP_RXCSUM)) {
2688		reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
2689		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2690		E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
2691	}
2692
2693	/* Enable Receives */
2694	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2695
2696	return;
2697}
2698
2699/*********************************************************************
2700 *
2701 *  Free receive related data structures.
2702 *
2703 **********************************************************************/
2704static void
2705em_free_receive_structures(struct adapter *adapter)
2706{
2707        struct em_buffer   *rx_buffer;
2708        int             i;
2709
2710        INIT_DEBUGOUT("free_receive_structures: begin");
2711
2712        if (adapter->rx_buffer_area != NULL) {
2713                rx_buffer = adapter->rx_buffer_area;
2714                for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2715                        if (rx_buffer->map != NULL) {
2716                                bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2717                                bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2718                        }
2719                        if (rx_buffer->m_head != NULL)
2720                                m_freem(rx_buffer->m_head);
2721                        rx_buffer->m_head = NULL;
2722                }
2723        }
2724        if (adapter->rx_buffer_area != NULL) {
2725                free(adapter->rx_buffer_area, M_DEVBUF);
2726                adapter->rx_buffer_area = NULL;
2727        }
2728        if (adapter->rxtag != NULL) {
2729                bus_dma_tag_destroy(adapter->rxtag);
2730                adapter->rxtag = NULL;
2731        }
2732        return;
2733}
2734
2735/*********************************************************************
2736 *
2737 *  This routine executes in interrupt context. It replenishes
2738 *  the mbufs in the descriptor ring and passes data that has been
2739 *  DMA'ed into host memory up to the upper layers.
2740 *
2741 *  We loop at most count times if count is > 0, or until done if
2742 *  count < 0.
2743 *
2744 *********************************************************************/
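/*
 * Note: in the loop below, count is decremented only when a descriptor
 * carrying the end-of-packet (EOP) bit is consumed, so the limit applies to
 * completed frames rather than to individual descriptors.
 */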
2745static void
2746em_process_receive_interrupts(struct adapter * adapter, int count)
2747{
2748	struct ifnet        *ifp;
2749	struct mbuf         *mp;
2750	u_int8_t            accept_frame = 0;
2751 	u_int8_t            eop = 0;
2752	u_int16_t           len, desc_len, prev_len_adj;
2753	int                 i;
2754
2755	/* Pointer to the receive descriptor being examined. */
2756	struct em_rx_desc   *current_desc;
2757
2758	mtx_assert(&adapter->mtx, MA_OWNED);
2759
2760	ifp = adapter->ifp;
2761	i = adapter->next_rx_desc_to_check;
2762        current_desc = &adapter->rx_desc_base[i];
2763	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2764	    BUS_DMASYNC_POSTREAD);
2765
2766	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
2767		return;
2768	}
2769
2770	while ((current_desc->status & E1000_RXD_STAT_DD) &&
2771		    (count != 0) &&
2772		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2773		struct mbuf *m = NULL;
2774
2775		mp = adapter->rx_buffer_area[i].m_head;
2776		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2777				BUS_DMASYNC_POSTREAD);
2778
2779		accept_frame = 1;
2780		prev_len_adj = 0;
2781                desc_len = le16toh(current_desc->length);
2782		if (current_desc->status & E1000_RXD_STAT_EOP) {
2783			count--;
2784			eop = 1;
2785			if (desc_len < ETHER_CRC_LEN) {
2786                                len = 0;
2787                                prev_len_adj = ETHER_CRC_LEN - desc_len;
2788                        }
2789                        else {
2790                                len = desc_len - ETHER_CRC_LEN;
2791                        }
2792		} else {
2793			eop = 0;
2794			len = desc_len;
2795		}
2796
2797		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2798			u_int8_t            last_byte;
2799			u_int32_t           pkt_len = desc_len;
2800
2801			if (adapter->fmp != NULL)
2802				pkt_len += adapter->fmp->m_pkthdr.len;
2803
2804			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
2805
2806			if (TBI_ACCEPT(&adapter->hw, current_desc->status,
2807				       current_desc->errors,
2808				       pkt_len, last_byte)) {
2809				em_tbi_adjust_stats(&adapter->hw,
2810						    &adapter->stats,
2811						    pkt_len,
2812						    adapter->hw.mac_addr);
2813				if (len > 0) len--;
2814			}
2815			else {
2816				accept_frame = 0;
2817			}
2818		}
2819
2820		if (accept_frame) {
2821
2822			if (em_get_buf(i, adapter, NULL) == ENOBUFS) {
2823				adapter->dropped_pkts++;
2824				em_get_buf(i, adapter, mp);
2825				if (adapter->fmp != NULL)
2826					m_freem(adapter->fmp);
2827				adapter->fmp = NULL;
2828				adapter->lmp = NULL;
2829				break;
2830			}
2831
2832			/* Assign correct length to the current fragment */
2833			mp->m_len = len;
2834
2835			if (adapter->fmp == NULL) {
2836				mp->m_pkthdr.len = len;
2837				adapter->fmp = mp;	 /* Store the first mbuf */
2838				adapter->lmp = mp;
2839			} else {
2840				/* Chain mbufs together */
2841				mp->m_flags &= ~M_PKTHDR;
2842				/*
2843                                 * Adjust length of previous mbuf in chain if we
2844                                 * received less than 4 bytes in the last descriptor.
2845                                 */
2846				if (prev_len_adj > 0) {
2847					adapter->lmp->m_len -= prev_len_adj;
2848					adapter->fmp->m_pkthdr.len -= prev_len_adj;
2849				}
2850				adapter->lmp->m_next = mp;
2851				adapter->lmp = adapter->lmp->m_next;
2852				adapter->fmp->m_pkthdr.len += len;
2853			}
2854
2855                        if (eop) {
2856                                adapter->fmp->m_pkthdr.rcvif = ifp;
2857				ifp->if_ipackets++;
2858                                em_receive_checksum(adapter, current_desc,
2859                                                    adapter->fmp);
2860                                if (current_desc->status & E1000_RXD_STAT_VP)
2861					VLAN_INPUT_TAG(ifp, adapter->fmp,
2862					    (le16toh(current_desc->special) &
2863					    E1000_RXD_SPC_VLAN_MASK),
2864					    adapter->fmp = NULL);
2865
2866				m = adapter->fmp;
2867				adapter->fmp = NULL;
2868				adapter->lmp = NULL;
2869                        }
2870		} else {
2871			adapter->dropped_pkts++;
2872			em_get_buf(i, adapter, mp);
2873			if (adapter->fmp != NULL)
2874				m_freem(adapter->fmp);
2875			adapter->fmp = NULL;
2876			adapter->lmp = NULL;
2877		}
2878
2879		/* Zero out the receive descriptor's status */
2880		current_desc->status = 0;
2881		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2882		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2883
2884		/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
2885                E1000_WRITE_REG(&adapter->hw, RDT, i);
2886
2887                /* Advance our pointers to the next descriptor */
2888		if (++i == adapter->num_rx_desc)
2889			i = 0;
2890		if (m != NULL) {
2891			adapter->next_rx_desc_to_check = i;
2892			EM_UNLOCK(adapter);
2893			(*ifp->if_input)(ifp, m);
2894			EM_LOCK(adapter);
2895			i = adapter->next_rx_desc_to_check;
2896		}
2897		current_desc = &adapter->rx_desc_base[i];
2898	}
2899	adapter->next_rx_desc_to_check = i;
2900	return;
2901}
2902
2903/*********************************************************************
2904 *
2905 *  Verify that the hardware indicated that the checksum is valid.
2906 *  Inform the stack of the checksum status so that the stack
2907 *  does not spend time verifying the checksum again.
2908 *
2909 *********************************************************************/
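/*
 * Note: when the hardware reports a good TCP/UDP checksum, the code below
 * sets CSUM_DATA_VALID | CSUM_PSEUDO_HDR with csum_data = 0xffff, the usual
 * way of telling the stack that the payload checksum has already been
 * verified.
 */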
2910static void
2911em_receive_checksum(struct adapter *adapter,
2912		    struct em_rx_desc *rx_desc,
2913		    struct mbuf *mp)
2914{
2915	/* 82543 or newer only */
2916	if ((adapter->hw.mac_type < em_82543) ||
2917	    /* Ignore Checksum bit is set */
2918	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
2919		mp->m_pkthdr.csum_flags = 0;
2920		return;
2921	}
2922
2923	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
2924		/* Did it pass? */
2925		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
2926			/* IP Checksum Good */
2927			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2928			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2929
2930		} else {
2931			mp->m_pkthdr.csum_flags = 0;
2932		}
2933	}
2934
2935	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
2936		/* Did it pass? */
2937		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
2938			mp->m_pkthdr.csum_flags |=
2939			(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2940			mp->m_pkthdr.csum_data = htons(0xffff);
2941		}
2942	}
2943
2944	return;
2945}
2946
2947
2948static void
2949em_enable_vlans(struct adapter *adapter)
2950{
2951	uint32_t ctrl;
2952
2953	E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
2954
2955	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2956	ctrl |= E1000_CTRL_VME;
2957	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2958
2959	return;
2960}
2961
2962static void
2963em_disable_vlans(struct adapter *adapter)
2964{
2965	uint32_t ctrl;
2966
2967	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2968	ctrl &= ~E1000_CTRL_VME;
2969	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2970
2971	return;
2972}
2973
2974static void
2975em_enable_intr(struct adapter * adapter)
2976{
2977	E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
2978	return;
2979}
2980
2981static void
2982em_disable_intr(struct adapter *adapter)
2983{
2984	/*
2985	 * The first version of the 82542 had an erratum where, when link was forced,
2986	 * it would stay up even if the cable was disconnected.  Sequence errors were
2987	 * used to detect the disconnect, and the driver would then unforce the link.
2988	 * That code lives in the ISR; for it to work correctly the sequence error
2989	 * interrupt had to be enabled all the time.
2990	 */
2991
2992	if (adapter->hw.mac_type == em_82542_rev2_0)
2993	    E1000_WRITE_REG(&adapter->hw, IMC,
2994	        (0xffffffff & ~E1000_IMC_RXSEQ));
2995	else
2996	    E1000_WRITE_REG(&adapter->hw, IMC,
2997	        0xffffffff);
2998	return;
2999}
3000
3001static int
3002em_is_valid_ether_addr(u_int8_t *addr)
3003{
3004        char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3005
3006        if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3007                return (FALSE);
3008        }
3009
3010        return(TRUE);
3011}
3012
3013void
3014em_write_pci_cfg(struct em_hw *hw,
3015		      uint32_t reg,
3016		      uint16_t *value)
3017{
3018	pci_write_config(((struct em_osdep *)hw->back)->dev, reg,
3019			 *value, 2);
3020}
3021
3022void
3023em_read_pci_cfg(struct em_hw *hw, uint32_t reg,
3024		     uint16_t *value)
3025{
3026	*value = pci_read_config(((struct em_osdep *)hw->back)->dev,
3027				 reg, 2);
3028	return;
3029}
3030
3031void
3032em_pci_set_mwi(struct em_hw *hw)
3033{
3034        pci_write_config(((struct em_osdep *)hw->back)->dev,
3035                         PCIR_COMMAND,
3036                         (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
3037        return;
3038}
3039
3040void
3041em_pci_clear_mwi(struct em_hw *hw)
3042{
3043        pci_write_config(((struct em_osdep *)hw->back)->dev,
3044                         PCIR_COMMAND,
3045                         (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
3046        return;
3047}
3048
3049/*********************************************************************
3050* 82544 Coexistence issue workaround.
3051*    There are two issues:
3052*       1. Transmit hang issue.
3053*    To detect this issue, the following equation can be used:
3054*          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3055*          If SUM[3:0] is between 1 and 4, we will hit this issue.
3056*
3057*       2. DAC issue.
3058*    To detect this issue, the following equation can be used:
3059*          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3060*          If SUM[3:0] is between 9 and 0xC, we will hit this issue.
3061*
3062*
3063*    WORKAROUND:
3064*          Make sure we do not have an ending address (SUM[3:0]) of
3065*          1, 2, 3, 4 (hang) or 9, a, b, c (DAC).
3066**********************************************************************/
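/*
 * Worked example (illustrative numbers only): a buffer whose address has
 * ADDR[2:0] = 0x2 and whose length has SIZE[3:0] = 0x0 yields a terminator of
 * (0x2 + 0x0) & 0xF = 0x2, inside the 1-4 hang range, so em_fill_descriptors()
 * below splits it into a descriptor of length (length - 4) plus a trailing
 * 4-byte descriptor.  A terminator of 0x0, 0x5-0x8, or 0xD-0xF is considered
 * safe and the buffer is passed through as a single descriptor.
 */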
3067static u_int32_t
3068em_fill_descriptors (bus_addr_t address,
3069                              u_int32_t length,
3070                              PDESC_ARRAY desc_array)
3071{
3072        /* The issue is sensitive to both length and address, */
3073        /* so let us first check the address... */
3074        u_int32_t safe_terminator;
3075        if (length <= 4) {
3076                desc_array->descriptor[0].address = address;
3077                desc_array->descriptor[0].length = length;
3078                desc_array->elements = 1;
3079                return desc_array->elements;
3080        }
3081        safe_terminator = (u_int32_t)((((u_int32_t)address & 0x7) + (length & 0xF)) & 0xF);
3082        /* If it does not fall within 0x1-0x4 or 0x9-0xC, a single descriptor will do */
3083        if (safe_terminator == 0   ||
3084        (safe_terminator > 4   &&
3085        safe_terminator < 9)   ||
3086        (safe_terminator > 0xC &&
3087        safe_terminator <= 0xF)) {
3088                desc_array->descriptor[0].address = address;
3089                desc_array->descriptor[0].length = length;
3090                desc_array->elements = 1;
3091                return desc_array->elements;
3092        }
3093
3094        desc_array->descriptor[0].address = address;
3095        desc_array->descriptor[0].length = length - 4;
3096        desc_array->descriptor[1].address = address + (length - 4);
3097        desc_array->descriptor[1].length = 4;
3098        desc_array->elements = 2;
3099        return desc_array->elements;
3100}
3101
3102/**********************************************************************
3103 *
3104 *  Update the board statistics counters.
3105 *
3106 **********************************************************************/
3107static void
3108em_update_stats_counters(struct adapter *adapter)
3109{
3110	struct ifnet   *ifp;
3111
3112	if(adapter->hw.media_type == em_media_type_copper ||
3113	   (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
3114		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
3115		adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
3116	}
3117	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
3118	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
3119	adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
3120	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);
3121
3122	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
3123	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
3124	adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
3125	adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
3126	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
3127	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
3128	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
3129	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
3130	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
3131	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
3132	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
3133	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
3134	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
3135	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
3136	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
3137	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
3138	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
3139	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
3140	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
3141	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);
3142
3143	/* For the 64-bit byte counters the low dword must be read first. */
3144	/* Both registers clear on the read of the high dword */
3145
3146	adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
3147	adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
3148	adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
3149	adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);
3150
3151	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
3152	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
3153	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
3154	adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
3155	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);
3156
3157	adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
3158	adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
3159	adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
3160	adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);
3161
3162	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
3163	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
3164	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
3165	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
3166	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
3167	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
3168	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
3169	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
3170	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
3171	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);
3172
3173	if (adapter->hw.mac_type >= em_82543) {
3174		adapter->stats.algnerrc +=
3175		E1000_READ_REG(&adapter->hw, ALGNERRC);
3176		adapter->stats.rxerrc +=
3177		E1000_READ_REG(&adapter->hw, RXERRC);
3178		adapter->stats.tncrs +=
3179		E1000_READ_REG(&adapter->hw, TNCRS);
3180		adapter->stats.cexterr +=
3181		E1000_READ_REG(&adapter->hw, CEXTERR);
3182		adapter->stats.tsctc +=
3183		E1000_READ_REG(&adapter->hw, TSCTC);
3184		adapter->stats.tsctfc +=
3185		E1000_READ_REG(&adapter->hw, TSCTFC);
3186	}
3187	ifp = adapter->ifp;
3188
3189	ifp->if_collisions = adapter->stats.colc;
3190
3191	/* Rx Errors */
3192	ifp->if_ierrors =
3193	adapter->dropped_pkts +
3194	adapter->stats.rxerrc +
3195	adapter->stats.crcerrs +
3196	adapter->stats.algnerrc +
3197	adapter->stats.rlec +
3198	adapter->stats.mpc + adapter->stats.cexterr;
3199
3200	/* Tx Errors */
3201	ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol +
3202	    adapter->watchdog_events;
3203
3204}
3205
3206
3207/**********************************************************************
3208 *
3209 *  This debugging routine (see also em_display_debug_stats and the
3210 *  em_sysctl_debug_info() handler below) provides a way to take a look
3211 *  at important statistics maintained by the driver and the hardware.
3212 *
3213 **********************************************************************/
3214static void
3215em_print_debug_info(struct adapter *adapter)
3216{
3217	int unit = adapter->unit;
3218	uint8_t *hw_addr = adapter->hw.hw_addr;
3219
3220	printf("em%d: Adapter hardware address = %p\n", unit, hw_addr);
3221	printf("em%d: CTRL = 0x%x\n", unit,
3222	    E1000_READ_REG(&adapter->hw, CTRL));
3223	printf("em%d: RCTL = 0x%x PS=(0x8402)\n", unit,
3224	    E1000_READ_REG(&adapter->hw, RCTL));
3225	printf("em%d: tx_int_delay = %d, tx_abs_int_delay = %d\n", unit,
3226	    E1000_READ_REG(&adapter->hw, TIDV),
3227	    E1000_READ_REG(&adapter->hw, TADV));
3228	printf("em%d: rx_int_delay = %d, rx_abs_int_delay = %d\n", unit,
3229	    E1000_READ_REG(&adapter->hw, RDTR),
3230	    E1000_READ_REG(&adapter->hw, RADV));
3231	printf("em%d: fifo workaround = %lld, fifo_reset = %lld\n", unit,
3232	    (long long)adapter->tx_fifo_wrk_cnt,
3233	    (long long)adapter->tx_fifo_reset_cnt);
3234	printf("em%d: hw tdh = %d, hw tdt = %d\n", unit,
3235	    E1000_READ_REG(&adapter->hw, TDH),
3236	    E1000_READ_REG(&adapter->hw, TDT));
3237	printf("em%d: Num Tx descriptors avail = %d\n", unit,
3238	    adapter->num_tx_desc_avail);
3239	printf("em%d: Tx Descriptors not avail1 = %ld\n", unit,
3240	    adapter->no_tx_desc_avail1);
3241	printf("em%d: Tx Descriptors not avail2 = %ld\n", unit,
3242	    adapter->no_tx_desc_avail2);
3243	printf("em%d: Std mbuf failed = %ld\n", unit,
3244	    adapter->mbuf_alloc_failed);
3245	printf("em%d: Std mbuf cluster failed = %ld\n", unit,
3246	    adapter->mbuf_cluster_failed);
3247	printf("em%d: Driver dropped packets = %ld\n", unit,
3248	    adapter->dropped_pkts);
3249
3250	return;
3251}
3252
3253static void
3254em_print_hw_stats(struct adapter *adapter)
3255{
3256	int unit = adapter->unit;
3257
3258	printf("em%d: Excessive collisions = %lld\n", unit,
3259	    (long long)adapter->stats.ecol);
3260	printf("em%d: Symbol errors = %lld\n", unit,
3261	    (long long)adapter->stats.symerrs);
3262	printf("em%d: Sequence errors = %lld\n", unit,
3263	    (long long)adapter->stats.sec);
3264	printf("em%d: Defer count = %lld\n", unit,
3265	    (long long)adapter->stats.dc);
3266
3267	printf("em%d: Missed Packets = %lld\n", unit,
3268	    (long long)adapter->stats.mpc);
3269	printf("em%d: Receive No Buffers = %lld\n", unit,
3270	    (long long)adapter->stats.rnbc);
3271	printf("em%d: Receive length errors = %lld\n", unit,
3272	    (long long)adapter->stats.rlec);
3273	printf("em%d: Receive errors = %lld\n", unit,
3274	    (long long)adapter->stats.rxerrc);
3275	printf("em%d: Crc errors = %lld\n", unit,
3276	    (long long)adapter->stats.crcerrs);
3277	printf("em%d: Alignment errors = %lld\n", unit,
3278	    (long long)adapter->stats.algnerrc);
3279	printf("em%d: Carrier extension errors = %lld\n", unit,
3280	    (long long)adapter->stats.cexterr);
3281	printf("em%d: RX overruns = %ld\n", unit, adapter->rx_overruns);
3282	printf("em%d: watchdog timeouts = %ld\n", unit,
3283	    adapter->watchdog_events);
3284
3285	printf("em%d: XON Rcvd = %lld\n", unit,
3286	    (long long)adapter->stats.xonrxc);
3287	printf("em%d: XON Xmtd = %lld\n", unit,
3288	    (long long)adapter->stats.xontxc);
3289	printf("em%d: XOFF Rcvd = %lld\n", unit,
3290	    (long long)adapter->stats.xoffrxc);
3291	printf("em%d: XOFF Xmtd = %lld\n", unit,
3292	    (long long)adapter->stats.xofftxc);
3293
3294	printf("em%d: Good Packets Rcvd = %lld\n", unit,
3295	    (long long)adapter->stats.gprc);
3296	printf("em%d: Good Packets Xmtd = %lld\n", unit,
3297	    (long long)adapter->stats.gptc);
3298
3299	return;
3300}
3301
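/*
 * Sysctl handler: writing 1 to the node this handler is attached to dumps
 * the driver debug information (em_print_debug_info) to the console; any
 * other value is accepted and ignored.
 */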
3302static int
3303em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3304{
3305	int error;
3306	int result;
3307	struct adapter *adapter;
3308
3309	result = -1;
3310	error = sysctl_handle_int(oidp, &result, 0, req);
3311
3312	if (error || !req->newptr)
3313		return (error);
3314
3315	if (result == 1) {
3316		adapter = (struct adapter *)arg1;
3317		em_print_debug_info(adapter);
3318	}
3319
3320	return (error);
3321}
3322
3323
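/*
 * Sysctl handler: writing 1 to the node this handler is attached to dumps
 * the hardware statistics (em_print_hw_stats) to the console.
 */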
3324static int
3325em_sysctl_stats(SYSCTL_HANDLER_ARGS)
3326{
3327	int error;
3328	int result;
3329	struct adapter *adapter;
3330
3331	result = -1;
3332	error = sysctl_handle_int(oidp, &result, 0, req);
3333
3334	if (error || !req->newptr)
3335		return (error);
3336
3337	if (result == 1) {
3338		adapter = (struct adapter *)arg1;
3339		em_print_hw_stats(adapter);
3340	}
3341
3342	return (error);
3343}
3344
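/*
 * Sysctl handler for the interrupt delay registers (TIDV/TADV/RDTR/RADV
 * and their 82542 equivalents).  The value is exported in microseconds;
 * on a write it is bounds checked, converted to hardware ticks with
 * E1000_USECS_TO_TICKS() and stored in the low 16 bits of the register
 * identified by info->offset.
 */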
3345static int
3346em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
3347{
3348	struct em_int_delay_info *info;
3349	struct adapter *adapter;
3350	u_int32_t regval;
3351	int error;
3352	int usecs;
3353	int ticks;
3354
3355	info = (struct em_int_delay_info *)arg1;
3356	usecs = info->value;
3357	error = sysctl_handle_int(oidp, &usecs, 0, req);
3358	if (error != 0 || req->newptr == NULL)
3359		return error;
3360	if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
3361		return EINVAL;
3362	info->value = usecs;
3363	ticks = E1000_USECS_TO_TICKS(usecs);
3364
3365	adapter = info->adapter;
3366
3367	EM_LOCK(adapter);
3368	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
3369	regval = (regval & ~0xffff) | (ticks & 0xffff);
3370	/* Handle a few special cases. */
3371	switch (info->offset) {
3372	case E1000_RDTR:
3373	case E1000_82542_RDTR:
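		/*
		 * Keep the FPDB bit set when RDTR is rewritten (the name
		 * suggests "flush partial descriptor block").
		 */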
3374		regval |= E1000_RDT_FPDB;
3375		break;
3376	case E1000_TIDV:
3377	case E1000_82542_TIDV:
3378		if (ticks == 0) {
3379			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
3380			/* Don't write 0 into the TIDV register. */
3381			regval++;
3382		} else
3383			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3384		break;
3385	}
3386	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
3387	EM_UNLOCK(adapter);
3388	return 0;
3389}
3390
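/*
 * Register a read/write integer sysctl node under the device's sysctl tree;
 * reads and writes of the node are serviced by em_sysctl_int_delay() using
 * the given register offset and initial value.
 */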
3391static void
3392em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
3393    const char *description, struct em_int_delay_info *info,
3394    int offset, int value)
3395{
3396	info->adapter = adapter;
3397	info->offset = offset;
3398	info->value = value;
3399	SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
3400	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
3401	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
3402	    info, 0, em_sysctl_int_delay, "I", description);
3403}
3404