if_em.c revision 152247
1/**************************************************************************
2
3Copyright (c) 2001-2005, Intel Corporation
4All rights reserved.
5
6Redistribution and use in source and binary forms, with or without
7modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10    this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13    notice, this list of conditions and the following disclaimer in the
14    documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17    contributors may be used to endorse or promote products derived from
18    this software without specific prior written permission.
19
20THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30POSSIBILITY OF SUCH DAMAGE.
31
32***************************************************************************/
33
34/*$FreeBSD: head/sys/dev/em/if_em.c 152247 2005-11-09 15:23:54Z glebius $*/
35
36#ifdef HAVE_KERNEL_OPTION_HEADERS
37#include "opt_device_polling.h"
38#endif
39
40#include <dev/em/if_em.h>
41
42/*********************************************************************
43 *  Set this to one to display debug statistics
44 *********************************************************************/
45int             em_display_debug_stats = 0;
46
47/*********************************************************************
48 *  Driver version
49 *********************************************************************/
50
51char em_driver_version[] = "2.1.7";
52
53
54/*********************************************************************
55 *  PCI Device ID Table
56 *
57 *  Used by em_probe to select the devices the driver will attach to
58 *  Last field stores an index into em_strings
59 *  Last entry must be all 0s
60 *
61 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
62 *********************************************************************/
63
64static em_vendor_info_t em_vendor_info_array[] =
65{
66        /* Intel(R) PRO/1000 Network Connection */
67        { 0x8086, E1000_DEV_ID_82540EM,             PCI_ANY_ID, PCI_ANY_ID, 0},
68        { 0x8086, E1000_DEV_ID_82540EM_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
69        { 0x8086, E1000_DEV_ID_82540EP,             PCI_ANY_ID, PCI_ANY_ID, 0},
70        { 0x8086, E1000_DEV_ID_82540EP_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
71        { 0x8086, E1000_DEV_ID_82540EP_LP,          PCI_ANY_ID, PCI_ANY_ID, 0},
72
73        { 0x8086, E1000_DEV_ID_82541EI,             PCI_ANY_ID, PCI_ANY_ID, 0},
74        { 0x8086, E1000_DEV_ID_82541ER,             PCI_ANY_ID, PCI_ANY_ID, 0},
75        { 0x8086, E1000_DEV_ID_82541ER_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
76        { 0x8086, E1000_DEV_ID_82541EI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
77        { 0x8086, E1000_DEV_ID_82541GI,             PCI_ANY_ID, PCI_ANY_ID, 0},
78        { 0x8086, E1000_DEV_ID_82541GI_LF,          PCI_ANY_ID, PCI_ANY_ID, 0},
79        { 0x8086, E1000_DEV_ID_82541GI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
80
81        { 0x8086, E1000_DEV_ID_82542,               PCI_ANY_ID, PCI_ANY_ID, 0},
82
83        { 0x8086, E1000_DEV_ID_82543GC_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
84        { 0x8086, E1000_DEV_ID_82543GC_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
85
86        { 0x8086, E1000_DEV_ID_82544EI_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
87        { 0x8086, E1000_DEV_ID_82544EI_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
88        { 0x8086, E1000_DEV_ID_82544GC_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
89        { 0x8086, E1000_DEV_ID_82544GC_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
90
91        { 0x8086, E1000_DEV_ID_82545EM_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
92        { 0x8086, E1000_DEV_ID_82545EM_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
93        { 0x8086, E1000_DEV_ID_82545GM_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
94        { 0x8086, E1000_DEV_ID_82545GM_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
95        { 0x8086, E1000_DEV_ID_82545GM_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
96
97        { 0x8086, E1000_DEV_ID_82546EB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
98        { 0x8086, E1000_DEV_ID_82546EB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
99        { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
100        { 0x8086, E1000_DEV_ID_82546GB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
101        { 0x8086, E1000_DEV_ID_82546GB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
102        { 0x8086, E1000_DEV_ID_82546GB_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
103        { 0x8086, E1000_DEV_ID_82546GB_PCIE,        PCI_ANY_ID, PCI_ANY_ID, 0},
104        { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
105
106        { 0x8086, E1000_DEV_ID_82547EI,             PCI_ANY_ID, PCI_ANY_ID, 0},
107        { 0x8086, E1000_DEV_ID_82547EI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
108        { 0x8086, E1000_DEV_ID_82547GI,             PCI_ANY_ID, PCI_ANY_ID, 0},
109
110        { 0x8086, E1000_DEV_ID_82573E,              PCI_ANY_ID, PCI_ANY_ID, 0},
111        { 0x8086, E1000_DEV_ID_82573E_IAMT,         PCI_ANY_ID, PCI_ANY_ID, 0},
112
113        /* required last entry */
114        { 0, 0, 0, 0, 0}
115};
116
117/*********************************************************************
118 *  Table of branding strings for all supported NICs.
119 *********************************************************************/
120
121static char *em_strings[] = {
122	"Intel(R) PRO/1000 Network Connection"
123};
124
125/*********************************************************************
126 *  Function prototypes
127 *********************************************************************/
128static int  em_probe(device_t);
129static int  em_attach(device_t);
130static int  em_detach(device_t);
131static int  em_shutdown(device_t);
132static void em_intr(void *);
133static void em_start(struct ifnet *);
134static int  em_ioctl(struct ifnet *, u_long, caddr_t);
135static void em_watchdog(struct ifnet *);
136static void em_init(void *);
137static void em_init_locked(struct adapter *);
138static void em_stop(void *);
139static void em_media_status(struct ifnet *, struct ifmediareq *);
140static int  em_media_change(struct ifnet *);
141static void em_identify_hardware(struct adapter *);
142static int  em_allocate_pci_resources(struct adapter *);
143static void em_free_pci_resources(struct adapter *);
144static void em_local_timer(void *);
145static int  em_hardware_init(struct adapter *);
146static void em_setup_interface(device_t, struct adapter *);
147static int  em_setup_transmit_structures(struct adapter *);
148static void em_initialize_transmit_unit(struct adapter *);
149static int  em_setup_receive_structures(struct adapter *);
150static void em_initialize_receive_unit(struct adapter *);
151static void em_enable_intr(struct adapter *);
152static void em_disable_intr(struct adapter *);
153static void em_free_transmit_structures(struct adapter *);
154static void em_free_receive_structures(struct adapter *);
155static void em_update_stats_counters(struct adapter *);
156static void em_clean_transmit_interrupts(struct adapter *);
157static int  em_allocate_receive_structures(struct adapter *);
158static int  em_allocate_transmit_structures(struct adapter *);
159static void em_process_receive_interrupts(struct adapter *, int);
160static void em_receive_checksum(struct adapter *,
161				struct em_rx_desc *,
162				struct mbuf *);
163static void em_transmit_checksum_setup(struct adapter *,
164				       struct mbuf *,
165				       u_int32_t *,
166				       u_int32_t *);
167static void em_set_promisc(struct adapter *);
168static void em_disable_promisc(struct adapter *);
169static void em_set_multi(struct adapter *);
170static void em_print_hw_stats(struct adapter *);
171static void em_print_link_status(struct adapter *);
172static int  em_get_buf(int i, struct adapter *,
173		       struct mbuf *);
174static void em_enable_vlans(struct adapter *);
175static void em_disable_vlans(struct adapter *);
176static int  em_encap(struct adapter *, struct mbuf **);
177static void em_smartspeed(struct adapter *);
178static int  em_82547_fifo_workaround(struct adapter *, int);
179static void em_82547_update_fifo_head(struct adapter *, int);
180static int  em_82547_tx_fifo_reset(struct adapter *);
181static void em_82547_move_tail(void *arg);
182static void em_82547_move_tail_locked(struct adapter *);
183static int  em_dma_malloc(struct adapter *, bus_size_t,
184			  struct em_dma_alloc *, int);
185static void em_dma_free(struct adapter *, struct em_dma_alloc *);
186static void em_print_debug_info(struct adapter *);
187static int  em_is_valid_ether_addr(u_int8_t *);
188static int  em_sysctl_stats(SYSCTL_HANDLER_ARGS);
189static int  em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
190static u_int32_t em_fill_descriptors (bus_addr_t address,
191				      u_int32_t length,
192				      PDESC_ARRAY desc_array);
193static int  em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
194static void em_add_int_delay_sysctl(struct adapter *, const char *,
195				    const char *, struct em_int_delay_info *,
196				    int, int);
197#ifdef DEVICE_POLLING
198static poll_handler_t em_poll;
199#endif
200
201/*********************************************************************
202 *  FreeBSD Device Interface Entry Points
203 *********************************************************************/
204
205static device_method_t em_methods[] = {
206	/* Device interface */
207	DEVMETHOD(device_probe, em_probe),
208	DEVMETHOD(device_attach, em_attach),
209	DEVMETHOD(device_detach, em_detach),
210	DEVMETHOD(device_shutdown, em_shutdown),
211	{0, 0}
212};
213
214static driver_t em_driver = {
215	"em", em_methods, sizeof(struct adapter ),
216};
217
218static devclass_t em_devclass;
219DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
220MODULE_DEPEND(em, pci, 1, 1, 1);
221MODULE_DEPEND(em, ether, 1, 1, 1);
222
223/*********************************************************************
224 *  Tunable default values.
225 *********************************************************************/
226
227#define E1000_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
228#define E1000_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)
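/*
 * These conversions imply a delay-register tick of 1.024 usecs.  For example,
 * E1000_TICKS_TO_USECS(64) = (1024 * 64 + 500) / 1000 = 66 usecs, and
 * E1000_USECS_TO_TICKS(66) = (1000 * 66 + 512) / 1024 = 64 ticks.
 */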
229
230static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
231static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
232static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
233static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
234
235TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
236TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
237TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
238TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
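/*
 * These tunables can be overridden at boot time, e.g. from /boot/loader.conf
 * (values below are illustrative only; units are microseconds):
 *
 *	hw.em.tx_int_delay="66"
 *	hw.em.rx_int_delay="0"
 */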
239
240/*********************************************************************
241 *  Device identification routine
242 *
243 *  em_probe determines if the driver should be loaded on the
244 *  adapter based on the PCI vendor/device ID of the adapter.
245 *
246 *  return BUS_PROBE_DEFAULT on success, positive on failure
247 *********************************************************************/
248
249static int
250em_probe(device_t dev)
251{
252	em_vendor_info_t *ent;
253
254	u_int16_t       pci_vendor_id = 0;
255	u_int16_t       pci_device_id = 0;
256	u_int16_t       pci_subvendor_id = 0;
257	u_int16_t       pci_subdevice_id = 0;
258	char            adapter_name[60];
259
260	INIT_DEBUGOUT("em_probe: begin");
261
262	pci_vendor_id = pci_get_vendor(dev);
263	if (pci_vendor_id != EM_VENDOR_ID)
264		return(ENXIO);
265
266	pci_device_id = pci_get_device(dev);
267	pci_subvendor_id = pci_get_subvendor(dev);
268	pci_subdevice_id = pci_get_subdevice(dev);
269
270	ent = em_vendor_info_array;
271	while (ent->vendor_id != 0) {
272		if ((pci_vendor_id == ent->vendor_id) &&
273		    (pci_device_id == ent->device_id) &&
274
275		    ((pci_subvendor_id == ent->subvendor_id) ||
276		     (ent->subvendor_id == PCI_ANY_ID)) &&
277
278		    ((pci_subdevice_id == ent->subdevice_id) ||
279		     (ent->subdevice_id == PCI_ANY_ID))) {
280			sprintf(adapter_name, "%s, Version - %s",
281				em_strings[ent->index],
282				em_driver_version);
283			device_set_desc_copy(dev, adapter_name);
284			return(BUS_PROBE_DEFAULT);
285		}
286		ent++;
287	}
288
289	return(ENXIO);
290}
291
292/*********************************************************************
293 *  Device initialization routine
294 *
295 *  The attach entry point is called when the driver is being loaded.
296 *  This routine identifies the type of hardware, allocates all resources
297 *  and initializes the hardware.
298 *
299 *  return 0 on success, positive on failure
300 *********************************************************************/
301
302static int
303em_attach(device_t dev)
304{
305	struct adapter * adapter;
306	int             tsize, rsize;
307	int		error = 0;
308
309	INIT_DEBUGOUT("em_attach: begin");
310
311	/* Allocate, clear, and link in our adapter structure */
312	if (!(adapter = device_get_softc(dev))) {
313		printf("em: adapter structure allocation failed\n");
314		return(ENOMEM);
315	}
316	bzero(adapter, sizeof(struct adapter ));
317	adapter->dev = dev;
318	adapter->osdep.dev = dev;
319	adapter->unit = device_get_unit(dev);
320	EM_LOCK_INIT(adapter, device_get_nameunit(dev));
321
322	/* SYSCTL stuff */
323        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
324                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
325                        OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
326                        (void *)adapter, 0,
327                        em_sysctl_debug_info, "I", "Debug Information");
328
329        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
330                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
331                        OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
332                        (void *)adapter, 0,
333                        em_sysctl_stats, "I", "Statistics");
334
335	callout_init(&adapter->timer, CALLOUT_MPSAFE);
336	callout_init(&adapter->tx_fifo_timer, CALLOUT_MPSAFE);
337
338	/* Determine hardware revision */
339	em_identify_hardware(adapter);
340
341	/* Set up some sysctls for the tunable interrupt delays */
342	em_add_int_delay_sysctl(adapter, "rx_int_delay",
343	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
344	    E1000_REG_OFFSET(&adapter->hw, RDTR), em_rx_int_delay_dflt);
345	em_add_int_delay_sysctl(adapter, "tx_int_delay",
346	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
347	    E1000_REG_OFFSET(&adapter->hw, TIDV), em_tx_int_delay_dflt);
348	if (adapter->hw.mac_type >= em_82540) {
349		em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
350		    "receive interrupt delay limit in usecs",
351		    &adapter->rx_abs_int_delay,
352		    E1000_REG_OFFSET(&adapter->hw, RADV),
353		    em_rx_abs_int_delay_dflt);
354		em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
355		    "transmit interrupt delay limit in usecs",
356		    &adapter->tx_abs_int_delay,
357		    E1000_REG_OFFSET(&adapter->hw, TADV),
358		    em_tx_abs_int_delay_dflt);
359	}
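	/*
	 * The interrupt delay sysctls added above are normally visible at
	 * runtime as dev.em.<unit>.rx_int_delay, dev.em.<unit>.tx_int_delay
	 * and so on, e.g.:
	 *
	 *	sysctl dev.em.0.rx_int_delay=0
	 *
	 * (example OID and value shown for illustration only).
	 */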
360
361	/* Parameters (to be read from user) */
362        adapter->num_tx_desc = EM_MAX_TXD;
363        adapter->num_rx_desc = EM_MAX_RXD;
364        adapter->hw.autoneg = DO_AUTO_NEG;
365        adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
366        adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
367        adapter->hw.tbi_compatibility_en = TRUE;
368        adapter->rx_buffer_len = EM_RXBUFFER_2048;
369
370	/*
371         * These parameters control the automatic generation (Tx) of,
372         * and response (Rx) to, Ethernet PAUSE frames.
373         */
374        adapter->hw.fc_high_water = FC_DEFAULT_HI_THRESH;
375        adapter->hw.fc_low_water  = FC_DEFAULT_LO_THRESH;
376        adapter->hw.fc_pause_time = FC_DEFAULT_TX_TIMER;
377        adapter->hw.fc_send_xon   = TRUE;
378        adapter->hw.fc = em_fc_full;
379
380	adapter->hw.phy_init_script = 1;
381	adapter->hw.phy_reset_disable = FALSE;
382
383#ifndef EM_MASTER_SLAVE
384	adapter->hw.master_slave = em_ms_hw_default;
385#else
386	adapter->hw.master_slave = EM_MASTER_SLAVE;
387#endif
388	/*
389	 * Set the max frame size assuming standard ethernet
390	 * sized frames
391	 */
392	adapter->hw.max_frame_size =
393		ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
394
395	adapter->hw.min_frame_size =
396		MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
397
398	/*
399	 * This controls when hardware reports transmit completion
400	 * status.
401	 */
402	adapter->hw.report_tx_early = 1;
403
404
405	if (em_allocate_pci_resources(adapter)) {
406		printf("em%d: Allocation of PCI resources failed\n",
407		       adapter->unit);
408                error = ENXIO;
409                goto err_pci;
410	}
411
412
413	/* Initialize eeprom parameters */
414        em_init_eeprom_params(&adapter->hw);
415
416	tsize = EM_ROUNDUP(adapter->num_tx_desc *
417			   sizeof(struct em_tx_desc), 4096);
418
419	/* Allocate Transmit Descriptor ring */
420        if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
421                printf("em%d: Unable to allocate tx_desc memory\n",
422                       adapter->unit);
423		error = ENOMEM;
424                goto err_tx_desc;
425        }
426        adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;
427
428	rsize = EM_ROUNDUP(adapter->num_rx_desc *
429			   sizeof(struct em_rx_desc), 4096);
430
431	/* Allocate Receive Descriptor ring */
432        if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
433                printf("em%d: Unable to allocate rx_desc memory\n",
434                        adapter->unit);
435		error = ENOMEM;
436                goto err_rx_desc;
437        }
438        adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;
439
440	/* Initialize the hardware */
441	if (em_hardware_init(adapter)) {
442		printf("em%d: Unable to initialize the hardware\n",
443		       adapter->unit);
444		error = EIO;
445                goto err_hw_init;
446	}
447
448	/* Copy the permanent MAC address out of the EEPROM */
449	if (em_read_mac_addr(&adapter->hw) < 0) {
450		printf("em%d: EEPROM read error while reading mac address\n",
451		       adapter->unit);
452		error = EIO;
453                goto err_mac_addr;
454	}
455
456	if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
457                printf("em%d: Invalid mac address\n", adapter->unit);
458                error = EIO;
459                goto err_mac_addr;
460        }
461
462	/* Setup OS specific network interface */
463	em_setup_interface(dev, adapter);
464
465	/* Initialize statistics */
466	em_clear_hw_cntrs(&adapter->hw);
467	em_update_stats_counters(adapter);
468	adapter->hw.get_link_status = 1;
469	em_check_for_link(&adapter->hw);
470
471	if (bootverbose) {
472		/* Print the link status */
473		if (adapter->link_active == 1) {
474			em_get_speed_and_duplex(&adapter->hw,
475			    &adapter->link_speed, &adapter->link_duplex);
476			printf("em%d:  Speed:%d Mbps  Duplex:%s\n",
477			       adapter->unit,
478			       adapter->link_speed,
479			       adapter->link_duplex == FULL_DUPLEX ? "Full" :
480				"Half");
481		} else
482			printf("em%d:  Speed:N/A  Duplex:N/A\n",
483			    adapter->unit);
484	}
485
486	/* Identify 82544 on PCIX */
487        em_get_bus_info(&adapter->hw);
488        if(adapter->hw.bus_type == em_bus_type_pcix &&
489           adapter->hw.mac_type == em_82544) {
490                adapter->pcix_82544 = TRUE;
491        }
492        else {
493                adapter->pcix_82544 = FALSE;
494        }
495	INIT_DEBUGOUT("em_attach: end");
496	return(0);
497
498err_mac_addr:
499err_hw_init:
500        em_dma_free(adapter, &adapter->rxdma);
501err_rx_desc:
502        em_dma_free(adapter, &adapter->txdma);
503err_tx_desc:
504err_pci:
505        em_free_pci_resources(adapter);
506	EM_LOCK_DESTROY(adapter);
507        return(error);
508
509}
510
511/*********************************************************************
512 *  Device removal routine
513 *
514 *  The detach entry point is called when the driver is being removed.
515 *  This routine stops the adapter and deallocates all the resources
516 *  that were allocated for driver operation.
517 *
518 *  return 0 on success, positive on failure
519 *********************************************************************/
520
521static int
522em_detach(device_t dev)
523{
524	struct adapter * adapter = device_get_softc(dev);
525	struct ifnet   *ifp = adapter->ifp;
526
527	INIT_DEBUGOUT("em_detach: begin");
528
529#ifdef DEVICE_POLLING
530	if (ifp->if_capenable & IFCAP_POLLING)
531		ether_poll_deregister(ifp);
532#endif
533
534	EM_LOCK(adapter);
535	adapter->in_detach = 1;
536	em_stop(adapter);
537	em_phy_hw_reset(&adapter->hw);
538	EM_UNLOCK(adapter);
539        ether_ifdetach(adapter->ifp);
540
541	em_free_pci_resources(adapter);
542	bus_generic_detach(dev);
543	if_free(ifp);
544
545	/* Free Transmit Descriptor ring */
546        if (adapter->tx_desc_base) {
547                em_dma_free(adapter, &adapter->txdma);
548                adapter->tx_desc_base = NULL;
549        }
550
551        /* Free Receive Descriptor ring */
552        if (adapter->rx_desc_base) {
553                em_dma_free(adapter, &adapter->rxdma);
554                adapter->rx_desc_base = NULL;
555        }
556
557	EM_LOCK_DESTROY(adapter);
558
559	return(0);
560}
561
562/*********************************************************************
563 *
564 *  Shutdown entry point
565 *
566 **********************************************************************/
567
568static int
569em_shutdown(device_t dev)
570{
571	struct adapter *adapter = device_get_softc(dev);
572	EM_LOCK(adapter);
573	em_stop(adapter);
574	EM_UNLOCK(adapter);
575	return(0);
576}
577
578
579/*********************************************************************
580 *  Transmit entry point
581 *
582 *  em_start is called by the stack to initiate a transmit.
583 *  The driver will remain in this routine as long as there are
584 *  packets to transmit and transmit resources are available.
585 *  If resources are not available, the stack is notified and
586 *  the packet is requeued.
587 **********************************************************************/
588
589static void
590em_start_locked(struct ifnet *ifp)
591{
592        struct mbuf    *m_head;
593        struct adapter *adapter = ifp->if_softc;
594
595	mtx_assert(&adapter->mtx, MA_OWNED);
596
597        if (!adapter->link_active)
598                return;
599
600        while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
601
602                IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
603
604                if (m_head == NULL) break;
605
606		/*
607		 * em_encap() can modify our pointer, and/or make it NULL on
608		 * failure.  In that event, we can't requeue.
609		 */
610		if (em_encap(adapter, &m_head)) {
611			if (m_head == NULL)
612				break;
613			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
614			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
615			break;
616                }
617
618		/* Send a copy of the frame to the BPF listener */
619		BPF_MTAP(ifp, m_head);
620
621                /* Set timeout in case hardware has problems transmitting */
622                ifp->if_timer = EM_TX_TIMEOUT;
623
624        }
625        return;
626}
627
628static void
629em_start(struct ifnet *ifp)
630{
631	struct adapter *adapter = ifp->if_softc;
632
633	EM_LOCK(adapter);
634	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
635		em_start_locked(ifp);
636	EM_UNLOCK(adapter);
637	return;
638}
639
640/*********************************************************************
641 *  Ioctl entry point
642 *
643 *  em_ioctl is called when the user wants to configure the
644 *  interface.
645 *
646 *  return 0 on success, positive on failure
647 **********************************************************************/
648
649static int
650em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
651{
652	int             mask, reinit, error = 0;
653	struct ifreq   *ifr = (struct ifreq *) data;
654	struct adapter * adapter = ifp->if_softc;
655
656	if (adapter->in_detach) return(error);
657
658	switch (command) {
659	case SIOCSIFADDR:
660	case SIOCGIFADDR:
661		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
662		ether_ioctl(ifp, command, data);
663		break;
664	case SIOCSIFMTU:
665		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
666#ifndef __NO_STRICT_ALIGNMENT
667		if (ifr->ifr_mtu > ETHERMTU) {
668			/*
669			 * XXX
670			 * Due to the limitation of DMA engine, it needs fix-up
671			 * code for strict alignment architectures. Disable
672			 * jumbo frame until we have better solutions.
673			 */
674			error = EINVAL;
675		} else
676#endif
677		if (ifr->ifr_mtu > MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN ||
678			/* 82573 does not support jumbo frames */
679			(adapter->hw.mac_type == em_82573 && ifr->ifr_mtu > ETHERMTU) ) {
680			error = EINVAL;
681		} else {
682			EM_LOCK(adapter);
683			ifp->if_mtu = ifr->ifr_mtu;
684			adapter->hw.max_frame_size =
685			ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
686			em_init_locked(adapter);
687			EM_UNLOCK(adapter);
688		}
689		break;
690	case SIOCSIFFLAGS:
691		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
692		EM_LOCK(adapter);
693		if (ifp->if_flags & IFF_UP) {
694			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
695				em_init_locked(adapter);
696			}
697
698			em_disable_promisc(adapter);
699			em_set_promisc(adapter);
700		} else {
701			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
702				em_stop(adapter);
703			}
704		}
705		EM_UNLOCK(adapter);
706		break;
707	case SIOCADDMULTI:
708	case SIOCDELMULTI:
709		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
710		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
711			EM_LOCK(adapter);
712			em_disable_intr(adapter);
713			em_set_multi(adapter);
714			if (adapter->hw.mac_type == em_82542_rev2_0) {
715				em_initialize_receive_unit(adapter);
716			}
717#ifdef DEVICE_POLLING
718                        if (!(ifp->if_capenable & IFCAP_POLLING))
719#endif
720				em_enable_intr(adapter);
721			EM_UNLOCK(adapter);
722		}
723		break;
724	case SIOCSIFMEDIA:
725	case SIOCGIFMEDIA:
726		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
727		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
728		break;
729	case SIOCSIFCAP:
730		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
731		reinit = 0;
732		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
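		/*
		 * IFCAP_POLLING is normally toggled from userland with
		 * ifconfig(8), e.g. "ifconfig em0 polling" or
		 * "ifconfig em0 -polling", which arrives here as a
		 * SIOCSIFCAP request (example commands for illustration).
		 */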
733#ifdef DEVICE_POLLING
734		if (mask & IFCAP_POLLING) {
735			if (ifr->ifr_reqcap & IFCAP_POLLING) {
736				error = ether_poll_register(em_poll, ifp);
737				if (error)
738					return(error);
739				EM_LOCK(adapter);
740				em_disable_intr(adapter);
741				ifp->if_capenable |= IFCAP_POLLING;
742				EM_UNLOCK(adapter);
743			} else {
744				error = ether_poll_deregister(ifp);
745				/* Enable interrupt even in error case */
746				EM_LOCK(adapter);
747				em_enable_intr(adapter);
748				ifp->if_capenable &= ~IFCAP_POLLING;
749				EM_UNLOCK(adapter);
750			}
751		}
752#endif
753		if (mask & IFCAP_HWCSUM) {
754			ifp->if_capenable ^= IFCAP_HWCSUM;
755			reinit = 1;
756		}
757		if (mask & IFCAP_VLAN_HWTAGGING) {
758			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
759			reinit = 1;
760		}
761		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
762			em_init(adapter);
763		break;
764	default:
765		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)", (int)command);
766		error = EINVAL;
767	}
768
769	return(error);
770}
771
772/*********************************************************************
773 *  Watchdog entry point
774 *
775 *  This routine is called whenever hardware quits transmitting.
776 *
777 **********************************************************************/
778
779static void
780em_watchdog(struct ifnet *ifp)
781{
782	struct adapter * adapter;
783	adapter = ifp->if_softc;
784
785	EM_LOCK(adapter);
786	/* If we are in this routine because of pause frames, then
787	 * don't reset the hardware.
788	 */
789	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
790		ifp->if_timer = EM_TX_TIMEOUT;
791		EM_UNLOCK(adapter);
792		return;
793	}
794
795	if (em_check_for_link(&adapter->hw))
796		printf("em%d: watchdog timeout -- resetting\n", adapter->unit);
797
798	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
799	adapter->watchdog_events++;
800
801	em_init_locked(adapter);
802	EM_UNLOCK(adapter);
803}
804
805/*********************************************************************
806 *  Init entry point
807 *
808 *  This routine is used in two ways. It is used by the stack as the
809 *  init entry point in the network interface structure. It is also used
810 *  by the driver as a hw/sw initialization routine to get to a
811 *  consistent state.
812 *
813 *  return 0 on success, positive on failure
814 **********************************************************************/
815
816static void
817em_init_locked(struct adapter * adapter)
818{
819	struct ifnet   *ifp;
820
821	uint32_t	pba;
822	ifp = adapter->ifp;
823
824	INIT_DEBUGOUT("em_init: begin");
825
826	mtx_assert(&adapter->mtx, MA_OWNED);
827
828	em_stop(adapter);
829
830	/* Packet Buffer Allocation (PBA)
831	 * Writing PBA sets the receive portion of the buffer;
832	 * the remainder is used for the transmit buffer.
833	 *
834	 * Devices before the 82547 had a Packet Buffer of 64K.
835	 *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
836	 * After the 82547 the buffer was reduced to 40K.
837	 *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
838	 *   Note: default does not leave enough room for Jumbo Frame >10k.
839	 */
840	if(adapter->hw.mac_type < em_82547) {
841		/* Total FIFO is 64K */
842		if(adapter->rx_buffer_len > EM_RXBUFFER_8192)
843			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
844		else
845			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
846	} else {
847		/* Total FIFO is 40K */
848		if(adapter->hw.max_frame_size > EM_RXBUFFER_8192) {
849			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
850		} else {
851		        pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
852		}
853		adapter->tx_fifo_head = 0;
854		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
855		adapter->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
856	}
857	INIT_DEBUGOUT1("em_init: pba=%dK",pba);
858	E1000_WRITE_REG(&adapter->hw, PBA, pba);
859
860	/* Get the latest MAC address; the user may have configured a LAA */
861        bcopy(IFP2ENADDR(adapter->ifp), adapter->hw.mac_addr,
862              ETHER_ADDR_LEN);
863
864	/* Initialize the hardware */
865	if (em_hardware_init(adapter)) {
866		printf("em%d: Unable to initialize the hardware\n",
867		       adapter->unit);
868		return;
869	}
870
871	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
872		em_enable_vlans(adapter);
873
874	/* Prepare transmit descriptors and buffers */
875	if (em_setup_transmit_structures(adapter)) {
876		printf("em%d: Could not setup transmit structures\n",
877		       adapter->unit);
878		em_stop(adapter);
879		return;
880	}
881	em_initialize_transmit_unit(adapter);
882
883	/* Setup Multicast table */
884	em_set_multi(adapter);
885
886	/* Prepare receive descriptors and buffers */
887	if (em_setup_receive_structures(adapter)) {
888		printf("em%d: Could not setup receive structures\n",
889		       adapter->unit);
890		em_stop(adapter);
891		return;
892	}
893	em_initialize_receive_unit(adapter);
894
895	/* Don't lose promiscuous settings */
896	em_set_promisc(adapter);
897
898	ifp->if_drv_flags |= IFF_DRV_RUNNING;
899	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
900
901	if (adapter->hw.mac_type >= em_82543) {
902		if (ifp->if_capenable & IFCAP_TXCSUM)
903			ifp->if_hwassist = EM_CHECKSUM_FEATURES;
904		else
905			ifp->if_hwassist = 0;
906	}
907
908	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
909	em_clear_hw_cntrs(&adapter->hw);
910#ifdef DEVICE_POLLING
911        /*
912         * Only enable interrupts if we are not polling; make sure
913         * they are off otherwise.
914         */
915        if (ifp->if_capenable & IFCAP_POLLING)
916                em_disable_intr(adapter);
917        else
918#endif /* DEVICE_POLLING */
919		em_enable_intr(adapter);
920
921	/* Don't reset the phy next time init gets called */
922	adapter->hw.phy_reset_disable = TRUE;
923
924	return;
925}
926
927static void
928em_init(void *arg)
929{
930	struct adapter * adapter = arg;
931
932	EM_LOCK(adapter);
933	em_init_locked(adapter);
934	EM_UNLOCK(adapter);
935	return;
936}
937
938
939#ifdef DEVICE_POLLING
940static void
941em_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
942{
943        struct adapter *adapter = ifp->if_softc;
944        u_int32_t reg_icr;
945
946	mtx_assert(&adapter->mtx, MA_OWNED);
947
948        if (cmd == POLL_AND_CHECK_STATUS) {
949                reg_icr = E1000_READ_REG(&adapter->hw, ICR);
950                if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
951			callout_stop(&adapter->timer);
952                        adapter->hw.get_link_status = 1;
953                        em_check_for_link(&adapter->hw);
954                        em_print_link_status(adapter);
955			callout_reset(&adapter->timer, hz, em_local_timer, adapter);
956                }
957        }
958	em_process_receive_interrupts(adapter, count);
959	em_clean_transmit_interrupts(adapter);
960
961        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
962                em_start_locked(ifp);
963}
964
965static void
966em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
967{
968        struct adapter *adapter = ifp->if_softc;
969
970	EM_LOCK(adapter);
971	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
972		em_poll_locked(ifp, cmd, count);
973	EM_UNLOCK(adapter);
974}
975#endif /* DEVICE_POLLING */
976
977/*********************************************************************
978 *
979 *  Interrupt Service routine
980 *
981 **********************************************************************/
982static void
983em_intr(void *arg)
984{
985	struct adapter	*adapter = arg;
986	struct ifnet	*ifp;
987	uint32_t	reg_icr;
988	int		wantinit = 0;
989
990	EM_LOCK(adapter);
991
992	ifp = adapter->ifp;
993
994#ifdef DEVICE_POLLING
995	if (ifp->if_capenable & IFCAP_POLLING) {
996		EM_UNLOCK(adapter);
997		return;
998	}
999#endif /* DEVICE_POLLING */
1000
1001	for (;;) {
1002		reg_icr = E1000_READ_REG(&adapter->hw, ICR);
1003		if (reg_icr == 0)
1004			break;
1005
1006		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1007			em_process_receive_interrupts(adapter, -1);
1008			em_clean_transmit_interrupts(adapter);
1009		}
1010
1011		/* Link status change */
1012		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1013			callout_stop(&adapter->timer);
1014			adapter->hw.get_link_status = 1;
1015			em_check_for_link(&adapter->hw);
1016			em_print_link_status(adapter);
1017			callout_reset(&adapter->timer, hz, em_local_timer,
1018			    adapter);
1019		}
1020
1021		if (reg_icr & E1000_ICR_RXO) {
1022			adapter->rx_overruns++;
1023			wantinit = 1;
1024		}
1025	}
1026#if 0
1027	if (wantinit)
1028		em_init_locked(adapter);
1029#endif
1030	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1031	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1032		em_start_locked(ifp);
1033
1034	EM_UNLOCK(adapter);
1035	return;
1036}
1037
1038
1039
1040/*********************************************************************
1041 *
1042 *  Media Ioctl callback
1043 *
1044 *  This routine is called whenever the user queries the status of
1045 *  the interface using ifconfig.
1046 *
1047 **********************************************************************/
1048static void
1049em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1050{
1051	struct adapter * adapter = ifp->if_softc;
1052
1053	INIT_DEBUGOUT("em_media_status: begin");
1054
1055	em_check_for_link(&adapter->hw);
1056	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1057		if (adapter->link_active == 0) {
1058			em_get_speed_and_duplex(&adapter->hw,
1059						&adapter->link_speed,
1060						&adapter->link_duplex);
1061			adapter->link_active = 1;
1062		}
1063	} else {
1064		if (adapter->link_active == 1) {
1065			adapter->link_speed = 0;
1066			adapter->link_duplex = 0;
1067			adapter->link_active = 0;
1068		}
1069	}
1070
1071	ifmr->ifm_status = IFM_AVALID;
1072	ifmr->ifm_active = IFM_ETHER;
1073
1074	if (!adapter->link_active)
1075		return;
1076
1077	ifmr->ifm_status |= IFM_ACTIVE;
1078
1079	if (adapter->hw.media_type == em_media_type_fiber) {
1080		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1081	} else {
1082		switch (adapter->link_speed) {
1083		case 10:
1084			ifmr->ifm_active |= IFM_10_T;
1085			break;
1086		case 100:
1087			ifmr->ifm_active |= IFM_100_TX;
1088			break;
1089		case 1000:
1090			ifmr->ifm_active |= IFM_1000_T;
1091			break;
1092		}
1093		if (adapter->link_duplex == FULL_DUPLEX)
1094			ifmr->ifm_active |= IFM_FDX;
1095		else
1096			ifmr->ifm_active |= IFM_HDX;
1097	}
1098	return;
1099}
1100
1101/*********************************************************************
1102 *
1103 *  Media Ioctl callback
1104 *
1105 *  This routine is called when the user changes speed/duplex using
1106 *  the media/mediaopt options with ifconfig.
1107 *
1108 **********************************************************************/
1109static int
1110em_media_change(struct ifnet *ifp)
1111{
1112	struct adapter * adapter = ifp->if_softc;
1113	struct ifmedia  *ifm = &adapter->media;
1114
1115	INIT_DEBUGOUT("em_media_change: begin");
1116
1117	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1118		return(EINVAL);
1119
1120	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1121	case IFM_AUTO:
1122		adapter->hw.autoneg = DO_AUTO_NEG;
1123		adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1124		break;
1125	case IFM_1000_SX:
1126	case IFM_1000_T:
1127		adapter->hw.autoneg = DO_AUTO_NEG;
1128		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1129		break;
1130	case IFM_100_TX:
1131		adapter->hw.autoneg = FALSE;
1132		adapter->hw.autoneg_advertised = 0;
1133		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1134			adapter->hw.forced_speed_duplex = em_100_full;
1135		else
1136			adapter->hw.forced_speed_duplex	= em_100_half;
1137		break;
1138	case IFM_10_T:
1139		adapter->hw.autoneg = FALSE;
1140		adapter->hw.autoneg_advertised = 0;
1141		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1142			adapter->hw.forced_speed_duplex = em_10_full;
1143		else
1144			adapter->hw.forced_speed_duplex	= em_10_half;
1145		break;
1146	default:
1147		printf("em%d: Unsupported media type\n", adapter->unit);
1148	}
1149
1150	/* As the speed/duplex settings may have changed, we need to
1151	 * reset the PHY.
1152	 */
1153	adapter->hw.phy_reset_disable = FALSE;
1154
1155	em_init(adapter);
1156
1157	return(0);
1158}
1159
1160/*********************************************************************
1161 *
1162 *  This routine maps the mbufs to tx descriptors.
1163 *
1164 *  return 0 on success, positive on failure
1165 **********************************************************************/
1166static int
1167em_encap(struct adapter *adapter, struct mbuf **m_headp)
1168{
1169        u_int32_t       txd_upper;
1170        u_int32_t       txd_lower, txd_used = 0, txd_saved = 0;
1171        int             i, j, error;
1172
1173	struct mbuf	*m_head;
1174
1175	/* For 82544 Workaround */
1176	DESC_ARRAY              desc_array;
1177	u_int32_t               array_elements;
1178	u_int32_t               counter;
1179        struct m_tag    *mtag;
1180	bus_dma_segment_t	segs[EM_MAX_SCATTER];
1181	bus_dmamap_t		map;
1182	int			nsegs;
1183        struct em_buffer   *tx_buffer = NULL;
1184        struct em_tx_desc *current_tx_desc = NULL;
1185        struct ifnet   *ifp = adapter->ifp;
1186
1187	m_head = *m_headp;
1188
1189        /*
1190         * Force a cleanup if number of TX descriptors
1191         * available hits the threshold
1192         */
1193        if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1194                em_clean_transmit_interrupts(adapter);
1195                if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1196                        adapter->no_tx_desc_avail1++;
1197                        return(ENOBUFS);
1198                }
1199        }
1200
1201        /*
1202         * Map the packet for DMA.
1203         */
1204        if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) {
1205                adapter->no_tx_map_avail++;
1206                return (ENOMEM);
1207        }
1208        error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs,
1209					&nsegs, BUS_DMA_NOWAIT);
1210        if (error != 0) {
1211                adapter->no_tx_dma_setup++;
1212                bus_dmamap_destroy(adapter->txtag, map);
1213                return (error);
1214        }
1215        KASSERT(nsegs != 0, ("em_encap: empty packet"));
1216
1217        if (nsegs > adapter->num_tx_desc_avail) {
1218                adapter->no_tx_desc_avail2++;
1219                bus_dmamap_destroy(adapter->txtag, map);
1220                return (ENOBUFS);
1221        }
1222
1223
1224        if (ifp->if_hwassist > 0) {
1225                em_transmit_checksum_setup(adapter,  m_head,
1226                                           &txd_upper, &txd_lower);
1227        } else
1228                txd_upper = txd_lower = 0;
1229
1230
1231        /* Find out if we are in vlan mode */
1232        mtag = VLAN_OUTPUT_TAG(ifp, m_head);
1233
1234	/*
1235	 * When operating in promiscuous mode, hardware encapsulation for
1236	 * packets is disabled.  This means we have to add the vlan
1237	 * encapsulation in the driver, since it will have come down from the
1238	 * VLAN layer with a tag instead of a VLAN header.
1239	 */
1240	if (mtag != NULL && adapter->em_insert_vlan_header) {
1241		struct ether_vlan_header *evl;
1242		struct ether_header eh;
1243
1244		m_head = m_pullup(m_head, sizeof(eh));
1245		if (m_head == NULL) {
1246			*m_headp = NULL;
1247                	bus_dmamap_destroy(adapter->txtag, map);
1248			return (ENOBUFS);
1249		}
1250		eh = *mtod(m_head, struct ether_header *);
1251		M_PREPEND(m_head, sizeof(*evl), M_DONTWAIT);
1252		if (m_head == NULL) {
1253			*m_headp = NULL;
1254                	bus_dmamap_destroy(adapter->txtag, map);
1255			return (ENOBUFS);
1256		}
1257		m_head = m_pullup(m_head, sizeof(*evl));
1258		if (m_head == NULL) {
1259			*m_headp = NULL;
1260                	bus_dmamap_destroy(adapter->txtag, map);
1261			return (ENOBUFS);
1262		}
1263		evl = mtod(m_head, struct ether_vlan_header *);
1264		bcopy(&eh, evl, sizeof(*evl));
1265		evl->evl_proto = evl->evl_encap_proto;
1266		evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
1267		evl->evl_tag = htons(VLAN_TAG_VALUE(mtag));
1268		m_tag_delete(m_head, mtag);
1269		mtag = NULL;
1270		*m_headp = m_head;
1271	}
1272
1273        i = adapter->next_avail_tx_desc;
1274	if (adapter->pcix_82544) {
1275		txd_saved = i;
1276		txd_used = 0;
1277	}
1278        for (j = 0; j < nsegs; j++) {
1279		/* If adapter is 82544 and on PCIX bus */
1280		if(adapter->pcix_82544) {
1281			/*
1282			 * Check the Address and Length combination and
1283			 * split the data accordingly
1284			 */
1285                        array_elements = em_fill_descriptors(segs[j].ds_addr,
1286			    segs[j].ds_len, &desc_array);
1287			for (counter = 0; counter < array_elements; counter++) {
1288                                if (txd_used == adapter->num_tx_desc_avail) {
1289                                         adapter->next_avail_tx_desc = txd_saved;
1290                                          adapter->no_tx_desc_avail2++;
1291					  bus_dmamap_destroy(adapter->txtag, map);
1292                                          return (ENOBUFS);
1293                                }
1294                                tx_buffer = &adapter->tx_buffer_area[i];
1295                                current_tx_desc = &adapter->tx_desc_base[i];
1296                                current_tx_desc->buffer_addr = htole64(
1297					desc_array.descriptor[counter].address);
1298                                current_tx_desc->lower.data = htole32(
1299					(adapter->txd_cmd | txd_lower |
1300					 (u_int16_t)desc_array.descriptor[counter].length));
1301                                current_tx_desc->upper.data = htole32((txd_upper));
1302                                if (++i == adapter->num_tx_desc)
1303                                         i = 0;
1304
1305                                tx_buffer->m_head = NULL;
1306                                txd_used++;
1307                        }
1308		} else {
1309			tx_buffer = &adapter->tx_buffer_area[i];
1310			current_tx_desc = &adapter->tx_desc_base[i];
1311
1312			current_tx_desc->buffer_addr = htole64(segs[j].ds_addr);
1313			current_tx_desc->lower.data = htole32(
1314				adapter->txd_cmd | txd_lower | segs[j].ds_len);
1315			current_tx_desc->upper.data = htole32(txd_upper);
1316
1317			if (++i == adapter->num_tx_desc)
1318				i = 0;
1319
1320			tx_buffer->m_head = NULL;
1321		}
1322        }
1323
1324	adapter->next_avail_tx_desc = i;
1325	if (adapter->pcix_82544) {
1326		adapter->num_tx_desc_avail -= txd_used;
1327	}
1328	else {
1329		adapter->num_tx_desc_avail -= nsegs;
1330	}
1331
1332        if (mtag != NULL) {
1333                /* Set the vlan id */
1334                current_tx_desc->upper.fields.special = htole16(VLAN_TAG_VALUE(mtag));
1335
1336                /* Tell hardware to add tag */
1337                current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
1338        }
1339
1340        tx_buffer->m_head = m_head;
1341        tx_buffer->map = map;
1342        bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1343
1344        /*
1345         * Last Descriptor of Packet needs End Of Packet (EOP)
1346         */
1347        current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);
1348
1349        /*
1350         * Advance the Transmit Descriptor Tail (TDT); this tells the E1000
1351         * that this frame is available to transmit.
1352         */
1353        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1354            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1355        if (adapter->hw.mac_type == em_82547 &&
1356            adapter->link_duplex == HALF_DUPLEX) {
1357                em_82547_move_tail_locked(adapter);
1358        } else {
1359                E1000_WRITE_REG(&adapter->hw, TDT, i);
1360                if (adapter->hw.mac_type == em_82547) {
1361                        em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len);
1362                }
1363        }
1364
1365        return(0);
1366}
1367
1368/*********************************************************************
1369 *
1370 * 82547 workaround to avoid controller hang in half-duplex environment.
1371 * The workaround is to avoid queuing a large packet that would span
1372 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1373 * in this case. We do that only when the FIFO is quiescent.
1374 *
1375 **********************************************************************/
1376static void
1377em_82547_move_tail_locked(struct adapter *adapter)
1378{
1379	uint16_t hw_tdt;
1380	uint16_t sw_tdt;
1381	struct em_tx_desc *tx_desc;
1382	uint16_t length = 0;
1383	boolean_t eop = 0;
1384
1385	EM_LOCK_ASSERT(adapter);
1386
1387	hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
1388	sw_tdt = adapter->next_avail_tx_desc;
1389
1390	while (hw_tdt != sw_tdt) {
1391		tx_desc = &adapter->tx_desc_base[hw_tdt];
1392		length += tx_desc->lower.flags.length;
1393		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1394		if(++hw_tdt == adapter->num_tx_desc)
1395			hw_tdt = 0;
1396
1397		if(eop) {
1398			if (em_82547_fifo_workaround(adapter, length)) {
1399				adapter->tx_fifo_wrk_cnt++;
1400				callout_reset(&adapter->tx_fifo_timer, 1,
1401					em_82547_move_tail, adapter);
1402				break;
1403			}
1404			E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
1405			em_82547_update_fifo_head(adapter, length);
1406			length = 0;
1407		}
1408	}
1409	return;
1410}
1411
1412static void
1413em_82547_move_tail(void *arg)
1414{
1415        struct adapter *adapter = arg;
1416
1417        EM_LOCK(adapter);
1418        em_82547_move_tail_locked(adapter);
1419        EM_UNLOCK(adapter);
1420}
1421
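/*
 * Returns 0 if the frame of length 'len' may be queued now, or 1 if the
 * caller should defer: in half-duplex mode a packet that would span the
 * internal Tx FIFO wrap boundary triggers a FIFO reset attempt, and if the
 * FIFO is not yet quiescent the reset fails and the tail move is retried
 * later from the tx_fifo_timer callout.
 */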
1422static int
1423em_82547_fifo_workaround(struct adapter *adapter, int len)
1424{
1425	int fifo_space, fifo_pkt_len;
1426
1427	fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1428
1429	if (adapter->link_duplex == HALF_DUPLEX) {
1430		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1431
1432		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1433			if (em_82547_tx_fifo_reset(adapter)) {
1434				return(0);
1435			}
1436			else {
1437				return(1);
1438			}
1439		}
1440	}
1441
1442	return(0);
1443}
1444
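/*
 * Advance the driver's copy of the hardware Tx FIFO head by the rounded-up
 * length of the packet just queued, wrapping at the FIFO size.
 */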
1445static void
1446em_82547_update_fifo_head(struct adapter *adapter, int len)
1447{
1448	int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1449
1450	/* tx_fifo_head is always 16 byte aligned */
1451	adapter->tx_fifo_head += fifo_pkt_len;
1452	if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1453		adapter->tx_fifo_head -= adapter->tx_fifo_size;
1454	}
1455
1456	return;
1457}
1458
1459
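/*
 * Reset the internal Tx FIFO pointers back to the start of the transmit
 * portion of the packet buffer, but only when both the descriptor ring and
 * the FIFO are completely drained; returns TRUE if the reset was performed.
 */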
1460static int
1461em_82547_tx_fifo_reset(struct adapter *adapter)
1462{
1463	uint32_t tctl;
1464
1465	if ( (E1000_READ_REG(&adapter->hw, TDT) ==
1466	      E1000_READ_REG(&adapter->hw, TDH)) &&
1467	     (E1000_READ_REG(&adapter->hw, TDFT) ==
1468	      E1000_READ_REG(&adapter->hw, TDFH)) &&
1469	     (E1000_READ_REG(&adapter->hw, TDFTS) ==
1470	      E1000_READ_REG(&adapter->hw, TDFHS)) &&
1471	     (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {
1472
1473		/* Disable TX unit */
1474		tctl = E1000_READ_REG(&adapter->hw, TCTL);
1475		E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);
1476
1477		/* Reset FIFO pointers */
1478		E1000_WRITE_REG(&adapter->hw, TDFT,  adapter->tx_head_addr);
1479		E1000_WRITE_REG(&adapter->hw, TDFH,  adapter->tx_head_addr);
1480		E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
1481		E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);
1482
1483		/* Re-enable TX unit */
1484		E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1485		E1000_WRITE_FLUSH(&adapter->hw);
1486
1487		adapter->tx_fifo_head = 0;
1488		adapter->tx_fifo_reset_cnt++;
1489
1490		return(TRUE);
1491	}
1492	else {
1493		return(FALSE);
1494	}
1495}
1496
1497static void
1498em_set_promisc(struct adapter * adapter)
1499{
1500
1501	u_int32_t       reg_rctl;
1502	struct ifnet   *ifp = adapter->ifp;
1503
1504	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1505
1506	if (ifp->if_flags & IFF_PROMISC) {
1507		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1508		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1509		/* Disable VLAN stripping in promiscuous mode.
1510		 * This allows bridging of VLAN-tagged frames to occur
1511		 * and also allows VLAN tags to be seen in tcpdump.
1512		 */
1513		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1514			em_disable_vlans(adapter);
1515		adapter->em_insert_vlan_header = 1;
1516	} else if (ifp->if_flags & IFF_ALLMULTI) {
1517		reg_rctl |= E1000_RCTL_MPE;
1518		reg_rctl &= ~E1000_RCTL_UPE;
1519		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1520		adapter->em_insert_vlan_header = 0;
1521	} else
1522		adapter->em_insert_vlan_header = 0;
1523
1524	return;
1525}
1526
1527static void
1528em_disable_promisc(struct adapter * adapter)
1529{
1530	u_int32_t       reg_rctl;
1531	struct ifnet   *ifp = adapter->ifp;
1532
1533	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1534
1535	reg_rctl &=  (~E1000_RCTL_UPE);
1536	reg_rctl &=  (~E1000_RCTL_MPE);
1537	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1538
1539	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1540		em_enable_vlans(adapter);
1541	adapter->em_insert_vlan_header = 0;
1542
1543	return;
1544}
1545
1546
1547/*********************************************************************
1548 *  Multicast Update
1549 *
1550 *  This routine is called whenever multicast address list is updated.
1551 *
1552 **********************************************************************/
1553
1554static void
1555em_set_multi(struct adapter * adapter)
1556{
1557        u_int32_t reg_rctl = 0;
1558        u_int8_t  mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
1559        struct ifmultiaddr  *ifma;
1560        int mcnt = 0;
1561        struct ifnet   *ifp = adapter->ifp;
1562
1563        IOCTL_DEBUGOUT("em_set_multi: begin");
1564
1565        if (adapter->hw.mac_type == em_82542_rev2_0) {
1566                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1567                if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
1568                        em_pci_clear_mwi(&adapter->hw);
1569                }
1570                reg_rctl |= E1000_RCTL_RST;
1571                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1572                msec_delay(5);
1573        }
1574
1575	IF_ADDR_LOCK(ifp);
1576        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1577                if (ifma->ifma_addr->sa_family != AF_LINK)
1578                        continue;
1579
1580		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break;
1581
1582                bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1583                      &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
1584                mcnt++;
1585        }
1586	IF_ADDR_UNLOCK(ifp);
1587
1588        if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1589                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1590                reg_rctl |= E1000_RCTL_MPE;
1591                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1592        } else
1593                em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);
1594
1595        if (adapter->hw.mac_type == em_82542_rev2_0) {
1596                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1597                reg_rctl &= ~E1000_RCTL_RST;
1598                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1599                msec_delay(5);
1600                if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
1601                        em_pci_set_mwi(&adapter->hw);
1602                }
1603        }
1604
1605        return;
1606}
1607
1608
1609/*********************************************************************
1610 *  Timer routine
1611 *
1612 *  This routine checks for link status and updates statistics.
1613 *
1614 **********************************************************************/
1615
1616static void
1617em_local_timer(void *arg)
1618{
1619	struct ifnet   *ifp;
1620	struct adapter * adapter = arg;
1621	ifp = adapter->ifp;
1622
1623	EM_LOCK(adapter);
1624
1625	em_check_for_link(&adapter->hw);
1626	em_print_link_status(adapter);
1627	em_update_stats_counters(adapter);
1628	if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1629		em_print_hw_stats(adapter);
1630	}
1631	em_smartspeed(adapter);
1632
1633	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1634
1635	EM_UNLOCK(adapter);
1636	return;
1637}
1638
1639static void
1640em_print_link_status(struct adapter * adapter)
1641{
1642	struct ifnet *ifp = adapter->ifp;
1643
1644	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1645		if (adapter->link_active == 0) {
1646			em_get_speed_and_duplex(&adapter->hw,
1647						&adapter->link_speed,
1648						&adapter->link_duplex);
1649			if (bootverbose)
1650				printf("em%d: Link is up %d Mbps %s\n",
1651				       adapter->unit,
1652				       adapter->link_speed,
1653				       ((adapter->link_duplex == FULL_DUPLEX) ?
1654					"Full Duplex" : "Half Duplex"));
1655			adapter->link_active = 1;
1656			adapter->smartspeed = 0;
1657			if_link_state_change(ifp, LINK_STATE_UP);
1658		}
1659	} else {
1660		if (adapter->link_active == 1) {
1661			adapter->link_speed = 0;
1662			adapter->link_duplex = 0;
1663			if (bootverbose)
1664				printf("em%d: Link is Down\n", adapter->unit);
1665			adapter->link_active = 0;
1666			if_link_state_change(ifp, LINK_STATE_DOWN);
1667		}
1668	}
1669
1670	return;
1671}
1672
1673/*********************************************************************
1674 *
1675 *  This routine disables all traffic on the adapter by issuing a
1676 *  global reset on the MAC and deallocates TX/RX buffers.
1677 *
1678 **********************************************************************/
1679
1680static void
1681em_stop(void *arg)
1682{
1683	struct ifnet   *ifp;
1684	struct adapter * adapter = arg;
1685	ifp = adapter->ifp;
1686
1687	mtx_assert(&adapter->mtx, MA_OWNED);
1688
1689	INIT_DEBUGOUT("em_stop: begin");
1690
1691	em_disable_intr(adapter);
1692	em_reset_hw(&adapter->hw);
1693	callout_stop(&adapter->timer);
1694	callout_stop(&adapter->tx_fifo_timer);
1695	em_free_transmit_structures(adapter);
1696	em_free_receive_structures(adapter);
1697
1698
1699	/* Tell the stack that the interface is no longer active */
1700	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1701
1702	return;
1703}
1704
1705
1706/*********************************************************************
1707 *
1708 *  Determine hardware revision.
1709 *
1710 **********************************************************************/
1711static void
1712em_identify_hardware(struct adapter * adapter)
1713{
1714	device_t dev = adapter->dev;
1715
1716	/* Make sure our PCI config space has the necessary stuff set */
1717	adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1718	if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1719	      (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1720		printf("em%d: Memory Access and/or Bus Master bits were not set!\n",
1721		       adapter->unit);
1722		adapter->hw.pci_cmd_word |=
1723		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1724		pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
1725	}
1726
1727	/* Save off the information about this board */
1728	adapter->hw.vendor_id = pci_get_vendor(dev);
1729	adapter->hw.device_id = pci_get_device(dev);
1730	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1731	adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
1732	adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
1733
1734	/* Identify the MAC */
1735        if (em_set_mac_type(&adapter->hw))
1736                printf("em%d: Unknown MAC Type\n", adapter->unit);
1737
1738	if(adapter->hw.mac_type == em_82541 ||
1739	   adapter->hw.mac_type == em_82541_rev_2 ||
1740	   adapter->hw.mac_type == em_82547 ||
1741	   adapter->hw.mac_type == em_82547_rev_2)
1742		adapter->hw.phy_init_script = TRUE;
1743
1744        return;
1745}
1746
1747static int
1748em_allocate_pci_resources(struct adapter * adapter)
1749{
1750	int             val, rid;
1751	device_t        dev = adapter->dev;
1752
1753	rid = PCIR_BAR(0);
1754	adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1755						     &rid, RF_ACTIVE);
1756	if (!(adapter->res_memory)) {
1757		printf("em%d: Unable to allocate bus resource: memory\n",
1758		       adapter->unit);
1759		return(ENXIO);
1760	}
1761	adapter->osdep.mem_bus_space_tag =
1762	rman_get_bustag(adapter->res_memory);
1763	adapter->osdep.mem_bus_space_handle =
1764	rman_get_bushandle(adapter->res_memory);
1765	adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
1766
1767
1768	if (adapter->hw.mac_type > em_82543) {
1769		/* Figure out where our IO BAR is */
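		/*
		 * The loop below walks the configuration-space BARs a dword
		 * at a time until an I/O-type BAR is found, giving up at the
		 * CIS pointer offset.
		 */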
1770		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
1771			val = pci_read_config(dev, rid, 4);
1772			if (E1000_BAR_TYPE(val) == E1000_BAR_TYPE_IO) {
1773				adapter->io_rid = rid;
1774				break;
1775			}
1776			rid += 4;
1777			/* check for 64bit BAR */
1778			if (E1000_BAR_MEM_TYPE(val) == E1000_BAR_MEM_TYPE_64BIT)
1779				rid += 4;
1780		}
1781		if (rid >= PCIR_CIS) {
1782			printf("em%d: Unable to locate IO BAR\n", adapter->unit);
1783			return (ENXIO);
1784		}
1785		adapter->res_ioport = bus_alloc_resource_any(dev,
1786							     SYS_RES_IOPORT,
1787							     &adapter->io_rid,
1788							     RF_ACTIVE);
1789		if (!(adapter->res_ioport)) {
1790			printf("em%d: Unable to allocate bus resource: ioport\n",
1791			       adapter->unit);
1792			return(ENXIO);
1793		}
1794		adapter->hw.io_base = 0;
1795		adapter->osdep.io_bus_space_tag =
1796		    rman_get_bustag(adapter->res_ioport);
1797		adapter->osdep.io_bus_space_handle =
1798		    rman_get_bushandle(adapter->res_ioport);
1799	}
1800
1801	rid = 0x0;
1802	adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1803						        RF_SHAREABLE |
1804							RF_ACTIVE);
1805	if (!(adapter->res_interrupt)) {
1806		printf("em%d: Unable to allocate bus resource: interrupt\n",
1807		       adapter->unit);
1808		return(ENXIO);
1809	}
1810	if (bus_setup_intr(dev, adapter->res_interrupt,
1811			   INTR_TYPE_NET | INTR_MPSAFE,
1812			   (void (*)(void *)) em_intr, adapter,
1813			   &adapter->int_handler_tag)) {
1814		printf("em%d: Error registering interrupt handler!\n",
1815		       adapter->unit);
1816		return(ENXIO);
1817	}
1818
1819	adapter->hw.back = &adapter->osdep;
1820
1821	return(0);
1822}
1823
1824static void
1825em_free_pci_resources(struct adapter * adapter)
1826{
1827	device_t dev = adapter->dev;
1828
1829	if (adapter->res_interrupt != NULL) {
1830		bus_teardown_intr(dev, adapter->res_interrupt,
1831				  adapter->int_handler_tag);
1832		bus_release_resource(dev, SYS_RES_IRQ, 0,
1833				     adapter->res_interrupt);
1834	}
1835	if (adapter->res_memory != NULL) {
1836		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
1837				     adapter->res_memory);
1838	}
1839
1840	if (adapter->res_ioport != NULL) {
1841		bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
1842				     adapter->res_ioport);
1843	}
1844	return;
1845}
1846
1847/*********************************************************************
1848 *
1849 *  Initialize the hardware to a configuration as specified by the
1850 *  adapter structure. The controller is reset, the EEPROM is
1851 *  verified, the MAC address is set, then the shared initialization
1852 *  routines are called.
1853 *
1854 **********************************************************************/
1855static int
1856em_hardware_init(struct adapter * adapter)
1857{
1858        INIT_DEBUGOUT("em_hardware_init: begin");
1859	/* Issue a global reset */
1860	em_reset_hw(&adapter->hw);
1861
1862	/* When hardware is reset, fifo_head is also reset */
1863	adapter->tx_fifo_head = 0;
1864
1865	/* Make sure we have a good EEPROM before we read from it */
1866	if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
1867		printf("em%d: The EEPROM Checksum Is Not Valid\n",
1868		       adapter->unit);
1869		return(EIO);
1870	}
1871
1872	if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
1873		printf("em%d: EEPROM read error while reading part number\n",
1874		       adapter->unit);
1875		return(EIO);
1876	}
1877
1878	if (em_init_hw(&adapter->hw) < 0) {
1879		printf("em%d: Hardware Initialization Failed\n",
1880		       adapter->unit);
1881		return(EIO);
1882	}
1883
1884	em_check_for_link(&adapter->hw);
1885	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
1886		adapter->link_active = 1;
1887	else
1888		adapter->link_active = 0;
1889
1890	if (adapter->link_active) {
1891		em_get_speed_and_duplex(&adapter->hw,
1892					&adapter->link_speed,
1893					&adapter->link_duplex);
1894	} else {
1895		adapter->link_speed = 0;
1896		adapter->link_duplex = 0;
1897	}
1898
1899	return(0);
1900}
1901
1902/*********************************************************************
1903 *
1904 *  Setup networking device structure and register an interface.
1905 *
1906 **********************************************************************/
1907static void
1908em_setup_interface(device_t dev, struct adapter * adapter)
1909{
1910	struct ifnet   *ifp;
1911	INIT_DEBUGOUT("em_setup_interface: begin");
1912
1913	ifp = adapter->ifp = if_alloc(IFT_ETHER);
1914	if (ifp == NULL)
1915		panic("%s: can not if_alloc()", device_get_nameunit(dev));
1916	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1917	ifp->if_mtu = ETHERMTU;
1918	ifp->if_baudrate = 1000000000;
1919	ifp->if_init =  em_init;
1920	ifp->if_softc = adapter;
1921	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1922	ifp->if_ioctl = em_ioctl;
1923	ifp->if_start = em_start;
1924	ifp->if_watchdog = em_watchdog;
1925	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
1926	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
1927	IFQ_SET_READY(&ifp->if_snd);
1928
1929        ether_ifattach(ifp, adapter->hw.mac_addr);
1930
1931	ifp->if_capabilities = ifp->if_capenable = 0;
1932
1933	if (adapter->hw.mac_type >= em_82543) {
1934		ifp->if_capabilities |= IFCAP_HWCSUM;
1935		ifp->if_capenable |= IFCAP_HWCSUM;
1936	}
1937
1938	/*
1939	 * Tell the upper layer(s) we support long frames.
1940	 */
1941	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1942	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1943	ifp->if_capenable |= IFCAP_VLAN_MTU;
1944
1945#ifdef DEVICE_POLLING
1946	ifp->if_capabilities |= IFCAP_POLLING;
1947#endif
1948
1949	/*
1950	 * Specify the media types supported by this adapter and register
1951	 * callbacks to update media and link information
1952	 */
1953	ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
1954		     em_media_status);
1955	if (adapter->hw.media_type == em_media_type_fiber) {
1956		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
1957			    0, NULL);
1958		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
1959			    0, NULL);
1960	} else {
1961		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1962		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
1963			    0, NULL);
1964		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
1965			    0, NULL);
1966		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
1967			    0, NULL);
1968		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1969			    0, NULL);
1970		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1971	}
1972	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1973	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1974
1975	return;
1976}
1977
1978
1979/*********************************************************************
1980 *
1981 *  Workaround for SmartSpeed on 82541 and 82547 controllers
1982 *
1983 **********************************************************************/
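/*
 * Invoked from the local timer while the link is down: on IGP PHYs that
 * advertise 1000BASE-T full duplex, repeated master/slave configuration
 * faults are handled by toggling the manual master/slave setting and
 * restarting autonegotiation.
 */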
1984static void
1985em_smartspeed(struct adapter *adapter)
1986{
1987        uint16_t phy_tmp;
1988
1989	if(adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
1990	   !adapter->hw.autoneg || !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
1991		return;
1992
1993        if(adapter->smartspeed == 0) {
1994                /* If the Master/Slave config fault is asserted on two
1995                 * consecutive reads, assume back-to-back faults */
1996                em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1997                if(!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) return;
1998                em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1999                if(phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2000                        em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
2001					&phy_tmp);
2002                        if(phy_tmp & CR_1000T_MS_ENABLE) {
2003                                phy_tmp &= ~CR_1000T_MS_ENABLE;
2004                                em_write_phy_reg(&adapter->hw,
2005                                                    PHY_1000T_CTRL, phy_tmp);
2006                                adapter->smartspeed++;
2007                                if(adapter->hw.autoneg &&
2008                                   !em_phy_setup_autoneg(&adapter->hw) &&
2009				   !em_read_phy_reg(&adapter->hw, PHY_CTRL,
2010                                                       &phy_tmp)) {
2011                                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
2012                                                    MII_CR_RESTART_AUTO_NEG);
2013                                        em_write_phy_reg(&adapter->hw,
2014							 PHY_CTRL, phy_tmp);
2015                                }
2016                        }
2017                }
2018                return;
2019        } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2020                /* If still no link, perhaps using 2/3 pair cable */
2021                em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2022                phy_tmp |= CR_1000T_MS_ENABLE;
2023                em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2024                if(adapter->hw.autoneg &&
2025                   !em_phy_setup_autoneg(&adapter->hw) &&
2026                   !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
2027                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
2028                                    MII_CR_RESTART_AUTO_NEG);
2029                        em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
2030                }
2031        }
2032        /* Restart process after EM_SMARTSPEED_MAX iterations */
2033        if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2034                adapter->smartspeed = 0;
2035
2036	return;
2037}
2038
2039
2040/*
2041 * Manage DMA'able memory.
2042 */
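/*
 * em_dmamap_cb() is the bus_dmamap_load() callback; it records the bus
 * address of the first segment in the caller-supplied bus_addr_t.  The tags
 * used with it are created with nsegments == 1, so one segment is all
 * there is.
 */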
2043static void
2044em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2045{
2046        if (error)
2047                return;
2048        *(bus_addr_t*) arg = segs->ds_addr;
2049        return;
2050}
2051
2052static int
2053em_dma_malloc(struct adapter *adapter, bus_size_t size,
2054        struct em_dma_alloc *dma, int mapflags)
2055{
2056        int r;
2057
2058        r = bus_dma_tag_create(NULL,                    /* parent */
2059                               PAGE_SIZE, 0,            /* alignment, bounds */
2060                               BUS_SPACE_MAXADDR,       /* lowaddr */
2061                               BUS_SPACE_MAXADDR,       /* highaddr */
2062                               NULL, NULL,              /* filter, filterarg */
2063                               size,                    /* maxsize */
2064                               1,                       /* nsegments */
2065                               size,                    /* maxsegsize */
2066                               BUS_DMA_ALLOCNOW,        /* flags */
2067			       NULL,			/* lockfunc */
2068			       NULL,			/* lockarg */
2069                               &dma->dma_tag);
2070        if (r != 0) {
2071                printf("em%d: em_dma_malloc: bus_dma_tag_create failed; "
2072                        "error %u\n", adapter->unit, r);
2073                goto fail_0;
2074        }
2075
2076        r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2077                             BUS_DMA_NOWAIT, &dma->dma_map);
2078        if (r != 0) {
2079                printf("em%d: em_dma_malloc: bus_dmamem_alloc failed; "
2080                        "size %ju, error %d\n", adapter->unit,
2081			(uintmax_t)size, r);
2082                goto fail_2;
2083        }
2084
2085	dma->dma_paddr = 0;
2086        r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2087                            size,
2088                            em_dmamap_cb,
2089                            &dma->dma_paddr,
2090                            mapflags | BUS_DMA_NOWAIT);
2091        if (r != 0 || dma->dma_paddr == 0) {
2092                printf("em%d: em_dma_malloc: bus_dmamap_load failed; "
2093                        "error %u\n", adapter->unit, r);
2094                goto fail_3;
2095        }
2096
2097        return (0);
2098
2099fail_3:
2100        bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2101fail_2:
2102        bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2103        bus_dma_tag_destroy(dma->dma_tag);
2104fail_0:
2105        dma->dma_map = NULL;
2106        dma->dma_tag = NULL;
2107        return (r);
2108}
2109
2110static void
2111em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2112{
2113        bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2114        bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2115        bus_dma_tag_destroy(dma->dma_tag);
2116}
2117
2118
2119/*********************************************************************
2120 *
2121 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2122 *  the information needed to transmit a packet on the wire.
2123 *
2124 **********************************************************************/
2125static int
2126em_allocate_transmit_structures(struct adapter * adapter)
2127{
2128	if (!(adapter->tx_buffer_area =
2129	      (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2130					     adapter->num_tx_desc, M_DEVBUF,
2131					     M_NOWAIT))) {
2132		printf("em%d: Unable to allocate tx_buffer memory\n",
2133		       adapter->unit);
2134		return ENOMEM;
2135	}
2136
2137	bzero(adapter->tx_buffer_area,
2138	      sizeof(struct em_buffer) * adapter->num_tx_desc);
2139
2140	return 0;
2141}
2142
2143/*********************************************************************
2144 *
2145 *  Allocate and initialize transmit structures.
2146 *
2147 **********************************************************************/
2148static int
2149em_setup_transmit_structures(struct adapter * adapter)
2150{
2151        /*
2152         * Setup DMA descriptor areas.
2153         */
2154        if (bus_dma_tag_create(NULL,                    /* parent */
2155                               1, 0,                    /* alignment, bounds */
2156                               BUS_SPACE_MAXADDR,       /* lowaddr */
2157                               BUS_SPACE_MAXADDR,       /* highaddr */
2158                               NULL, NULL,              /* filter, filterarg */
2159                               MCLBYTES * 8,            /* maxsize */
2160                               EM_MAX_SCATTER,          /* nsegments */
2161                               MCLBYTES * 8,            /* maxsegsize */
2162                               BUS_DMA_ALLOCNOW,        /* flags */
2163			       NULL,			/* lockfunc */
2164			       NULL,			/* lockarg */
2165                               &adapter->txtag)) {
2166                printf("em%d: Unable to allocate TX DMA tag\n", adapter->unit);
2167                return (ENOMEM);
2168        }
2169
2170        if (em_allocate_transmit_structures(adapter))
2171                return (ENOMEM);
2172
2173        bzero((void *) adapter->tx_desc_base,
2174              (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
2175
2176        adapter->next_avail_tx_desc = 0;
2177        adapter->oldest_used_tx_desc = 0;
2178
2179        /* Set number of descriptors available */
2180        adapter->num_tx_desc_avail = adapter->num_tx_desc;
2181
2182        /* Set checksum context */
2183        adapter->active_checksum_context = OFFLOAD_NONE;
2184
2185        return (0);
2186}
2187
2188/*********************************************************************
2189 *
2190 *  Enable transmit unit.
2191 *
2192 **********************************************************************/
2193static void
2194em_initialize_transmit_unit(struct adapter * adapter)
2195{
2196	u_int32_t       reg_tctl;
2197	u_int32_t       reg_tipg = 0;
2198	u_int64_t	bus_addr;
2199
2200         INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
2201	/* Setup the Base and Length of the Tx Descriptor Ring */
2202	bus_addr = adapter->txdma.dma_paddr;
2203	E1000_WRITE_REG(&adapter->hw, TDBAL, (u_int32_t)bus_addr);
2204	E1000_WRITE_REG(&adapter->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
2205	E1000_WRITE_REG(&adapter->hw, TDLEN,
2206			adapter->num_tx_desc *
2207			sizeof(struct em_tx_desc));
2208
2209	/* Setup the HW Tx Head and Tail descriptor pointers */
2210	E1000_WRITE_REG(&adapter->hw, TDH, 0);
2211	E1000_WRITE_REG(&adapter->hw, TDT, 0);
2212
2213
2214	HW_DEBUGOUT2("Base = %x, Length = %x\n",
2215		     E1000_READ_REG(&adapter->hw, TDBAL),
2216		     E1000_READ_REG(&adapter->hw, TDLEN));
2217
2218	/* Set the default values for the Tx Inter Packet Gap timer */
2219	switch (adapter->hw.mac_type) {
2220	case em_82542_rev2_0:
2221        case em_82542_rev2_1:
2222                reg_tipg = DEFAULT_82542_TIPG_IPGT;
2223                reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2224                reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2225                break;
2226        default:
2227                if (adapter->hw.media_type == em_media_type_fiber)
2228                        reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2229                else
2230                        reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2231                reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2232                reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2233        }
2234
2235	E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
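	/*
	 * TIDV delays the transmit write-back interrupt for descriptors
	 * queued with the IDE bit (set in txd_cmd below when a delay is
	 * configured); on 82540 and later parts TADV bounds the total delay
	 * so the interrupt is not deferred indefinitely under steady traffic.
	 */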
2236	E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
2237	if(adapter->hw.mac_type >= em_82540)
2238		E1000_WRITE_REG(&adapter->hw, TADV,
2239		    adapter->tx_abs_int_delay.value);
2240
2241	/* Program the Transmit Control Register */
2242	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
2243		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2244	if (adapter->hw.mac_type >= em_82573)
2245		reg_tctl |= E1000_TCTL_MULR;
2246	if (adapter->link_duplex == 1) {
2247		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2248	} else {
2249		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2250	}
2251	E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
2252
2253	/* Setup Transmit Descriptor Settings for this adapter */
2254	adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;
2255
2256	if (adapter->tx_int_delay.value > 0)
2257		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2258
2259	return;
2260}
2261
2262/*********************************************************************
2263 *
2264 *  Free all transmit related data structures.
2265 *
2266 **********************************************************************/
2267static void
2268em_free_transmit_structures(struct adapter * adapter)
2269{
2270        struct em_buffer   *tx_buffer;
2271        int             i;
2272
2273        INIT_DEBUGOUT("free_transmit_structures: begin");
2274
2275        if (adapter->tx_buffer_area != NULL) {
2276                tx_buffer = adapter->tx_buffer_area;
2277                for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2278                        if (tx_buffer->m_head != NULL) {
2279                                bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2280                                bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2281                                m_freem(tx_buffer->m_head);
2282                        }
2283                        tx_buffer->m_head = NULL;
2284                }
2285        }
2286        if (adapter->tx_buffer_area != NULL) {
2287                free(adapter->tx_buffer_area, M_DEVBUF);
2288                adapter->tx_buffer_area = NULL;
2289        }
2290        if (adapter->txtag != NULL) {
2291                bus_dma_tag_destroy(adapter->txtag);
2292                adapter->txtag = NULL;
2293        }
2294        return;
2295}
2296
2297/*********************************************************************
2298 *
2299 *  The offload context needs to be set when we transfer the first
2300 *  packet of a particular protocol (TCP/UDP). We change the
2301 *  context only if the protocol type changes.
2302 *
2303 **********************************************************************/
2304static void
2305em_transmit_checksum_setup(struct adapter * adapter,
2306			   struct mbuf *mp,
2307			   u_int32_t *txd_upper,
2308			   u_int32_t *txd_lower)
2309{
2310	struct em_context_desc *TXD;
2311	struct em_buffer *tx_buffer;
2312	int curr_txd;
2313
2314	if (mp->m_pkthdr.csum_flags) {
2315
2316		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2317			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2318			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2319			if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
2320				return;
2321			else
2322				adapter->active_checksum_context = OFFLOAD_TCP_IP;
2323
2324		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2325			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2326			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2327			if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
2328				return;
2329			else
2330				adapter->active_checksum_context = OFFLOAD_UDP_IP;
2331		} else {
2332			*txd_upper = 0;
2333			*txd_lower = 0;
2334			return;
2335		}
2336	} else {
2337		*txd_upper = 0;
2338		*txd_lower = 0;
2339		return;
2340	}
2341
2342	/* If we reach this point, the checksum offload context
2343	 * needs to be reset.
2344	 */
2345	curr_txd = adapter->next_avail_tx_desc;
2346	tx_buffer = &adapter->tx_buffer_area[curr_txd];
2347	TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
2348
2349	TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2350	TXD->lower_setup.ip_fields.ipcso =
2351		ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2352	TXD->lower_setup.ip_fields.ipcse =
2353		htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2354
2355	TXD->upper_setup.tcp_fields.tucss =
2356		ETHER_HDR_LEN + sizeof(struct ip);
2357	TXD->upper_setup.tcp_fields.tucse = htole16(0);
2358
2359	if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
2360		TXD->upper_setup.tcp_fields.tucso =
2361			ETHER_HDR_LEN + sizeof(struct ip) +
2362			offsetof(struct tcphdr, th_sum);
2363	} else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
2364		TXD->upper_setup.tcp_fields.tucso =
2365			ETHER_HDR_LEN + sizeof(struct ip) +
2366			offsetof(struct udphdr, uh_sum);
2367	}
2368
2369	TXD->tcp_seg_setup.data = htole32(0);
2370	TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);
2371
2372	tx_buffer->m_head = NULL;
2373
2374	if (++curr_txd == adapter->num_tx_desc)
2375		curr_txd = 0;
2376
2377	adapter->num_tx_desc_avail--;
2378	adapter->next_avail_tx_desc = curr_txd;
2379
2380	return;
2381}
2382
2383/**********************************************************************
2384 *
2385 *  Examine each tx_buffer in the used queue. If the hardware is done
2386 *  processing the packet then free associated resources. The
2387 *  tx_buffer is put back on the free queue.
2388 *
2389 **********************************************************************/
2390static void
2391em_clean_transmit_interrupts(struct adapter * adapter)
2392{
2393        int i, num_avail;
2394        struct em_buffer *tx_buffer;
2395        struct em_tx_desc   *tx_desc;
2396	struct ifnet   *ifp = adapter->ifp;
2397
2398	mtx_assert(&adapter->mtx, MA_OWNED);
2399
2400        if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2401                return;
2402
2403        num_avail = adapter->num_tx_desc_avail;
2404        i = adapter->oldest_used_tx_desc;
2405
2406        tx_buffer = &adapter->tx_buffer_area[i];
2407        tx_desc = &adapter->tx_desc_base[i];
2408
2409        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2410            BUS_DMASYNC_POSTREAD);
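        /*
         * Descriptors are queued with the RS (report status) bit set (see
         * adapter->txd_cmd), so the hardware writes DD back into the status
         * field once it is finished with a descriptor.
         */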
2411        while (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2412
2413                tx_desc->upper.data = 0;
2414                num_avail++;
2415
2416                if (tx_buffer->m_head) {
2417			ifp->if_opackets++;
2418                        bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2419                        bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2420
2421                        m_freem(tx_buffer->m_head);
2422                        tx_buffer->m_head = NULL;
2423                }
2424
2425                if (++i == adapter->num_tx_desc)
2426                        i = 0;
2427
2428                tx_buffer = &adapter->tx_buffer_area[i];
2429                tx_desc = &adapter->tx_desc_base[i];
2430        }
2431        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2432            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2433
2434        adapter->oldest_used_tx_desc = i;
2435
2436        /*
2437         * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack
2438         * that it is OK to send packets.
2439         * If there are no pending descriptors, clear the timeout. Otherwise,
2440         * if some descriptors have been freed, restart the timeout.
2441         */
2442        if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
2443                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2444                if (num_avail == adapter->num_tx_desc)
2445                        ifp->if_timer = 0;
2446                else if (num_avail == adapter->num_tx_desc_avail)
2447                        ifp->if_timer = EM_TX_TIMEOUT;
2448        }
2449        adapter->num_tx_desc_avail = num_avail;
2450        return;
2451}
2452
2453/*********************************************************************
2454 *
2455 *  Get a buffer from system mbuf buffer pool.
2456 *
2457 **********************************************************************/
2458static int
2459em_get_buf(int i, struct adapter *adapter,
2460           struct mbuf *nmp)
2461{
2462        register struct mbuf    *mp = nmp;
2463        struct em_buffer *rx_buffer;
2464        struct ifnet   *ifp;
2465        bus_addr_t paddr;
2466        int error;
2467
2468        ifp = adapter->ifp;
2469
2470        if (mp == NULL) {
2471                mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2472                if (mp == NULL) {
2473                        adapter->mbuf_cluster_failed++;
2474                        return(ENOBUFS);
2475                }
2476                mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2477        } else {
2478                mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2479                mp->m_data = mp->m_ext.ext_buf;
2480                mp->m_next = NULL;
2481        }
2482
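        /*
         * ETHER_ALIGN (2 bytes) offsets the payload so that the IP header
         * following the 14-byte Ethernet header ends up 32-bit aligned.
         * The adjustment is only applied for standard-sized MTUs.
         */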
2483        if (ifp->if_mtu <= ETHERMTU) {
2484                m_adj(mp, ETHER_ALIGN);
2485        }
2486
2487        rx_buffer = &adapter->rx_buffer_area[i];
2488
2489        /*
2490         * Using memory from the mbuf cluster pool, invoke the
2491         * bus_dma machinery to arrange the memory mapping.
2492         */
2493	paddr = 0;
2494        error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
2495                                mtod(mp, void *), mp->m_len,
2496                                em_dmamap_cb, &paddr, 0);
2497        if (error || paddr == 0) {
2498                m_free(mp);
2499                return(error);
2500        }
2501        rx_buffer->m_head = mp;
2502        adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
2503        bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD |
2504	    BUS_DMASYNC_PREWRITE);
2505
2506        return(0);
2507}
2508
2509/*********************************************************************
2510 *
2511 *  Allocate memory for rx_buffer structures. Since we use one
2512 *  rx_buffer per received packet, the maximum number of rx_buffer's
2513 *  that we'll need is equal to the number of receive descriptors
2514 *  that we've allocated.
2515 *
2516 **********************************************************************/
2517static int
2518em_allocate_receive_structures(struct adapter * adapter)
2519{
2520        int             i, error;
2521        struct em_buffer *rx_buffer;
2522
2523        if (!(adapter->rx_buffer_area =
2524              (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2525                                          adapter->num_rx_desc, M_DEVBUF,
2526                                          M_NOWAIT))) {
2527                printf("em%d: Unable to allocate rx_buffer memory\n",
2528                       adapter->unit);
2529                return(ENOMEM);
2530        }
2531
2532        bzero(adapter->rx_buffer_area,
2533              sizeof(struct em_buffer) * adapter->num_rx_desc);
2534
2535        error = bus_dma_tag_create(NULL,                /* parent */
2536                               1, 0,                    /* alignment, bounds */
2537                               BUS_SPACE_MAXADDR,       /* lowaddr */
2538                               BUS_SPACE_MAXADDR,       /* highaddr */
2539                               NULL, NULL,              /* filter, filterarg */
2540                               MCLBYTES,                /* maxsize */
2541                               1,                       /* nsegments */
2542                               MCLBYTES,                /* maxsegsize */
2543                               BUS_DMA_ALLOCNOW,        /* flags */
2544			       NULL,			/* lockfunc */
2545			       NULL,			/* lockarg */
2546                               &adapter->rxtag);
2547        if (error != 0) {
2548                printf("em%d: em_allocate_receive_structures: "
2549                        "bus_dma_tag_create failed; error %u\n",
2550                       adapter->unit, error);
2551                goto fail_0;
2552        }
2553
2554        rx_buffer = adapter->rx_buffer_area;
2555        for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2556                error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2557                                          &rx_buffer->map);
2558                if (error != 0) {
2559                        printf("em%d: em_allocate_receive_structures: "
2560                                "bus_dmamap_create failed; error %u\n",
2561                                adapter->unit, error);
2562                        goto fail_1;
2563                }
2564        }
2565
2566        for (i = 0; i < adapter->num_rx_desc; i++) {
2567                error = em_get_buf(i, adapter, NULL);
2568                if (error != 0) {
2569                        adapter->rx_buffer_area[i].m_head = NULL;
2570                        adapter->rx_desc_base[i].buffer_addr = 0;
2571                        return(error);
2572                }
2573        }
2574        bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2575            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2576
2577        return(0);
2578
2579fail_1:
2580        bus_dma_tag_destroy(adapter->rxtag);
2581fail_0:
2582        adapter->rxtag = NULL;
2583        free(adapter->rx_buffer_area, M_DEVBUF);
2584        adapter->rx_buffer_area = NULL;
2585        return (error);
2586}
2587
2588/*********************************************************************
2589 *
2590 *  Allocate and initialize receive structures.
2591 *
2592 **********************************************************************/
2593static int
2594em_setup_receive_structures(struct adapter * adapter)
2595{
2596	bzero((void *) adapter->rx_desc_base,
2597              (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2598
2599	if (em_allocate_receive_structures(adapter))
2600		return ENOMEM;
2601
2602	/* Setup our descriptor pointers */
2603        adapter->next_rx_desc_to_check = 0;
2604	return(0);
2605}
2606
2607/*********************************************************************
2608 *
2609 *  Enable receive unit.
2610 *
2611 **********************************************************************/
2612static void
2613em_initialize_receive_unit(struct adapter * adapter)
2614{
2615	u_int32_t       reg_rctl;
2616	u_int32_t       reg_rxcsum;
2617	struct ifnet    *ifp;
2618	u_int64_t	bus_addr;
2619
2620        INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2621	ifp = adapter->ifp;
2622
2623	/* Make sure receives are disabled while setting up the descriptor ring */
2624	E1000_WRITE_REG(&adapter->hw, RCTL, 0);
2625
2626	/* Set the Receive Delay Timer Register */
2627	E1000_WRITE_REG(&adapter->hw, RDTR,
2628			adapter->rx_int_delay.value | E1000_RDT_FPDB);
2629
2630	if(adapter->hw.mac_type >= em_82540) {
2631		E1000_WRITE_REG(&adapter->hw, RADV,
2632		    adapter->rx_abs_int_delay.value);
2633
2634                /* Set the interrupt throttling rate.  Value is calculated
2635                 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */
2636#define MAX_INTS_PER_SEC        8000
2637#define DEFAULT_ITR             1000000000/(MAX_INTS_PER_SEC * 256)
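                /*
                 * With MAX_INTS_PER_SEC = 8000 this evaluates to
                 * 1000000000 / 2048000 = 488 (in 256 ns units), i.e. at
                 * least ~125 us between receive interrupts.
                 */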
2638                E1000_WRITE_REG(&adapter->hw, ITR, DEFAULT_ITR);
2639        }
2640
2641	/* Setup the Base and Length of the Rx Descriptor Ring */
2642	bus_addr = adapter->rxdma.dma_paddr;
2643	E1000_WRITE_REG(&adapter->hw, RDBAL, (u_int32_t)bus_addr);
2644	E1000_WRITE_REG(&adapter->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
2645	E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
2646			sizeof(struct em_rx_desc));
2647
2648	/* Setup the HW Rx Head and Tail Descriptor Pointers */
2649	E1000_WRITE_REG(&adapter->hw, RDH, 0);
2650	E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
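	/*
	 * The hardware may fill descriptors up to, but not including, the
	 * tail; RDT is advanced again as buffers are replenished in
	 * em_process_receive_interrupts().
	 */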
2651
2652	/* Setup the Receive Control Register */
2653	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2654		   E1000_RCTL_RDMTS_HALF |
2655		   (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
2656
2657	if (adapter->hw.tbi_compatibility_on == TRUE)
2658		reg_rctl |= E1000_RCTL_SBP;
2659
2660
2661	switch (adapter->rx_buffer_len) {
2662	default:
2663	case EM_RXBUFFER_2048:
2664		reg_rctl |= E1000_RCTL_SZ_2048;
2665		break;
2666	case EM_RXBUFFER_4096:
2667		reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2668		break;
2669	case EM_RXBUFFER_8192:
2670		reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2671		break;
2672	case EM_RXBUFFER_16384:
2673		reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2674		break;
2675	}
2676
2677	if (ifp->if_mtu > ETHERMTU)
2678		reg_rctl |= E1000_RCTL_LPE;
2679
2680	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
2681	if ((adapter->hw.mac_type >= em_82543) &&
2682	    (ifp->if_capenable & IFCAP_RXCSUM)) {
2683		reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
2684		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2685		E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
2686	}
2687
2688	/* Enable Receives */
2689	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2690
2691	return;
2692}
2693
2694/*********************************************************************
2695 *
2696 *  Free receive related data structures.
2697 *
2698 **********************************************************************/
2699static void
2700em_free_receive_structures(struct adapter *adapter)
2701{
2702        struct em_buffer   *rx_buffer;
2703        int             i;
2704
2705        INIT_DEBUGOUT("free_receive_structures: begin");
2706
2707        if (adapter->rx_buffer_area != NULL) {
2708                rx_buffer = adapter->rx_buffer_area;
2709                for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2710                        if (rx_buffer->map != NULL) {
2711                                bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2712                                bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2713                        }
2714                        if (rx_buffer->m_head != NULL)
2715                                m_freem(rx_buffer->m_head);
2716                        rx_buffer->m_head = NULL;
2717                }
2718        }
2719        if (adapter->rx_buffer_area != NULL) {
2720                free(adapter->rx_buffer_area, M_DEVBUF);
2721                adapter->rx_buffer_area = NULL;
2722        }
2723        if (adapter->rxtag != NULL) {
2724                bus_dma_tag_destroy(adapter->rxtag);
2725                adapter->rxtag = NULL;
2726        }
2727        return;
2728}
2729
2730/*********************************************************************
2731 *
2732 *  This routine executes in interrupt context. It replenishes
2733 *  the mbufs in the descriptor ring and passes data that has been
2734 *  DMA'ed into host memory up to the network stack.
2735 *
2736 *  We loop at most count times if count is > 0, or until done if
2737 *  count < 0.
2738 *
2739 *********************************************************************/
2740static void
2741em_process_receive_interrupts(struct adapter * adapter, int count)
2742{
2743	struct ifnet        *ifp;
2744	struct mbuf         *mp;
2745	u_int8_t            accept_frame = 0;
2746 	u_int8_t            eop = 0;
2747	u_int16_t           len, desc_len, prev_len_adj;
2748	int                 i;
2749
2750	/* Pointer to the receive descriptor being examined. */
2751	struct em_rx_desc   *current_desc;
2752
2753	mtx_assert(&adapter->mtx, MA_OWNED);
2754
2755	ifp = adapter->ifp;
2756	i = adapter->next_rx_desc_to_check;
2757        current_desc = &adapter->rx_desc_base[i];
2758	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2759	    BUS_DMASYNC_POSTREAD);
2760
2761	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
2762		return;
2763	}
2764
2765	while ((current_desc->status & E1000_RXD_STAT_DD) &&
2766		    (count != 0) &&
2767		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2768		struct mbuf *m = NULL;
2769
2770		mp = adapter->rx_buffer_area[i].m_head;
2771		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2772				BUS_DMASYNC_POSTREAD);
2773
2774		accept_frame = 1;
2775		prev_len_adj = 0;
2776                desc_len = le16toh(current_desc->length);
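		/*
		 * The reported length includes the 4-byte Ethernet CRC.  It
		 * is trimmed from the final fragment here; if the last
		 * descriptor holds fewer than ETHER_CRC_LEN bytes, the
		 * remainder is trimmed from the previous mbuf in the chain
		 * via prev_len_adj.
		 */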
2777		if (current_desc->status & E1000_RXD_STAT_EOP) {
2778			count--;
2779			eop = 1;
2780			if (desc_len < ETHER_CRC_LEN) {
2781                                len = 0;
2782                                prev_len_adj = ETHER_CRC_LEN - desc_len;
2783                        }
2784                        else {
2785                                len = desc_len - ETHER_CRC_LEN;
2786                        }
2787		} else {
2788			eop = 0;
2789			len = desc_len;
2790		}
2791
2792		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2793			u_int8_t            last_byte;
2794			u_int32_t           pkt_len = desc_len;
2795
2796			if (adapter->fmp != NULL)
2797				pkt_len += adapter->fmp->m_pkthdr.len;
2798
2799			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
2800
2801			if (TBI_ACCEPT(&adapter->hw, current_desc->status,
2802				       current_desc->errors,
2803				       pkt_len, last_byte)) {
2804				em_tbi_adjust_stats(&adapter->hw,
2805						    &adapter->stats,
2806						    pkt_len,
2807						    adapter->hw.mac_addr);
2808				if (len > 0) len--;
2809			}
2810			else {
2811				accept_frame = 0;
2812			}
2813		}
2814
2815		if (accept_frame) {
2816
2817			if (em_get_buf(i, adapter, NULL) == ENOBUFS) {
2818				adapter->dropped_pkts++;
2819				em_get_buf(i, adapter, mp);
2820				if (adapter->fmp != NULL)
2821					m_freem(adapter->fmp);
2822				adapter->fmp = NULL;
2823				adapter->lmp = NULL;
2824				break;
2825			}
2826
2827			/* Assign correct length to the current fragment */
2828			mp->m_len = len;
2829
2830			if (adapter->fmp == NULL) {
2831				mp->m_pkthdr.len = len;
2832				adapter->fmp = mp;	 /* Store the first mbuf */
2833				adapter->lmp = mp;
2834			} else {
2835				/* Chain mbuf's together */
2836				mp->m_flags &= ~M_PKTHDR;
2837				/*
2838                                 * Adjust length of previous mbuf in chain if we
2839                                 * received less than 4 bytes in the last descriptor.
2840                                 */
2841				if (prev_len_adj > 0) {
2842					adapter->lmp->m_len -= prev_len_adj;
2843					adapter->fmp->m_pkthdr.len -= prev_len_adj;
2844				}
2845				adapter->lmp->m_next = mp;
2846				adapter->lmp = adapter->lmp->m_next;
2847				adapter->fmp->m_pkthdr.len += len;
2848			}
2849
2850                        if (eop) {
2851                                adapter->fmp->m_pkthdr.rcvif = ifp;
2852				ifp->if_ipackets++;
2853                                em_receive_checksum(adapter, current_desc,
2854                                                    adapter->fmp);
2855                                if (current_desc->status & E1000_RXD_STAT_VP)
2856					VLAN_INPUT_TAG(ifp, adapter->fmp,
2857					    (le16toh(current_desc->special) &
2858					    E1000_RXD_SPC_VLAN_MASK),
2859					    adapter->fmp = NULL);
2860
2861				m = adapter->fmp;
2862				adapter->fmp = NULL;
2863				adapter->lmp = NULL;
2864                        }
2865		} else {
2866			adapter->dropped_pkts++;
2867			em_get_buf(i, adapter, mp);
2868			if (adapter->fmp != NULL)
2869				m_freem(adapter->fmp);
2870			adapter->fmp = NULL;
2871			adapter->lmp = NULL;
2872		}
2873
2874		/* Zero out the receive descriptors status  */
2875		current_desc->status = 0;
2876		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2877		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2878
2879		/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
2880                E1000_WRITE_REG(&adapter->hw, RDT, i);
2881
2882                /* Advance our pointers to the next descriptor */
2883		if (++i == adapter->num_rx_desc)
2884			i = 0;
2885		if (m != NULL) {
2886			adapter->next_rx_desc_to_check = i;
2887			EM_UNLOCK(adapter);
2888			(*ifp->if_input)(ifp, m);
2889			EM_LOCK(adapter);
2890			i = adapter->next_rx_desc_to_check;
2891		}
2892		current_desc = &adapter->rx_desc_base[i];
2893	}
2894	adapter->next_rx_desc_to_check = i;
2895	return;
2896}
2897
2898/*********************************************************************
2899 *
2900 *  Verify that the hardware indicated that the checksum is valid.
2901 *  Inform the stack about the status of the checksum so that the
2902 *  stack doesn't spend time verifying it.
2903 *
2904 *********************************************************************/
2905static void
2906em_receive_checksum(struct adapter *adapter,
2907		    struct em_rx_desc *rx_desc,
2908		    struct mbuf *mp)
2909{
2910	/* 82543 or newer only */
2911	if ((adapter->hw.mac_type < em_82543) ||
2912	    /* Ignore Checksum bit is set */
2913	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
2914		mp->m_pkthdr.csum_flags = 0;
2915		return;
2916	}
2917
2918	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
2919		/* Did it pass? */
2920		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
2921			/* IP Checksum Good */
2922			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2923			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2924
2925		} else {
2926			mp->m_pkthdr.csum_flags = 0;
2927		}
2928	}
2929
2930	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
2931		/* Did it pass? */
2932		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
2933			mp->m_pkthdr.csum_flags |=
2934			(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2935			mp->m_pkthdr.csum_data = htons(0xffff);
2936		}
2937	}
2938
2939	return;
2940}
2941
2942
2943static void
2944em_enable_vlans(struct adapter *adapter)
2945{
2946	uint32_t ctrl;
2947
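	/*
	 * VET holds the EtherType used to recognize 802.1Q tags; CTRL.VME
	 * enables the hardware's VLAN tag handling.
	 */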
2948	E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
2949
2950	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2951	ctrl |= E1000_CTRL_VME;
2952	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2953
2954	return;
2955}
2956
2957static void
2958em_disable_vlans(struct adapter *adapter)
2959{
2960	uint32_t ctrl;
2961
2962	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2963	ctrl &= ~E1000_CTRL_VME;
2964	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2965
2966	return;
2967}
2968
2969static void
2970em_enable_intr(struct adapter * adapter)
2971{
2972	E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
2973	return;
2974}
2975
2976static void
2977em_disable_intr(struct adapter *adapter)
2978{
2979	/*
2980	 * The first version of the 82542 had an erratum where, when the link
2981	 * was forced, it would stay up even if the cable was disconnected.
2982	 * Sequence errors were used to detect the disconnect, and the driver
2983	 * would then unforce the link.  That code is in the ISR; for it to
2984	 * work correctly the Sequence Error interrupt has to be enabled at all times.
2985	 */
2986
2987	if (adapter->hw.mac_type == em_82542_rev2_0)
2988	    E1000_WRITE_REG(&adapter->hw, IMC,
2989	        (0xffffffff & ~E1000_IMC_RXSEQ));
2990	else
2991	    E1000_WRITE_REG(&adapter->hw, IMC,
2992	        0xffffffff);
2993	return;
2994}
2995
2996static int
2997em_is_valid_ether_addr(u_int8_t *addr)
2998{
2999        char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3000
3001        if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3002                return (FALSE);
3003        }
3004
3005        return(TRUE);
3006}
3007
3008void
3009em_write_pci_cfg(struct em_hw *hw,
3010		      uint32_t reg,
3011		      uint16_t *value)
3012{
3013	pci_write_config(((struct em_osdep *)hw->back)->dev, reg,
3014			 *value, 2);
3015}
3016
3017void
3018em_read_pci_cfg(struct em_hw *hw, uint32_t reg,
3019		     uint16_t *value)
3020{
3021	*value = pci_read_config(((struct em_osdep *)hw->back)->dev,
3022				 reg, 2);
3023	return;
3024}
3025
3026void
3027em_pci_set_mwi(struct em_hw *hw)
3028{
3029        pci_write_config(((struct em_osdep *)hw->back)->dev,
3030                         PCIR_COMMAND,
3031                         (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
3032        return;
3033}
3034
3035void
3036em_pci_clear_mwi(struct em_hw *hw)
3037{
3038        pci_write_config(((struct em_osdep *)hw->back)->dev,
3039                         PCIR_COMMAND,
3040                         (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
3041        return;
3042}
3043
3044/*********************************************************************
3045* 82544 Coexistence issue workaround.
3046*    There are two issues:
3047*       1. Transmit hang issue.
3048*          To detect this issue the following equation can be used:
3049*             SIZE[3:0] + ADDR[2:0] = SUM[3:0]
3050*          If SUM[3:0] is between 1 and 4, the issue is present.
3051*
3052*       2. DAC issue.
3053*          To detect this issue the following equation can be used:
3054*             SIZE[3:0] + ADDR[2:0] = SUM[3:0]
3055*          If SUM[3:0] is between 9 and 0xC, the issue is present.
3056*
3057*    WORKAROUND:
3058*          Make sure the ending address of a transmit buffer never lands
3059*          on 1-4 (hang) or 9-0xC (DAC).
3060*
3061**********************************************************************/
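/*
 * Illustrative example: a buffer whose address ends in 0x2 with length 18
 * gives (0x2 + 0x2) & 0xF = 0x4, which falls in the 1-4 (hang) range, so
 * the buffer is split into a 14-byte descriptor followed by a 4-byte one.
 */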
3062static u_int32_t
3063em_fill_descriptors (bus_addr_t address,
3064                              u_int32_t length,
3065                              PDESC_ARRAY desc_array)
3066{
3067        /* Since the issue is sensitive to both length and address, */
3068        /* check the address first. */
3069        u_int32_t safe_terminator;
3070        if (length <= 4) {
3071                desc_array->descriptor[0].address = address;
3072                desc_array->descriptor[0].length = length;
3073                desc_array->elements = 1;
3074                return desc_array->elements;
3075        }
3076        safe_terminator = (u_int32_t)((((u_int32_t)address & 0x7) + (length & 0xF)) & 0xF);
3077        /* If the terminator does not fall in 0x1-0x4 or 0x9-0xC, a single descriptor is safe */
3078        if (safe_terminator == 0   ||
3079        (safe_terminator > 4   &&
3080        safe_terminator < 9)   ||
3081        (safe_terminator > 0xC &&
3082        safe_terminator <= 0xF)) {
3083                desc_array->descriptor[0].address = address;
3084                desc_array->descriptor[0].length = length;
3085                desc_array->elements = 1;
3086                return desc_array->elements;
3087        }
3088
3089        desc_array->descriptor[0].address = address;
3090        desc_array->descriptor[0].length = length - 4;
3091        desc_array->descriptor[1].address = address + (length - 4);
3092        desc_array->descriptor[1].length = 4;
3093        desc_array->elements = 2;
3094        return desc_array->elements;
3095}
3096
3097/**********************************************************************
3098 *
3099 *  Update the board statistics counters.
3100 *
3101 **********************************************************************/
3102static void
3103em_update_stats_counters(struct adapter *adapter)
3104{
3105	struct ifnet   *ifp;
3106
3107	if(adapter->hw.media_type == em_media_type_copper ||
3108	   (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
3109		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
3110		adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
3111	}
3112	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
3113	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
3114	adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
3115	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);
3116
3117	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
3118	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
3119	adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
3120	adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
3121	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
3122	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
3123	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
3124	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
3125	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
3126	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
3127	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
3128	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
3129	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
3130	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
3131	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
3132	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
3133	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
3134	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
3135	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
3136	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);
3137
3138	/* For the 64-bit byte counters the low dword must be read first. */
3139	/* Both registers clear on the read of the high dword */
3140
3141	adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
3142	adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
3143	adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
3144	adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);
3145
3146	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
3147	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
3148	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
3149	adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
3150	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);
3151
3152	adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
3153	adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
3154	adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
3155	adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);
3156
3157	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
3158	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
3159	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
3160	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
3161	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
3162	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
3163	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
3164	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
3165	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
3166	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);
3167
3168	if (adapter->hw.mac_type >= em_82543) {
3169		adapter->stats.algnerrc +=
3170		E1000_READ_REG(&adapter->hw, ALGNERRC);
3171		adapter->stats.rxerrc +=
3172		E1000_READ_REG(&adapter->hw, RXERRC);
3173		adapter->stats.tncrs +=
3174		E1000_READ_REG(&adapter->hw, TNCRS);
3175		adapter->stats.cexterr +=
3176		E1000_READ_REG(&adapter->hw, CEXTERR);
3177		adapter->stats.tsctc +=
3178		E1000_READ_REG(&adapter->hw, TSCTC);
3179		adapter->stats.tsctfc +=
3180		E1000_READ_REG(&adapter->hw, TSCTFC);
3181	}
3182	ifp = adapter->ifp;
3183
3184	ifp->if_collisions = adapter->stats.colc;
3185
3186	/* Rx Errors */
3187	ifp->if_ierrors =
3188	adapter->dropped_pkts +
3189	adapter->stats.rxerrc +
3190	adapter->stats.crcerrs +
3191	adapter->stats.algnerrc +
3192	adapter->stats.rlec +
3193	adapter->stats.mpc + adapter->stats.cexterr;
3194
3195	/* Tx Errors */
3196	ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol +
3197	    adapter->watchdog_events;
3198
3199}
3200
3201
3202/**********************************************************************
3203 *
3204 *  This routine is called only when em_display_debug_stats is enabled.
3205 *  This routine provides a way to take a look at important statistics
3206 *  maintained by the driver and hardware.
3207 *
3208 **********************************************************************/
static void
em_print_debug_info(struct adapter *adapter)
{
	int unit = adapter->unit;
	uint8_t *hw_addr = adapter->hw.hw_addr;

	printf("em%d: Adapter hardware address = %p\n", unit, hw_addr);
	printf("em%d: CTRL = 0x%x\n", unit,
	    E1000_READ_REG(&adapter->hw, CTRL));
	printf("em%d: RCTL = 0x%x PS=(0x8402)\n", unit,
	    E1000_READ_REG(&adapter->hw, RCTL));
	printf("em%d: tx_int_delay = %d, tx_abs_int_delay = %d\n", unit,
	    E1000_READ_REG(&adapter->hw, TIDV),
	    E1000_READ_REG(&adapter->hw, TADV));
	printf("em%d: rx_int_delay = %d, rx_abs_int_delay = %d\n", unit,
	    E1000_READ_REG(&adapter->hw, RDTR),
	    E1000_READ_REG(&adapter->hw, RADV));
	printf("em%d: fifo workaround = %lld, fifo_reset = %lld\n", unit,
	    (long long)adapter->tx_fifo_wrk_cnt,
	    (long long)adapter->tx_fifo_reset_cnt);
	printf("em%d: hw tdh = %d, hw tdt = %d\n", unit,
	    E1000_READ_REG(&adapter->hw, TDH),
	    E1000_READ_REG(&adapter->hw, TDT));
	printf("em%d: Num Tx descriptors avail = %d\n", unit,
	    adapter->num_tx_desc_avail);
	printf("em%d: Tx Descriptors not avail1 = %ld\n", unit,
	    adapter->no_tx_desc_avail1);
	printf("em%d: Tx Descriptors not avail2 = %ld\n", unit,
	    adapter->no_tx_desc_avail2);
	printf("em%d: Std mbuf failed = %ld\n", unit,
	    adapter->mbuf_alloc_failed);
	printf("em%d: Std mbuf cluster failed = %ld\n", unit,
	    adapter->mbuf_cluster_failed);
	printf("em%d: Driver dropped packets = %ld\n", unit,
	    adapter->dropped_pkts);

	return;
}

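/**********************************************************************
 *
 *  This routine dumps the hardware MAC statistics counters that the
 *  driver accumulates in adapter->stats, in a human-readable form.
 *  It is reached through the em_sysctl_stats() handler below.
 *
 **********************************************************************/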
static void
em_print_hw_stats(struct adapter *adapter)
{
	int unit = adapter->unit;

	printf("em%d: Excessive collisions = %lld\n", unit,
	    (long long)adapter->stats.ecol);
	printf("em%d: Symbol errors = %lld\n", unit,
	    (long long)adapter->stats.symerrs);
	printf("em%d: Sequence errors = %lld\n", unit,
	    (long long)adapter->stats.sec);
	printf("em%d: Defer count = %lld\n", unit,
	    (long long)adapter->stats.dc);

	printf("em%d: Missed Packets = %lld\n", unit,
	    (long long)adapter->stats.mpc);
	printf("em%d: Receive No Buffers = %lld\n", unit,
	    (long long)adapter->stats.rnbc);
	printf("em%d: Receive length errors = %lld\n", unit,
	    (long long)adapter->stats.rlec);
	printf("em%d: Receive errors = %lld\n", unit,
	    (long long)adapter->stats.rxerrc);
	printf("em%d: Crc errors = %lld\n", unit,
	    (long long)adapter->stats.crcerrs);
	printf("em%d: Alignment errors = %lld\n", unit,
	    (long long)adapter->stats.algnerrc);
	printf("em%d: Carrier extension errors = %lld\n", unit,
	    (long long)adapter->stats.cexterr);
	printf("em%d: RX overruns = %ld\n", unit, adapter->rx_overruns);
	printf("em%d: watchdog timeouts = %ld\n", unit,
	    adapter->watchdog_events);

	printf("em%d: XON Rcvd = %lld\n", unit,
	    (long long)adapter->stats.xonrxc);
	printf("em%d: XON Xmtd = %lld\n", unit,
	    (long long)adapter->stats.xontxc);
	printf("em%d: XOFF Rcvd = %lld\n", unit,
	    (long long)adapter->stats.xoffrxc);
	printf("em%d: XOFF Xmtd = %lld\n", unit,
	    (long long)adapter->stats.xofftxc);

	printf("em%d: Good Packets Rcvd = %lld\n", unit,
	    (long long)adapter->stats.gprc);
	printf("em%d: Good Packets Xmtd = %lld\n", unit,
	    (long long)adapter->stats.gptc);

	return;
}

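/*
 * Sysctl handler for the adapter debug information.  Writing 1 to the
 * corresponding OID dumps the driver state via em_print_debug_info();
 * reads simply return -1.  The OID itself is registered elsewhere in
 * the driver, typically as a per-device node (for example something
 * like dev.em.0.debug_info; the exact name is set at registration).
 */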
static int
em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct adapter *adapter;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		adapter = (struct adapter *)arg1;
		em_print_debug_info(adapter);
	}

	return (error);
}

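/*
 * Sysctl handler for the hardware statistics.  Writing 1 to the
 * corresponding OID prints the accumulated MAC statistics via
 * em_print_hw_stats(); reads simply return -1.
 */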
static int
em_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct adapter *adapter;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		adapter = (struct adapter *)arg1;
		em_print_hw_stats(adapter);
	}

	return (error);
}

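/*
 * Sysctl handler backing the interrupt delay OIDs.  The value is exposed
 * to userland in microseconds; a write converts it to device tick units,
 * replaces the low 16 bits of the associated register and updates the
 * hardware under the adapter lock.  Two special cases are handled below:
 * RDTR needs the "flush partial descriptor block" bit set, and a delay
 * of zero on TIDV is expressed by clearing the IDE bit in the transmit
 * descriptor command word rather than by writing 0 to the register.
 */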
static int
em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
{
	struct em_int_delay_info *info;
	struct adapter *adapter;
	u_int32_t regval;
	int error;
	int usecs;
	int ticks;

	info = (struct em_int_delay_info *)arg1;
	usecs = info->value;
	error = sysctl_handle_int(oidp, &usecs, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
		return (EINVAL);
	info->value = usecs;
	ticks = E1000_USECS_TO_TICKS(usecs);

	adapter = info->adapter;

	EM_LOCK(adapter);
	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
	regval = (regval & ~0xffff) | (ticks & 0xffff);
	/* Handle a few special cases. */
	switch (info->offset) {
	case E1000_RDTR:
	case E1000_82542_RDTR:
		regval |= E1000_RDT_FPDB;
		break;
	case E1000_TIDV:
	case E1000_82542_TIDV:
		if (ticks == 0) {
			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
			/* Don't write 0 into the TIDV register. */
			regval++;
		} else
			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
		break;
	}
	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
	EM_UNLOCK(adapter);
	return (0);
}

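/*
 * Register one interrupt delay tunable as a per-device, read-write
 * integer sysctl backed by em_sysctl_int_delay() above.  The caller
 * supplies the register offset and the initial value in microseconds.
 *
 * A sketch of how the attach path would typically use this helper (the
 * OID name, the offset macro and the default-value variable shown here
 * are illustrative assumptions, not necessarily the exact names used
 * elsewhere in this file):
 *
 *	em_add_int_delay_sysctl(adapter, "rx_int_delay",
 *	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
 *	    E1000_REG_OFFSET(&adapter->hw, RDTR), em_rx_int_delay_dflt);
 *
 * The resulting node can then be tuned from userland, e.g.
 * "sysctl dev.em.0.rx_int_delay=32" (assuming unit 0).
 */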
static void
em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
    const char *description, struct em_int_delay_info *info,
    int offset, int value)
{
	info->adapter = adapter;
	info->offset = offset;
	info->value = value;
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
	    info, 0, em_sysctl_int_delay, "I", description);
}
