if_em.c revision 151466
1211963Sbrian/**************************************************************************
2211963Sbrian
3211963SbrianCopyright (c) 2001-2005, Intel Corporation
4211963SbrianAll rights reserved.
5211963Sbrian
6211963SbrianRedistribution and use in source and binary forms, with or without
7211963Sbrianmodification, are permitted provided that the following conditions are met:
8211963Sbrian
9211963Sbrian 1. Redistributions of source code must retain the above copyright notice,
10211963Sbrian    this list of conditions and the following disclaimer.
11211963Sbrian
12211963Sbrian 2. Redistributions in binary form must reproduce the above copyright
13211963Sbrian    notice, this list of conditions and the following disclaimer in the
14211963Sbrian    documentation and/or other materials provided with the distribution.
15211963Sbrian
16211963Sbrian 3. Neither the name of the Intel Corporation nor the names of its
17211963Sbrian    contributors may be used to endorse or promote products derived from
18211963Sbrian    this software without specific prior written permission.
19211963Sbrian
20211963SbrianTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21211963SbrianAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22211963SbrianIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23211963SbrianARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24211963SbrianLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25211963SbrianCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26211963SbrianSUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27211963SbrianINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28211963SbrianCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29211963SbrianARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30211963SbrianPOSSIBILITY OF SUCH DAMAGE.
31211963Sbrian
32211963Sbrian***************************************************************************/
33211963Sbrian
34211963Sbrian/*$FreeBSD: head/sys/dev/em/if_em.c 151466 2005-10-19 13:34:48Z glebius $*/
35211963Sbrian
36211963Sbrian#ifdef HAVE_KERNEL_OPTION_HEADERS
37211963Sbrian#include "opt_device_polling.h"
38211963Sbrian#endif
39211963Sbrian
40211963Sbrian#include <dev/em/if_em.h>
41211963Sbrian
42211963Sbrian/*********************************************************************
43211963Sbrian *  Set this to one to display debug statistics
44211963Sbrian *********************************************************************/
45211963Sbrianint             em_display_debug_stats = 0;
46211963Sbrian
47211963Sbrian/*********************************************************************
48211963Sbrian *  Linked list of board private structures for all NICs found
49211963Sbrian *********************************************************************/
50211963Sbrian
51211963Sbrianstruct adapter *em_adapter_list = NULL;
52211963Sbrian
53211963Sbrian
54211963Sbrian/*********************************************************************
55211963Sbrian *  Driver version
56211963Sbrian *********************************************************************/
57211963Sbrian
58211963Sbrianchar em_driver_version[] = "2.1.7";
59211963Sbrian
60211963Sbrian
61211963Sbrian/*********************************************************************
62211963Sbrian *  PCI Device ID Table
63211963Sbrian *
64211963Sbrian *  Used by probe to select devices to load on
65211963Sbrian *  Last field stores an index into em_strings
66211963Sbrian *  Last entry must be all 0s
67211963Sbrian *
68211963Sbrian *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
69211963Sbrian *********************************************************************/
70211963Sbrian
71211963Sbrianstatic em_vendor_info_t em_vendor_info_array[] =
72211963Sbrian{
73211963Sbrian        /* Intel(R) PRO/1000 Network Connection */
74211963Sbrian        { 0x8086, E1000_DEV_ID_82540EM,             PCI_ANY_ID, PCI_ANY_ID, 0},
75211963Sbrian        { 0x8086, E1000_DEV_ID_82540EM_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
76211963Sbrian        { 0x8086, E1000_DEV_ID_82540EP,             PCI_ANY_ID, PCI_ANY_ID, 0},
77211963Sbrian        { 0x8086, E1000_DEV_ID_82540EP_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
78211963Sbrian        { 0x8086, E1000_DEV_ID_82540EP_LP,          PCI_ANY_ID, PCI_ANY_ID, 0},
79211963Sbrian
80211963Sbrian        { 0x8086, E1000_DEV_ID_82541EI,             PCI_ANY_ID, PCI_ANY_ID, 0},
81211963Sbrian        { 0x8086, E1000_DEV_ID_82541ER,             PCI_ANY_ID, PCI_ANY_ID, 0},
82211963Sbrian        { 0x8086, E1000_DEV_ID_82541ER_LOM,             PCI_ANY_ID, PCI_ANY_ID, 0},
83211963Sbrian        { 0x8086, E1000_DEV_ID_82541EI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
84211963Sbrian        { 0x8086, E1000_DEV_ID_82541GI,             PCI_ANY_ID, PCI_ANY_ID, 0},
85211963Sbrian        { 0x8086, E1000_DEV_ID_82541GI_LF,          PCI_ANY_ID, PCI_ANY_ID, 0},
86211963Sbrian        { 0x8086, E1000_DEV_ID_82541GI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
87211963Sbrian
88211963Sbrian        { 0x8086, E1000_DEV_ID_82542,               PCI_ANY_ID, PCI_ANY_ID, 0},
89211963Sbrian
90211963Sbrian        { 0x8086, E1000_DEV_ID_82543GC_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
91211963Sbrian        { 0x8086, E1000_DEV_ID_82543GC_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
92
93        { 0x8086, E1000_DEV_ID_82544EI_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
94        { 0x8086, E1000_DEV_ID_82544EI_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
95        { 0x8086, E1000_DEV_ID_82544GC_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
96        { 0x8086, E1000_DEV_ID_82544GC_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
97
98        { 0x8086, E1000_DEV_ID_82545EM_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
99        { 0x8086, E1000_DEV_ID_82545EM_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
100        { 0x8086, E1000_DEV_ID_82545GM_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
101        { 0x8086, E1000_DEV_ID_82545GM_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
102        { 0x8086, E1000_DEV_ID_82545GM_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
103
104        { 0x8086, E1000_DEV_ID_82546EB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
105        { 0x8086, E1000_DEV_ID_82546EB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
106        { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
107        { 0x8086, E1000_DEV_ID_82546GB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
108        { 0x8086, E1000_DEV_ID_82546GB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
109        { 0x8086, E1000_DEV_ID_82546GB_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
110        { 0x8086, E1000_DEV_ID_82546GB_PCIE,        PCI_ANY_ID, PCI_ANY_ID, 0},
111        { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
112
113        { 0x8086, E1000_DEV_ID_82547EI,             PCI_ANY_ID, PCI_ANY_ID, 0},
114        { 0x8086, E1000_DEV_ID_82547EI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
115        { 0x8086, E1000_DEV_ID_82547GI,             PCI_ANY_ID, PCI_ANY_ID, 0},
116
117        { 0x8086, E1000_DEV_ID_82573E,              PCI_ANY_ID, PCI_ANY_ID, 0},
118        { 0x8086, E1000_DEV_ID_82573E_IAMT,         PCI_ANY_ID, PCI_ANY_ID, 0},
119
120        /* required last entry */
121        { 0, 0, 0, 0, 0}
122};
123
124/*********************************************************************
125 *  Table of branding strings for all supported NICs.
126 *********************************************************************/
127
128static char *em_strings[] = {
129	"Intel(R) PRO/1000 Network Connection"
130};
131
132/*********************************************************************
133 *  Function prototypes
134 *********************************************************************/
135static int  em_probe(device_t);
136static int  em_attach(device_t);
137static int  em_detach(device_t);
138static int  em_shutdown(device_t);
139static void em_intr(void *);
140static void em_start(struct ifnet *);
141static int  em_ioctl(struct ifnet *, u_long, caddr_t);
142static void em_watchdog(struct ifnet *);
143static void em_init(void *);
144static void em_init_locked(struct adapter *);
145static void em_stop(void *);
146static void em_media_status(struct ifnet *, struct ifmediareq *);
147static int  em_media_change(struct ifnet *);
148static void em_identify_hardware(struct adapter *);
149static int  em_allocate_pci_resources(struct adapter *);
150static void em_free_pci_resources(struct adapter *);
151static void em_local_timer(void *);
152static int  em_hardware_init(struct adapter *);
153static void em_setup_interface(device_t, struct adapter *);
154static int  em_setup_transmit_structures(struct adapter *);
155static void em_initialize_transmit_unit(struct adapter *);
156static int  em_setup_receive_structures(struct adapter *);
157static void em_initialize_receive_unit(struct adapter *);
158static void em_enable_intr(struct adapter *);
159static void em_disable_intr(struct adapter *);
160static void em_free_transmit_structures(struct adapter *);
161static void em_free_receive_structures(struct adapter *);
162static void em_update_stats_counters(struct adapter *);
163static void em_clean_transmit_interrupts(struct adapter *);
164static int  em_allocate_receive_structures(struct adapter *);
165static int  em_allocate_transmit_structures(struct adapter *);
166static void em_process_receive_interrupts(struct adapter *, int);
167static void em_receive_checksum(struct adapter *,
168				struct em_rx_desc *,
169				struct mbuf *);
170static void em_transmit_checksum_setup(struct adapter *,
171				       struct mbuf *,
172				       u_int32_t *,
173				       u_int32_t *);
174static void em_set_promisc(struct adapter *);
175static void em_disable_promisc(struct adapter *);
176static void em_set_multi(struct adapter *);
177static void em_print_hw_stats(struct adapter *);
178static void em_print_link_status(struct adapter *);
179static int  em_get_buf(int i, struct adapter *,
180		       struct mbuf *);
181static void em_enable_vlans(struct adapter *);
182static void em_disable_vlans(struct adapter *);
183static int  em_encap(struct adapter *, struct mbuf **);
184static void em_smartspeed(struct adapter *);
185static int  em_82547_fifo_workaround(struct adapter *, int);
186static void em_82547_update_fifo_head(struct adapter *, int);
187static int  em_82547_tx_fifo_reset(struct adapter *);
188static void em_82547_move_tail(void *arg);
189static void em_82547_move_tail_locked(struct adapter *);
190static int  em_dma_malloc(struct adapter *, bus_size_t,
191			  struct em_dma_alloc *, int);
192static void em_dma_free(struct adapter *, struct em_dma_alloc *);
193static void em_print_debug_info(struct adapter *);
194static int  em_is_valid_ether_addr(u_int8_t *);
195static int  em_sysctl_stats(SYSCTL_HANDLER_ARGS);
196static int  em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
197static u_int32_t em_fill_descriptors (u_int64_t address,
198				      u_int32_t length,
199				      PDESC_ARRAY desc_array);
200static int  em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
201static void em_add_int_delay_sysctl(struct adapter *, const char *,
202				    const char *, struct em_int_delay_info *,
203				    int, int);
204#ifdef DEVICE_POLLING
205static poll_handler_t em_poll;
206#endif
207
208/*********************************************************************
209 *  FreeBSD Device Interface Entry Points
210 *********************************************************************/
211
212static device_method_t em_methods[] = {
213	/* Device interface */
214	DEVMETHOD(device_probe, em_probe),
215	DEVMETHOD(device_attach, em_attach),
216	DEVMETHOD(device_detach, em_detach),
217	DEVMETHOD(device_shutdown, em_shutdown),
218	{0, 0}
219};
220
221static driver_t em_driver = {
222	"em", em_methods, sizeof(struct adapter ),
223};
224
225static devclass_t em_devclass;
226DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
227MODULE_DEPEND(em, pci, 1, 1, 1);
228MODULE_DEPEND(em, ether, 1, 1, 1);
229
230/*********************************************************************
231 *  Tunable default values.
232 *********************************************************************/
233
234#define E1000_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
235#define E1000_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)
236
237static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
238static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
239static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
240static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
241
242TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
243TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
244TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
245TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
246
247/*********************************************************************
248 *  Device identification routine
249 *
250 *  em_probe determines if the driver should be loaded on
251 *  adapter based on PCI vendor/device id of the adapter.
252 *
253 *  return BUS_PROBE_DEFAULT on success, positive on failure
254 *********************************************************************/
255
256static int
257em_probe(device_t dev)
258{
259	em_vendor_info_t *ent;
260
261	u_int16_t       pci_vendor_id = 0;
262	u_int16_t       pci_device_id = 0;
263	u_int16_t       pci_subvendor_id = 0;
264	u_int16_t       pci_subdevice_id = 0;
265	char            adapter_name[60];
266
267	INIT_DEBUGOUT("em_probe: begin");
268
269	pci_vendor_id = pci_get_vendor(dev);
270	if (pci_vendor_id != EM_VENDOR_ID)
271		return(ENXIO);
272
273	pci_device_id = pci_get_device(dev);
274	pci_subvendor_id = pci_get_subvendor(dev);
275	pci_subdevice_id = pci_get_subdevice(dev);
276
277	ent = em_vendor_info_array;
278	while (ent->vendor_id != 0) {
279		if ((pci_vendor_id == ent->vendor_id) &&
280		    (pci_device_id == ent->device_id) &&
281
282		    ((pci_subvendor_id == ent->subvendor_id) ||
283		     (ent->subvendor_id == PCI_ANY_ID)) &&
284
285		    ((pci_subdevice_id == ent->subdevice_id) ||
286		     (ent->subdevice_id == PCI_ANY_ID))) {
287			sprintf(adapter_name, "%s, Version - %s",
288				em_strings[ent->index],
289				em_driver_version);
290			device_set_desc_copy(dev, adapter_name);
291			return(BUS_PROBE_DEFAULT);
292		}
293		ent++;
294	}
295
296	return(ENXIO);
297}
298
299/*********************************************************************
300 *  Device initialization routine
301 *
302 *  The attach entry point is called when the driver is being loaded.
303 *  This routine identifies the type of hardware, allocates all resources
304 *  and initializes the hardware.
305 *
306 *  return 0 on success, positive on failure
307 *********************************************************************/
308
309static int
310em_attach(device_t dev)
311{
312	struct adapter * adapter;
313	int             tsize, rsize;
314	int		error = 0;
315
316	INIT_DEBUGOUT("em_attach: begin");
317
318	/* Allocate, clear, and link in our adapter structure */
319	if (!(adapter = device_get_softc(dev))) {
320		printf("em: adapter structure allocation failed\n");
321		return(ENOMEM);
322	}
323	bzero(adapter, sizeof(struct adapter ));
324	adapter->dev = dev;
325	adapter->osdep.dev = dev;
326	adapter->unit = device_get_unit(dev);
327	EM_LOCK_INIT(adapter, device_get_nameunit(dev));
328
329	if (em_adapter_list != NULL)
330		em_adapter_list->prev = adapter;
331	adapter->next = em_adapter_list;
332	em_adapter_list = adapter;
333
334	/* SYSCTL stuff */
335        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
336                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
337                        OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
338                        (void *)adapter, 0,
339                        em_sysctl_debug_info, "I", "Debug Information");
340
341        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
342                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
343                        OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
344                        (void *)adapter, 0,
345                        em_sysctl_stats, "I", "Statistics");
346
347	callout_init(&adapter->timer, CALLOUT_MPSAFE);
348	callout_init(&adapter->tx_fifo_timer, CALLOUT_MPSAFE);
349
350	/* Determine hardware revision */
351	em_identify_hardware(adapter);
352
353	/* Set up some sysctls for the tunable interrupt delays */
354	em_add_int_delay_sysctl(adapter, "rx_int_delay",
355	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
356	    E1000_REG_OFFSET(&adapter->hw, RDTR), em_rx_int_delay_dflt);
357	em_add_int_delay_sysctl(adapter, "tx_int_delay",
358	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
359	    E1000_REG_OFFSET(&adapter->hw, TIDV), em_tx_int_delay_dflt);
360	if (adapter->hw.mac_type >= em_82540) {
361		em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
362		    "receive interrupt delay limit in usecs",
363		    &adapter->rx_abs_int_delay,
364		    E1000_REG_OFFSET(&adapter->hw, RADV),
365		    em_rx_abs_int_delay_dflt);
366		em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
367		    "transmit interrupt delay limit in usecs",
368		    &adapter->tx_abs_int_delay,
369		    E1000_REG_OFFSET(&adapter->hw, TADV),
370		    em_tx_abs_int_delay_dflt);
371	}
372
373	/* Parameters (to be read from user) */
374        adapter->num_tx_desc = EM_MAX_TXD;
375        adapter->num_rx_desc = EM_MAX_RXD;
376        adapter->hw.autoneg = DO_AUTO_NEG;
377        adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
378        adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
379        adapter->hw.tbi_compatibility_en = TRUE;
380        adapter->rx_buffer_len = EM_RXBUFFER_2048;
381
382	/*
383         * These parameters control the automatic generation(Tx) and
384         * response(Rx) to Ethernet PAUSE frames.
385         */
386        adapter->hw.fc_high_water = FC_DEFAULT_HI_THRESH;
387        adapter->hw.fc_low_water  = FC_DEFAULT_LO_THRESH;
388        adapter->hw.fc_pause_time = FC_DEFAULT_TX_TIMER;
389        adapter->hw.fc_send_xon   = TRUE;
390        adapter->hw.fc = em_fc_full;
391
392	adapter->hw.phy_init_script = 1;
393	adapter->hw.phy_reset_disable = FALSE;
394
395#ifndef EM_MASTER_SLAVE
396	adapter->hw.master_slave = em_ms_hw_default;
397#else
398	adapter->hw.master_slave = EM_MASTER_SLAVE;
399#endif
400	/*
401	 * Set the max frame size assuming standard ethernet
402	 * sized frames
403	 */
404	adapter->hw.max_frame_size =
405		ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
406
407	adapter->hw.min_frame_size =
408		MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
409
410	/*
411	 * This controls when hardware reports transmit completion
412	 * status.
413	 */
414	adapter->hw.report_tx_early = 1;
415
416
417	if (em_allocate_pci_resources(adapter)) {
418		printf("em%d: Allocation of PCI resources failed\n",
419		       adapter->unit);
420                error = ENXIO;
421                goto err_pci;
422	}
423
424
425	/* Initialize eeprom parameters */
426        em_init_eeprom_params(&adapter->hw);
427
428	tsize = EM_ROUNDUP(adapter->num_tx_desc *
429			   sizeof(struct em_tx_desc), 4096);
430
431	/* Allocate Transmit Descriptor ring */
432        if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
433                printf("em%d: Unable to allocate tx_desc memory\n",
434                       adapter->unit);
435		error = ENOMEM;
436                goto err_tx_desc;
437        }
438        adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;
439
440	rsize = EM_ROUNDUP(adapter->num_rx_desc *
441			   sizeof(struct em_rx_desc), 4096);
442
443	/* Allocate Receive Descriptor ring */
444        if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
445                printf("em%d: Unable to allocate rx_desc memory\n",
446                        adapter->unit);
447		error = ENOMEM;
448                goto err_rx_desc;
449        }
450        adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;
451
452	/* Initialize the hardware */
453	if (em_hardware_init(adapter)) {
454		printf("em%d: Unable to initialize the hardware\n",
455		       adapter->unit);
456		error = EIO;
457                goto err_hw_init;
458	}
459
460	/* Copy the permanent MAC address out of the EEPROM */
461	if (em_read_mac_addr(&adapter->hw) < 0) {
462		printf("em%d: EEPROM read error while reading mac address\n",
463		       adapter->unit);
464		error = EIO;
465                goto err_mac_addr;
466	}
467
468	if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
469                printf("em%d: Invalid mac address\n", adapter->unit);
470                error = EIO;
471                goto err_mac_addr;
472        }
473
474	/* Setup OS specific network interface */
475	em_setup_interface(dev, adapter);
476
477	/* Initialize statistics */
478	em_clear_hw_cntrs(&adapter->hw);
479	em_update_stats_counters(adapter);
480	adapter->hw.get_link_status = 1;
481	em_check_for_link(&adapter->hw);
482
483	/* Print the link status */
484	if (adapter->link_active == 1) {
485		em_get_speed_and_duplex(&adapter->hw, &adapter->link_speed,
486					&adapter->link_duplex);
487		printf("em%d:  Speed:%d Mbps  Duplex:%s\n",
488		       adapter->unit,
489		       adapter->link_speed,
490		       adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
491	} else
492		printf("em%d:  Speed:N/A  Duplex:N/A\n", adapter->unit);
493
494	/* Identify 82544 on PCIX */
495        em_get_bus_info(&adapter->hw);
496        if(adapter->hw.bus_type == em_bus_type_pcix &&
497           adapter->hw.mac_type == em_82544) {
498                adapter->pcix_82544 = TRUE;
499        }
500        else {
501                adapter->pcix_82544 = FALSE;
502        }
503	INIT_DEBUGOUT("em_attach: end");
504	return(0);
505
506err_mac_addr:
507err_hw_init:
508        em_dma_free(adapter, &adapter->rxdma);
509err_rx_desc:
510        em_dma_free(adapter, &adapter->txdma);
511err_tx_desc:
512err_pci:
513        em_free_pci_resources(adapter);
514        return(error);
515
516}
517
518/*********************************************************************
519 *  Device removal routine
520 *
521 *  The detach entry point is called when the driver is being removed.
522 *  This routine stops the adapter and deallocates all the resources
523 *  that were allocated for driver operation.
524 *
525 *  return 0 on success, positive on failure
526 *********************************************************************/
527
528static int
529em_detach(device_t dev)
530{
531	struct adapter * adapter = device_get_softc(dev);
532	struct ifnet   *ifp = adapter->ifp;
533
534	INIT_DEBUGOUT("em_detach: begin");
535
536#ifdef DEVICE_POLLING
537	if (ifp->if_capenable & IFCAP_POLLING)
538		ether_poll_deregister(ifp);
539#endif
540
541	EM_LOCK(adapter);
542	adapter->in_detach = 1;
543	em_stop(adapter);
544	em_phy_hw_reset(&adapter->hw);
545	EM_UNLOCK(adapter);
546        ether_ifdetach(adapter->ifp);
547
548	em_free_pci_resources(adapter);
549	bus_generic_detach(dev);
550	if_free(ifp);
551
552	/* Free Transmit Descriptor ring */
553        if (adapter->tx_desc_base) {
554                em_dma_free(adapter, &adapter->txdma);
555                adapter->tx_desc_base = NULL;
556        }
557
558        /* Free Receive Descriptor ring */
559        if (adapter->rx_desc_base) {
560                em_dma_free(adapter, &adapter->rxdma);
561                adapter->rx_desc_base = NULL;
562        }
563
564	/* Remove from the adapter list */
565	if (em_adapter_list == adapter)
566		em_adapter_list = adapter->next;
567	if (adapter->next != NULL)
568		adapter->next->prev = adapter->prev;
569	if (adapter->prev != NULL)
570		adapter->prev->next = adapter->next;
571
572	EM_LOCK_DESTROY(adapter);
573
574	return(0);
575}
576
577/*********************************************************************
578 *
579 *  Shutdown entry point
580 *
581 **********************************************************************/
582
583static int
584em_shutdown(device_t dev)
585{
586	struct adapter *adapter = device_get_softc(dev);
587	EM_LOCK(adapter);
588	em_stop(adapter);
589	EM_UNLOCK(adapter);
590	return(0);
591}
592
593
594/*********************************************************************
595 *  Transmit entry point
596 *
597 *  em_start is called by the stack to initiate a transmit.
598 *  The driver will remain in this routine as long as there are
599 *  packets to transmit and transmit resources are available.
600 *  In case resources are not available stack is notified and
601 *  the packet is requeued.
602 **********************************************************************/
603
604static void
605em_start_locked(struct ifnet *ifp)
606{
607        struct mbuf    *m_head;
608        struct adapter *adapter = ifp->if_softc;
609
610	mtx_assert(&adapter->mtx, MA_OWNED);
611
612        if (!adapter->link_active)
613                return;
614
615        while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
616
617                IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
618
619                if (m_head == NULL) break;
620
621		/*
622		 * em_encap() can modify our pointer, and or make it NULL on
623		 * failure.  In that event, we can't requeue.
624		 */
625		if (em_encap(adapter, &m_head)) {
626			if (m_head == NULL)
627				break;
628			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
629			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
630			break;
631                }
632
633		/* Send a copy of the frame to the BPF listener */
634		BPF_MTAP(ifp, m_head);
635
636                /* Set timeout in case hardware has problems transmitting */
637                ifp->if_timer = EM_TX_TIMEOUT;
638
639        }
640        return;
641}
642
643static void
644em_start(struct ifnet *ifp)
645{
646	struct adapter *adapter = ifp->if_softc;
647
648	EM_LOCK(adapter);
649	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
650		em_start_locked(ifp);
651	EM_UNLOCK(adapter);
652	return;
653}
654
655/*********************************************************************
656 *  Ioctl entry point
657 *
658 *  em_ioctl is called when the user wants to configure the
659 *  interface.
660 *
661 *  return 0 on success, positive on failure
662 **********************************************************************/
663
664static int
665em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
666{
667	int             mask, reinit, error = 0;
668	struct ifreq   *ifr = (struct ifreq *) data;
669	struct adapter * adapter = ifp->if_softc;
670
671	if (adapter->in_detach) return(error);
672
673	switch (command) {
674	case SIOCSIFADDR:
675	case SIOCGIFADDR:
676		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
677		ether_ioctl(ifp, command, data);
678		break;
679	case SIOCSIFMTU:
680		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
681		if (ifr->ifr_mtu > MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN || \
682			/* 82573 does not support jumbo frames */
683			(adapter->hw.mac_type == em_82573 && ifr->ifr_mtu > ETHERMTU) ) {
684			error = EINVAL;
685		} else {
686			EM_LOCK(adapter);
687			ifp->if_mtu = ifr->ifr_mtu;
688			adapter->hw.max_frame_size =
689			ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
690			em_init_locked(adapter);
691			EM_UNLOCK(adapter);
692		}
693		break;
694	case SIOCSIFFLAGS:
695		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
696		EM_LOCK(adapter);
697		if (ifp->if_flags & IFF_UP) {
698			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
699				em_init_locked(adapter);
700			}
701
702			em_disable_promisc(adapter);
703			em_set_promisc(adapter);
704		} else {
705			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
706				em_stop(adapter);
707			}
708		}
709		EM_UNLOCK(adapter);
710		break;
711	case SIOCADDMULTI:
712	case SIOCDELMULTI:
713		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
714		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
715			EM_LOCK(adapter);
716			em_disable_intr(adapter);
717			em_set_multi(adapter);
718			if (adapter->hw.mac_type == em_82542_rev2_0) {
719				em_initialize_receive_unit(adapter);
720			}
721#ifdef DEVICE_POLLING
722                        if (!(ifp->if_capenable & IFCAP_POLLING))
723#endif
724				em_enable_intr(adapter);
725			EM_UNLOCK(adapter);
726		}
727		break;
728	case SIOCSIFMEDIA:
729	case SIOCGIFMEDIA:
730		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
731		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
732		break;
733	case SIOCSIFCAP:
734		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
735		reinit = 0;
736		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
737#ifdef DEVICE_POLLING
738		if (mask & IFCAP_POLLING) {
739			if (ifr->ifr_reqcap & IFCAP_POLLING) {
740				error = ether_poll_register(em_poll, ifp);
741				if (error)
742					return(error);
743				EM_LOCK(adapter);
744				em_disable_intr(adapter);
745				ifp->if_capenable |= IFCAP_POLLING;
746				EM_UNLOCK(adapter);
747			} else {
748				error = ether_poll_deregister(ifp);
749				/* Enable interrupt even in error case */
750				EM_LOCK(adapter);
751				em_enable_intr(adapter);
752				ifp->if_capenable &= ~IFCAP_POLLING;
753				EM_UNLOCK(adapter);
754			}
755		}
756#endif
757		if (mask & IFCAP_HWCSUM) {
758			ifp->if_capenable ^= IFCAP_HWCSUM;
759			reinit = 1;
760		}
761		if (mask & IFCAP_VLAN_HWTAGGING) {
762			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
763			reinit = 1;
764		}
765		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
766			em_init(adapter);
767		break;
768	default:
769		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)", (int)command);
770		error = EINVAL;
771	}
772
773	return(error);
774}
775
776/*********************************************************************
777 *  Watchdog entry point
778 *
779 *  This routine is called whenever hardware quits transmitting.
780 *
781 **********************************************************************/
782
783static void
784em_watchdog(struct ifnet *ifp)
785{
786	struct adapter * adapter;
787	adapter = ifp->if_softc;
788
789	/* If we are in this routine because of pause frames, then
790	 * don't reset the hardware.
791	 */
792	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
793		ifp->if_timer = EM_TX_TIMEOUT;
794		return;
795	}
796
797	if (em_check_for_link(&adapter->hw))
798		printf("em%d: watchdog timeout -- resetting\n", adapter->unit);
799
800	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
801
802	em_init(adapter);
803
804	ifp->if_oerrors++;
805	return;
806}
807
808/*********************************************************************
809 *  Init entry point
810 *
811 *  This routine is used in two ways. It is used by the stack as
812 *  init entry point in network interface structure. It is also used
813 *  by the driver as a hw/sw initialization routine to get to a
814 *  consistent state.
815 *
816 *  return 0 on success, positive on failure
817 **********************************************************************/
818
819static void
820em_init_locked(struct adapter * adapter)
821{
822	struct ifnet   *ifp;
823
824	uint32_t	pba;
825	ifp = adapter->ifp;
826
827	INIT_DEBUGOUT("em_init: begin");
828
829	mtx_assert(&adapter->mtx, MA_OWNED);
830
831	em_stop(adapter);
832
833	/* Packet Buffer Allocation (PBA)
834	 * Writing PBA sets the receive portion of the buffer
835	 * the remainder is used for the transmit buffer.
836	 *
837	 * Devices before the 82547 had a Packet Buffer of 64K.
838	 *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
839	 * After the 82547 the buffer was reduced to 40K.
840	 *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
841	 *   Note: default does not leave enough room for Jumbo Frame >10k.
842	 */
843	if(adapter->hw.mac_type < em_82547) {
844		/* Total FIFO is 64K */
845		if(adapter->rx_buffer_len > EM_RXBUFFER_8192)
846			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
847		else
848			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
849	} else {
850		/* Total FIFO is 40K */
851		if(adapter->hw.max_frame_size > EM_RXBUFFER_8192) {
852			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
853		} else {
854		        pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
855		}
856		adapter->tx_fifo_head = 0;
857		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
858		adapter->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
859	}
860	INIT_DEBUGOUT1("em_init: pba=%dK",pba);
861	E1000_WRITE_REG(&adapter->hw, PBA, pba);
862
863	/* Get the latest mac address, User can use a LAA */
864        bcopy(IFP2ENADDR(adapter->ifp), adapter->hw.mac_addr,
865              ETHER_ADDR_LEN);
866
867	/* Initialize the hardware */
868	if (em_hardware_init(adapter)) {
869		printf("em%d: Unable to initialize the hardware\n",
870		       adapter->unit);
871		return;
872	}
873
874	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
875		em_enable_vlans(adapter);
876
877	/* Prepare transmit descriptors and buffers */
878	if (em_setup_transmit_structures(adapter)) {
879		printf("em%d: Could not setup transmit structures\n",
880		       adapter->unit);
881		em_stop(adapter);
882		return;
883	}
884	em_initialize_transmit_unit(adapter);
885
886	/* Setup Multicast table */
887	em_set_multi(adapter);
888
889	/* Prepare receive descriptors and buffers */
890	if (em_setup_receive_structures(adapter)) {
891		printf("em%d: Could not setup receive structures\n",
892		       adapter->unit);
893		em_stop(adapter);
894		return;
895	}
896	em_initialize_receive_unit(adapter);
897
898	/* Don't loose promiscuous settings */
899	em_set_promisc(adapter);
900
901	ifp->if_drv_flags |= IFF_DRV_RUNNING;
902	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
903
904	if (adapter->hw.mac_type >= em_82543) {
905		if (ifp->if_capenable & IFCAP_TXCSUM)
906			ifp->if_hwassist = EM_CHECKSUM_FEATURES;
907		else
908			ifp->if_hwassist = 0;
909	}
910
911	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
912	em_clear_hw_cntrs(&adapter->hw);
913#ifdef DEVICE_POLLING
914        /*
915         * Only enable interrupts if we are not polling, make sure
916         * they are off otherwise.
917         */
918        if (ifp->if_capenable & IFCAP_POLLING)
919                em_disable_intr(adapter);
920        else
921#endif /* DEVICE_POLLING */
922		em_enable_intr(adapter);
923
924	/* Don't reset the phy next time init gets called */
925	adapter->hw.phy_reset_disable = TRUE;
926
927	return;
928}
929
930static void
931em_init(void *arg)
932{
933	struct adapter * adapter = arg;
934
935	EM_LOCK(adapter);
936	em_init_locked(adapter);
937	EM_UNLOCK(adapter);
938	return;
939}
940
941
942#ifdef DEVICE_POLLING
943static void
944em_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
945{
946        struct adapter *adapter = ifp->if_softc;
947        u_int32_t reg_icr;
948
949	mtx_assert(&adapter->mtx, MA_OWNED);
950
951        if (cmd == POLL_AND_CHECK_STATUS) {
952                reg_icr = E1000_READ_REG(&adapter->hw, ICR);
953                if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
954			callout_stop(&adapter->timer);
955                        adapter->hw.get_link_status = 1;
956                        em_check_for_link(&adapter->hw);
957                        em_print_link_status(adapter);
958			callout_reset(&adapter->timer, hz, em_local_timer, adapter);
959                }
960        }
961	em_process_receive_interrupts(adapter, count);
962	em_clean_transmit_interrupts(adapter);
963
964        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
965                em_start_locked(ifp);
966}
967
968static void
969em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
970{
971        struct adapter *adapter = ifp->if_softc;
972
973	EM_LOCK(adapter);
974	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
975		em_poll_locked(ifp, cmd, count);
976	EM_UNLOCK(adapter);
977}
978#endif /* DEVICE_POLLING */
979
980/*********************************************************************
981 *
982 *  Interrupt Service routine
983 *
984 **********************************************************************/
985static void
986em_intr(void *arg)
987{
988        u_int32_t       loop_cnt = EM_MAX_INTR;
989        u_int32_t       reg_icr;
990        struct ifnet    *ifp;
991        struct adapter  *adapter = arg;
992
993	EM_LOCK(adapter);
994
995        ifp = adapter->ifp;
996
997#ifdef DEVICE_POLLING
998        if (ifp->if_capenable & IFCAP_POLLING) {
999		EM_UNLOCK(adapter);
1000                return;
1001	}
1002#endif /* DEVICE_POLLING */
1003
1004	reg_icr = E1000_READ_REG(&adapter->hw, ICR);
1005        if (!reg_icr) {
1006		EM_UNLOCK(adapter);
1007                return;
1008        }
1009
1010        /* Link status change */
1011        if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1012		callout_stop(&adapter->timer);
1013                adapter->hw.get_link_status = 1;
1014                em_check_for_link(&adapter->hw);
1015                em_print_link_status(adapter);
1016		callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1017        }
1018
1019        while (loop_cnt > 0) {
1020                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1021                        em_process_receive_interrupts(adapter, -1);
1022                        em_clean_transmit_interrupts(adapter);
1023                }
1024                loop_cnt--;
1025        }
1026
1027        if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1028	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1029                em_start_locked(ifp);
1030
1031	EM_UNLOCK(adapter);
1032        return;
1033}
1034
1035
1036
1037/*********************************************************************
1038 *
1039 *  Media Ioctl callback
1040 *
1041 *  This routine is called whenever the user queries the status of
1042 *  the interface using ifconfig.
1043 *
1044 **********************************************************************/
1045static void
1046em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1047{
1048	struct adapter * adapter = ifp->if_softc;
1049
1050	INIT_DEBUGOUT("em_media_status: begin");
1051
1052	em_check_for_link(&adapter->hw);
1053	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1054		if (adapter->link_active == 0) {
1055			em_get_speed_and_duplex(&adapter->hw,
1056						&adapter->link_speed,
1057						&adapter->link_duplex);
1058			adapter->link_active = 1;
1059		}
1060	} else {
1061		if (adapter->link_active == 1) {
1062			adapter->link_speed = 0;
1063			adapter->link_duplex = 0;
1064			adapter->link_active = 0;
1065		}
1066	}
1067
1068	ifmr->ifm_status = IFM_AVALID;
1069	ifmr->ifm_active = IFM_ETHER;
1070
1071	if (!adapter->link_active)
1072		return;
1073
1074	ifmr->ifm_status |= IFM_ACTIVE;
1075
1076	if (adapter->hw.media_type == em_media_type_fiber) {
1077		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1078	} else {
1079		switch (adapter->link_speed) {
1080		case 10:
1081			ifmr->ifm_active |= IFM_10_T;
1082			break;
1083		case 100:
1084			ifmr->ifm_active |= IFM_100_TX;
1085			break;
1086		case 1000:
1087			ifmr->ifm_active |= IFM_1000_T;
1088			break;
1089		}
1090		if (adapter->link_duplex == FULL_DUPLEX)
1091			ifmr->ifm_active |= IFM_FDX;
1092		else
1093			ifmr->ifm_active |= IFM_HDX;
1094	}
1095	return;
1096}
1097
1098/*********************************************************************
1099 *
1100 *  Media Ioctl callback
1101 *
1102 *  This routine is called when the user changes speed/duplex using
1103 *  media/mediopt option with ifconfig.
1104 *
1105 **********************************************************************/
1106static int
1107em_media_change(struct ifnet *ifp)
1108{
1109	struct adapter * adapter = ifp->if_softc;
1110	struct ifmedia  *ifm = &adapter->media;
1111
1112	INIT_DEBUGOUT("em_media_change: begin");
1113
1114	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1115		return(EINVAL);
1116
1117	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1118	case IFM_AUTO:
1119		adapter->hw.autoneg = DO_AUTO_NEG;
1120		adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1121		break;
1122	case IFM_1000_SX:
1123	case IFM_1000_T:
1124		adapter->hw.autoneg = DO_AUTO_NEG;
1125		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1126		break;
1127	case IFM_100_TX:
1128		adapter->hw.autoneg = FALSE;
1129		adapter->hw.autoneg_advertised = 0;
1130		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1131			adapter->hw.forced_speed_duplex = em_100_full;
1132		else
1133			adapter->hw.forced_speed_duplex	= em_100_half;
1134		break;
1135	case IFM_10_T:
1136		adapter->hw.autoneg = FALSE;
1137		adapter->hw.autoneg_advertised = 0;
1138		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1139			adapter->hw.forced_speed_duplex = em_10_full;
1140		else
1141			adapter->hw.forced_speed_duplex	= em_10_half;
1142		break;
1143	default:
1144		printf("em%d: Unsupported media type\n", adapter->unit);
1145	}
1146
1147	/* As the speed/duplex settings my have changed we need to
1148	 * reset the PHY.
1149	 */
1150	adapter->hw.phy_reset_disable = FALSE;
1151
1152	em_init(adapter);
1153
1154	return(0);
1155}
1156
1157/*********************************************************************
1158 *
1159 *  This routine maps the mbufs to tx descriptors.
1160 *
1161 *  return 0 on success, positive on failure
1162 **********************************************************************/
1163static int
1164em_encap(struct adapter *adapter, struct mbuf **m_headp)
1165{
1166        u_int32_t       txd_upper;
1167        u_int32_t       txd_lower, txd_used = 0, txd_saved = 0;
1168        int             i, j, error;
1169        u_int64_t       address;
1170
1171	struct mbuf	*m_head;
1172
1173	/* For 82544 Workaround */
1174	DESC_ARRAY              desc_array;
1175	u_int32_t               array_elements;
1176	u_int32_t               counter;
1177        struct m_tag    *mtag;
1178	bus_dma_segment_t	segs[EM_MAX_SCATTER];
1179	bus_dmamap_t		map;
1180	int			nsegs;
1181        struct em_buffer   *tx_buffer = NULL;
1182        struct em_tx_desc *current_tx_desc = NULL;
1183        struct ifnet   *ifp = adapter->ifp;
1184
1185	m_head = *m_headp;
1186
1187        /*
1188         * Force a cleanup if number of TX descriptors
1189         * available hits the threshold
1190         */
1191        if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1192                em_clean_transmit_interrupts(adapter);
1193                if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1194                        adapter->no_tx_desc_avail1++;
1195                        return(ENOBUFS);
1196                }
1197        }
1198
1199        /*
1200         * Map the packet for DMA.
1201         */
1202        if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) {
1203                adapter->no_tx_map_avail++;
1204                return (ENOMEM);
1205        }
1206        error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs,
1207					&nsegs, BUS_DMA_NOWAIT);
1208        if (error != 0) {
1209                adapter->no_tx_dma_setup++;
1210                bus_dmamap_destroy(adapter->txtag, map);
1211                return (error);
1212        }
1213        KASSERT(nsegs != 0, ("em_encap: empty packet"));
1214
1215        if (nsegs > adapter->num_tx_desc_avail) {
1216                adapter->no_tx_desc_avail2++;
1217                bus_dmamap_destroy(adapter->txtag, map);
1218                return (ENOBUFS);
1219        }
1220
1221
1222        if (ifp->if_hwassist > 0) {
1223                em_transmit_checksum_setup(adapter,  m_head,
1224                                           &txd_upper, &txd_lower);
1225        } else
1226                txd_upper = txd_lower = 0;
1227
1228
1229        /* Find out if we are in vlan mode */
1230        mtag = VLAN_OUTPUT_TAG(ifp, m_head);
1231
1232	/*
1233	 * When operating in promiscuous mode, hardware encapsulation for
1234	 * packets is disabled.  This means we have to add the vlan
1235	 * encapsulation in the driver, since it will have come down from the
1236	 * VLAN layer with a tag instead of a VLAN header.
1237	 */
1238	if (mtag != NULL && adapter->em_insert_vlan_header) {
1239		struct ether_vlan_header *evl;
1240		struct ether_header eh;
1241
1242		m_head = m_pullup(m_head, sizeof(eh));
1243		if (m_head == NULL) {
1244			*m_headp = NULL;
1245                	bus_dmamap_destroy(adapter->txtag, map);
1246			return (ENOBUFS);
1247		}
1248		eh = *mtod(m_head, struct ether_header *);
1249		M_PREPEND(m_head, sizeof(*evl), M_DONTWAIT);
1250		if (m_head == NULL) {
1251			*m_headp = NULL;
1252                	bus_dmamap_destroy(adapter->txtag, map);
1253			return (ENOBUFS);
1254		}
1255		m_head = m_pullup(m_head, sizeof(*evl));
1256		if (m_head == NULL) {
1257			*m_headp = NULL;
1258                	bus_dmamap_destroy(adapter->txtag, map);
1259			return (ENOBUFS);
1260		}
1261		evl = mtod(m_head, struct ether_vlan_header *);
1262		bcopy(&eh, evl, sizeof(*evl));
1263		evl->evl_proto = evl->evl_encap_proto;
1264		evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
1265		evl->evl_tag = htons(VLAN_TAG_VALUE(mtag));
1266		m_tag_delete(m_head, mtag);
1267		mtag = NULL;
1268		*m_headp = m_head;
1269	}
1270
1271        i = adapter->next_avail_tx_desc;
1272	if (adapter->pcix_82544) {
1273		txd_saved = i;
1274		txd_used = 0;
1275	}
1276        for (j = 0; j < nsegs; j++) {
1277		/* If adapter is 82544 and on PCIX bus */
1278		if(adapter->pcix_82544) {
1279			array_elements = 0;
1280			address = htole64(segs[j].ds_addr);
1281			/*
1282			 * Check the Address and Length combination and
1283			 * split the data accordingly
1284			 */
1285                        array_elements = em_fill_descriptors(address,
1286							     htole32(segs[j].ds_len),
1287							     &desc_array);
1288			for (counter = 0; counter < array_elements; counter++) {
1289                                if (txd_used == adapter->num_tx_desc_avail) {
1290                                         adapter->next_avail_tx_desc = txd_saved;
1291                                          adapter->no_tx_desc_avail2++;
1292					  bus_dmamap_destroy(adapter->txtag, map);
1293                                          return (ENOBUFS);
1294                                }
1295                                tx_buffer = &adapter->tx_buffer_area[i];
1296                                current_tx_desc = &adapter->tx_desc_base[i];
1297                                current_tx_desc->buffer_addr = htole64(
1298					desc_array.descriptor[counter].address);
1299                                current_tx_desc->lower.data = htole32(
1300					(adapter->txd_cmd | txd_lower |
1301					 (u_int16_t)desc_array.descriptor[counter].length));
1302                                current_tx_desc->upper.data = htole32((txd_upper));
1303                                if (++i == adapter->num_tx_desc)
1304                                         i = 0;
1305
1306                                tx_buffer->m_head = NULL;
1307                                txd_used++;
1308                        }
1309		} else {
1310			tx_buffer = &adapter->tx_buffer_area[i];
1311			current_tx_desc = &adapter->tx_desc_base[i];
1312
1313			current_tx_desc->buffer_addr = htole64(segs[j].ds_addr);
1314			current_tx_desc->lower.data = htole32(
1315				adapter->txd_cmd | txd_lower | segs[j].ds_len);
1316			current_tx_desc->upper.data = htole32(txd_upper);
1317
1318			if (++i == adapter->num_tx_desc)
1319				i = 0;
1320
1321			tx_buffer->m_head = NULL;
1322		}
1323        }
1324
1325	adapter->next_avail_tx_desc = i;
1326	if (adapter->pcix_82544) {
1327		adapter->num_tx_desc_avail -= txd_used;
1328	}
1329	else {
1330		adapter->num_tx_desc_avail -= nsegs;
1331	}
1332
1333        if (mtag != NULL) {
1334                /* Set the vlan id */
1335                current_tx_desc->upper.fields.special = htole16(VLAN_TAG_VALUE(mtag));
1336
1337                /* Tell hardware to add tag */
1338                current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
1339        }
1340
1341        tx_buffer->m_head = m_head;
1342        tx_buffer->map = map;
1343        bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1344
1345        /*
1346         * Last Descriptor of Packet needs End Of Packet (EOP)
1347         */
1348        current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);
1349
1350        /*
1351         * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
1352         * that this frame is available to transmit.
1353         */
1354        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1355            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1356        if (adapter->hw.mac_type == em_82547 &&
1357            adapter->link_duplex == HALF_DUPLEX) {
1358                em_82547_move_tail_locked(adapter);
1359        } else {
1360                E1000_WRITE_REG(&adapter->hw, TDT, i);
1361                if (adapter->hw.mac_type == em_82547) {
1362                        em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len);
1363                }
1364        }
1365
1366        return(0);
1367}
1368
1369/*********************************************************************
1370 *
1371 * 82547 workaround to avoid controller hang in half-duplex environment.
1372 * The workaround is to avoid queuing a large packet that would span
1373 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1374 * in this case. We do that only when FIFO is quiescent.
1375 *
1376 **********************************************************************/
1377static void
1378em_82547_move_tail_locked(struct adapter *adapter)
1379{
1380	uint16_t hw_tdt;
1381	uint16_t sw_tdt;
1382	struct em_tx_desc *tx_desc;
1383	uint16_t length = 0;
1384	boolean_t eop = 0;
1385
1386	EM_LOCK_ASSERT(adapter);
1387
1388	hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
1389	sw_tdt = adapter->next_avail_tx_desc;
1390
1391	while (hw_tdt != sw_tdt) {
1392		tx_desc = &adapter->tx_desc_base[hw_tdt];
1393		length += tx_desc->lower.flags.length;
1394		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1395		if(++hw_tdt == adapter->num_tx_desc)
1396			hw_tdt = 0;
1397
1398		if(eop) {
1399			if (em_82547_fifo_workaround(adapter, length)) {
1400				adapter->tx_fifo_wrk_cnt++;
1401				callout_reset(&adapter->tx_fifo_timer, 1,
1402					em_82547_move_tail, adapter);
1403				break;
1404			}
1405			E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
1406			em_82547_update_fifo_head(adapter, length);
1407			length = 0;
1408		}
1409	}
1410	return;
1411}
1412
1413static void
1414em_82547_move_tail(void *arg)
1415{
1416        struct adapter *adapter = arg;
1417
1418        EM_LOCK(adapter);
1419        em_82547_move_tail_locked(adapter);
1420        EM_UNLOCK(adapter);
1421}
1422
1423static int
1424em_82547_fifo_workaround(struct adapter *adapter, int len)
1425{
1426	int fifo_space, fifo_pkt_len;
1427
1428	fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1429
1430	if (adapter->link_duplex == HALF_DUPLEX) {
1431		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1432
1433		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1434			if (em_82547_tx_fifo_reset(adapter)) {
1435				return(0);
1436			}
1437			else {
1438				return(1);
1439			}
1440		}
1441	}
1442
1443	return(0);
1444}
1445
1446static void
1447em_82547_update_fifo_head(struct adapter *adapter, int len)
1448{
1449	int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1450
1451	/* tx_fifo_head is always 16 byte aligned */
1452	adapter->tx_fifo_head += fifo_pkt_len;
1453	if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1454		adapter->tx_fifo_head -= adapter->tx_fifo_size;
1455	}
1456
1457	return;
1458}
1459
1460
1461static int
1462em_82547_tx_fifo_reset(struct adapter *adapter)
1463{
1464	uint32_t tctl;
1465
1466	if ( (E1000_READ_REG(&adapter->hw, TDT) ==
1467	      E1000_READ_REG(&adapter->hw, TDH)) &&
1468	     (E1000_READ_REG(&adapter->hw, TDFT) ==
1469	      E1000_READ_REG(&adapter->hw, TDFH)) &&
1470	     (E1000_READ_REG(&adapter->hw, TDFTS) ==
1471	      E1000_READ_REG(&adapter->hw, TDFHS)) &&
1472	     (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {
1473
1474		/* Disable TX unit */
1475		tctl = E1000_READ_REG(&adapter->hw, TCTL);
1476		E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);
1477
1478		/* Reset FIFO pointers */
1479		E1000_WRITE_REG(&adapter->hw, TDFT,  adapter->tx_head_addr);
1480		E1000_WRITE_REG(&adapter->hw, TDFH,  adapter->tx_head_addr);
1481		E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
1482		E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);
1483
1484		/* Re-enable TX unit */
1485		E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1486		E1000_WRITE_FLUSH(&adapter->hw);
1487
1488		adapter->tx_fifo_head = 0;
1489		adapter->tx_fifo_reset_cnt++;
1490
1491		return(TRUE);
1492	}
1493	else {
1494		return(FALSE);
1495	}
1496}
1497
1498static void
1499em_set_promisc(struct adapter * adapter)
1500{
1501
1502	u_int32_t       reg_rctl;
1503	struct ifnet   *ifp = adapter->ifp;
1504
1505	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1506
1507	if (ifp->if_flags & IFF_PROMISC) {
1508		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1509		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1510		/* Disable VLAN stripping in promiscous mode
1511		 * This enables bridging of vlan tagged frames to occur
1512		 * and also allows vlan tags to be seen in tcpdump
1513		 */
1514		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1515			em_disable_vlans(adapter);
1516		adapter->em_insert_vlan_header = 1;
1517	} else if (ifp->if_flags & IFF_ALLMULTI) {
1518		reg_rctl |= E1000_RCTL_MPE;
1519		reg_rctl &= ~E1000_RCTL_UPE;
1520		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1521		adapter->em_insert_vlan_header = 0;
1522	} else
1523		adapter->em_insert_vlan_header = 0;
1524
1525	return;
1526}
1527
1528static void
1529em_disable_promisc(struct adapter * adapter)
1530{
1531	u_int32_t       reg_rctl;
1532	struct ifnet   *ifp = adapter->ifp;
1533
1534	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1535
1536	reg_rctl &=  (~E1000_RCTL_UPE);
1537	reg_rctl &=  (~E1000_RCTL_MPE);
1538	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1539
1540	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1541		em_enable_vlans(adapter);
1542	adapter->em_insert_vlan_header = 0;
1543
1544	return;
1545}
1546
1547
1548/*********************************************************************
1549 *  Multicast Update
1550 *
1551 *  This routine is called whenever multicast address list is updated.
1552 *
1553 **********************************************************************/
1554
1555static void
1556em_set_multi(struct adapter * adapter)
1557{
1558        u_int32_t reg_rctl = 0;
1559        u_int8_t  mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
1560        struct ifmultiaddr  *ifma;
1561        int mcnt = 0;
1562        struct ifnet   *ifp = adapter->ifp;
1563
1564        IOCTL_DEBUGOUT("em_set_multi: begin");
1565
1566        if (adapter->hw.mac_type == em_82542_rev2_0) {
1567                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1568                if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
1569                        em_pci_clear_mwi(&adapter->hw);
1570                }
1571                reg_rctl |= E1000_RCTL_RST;
1572                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1573                msec_delay(5);
1574        }
1575
1576	IF_ADDR_LOCK(ifp);
1577        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1578                if (ifma->ifma_addr->sa_family != AF_LINK)
1579                        continue;
1580
1581		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break;
1582
1583                bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1584                      &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
1585                mcnt++;
1586        }
1587	IF_ADDR_UNLOCK(ifp);
1588
1589        if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1590                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1591                reg_rctl |= E1000_RCTL_MPE;
1592                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1593        } else
1594                em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);
1595
1596        if (adapter->hw.mac_type == em_82542_rev2_0) {
1597                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1598                reg_rctl &= ~E1000_RCTL_RST;
1599                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1600                msec_delay(5);
1601                if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
1602                        em_pci_set_mwi(&adapter->hw);
1603                }
1604        }
1605
1606        return;
1607}
1608
1609
1610/*********************************************************************
1611 *  Timer routine
1612 *
1613 *  This routine checks for link status and updates statistics.
1614 *
1615 **********************************************************************/
1616
1617static void
1618em_local_timer(void *arg)
1619{
1620	struct ifnet   *ifp;
1621	struct adapter * adapter = arg;
1622	ifp = adapter->ifp;
1623
1624	EM_LOCK(adapter);
1625
1626	em_check_for_link(&adapter->hw);
1627	em_print_link_status(adapter);
1628	em_update_stats_counters(adapter);
1629	if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1630		em_print_hw_stats(adapter);
1631	}
1632	em_smartspeed(adapter);
1633
1634	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1635
1636	EM_UNLOCK(adapter);
1637	return;
1638}
1639
1640static void
1641em_print_link_status(struct adapter * adapter)
1642{
1643	struct ifnet *ifp = adapter->ifp;
1644
1645	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1646		if (adapter->link_active == 0) {
1647			em_get_speed_and_duplex(&adapter->hw,
1648						&adapter->link_speed,
1649						&adapter->link_duplex);
1650			if (bootverbose)
1651				printf("em%d: Link is up %d Mbps %s\n",
1652				       adapter->unit,
1653				       adapter->link_speed,
1654				       ((adapter->link_duplex == FULL_DUPLEX) ?
1655					"Full Duplex" : "Half Duplex"));
1656			adapter->link_active = 1;
1657			adapter->smartspeed = 0;
1658			if_link_state_change(ifp, LINK_STATE_UP);
1659		}
1660	} else {
1661		if (adapter->link_active == 1) {
1662			adapter->link_speed = 0;
1663			adapter->link_duplex = 0;
1664			if (bootverbose)
1665				printf("em%d: Link is Down\n", adapter->unit);
1666			adapter->link_active = 0;
1667			if_link_state_change(ifp, LINK_STATE_DOWN);
1668		}
1669	}
1670
1671	return;
1672}
1673
1674/*********************************************************************
1675 *
1676 *  This routine disables all traffic on the adapter by issuing a
1677 *  global reset on the MAC and deallocates TX/RX buffers.
1678 *
1679 **********************************************************************/
1680
1681static void
1682em_stop(void *arg)
1683{
1684	struct ifnet   *ifp;
1685	struct adapter * adapter = arg;
1686	ifp = adapter->ifp;
1687
1688	mtx_assert(&adapter->mtx, MA_OWNED);
1689
1690	INIT_DEBUGOUT("em_stop: begin");
1691
1692	em_disable_intr(adapter);
1693	em_reset_hw(&adapter->hw);
1694	callout_stop(&adapter->timer);
1695	callout_stop(&adapter->tx_fifo_timer);
1696	em_free_transmit_structures(adapter);
1697	em_free_receive_structures(adapter);
1698
1699
1700	/* Tell the stack that the interface is no longer active */
1701	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1702
1703	return;
1704}
1705
1706
1707/*********************************************************************
1708 *
1709 *  Determine hardware revision.
1710 *
1711 **********************************************************************/
1712static void
1713em_identify_hardware(struct adapter * adapter)
1714{
1715	device_t dev = adapter->dev;
1716
1717	/* Make sure our PCI config space has the necessary stuff set */
1718	adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1719	if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1720	      (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1721		printf("em%d: Memory Access and/or Bus Master bits were not set!\n",
1722		       adapter->unit);
1723		adapter->hw.pci_cmd_word |=
1724		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1725		pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
1726	}
1727
1728	/* Save off the information about this board */
1729	adapter->hw.vendor_id = pci_get_vendor(dev);
1730	adapter->hw.device_id = pci_get_device(dev);
1731	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1732	adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
1733	adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
1734
1735	/* Identify the MAC */
1736        if (em_set_mac_type(&adapter->hw))
1737                printf("em%d: Unknown MAC Type\n", adapter->unit);
1738
1739	if(adapter->hw.mac_type == em_82541 ||
1740	   adapter->hw.mac_type == em_82541_rev_2 ||
1741	   adapter->hw.mac_type == em_82547 ||
1742	   adapter->hw.mac_type == em_82547_rev_2)
1743		adapter->hw.phy_init_script = TRUE;
1744
1745        return;
1746}
1747
1748static int
1749em_allocate_pci_resources(struct adapter * adapter)
1750{
1751	int             i, val, rid;
1752	device_t        dev = adapter->dev;
1753
1754	rid = EM_MMBA;
1755	adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1756						     &rid, RF_ACTIVE);
1757	if (!(adapter->res_memory)) {
1758		printf("em%d: Unable to allocate bus resource: memory\n",
1759		       adapter->unit);
1760		return(ENXIO);
1761	}
1762	adapter->osdep.mem_bus_space_tag =
1763	rman_get_bustag(adapter->res_memory);
1764	adapter->osdep.mem_bus_space_handle =
1765	rman_get_bushandle(adapter->res_memory);
1766	adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
1767
1768
1769	if (adapter->hw.mac_type > em_82543) {
1770		/* Figure out where our I/O BAR is (bit 0 of a BAR reads as 1 for I/O space) */
1771		rid = EM_MMBA;
1772		for (i = 0; i < 5; i++) {
1773			val = pci_read_config(dev, rid, 4);
1774			if (val & 0x00000001) {
1775				adapter->io_rid = rid;
1776				break;
1777			}
1778			rid += 4;
1779		}
1780
1781		adapter->res_ioport = bus_alloc_resource_any(dev,
1782							     SYS_RES_IOPORT,
1783							     &adapter->io_rid,
1784							     RF_ACTIVE);
1785		if (!(adapter->res_ioport)) {
1786			printf("em%d: Unable to allocate bus resource: ioport\n",
1787			       adapter->unit);
1788			return(ENXIO);
1789		}
1790
1791		adapter->hw.io_base =
1792		rman_get_start(adapter->res_ioport);
1793	}
1794
1795	rid = 0x0;
1796	adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1797						        RF_SHAREABLE |
1798							RF_ACTIVE);
1799	if (!(adapter->res_interrupt)) {
1800		printf("em%d: Unable to allocate bus resource: interrupt\n",
1801		       adapter->unit);
1802		return(ENXIO);
1803	}
1804	if (bus_setup_intr(dev, adapter->res_interrupt,
1805			   INTR_TYPE_NET | INTR_MPSAFE,
1806			   (void (*)(void *)) em_intr, adapter,
1807			   &adapter->int_handler_tag)) {
1808		printf("em%d: Error registering interrupt handler!\n",
1809		       adapter->unit);
1810		return(ENXIO);
1811	}
1812
1813	adapter->hw.back = &adapter->osdep;
1814
1815	return(0);
1816}
1817
1818static void
1819em_free_pci_resources(struct adapter * adapter)
1820{
1821	device_t dev = adapter->dev;
1822
1823	if (adapter->res_interrupt != NULL) {
1824		bus_teardown_intr(dev, adapter->res_interrupt,
1825				  adapter->int_handler_tag);
1826		bus_release_resource(dev, SYS_RES_IRQ, 0,
1827				     adapter->res_interrupt);
1828	}
1829	if (adapter->res_memory != NULL) {
1830		bus_release_resource(dev, SYS_RES_MEMORY, EM_MMBA,
1831				     adapter->res_memory);
1832	}
1833
1834	if (adapter->res_ioport != NULL) {
1835		bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
1836				     adapter->res_ioport);
1837	}
1838	return;
1839}
1840
1841/*********************************************************************
1842 *
1843 *  Initialize the hardware to a configuration as specified by the
1844 *  adapter structure. The controller is reset, the EEPROM is
1845 *  verified, the MAC address is set, then the shared initialization
1846 *  routines are called.
1847 *
1848 **********************************************************************/
1849static int
1850em_hardware_init(struct adapter * adapter)
1851{
1852        INIT_DEBUGOUT("em_hardware_init: begin");
1853	/* Issue a global reset */
1854	em_reset_hw(&adapter->hw);
1855
1856	/* When hardware is reset, fifo_head is also reset */
1857	adapter->tx_fifo_head = 0;
1858
1859	/* Make sure we have a good EEPROM before we read from it */
1860	if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
1861		printf("em%d: The EEPROM Checksum Is Not Valid\n",
1862		       adapter->unit);
1863		return(EIO);
1864	}
1865
1866	if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
1867		printf("em%d: EEPROM read error while reading part number\n",
1868		       adapter->unit);
1869		return(EIO);
1870	}
1871
1872	if (em_init_hw(&adapter->hw) < 0) {
1873		printf("em%d: Hardware Initialization Failed\n",
1874		       adapter->unit);
1875		return(EIO);
1876	}
1877
1878	em_check_for_link(&adapter->hw);
1879	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
1880		adapter->link_active = 1;
1881	else
1882		adapter->link_active = 0;
1883
1884	if (adapter->link_active) {
1885		em_get_speed_and_duplex(&adapter->hw,
1886					&adapter->link_speed,
1887					&adapter->link_duplex);
1888	} else {
1889		adapter->link_speed = 0;
1890		adapter->link_duplex = 0;
1891	}
1892
1893	return(0);
1894}
1895
1896/*********************************************************************
1897 *
1898 *  Setup networking device structure and register an interface.
1899 *
1900 **********************************************************************/
1901static void
1902em_setup_interface(device_t dev, struct adapter * adapter)
1903{
1904	struct ifnet   *ifp;
1905	INIT_DEBUGOUT("em_setup_interface: begin");
1906
1907	ifp = adapter->ifp = if_alloc(IFT_ETHER);
1908	if (ifp == NULL)
1909		panic("%s: can not if_alloc()", device_get_nameunit(dev));
1910	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1911	ifp->if_mtu = ETHERMTU;
1912	ifp->if_baudrate = 1000000000;
1913	ifp->if_init =  em_init;
1914	ifp->if_softc = adapter;
1915	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1916	ifp->if_ioctl = em_ioctl;
1917	ifp->if_start = em_start;
1918	ifp->if_watchdog = em_watchdog;
1919	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
1920	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
1921	IFQ_SET_READY(&ifp->if_snd);
1922
1923        ether_ifattach(ifp, adapter->hw.mac_addr);
1924
1925	ifp->if_capabilities = ifp->if_capenable = 0;
1926
1927	if (adapter->hw.mac_type >= em_82543) {
1928		ifp->if_capabilities |= IFCAP_HWCSUM;
1929		ifp->if_capenable |= IFCAP_HWCSUM;
1930	}
1931
1932	/*
1933	 * Tell the upper layer(s) we support long frames.
1934	 */
1935	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1936	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1937	ifp->if_capenable |= IFCAP_VLAN_MTU;
1938
1939#ifdef DEVICE_POLLING
1940	ifp->if_capabilities |= IFCAP_POLLING;
1941#endif
1942
1943	/*
1944	 * Specify the media types supported by this adapter and register
1945	 * callbacks to update media and link information
1946	 */
1947	ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
1948		     em_media_status);
1949	if (adapter->hw.media_type == em_media_type_fiber) {
1950		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
1951			    0, NULL);
1952		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
1953			    0, NULL);
1954	} else {
1955		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1956		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
1957			    0, NULL);
1958		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
1959			    0, NULL);
1960		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
1961			    0, NULL);
1962		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1963			    0, NULL);
1964		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1965	}
1966	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1967	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1968
1969	return;
1970}
1971
1972
1973/*********************************************************************
1974 *
1975 *  Workaround for SmartSpeed on 82541 and 82547 controllers
1976 *
1977 **********************************************************************/
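/*
 * Note (behaviour inferred from the code below): this routine is driven from
 * the per-second timer while 1000BASE-T full duplex is advertised but no link
 * has come up.  The adapter->smartspeed counter sequences the workaround:
 * at 0 it looks for back-to-back master/slave configuration faults and clears
 * CR_1000T_MS_ENABLE before restarting autonegotiation; at
 * EM_SMARTSPEED_DOWNSHIFT it re-enables manual master/slave configuration in
 * case a 2/3 pair cable is in use; after EM_SMARTSPEED_MAX ticks the counter
 * wraps and the sequence starts over.
 */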
1978static void
1979em_smartspeed(struct adapter *adapter)
1980{
1981        uint16_t phy_tmp;
1982
1983	if(adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
1984	   !adapter->hw.autoneg || !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
1985		return;
1986
1987        if(adapter->smartspeed == 0) {
1988                /* If Master/Slave config fault is asserted twice,
1989                 * we assume back-to-back */
1990                em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1991                if(!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) return;
1992                em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1993                if(phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
1994                        em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
1995					&phy_tmp);
1996                        if(phy_tmp & CR_1000T_MS_ENABLE) {
1997                                phy_tmp &= ~CR_1000T_MS_ENABLE;
1998                                em_write_phy_reg(&adapter->hw,
1999                                                    PHY_1000T_CTRL, phy_tmp);
2000                                adapter->smartspeed++;
2001                                if(adapter->hw.autoneg &&
2002                                   !em_phy_setup_autoneg(&adapter->hw) &&
2003				   !em_read_phy_reg(&adapter->hw, PHY_CTRL,
2004                                                       &phy_tmp)) {
2005                                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
2006                                                    MII_CR_RESTART_AUTO_NEG);
2007                                        em_write_phy_reg(&adapter->hw,
2008							 PHY_CTRL, phy_tmp);
2009                                }
2010                        }
2011                }
2012                return;
2013        } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2014                /* If still no link, perhaps using 2/3 pair cable */
2015                em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2016                phy_tmp |= CR_1000T_MS_ENABLE;
2017                em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2018                if(adapter->hw.autoneg &&
2019                   !em_phy_setup_autoneg(&adapter->hw) &&
2020                   !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
2021                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
2022                                    MII_CR_RESTART_AUTO_NEG);
2023                        em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
2024                }
2025        }
2026        /* Restart process after EM_SMARTSPEED_MAX iterations */
2027        if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2028                adapter->smartspeed = 0;
2029
2030	return;
2031}
2032
2033
2034/*
2035 * Manage DMA'able memory.
2036 */
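/*
 * Hedged usage sketch (illustration only, not a quote of the driver): a
 * descriptor ring is typically obtained and released with something like
 *
 *	if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT))
 *		return (ENOMEM);
 *	...
 *	em_dma_free(adapter, &adapter->txdma);
 *
 * where "tsize" stands in for the ring size in bytes.
 */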
2037static void
2038em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2039{
2040        if (error)
2041                return;
2042        *(bus_addr_t*) arg = segs->ds_addr;
2043        return;
2044}
2045
2046static int
2047em_dma_malloc(struct adapter *adapter, bus_size_t size,
2048        struct em_dma_alloc *dma, int mapflags)
2049{
2050        int r;
2051
2052        r = bus_dma_tag_create(NULL,                    /* parent */
2053                               PAGE_SIZE, 0,            /* alignment, bounds */
2054                               BUS_SPACE_MAXADDR,       /* lowaddr */
2055                               BUS_SPACE_MAXADDR,       /* highaddr */
2056                               NULL, NULL,              /* filter, filterarg */
2057                               size,                    /* maxsize */
2058                               1,                       /* nsegments */
2059                               size,                    /* maxsegsize */
2060                               BUS_DMA_ALLOCNOW,        /* flags */
2061			       NULL,			/* lockfunc */
2062			       NULL,			/* lockarg */
2063                               &dma->dma_tag);
2064        if (r != 0) {
2065                printf("em%d: em_dma_malloc: bus_dma_tag_create failed; "
2066                        "error %u\n", adapter->unit, r);
2067                goto fail_0;
2068        }
2069
2070        r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2071                             BUS_DMA_NOWAIT, &dma->dma_map);
2072        if (r != 0) {
2073                printf("em%d: em_dma_malloc: bus_dmamem_alloc failed; "
2074                        "size %ju, error %d\n", adapter->unit,
2075			(uintmax_t)size, r);
2076                goto fail_2;
2077        }
2078
2079        r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2080                            size,
2081                            em_dmamap_cb,
2082                            &dma->dma_paddr,
2083                            mapflags | BUS_DMA_NOWAIT);
2084        if (r != 0) {
2085                printf("em%d: em_dma_malloc: bus_dmamap_load failed; "
2086                        "error %u\n", adapter->unit, r);
2087                goto fail_3;
2088        }
2089
2090        dma->dma_size = size;
2091        return (0);
2092
2093fail_3:
2094        bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2095fail_2:
2096        bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2097        bus_dma_tag_destroy(dma->dma_tag);
2098fail_0:
2099        dma->dma_map = NULL;
2100        dma->dma_tag = NULL;
2101        return (r);
2102}
2103
2104static void
2105em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2106{
2107        bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2108        bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2109        bus_dma_tag_destroy(dma->dma_tag);
2110}
2111
2112
2113/*********************************************************************
2114 *
2115 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2116 *  the information needed to transmit a packet on the wire.
2117 *
2118 **********************************************************************/
2119static int
2120em_allocate_transmit_structures(struct adapter * adapter)
2121{
2122	if (!(adapter->tx_buffer_area =
2123	      (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2124					     adapter->num_tx_desc, M_DEVBUF,
2125					     M_NOWAIT))) {
2126		printf("em%d: Unable to allocate tx_buffer memory\n",
2127		       adapter->unit);
2128		return ENOMEM;
2129	}
2130
2131	bzero(adapter->tx_buffer_area,
2132	      sizeof(struct em_buffer) * adapter->num_tx_desc);
2133
2134	return 0;
2135}
2136
2137/*********************************************************************
2138 *
2139 *  Allocate and initialize transmit structures.
2140 *
2141 **********************************************************************/
2142static int
2143em_setup_transmit_structures(struct adapter * adapter)
2144{
2145        /*
2146         * Setup DMA descriptor areas.
2147         */
2148        if (bus_dma_tag_create(NULL,                    /* parent */
2149                               1, 0,                    /* alignment, bounds */
2150                               BUS_SPACE_MAXADDR,       /* lowaddr */
2151                               BUS_SPACE_MAXADDR,       /* highaddr */
2152                               NULL, NULL,              /* filter, filterarg */
2153                               MCLBYTES * 8,            /* maxsize */
2154                               EM_MAX_SCATTER,          /* nsegments */
2155                               MCLBYTES * 8,            /* maxsegsize */
2156                               BUS_DMA_ALLOCNOW,        /* flags */
2157			       NULL,			/* lockfunc */
2158			       NULL,			/* lockarg */
2159                               &adapter->txtag)) {
2160                printf("em%d: Unable to allocate TX DMA tag\n", adapter->unit);
2161                return (ENOMEM);
2162        }
2163
2164        if (em_allocate_transmit_structures(adapter))
2165                return (ENOMEM);
2166
2167        bzero((void *) adapter->tx_desc_base,
2168              (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
2169
2170        adapter->next_avail_tx_desc = 0;
2171        adapter->oldest_used_tx_desc = 0;
2172
2173        /* Set number of descriptors available */
2174        adapter->num_tx_desc_avail = adapter->num_tx_desc;
2175
2176        /* Set checksum context */
2177        adapter->active_checksum_context = OFFLOAD_NONE;
2178
2179        return (0);
2180}
2181
2182/*********************************************************************
2183 *
2184 *  Enable transmit unit.
2185 *
2186 **********************************************************************/
2187static void
2188em_initialize_transmit_unit(struct adapter * adapter)
2189{
2190	u_int32_t       reg_tctl;
2191	u_int32_t       reg_tipg = 0;
2192	u_int64_t	bus_addr;
2193
2194	INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
2195	/* Setup the Base and Length of the Tx Descriptor Ring */
2196	bus_addr = adapter->txdma.dma_paddr;
2197	E1000_WRITE_REG(&adapter->hw, TDBAL, (u_int32_t)bus_addr);
2198	E1000_WRITE_REG(&adapter->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
2199	E1000_WRITE_REG(&adapter->hw, TDLEN,
2200			adapter->num_tx_desc *
2201			sizeof(struct em_tx_desc));
2202
2203	/* Setup the HW Tx Head and Tail descriptor pointers */
2204	E1000_WRITE_REG(&adapter->hw, TDH, 0);
2205	E1000_WRITE_REG(&adapter->hw, TDT, 0);
2206
2207
2208	HW_DEBUGOUT2("Base = %x, Length = %x\n",
2209		     E1000_READ_REG(&adapter->hw, TDBAL),
2210		     E1000_READ_REG(&adapter->hw, TDLEN));
2211
2212	/* Set the default values for the Tx Inter Packet Gap timer */
2213	switch (adapter->hw.mac_type) {
2214	case em_82542_rev2_0:
2215        case em_82542_rev2_1:
2216                reg_tipg = DEFAULT_82542_TIPG_IPGT;
2217                reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2218                reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2219                break;
2220        default:
2221                if (adapter->hw.media_type == em_media_type_fiber)
2222                        reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2223                else
2224                        reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2225                reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2226                reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2227        }
2228
2229	E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
2230	E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
2231	if(adapter->hw.mac_type >= em_82540)
2232		E1000_WRITE_REG(&adapter->hw, TADV,
2233		    adapter->tx_abs_int_delay.value);
2234
2235	/* Program the Transmit Control Register */
2236	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
2237		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2238	if (adapter->hw.mac_type >= em_82573)
2239		reg_tctl |= E1000_TCTL_MULR;
2240	if (adapter->link_duplex == 1) {
2241		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2242	} else {
2243		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2244	}
2245	E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
2246
2247	/* Setup Transmit Descriptor Settings for this adapter */
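	/*
	 * IFCS makes the MAC insert the Ethernet FCS/CRC, RS requests a
	 * descriptor status write-back (the DD bit polled by the transmit
	 * cleanup code), and IDE (set below when a transmit interrupt delay
	 * is configured) arms the interrupt delay timer for the descriptor.
	 */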
2248	adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;
2249
2250	if (adapter->tx_int_delay.value > 0)
2251		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2252
2253	return;
2254}
2255
2256/*********************************************************************
2257 *
2258 *  Free all transmit related data structures.
2259 *
2260 **********************************************************************/
2261static void
2262em_free_transmit_structures(struct adapter * adapter)
2263{
2264        struct em_buffer   *tx_buffer;
2265        int             i;
2266
2267        INIT_DEBUGOUT("free_transmit_structures: begin");
2268
2269        if (adapter->tx_buffer_area != NULL) {
2270                tx_buffer = adapter->tx_buffer_area;
2271                for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2272                        if (tx_buffer->m_head != NULL) {
2273                                bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2274                                bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2275                                m_freem(tx_buffer->m_head);
2276                        }
2277                        tx_buffer->m_head = NULL;
2278                }
2279        }
2280        if (adapter->tx_buffer_area != NULL) {
2281                free(adapter->tx_buffer_area, M_DEVBUF);
2282                adapter->tx_buffer_area = NULL;
2283        }
2284        if (adapter->txtag != NULL) {
2285                bus_dma_tag_destroy(adapter->txtag);
2286                adapter->txtag = NULL;
2287        }
2288        return;
2289}
2290
2291/*********************************************************************
2292 *
2293 *  The offload context needs to be set when we transfer the first
2294 *  packet of a particular protocol (TCP/UDP). We change the
2295 *  context only if the protocol type changes.
2296 *
2297 **********************************************************************/
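/*
 * Note (inferred from the code below): the context descriptor itself consumes
 * one entry in the transmit ring, so back-to-back packets of the same
 * protocol reuse the cached context and only a TCP<->UDP transition writes a
 * new em_context_desc.
 */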
2298static void
2299em_transmit_checksum_setup(struct adapter * adapter,
2300			   struct mbuf *mp,
2301			   u_int32_t *txd_upper,
2302			   u_int32_t *txd_lower)
2303{
2304	struct em_context_desc *TXD;
2305	struct em_buffer *tx_buffer;
2306	int curr_txd;
2307
2308	if (mp->m_pkthdr.csum_flags) {
2309
2310		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2311			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2312			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2313			if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
2314				return;
2315			else
2316				adapter->active_checksum_context = OFFLOAD_TCP_IP;
2317
2318		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2319			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2320			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2321			if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
2322				return;
2323			else
2324				adapter->active_checksum_context = OFFLOAD_UDP_IP;
2325		} else {
2326			*txd_upper = 0;
2327			*txd_lower = 0;
2328			return;
2329		}
2330	} else {
2331		*txd_upper = 0;
2332		*txd_lower = 0;
2333		return;
2334	}
2335
2336	/* If we reach this point, the checksum offload context
2337	 * needs to be reset.
2338	 */
2339	curr_txd = adapter->next_avail_tx_desc;
2340	tx_buffer = &adapter->tx_buffer_area[curr_txd];
2341	TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
2342
2343	TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2344	TXD->lower_setup.ip_fields.ipcso =
2345		ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2346	TXD->lower_setup.ip_fields.ipcse =
2347		htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2348
2349	TXD->upper_setup.tcp_fields.tucss =
2350		ETHER_HDR_LEN + sizeof(struct ip);
2351	TXD->upper_setup.tcp_fields.tucse = htole16(0);
2352
2353	if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
2354		TXD->upper_setup.tcp_fields.tucso =
2355			ETHER_HDR_LEN + sizeof(struct ip) +
2356			offsetof(struct tcphdr, th_sum);
2357	} else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
2358		TXD->upper_setup.tcp_fields.tucso =
2359			ETHER_HDR_LEN + sizeof(struct ip) +
2360			offsetof(struct udphdr, uh_sum);
2361	}
2362
2363	TXD->tcp_seg_setup.data = htole32(0);
2364	TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);
2365
2366	tx_buffer->m_head = NULL;
2367
2368	if (++curr_txd == adapter->num_tx_desc)
2369		curr_txd = 0;
2370
2371	adapter->num_tx_desc_avail--;
2372	adapter->next_avail_tx_desc = curr_txd;
2373
2374	return;
2375}
2376
2377/**********************************************************************
2378 *
2379 *  Examine each tx_buffer in the used queue. If the hardware is done
2380 *  processing the packet then free associated resources. The
2381 *  tx_buffer is put back on the free queue.
2382 *
2383 **********************************************************************/
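/*
 * Note (inferred from the code below): the ring is scanned from
 * oldest_used_tx_desc forward, and a descriptor is reclaimed once the
 * hardware has written back the DD status bit that was requested via the RS
 * bit in adapter->txd_cmd.
 */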
2384static void
2385em_clean_transmit_interrupts(struct adapter * adapter)
2386{
2387        int i, num_avail;
2388        struct em_buffer *tx_buffer;
2389        struct em_tx_desc   *tx_desc;
2390	struct ifnet   *ifp = adapter->ifp;
2391
2392	mtx_assert(&adapter->mtx, MA_OWNED);
2393
2394        if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2395                return;
2396
2397        num_avail = adapter->num_tx_desc_avail;
2398        i = adapter->oldest_used_tx_desc;
2399
2400        tx_buffer = &adapter->tx_buffer_area[i];
2401        tx_desc = &adapter->tx_desc_base[i];
2402
2403        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2404            BUS_DMASYNC_POSTREAD);
2405        while (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2406
2407                tx_desc->upper.data = 0;
2408                num_avail++;
2409
2410                if (tx_buffer->m_head) {
2411			ifp->if_opackets++;
2412                        bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2413                        bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2414
2415                        m_freem(tx_buffer->m_head);
2416                        tx_buffer->m_head = NULL;
2417                }
2418
2419                if (++i == adapter->num_tx_desc)
2420                        i = 0;
2421
2422                tx_buffer = &adapter->tx_buffer_area[i];
2423                tx_desc = &adapter->tx_desc_base[i];
2424        }
2425        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2426            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2427
2428        adapter->oldest_used_tx_desc = i;
2429
2430        /*
2431         * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack
2432         * that it is OK to send packets.
2433         * If there are no pending descriptors, clear the timeout. Otherwise,
2434         * if some descriptors have been freed, restart the timeout.
2435         */
2436        if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
2437                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2438                if (num_avail == adapter->num_tx_desc)
2439                        ifp->if_timer = 0;
2440                else if (num_avail == adapter->num_tx_desc_avail)
2441                        ifp->if_timer = EM_TX_TIMEOUT;
2442        }
2443        adapter->num_tx_desc_avail = num_avail;
2444        return;
2445}
2446
2447/*********************************************************************
2448 *
2449 *  Get a buffer from system mbuf buffer pool.
2450 *
2451 **********************************************************************/
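/*
 * Note (inferred from the code below): for standard-MTU interfaces the fresh
 * cluster is trimmed by ETHER_ALIGN (2 bytes) so that the IP header following
 * the 14-byte Ethernet header lands on a 32-bit boundary.
 */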
2452static int
2453em_get_buf(int i, struct adapter *adapter,
2454           struct mbuf *nmp)
2455{
2456        register struct mbuf    *mp = nmp;
2457        struct em_buffer *rx_buffer;
2458        struct ifnet   *ifp;
2459        bus_addr_t paddr;
2460        int error;
2461
2462        ifp = adapter->ifp;
2463
2464        if (mp == NULL) {
2465                mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2466                if (mp == NULL) {
2467                        adapter->mbuf_cluster_failed++;
2468                        return(ENOBUFS);
2469                }
2470                mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2471        } else {
2472                mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2473                mp->m_data = mp->m_ext.ext_buf;
2474                mp->m_next = NULL;
2475        }
2476
2477        if (ifp->if_mtu <= ETHERMTU) {
2478                m_adj(mp, ETHER_ALIGN);
2479        }
2480
2481        rx_buffer = &adapter->rx_buffer_area[i];
2482
2483        /*
2484         * Using memory from the mbuf cluster pool, invoke the
2485         * bus_dma machinery to arrange the memory mapping.
2486         */
2487        error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
2488                                mtod(mp, void *), mp->m_len,
2489                                em_dmamap_cb, &paddr, 0);
2490        if (error) {
2491                m_free(mp);
2492                return(error);
2493        }
2494        rx_buffer->m_head = mp;
2495        adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
2496        bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD |
2497	    BUS_DMASYNC_PREWRITE);
2498
2499        return(0);
2500}
2501
2502/*********************************************************************
2503 *
2504 *  Allocate memory for rx_buffer structures. Since we use one
2505 *  rx_buffer per received packet, the maximum number of rx_buffer's
2506 *  rx_buffer per received packet, the maximum number of rx_buffers
2507 *  that we've allocated.
2508 *
2509 **********************************************************************/
2510static int
2511em_allocate_receive_structures(struct adapter * adapter)
2512{
2513        int             i, error;
2514        struct em_buffer *rx_buffer;
2515
2516        if (!(adapter->rx_buffer_area =
2517              (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2518                                          adapter->num_rx_desc, M_DEVBUF,
2519                                          M_NOWAIT))) {
2520                printf("em%d: Unable to allocate rx_buffer memory\n",
2521                       adapter->unit);
2522                return(ENOMEM);
2523        }
2524
2525        bzero(adapter->rx_buffer_area,
2526              sizeof(struct em_buffer) * adapter->num_rx_desc);
2527
2528        error = bus_dma_tag_create(NULL,                /* parent */
2529                               1, 0,                    /* alignment, bounds */
2530                               BUS_SPACE_MAXADDR,       /* lowaddr */
2531                               BUS_SPACE_MAXADDR,       /* highaddr */
2532                               NULL, NULL,              /* filter, filterarg */
2533                               MCLBYTES,                /* maxsize */
2534                               1,                       /* nsegments */
2535                               MCLBYTES,                /* maxsegsize */
2536                               BUS_DMA_ALLOCNOW,        /* flags */
2537			       NULL,			/* lockfunc */
2538			       NULL,			/* lockarg */
2539                               &adapter->rxtag);
2540        if (error != 0) {
2541                printf("em%d: em_allocate_receive_structures: "
2542                        "bus_dma_tag_create failed; error %u\n",
2543                       adapter->unit, error);
2544                goto fail_0;
2545        }
2546
2547        rx_buffer = adapter->rx_buffer_area;
2548        for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2549                error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2550                                          &rx_buffer->map);
2551                if (error != 0) {
2552                        printf("em%d: em_allocate_receive_structures: "
2553                                "bus_dmamap_create failed; error %u\n",
2554                                adapter->unit, error);
2555                        goto fail_1;
2556                }
2557        }
2558
2559        for (i = 0; i < adapter->num_rx_desc; i++) {
2560                error = em_get_buf(i, adapter, NULL);
2561                if (error != 0) {
2562                        adapter->rx_buffer_area[i].m_head = NULL;
2563                        adapter->rx_desc_base[i].buffer_addr = 0;
2564                        return(error);
2565                }
2566        }
2567        bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2568            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2569
2570        return(0);
2571
2572fail_1:
2573        bus_dma_tag_destroy(adapter->rxtag);
2574fail_0:
2575        adapter->rxtag = NULL;
2576        free(adapter->rx_buffer_area, M_DEVBUF);
2577        adapter->rx_buffer_area = NULL;
2578        return (error);
2579}
2580
2581/*********************************************************************
2582 *
2583 *  Allocate and initialize receive structures.
2584 *
2585 **********************************************************************/
2586static int
2587em_setup_receive_structures(struct adapter * adapter)
2588{
2589	bzero((void *) adapter->rx_desc_base,
2590              (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2591
2592	if (em_allocate_receive_structures(adapter))
2593		return ENOMEM;
2594
2595	/* Setup our descriptor pointers */
2596        adapter->next_rx_desc_to_check = 0;
2597	return(0);
2598}
2599
2600/*********************************************************************
2601 *
2602 *  Enable receive unit.
2603 *
2604 **********************************************************************/
2605static void
2606em_initialize_receive_unit(struct adapter * adapter)
2607{
2608	u_int32_t       reg_rctl;
2609	u_int32_t       reg_rxcsum;
2610	struct ifnet    *ifp;
2611	u_int64_t	bus_addr;
2612
2613        INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2614	ifp = adapter->ifp;
2615
2616	/* Make sure receives are disabled while setting up the descriptor ring */
2617	E1000_WRITE_REG(&adapter->hw, RCTL, 0);
2618
2619	/* Set the Receive Delay Timer Register */
2620	E1000_WRITE_REG(&adapter->hw, RDTR,
2621			adapter->rx_int_delay.value | E1000_RDT_FPDB);
2622
2623	if(adapter->hw.mac_type >= em_82540) {
2624		E1000_WRITE_REG(&adapter->hw, RADV,
2625		    adapter->rx_abs_int_delay.value);
2626
2627                /* Set the interrupt throttling rate.  Value is calculated
2628                 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */
2629#define MAX_INTS_PER_SEC        8000
2630#define DEFAULT_ITR             1000000000/(MAX_INTS_PER_SEC * 256)
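                /* With MAX_INTS_PER_SEC = 8000 this evaluates, in integer
                 * arithmetic, to 1000000000 / (8000 * 256) = 488; the ITR
                 * register counts in 256ns units, so this is a minimum
                 * inter-interrupt gap of roughly 125us. */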
2631                E1000_WRITE_REG(&adapter->hw, ITR, DEFAULT_ITR);
2632        }
2633
2634	/* Setup the Base and Length of the Rx Descriptor Ring */
2635	bus_addr = adapter->rxdma.dma_paddr;
2636	E1000_WRITE_REG(&adapter->hw, RDBAL, (u_int32_t)bus_addr);
2637	E1000_WRITE_REG(&adapter->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
2638	E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
2639			sizeof(struct em_rx_desc));
2640
2641	/* Setup the HW Rx Head and Tail Descriptor Pointers */
2642	E1000_WRITE_REG(&adapter->hw, RDH, 0);
2643	E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
2644
2645	/* Setup the Receive Control Register */
2646	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2647		   E1000_RCTL_RDMTS_HALF |
2648		   (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
2649
2650	if (adapter->hw.tbi_compatibility_on == TRUE)
2651		reg_rctl |= E1000_RCTL_SBP;
2652
2653
2654	switch (adapter->rx_buffer_len) {
2655	default:
2656	case EM_RXBUFFER_2048:
2657		reg_rctl |= E1000_RCTL_SZ_2048;
2658		break;
2659	case EM_RXBUFFER_4096:
2660		reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2661		break;
2662	case EM_RXBUFFER_8192:
2663		reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2664		break;
2665	case EM_RXBUFFER_16384:
2666		reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2667		break;
2668	}
2669
2670	if (ifp->if_mtu > ETHERMTU)
2671		reg_rctl |= E1000_RCTL_LPE;
2672
2673	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
2674	if ((adapter->hw.mac_type >= em_82543) &&
2675	    (ifp->if_capenable & IFCAP_RXCSUM)) {
2676		reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
2677		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2678		E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
2679	}
2680
2681	/* Enable Receives */
2682	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2683
2684	return;
2685}
2686
2687/*********************************************************************
2688 *
2689 *  Free receive related data structures.
2690 *
2691 **********************************************************************/
2692static void
2693em_free_receive_structures(struct adapter *adapter)
2694{
2695        struct em_buffer   *rx_buffer;
2696        int             i;
2697
2698        INIT_DEBUGOUT("free_receive_structures: begin");
2699
2700        if (adapter->rx_buffer_area != NULL) {
2701                rx_buffer = adapter->rx_buffer_area;
2702                for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2703                        if (rx_buffer->map != NULL) {
2704                                bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2705                                bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2706                        }
2707                        if (rx_buffer->m_head != NULL)
2708                                m_freem(rx_buffer->m_head);
2709                        rx_buffer->m_head = NULL;
2710                }
2711        }
2712        if (adapter->rx_buffer_area != NULL) {
2713                free(adapter->rx_buffer_area, M_DEVBUF);
2714                adapter->rx_buffer_area = NULL;
2715        }
2716        if (adapter->rxtag != NULL) {
2717                bus_dma_tag_destroy(adapter->rxtag);
2718                adapter->rxtag = NULL;
2719        }
2720        return;
2721}
2722
2723/*********************************************************************
2724 *
2725 *  This routine executes in interrupt context. It replenishes
2726 *  the mbufs in the descriptor and sends data which has been
2727 *  DMA'ed into host memory to the upper layer.
2728 *
2729 *  We loop at most count times if count is > 0, or until done if
2730 *  count < 0.
2731 *
2732 *********************************************************************/
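/*
 * Note (inferred from the code below): frames spanning several descriptors
 * are collected through adapter->fmp (first mbuf of the chain) and
 * adapter->lmp (last mbuf); the chain is only handed to ifp->if_input once a
 * descriptor carrying the EOP bit has been processed.
 */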
2733static void
2734em_process_receive_interrupts(struct adapter * adapter, int count)
2735{
2736	struct ifnet        *ifp;
2737	struct mbuf         *mp;
2738	u_int8_t            accept_frame = 0;
2739 	u_int8_t            eop = 0;
2740	u_int16_t           len, desc_len, prev_len_adj;
2741	int                 i;
2742
2743	/* Pointer to the receive descriptor being examined. */
2744	struct em_rx_desc   *current_desc;
2745
2746	mtx_assert(&adapter->mtx, MA_OWNED);
2747
2748	ifp = adapter->ifp;
2749	i = adapter->next_rx_desc_to_check;
2750        current_desc = &adapter->rx_desc_base[i];
2751	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2752	    BUS_DMASYNC_POSTREAD);
2753
2754	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
2755		return;
2756	}
2757
2758	while ((current_desc->status & E1000_RXD_STAT_DD) &&
2759		    (count != 0) &&
2760		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2761		struct mbuf *m = NULL;
2762
2763		mp = adapter->rx_buffer_area[i].m_head;
2764		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2765				BUS_DMASYNC_POSTREAD);
2766
2767		accept_frame = 1;
2768		prev_len_adj = 0;
2769                desc_len = le16toh(current_desc->length);
2770		if (current_desc->status & E1000_RXD_STAT_EOP) {
2771			count--;
2772			eop = 1;
2773			if (desc_len < ETHER_CRC_LEN) {
2774                                len = 0;
2775                                prev_len_adj = ETHER_CRC_LEN - desc_len;
2776                        }
2777                        else {
2778                                len = desc_len - ETHER_CRC_LEN;
2779                        }
2780		} else {
2781			eop = 0;
2782			len = desc_len;
2783		}
2784
2785		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2786			u_int8_t            last_byte;
2787			u_int32_t           pkt_len = desc_len;
2788
2789			if (adapter->fmp != NULL)
2790				pkt_len += adapter->fmp->m_pkthdr.len;
2791
2792			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
2793
2794			if (TBI_ACCEPT(&adapter->hw, current_desc->status,
2795				       current_desc->errors,
2796				       pkt_len, last_byte)) {
2797				em_tbi_adjust_stats(&adapter->hw,
2798						    &adapter->stats,
2799						    pkt_len,
2800						    adapter->hw.mac_addr);
2801				if (len > 0) len--;
2802			}
2803			else {
2804				accept_frame = 0;
2805			}
2806		}
2807
2808		if (accept_frame) {
2809
2810			if (em_get_buf(i, adapter, NULL) == ENOBUFS) {
2811				adapter->dropped_pkts++;
2812				em_get_buf(i, adapter, mp);
2813				if (adapter->fmp != NULL)
2814					m_freem(adapter->fmp);
2815				adapter->fmp = NULL;
2816				adapter->lmp = NULL;
2817				break;
2818			}
2819
2820			/* Assign correct length to the current fragment */
2821			mp->m_len = len;
2822
2823			if (adapter->fmp == NULL) {
2824				mp->m_pkthdr.len = len;
2825				adapter->fmp = mp;	 /* Store the first mbuf */
2826				adapter->lmp = mp;
2827			} else {
2828				/* Chain mbufs together */
2829				mp->m_flags &= ~M_PKTHDR;
2830				/*
2831                                 * Adjust length of previous mbuf in chain if we
2832                                 * received less than 4 bytes in the last descriptor.
2833                                 */
2834				if (prev_len_adj > 0) {
2835					adapter->lmp->m_len -= prev_len_adj;
2836					adapter->fmp->m_pkthdr.len -= prev_len_adj;
2837				}
2838				adapter->lmp->m_next = mp;
2839				adapter->lmp = adapter->lmp->m_next;
2840				adapter->fmp->m_pkthdr.len += len;
2841			}
2842
2843                        if (eop) {
2844                                adapter->fmp->m_pkthdr.rcvif = ifp;
2845				ifp->if_ipackets++;
2846                                em_receive_checksum(adapter, current_desc,
2847                                                    adapter->fmp);
2848                                if (current_desc->status & E1000_RXD_STAT_VP)
2849                                        VLAN_INPUT_TAG(ifp, adapter->fmp,
2850                                                       (current_desc->special &
2851							E1000_RXD_SPC_VLAN_MASK),
2852						       adapter->fmp = NULL);
2853
2854				m = adapter->fmp;
2855				adapter->fmp = NULL;
2856				adapter->lmp = NULL;
2857                        }
2858		} else {
2859			adapter->dropped_pkts++;
2860			em_get_buf(i, adapter, mp);
2861			if (adapter->fmp != NULL)
2862				m_freem(adapter->fmp);
2863			adapter->fmp = NULL;
2864			adapter->lmp = NULL;
2865		}
2866
2867		/* Zero out the receive descriptors status  */
2868		current_desc->status = 0;
2869		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2870		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2871
2872		/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
2873                E1000_WRITE_REG(&adapter->hw, RDT, i);
2874
2875                /* Advance our pointers to the next descriptor */
2876		if (++i == adapter->num_rx_desc)
2877			i = 0;
2878		if (m != NULL) {
2879			adapter->next_rx_desc_to_check = i;
2880			EM_UNLOCK(adapter);
2881			(*ifp->if_input)(ifp, m);
2882			EM_LOCK(adapter);
2883			i = adapter->next_rx_desc_to_check;
2884		}
2885		current_desc = &adapter->rx_desc_base[i];
2886	}
2887	adapter->next_rx_desc_to_check = i;
2888	return;
2889}
2890
2891/*********************************************************************
2892 *
2893 *  Verify that the hardware indicated that the checksum is valid.
2894 *  Inform the stack about the status of checksum so that stack
2895 *  doesn't spend time verifying the checksum.
2896 *
2897 *********************************************************************/
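/*
 * Note (inferred from the code below): a good IP header checksum is reported
 * as CSUM_IP_CHECKED | CSUM_IP_VALID, and a good TCP/UDP checksum as
 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR with csum_data set to 0xffff, so the
 * stack can skip its own verification.
 */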
2898static void
2899em_receive_checksum(struct adapter *adapter,
2900		    struct em_rx_desc *rx_desc,
2901		    struct mbuf *mp)
2902{
2903	/* 82543 or newer only */
2904	if ((adapter->hw.mac_type < em_82543) ||
2905	    /* Ignore Checksum bit is set */
2906	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
2907		mp->m_pkthdr.csum_flags = 0;
2908		return;
2909	}
2910
2911	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
2912		/* Did it pass? */
2913		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
2914			/* IP Checksum Good */
2915			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2916			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2917
2918		} else {
2919			mp->m_pkthdr.csum_flags = 0;
2920		}
2921	}
2922
2923	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
2924		/* Did it pass? */
2925		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
2926			mp->m_pkthdr.csum_flags |=
2927			(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2928			mp->m_pkthdr.csum_data = htons(0xffff);
2929		}
2930	}
2931
2932	return;
2933}
2934
2935
2936static void
2937em_enable_vlans(struct adapter *adapter)
2938{
2939	uint32_t ctrl;
2940
2941	E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
2942
2943	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2944	ctrl |= E1000_CTRL_VME;
2945	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2946
2947	return;
2948}
2949
2950static void
2951em_disable_vlans(struct adapter *adapter)
2952{
2953	uint32_t ctrl;
2954
2955	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2956	ctrl &= ~E1000_CTRL_VME;
2957	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2958
2959	return;
2960}
2961
2962static void
2963em_enable_intr(struct adapter * adapter)
2964{
2965	E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
2966	return;
2967}
2968
2969static void
2970em_disable_intr(struct adapter *adapter)
2971{
2972	/*
2973	 * The first version of the 82542 had an erratum where, when link was forced,
2974	 * it would stay up even if the cable was disconnected.  Sequence errors
2975	 * were used to detect the disconnect and then the driver would unforce the link.
2976	 * This code is in the ISR.  For this to work correctly the Sequence error
2977	 * interrupt had to be enabled all the time.
2978	 */
2979
2980	if (adapter->hw.mac_type == em_82542_rev2_0)
2981	    E1000_WRITE_REG(&adapter->hw, IMC,
2982	        (0xffffffff & ~E1000_IMC_RXSEQ));
2983	else
2984	    E1000_WRITE_REG(&adapter->hw, IMC,
2985	        0xffffffff);
2986	return;
2987}
2988
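/*
 * A usable station address must be unicast (multicast bit of the first octet
 * clear) and must not be all zeroes.
 */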
2989static int
2990em_is_valid_ether_addr(u_int8_t *addr)
2991{
2992        char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
2993
2994        if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
2995                return (FALSE);
2996        }
2997
2998        return(TRUE);
2999}
3000
3001void
3002em_write_pci_cfg(struct em_hw *hw,
3003		      uint32_t reg,
3004		      uint16_t *value)
3005{
3006	pci_write_config(((struct em_osdep *)hw->back)->dev, reg,
3007			 *value, 2);
3008}
3009
3010void
3011em_read_pci_cfg(struct em_hw *hw, uint32_t reg,
3012		     uint16_t *value)
3013{
3014	*value = pci_read_config(((struct em_osdep *)hw->back)->dev,
3015				 reg, 2);
3016	return;
3017}
3018
3019void
3020em_pci_set_mwi(struct em_hw *hw)
3021{
3022        pci_write_config(((struct em_osdep *)hw->back)->dev,
3023                         PCIR_COMMAND,
3024                         (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
3025        return;
3026}
3027
3028void
3029em_pci_clear_mwi(struct em_hw *hw)
3030{
3031        pci_write_config(((struct em_osdep *)hw->back)->dev,
3032                         PCIR_COMMAND,
3033                         (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
3034        return;
3035}
3036
3037uint32_t
3038em_io_read(struct em_hw *hw, unsigned long port)
3039{
3040	return(inl(port));
3041}
3042
3043void
3044em_io_write(struct em_hw *hw, unsigned long port, uint32_t value)
3045{
3046	outl(port, value);
3047	return;
3048}
3049
3050/*********************************************************************
3051* 82544 Coexistence issue workaround.
3052*    There are 2 issues.
3053*       1. Transmit Hang issue.
3054*    To detect this issue, the following equation can be used...
3055*          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3056*          If SUM[3:0] is between 1 and 4, we will have this issue.
3057*
3058*       2. DAC issue.
3059*    To detect this issue, the following equation can be used...
3060*          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3061*          If SUM[3:0] is between 9 and c, we will have this issue.
3062*
3063*
3064*    WORKAROUND:
3065*          Make sure we do not have an ending address of 1,2,3,4 (Hang) or 9,a,b,c (DAC)
3066*
3067**************************************************************************/
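/*
 * Worked example (illustration only): if (ADDR[2:0] + SIZE[3:0]) & 0xF works
 * out to 0x3, the buffer falls in the Hang range above, so
 * em_fill_descriptors() splits it into one descriptor of length - 4 bytes
 * plus a trailing 4-byte descriptor, moving the ending address out of the
 * problem ranges; buffers of 4 bytes or less are always left as a single
 * descriptor.
 */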
3068static u_int32_t
3069em_fill_descriptors (u_int64_t address,
3070                              u_int32_t length,
3071                              PDESC_ARRAY desc_array)
3072{
3073        /* Since the issue is sensitive to both length and address, */
3074        /* let us first check the address... */
3075        u_int32_t safe_terminator;
3076        if (length <= 4) {
3077                desc_array->descriptor[0].address = address;
3078                desc_array->descriptor[0].length = length;
3079                desc_array->elements = 1;
3080                return desc_array->elements;
3081        }
3082        safe_terminator = (u_int32_t)((((u_int32_t)address & 0x7) + (length & 0xF)) & 0xF);
3083        /* If it does not fall between 0x1 and 0x4 or 0x9 and 0xC, use a single descriptor */
3084        if (safe_terminator == 0   ||
3085        (safe_terminator > 4   &&
3086        safe_terminator < 9)   ||
3087        (safe_terminator > 0xC &&
3088        safe_terminator <= 0xF)) {
3089                desc_array->descriptor[0].address = address;
3090                desc_array->descriptor[0].length = length;
3091                desc_array->elements = 1;
3092                return desc_array->elements;
3093        }
3094
3095        desc_array->descriptor[0].address = address;
3096        desc_array->descriptor[0].length = length - 4;
3097        desc_array->descriptor[1].address = address + (length - 4);
3098        desc_array->descriptor[1].length = 4;
3099        desc_array->elements = 2;
3100        return desc_array->elements;
3101}
3102
3103/**********************************************************************
3104 *
3105 *  Update the board statistics counters.
3106 *
3107 **********************************************************************/
3108static void
3109em_update_stats_counters(struct adapter *adapter)
3110{
3111	struct ifnet   *ifp;
3112
	if (adapter->hw.media_type == em_media_type_copper ||
	    (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
		adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
	}
	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
	adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);

	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
	adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
	adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);

	/*
	 * For the 64-bit byte counters the low dword must be read first;
	 * both registers clear on the read of the high dword.
	 */
	adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
	adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
	adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
	adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);

	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
	adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);

	adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
	adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
	adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
	adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);

	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);

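	/* These counters are implemented only on the 82543 and newer MACs. */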
	if (adapter->hw.mac_type >= em_82543) {
		adapter->stats.algnerrc +=
		    E1000_READ_REG(&adapter->hw, ALGNERRC);
		adapter->stats.rxerrc +=
		    E1000_READ_REG(&adapter->hw, RXERRC);
		adapter->stats.tncrs +=
		    E1000_READ_REG(&adapter->hw, TNCRS);
		adapter->stats.cexterr +=
		    E1000_READ_REG(&adapter->hw, CEXTERR);
		adapter->stats.tsctc +=
		    E1000_READ_REG(&adapter->hw, TSCTC);
		adapter->stats.tsctfc +=
		    E1000_READ_REG(&adapter->hw, TSCTFC);
	}
	ifp = adapter->ifp;

	/* Fill out the OS statistics structure */
	ifp->if_ibytes = adapter->stats.gorcl;
	ifp->if_obytes = adapter->stats.gotcl;
	ifp->if_imcasts = adapter->stats.mprc;
	ifp->if_collisions = adapter->stats.colc;

	/* Rx Errors */
	ifp->if_ierrors =
	    adapter->dropped_pkts +
	    adapter->stats.rxerrc +
	    adapter->stats.crcerrs +
	    adapter->stats.algnerrc +
	    adapter->stats.rlec +
	    adapter->stats.mpc + adapter->stats.cexterr;

	/* Tx Errors */
	ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol;
}


/**********************************************************************
 *
 *  This routine is called when em_display_debug_stats is enabled and
 *  provides a way to take a look at important statistics maintained by
 *  the driver and hardware.
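 *  It is also invoked from the em_sysctl_debug_info() handler below when
 *  a value of 1 is written to the associated sysctl node.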
 *
 **********************************************************************/
static void
em_print_debug_info(struct adapter *adapter)
{
	int unit = adapter->unit;
	uint8_t *hw_addr = adapter->hw.hw_addr;

	printf("em%d: Adapter hardware address = %p\n", unit, hw_addr);
	printf("em%d: CTRL = 0x%x\n", unit,
	    E1000_READ_REG(&adapter->hw, CTRL));
	printf("em%d: RCTL = 0x%x PS=(0x8402)\n", unit,
	    E1000_READ_REG(&adapter->hw, RCTL));
	printf("em%d: tx_int_delay = %d, tx_abs_int_delay = %d\n", unit,
	    E1000_READ_REG(&adapter->hw, TIDV),
	    E1000_READ_REG(&adapter->hw, TADV));
	printf("em%d: rx_int_delay = %d, rx_abs_int_delay = %d\n", unit,
	    E1000_READ_REG(&adapter->hw, RDTR),
	    E1000_READ_REG(&adapter->hw, RADV));
	printf("em%d: fifo workaround = %lld, fifo_reset = %lld\n", unit,
	    (long long)adapter->tx_fifo_wrk_cnt,
	    (long long)adapter->tx_fifo_reset_cnt);
	printf("em%d: hw tdh = %d, hw tdt = %d\n", unit,
	    E1000_READ_REG(&adapter->hw, TDH),
	    E1000_READ_REG(&adapter->hw, TDT));
	printf("em%d: Num Tx descriptors avail = %d\n", unit,
	    adapter->num_tx_desc_avail);
	printf("em%d: Tx Descriptors not avail1 = %ld\n", unit,
	    adapter->no_tx_desc_avail1);
	printf("em%d: Tx Descriptors not avail2 = %ld\n", unit,
	    adapter->no_tx_desc_avail2);
	printf("em%d: Std mbuf failed = %ld\n", unit,
	    adapter->mbuf_alloc_failed);
	printf("em%d: Std mbuf cluster failed = %ld\n", unit,
	    adapter->mbuf_cluster_failed);
	printf("em%d: Driver dropped packets = %ld\n", unit,
	    adapter->dropped_pkts);

	return;
}

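/*
 * Dump the accumulated MAC statistics counters to the console.  Driven by
 * the em_sysctl_stats() handler below.
 */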
static void
em_print_hw_stats(struct adapter *adapter)
{
	int unit = adapter->unit;

	printf("em%d: Excessive collisions = %lld\n", unit,
	    (long long)adapter->stats.ecol);
	printf("em%d: Symbol errors = %lld\n", unit,
	    (long long)adapter->stats.symerrs);
	printf("em%d: Sequence errors = %lld\n", unit,
	    (long long)adapter->stats.sec);
	printf("em%d: Defer count = %lld\n", unit,
	    (long long)adapter->stats.dc);

	printf("em%d: Missed Packets = %lld\n", unit,
	    (long long)adapter->stats.mpc);
	printf("em%d: Receive No Buffers = %lld\n", unit,
	    (long long)adapter->stats.rnbc);
	printf("em%d: Receive length errors = %lld\n", unit,
	    (long long)adapter->stats.rlec);
	printf("em%d: Receive errors = %lld\n", unit,
	    (long long)adapter->stats.rxerrc);
	printf("em%d: Crc errors = %lld\n", unit,
	    (long long)adapter->stats.crcerrs);
	printf("em%d: Alignment errors = %lld\n", unit,
	    (long long)adapter->stats.algnerrc);
	printf("em%d: Carrier extension errors = %lld\n", unit,
	    (long long)adapter->stats.cexterr);

	printf("em%d: XON Rcvd = %lld\n", unit,
	    (long long)adapter->stats.xonrxc);
	printf("em%d: XON Xmtd = %lld\n", unit,
	    (long long)adapter->stats.xontxc);
	printf("em%d: XOFF Rcvd = %lld\n", unit,
	    (long long)adapter->stats.xoffrxc);
	printf("em%d: XOFF Xmtd = %lld\n", unit,
	    (long long)adapter->stats.xofftxc);

	printf("em%d: Good Packets Rcvd = %lld\n", unit,
	    (long long)adapter->stats.gprc);
	printf("em%d: Good Packets Xmtd = %lld\n", unit,
	    (long long)adapter->stats.gptc);

	return;
}

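/*
 * Sysctl handler: a read simply returns -1; writing a value of 1 dumps the
 * adapter debug information (em_print_debug_info) to the console.
 */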
static int
em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct adapter *adapter;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		adapter = (struct adapter *)arg1;
		em_print_debug_info(adapter);
	}

	return (error);
}

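/*
 * Sysctl handler: writing a value of 1 dumps the accumulated hardware
 * statistics (em_print_hw_stats) to the console.
 */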
static int
em_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct adapter *adapter;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		adapter = (struct adapter *)arg1;
		em_print_hw_stats(adapter);
	}

	return (error);
}

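/*
 * Sysctl handler for the interrupt delay tunables: validate the requested
 * value in microseconds, convert it to hardware ticks, and rewrite the low
 * 16 bits of the corresponding delay register while leaving the upper bits
 * intact.
 */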
static int
em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
{
	struct em_int_delay_info *info;
	struct adapter *adapter;
	u_int32_t regval;
	int error;
	int usecs;
	int ticks;
	int s;

	info = (struct em_int_delay_info *)arg1;
	adapter = info->adapter;
	usecs = info->value;
	error = sysctl_handle_int(oidp, &usecs, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
		return (EINVAL);
	info->value = usecs;
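	/*
	 * Convert to device register units; each tick is believed to be
	 * 1.024 usec on this hardware (see the E1000_USECS_TO_TICKS macro).
	 */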
	ticks = E1000_USECS_TO_TICKS(usecs);

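	/* Read-modify-write the register with network interrupts blocked. */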
	s = splimp();
	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
	regval = (regval & ~0xffff) | (ticks & 0xffff);
	/* Handle a few special cases. */
	switch (info->offset) {
	case E1000_RDTR:
	case E1000_82542_RDTR:
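		/*
		 * Always set the FPDB bit (assumed to mean "flush partial
		 * descriptor block") when rewriting RDTR.
		 */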
		regval |= E1000_RDT_FPDB;
		break;
	case E1000_TIDV:
	case E1000_82542_TIDV:
		if (ticks == 0) {
			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
			/* Don't write 0 into the TIDV register. */
			regval++;
		} else
			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
		break;
	}
	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
	splx(s);
	return (0);
}

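/*
 * Register a read/write per-device sysctl node for one interrupt delay
 * register; changes are applied through em_sysctl_int_delay() above.
 */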
static void
em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
    const char *description, struct em_int_delay_info *info,
    int offset, int value)
{
	info->adapter = adapter;
	info->offset = offset;
	info->value = value;
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
	    info, 0, em_sysctl_int_delay, "I", description);
}
