/**************************************************************************

Copyright (c) 2001-2005, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

/*$FreeBSD: head/sys/dev/em/if_em.c 152740 2005-11-24 01:44:49Z glebius $*/

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <dev/em/if_em.h>

/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
int             em_display_debug_stats = 0;

/*********************************************************************
 *  Driver version
 *********************************************************************/

char em_driver_version[] = "Version - 3.2.18";


/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into em_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static em_vendor_info_t em_vendor_info_array[] =
{
        /* Intel(R) PRO/1000 Network Connection */
        { 0x8086, E1000_DEV_ID_82540EM,             PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82540EM_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82540EP,             PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82540EP_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82540EP_LP,          PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82541EI,             PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541ER,             PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541ER_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541EI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541GI,             PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541GI_LF,          PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541GI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82542,               PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82543GC_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82543GC_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82544EI_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82544EI_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82544GC_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82544GC_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82545EM_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82545EM_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82545GM_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82545GM_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82545GM_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82546EB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546EB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_PCIE,        PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82547EI,             PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82547EI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82547GI,             PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82571EB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82572EI_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82573E,              PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82573E_IAMT,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82573L,              PCI_ANY_ID, PCI_ANY_ID, 0},

        /* required last entry */
        { 0, 0, 0, 0, 0}
};
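/*
 * Note: PCI_ANY_ID in the subvendor/subdevice columns acts as a wildcard;
 * em_probe() below accepts any subsystem IDs for such entries.
 */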

/*********************************************************************
 *  Table of branding strings for all supported NICs.
 *********************************************************************/

static char *em_strings[] = {
	"Intel(R) PRO/1000 Network Connection"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int  em_probe(device_t);
static int  em_attach(device_t);
static int  em_detach(device_t);
static int  em_shutdown(device_t);
static void em_intr(void *);
static void em_start(struct ifnet *);
static int  em_ioctl(struct ifnet *, u_long, caddr_t);
static void em_watchdog(struct ifnet *);
static void em_init(void *);
static void em_init_locked(struct adapter *);
static void em_stop(void *);
static void em_media_status(struct ifnet *, struct ifmediareq *);
static int  em_media_change(struct ifnet *);
static void em_identify_hardware(struct adapter *);
static int  em_allocate_pci_resources(struct adapter *);
static void em_free_pci_resources(struct adapter *);
static void em_local_timer(void *);
static int  em_hardware_init(struct adapter *);
static void em_setup_interface(device_t, struct adapter *);
static int  em_setup_transmit_structures(struct adapter *);
static void em_initialize_transmit_unit(struct adapter *);
static int  em_setup_receive_structures(struct adapter *);
static void em_initialize_receive_unit(struct adapter *);
static void em_enable_intr(struct adapter *);
static void em_disable_intr(struct adapter *);
static void em_free_transmit_structures(struct adapter *);
static void em_free_receive_structures(struct adapter *);
static void em_update_stats_counters(struct adapter *);
static void em_clean_transmit_interrupts(struct adapter *);
static int  em_allocate_receive_structures(struct adapter *);
static int  em_allocate_transmit_structures(struct adapter *);
static void em_process_receive_interrupts(struct adapter *, int);
static void em_receive_checksum(struct adapter *,
				struct em_rx_desc *,
				struct mbuf *);
static void em_transmit_checksum_setup(struct adapter *,
				       struct mbuf *,
				       u_int32_t *,
				       u_int32_t *);
static void em_set_promisc(struct adapter *);
static void em_disable_promisc(struct adapter *);
static void em_set_multi(struct adapter *);
static void em_print_hw_stats(struct adapter *);
static void em_print_link_status(struct adapter *);
static int  em_get_buf(int i, struct adapter *,
		       struct mbuf *);
static void em_enable_vlans(struct adapter *);
static void em_disable_vlans(struct adapter *);
static int  em_encap(struct adapter *, struct mbuf **);
static void em_smartspeed(struct adapter *);
static int  em_82547_fifo_workaround(struct adapter *, int);
static void em_82547_update_fifo_head(struct adapter *, int);
static int  em_82547_tx_fifo_reset(struct adapter *);
static void em_82547_move_tail(void *arg);
static void em_82547_move_tail_locked(struct adapter *);
static int  em_dma_malloc(struct adapter *, bus_size_t,
			  struct em_dma_alloc *, int);
static void em_dma_free(struct adapter *, struct em_dma_alloc *);
static void em_print_debug_info(struct adapter *);
static int  em_is_valid_ether_addr(u_int8_t *);
static int  em_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int  em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static u_int32_t em_fill_descriptors (bus_addr_t address,
				      u_int32_t length,
				      PDESC_ARRAY desc_array);
static int  em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static void em_add_int_delay_sysctl(struct adapter *, const char *,
				    const char *, struct em_int_delay_info *,
				    int, int);
#ifdef DEVICE_POLLING
static poll_handler_t em_poll;
#endif

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t em_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, em_probe),
	DEVMETHOD(device_attach, em_attach),
	DEVMETHOD(device_detach, em_detach),
	DEVMETHOD(device_shutdown, em_shutdown),
	{0, 0}
};

static driver_t em_driver = {
	"em", em_methods, sizeof(struct adapter ),
};

static devclass_t em_devclass;
DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
MODULE_DEPEND(em, pci, 1, 1, 1);
MODULE_DEPEND(em, ether, 1, 1, 1);

/*********************************************************************
 *  Tunable default values.
 *********************************************************************/

#define E1000_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
#define E1000_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)
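/*
 * The interrupt delay registers count in 1.024 microsecond units, which is
 * what the two conversions above encode; both round to the nearest whole
 * tick or microsecond.
 */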

static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
static int em_rxd = EM_DEFAULT_RXD;
static int em_txd = EM_DEFAULT_TXD;

TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rxd", &em_rxd);
TUNABLE_INT("hw.em.txd", &em_txd);
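/*
 * These tunables are read from the kernel environment at module load time,
 * so they can be overridden from /boot/loader.conf, for example:
 *
 *	hw.em.rxd="1024"
 *	hw.em.txd="1024"
 *
 * (the values above are only examples; out-of-range values are caught in
 * em_attach() and replaced with the compile-time defaults).
 */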

/*********************************************************************
 *  Device identification routine
 *
 *  em_probe determines if the driver should be loaded on an
 *  adapter, based on the PCI vendor/device ID of the adapter.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
em_probe(device_t dev)
{
	em_vendor_info_t *ent;

	u_int16_t       pci_vendor_id = 0;
	u_int16_t       pci_device_id = 0;
	u_int16_t       pci_subvendor_id = 0;
	u_int16_t       pci_subdevice_id = 0;
	char            adapter_name[60];

	INIT_DEBUGOUT("em_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != EM_VENDOR_ID)
		return(ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = em_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&

		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == PCI_ANY_ID)) &&

		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == PCI_ANY_ID))) {
			sprintf(adapter_name, "%s %s",
				em_strings[ent->index],
				em_driver_version);
			device_set_desc_copy(dev, adapter_name);
			return(BUS_PROBE_DEFAULT);
		}
		ent++;
	}

	return(ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
em_attach(device_t dev)
{
	struct adapter * adapter;
	int             tsize, rsize;
	int		error = 0;

	INIT_DEBUGOUT("em_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	if (!(adapter = device_get_softc(dev))) {
		printf("em: adapter structure allocation failed\n");
		return(ENOMEM);
	}
	bzero(adapter, sizeof(struct adapter ));
	adapter->dev = dev;
	adapter->osdep.dev = dev;
	adapter->unit = device_get_unit(dev);
	EM_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL stuff */
        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                        OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
                        (void *)adapter, 0,
                        em_sysctl_debug_info, "I", "Debug Information");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                        OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
                        (void *)adapter, 0,
                        em_sysctl_stats, "I", "Statistics");

	callout_init(&adapter->timer, CALLOUT_MPSAFE);
	callout_init(&adapter->tx_fifo_timer, CALLOUT_MPSAFE);
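	/*
	 * Both callouts are MPSAFE, so their handlers run without Giant;
	 * em_82547_move_tail() below takes the adapter lock itself, and
	 * em_local_timer() is expected to do the same.
	 */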

	/* Determine hardware revision */
	em_identify_hardware(adapter);

	/* Set up some sysctls for the tunable interrupt delays */
	em_add_int_delay_sysctl(adapter, "rx_int_delay",
	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
	    E1000_REG_OFFSET(&adapter->hw, RDTR), em_rx_int_delay_dflt);
	em_add_int_delay_sysctl(adapter, "tx_int_delay",
	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
	    E1000_REG_OFFSET(&adapter->hw, TIDV), em_tx_int_delay_dflt);
	if (adapter->hw.mac_type >= em_82540) {
		em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
		    "receive interrupt delay limit in usecs",
		    &adapter->rx_abs_int_delay,
		    E1000_REG_OFFSET(&adapter->hw, RADV),
		    em_rx_abs_int_delay_dflt);
		em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
		    "transmit interrupt delay limit in usecs",
		    &adapter->tx_abs_int_delay,
		    E1000_REG_OFFSET(&adapter->hw, TADV),
		    em_tx_abs_int_delay_dflt);
	}

	/*
	 * Validate the number of transmit and receive descriptors. It
	 * must not exceed the hardware maximum, and the ring size in
	 * bytes must be a multiple of E1000_DBA_ALIGN.
	 */
	if (((em_txd * sizeof(struct em_tx_desc)) % E1000_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac_type >= em_82544 && em_txd > EM_MAX_TXD) ||
	    (adapter->hw.mac_type < em_82544 && em_txd > EM_MAX_TXD_82543) ||
	    (em_txd < EM_MIN_TXD)) {
		printf("em%d: Using %d TX descriptors instead of %d!\n",
		    adapter->unit, EM_DEFAULT_TXD, em_txd);
		adapter->num_tx_desc = EM_DEFAULT_TXD;
	} else
		adapter->num_tx_desc = em_txd;
	if (((em_rxd * sizeof(struct em_rx_desc)) % E1000_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac_type >= em_82544 && em_rxd > EM_MAX_RXD) ||
	    (adapter->hw.mac_type < em_82544 && em_rxd > EM_MAX_RXD_82543) ||
	    (em_rxd < EM_MIN_RXD)) {
		printf("em%d: Using %d RX descriptors instead of %d!\n",
		    adapter->unit, EM_DEFAULT_RXD, em_rxd);
		adapter->num_rx_desc = EM_DEFAULT_RXD;
	} else
		adapter->num_rx_desc = em_rxd;
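	/*
	 * E1000_DBA_ALIGN is the alignment the hardware requires for the
	 * descriptor base/length registers; note that an out-of-range
	 * tunable above is not an error, it only logs a warning and falls
	 * back to the compile-time default.
	 */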

        adapter->hw.autoneg = DO_AUTO_NEG;
        adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
        adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
        adapter->hw.tbi_compatibility_en = TRUE;
        adapter->rx_buffer_len = EM_RXBUFFER_2048;

	adapter->hw.phy_init_script = 1;
	adapter->hw.phy_reset_disable = FALSE;

#ifndef EM_MASTER_SLAVE
	adapter->hw.master_slave = em_ms_hw_default;
#else
	adapter->hw.master_slave = EM_MASTER_SLAVE;
#endif
	/*
	 * Set the max frame size assuming standard ethernet
	 * sized frames
	 */
	adapter->hw.max_frame_size =
		ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	adapter->hw.min_frame_size =
		MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;

	/*
	 * This controls when hardware reports transmit completion
	 * status.
	 */
	adapter->hw.report_tx_early = 1;


	if (em_allocate_pci_resources(adapter)) {
		printf("em%d: Allocation of PCI resources failed\n",
		       adapter->unit);
                error = ENXIO;
                goto err_pci;
	}


	/* Initialize eeprom parameters */
        em_init_eeprom_params(&adapter->hw);

	tsize = roundup2(adapter->num_tx_desc * sizeof(struct em_tx_desc),
	    E1000_DBA_ALIGN);

	/* Allocate Transmit Descriptor ring */
        if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
                printf("em%d: Unable to allocate tx_desc memory\n",
                       adapter->unit);
		error = ENOMEM;
                goto err_tx_desc;
        }
        adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;

	rsize = roundup2(adapter->num_rx_desc * sizeof(struct em_rx_desc),
	    E1000_DBA_ALIGN);

	/* Allocate Receive Descriptor ring */
        if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
                printf("em%d: Unable to allocate rx_desc memory\n",
                        adapter->unit);
		error = ENOMEM;
                goto err_rx_desc;
        }
        adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		printf("em%d: Unable to initialize the hardware\n",
		       adapter->unit);
		error = EIO;
                goto err_hw_init;
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (em_read_mac_addr(&adapter->hw) < 0) {
		printf("em%d: EEPROM read error while reading mac address\n",
		       adapter->unit);
		error = EIO;
                goto err_mac_addr;
	}

	if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
                printf("em%d: Invalid mac address\n", adapter->unit);
                error = EIO;
                goto err_mac_addr;
        }

	/* Setup OS specific network interface */
	em_setup_interface(dev, adapter);

	/* Initialize statistics */
	em_clear_hw_cntrs(&adapter->hw);
	em_update_stats_counters(adapter);
	adapter->hw.get_link_status = 1;
	em_check_for_link(&adapter->hw);

	if (bootverbose) {
		/* Print the link status */
		if (adapter->link_active == 1) {
			em_get_speed_and_duplex(&adapter->hw,
			    &adapter->link_speed, &adapter->link_duplex);
			printf("em%d:  Speed:%d Mbps  Duplex:%s\n",
			       adapter->unit,
			       adapter->link_speed,
			       adapter->link_duplex == FULL_DUPLEX ? "Full" :
				"Half");
		} else
			printf("em%d:  Speed:N/A  Duplex:N/A\n",
			    adapter->unit);
	}

	/* Identify 82544 on PCIX */
        em_get_bus_info(&adapter->hw);
        if(adapter->hw.bus_type == em_bus_type_pcix &&
           adapter->hw.mac_type == em_82544) {
                adapter->pcix_82544 = TRUE;
        }
        else {
                adapter->pcix_82544 = FALSE;
        }
	INIT_DEBUGOUT("em_attach: end");
	return(0);

err_mac_addr:
err_hw_init:
        em_dma_free(adapter, &adapter->rxdma);
err_rx_desc:
        em_dma_free(adapter, &adapter->txdma);
err_tx_desc:
err_pci:
        em_free_pci_resources(adapter);
	EM_LOCK_DESTROY(adapter);
        return(error);

}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
em_detach(device_t dev)
{
	struct adapter * adapter = device_get_softc(dev);
	struct ifnet   *ifp = adapter->ifp;

	INIT_DEBUGOUT("em_detach: begin");

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	EM_LOCK(adapter);
	adapter->in_detach = 1;
	em_stop(adapter);
	em_phy_hw_reset(&adapter->hw);
	EM_UNLOCK(adapter);
        ether_ifdetach(adapter->ifp);

	em_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(ifp);

	/* Free Transmit Descriptor ring */
        if (adapter->tx_desc_base) {
                em_dma_free(adapter, &adapter->txdma);
                adapter->tx_desc_base = NULL;
        }

        /* Free Receive Descriptor ring */
        if (adapter->rx_desc_base) {
                em_dma_free(adapter, &adapter->rxdma);
                adapter->rx_desc_base = NULL;
        }

	EM_LOCK_DESTROY(adapter);

	return(0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
em_shutdown(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	EM_LOCK(adapter);
	em_stop(adapter);
	EM_UNLOCK(adapter);
	return(0);
}


/*********************************************************************
 *  Transmit entry point
 *
 *  em_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  If resources are not available, the stack is notified and
 *  the packet is requeued.
 **********************************************************************/

static void
em_start_locked(struct ifnet *ifp)
{
        struct mbuf    *m_head;
        struct adapter *adapter = ifp->if_softc;

	mtx_assert(&adapter->mtx, MA_OWNED);

        if (!adapter->link_active)
                return;

        while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {

                IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);

                if (m_head == NULL) break;

		/*
		 * em_encap() can modify our pointer, and/or make it NULL on
		 * failure.  In that event, we can't requeue.
		 */
		if (em_encap(adapter, &m_head)) {
			if (m_head == NULL)
				break;
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
                }

		/* Send a copy of the frame to the BPF listener */
		BPF_MTAP(ifp, m_head);

                /* Set timeout in case hardware has problems transmitting */
                ifp->if_timer = EM_TX_TIMEOUT;
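                /*
                 * Note: em_encap() has already handed the frame to the
                 * hardware by advancing the TDT register, so all that is
                 * left here is the BPF tap above and arming the watchdog.
                 */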

        }
        return;
}

static void
em_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;

	EM_LOCK(adapter);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		em_start_locked(ifp);
	EM_UNLOCK(adapter);
	return;
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  em_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ifreq   *ifr = (struct ifreq *) data;
	struct adapter * adapter = ifp->if_softc;
	int error = 0;

	if (adapter->in_detach) return(error);

	switch (command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
		ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
	    {
#ifndef __NO_STRICT_ALIGNMENT
		if (ifr->ifr_mtu > ETHERMTU)
			/*
			 * XXX
			 * Due to a limitation of the DMA engine, fix-up code
			 * is needed on strict-alignment architectures.
			 * Disable jumbo frames until there is a better solution.
			 */
			error = EINVAL;
#else
		int max_frame_size;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

		switch (adapter->hw.mac_type) {
		case em_82571:
		case em_82572:
			max_frame_size = 10500;
			break;
		case em_82573:
			/* 82573 does not support jumbo frames. */
			max_frame_size = ETHER_MAX_LEN;
			break;
		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		EM_LOCK(adapter);
		ifp->if_mtu = ifr->ifr_mtu;
		adapter->hw.max_frame_size =
		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
		em_init_locked(adapter);
		EM_UNLOCK(adapter);
#endif
		break;
	    }
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
		EM_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				em_init_locked(adapter);
			}

			em_disable_promisc(adapter);
			em_set_promisc(adapter);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				em_stop(adapter);
			}
		}
		EM_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			EM_LOCK(adapter);
			em_disable_intr(adapter);
			em_set_multi(adapter);
			if (adapter->hw.mac_type == em_82542_rev2_0) {
				em_initialize_receive_unit(adapter);
			}
#ifdef DEVICE_POLLING
                        if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
				em_enable_intr(adapter);
			EM_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask, reinit;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(em_poll, ifp);
				if (error)
					return(error);
				EM_LOCK(adapter);
				em_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				EM_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				EM_LOCK(adapter);
				em_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				EM_UNLOCK(adapter);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
			em_init(adapter);
		break;
	    }
	default:
		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)", (int)command);
		error = EINVAL;
	}

	return(error);
}

/*********************************************************************
 *  Watchdog entry point
 *
 *  This routine is called whenever the hardware quits transmitting.
 *
 **********************************************************************/

static void
em_watchdog(struct ifnet *ifp)
{
	struct adapter * adapter;
	adapter = ifp->if_softc;

	EM_LOCK(adapter);
	/* If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
		ifp->if_timer = EM_TX_TIMEOUT;
		EM_UNLOCK(adapter);
		return;
	}

	if (em_check_for_link(&adapter->hw))
		printf("em%d: watchdog timeout -- resetting\n", adapter->unit);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;

	em_init_locked(adapter);
	EM_UNLOCK(adapter);
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  the init entry point in the network interface structure. It is
 *  also used by the driver as a hw/sw initialization routine to
 *  get to a consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static void
em_init_locked(struct adapter * adapter)
{
	struct ifnet   *ifp;

	uint32_t	pba;
	ifp = adapter->ifp;

	INIT_DEBUGOUT("em_init: begin");

	mtx_assert(&adapter->mtx, MA_OWNED);

	em_stop(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 */
	switch (adapter->hw.mac_type) {
	case em_82547:
	case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (adapter->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	case em_82571: /* 82571: Total Packet Buffer is 48K */
	case em_82572: /* 82572: Total Packet Buffer is 48K */
			pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;
	case em_82573: /* 82573: Total Packet Buffer is 32K */
		/* Jumbo frames not supported */
			pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K.   */
		if(adapter->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}
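	/*
	 * The E1000_PBA_* constants are in kilobytes; for the 82547 the
	 * transmit share computed above is converted to bytes with
	 * EM_PBA_BYTES_SHIFT so the FIFO workaround can track it.
	 */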

	INIT_DEBUGOUT1("em_init: pba=%dK",pba);
	E1000_WRITE_REG(&adapter->hw, PBA, pba);

	/* Get the latest mac address; user can use a LAA */
        bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac_addr,
              ETHER_ADDR_LEN);

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		printf("em%d: Unable to initialize the hardware\n",
		       adapter->unit);
		return;
	}

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		em_enable_vlans(adapter);

	/* Prepare transmit descriptors and buffers */
	if (em_setup_transmit_structures(adapter)) {
		printf("em%d: Could not setup transmit structures\n",
		       adapter->unit);
		em_stop(adapter);
		return;
	}
	em_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	em_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (em_setup_receive_structures(adapter)) {
		printf("em%d: Could not setup receive structures\n",
		       adapter->unit);
		em_stop(adapter);
		return;
	}
	em_initialize_receive_unit(adapter);

	/* Don't lose promiscuous settings */
	em_set_promisc(adapter);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	if (adapter->hw.mac_type >= em_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist = EM_CHECKSUM_FEATURES;
		else
			ifp->if_hwassist = 0;
	}

	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
	em_clear_hw_cntrs(&adapter->hw);
#ifdef DEVICE_POLLING
        /*
         * Only enable interrupts if we are not polling; make sure
         * they are off otherwise.
         */
        if (ifp->if_capenable & IFCAP_POLLING)
                em_disable_intr(adapter);
        else
#endif /* DEVICE_POLLING */
		em_enable_intr(adapter);

	/* Don't reset the phy next time init gets called */
	adapter->hw.phy_reset_disable = TRUE;

	return;
}

static void
em_init(void *arg)
{
	struct adapter * adapter = arg;

	EM_LOCK(adapter);
	em_init_locked(adapter);
	EM_UNLOCK(adapter);
	return;
}


#ifdef DEVICE_POLLING
static void
em_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
        struct adapter *adapter = ifp->if_softc;
        u_int32_t reg_icr;

	mtx_assert(&adapter->mtx, MA_OWNED);

        if (cmd == POLL_AND_CHECK_STATUS) {
                reg_icr = E1000_READ_REG(&adapter->hw, ICR);
                if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
                        adapter->hw.get_link_status = 1;
                        em_check_for_link(&adapter->hw);
                        em_print_link_status(adapter);
			callout_reset(&adapter->timer, hz, em_local_timer, adapter);
                }
        }
	em_process_receive_interrupts(adapter, count);
	em_clean_transmit_interrupts(adapter);

        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                em_start_locked(ifp);
}

static void
em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
        struct adapter *adapter = ifp->if_softc;

	EM_LOCK(adapter);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		em_poll_locked(ifp, cmd, count);
	EM_UNLOCK(adapter);
}
#endif /* DEVICE_POLLING */
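/*
 * em_poll() only takes over after the administrator enables polling on the
 * interface (IFCAP_POLLING through the SIOCSIFCAP case in em_ioctl() above,
 * typically via "ifconfig emN polling"); otherwise the driver remains
 * interrupt driven.
 */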

/*********************************************************************
 *
 *  Interrupt Service routine
 *
 **********************************************************************/
static void
em_intr(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp;
	uint32_t	reg_icr;
	int		wantinit = 0;

	EM_LOCK(adapter);

	ifp = adapter->ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		EM_UNLOCK(adapter);
		return;
	}
#endif /* DEVICE_POLLING */

	for (;;) {
		reg_icr = E1000_READ_REG(&adapter->hw, ICR);
		if (reg_icr == 0)
			break;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			em_process_receive_interrupts(adapter, -1);
			em_clean_transmit_interrupts(adapter);
		}

		/* Link status change */
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.get_link_status = 1;
			em_check_for_link(&adapter->hw);
			em_print_link_status(adapter);
			callout_reset(&adapter->timer, hz, em_local_timer,
			    adapter);
		}

		if (reg_icr & E1000_ICR_RXO) {
			adapter->rx_overruns++;
			wantinit = 1;
		}
	}
#if 0
	if (wantinit)
		em_init_locked(adapter);
#endif
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		em_start_locked(ifp);

	EM_UNLOCK(adapter);
	return;
}



/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter * adapter = ifp->if_softc;

	INIT_DEBUGOUT("em_media_status: begin");

	em_check_for_link(&adapter->hw);
	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
		if (adapter->link_active == 0) {
			em_get_speed_and_duplex(&adapter->hw,
						&adapter->link_speed,
						&adapter->link_duplex);
			adapter->link_active = 1;
		}
	} else {
		if (adapter->link_active == 1) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			adapter->link_active = 0;
		}
	}

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	if (adapter->hw.media_type == em_media_type_fiber) {
		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
	} else {
		switch (adapter->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (adapter->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
em_media_change(struct ifnet *ifp)
{
	struct adapter * adapter = ifp->if_softc;
	struct ifmedia  *ifm = &adapter->media;

	INIT_DEBUGOUT("em_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return(EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		adapter->hw.autoneg = DO_AUTO_NEG;
		adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;
	case IFM_1000_SX:
	case IFM_1000_T:
		adapter->hw.autoneg = DO_AUTO_NEG;
		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case IFM_100_TX:
		adapter->hw.autoneg = FALSE;
		adapter->hw.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.forced_speed_duplex = em_100_full;
		else
			adapter->hw.forced_speed_duplex	= em_100_half;
		break;
	case IFM_10_T:
		adapter->hw.autoneg = FALSE;
		adapter->hw.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.forced_speed_duplex = em_10_full;
		else
			adapter->hw.forced_speed_duplex	= em_10_half;
		break;
	default:
		printf("em%d: Unsupported media type\n", adapter->unit);
	}

	/* As the speed/duplex settings may have changed we need to
	 * reset the PHY.
	 */
	adapter->hw.phy_reset_disable = FALSE;

	em_init(adapter);

	return(0);
}

/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
static int
em_encap(struct adapter *adapter, struct mbuf **m_headp)
{
        u_int32_t       txd_upper;
        u_int32_t       txd_lower, txd_used = 0, txd_saved = 0;
        int             i, j, error = 0;

	struct mbuf	*m_head;

	/* For 82544 Workaround */
	DESC_ARRAY              desc_array;
	u_int32_t               array_elements;
	u_int32_t               counter;
        struct m_tag    *mtag;
	bus_dma_segment_t	segs[EM_MAX_SCATTER];
	int			nsegs;
        struct em_buffer   *tx_buffer;
        struct em_tx_desc *current_tx_desc = NULL;
        struct ifnet   *ifp = adapter->ifp;

	m_head = *m_headp;

        /*
         * Force a cleanup if the number of available TX descriptors
         * hits the threshold
         */
        if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
                em_clean_transmit_interrupts(adapter);
                if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
                        adapter->no_tx_desc_avail1++;
                        return(ENOBUFS);
                }
        }

        /*
         * Map the packet for DMA.
         */
	tx_buffer = &adapter->tx_buffer_area[adapter->next_avail_tx_desc];
	error = bus_dmamap_load_mbuf_sg(adapter->txtag, tx_buffer->map, m_head,
	    segs, &nsegs, BUS_DMA_NOWAIT);
        if (error != 0) {
                adapter->no_tx_dma_setup++;
                return (error);
        }
        KASSERT(nsegs != 0, ("em_encap: empty packet"));

        if (nsegs > adapter->num_tx_desc_avail) {
                adapter->no_tx_desc_avail2++;
		error = ENOBUFS;
		goto encap_fail;
        }


        if (ifp->if_hwassist > 0) {
                em_transmit_checksum_setup(adapter,  m_head,
                                           &txd_upper, &txd_lower);
        } else
                txd_upper = txd_lower = 0;


        /* Find out if we are in vlan mode */
        mtag = VLAN_OUTPUT_TAG(ifp, m_head);

	/*
	 * When operating in promiscuous mode, hardware encapsulation for
	 * packets is disabled.  This means we have to add the vlan
	 * encapsulation in the driver, since it will have come down from the
	 * VLAN layer with a tag instead of a VLAN header.
	 */
	if (mtag != NULL && adapter->em_insert_vlan_header) {
		struct ether_vlan_header *evl;
		struct ether_header eh;

		m_head = m_pullup(m_head, sizeof(eh));
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		eh = *mtod(m_head, struct ether_header *);
		M_PREPEND(m_head, sizeof(*evl), M_DONTWAIT);
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		m_head = m_pullup(m_head, sizeof(*evl));
		if (m_head == NULL) {
			*m_headp = NULL;
			error = ENOBUFS;
			goto encap_fail;
		}
		evl = mtod(m_head, struct ether_vlan_header *);
		bcopy(&eh, evl, sizeof(*evl));
		evl->evl_proto = evl->evl_encap_proto;
		evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
		evl->evl_tag = htons(VLAN_TAG_VALUE(mtag));
		m_tag_delete(m_head, mtag);
		mtag = NULL;
		*m_headp = m_head;
	}

        i = adapter->next_avail_tx_desc;
	if (adapter->pcix_82544) {
		txd_saved = i;
		txd_used = 0;
	}
        for (j = 0; j < nsegs; j++) {
		/* If adapter is 82544 and on PCIX bus */
		if(adapter->pcix_82544) {
			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
                        array_elements = em_fill_descriptors(segs[j].ds_addr,
			    segs[j].ds_len, &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				if (txd_used == adapter->num_tx_desc_avail) {
					adapter->next_avail_tx_desc = txd_saved;
					adapter->no_tx_desc_avail2++;
					error = ENOBUFS;
					goto encap_fail;
                                }
                                tx_buffer = &adapter->tx_buffer_area[i];
                                current_tx_desc = &adapter->tx_desc_base[i];
                                current_tx_desc->buffer_addr = htole64(
					desc_array.descriptor[counter].address);
                                current_tx_desc->lower.data = htole32(
					(adapter->txd_cmd | txd_lower |
					 (u_int16_t)desc_array.descriptor[counter].length));
                                current_tx_desc->upper.data = htole32((txd_upper));
                                if (++i == adapter->num_tx_desc)
                                         i = 0;

                                tx_buffer->m_head = NULL;
                                txd_used++;
                        }
		} else {
			tx_buffer = &adapter->tx_buffer_area[i];
			current_tx_desc = &adapter->tx_desc_base[i];

			current_tx_desc->buffer_addr = htole64(segs[j].ds_addr);
			current_tx_desc->lower.data = htole32(
				adapter->txd_cmd | txd_lower | segs[j].ds_len);
			current_tx_desc->upper.data = htole32(txd_upper);

			if (++i == adapter->num_tx_desc)
				i = 0;

			tx_buffer->m_head = NULL;
		}
        }

	adapter->next_avail_tx_desc = i;
	if (adapter->pcix_82544) {
		adapter->num_tx_desc_avail -= txd_used;
	}
	else {
		adapter->num_tx_desc_avail -= nsegs;
	}

        if (mtag != NULL) {
                /* Set the vlan id */
                current_tx_desc->upper.fields.special = htole16(VLAN_TAG_VALUE(mtag));

                /* Tell hardware to add tag */
                current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
        }

        tx_buffer->m_head = m_head;
        bus_dmamap_sync(adapter->txtag, tx_buffer->map, BUS_DMASYNC_PREWRITE);

        /*
         * Last Descriptor of Packet needs End Of Packet (EOP)
         */
        current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);

        /*
         * Advance the Transmit Descriptor Tail (TDT); this tells the E1000
         * that this frame is available to transmit.
         */
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        if (adapter->hw.mac_type == em_82547 &&
            adapter->link_duplex == HALF_DUPLEX) {
                em_82547_move_tail_locked(adapter);
        } else {
                E1000_WRITE_REG(&adapter->hw, TDT, i);
                if (adapter->hw.mac_type == em_82547) {
                        em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len);
                }
        }

        return(0);

encap_fail:
	bus_dmamap_unload(adapter->txtag, tx_buffer->map);
	return (error);
}

/*********************************************************************
 *
 * 82547 workaround to avoid controller hang in half-duplex environment.
 * The workaround is to avoid queuing a large packet that would span
 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
 * in this case. We do that only when the FIFO is quiescent.
 *
 **********************************************************************/
static void
em_82547_move_tail_locked(struct adapter *adapter)
{
	uint16_t hw_tdt;
	uint16_t sw_tdt;
	struct em_tx_desc *tx_desc;
	uint16_t length = 0;
	boolean_t eop = 0;

	EM_LOCK_ASSERT(adapter);

	hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
	sw_tdt = adapter->next_avail_tx_desc;

	while (hw_tdt != sw_tdt) {
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if(++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if(eop) {
			if (em_82547_fifo_workaround(adapter, length)) {
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
					em_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
			em_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}
	return;
}

static void
em_82547_move_tail(void *arg)
{
        struct adapter *adapter = arg;

        EM_LOCK(adapter);
        em_82547_move_tail_locked(adapter);
        EM_UNLOCK(adapter);
}

static int
em_82547_fifo_workaround(struct adapter *adapter, int len)
{
	int fifo_space, fifo_pkt_len;

1475	fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1476
1477	if (adapter->link_duplex == HALF_DUPLEX) {
1478		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1479
1480		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1481			if (em_82547_tx_fifo_reset(adapter)) {
1482				return(0);
1483			}
1484			else {
1485				return(1);
1486			}
1487		}
1488	}
1489
1490	return(0);
1491}
1492
1493static void
1494em_82547_update_fifo_head(struct adapter *adapter, int len)
1495{
1496	int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1497
1498	/* tx_fifo_head is always 16 byte aligned */
1499	adapter->tx_fifo_head += fifo_pkt_len;
1500	if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1501		adapter->tx_fifo_head -= adapter->tx_fifo_size;
1502	}
1503
1504	return;
1505}
1506
1507
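/*
 * Note on the quiescence test below: the FIFO pointers are only rewound when
 * the transmitter is provably idle, i.e. the descriptor ring is empty
 * (TDT == TDH), the FIFO head and tail match, the saved FIFO head and tail
 * match, and the FIFO packet count (TDFPC) is zero.  Only then is it safe to
 * briefly disable the TX unit and reset the pointers.
 */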
1508static int
1509em_82547_tx_fifo_reset(struct adapter *adapter)
1510{
1511	uint32_t tctl;
1512
1513	if ( (E1000_READ_REG(&adapter->hw, TDT) ==
1514	      E1000_READ_REG(&adapter->hw, TDH)) &&
1515	     (E1000_READ_REG(&adapter->hw, TDFT) ==
1516	      E1000_READ_REG(&adapter->hw, TDFH)) &&
1517	     (E1000_READ_REG(&adapter->hw, TDFTS) ==
1518	      E1000_READ_REG(&adapter->hw, TDFHS)) &&
1519	     (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {
1520
1521		/* Disable TX unit */
1522		tctl = E1000_READ_REG(&adapter->hw, TCTL);
1523		E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);
1524
1525		/* Reset FIFO pointers */
1526		E1000_WRITE_REG(&adapter->hw, TDFT,  adapter->tx_head_addr);
1527		E1000_WRITE_REG(&adapter->hw, TDFH,  adapter->tx_head_addr);
1528		E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
1529		E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);
1530
1531		/* Re-enable TX unit */
1532		E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1533		E1000_WRITE_FLUSH(&adapter->hw);
1534
1535		adapter->tx_fifo_head = 0;
1536		adapter->tx_fifo_reset_cnt++;
1537
1538		return(TRUE);
1539	}
1540	else {
1541		return(FALSE);
1542	}
1543}
1544
1545static void
1546em_set_promisc(struct adapter * adapter)
1547{
1548
1549	u_int32_t       reg_rctl;
1550	struct ifnet   *ifp = adapter->ifp;
1551
1552	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1553
1554	if (ifp->if_flags & IFF_PROMISC) {
1555		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1556		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1557		/* Disable VLAN hardware tag stripping in promiscuous mode.
1558		 * This allows bridging of VLAN-tagged frames and also
1559		 * makes the VLAN tags visible in tcpdump.
1560		 */
1561		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1562			em_disable_vlans(adapter);
1563		adapter->em_insert_vlan_header = 1;
1564	} else if (ifp->if_flags & IFF_ALLMULTI) {
1565		reg_rctl |= E1000_RCTL_MPE;
1566		reg_rctl &= ~E1000_RCTL_UPE;
1567		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1568		adapter->em_insert_vlan_header = 0;
1569	} else
1570		adapter->em_insert_vlan_header = 0;
1571
1572	return;
1573}
1574
1575static void
1576em_disable_promisc(struct adapter * adapter)
1577{
1578	u_int32_t       reg_rctl;
1579	struct ifnet   *ifp = adapter->ifp;
1580
1581	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1582
1583	reg_rctl &=  (~E1000_RCTL_UPE);
1584	reg_rctl &=  (~E1000_RCTL_MPE);
1585	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1586
1587	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1588		em_enable_vlans(adapter);
1589	adapter->em_insert_vlan_header = 0;
1590
1591	return;
1592}
1593
1594
1595/*********************************************************************
1596 *  Multicast Update
1597 *
1598 *  This routine is called whenever the multicast address list is updated.
1599 *
1600 **********************************************************************/
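/*
 * Note that the hardware multicast filter only holds
 * MAX_NUM_MULTICAST_ADDRESSES entries.  When more groups than that are
 * joined, the code below falls back to setting E1000_RCTL_MPE and accepts
 * all multicast traffic instead of programming the filter table.
 */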
1601
1602static void
1603em_set_multi(struct adapter * adapter)
1604{
1605        u_int32_t reg_rctl = 0;
1606        u_int8_t  mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
1607        struct ifmultiaddr  *ifma;
1608        int mcnt = 0;
1609        struct ifnet   *ifp = adapter->ifp;
1610
1611        IOCTL_DEBUGOUT("em_set_multi: begin");
1612
1613        if (adapter->hw.mac_type == em_82542_rev2_0) {
1614                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1615                if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
1616                        em_pci_clear_mwi(&adapter->hw);
1617                }
1618                reg_rctl |= E1000_RCTL_RST;
1619                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1620                msec_delay(5);
1621        }
1622
1623	IF_ADDR_LOCK(ifp);
1624        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1625                if (ifma->ifma_addr->sa_family != AF_LINK)
1626                        continue;
1627
1628		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break;
1629
1630                bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1631                      &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
1632                mcnt++;
1633        }
1634	IF_ADDR_UNLOCK(ifp);
1635
1636        if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1637                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1638                reg_rctl |= E1000_RCTL_MPE;
1639                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1640        } else
1641                em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);
1642
1643        if (adapter->hw.mac_type == em_82542_rev2_0) {
1644                reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1645                reg_rctl &= ~E1000_RCTL_RST;
1646                E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1647                msec_delay(5);
1648                if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
1649                        em_pci_set_mwi(&adapter->hw);
1650                }
1651        }
1652
1653        return;
1654}
1655
1656
1657/*********************************************************************
1658 *  Timer routine
1659 *
1660 *  This routine checks for link status and updates statistics.
1661 *
1662 **********************************************************************/
1663
1664static void
1665em_local_timer(void *arg)
1666{
1667	struct ifnet   *ifp;
1668	struct adapter * adapter = arg;
1669	ifp = adapter->ifp;
1670
1671	EM_LOCK(adapter);
1672
1673	em_check_for_link(&adapter->hw);
1674	em_print_link_status(adapter);
1675	em_update_stats_counters(adapter);
1676	if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1677		em_print_hw_stats(adapter);
1678	}
1679	em_smartspeed(adapter);
1680
1681	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1682
1683	EM_UNLOCK(adapter);
1684	return;
1685}
1686
1687static void
1688em_print_link_status(struct adapter * adapter)
1689{
1690	struct ifnet *ifp = adapter->ifp;
1691
1692	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1693		if (adapter->link_active == 0) {
1694			em_get_speed_and_duplex(&adapter->hw,
1695						&adapter->link_speed,
1696						&adapter->link_duplex);
1697			if (bootverbose)
1698				printf("em%d: Link is up %d Mbps %s\n",
1699				       adapter->unit,
1700				       adapter->link_speed,
1701				       ((adapter->link_duplex == FULL_DUPLEX) ?
1702					"Full Duplex" : "Half Duplex"));
1703			adapter->link_active = 1;
1704			adapter->smartspeed = 0;
1705			if_link_state_change(ifp, LINK_STATE_UP);
1706		}
1707	} else {
1708		if (adapter->link_active == 1) {
1709			adapter->link_speed = 0;
1710			adapter->link_duplex = 0;
1711			if (bootverbose)
1712				printf("em%d: Link is Down\n", adapter->unit);
1713			adapter->link_active = 0;
1714			if_link_state_change(ifp, LINK_STATE_DOWN);
1715		}
1716	}
1717
1718	return;
1719}
1720
1721/*********************************************************************
1722 *
1723 *  This routine disables all traffic on the adapter by issuing a
1724 *  global reset on the MAC and deallocates TX/RX buffers.
1725 *
1726 **********************************************************************/
1727
1728static void
1729em_stop(void *arg)
1730{
1731	struct ifnet   *ifp;
1732	struct adapter * adapter = arg;
1733	ifp = adapter->ifp;
1734
1735	mtx_assert(&adapter->mtx, MA_OWNED);
1736
1737	INIT_DEBUGOUT("em_stop: begin");
1738
1739	em_disable_intr(adapter);
1740	em_reset_hw(&adapter->hw);
1741	callout_stop(&adapter->timer);
1742	callout_stop(&adapter->tx_fifo_timer);
1743	em_free_transmit_structures(adapter);
1744	em_free_receive_structures(adapter);
1745
1746
1747	/* Tell the stack that the interface is no longer active */
1748	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1749
1750	return;
1751}
1752
1753
1754/*********************************************************************
1755 *
1756 *  Determine hardware revision.
1757 *
1758 **********************************************************************/
1759static void
1760em_identify_hardware(struct adapter * adapter)
1761{
1762	device_t dev = adapter->dev;
1763
1764	/* Make sure our PCI config space has the necessary bits set */
1765	adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1766	if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1767	      (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1768		printf("em%d: Memory Access and/or Bus Master bits were not set!\n",
1769		       adapter->unit);
1770		adapter->hw.pci_cmd_word |=
1771		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1772		pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
1773	}
1774
1775	/* Save off the information about this board */
1776	adapter->hw.vendor_id = pci_get_vendor(dev);
1777	adapter->hw.device_id = pci_get_device(dev);
1778	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1779	adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
1780	adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
1781
1782	/* Identify the MAC */
1783        if (em_set_mac_type(&adapter->hw))
1784                printf("em%d: Unknown MAC Type\n", adapter->unit);
1785
1786	if(adapter->hw.mac_type == em_82541 ||
1787	   adapter->hw.mac_type == em_82541_rev_2 ||
1788	   adapter->hw.mac_type == em_82547 ||
1789	   adapter->hw.mac_type == em_82547_rev_2)
1790		adapter->hw.phy_init_script = TRUE;
1791
1792        return;
1793}
1794
1795static int
1796em_allocate_pci_resources(struct adapter * adapter)
1797{
1798	int             val, rid;
1799	device_t        dev = adapter->dev;
1800
1801	rid = PCIR_BAR(0);
1802	adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1803						     &rid, RF_ACTIVE);
1804	if (!(adapter->res_memory)) {
1805		printf("em%d: Unable to allocate bus resource: memory\n",
1806		       adapter->unit);
1807		return(ENXIO);
1808	}
1809	adapter->osdep.mem_bus_space_tag =
1810	rman_get_bustag(adapter->res_memory);
1811	adapter->osdep.mem_bus_space_handle =
1812	rman_get_bushandle(adapter->res_memory);
1813	adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
1814
1815
1816	if (adapter->hw.mac_type > em_82543) {
1817		/* Figure out where our IO BAR is */
1818		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
1819			val = pci_read_config(dev, rid, 4);
1820			if (E1000_BAR_TYPE(val) == E1000_BAR_TYPE_IO) {
1821				adapter->io_rid = rid;
1822				break;
1823			}
1824			rid += 4;
1825			/* check for 64bit BAR */
1826			if (E1000_BAR_MEM_TYPE(val) == E1000_BAR_MEM_TYPE_64BIT)
1827				rid += 4;
1828		}
1829		if (rid >= PCIR_CIS) {
1830			printf("em%d: Unable to locate IO BAR\n", adapter->unit);
1831			return (ENXIO);
1832		}
1833		adapter->res_ioport = bus_alloc_resource_any(dev,
1834							     SYS_RES_IOPORT,
1835							     &adapter->io_rid,
1836							     RF_ACTIVE);
1837		if (!(adapter->res_ioport)) {
1838			printf("em%d: Unable to allocate bus resource: ioport\n",
1839			       adapter->unit);
1840			return(ENXIO);
1841		}
1842		adapter->hw.io_base = 0;
1843		adapter->osdep.io_bus_space_tag =
1844		    rman_get_bustag(adapter->res_ioport);
1845		adapter->osdep.io_bus_space_handle =
1846		    rman_get_bushandle(adapter->res_ioport);
1847	}
1848
1849	rid = 0x0;
1850	adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1851						        RF_SHAREABLE |
1852							RF_ACTIVE);
1853	if (!(adapter->res_interrupt)) {
1854		printf("em%d: Unable to allocate bus resource: interrupt\n",
1855		       adapter->unit);
1856		return(ENXIO);
1857	}
1858	if (bus_setup_intr(dev, adapter->res_interrupt,
1859			   INTR_TYPE_NET | INTR_MPSAFE,
1860			   (void (*)(void *)) em_intr, adapter,
1861			   &adapter->int_handler_tag)) {
1862		printf("em%d: Error registering interrupt handler!\n",
1863		       adapter->unit);
1864		return(ENXIO);
1865	}
1866
1867	adapter->hw.back = &adapter->osdep;
1868
1869	return(0);
1870}
1871
1872static void
1873em_free_pci_resources(struct adapter * adapter)
1874{
1875	device_t dev = adapter->dev;
1876
1877	if (adapter->res_interrupt != NULL) {
1878		bus_teardown_intr(dev, adapter->res_interrupt,
1879				  adapter->int_handler_tag);
1880		bus_release_resource(dev, SYS_RES_IRQ, 0,
1881				     adapter->res_interrupt);
1882	}
1883	if (adapter->res_memory != NULL) {
1884		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
1885				     adapter->res_memory);
1886	}
1887
1888	if (adapter->res_ioport != NULL) {
1889		bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
1890				     adapter->res_ioport);
1891	}
1892	return;
1893}
1894
1895/*********************************************************************
1896 *
1897 *  Initialize the hardware to a configuration as specified by the
1898 *  adapter structure. The controller is reset, the EEPROM is
1899 *  verified, the MAC address is set, then the shared initialization
1900 *  routines are called.
1901 *
1902 **********************************************************************/
1903static int
1904em_hardware_init(struct adapter * adapter)
1905{
1906	uint16_t rx_buffer_size;
1907
1908        INIT_DEBUGOUT("em_hardware_init: begin");
1909	/* Issue a global reset */
1910	em_reset_hw(&adapter->hw);
1911
1912	/* When hardware is reset, fifo_head is also reset */
1913	adapter->tx_fifo_head = 0;
1914
1915	/* Make sure we have a good EEPROM before we read from it */
1916	if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
1917		printf("em%d: The EEPROM Checksum Is Not Valid\n",
1918		       adapter->unit);
1919		return(EIO);
1920	}
1921
1922	if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
1923		printf("em%d: EEPROM read error while reading part number\n",
1924		       adapter->unit);
1925		return(EIO);
1926	}
1927
1928	/*
1929	 * These parameters control the automatic generation (Tx) and
1930	 * response (Rx) to Ethernet PAUSE frames.
1931	 * - High water mark should allow for at least two frames to be
1932	 *   received after sending an XOFF.
1933	 * - Low water mark works best when it is very near the high water mark.
1934	 *   This allows the receiver to restart by sending XON when it has drained
1935	 *   a bit.  Here we use an arbitrary value of 1500 which will restart after
1936	 *   one full frame is pulled from the buffer.  There could be several smaller
1937	 *   frames in the buffer and if so they will not trigger the XON until their
1938	 *   total number reduces the buffer by 1500.
1939	 * - The pause time is fairly large: 0x1000 quanta of 512 bit times (about 2 ms at gigabit speed).
1940	 */
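	/*
	 * A worked example with assumed numbers (a 48 KB receive packet
	 * buffer and a 1522-byte max frame; the real values come from the
	 * PBA register read below and from hw.max_frame_size):
	 *
	 *     rx_buffer_size = 48 * 1024                    = 49152
	 *     fc_high_water  = 49152 - roundup2(1522, 1024) = 47104
	 *     fc_low_water   = 47104 - 1500                  = 45604
	 *
	 * XOFF is sent once the receive buffer fills past fc_high_water and
	 * XON is sent once it drains back below fc_low_water.
	 */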
1941	rx_buffer_size = ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff) << 10 );
1942
1943	adapter->hw.fc_high_water = rx_buffer_size -
1944	    roundup2(adapter->hw.max_frame_size, 1024);
1945	adapter->hw.fc_low_water = adapter->hw.fc_high_water - 1500;
1946	adapter->hw.fc_pause_time = 0x1000;
1947	adapter->hw.fc_send_xon = TRUE;
1948	adapter->hw.fc = em_fc_full;
1949
1950	if (em_init_hw(&adapter->hw) < 0) {
1951		printf("em%d: Hardware Initialization Failed\n",
1952		       adapter->unit);
1953		return(EIO);
1954	}
1955
1956	em_check_for_link(&adapter->hw);
1957	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
1958		adapter->link_active = 1;
1959	else
1960		adapter->link_active = 0;
1961
1962	if (adapter->link_active) {
1963		em_get_speed_and_duplex(&adapter->hw,
1964					&adapter->link_speed,
1965					&adapter->link_duplex);
1966	} else {
1967		adapter->link_speed = 0;
1968		adapter->link_duplex = 0;
1969	}
1970
1971	return(0);
1972}
1973
1974/*********************************************************************
1975 *
1976 *  Setup networking device structure and register an interface.
1977 *
1978 **********************************************************************/
1979static void
1980em_setup_interface(device_t dev, struct adapter * adapter)
1981{
1982	struct ifnet   *ifp;
1983	INIT_DEBUGOUT("em_setup_interface: begin");
1984
1985	ifp = adapter->ifp = if_alloc(IFT_ETHER);
1986	if (ifp == NULL)
1987		panic("%s: can not if_alloc()", device_get_nameunit(dev));
1988	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1989	ifp->if_mtu = ETHERMTU;
1990	ifp->if_baudrate = 1000000000;
1991	ifp->if_init =  em_init;
1992	ifp->if_softc = adapter;
1993	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1994	ifp->if_ioctl = em_ioctl;
1995	ifp->if_start = em_start;
1996	ifp->if_watchdog = em_watchdog;
1997	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
1998	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
1999	IFQ_SET_READY(&ifp->if_snd);
2000
2001        ether_ifattach(ifp, adapter->hw.mac_addr);
2002
2003	ifp->if_capabilities = ifp->if_capenable = 0;
2004
2005	if (adapter->hw.mac_type >= em_82543) {
2006		ifp->if_capabilities |= IFCAP_HWCSUM;
2007		ifp->if_capenable |= IFCAP_HWCSUM;
2008	}
2009
2010	/*
2011	 * Tell the upper layer(s) we support long frames.
2012	 */
2013	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2014	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2015	ifp->if_capenable |= IFCAP_VLAN_MTU;
2016
2017#ifdef DEVICE_POLLING
2018	ifp->if_capabilities |= IFCAP_POLLING;
2019#endif
2020
2021	/*
2022	 * Specify the media types supported by this adapter and register
2023	 * callbacks to update media and link information
2024	 */
2025	ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
2026		     em_media_status);
2027	if (adapter->hw.media_type == em_media_type_fiber) {
2028		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
2029			    0, NULL);
2030		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
2031			    0, NULL);
2032	} else {
2033		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2034		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2035			    0, NULL);
2036		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
2037			    0, NULL);
2038		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
2039			    0, NULL);
2040		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
2041			    0, NULL);
2042		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2043	}
2044	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2045	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2046
2047	return;
2048}
2049
2050
2051/*********************************************************************
2052 *
2053 *  Workaround for SmartSpeed on 82541 and 82547 controllers
2054 *
2055 **********************************************************************/
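/*
 * Rough outline of the state machine below: adapter->smartspeed counts local
 * timer ticks while the link is down.  If the master/slave configuration
 * fault is seen twice on the first tick, manual master/slave configuration
 * (CR_1000T_MS_ENABLE) is turned off and autonegotiation restarted; after
 * EM_SMARTSPEED_DOWNSHIFT ticks the master/slave setting is forced back on
 * (which helps on 2/3-pair cabling); after EM_SMARTSPEED_MAX ticks the
 * counter wraps and the sequence starts over.
 */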
2056static void
2057em_smartspeed(struct adapter *adapter)
2058{
2059        uint16_t phy_tmp;
2060
2061	if(adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
2062	   !adapter->hw.autoneg || !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
2063		return;
2064
2065        if(adapter->smartspeed == 0) {
2066                /* If the Master/Slave config fault is asserted twice,
2067                 * we assume back-to-back faults and work around them */
2068                em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2069                if(!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) return;
2070                em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2071                if(phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2072                        em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
2073					&phy_tmp);
2074                        if(phy_tmp & CR_1000T_MS_ENABLE) {
2075                                phy_tmp &= ~CR_1000T_MS_ENABLE;
2076                                em_write_phy_reg(&adapter->hw,
2077                                                    PHY_1000T_CTRL, phy_tmp);
2078                                adapter->smartspeed++;
2079                                if(adapter->hw.autoneg &&
2080                                   !em_phy_setup_autoneg(&adapter->hw) &&
2081				   !em_read_phy_reg(&adapter->hw, PHY_CTRL,
2082                                                       &phy_tmp)) {
2083                                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
2084                                                    MII_CR_RESTART_AUTO_NEG);
2085                                        em_write_phy_reg(&adapter->hw,
2086							 PHY_CTRL, phy_tmp);
2087                                }
2088                        }
2089                }
2090                return;
2091        } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2092                /* If still no link, perhaps using 2/3 pair cable */
2093                em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2094                phy_tmp |= CR_1000T_MS_ENABLE;
2095                em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2096                if(adapter->hw.autoneg &&
2097                   !em_phy_setup_autoneg(&adapter->hw) &&
2098                   !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
2099                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
2100                                    MII_CR_RESTART_AUTO_NEG);
2101                        em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
2102                }
2103        }
2104        /* Restart process after EM_SMARTSPEED_MAX iterations */
2105        if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2106                adapter->smartspeed = 0;
2107
2108	return;
2109}
2110
2111
2112/*
2113 * Manage DMA'able memory.
2114 */
2115static void
2116em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2117{
2118        if (error)
2119                return;
2120        *(bus_addr_t *) arg = segs[0].ds_addr;
2121}
2122
2123static int
2124em_dma_malloc(struct adapter *adapter, bus_size_t size,
2125        struct em_dma_alloc *dma, int mapflags)
2126{
2127        int r;
2128
2129	r = bus_dma_tag_create(NULL,                    /* parent */
2130                               E1000_DBA_ALIGN, 0,      /* alignment, bounds */
2131                               BUS_SPACE_MAXADDR,       /* lowaddr */
2132                               BUS_SPACE_MAXADDR,       /* highaddr */
2133                               NULL, NULL,              /* filter, filterarg */
2134                               size,                    /* maxsize */
2135                               1,                       /* nsegments */
2136                               size,                    /* maxsegsize */
2137                               BUS_DMA_ALLOCNOW,        /* flags */
2138			       NULL,			/* lockfunc */
2139			       NULL,			/* lockarg */
2140                               &dma->dma_tag);
2141        if (r != 0) {
2142                printf("em%d: em_dma_malloc: bus_dma_tag_create failed; "
2143                        "error %u\n", adapter->unit, r);
2144                goto fail_0;
2145        }
2146
2147        r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2148                             BUS_DMA_NOWAIT, &dma->dma_map);
2149        if (r != 0) {
2150                printf("em%d: em_dma_malloc: bus_dmamem_alloc failed; "
2151                        "size %ju, error %d\n", adapter->unit,
2152			(uintmax_t)size, r);
2153                goto fail_2;
2154        }
2155
2156	dma->dma_paddr = 0;
2157        r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2158                            size,
2159                            em_dmamap_cb,
2160                            &dma->dma_paddr,
2161                            mapflags | BUS_DMA_NOWAIT);
2162        if (r != 0 || dma->dma_paddr == 0) {
2163                printf("em%d: em_dma_malloc: bus_dmamap_load failed; "
2164                        "error %u\n", adapter->unit, r);
2165                goto fail_3;
2166        }
2167
2168        return (0);
2169
2170fail_3:
2171        bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2172fail_2:
2173        bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2174        bus_dma_tag_destroy(dma->dma_tag);
2175fail_0:
2176        dma->dma_map = NULL;
2177        dma->dma_tag = NULL;
2178        return (r);
2179}
2180
2181static void
2182em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2183{
2184	if (dma->dma_tag == NULL)
2185		return;
2186	if (dma->dma_map != NULL) {
2187		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2188		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2189		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2190		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2191		dma->dma_map = NULL;
2192	}
2193        bus_dma_tag_destroy(dma->dma_tag);
2194	dma->dma_tag = NULL;
2195}
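
/*
 * A minimal usage sketch of the two helpers above.  It is illustrative only
 * and compiled out; the function name, the 4096-byte size and the
 * BUS_DMA_NOWAIT map flag are made up for the example, while the real
 * callers are the TX/RX descriptor ring setup paths in this driver.
 */
#if 0
static int
em_dma_example(struct adapter *adapter)
{
	struct em_dma_alloc ring;
	int error;

	/* One physically contiguous, DMA-safe 4 KB buffer. */
	error = em_dma_malloc(adapter, 4096, &ring, BUS_DMA_NOWAIT);
	if (error != 0)
		return (error);

	/* ring.dma_vaddr is the KVA, ring.dma_paddr the bus address. */

	em_dma_free(adapter, &ring);
	return (0);
}
#endif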
2196
2197
2198/*********************************************************************
2199 *
2200 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2201 *  the information needed to transmit a packet on the wire.
2202 *
2203 **********************************************************************/
2204static int
2205em_allocate_transmit_structures(struct adapter * adapter)
2206{
2207	if (!(adapter->tx_buffer_area =
2208	      (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2209					     adapter->num_tx_desc, M_DEVBUF,
2210					     M_NOWAIT))) {
2211		printf("em%d: Unable to allocate tx_buffer memory\n",
2212		       adapter->unit);
2213		return ENOMEM;
2214	}
2215
2216	bzero(adapter->tx_buffer_area,
2217	      sizeof(struct em_buffer) * adapter->num_tx_desc);
2218
2219	return 0;
2220}
2221
2222/*********************************************************************
2223 *
2224 *  Allocate and initialize transmit structures.
2225 *
2226 **********************************************************************/
2227static int
2228em_setup_transmit_structures(struct adapter * adapter)
2229{
2230	struct em_buffer *tx_buffer;
2231	bus_size_t size;
2232	int error, i;
2233
2234        /*
2235         * Setup DMA descriptor areas.
2236         */
2237	size = roundup2(adapter->hw.max_frame_size, MCLBYTES);
2238	if ((error = bus_dma_tag_create(NULL,           /* parent */
2239                               1, 0,                    /* alignment, bounds */
2240                               BUS_SPACE_MAXADDR,       /* lowaddr */
2241                               BUS_SPACE_MAXADDR,       /* highaddr */
2242                               NULL, NULL,              /* filter, filterarg */
2243                               size,                    /* maxsize */
2244                               EM_MAX_SCATTER,          /* nsegments */
2245                               size,                    /* maxsegsize */
2246                               0,                       /* flags */
2247			       NULL,			/* lockfunc */
2248			       NULL,			/* lockarg */
2249                               &adapter->txtag)) != 0) {
2250		printf("em%d: Unable to allocate TX DMA tag\n", adapter->unit);
2251		goto fail;
2252        }
2253
2254        if ((error = em_allocate_transmit_structures(adapter)) != 0)
2255		goto fail;
2256
2257        bzero((void *) adapter->tx_desc_base,
2258              (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
2259	tx_buffer = adapter->tx_buffer_area;
2260	for (i = 0; i < adapter->num_tx_desc; i++) {
2261		error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
2262		if (error != 0) {
2263			printf("em%d: Unable to create TX DMA map\n",
2264			    adapter->unit);
2265			goto fail;
2266		}
2267		tx_buffer++;
2268	}
2269
2270        adapter->next_avail_tx_desc = 0;
2271        adapter->oldest_used_tx_desc = 0;
2272
2273        /* Set number of descriptors available */
2274        adapter->num_tx_desc_avail = adapter->num_tx_desc;
2275
2276        /* Set checksum context */
2277        adapter->active_checksum_context = OFFLOAD_NONE;
2278	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2279	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2280
2281        return (0);
2282
2283fail:
2284	em_free_transmit_structures(adapter);
2285	return (error);
2286}
2287
2288/*********************************************************************
2289 *
2290 *  Enable transmit unit.
2291 *
2292 **********************************************************************/
2293static void
2294em_initialize_transmit_unit(struct adapter * adapter)
2295{
2296	u_int32_t       reg_tctl;
2297	u_int32_t       reg_tipg = 0;
2298	u_int64_t	bus_addr;
2299
2300         INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
2301	/* Setup the Base and Length of the Tx Descriptor Ring */
2302	bus_addr = adapter->txdma.dma_paddr;
2303	E1000_WRITE_REG(&adapter->hw, TDBAL, (u_int32_t)bus_addr);
2304	E1000_WRITE_REG(&adapter->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
2305	E1000_WRITE_REG(&adapter->hw, TDLEN,
2306			adapter->num_tx_desc *
2307			sizeof(struct em_tx_desc));
2308
2309	/* Setup the HW Tx Head and Tail descriptor pointers */
2310	E1000_WRITE_REG(&adapter->hw, TDH, 0);
2311	E1000_WRITE_REG(&adapter->hw, TDT, 0);
2312
2313
2314	HW_DEBUGOUT2("Base = %x, Length = %x\n",
2315		     E1000_READ_REG(&adapter->hw, TDBAL),
2316		     E1000_READ_REG(&adapter->hw, TDLEN));
2317
2318	/* Set the default values for the Tx Inter Packet Gap timer */
2319	switch (adapter->hw.mac_type) {
2320	case em_82542_rev2_0:
2321        case em_82542_rev2_1:
2322                reg_tipg = DEFAULT_82542_TIPG_IPGT;
2323                reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2324                reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2325                break;
2326        default:
2327                if (adapter->hw.media_type == em_media_type_fiber)
2328                        reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2329                else
2330                        reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2331                reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2332                reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2333        }
2334
2335	E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
2336	E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
2337	if(adapter->hw.mac_type >= em_82540)
2338		E1000_WRITE_REG(&adapter->hw, TADV,
2339		    adapter->tx_abs_int_delay.value);
2340
2341	/* Program the Transmit Control Register */
2342	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
2343		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2344	if (adapter->hw.mac_type >= em_82571)
2345		reg_tctl |= E1000_TCTL_MULR;
2346	if (adapter->link_duplex == 1) {
2347		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2348	} else {
2349		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2350	}
2351	E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
2352
2353	/* Setup Transmit Descriptor Settings for this adapter */
2354	adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;
2355
2356	if (adapter->tx_int_delay.value > 0)
2357		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2358
2359	return;
2360}
2361
2362/*********************************************************************
2363 *
2364 *  Free all transmit related data structures.
2365 *
2366 **********************************************************************/
2367static void
2368em_free_transmit_structures(struct adapter * adapter)
2369{
2370        struct em_buffer   *tx_buffer;
2371        int             i;
2372
2373        INIT_DEBUGOUT("free_transmit_structures: begin");
2374
2375        if (adapter->tx_buffer_area != NULL) {
2376                tx_buffer = adapter->tx_buffer_area;
2377                for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2378                        if (tx_buffer->m_head != NULL) {
2379				bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2380				    BUS_DMASYNC_POSTWRITE);
2381				bus_dmamap_unload(adapter->txtag,
2382				    tx_buffer->map);
2383                                m_freem(tx_buffer->m_head);
2384				tx_buffer->m_head = NULL;
2385                        } else if (tx_buffer->map != NULL)
2386				bus_dmamap_unload(adapter->txtag,
2387				    tx_buffer->map);
2388			if (tx_buffer->map != NULL) {
2389				bus_dmamap_destroy(adapter->txtag,
2390				    tx_buffer->map);
2391				tx_buffer->map = NULL;
2392			}
2393                }
2394        }
2395        if (adapter->tx_buffer_area != NULL) {
2396                free(adapter->tx_buffer_area, M_DEVBUF);
2397                adapter->tx_buffer_area = NULL;
2398        }
2399        if (adapter->txtag != NULL) {
2400                bus_dma_tag_destroy(adapter->txtag);
2401                adapter->txtag = NULL;
2402        }
2403        return;
2404}
2405
2406/*********************************************************************
2407 *
2408 *  The offload context needs to be set when we transfer the first
2409 *  packet of a particular protocol (TCP/UDP). We change the
2410 *  context only if the protocol type changes.
2411 *
2412 **********************************************************************/
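/*
 * For reference, with a plain 14-byte Ethernet header and a 20-byte IP
 * header (no options) the context descriptor offsets set up below work
 * out to:
 *
 *     ipcss = 14                 start of the IP header
 *     ipcso = 14 + 10 = 24       offset of ip_sum
 *     ipcse = 14 + 20 - 1 = 33   last byte covered by the IP checksum
 *     tucss = 34                 start of the TCP/UDP header
 *     tucso = 34 + 16 = 50 (TCP th_sum) or 34 + 6 = 40 (UDP uh_sum)
 *     tucse = 0                  checksum runs to the end of the packet
 */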
2413static void
2414em_transmit_checksum_setup(struct adapter * adapter,
2415			   struct mbuf *mp,
2416			   u_int32_t *txd_upper,
2417			   u_int32_t *txd_lower)
2418{
2419	struct em_context_desc *TXD;
2420	struct em_buffer *tx_buffer;
2421	int curr_txd;
2422
2423	if (mp->m_pkthdr.csum_flags) {
2424
2425		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2426			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2427			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2428			if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
2429				return;
2430			else
2431				adapter->active_checksum_context = OFFLOAD_TCP_IP;
2432
2433		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2434			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2435			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2436			if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
2437				return;
2438			else
2439				adapter->active_checksum_context = OFFLOAD_UDP_IP;
2440		} else {
2441			*txd_upper = 0;
2442			*txd_lower = 0;
2443			return;
2444		}
2445	} else {
2446		*txd_upper = 0;
2447		*txd_lower = 0;
2448		return;
2449	}
2450
2451	/* If we reach this point, the checksum offload context
2452	 * needs to be reset.
2453	 */
2454	curr_txd = adapter->next_avail_tx_desc;
2455	tx_buffer = &adapter->tx_buffer_area[curr_txd];
2456	TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
2457
2458	TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2459	TXD->lower_setup.ip_fields.ipcso =
2460		ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2461	TXD->lower_setup.ip_fields.ipcse =
2462		htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2463
2464	TXD->upper_setup.tcp_fields.tucss =
2465		ETHER_HDR_LEN + sizeof(struct ip);
2466	TXD->upper_setup.tcp_fields.tucse = htole16(0);
2467
2468	if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
2469		TXD->upper_setup.tcp_fields.tucso =
2470			ETHER_HDR_LEN + sizeof(struct ip) +
2471			offsetof(struct tcphdr, th_sum);
2472	} else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
2473		TXD->upper_setup.tcp_fields.tucso =
2474			ETHER_HDR_LEN + sizeof(struct ip) +
2475			offsetof(struct udphdr, uh_sum);
2476	}
2477
2478	TXD->tcp_seg_setup.data = htole32(0);
2479	TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);
2480
2481	tx_buffer->m_head = NULL;
2482
2483	if (++curr_txd == adapter->num_tx_desc)
2484		curr_txd = 0;
2485
2486	adapter->num_tx_desc_avail--;
2487	adapter->next_avail_tx_desc = curr_txd;
2488
2489	return;
2490}
2491
2492/**********************************************************************
2493 *
2494 *  Examine each tx_buffer in the used queue. If the hardware is done
2495 *  processing the packet then free associated resources. The
2496 *  tx_buffer is put back on the free queue.
2497 *
2498 **********************************************************************/
2499static void
2500em_clean_transmit_interrupts(struct adapter * adapter)
2501{
2502        int i, num_avail;
2503        struct em_buffer *tx_buffer;
2504        struct em_tx_desc   *tx_desc;
2505	struct ifnet   *ifp = adapter->ifp;
2506
2507	mtx_assert(&adapter->mtx, MA_OWNED);
2508
2509        if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2510                return;
2511
2512        num_avail = adapter->num_tx_desc_avail;
2513        i = adapter->oldest_used_tx_desc;
2514
2515        tx_buffer = &adapter->tx_buffer_area[i];
2516        tx_desc = &adapter->tx_desc_base[i];
2517
2518        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2519            BUS_DMASYNC_POSTREAD);
2520        while (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2521
2522                tx_desc->upper.data = 0;
2523                num_avail++;
2524
2525                if (tx_buffer->m_head) {
2526			ifp->if_opackets++;
2527			bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2528			    BUS_DMASYNC_POSTWRITE);
2529			bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2530
2531                        m_freem(tx_buffer->m_head);
2532                        tx_buffer->m_head = NULL;
2533                }
2534
2535                if (++i == adapter->num_tx_desc)
2536                        i = 0;
2537
2538                tx_buffer = &adapter->tx_buffer_area[i];
2539                tx_desc = &adapter->tx_desc_base[i];
2540        }
2541        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2542            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2543
2544        adapter->oldest_used_tx_desc = i;
2545
2546        /*
2547         * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack
2548         * that it is OK to send packets.
2549         * If there are no pending descriptors, clear the timeout. Otherwise,
2550         * if some descriptors have been freed, restart the timeout.
2551         */
2552        if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
2553                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2554                if (num_avail == adapter->num_tx_desc)
2555                        ifp->if_timer = 0;
2556                else if (num_avail == adapter->num_tx_desc_avail)
2557                        ifp->if_timer = EM_TX_TIMEOUT;
2558        }
2559        adapter->num_tx_desc_avail = num_avail;
2560        return;
2561}
2562
2563/*********************************************************************
2564 *
2565 *  Get a buffer from system mbuf buffer pool.
2566 *
2567 **********************************************************************/
2568static int
2569em_get_buf(int i, struct adapter *adapter,
2570           struct mbuf *nmp)
2571{
2572        struct mbuf    *mp = nmp;
2573        struct em_buffer *rx_buffer;
2574        struct ifnet   *ifp;
2575	bus_dma_segment_t segs[1];
2576	int error, nsegs;
2577
2578        ifp = adapter->ifp;
2579
2580        if (mp == NULL) {
2581                mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2582                if (mp == NULL) {
2583                        adapter->mbuf_cluster_failed++;
2584                        return(ENOBUFS);
2585                }
2586                mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2587        } else {
2588                mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2589                mp->m_data = mp->m_ext.ext_buf;
2590                mp->m_next = NULL;
2591        }
2592
2593        if (ifp->if_mtu <= ETHERMTU) {
2594                m_adj(mp, ETHER_ALIGN);
2595        }
2596
2597        rx_buffer = &adapter->rx_buffer_area[i];
2598
2599        /*
2600         * Using memory from the mbuf cluster pool, invoke the
2601         * bus_dma machinery to arrange the memory mapping.
2602         */
2603        error = bus_dmamap_load_mbuf_sg(adapter->rxtag, rx_buffer->map,
2604	    mp, segs, &nsegs, 0);
2605        if (error != 0) {
2606                m_free(mp);
2607                return(error);
2608        }
2609	/* If nsegs is wrong then the stack is corrupt */
2610	KASSERT(nsegs == 1, ("Too many segments returned!"));
2611        rx_buffer->m_head = mp;
2612        adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
2613        bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
2614
2615        return(0);
2616}
2617
2618/*********************************************************************
2619 *
2620 *  Allocate memory for rx_buffer structures. Since we use one
2621 *  rx_buffer per received packet, the maximum number of rx_buffer's
2622 *  that we'll need is equal to the number of receive descriptors
2623 *  that we've allocated.
2624 *
2625 **********************************************************************/
2626static int
2627em_allocate_receive_structures(struct adapter * adapter)
2628{
2629        int             i, error;
2630        struct em_buffer *rx_buffer;
2631
2632        if (!(adapter->rx_buffer_area =
2633              (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2634                                          adapter->num_rx_desc, M_DEVBUF,
2635                                          M_NOWAIT))) {
2636                printf("em%d: Unable to allocate rx_buffer memory\n",
2637                       adapter->unit);
2638                return(ENOMEM);
2639        }
2640
2641        bzero(adapter->rx_buffer_area,
2642              sizeof(struct em_buffer) * adapter->num_rx_desc);
2643
2644        error = bus_dma_tag_create(NULL,                /* parent */
2645                               1, 0,                    /* alignment, bounds */
2646                               BUS_SPACE_MAXADDR,       /* lowaddr */
2647                               BUS_SPACE_MAXADDR,       /* highaddr */
2648                               NULL, NULL,              /* filter, filterarg */
2649                               MCLBYTES,                /* maxsize */
2650                               1,                       /* nsegments */
2651                               MCLBYTES,                /* maxsegsize */
2652                               BUS_DMA_ALLOCNOW,        /* flags */
2653			       NULL,			/* lockfunc */
2654			       NULL,			/* lockarg */
2655                               &adapter->rxtag);
2656        if (error != 0) {
2657                printf("em%d: em_allocate_receive_structures: "
2658                        "bus_dma_tag_create failed; error %u\n",
2659                       adapter->unit, error);
2660                goto fail;
2661        }
2662
2663        rx_buffer = adapter->rx_buffer_area;
2664        for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2665                error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2666                                          &rx_buffer->map);
2667                if (error != 0) {
2668                        printf("em%d: em_allocate_receive_structures: "
2669                                "bus_dmamap_create failed; error %u\n",
2670                                adapter->unit, error);
2671                        goto fail;
2672                }
2673        }
2674
2675        for (i = 0; i < adapter->num_rx_desc; i++) {
2676                error = em_get_buf(i, adapter, NULL);
2677		if (error != 0)
2678			goto fail;
2679        }
2680        bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2681            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2682
2683        return(0);
2684
2685fail:
2686	em_free_receive_structures(adapter);
2687        return (error);
2688}
2689
2690/*********************************************************************
2691 *
2692 *  Allocate and initialize receive structures.
2693 *
2694 **********************************************************************/
2695static int
2696em_setup_receive_structures(struct adapter * adapter)
2697{
2698	bzero((void *) adapter->rx_desc_base,
2699              (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2700
2701	if (em_allocate_receive_structures(adapter))
2702		return ENOMEM;
2703
2704	/* Setup our descriptor pointers */
2705        adapter->next_rx_desc_to_check = 0;
2706	return(0);
2707}
2708
2709/*********************************************************************
2710 *
2711 *  Enable receive unit.
2712 *
2713 **********************************************************************/
2714static void
2715em_initialize_receive_unit(struct adapter * adapter)
2716{
2717	u_int32_t       reg_rctl;
2718	u_int32_t       reg_rxcsum;
2719	struct ifnet    *ifp;
2720	u_int64_t	bus_addr;
2721
2722        INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2723	ifp = adapter->ifp;
2724
2725	/* Make sure receives are disabled while setting up the descriptor ring */
2726	E1000_WRITE_REG(&adapter->hw, RCTL, 0);
2727
2728	/* Set the Receive Delay Timer Register */
2729	E1000_WRITE_REG(&adapter->hw, RDTR,
2730			adapter->rx_int_delay.value | E1000_RDT_FPDB);
2731
2732	if(adapter->hw.mac_type >= em_82540) {
2733		E1000_WRITE_REG(&adapter->hw, RADV,
2734		    adapter->rx_abs_int_delay.value);
2735
2736                /* Set the interrupt throttling rate.  Value is calculated
2737                 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */
2738#define MAX_INTS_PER_SEC        8000
2739#define DEFAULT_ITR             1000000000/(MAX_INTS_PER_SEC * 256)
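                /* With the defaults above this is 1000000000 / (8000 * 256)
                 * = 488; the hardware then waits at least 488 * 256ns
                 * (about 125us) between interrupts, capping the rate at
                 * roughly 8000 interrupts per second. */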
2740                E1000_WRITE_REG(&adapter->hw, ITR, DEFAULT_ITR);
2741        }
2742
2743	/* Setup the Base and Length of the Rx Descriptor Ring */
2744	bus_addr = adapter->rxdma.dma_paddr;
2745	E1000_WRITE_REG(&adapter->hw, RDBAL, (u_int32_t)bus_addr);
2746	E1000_WRITE_REG(&adapter->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
2747	E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
2748			sizeof(struct em_rx_desc));
2749
2750	/* Setup the HW Rx Head and Tail Descriptor Pointers */
2751	E1000_WRITE_REG(&adapter->hw, RDH, 0);
2752	E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
2753
2754	/* Setup the Receive Control Register */
2755	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2756		   E1000_RCTL_RDMTS_HALF |
2757		   (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
2758
2759	if (adapter->hw.tbi_compatibility_on == TRUE)
2760		reg_rctl |= E1000_RCTL_SBP;
2761
2762
2763	switch (adapter->rx_buffer_len) {
2764	default:
2765	case EM_RXBUFFER_2048:
2766		reg_rctl |= E1000_RCTL_SZ_2048;
2767		break;
2768	case EM_RXBUFFER_4096:
2769		reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2770		break;
2771	case EM_RXBUFFER_8192:
2772		reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2773		break;
2774	case EM_RXBUFFER_16384:
2775		reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2776		break;
2777	}
2778
2779	if (ifp->if_mtu > ETHERMTU)
2780		reg_rctl |= E1000_RCTL_LPE;
2781
2782	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
2783	if ((adapter->hw.mac_type >= em_82543) &&
2784	    (ifp->if_capenable & IFCAP_RXCSUM)) {
2785		reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
2786		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2787		E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
2788	}
2789
2790	/* Enable Receives */
2791	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2792
2793	return;
2794}
2795
2796/*********************************************************************
2797 *
2798 *  Free receive related data structures.
2799 *
2800 **********************************************************************/
2801static void
2802em_free_receive_structures(struct adapter *adapter)
2803{
2804        struct em_buffer   *rx_buffer;
2805        int             i;
2806
2807        INIT_DEBUGOUT("free_receive_structures: begin");
2808
2809        if (adapter->rx_buffer_area != NULL) {
2810                rx_buffer = adapter->rx_buffer_area;
2811                for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2812			if (rx_buffer->m_head != NULL) {
2813				bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
2814				    BUS_DMASYNC_POSTREAD);
2815				bus_dmamap_unload(adapter->rxtag,
2816				    rx_buffer->map);
2817				m_freem(rx_buffer->m_head);
2818				rx_buffer->m_head = NULL;
2819			} else if (rx_buffer->map != NULL)
2820				bus_dmamap_unload(adapter->rxtag,
2821				    rx_buffer->map);
2822                        if (rx_buffer->map != NULL) {
2823				bus_dmamap_destroy(adapter->rxtag,
2824				    rx_buffer->map);
2825				rx_buffer->map = NULL;
2826			}
2827                }
2828        }
2829        if (adapter->rx_buffer_area != NULL) {
2830                free(adapter->rx_buffer_area, M_DEVBUF);
2831                adapter->rx_buffer_area = NULL;
2832        }
2833        if (adapter->rxtag != NULL) {
2834                bus_dma_tag_destroy(adapter->rxtag);
2835                adapter->rxtag = NULL;
2836        }
2837        return;
2838}
2839
2840/*********************************************************************
2841 *
2842 *  This routine executes in interrupt context. It replenishes
2843 *  the mbufs in the descriptor ring and passes data that has been
2844 *  DMA'ed into host memory up to the upper layer.
2845 *
2846 *  We loop at most count times if count is > 0, or until done if
2847 *  count < 0.
2848 *
2849 *********************************************************************/
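/*
 * For example, a polling path might call
 * em_process_receive_interrupts(adapter, 100) to hand at most 100 complete
 * frames to the stack per poll (count is only decremented on EOP
 * descriptors), while an interrupt-driven caller can pass -1 to drain every
 * descriptor the hardware has marked done.
 */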
2850static void
2851em_process_receive_interrupts(struct adapter * adapter, int count)
2852{
2853	struct ifnet        *ifp;
2854	struct mbuf         *mp;
2855	u_int8_t            accept_frame = 0;
2856 	u_int8_t            eop = 0;
2857	u_int16_t           len, desc_len, prev_len_adj;
2858	int                 i;
2859
2860	/* Pointer to the receive descriptor being examined. */
2861	struct em_rx_desc   *current_desc;
2862
2863	mtx_assert(&adapter->mtx, MA_OWNED);
2864
2865	ifp = adapter->ifp;
2866	i = adapter->next_rx_desc_to_check;
2867        current_desc = &adapter->rx_desc_base[i];
2868	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2869	    BUS_DMASYNC_POSTREAD);
2870
2871	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
2872		return;
2873	}
2874
2875	while ((current_desc->status & E1000_RXD_STAT_DD) &&
2876		    (count != 0) &&
2877		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2878		struct mbuf *m = NULL;
2879
2880		mp = adapter->rx_buffer_area[i].m_head;
2881		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2882		    BUS_DMASYNC_POSTREAD);
2883		bus_dmamap_unload(adapter->rxtag,
2884		    adapter->rx_buffer_area[i].map);
2885
2886		accept_frame = 1;
2887		prev_len_adj = 0;
2888                desc_len = le16toh(current_desc->length);
2889		if (current_desc->status & E1000_RXD_STAT_EOP) {
2890			count--;
2891			eop = 1;
2892			if (desc_len < ETHER_CRC_LEN) {
2893                                len = 0;
2894                                prev_len_adj = ETHER_CRC_LEN - desc_len;
2895                        }
2896                        else {
2897                                len = desc_len - ETHER_CRC_LEN;
2898                        }
2899		} else {
2900			eop = 0;
2901			len = desc_len;
2902		}
2903
2904		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2905			u_int8_t            last_byte;
2906			u_int32_t           pkt_len = desc_len;
2907
2908			if (adapter->fmp != NULL)
2909				pkt_len += adapter->fmp->m_pkthdr.len;
2910
2911			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
2912
2913			if (TBI_ACCEPT(&adapter->hw, current_desc->status,
2914				       current_desc->errors,
2915				       pkt_len, last_byte)) {
2916				em_tbi_adjust_stats(&adapter->hw,
2917						    &adapter->stats,
2918						    pkt_len,
2919						    adapter->hw.mac_addr);
2920				if (len > 0) len--;
2921			}
2922			else {
2923				accept_frame = 0;
2924			}
2925		}
2926
2927		if (accept_frame) {
2928
2929			if (em_get_buf(i, adapter, NULL) == ENOBUFS) {
2930				adapter->dropped_pkts++;
2931				em_get_buf(i, adapter, mp);
2932				if (adapter->fmp != NULL)
2933					m_freem(adapter->fmp);
2934				adapter->fmp = NULL;
2935				adapter->lmp = NULL;
2936				break;
2937			}
2938
2939			/* Assign correct length to the current fragment */
2940			mp->m_len = len;
2941
2942			if (adapter->fmp == NULL) {
2943				mp->m_pkthdr.len = len;
2944				adapter->fmp = mp;	 /* Store the first mbuf */
2945				adapter->lmp = mp;
2946			} else {
2947				/* Chain mbuf's together */
2948				mp->m_flags &= ~M_PKTHDR;
2949				/*
2950                                 * Adjust length of previous mbuf in chain if we
2951                                 * received less than 4 bytes in the last descriptor.
2952                                 */
2953				if (prev_len_adj > 0) {
2954					adapter->lmp->m_len -= prev_len_adj;
2955					adapter->fmp->m_pkthdr.len -= prev_len_adj;
2956				}
2957				adapter->lmp->m_next = mp;
2958				adapter->lmp = adapter->lmp->m_next;
2959				adapter->fmp->m_pkthdr.len += len;
2960			}
2961
2962                        if (eop) {
2963                                adapter->fmp->m_pkthdr.rcvif = ifp;
2964				ifp->if_ipackets++;
2965                                em_receive_checksum(adapter, current_desc,
2966                                                    adapter->fmp);
2967                                if (current_desc->status & E1000_RXD_STAT_VP)
2968					VLAN_INPUT_TAG(ifp, adapter->fmp,
2969					    (le16toh(current_desc->special) &
2970					    E1000_RXD_SPC_VLAN_MASK),
2971					    adapter->fmp = NULL);
2972
2973				m = adapter->fmp;
2974				adapter->fmp = NULL;
2975				adapter->lmp = NULL;
2976                        }
2977		} else {
2978			adapter->dropped_pkts++;
2979			em_get_buf(i, adapter, mp);
2980			if (adapter->fmp != NULL)
2981				m_freem(adapter->fmp);
2982			adapter->fmp = NULL;
2983			adapter->lmp = NULL;
2984		}
2985
2986		/* Zero out the receive descriptor's status */
2987		current_desc->status = 0;
2988		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2989		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2990
2991		/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
2992                E1000_WRITE_REG(&adapter->hw, RDT, i);
2993
2994                /* Advance our pointers to the next descriptor */
2995		if (++i == adapter->num_rx_desc)
2996			i = 0;
2997		if (m != NULL) {
2998			adapter->next_rx_desc_to_check = i;
2999			EM_UNLOCK(adapter);
3000			(*ifp->if_input)(ifp, m);
3001			EM_LOCK(adapter);
3002			i = adapter->next_rx_desc_to_check;
3003		}
3004		current_desc = &adapter->rx_desc_base[i];
3005	}
3006	adapter->next_rx_desc_to_check = i;
3007	return;
3008}
3009
3010/*********************************************************************
3011 *
3012 *  Verify that the hardware indicated that the checksum is valid.
3013 *  Inform the stack about the status of the checksum so that the
3014 *  stack doesn't spend time verifying it again.
3015 *
3016 *********************************************************************/
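/*
 * In short: E1000_RXD_STAT_IPCS without E1000_RXD_ERR_IPE yields
 * CSUM_IP_CHECKED | CSUM_IP_VALID, and E1000_RXD_STAT_TCPCS without
 * E1000_RXD_ERR_TCPE yields CSUM_DATA_VALID | CSUM_PSEUDO_HDR with
 * csum_data set to 0xffff, so the stack can skip its own verification.
 */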
3017static void
3018em_receive_checksum(struct adapter *adapter,
3019		    struct em_rx_desc *rx_desc,
3020		    struct mbuf *mp)
3021{
3022	/* 82543 or newer only */
3023	if ((adapter->hw.mac_type < em_82543) ||
3024	    /* Ignore Checksum bit is set */
3025	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3026		mp->m_pkthdr.csum_flags = 0;
3027		return;
3028	}
3029
3030	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3031		/* Did it pass? */
3032		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3033			/* IP Checksum Good */
3034			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3035			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3036
3037		} else {
3038			mp->m_pkthdr.csum_flags = 0;
3039		}
3040	}
3041
3042	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3043		/* Did it pass? */
3044		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3045			mp->m_pkthdr.csum_flags |=
3046			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3047			mp->m_pkthdr.csum_data = htons(0xffff);
3048		}
3049	}
3050
3051	return;
3052}
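
/*
 * Illustrative note (a sketch, not driver code): a frame that passes both
 * hardware checks above leaves this routine with
 *
 *	csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
 *		     CSUM_DATA_VALID | CSUM_PSEUDO_HDR
 *	csum_data  = 0xffff
 *
 * so the upper layers can accept the packet without recomputing the
 * checksums in software.
 */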
3053
3054
3055static void
3056em_enable_vlans(struct adapter *adapter)
3057{
3058	uint32_t ctrl;
3059
3060	E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
3061
3062	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3063	ctrl |= E1000_CTRL_VME;
3064	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3065
3066	return;
3067}
3068
3069static void
3070em_disable_vlans(struct adapter *adapter)
3071{
3072	uint32_t ctrl;
3073
3074	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3075	ctrl &= ~E1000_CTRL_VME;
3076	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3077
3078	return;
3079}
3080
3081static void
3082em_enable_intr(struct adapter * adapter)
3083{
3084	E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
3085	return;
3086}
3087
3088static void
3089em_disable_intr(struct adapter *adapter)
3090{
3091	/*
3092	 * The first version of the 82542 had an erratum: when the link was forced,
3093	 * it would stay up even if the cable was disconnected.  Sequence errors were
3094	 * used to detect the disconnect, after which the driver would unforce the link.
3095	 * That code lives in the ISR, so for it to work correctly the sequence error
3096	 * interrupt has to be enabled all the time.
3097	 */
3098
3099	if (adapter->hw.mac_type == em_82542_rev2_0)
3100		E1000_WRITE_REG(&adapter->hw, IMC,
3101		    (0xffffffff & ~E1000_IMC_RXSEQ));
3102	else
3103		E1000_WRITE_REG(&adapter->hw, IMC,
3104		    0xffffffff);
3105	return;
3106}
3107
3108static int
3109em_is_valid_ether_addr(u_int8_t *addr)
3110{
3111        char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3112
3113        if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3114                return (FALSE);
3115        }
3116
3117        return(TRUE);
3118}
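
/*
 * Illustrative examples of what the check above accepts and rejects
 * (addresses are made up for illustration):
 *
 *	00:00:00:00:00:00  -> FALSE  (all-zero address)
 *	01:00:5e:00:00:01  -> FALSE  (group/multicast bit set in addr[0])
 *	00:a0:c9:12:34:56  -> TRUE   (ordinary unicast address)
 */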
3119
3120void
3121em_write_pci_cfg(struct em_hw *hw,
3122		      uint32_t reg,
3123		      uint16_t *value)
3124{
3125	pci_write_config(((struct em_osdep *)hw->back)->dev, reg,
3126			 *value, 2);
3127}
3128
3129void
3130em_read_pci_cfg(struct em_hw *hw, uint32_t reg,
3131		     uint16_t *value)
3132{
3133	*value = pci_read_config(((struct em_osdep *)hw->back)->dev,
3134				 reg, 2);
3135	return;
3136}
3137
3138void
3139em_pci_set_mwi(struct em_hw *hw)
3140{
3141        pci_write_config(((struct em_osdep *)hw->back)->dev,
3142                         PCIR_COMMAND,
3143                         (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
3144        return;
3145}
3146
3147void
3148em_pci_clear_mwi(struct em_hw *hw)
3149{
3150        pci_write_config(((struct em_osdep *)hw->back)->dev,
3151                         PCIR_COMMAND,
3152                         (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
3153        return;
3154}
3155
3156/*********************************************************************
3157* 82544 coexistence issue workaround.
3158*    There are two issues:
3159*       1. Transmit hang.
3160*          To detect it, use the following equation:
3161*              SIZE[3:0] + ADDR[2:0] = SUM[3:0]
3162*          If SUM[3:0] is between 1 and 4, the hang can occur.
3163*
3164*       2. DAC issue.
3165*          To detect it, use the same equation:
3166*              SIZE[3:0] + ADDR[2:0] = SUM[3:0]
3167*          If SUM[3:0] is between 9 and 0xC, the issue can occur.
3168*
3169*    WORKAROUND:
3170*          Make sure the buffer never ends on a terminator of 1-4 (hang)
3171*          or 9-0xC (DAC); if it would, split off the last 4 bytes into
3172*          a separate descriptor.  A worked example follows the function.
3173**********************************************************************/
3174static u_int32_t
3175em_fill_descriptors (bus_addr_t address,
3176                              u_int32_t length,
3177                              PDESC_ARRAY desc_array)
3178{
3179        /* The issue is sensitive to both length and address. */
3180        /* Check the address first... */
3181        u_int32_t safe_terminator;
3182        if (length <= 4) {
3183                desc_array->descriptor[0].address = address;
3184                desc_array->descriptor[0].length = length;
3185                desc_array->elements = 1;
3186                return desc_array->elements;
3187        }
3188        safe_terminator = (u_int32_t)((((u_int32_t)address & 0x7) + (length & 0xF)) & 0xF);
3189        /* If it does not fall in 0x1-0x4 or 0x9-0xC, one descriptor is safe. */
3190        if (safe_terminator == 0 ||
3191            (safe_terminator > 4 &&
3192            safe_terminator < 9) ||
3193            (safe_terminator > 0xC &&
3194            safe_terminator <= 0xF)) {
3195                desc_array->descriptor[0].address = address;
3196                desc_array->descriptor[0].length = length;
3197                desc_array->elements = 1;
3198                return desc_array->elements;
3199        }
3200
3201        desc_array->descriptor[0].address = address;
3202        desc_array->descriptor[0].length = length - 4;
3203        desc_array->descriptor[1].address = address + (length - 4);
3204        desc_array->descriptor[1].length = 4;
3205        desc_array->elements = 2;
3206        return desc_array->elements;
3207}
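
/*
 * Worked example for the workaround above (values chosen for illustration):
 * for a buffer whose address ends in 0x2 and whose length is 1514 (0x5EA),
 * ADDR[2:0] = 0x2 and SIZE[3:0] = 0xA, so
 *
 *	safe_terminator = (0x2 + 0xA) & 0xF = 0xC
 *
 * which falls in the 0x9-0xC (DAC) range; em_fill_descriptors() therefore
 * splits the buffer into a 1510-byte descriptor followed by a 4-byte one.
 * With a length of 1518 (0x5EE) instead, the terminator would be 0x0 and a
 * single descriptor would be used.
 */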
3208
3209/**********************************************************************
3210 *
3211 *  Update the board statistics counters.
3212 *
3213 **********************************************************************/
3214static void
3215em_update_stats_counters(struct adapter *adapter)
3216{
3217	struct ifnet   *ifp;
3218
3219	if (adapter->hw.media_type == em_media_type_copper ||
3220	   (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
3221		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
3222		adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
3223	}
3224	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
3225	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
3226	adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
3227	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);
3228
3229	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
3230	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
3231	adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
3232	adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
3233	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
3234	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
3235	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
3236	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
3237	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
3238	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
3239	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
3240	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
3241	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
3242	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
3243	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
3244	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
3245	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
3246	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
3247	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
3248	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);
3249
3250	/* For the 64-bit byte counters the low dword must be read first. */
3251	/* Both registers clear on the read of the high dword */
3252
3253	adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
3254	adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
3255	adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
3256	adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);
3257
3258	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
3259	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
3260	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
3261	adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
3262	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);
3263
3264	adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
3265	adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
3266	adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
3267	adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);
3268
3269	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
3270	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
3271	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
3272	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
3273	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
3274	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
3275	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
3276	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
3277	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
3278	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);
3279
3280	if (adapter->hw.mac_type >= em_82543) {
3281		adapter->stats.algnerrc +=
3282		    E1000_READ_REG(&adapter->hw, ALGNERRC);
3283		adapter->stats.rxerrc +=
3284		    E1000_READ_REG(&adapter->hw, RXERRC);
3285		adapter->stats.tncrs +=
3286		    E1000_READ_REG(&adapter->hw, TNCRS);
3287		adapter->stats.cexterr +=
3288		    E1000_READ_REG(&adapter->hw, CEXTERR);
3289		adapter->stats.tsctc +=
3290		    E1000_READ_REG(&adapter->hw, TSCTC);
3291		adapter->stats.tsctfc +=
3292		    E1000_READ_REG(&adapter->hw, TSCTFC);
3293	}
3294	ifp = adapter->ifp;
3295
3296	ifp->if_collisions = adapter->stats.colc;
3297
3298	/* Rx Errors */
3299	ifp->if_ierrors = adapter->dropped_pkts +
3300	    adapter->stats.rxerrc +
3301	    adapter->stats.crcerrs +
3302	    adapter->stats.algnerrc +
3303	    adapter->stats.rlec +
3304	    adapter->stats.mpc +
3305	    adapter->stats.cexterr;
3306
3307	/* Tx Errors */
3308	ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol +
3309	    adapter->watchdog_events;
3310
3311}
3312
3313
3314/**********************************************************************
3315 *
3316 *  This routine is called only when em_display_debug_stats is enabled.
3317 *  This routine provides a way to take a look at important statistics
3318 *  maintained by the driver and hardware.
3319 *
3320 **********************************************************************/
3321static void
3322em_print_debug_info(struct adapter *adapter)
3323{
3324	int unit = adapter->unit;
3325	uint8_t *hw_addr = adapter->hw.hw_addr;
3326
3327	printf("em%d: Adapter hardware address = %p \n", unit, hw_addr);
3328	printf("em%d: CTRL = 0x%x RCTL = 0x%x \n", unit,
3329	    E1000_READ_REG(&adapter->hw, CTRL),
3330	    E1000_READ_REG(&adapter->hw, RCTL));
3331	printf("em%d: Packet buffer = Tx=%dk Rx=%dk \n", unit,
3332	    ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff0000) >> 16),
3333	    (E1000_READ_REG(&adapter->hw, PBA) & 0xffff));
3334	printf("em%d: Flow control watermarks high = %d low = %d\n", unit,
3335	    adapter->hw.fc_high_water,
3336	    adapter->hw.fc_low_water);
3337	printf("em%d: tx_int_delay = %d, tx_abs_int_delay = %d\n", unit,
3338	    E1000_READ_REG(&adapter->hw, TIDV),
3339	    E1000_READ_REG(&adapter->hw, TADV));
3340	printf("em%d: rx_int_delay = %d, rx_abs_int_delay = %d\n", unit,
3341	    E1000_READ_REG(&adapter->hw, RDTR),
3342	    E1000_READ_REG(&adapter->hw, RADV));
3343	printf("em%d: fifo workaround = %lld, fifo_reset_count = %lld\n",
3344	    unit, (long long)adapter->tx_fifo_wrk_cnt,
3345	    (long long)adapter->tx_fifo_reset_cnt);
3346	printf("em%d: hw tdh = %d, hw tdt = %d\n", unit,
3347	    E1000_READ_REG(&adapter->hw, TDH),
3348	    E1000_READ_REG(&adapter->hw, TDT));
3349	printf("em%d: Num Tx descriptors avail = %d\n", unit,
3350	    adapter->num_tx_desc_avail);
3351	printf("em%d: Tx Descriptors not avail1 = %ld\n", unit,
3352	    adapter->no_tx_desc_avail1);
3353	printf("em%d: Tx Descriptors not avail2 = %ld\n", unit,
3354	    adapter->no_tx_desc_avail2);
3355	printf("em%d: Std mbuf failed = %ld\n", unit,
3356	    adapter->mbuf_alloc_failed);
3357	printf("em%d: Std mbuf cluster failed = %ld\n", unit,
3358	    adapter->mbuf_cluster_failed);
3359	printf("em%d: Driver dropped packets = %ld\n", unit,
3360	    adapter->dropped_pkts);
3361
3362	return;
3363}
3364
3365static void
3366em_print_hw_stats(struct adapter *adapter)
3367{
3368        int unit = adapter->unit;
3369
3370        printf("em%d: Excessive collisions = %lld\n", unit,
3371               (long long)adapter->stats.ecol);
3372        printf("em%d: Symbol errors = %lld\n", unit,
3373               (long long)adapter->stats.symerrs);
3374        printf("em%d: Sequence errors = %lld\n", unit,
3375               (long long)adapter->stats.sec);
3376        printf("em%d: Defer count = %lld\n", unit,
3377               (long long)adapter->stats.dc);
3378
3379        printf("em%d: Missed Packets = %lld\n", unit,
3380               (long long)adapter->stats.mpc);
3381        printf("em%d: Receive No Buffers = %lld\n", unit,
3382               (long long)adapter->stats.rnbc);
3383        printf("em%d: Receive length errors = %lld\n", unit,
3384               (long long)adapter->stats.rlec);
3385        printf("em%d: Receive errors = %lld\n", unit,
3386               (long long)adapter->stats.rxerrc);
3387        printf("em%d: Crc errors = %lld\n", unit,
3388               (long long)adapter->stats.crcerrs);
3389        printf("em%d: Alignment errors = %lld\n", unit,
3390               (long long)adapter->stats.algnerrc);
3391        printf("em%d: Carrier extension errors = %lld\n", unit,
3392               (long long)adapter->stats.cexterr);
3393        printf("em%d: RX overruns = %ld\n", unit, adapter->rx_overruns);
3394        printf("em%d: watchdog timeouts = %ld\n", unit,
3395               adapter->watchdog_events);
3396
3397        printf("em%d: XON Rcvd = %lld\n", unit,
3398               (long long)adapter->stats.xonrxc);
3399        printf("em%d: XON Xmtd = %lld\n", unit,
3400               (long long)adapter->stats.xontxc);
3401        printf("em%d: XOFF Rcvd = %lld\n", unit,
3402               (long long)adapter->stats.xoffrxc);
3403        printf("em%d: XOFF Xmtd = %lld\n", unit,
3404               (long long)adapter->stats.xofftxc);
3405
3406        printf("em%d: Good Packets Rcvd = %lld\n", unit,
3407               (long long)adapter->stats.gprc);
3408        printf("em%d: Good Packets Xmtd = %lld\n", unit,
3409               (long long)adapter->stats.gptc);
3410
3411        return;
3412}
3413
3414static int
3415em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3416{
3417        int error;
3418        int result;
3419        struct adapter *adapter;
3420
3421        result = -1;
3422        error = sysctl_handle_int(oidp, &result, 0, req);
3423
3424        if (error || !req->newptr)
3425                return (error);
3426
3427        if (result == 1) {
3428                adapter = (struct adapter *)arg1;
3429                em_print_debug_info(adapter);
3430        }
3431
3432        return error;
3433}
3434
3435
3436static int
3437em_sysctl_stats(SYSCTL_HANDLER_ARGS)
3438{
3439        int error;
3440        int result;
3441        struct adapter *adapter;
3442
3443        result = -1;
3444        error = sysctl_handle_int(oidp, &result, 0, req);
3445
3446        if (error || !req->newptr)
3447                return (error);
3448
3449        if (result == 1) {
3450                adapter = (struct adapter *)arg1;
3451                em_print_hw_stats(adapter);
3452        }
3453
3454        return error;
3455}
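
/*
 * Usage sketch: both handlers above are write-triggered.  Assuming they are
 * attached to the device's sysctl tree in the attach path (the exact node
 * names below are assumptions), a dump can be requested from userland with
 * something like
 *
 *	# sysctl dev.em.0.debug_info=1
 *	# sysctl dev.em.0.stats=1
 */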
3456
3457static int
3458em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
3459{
3460	struct em_int_delay_info *info;
3461	struct adapter *adapter;
3462	u_int32_t regval;
3463	int error;
3464	int usecs;
3465	int ticks;
3466
3467	info = (struct em_int_delay_info *)arg1;
3468	usecs = info->value;
3469	error = sysctl_handle_int(oidp, &usecs, 0, req);
3470	if (error != 0 || req->newptr == NULL)
3471		return error;
3472	if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
3473		return EINVAL;
3474	info->value = usecs;
3475	ticks = E1000_USECS_TO_TICKS(usecs);
3476
3477	adapter = info->adapter;
3478
3479	EM_LOCK(adapter);
3480	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
3481	regval = (regval & ~0xffff) | (ticks & 0xffff);
3482	/* Handle a few special cases. */
3483	switch (info->offset) {
3484	case E1000_RDTR:
3485	case E1000_82542_RDTR:
3486		regval |= E1000_RDT_FPDB;
3487		break;
3488	case E1000_TIDV:
3489	case E1000_82542_TIDV:
3490		if (ticks == 0) {
3491			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
3492			/* Don't write 0 into the TIDV register. */
3493			regval++;
3494		} else
3495			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3496		break;
3497	}
3498	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
3499	EM_UNLOCK(adapter);
3500	return 0;
3501}
3502
3503static void
3504em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
3505    const char *description, struct em_int_delay_info *info,
3506    int offset, int value)
3507{
3508	info->adapter = adapter;
3509	info->offset = offset;
3510	info->value = value;
3511	SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
3512	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
3513	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
3514	    info, 0, em_sysctl_int_delay, "I", description);
3515}
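
/*
 * Illustrative registration (a sketch only: the adapter field, the offset
 * macro and the default shown here are assumptions, not the driver's actual
 * attach-time call):
 *
 *	em_add_int_delay_sysctl(adapter, "rx_int_delay",
 *	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
 *	    E1000_REG_OFFSET(&adapter->hw, RDTR), 32);
 *
 * em_sysctl_int_delay() then converts the sysctl value from usecs to device
 * ticks with E1000_USECS_TO_TICKS() and rewrites the low 16 bits of the
 * selected register under the adapter lock.
 */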
3516