1/*******************************************************************************
2
3Copyright (c) 2001-2004, Intel Corporation
4All rights reserved.
5
6Redistribution and use in source and binary forms, with or without
7modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10    this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13    notice, this list of conditions and the following disclaimer in the
14    documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17    contributors may be used to endorse or promote products derived from
18    this software without specific prior written permission.
19
20THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30POSSIBILITY OF SUCH DAMAGE.
31
32***************************************************************************/
33
34/*$FreeBSD: head/sys/dev/ixgb/if_ixgb.c 232874 2012-03-12 18:15:08Z scottl $*/
35
36#ifdef HAVE_KERNEL_OPTION_HEADERS
37#include "opt_device_polling.h"
38#endif
39
40#include <dev/ixgb/if_ixgb.h>
41
42/*********************************************************************
43 *  Set this to one to display debug statistics
44 *********************************************************************/
45int             ixgb_display_debug_stats = 0;
46
47/*********************************************************************
48 *  Linked list of board private structures for all NICs found
49 *********************************************************************/
50
51struct adapter *ixgb_adapter_list = NULL;
52
53
54
55/*********************************************************************
56 *  Driver version
57 *********************************************************************/
58
59char            ixgb_driver_version[] = "1.0.6";
60char            ixgb_copyright[] = "Copyright (c) 2001-2004 Intel Corporation.";
61
62/*********************************************************************
63 *  PCI Device ID Table
64 *
65 *  Used by probe to select devices to load on
66 *  Last field stores an index into ixgb_strings
67 *  Last entry must be all 0s
68 *
69 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
70 *********************************************************************/
71
72static ixgb_vendor_info_t ixgb_vendor_info_array[] =
73{
74	/* Intel(R) PRO/10000 Network Connection */
75	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX, PCI_ANY_ID, PCI_ANY_ID, 0},
76	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, PCI_ANY_ID, PCI_ANY_ID, 0},
77	/* required last entry */
78	{0, 0, 0, 0, 0}
79};
80
81/*********************************************************************
82 *  Table of branding strings for all supported NICs.
83 *********************************************************************/
84
85static char    *ixgb_strings[] = {
86	"Intel(R) PRO/10GbE Network Driver"
87};
88
89/*********************************************************************
90 *  Function prototypes
91 *********************************************************************/
92static int      ixgb_probe(device_t);
93static int      ixgb_attach(device_t);
94static int      ixgb_detach(device_t);
95static int      ixgb_shutdown(device_t);
96static void     ixgb_intr(void *);
97static void     ixgb_start(struct ifnet *);
98static void     ixgb_start_locked(struct ifnet *);
99static int      ixgb_ioctl(struct ifnet *, IOCTL_CMD_TYPE, caddr_t);
100static void     ixgb_watchdog(struct adapter *);
101static void     ixgb_init(void *);
102static void     ixgb_init_locked(struct adapter *);
103static void     ixgb_stop(void *);
104static void     ixgb_media_status(struct ifnet *, struct ifmediareq *);
105static int      ixgb_media_change(struct ifnet *);
106static void     ixgb_identify_hardware(struct adapter *);
107static int      ixgb_allocate_pci_resources(struct adapter *);
108static void     ixgb_free_pci_resources(struct adapter *);
109static void     ixgb_local_timer(void *);
110static int      ixgb_hardware_init(struct adapter *);
111static int      ixgb_setup_interface(device_t, struct adapter *);
112static int      ixgb_setup_transmit_structures(struct adapter *);
113static void     ixgb_initialize_transmit_unit(struct adapter *);
114static int      ixgb_setup_receive_structures(struct adapter *);
115static void     ixgb_initialize_receive_unit(struct adapter *);
116static void     ixgb_enable_intr(struct adapter *);
117static void     ixgb_disable_intr(struct adapter *);
118static void     ixgb_free_transmit_structures(struct adapter *);
119static void     ixgb_free_receive_structures(struct adapter *);
120static void     ixgb_update_stats_counters(struct adapter *);
121static void     ixgb_clean_transmit_interrupts(struct adapter *);
122static int      ixgb_allocate_receive_structures(struct adapter *);
123static int      ixgb_allocate_transmit_structures(struct adapter *);
124static int      ixgb_process_receive_interrupts(struct adapter *, int);
125static void
126ixgb_receive_checksum(struct adapter *,
127		      struct ixgb_rx_desc * rx_desc,
128		      struct mbuf *);
129static void
130ixgb_transmit_checksum_setup(struct adapter *,
131			     struct mbuf *,
132			     u_int8_t *);
133static void     ixgb_set_promisc(struct adapter *);
134static void     ixgb_disable_promisc(struct adapter *);
135static void     ixgb_set_multi(struct adapter *);
136static void     ixgb_print_hw_stats(struct adapter *);
137static void     ixgb_print_link_status(struct adapter *);
138static int
139ixgb_get_buf(int i, struct adapter *,
140	     struct mbuf *);
141static void     ixgb_enable_vlans(struct adapter * adapter);
142static int      ixgb_encap(struct adapter * adapter, struct mbuf * m_head);
143static int      ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS);
144static int
145ixgb_dma_malloc(struct adapter *, bus_size_t,
146		struct ixgb_dma_alloc *, int);
147static void     ixgb_dma_free(struct adapter *, struct ixgb_dma_alloc *);
148#ifdef DEVICE_POLLING
149static poll_handler_t ixgb_poll;
150#endif
151
152/*********************************************************************
153 *  FreeBSD Device Interface Entry Points
154 *********************************************************************/
155
156static device_method_t ixgb_methods[] = {
157	/* Device interface */
158	DEVMETHOD(device_probe, ixgb_probe),
159	DEVMETHOD(device_attach, ixgb_attach),
160	DEVMETHOD(device_detach, ixgb_detach),
161	DEVMETHOD(device_shutdown, ixgb_shutdown),
162	{0, 0}
163};
164
165static driver_t ixgb_driver = {
166	"ixgb", ixgb_methods, sizeof(struct adapter),
167};
168
169static devclass_t ixgb_devclass;
170DRIVER_MODULE(ixgb, pci, ixgb_driver, ixgb_devclass, 0, 0);
171
172MODULE_DEPEND(ixgb, pci, 1, 1, 1);
173MODULE_DEPEND(ixgb, ether, 1, 1, 1);
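
/*
 * Usage note (assuming the module is built as if_ixgb.ko): the driver can be
 * loaded with "kldload if_ixgb" or at boot via if_ixgb_load="YES" in
 * /boot/loader.conf.
 */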
174
/* some defines for controlling descriptor fetches in h/w */
#define RXDCTL_PTHRESH_DEFAULT 128	/* chip considers prefetch below this */
#define RXDCTL_HTHRESH_DEFAULT 16	/* chip will only prefetch if tail is
					 * pushed this many descriptors from
					 * head */
#define RXDCTL_WTHRESH_DEFAULT 0	/* chip writes back at this many or RXT0 */
181
182
/*********************************************************************
 *  Device identification routine
 *
 *  ixgb_probe determines if the driver should be loaded on an
 *  adapter based on its PCI vendor/device ID.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/
191
192static int
193ixgb_probe(device_t dev)
194{
195	ixgb_vendor_info_t *ent;
196
197	u_int16_t       pci_vendor_id = 0;
198	u_int16_t       pci_device_id = 0;
199	u_int16_t       pci_subvendor_id = 0;
200	u_int16_t       pci_subdevice_id = 0;
201	char            adapter_name[60];
202
203	INIT_DEBUGOUT("ixgb_probe: begin");
204
205	pci_vendor_id = pci_get_vendor(dev);
206	if (pci_vendor_id != IXGB_VENDOR_ID)
207		return (ENXIO);
208
209	pci_device_id = pci_get_device(dev);
210	pci_subvendor_id = pci_get_subvendor(dev);
211	pci_subdevice_id = pci_get_subdevice(dev);
212
213	ent = ixgb_vendor_info_array;
214	while (ent->vendor_id != 0) {
215		if ((pci_vendor_id == ent->vendor_id) &&
216		    (pci_device_id == ent->device_id) &&
217
218		    ((pci_subvendor_id == ent->subvendor_id) ||
219		     (ent->subvendor_id == PCI_ANY_ID)) &&
220
221		    ((pci_subdevice_id == ent->subdevice_id) ||
222		     (ent->subdevice_id == PCI_ANY_ID))) {
223			sprintf(adapter_name, "%s, Version - %s",
224				ixgb_strings[ent->index],
225				ixgb_driver_version);
226			device_set_desc_copy(dev, adapter_name);
227			return (BUS_PROBE_DEFAULT);
228		}
229		ent++;
230	}
231
232	return (ENXIO);
233}
234
235/*********************************************************************
236 *  Device initialization routine
237 *
238 *  The attach entry point is called when the driver is being loaded.
239 *  This routine identifies the type of hardware, allocates all resources
240 *  and initializes the hardware.
241 *
242 *  return 0 on success, positive on failure
243 *********************************************************************/
244
245static int
246ixgb_attach(device_t dev)
247{
248	struct adapter *adapter;
249	int             tsize, rsize;
250	int             error = 0;
251
252	device_printf(dev, "%s\n", ixgb_copyright);
253	INIT_DEBUGOUT("ixgb_attach: begin");
254
255	/* Allocate, clear, and link in our adapter structure */
256	if (!(adapter = device_get_softc(dev))) {
257		device_printf(dev, "adapter structure allocation failed\n");
258		return (ENOMEM);
259	}
260	bzero(adapter, sizeof(struct adapter));
261	adapter->dev = dev;
262	adapter->osdep.dev = dev;
263	IXGB_LOCK_INIT(adapter, device_get_nameunit(dev));
264
265	if (ixgb_adapter_list != NULL)
266		ixgb_adapter_list->prev = adapter;
267	adapter->next = ixgb_adapter_list;
268	ixgb_adapter_list = adapter;
269
270	/* SYSCTL APIs */
271	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
272			SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
273			OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
274			(void *)adapter, 0,
275			ixgb_sysctl_stats, "I", "Statistics");
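	/*
	 * Usage sketch (assuming device unit 0): "sysctl dev.ixgb.0.stats=1"
	 * invokes ixgb_sysctl_stats(), which is expected to dump the hardware
	 * statistics counters to the console.
	 */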
276
277	callout_init_mtx(&adapter->timer, &adapter->mtx, 0);
278
279	/* Determine hardware revision */
280	ixgb_identify_hardware(adapter);
281
282	/* Parameters (to be read from user) */
283	adapter->num_tx_desc = IXGB_MAX_TXD;
284	adapter->num_rx_desc = IXGB_MAX_RXD;
285	adapter->tx_int_delay = TIDV;
286	adapter->rx_int_delay = RDTR;
287	adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
288
289	adapter->hw.fc.high_water = FCRTH;
290	adapter->hw.fc.low_water = FCRTL;
291	adapter->hw.fc.pause_time = FCPAUSE;
292	adapter->hw.fc.send_xon = TRUE;
293	adapter->hw.fc.type = FLOW_CONTROL;
294
295
296	/* Set the max frame size assuming standard ethernet sized frames */
297	adapter->hw.max_frame_size =
298		ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
299
300	if (ixgb_allocate_pci_resources(adapter)) {
301		device_printf(dev, "Allocation of PCI resources failed\n");
302		error = ENXIO;
303		goto err_pci;
304	}
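	/* Round the descriptor ring sizes up to a multiple of 4 KB */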
305	tsize = IXGB_ROUNDUP(adapter->num_tx_desc *
306			     sizeof(struct ixgb_tx_desc), 4096);
307
308	/* Allocate Transmit Descriptor ring */
309	if (ixgb_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
310		device_printf(dev, "Unable to allocate TxDescriptor memory\n");
311		error = ENOMEM;
312		goto err_tx_desc;
313	}
314	adapter->tx_desc_base = (struct ixgb_tx_desc *) adapter->txdma.dma_vaddr;
315
316	rsize = IXGB_ROUNDUP(adapter->num_rx_desc *
317			     sizeof(struct ixgb_rx_desc), 4096);
318
319	/* Allocate Receive Descriptor ring */
320	if (ixgb_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
321		device_printf(dev, "Unable to allocate rx_desc memory\n");
322		error = ENOMEM;
323		goto err_rx_desc;
324	}
325	adapter->rx_desc_base = (struct ixgb_rx_desc *) adapter->rxdma.dma_vaddr;
326
327	/* Allocate multicast array memory. */
328	adapter->mta = malloc(sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS *
329	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
330	if (adapter->mta == NULL) {
331		device_printf(dev, "Can not allocate multicast setup array\n");
332		error = ENOMEM;
333		goto err_hw_init;
334	}
335
336	/* Initialize the hardware */
337	if (ixgb_hardware_init(adapter)) {
338		device_printf(dev, "Unable to initialize the hardware\n");
339		error = EIO;
340		goto err_hw_init;
341	}
342	/* Setup OS specific network interface */
343	if (ixgb_setup_interface(dev, adapter) != 0)
344		goto err_hw_init;
345
346	/* Initialize statistics */
347	ixgb_clear_hw_cntrs(&adapter->hw);
348	ixgb_update_stats_counters(adapter);
349
350	INIT_DEBUGOUT("ixgb_attach: end");
351	return (0);
352
353err_hw_init:
354	ixgb_dma_free(adapter, &adapter->rxdma);
355err_rx_desc:
356	ixgb_dma_free(adapter, &adapter->txdma);
357err_tx_desc:
358err_pci:
359	if (adapter->ifp != NULL)
360		if_free(adapter->ifp);
361	ixgb_free_pci_resources(adapter);
362	sysctl_ctx_free(&adapter->sysctl_ctx);
363	free(adapter->mta, M_DEVBUF);
364	return (error);
365
366}
367
368/*********************************************************************
369 *  Device removal routine
370 *
371 *  The detach entry point is called when the driver is being removed.
372 *  This routine stops the adapter and deallocates all the resources
373 *  that were allocated for driver operation.
374 *
375 *  return 0 on success, positive on failure
376 *********************************************************************/
377
378static int
379ixgb_detach(device_t dev)
380{
381	struct adapter *adapter = device_get_softc(dev);
382	struct ifnet   *ifp = adapter->ifp;
383
384	INIT_DEBUGOUT("ixgb_detach: begin");
385
386#ifdef DEVICE_POLLING
387	if (ifp->if_capenable & IFCAP_POLLING)
388		ether_poll_deregister(ifp);
389#endif
390
391	IXGB_LOCK(adapter);
392	adapter->in_detach = 1;
393
394	ixgb_stop(adapter);
395	IXGB_UNLOCK(adapter);
396
397#if __FreeBSD_version < 500000
398	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
399#else
400	ether_ifdetach(ifp);
401#endif
402	callout_drain(&adapter->timer);
403	ixgb_free_pci_resources(adapter);
404#if __FreeBSD_version >= 500000
405	if_free(ifp);
406#endif
407
408	/* Free Transmit Descriptor ring */
409	if (adapter->tx_desc_base) {
410		ixgb_dma_free(adapter, &adapter->txdma);
411		adapter->tx_desc_base = NULL;
412	}
413	/* Free Receive Descriptor ring */
414	if (adapter->rx_desc_base) {
415		ixgb_dma_free(adapter, &adapter->rxdma);
416		adapter->rx_desc_base = NULL;
417	}
418	/* Remove from the adapter list */
419	if (ixgb_adapter_list == adapter)
420		ixgb_adapter_list = adapter->next;
421	if (adapter->next != NULL)
422		adapter->next->prev = adapter->prev;
423	if (adapter->prev != NULL)
424		adapter->prev->next = adapter->next;
425	free(adapter->mta, M_DEVBUF);
426
427	IXGB_LOCK_DESTROY(adapter);
428	return (0);
429}
430
431/*********************************************************************
432 *
433 *  Shutdown entry point
434 *
435 **********************************************************************/
436
437static int
438ixgb_shutdown(device_t dev)
439{
440	struct adapter *adapter = device_get_softc(dev);
441	IXGB_LOCK(adapter);
442	ixgb_stop(adapter);
443	IXGB_UNLOCK(adapter);
444	return (0);
445}
446
447
/*********************************************************************
 *  Transmit entry point
 *
 *  ixgb_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  If resources are not available, the stack is notified and
 *  the packet is requeued.
 **********************************************************************/
457
458static void
459ixgb_start_locked(struct ifnet * ifp)
460{
461	struct mbuf    *m_head;
462	struct adapter *adapter = ifp->if_softc;
463
464	IXGB_LOCK_ASSERT(adapter);
465
466	if (!adapter->link_active)
467		return;
468
469	while (ifp->if_snd.ifq_head != NULL) {
470		IF_DEQUEUE(&ifp->if_snd, m_head);
471
472		if (m_head == NULL)
473			break;
474
475		if (ixgb_encap(adapter, m_head)) {
476			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
477			IF_PREPEND(&ifp->if_snd, m_head);
478			break;
479		}
480		/* Send a copy of the frame to the BPF listener */
481#if __FreeBSD_version < 500000
482		if (ifp->if_bpf)
483			bpf_mtap(ifp, m_head);
484#else
485		ETHER_BPF_MTAP(ifp, m_head);
486#endif
487		/* Set timeout in case hardware has problems transmitting */
488		adapter->tx_timer = IXGB_TX_TIMEOUT;
489
490	}
491	return;
492}
493
494static void
495ixgb_start(struct ifnet *ifp)
496{
497	struct adapter *adapter = ifp->if_softc;
498
499	IXGB_LOCK(adapter);
500	ixgb_start_locked(ifp);
501	IXGB_UNLOCK(adapter);
502	return;
503}
504
505/*********************************************************************
506 *  Ioctl entry point
507 *
508 *  ixgb_ioctl is called when the user wants to configure the
509 *  interface.
510 *
511 *  return 0 on success, positive on failure
512 **********************************************************************/
513
514static int
515ixgb_ioctl(struct ifnet * ifp, IOCTL_CMD_TYPE command, caddr_t data)
516{
517	int             mask, error = 0;
518	struct ifreq   *ifr = (struct ifreq *) data;
519	struct adapter *adapter = ifp->if_softc;
520
521	if (adapter->in_detach)
522		goto out;
523
524	switch (command) {
525	case SIOCSIFADDR:
526	case SIOCGIFADDR:
527		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
528		ether_ioctl(ifp, command, data);
529		break;
530	case SIOCSIFMTU:
531		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
532		if (ifr->ifr_mtu > IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
533			error = EINVAL;
534		} else {
535			IXGB_LOCK(adapter);
536			ifp->if_mtu = ifr->ifr_mtu;
537			adapter->hw.max_frame_size =
538				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
539
540			ixgb_init_locked(adapter);
541			IXGB_UNLOCK(adapter);
542		}
543		break;
544	case SIOCSIFFLAGS:
545		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
546		IXGB_LOCK(adapter);
547		if (ifp->if_flags & IFF_UP) {
548			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
549				ixgb_init_locked(adapter);
550			}
551			ixgb_disable_promisc(adapter);
552			ixgb_set_promisc(adapter);
553		} else {
554			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
555				ixgb_stop(adapter);
556			}
557		}
558		IXGB_UNLOCK(adapter);
559		break;
560	case SIOCADDMULTI:
561	case SIOCDELMULTI:
562		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
563		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
564			IXGB_LOCK(adapter);
565			ixgb_disable_intr(adapter);
566			ixgb_set_multi(adapter);
567			ixgb_enable_intr(adapter);
568			IXGB_UNLOCK(adapter);
569		}
570		break;
571	case SIOCSIFMEDIA:
572	case SIOCGIFMEDIA:
573		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
574		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
575		break;
576	case SIOCSIFCAP:
577		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
578		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
579#ifdef DEVICE_POLLING
580		if (mask & IFCAP_POLLING) {
581			if (ifr->ifr_reqcap & IFCAP_POLLING) {
582				error = ether_poll_register(ixgb_poll, ifp);
583				if (error)
584					return(error);
585				IXGB_LOCK(adapter);
586				ixgb_disable_intr(adapter);
587				ifp->if_capenable |= IFCAP_POLLING;
588				IXGB_UNLOCK(adapter);
589			} else {
590				error = ether_poll_deregister(ifp);
591				/* Enable interrupt even in error case */
592				IXGB_LOCK(adapter);
593				ixgb_enable_intr(adapter);
594				ifp->if_capenable &= ~IFCAP_POLLING;
595				IXGB_UNLOCK(adapter);
596			}
597		}
598#endif /* DEVICE_POLLING */
599		if (mask & IFCAP_HWCSUM) {
600			if (IFCAP_HWCSUM & ifp->if_capenable)
601				ifp->if_capenable &= ~IFCAP_HWCSUM;
602			else
603				ifp->if_capenable |= IFCAP_HWCSUM;
604			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
605				ixgb_init(adapter);
606		}
607		break;
608	default:
609		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%X)\n", (int)command);
610		error = EINVAL;
611	}
612
613out:
614	return (error);
615}
616
617/*********************************************************************
618 *  Watchdog entry point
619 *
 *  This routine is called whenever the hardware stops transmitting.
621 *
622 **********************************************************************/
623
624static void
625ixgb_watchdog(struct adapter *adapter)
626{
627	struct ifnet *ifp;
628
629	ifp = adapter->ifp;
630
631	/*
632	 * If we are in this routine because of pause frames, then don't
633	 * reset the hardware.
634	 */
635	if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_TXOFF) {
636		adapter->tx_timer = IXGB_TX_TIMEOUT;
637		return;
638	}
639	if_printf(ifp, "watchdog timeout -- resetting\n");
640
641	ixgb_stop(adapter);
642	ixgb_init_locked(adapter);
643
644
645	ifp->if_oerrors++;
646
647	return;
648}
649
/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  an init entry point in the network interface structure. It is
 *  also used by the driver as a hw/sw initialization routine to
 *  get to a consistent state.
 *
 **********************************************************************/
660
661static void
662ixgb_init_locked(struct adapter *adapter)
663{
664	struct ifnet   *ifp;
665
666	INIT_DEBUGOUT("ixgb_init: begin");
667
668	IXGB_LOCK_ASSERT(adapter);
669
670	ixgb_stop(adapter);
671	ifp = adapter->ifp;
672
673	/* Get the latest mac address, User can use a LAA */
674	bcopy(IF_LLADDR(ifp), adapter->hw.curr_mac_addr,
675	    IXGB_ETH_LENGTH_OF_ADDRESS);
676
677	/* Initialize the hardware */
678	if (ixgb_hardware_init(adapter)) {
679		if_printf(ifp, "Unable to initialize the hardware\n");
680		return;
681	}
682	ixgb_enable_vlans(adapter);
683
684	/* Prepare transmit descriptors and buffers */
685	if (ixgb_setup_transmit_structures(adapter)) {
686		if_printf(ifp, "Could not setup transmit structures\n");
687		ixgb_stop(adapter);
688		return;
689	}
690	ixgb_initialize_transmit_unit(adapter);
691
692	/* Setup Multicast table */
693	ixgb_set_multi(adapter);
694
695	/* Prepare receive descriptors and buffers */
696	if (ixgb_setup_receive_structures(adapter)) {
697		if_printf(ifp, "Could not setup receive structures\n");
698		ixgb_stop(adapter);
699		return;
700	}
701	ixgb_initialize_receive_unit(adapter);
702
703	/* Don't lose promiscuous settings */
704	ixgb_set_promisc(adapter);
705
706	ifp = adapter->ifp;
707	ifp->if_drv_flags |= IFF_DRV_RUNNING;
708	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
709
710
711	if (ifp->if_capenable & IFCAP_TXCSUM)
712		ifp->if_hwassist = IXGB_CHECKSUM_FEATURES;
713	else
714		ifp->if_hwassist = 0;
715
716
717	/* Enable jumbo frames */
718	if (ifp->if_mtu > ETHERMTU) {
719		uint32_t        temp_reg;
720		IXGB_WRITE_REG(&adapter->hw, MFS,
721			       adapter->hw.max_frame_size << IXGB_MFS_SHIFT);
722		temp_reg = IXGB_READ_REG(&adapter->hw, CTRL0);
723		temp_reg |= IXGB_CTRL0_JFE;
724		IXGB_WRITE_REG(&adapter->hw, CTRL0, temp_reg);
725	}
726	callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter);
727	ixgb_clear_hw_cntrs(&adapter->hw);
728#ifdef DEVICE_POLLING
729	/*
730	 * Only disable interrupts if we are polling, make sure they are on
731	 * otherwise.
732	 */
733	if (ifp->if_capenable & IFCAP_POLLING)
734		ixgb_disable_intr(adapter);
735	else
736#endif
737		ixgb_enable_intr(adapter);
738
739	return;
740}
741
742static void
743ixgb_init(void *arg)
744{
745	struct adapter *adapter = arg;
746
747	IXGB_LOCK(adapter);
748	ixgb_init_locked(adapter);
749	IXGB_UNLOCK(adapter);
750	return;
751}
752
753#ifdef DEVICE_POLLING
754static int
755ixgb_poll_locked(struct ifnet * ifp, enum poll_cmd cmd, int count)
756{
757	struct adapter *adapter = ifp->if_softc;
758	u_int32_t       reg_icr;
759	int		rx_npkts;
760
761	IXGB_LOCK_ASSERT(adapter);
762
763	if (cmd == POLL_AND_CHECK_STATUS) {
764		reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
765		if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
766			ixgb_check_for_link(&adapter->hw);
767			ixgb_print_link_status(adapter);
768		}
769	}
770	rx_npkts = ixgb_process_receive_interrupts(adapter, count);
771	ixgb_clean_transmit_interrupts(adapter);
772
773	if (ifp->if_snd.ifq_head != NULL)
774		ixgb_start_locked(ifp);
775	return (rx_npkts);
776}
777
778static int
779ixgb_poll(struct ifnet * ifp, enum poll_cmd cmd, int count)
780{
781	struct adapter *adapter = ifp->if_softc;
782	int rx_npkts = 0;
783
784	IXGB_LOCK(adapter);
785	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
786		rx_npkts = ixgb_poll_locked(ifp, cmd, count);
787	IXGB_UNLOCK(adapter);
788	return (rx_npkts);
789}
790#endif /* DEVICE_POLLING */
791
792/*********************************************************************
793 *
794 *  Interrupt Service routine
795 *
796 **********************************************************************/
797
798static void
799ixgb_intr(void *arg)
800{
801	u_int32_t       loop_cnt = IXGB_MAX_INTR;
802	u_int32_t       reg_icr;
803	struct ifnet   *ifp;
804	struct adapter *adapter = arg;
805	boolean_t       rxdmt0 = FALSE;
806
807	IXGB_LOCK(adapter);
808
809	ifp = adapter->ifp;
810
811#ifdef DEVICE_POLLING
812	if (ifp->if_capenable & IFCAP_POLLING) {
813		IXGB_UNLOCK(adapter);
814		return;
815	}
816#endif
817
818	reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
819	if (reg_icr == 0) {
820		IXGB_UNLOCK(adapter);
821		return;
822	}
823
824	if (reg_icr & IXGB_INT_RXDMT0)
825		rxdmt0 = TRUE;
826
827#ifdef _SV_
828	if (reg_icr & IXGB_INT_RXDMT0)
829		adapter->sv_stats.icr_rxdmt0++;
830	if (reg_icr & IXGB_INT_RXO)
831		adapter->sv_stats.icr_rxo++;
832	if (reg_icr & IXGB_INT_RXT0)
833		adapter->sv_stats.icr_rxt0++;
834	if (reg_icr & IXGB_INT_TXDW)
835		adapter->sv_stats.icr_TXDW++;
836#endif				/* _SV_ */
837
838	/* Link status change */
839	if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
840		ixgb_check_for_link(&adapter->hw);
841		ixgb_print_link_status(adapter);
842	}
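	/* Make a bounded number (IXGB_MAX_INTR) of RX/TX service passes */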
843	while (loop_cnt > 0) {
844		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
845			ixgb_process_receive_interrupts(adapter, -1);
846			ixgb_clean_transmit_interrupts(adapter);
847		}
848		loop_cnt--;
849	}
850
851	if (rxdmt0 && adapter->raidc) {
852		IXGB_WRITE_REG(&adapter->hw, IMC, IXGB_INT_RXDMT0);
853		IXGB_WRITE_REG(&adapter->hw, IMS, IXGB_INT_RXDMT0);
854	}
855	if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_snd.ifq_head != NULL)
856		ixgb_start_locked(ifp);
857
858	IXGB_UNLOCK(adapter);
859	return;
860}
861
862
863/*********************************************************************
864 *
865 *  Media Ioctl callback
866 *
867 *  This routine is called whenever the user queries the status of
868 *  the interface using ifconfig.
869 *
870 **********************************************************************/
871static void
872ixgb_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
873{
874	struct adapter *adapter = ifp->if_softc;
875
876	INIT_DEBUGOUT("ixgb_media_status: begin");
877
878	ixgb_check_for_link(&adapter->hw);
879	ixgb_print_link_status(adapter);
880
881	ifmr->ifm_status = IFM_AVALID;
882	ifmr->ifm_active = IFM_ETHER;
883
884	if (!adapter->hw.link_up)
885		return;
886
887	ifmr->ifm_status |= IFM_ACTIVE;
888	ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
889
890	return;
891}
892
893/*********************************************************************
894 *
895 *  Media Ioctl callback
896 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
899 *
900 **********************************************************************/
901static int
902ixgb_media_change(struct ifnet * ifp)
903{
904	struct adapter *adapter = ifp->if_softc;
905	struct ifmedia *ifm = &adapter->media;
906
907	INIT_DEBUGOUT("ixgb_media_change: begin");
908
909	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
910		return (EINVAL);
911
912	return (0);
913}
914
915/*********************************************************************
916 *
917 *  This routine maps the mbufs to tx descriptors.
918 *
919 *  return 0 on success, positive on failure
920 **********************************************************************/
921
922static int
923ixgb_encap(struct adapter * adapter, struct mbuf * m_head)
924{
925	u_int8_t        txd_popts;
926	int             i, j, error, nsegs;
927
#if __FreeBSD_version < 500000
	struct ifvlan  *ifv = NULL;
#elif __FreeBSD_version < 700000
	struct m_tag   *mtag = NULL;	/* assumed: needed by the VLAN_OUTPUT_TAG() path below */
#endif
931	bus_dma_segment_t segs[IXGB_MAX_SCATTER];
932	bus_dmamap_t	map;
933	struct ixgb_buffer *tx_buffer = NULL;
934	struct ixgb_tx_desc *current_tx_desc = NULL;
935	struct ifnet   *ifp = adapter->ifp;
936
937	/*
938	 * Force a cleanup if number of TX descriptors available hits the
939	 * threshold
940	 */
941	if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
942		ixgb_clean_transmit_interrupts(adapter);
943	}
944	if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
945		adapter->no_tx_desc_avail1++;
946		return (ENOBUFS);
947	}
948	/*
949	 * Map the packet for DMA.
950	 */
951	if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) {
952		adapter->no_tx_map_avail++;
953		return (ENOMEM);
954	}
955	error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs,
956					&nsegs, BUS_DMA_NOWAIT);
957	if (error != 0) {
958		adapter->no_tx_dma_setup++;
		if_printf(ifp, "ixgb_encap: bus_dmamap_load_mbuf_sg failed; "
		       "error %u\n", error);
961		bus_dmamap_destroy(adapter->txtag, map);
962		return (error);
963	}
964	KASSERT(nsegs != 0, ("ixgb_encap: empty packet"));
965
966	if (nsegs > adapter->num_tx_desc_avail) {
967		adapter->no_tx_desc_avail2++;
968		bus_dmamap_destroy(adapter->txtag, map);
969		return (ENOBUFS);
970	}
971	if (ifp->if_hwassist > 0) {
972		ixgb_transmit_checksum_setup(adapter, m_head,
973					     &txd_popts);
974	} else
975		txd_popts = 0;
976
977	/* Find out if we are in vlan mode */
978#if __FreeBSD_version < 500000
979	if ((m_head->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
980	    m_head->m_pkthdr.rcvif != NULL &&
981	    m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
982		ifv = m_head->m_pkthdr.rcvif->if_softc;
#elif __FreeBSD_version < 700000
984	mtag = VLAN_OUTPUT_TAG(ifp, m_head);
985#endif
986	i = adapter->next_avail_tx_desc;
987	for (j = 0; j < nsegs; j++) {
988		tx_buffer = &adapter->tx_buffer_area[i];
989		current_tx_desc = &adapter->tx_desc_base[i];
990
991		current_tx_desc->buff_addr = htole64(segs[j].ds_addr);
992		current_tx_desc->cmd_type_len = (adapter->txd_cmd | segs[j].ds_len);
993		current_tx_desc->popts = txd_popts;
994		if (++i == adapter->num_tx_desc)
995			i = 0;
996
997		tx_buffer->m_head = NULL;
998	}
999
1000	adapter->num_tx_desc_avail -= nsegs;
1001	adapter->next_avail_tx_desc = i;
1002
1003#if __FreeBSD_version < 500000
1004	if (ifv != NULL) {
1005		/* Set the vlan id */
1006		current_tx_desc->vlan = ifv->ifv_tag;
#elif __FreeBSD_version < 700000
1008	if (mtag != NULL) {
1009		/* Set the vlan id */
1010		current_tx_desc->vlan = VLAN_TAG_VALUE(mtag);
1011#else
1012	if (m_head->m_flags & M_VLANTAG) {
1013		current_tx_desc->vlan = m_head->m_pkthdr.ether_vtag;
1014#endif
1015
1016		/* Tell hardware to add tag */
1017		current_tx_desc->cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
1018	}
1019	tx_buffer->m_head = m_head;
1020	tx_buffer->map = map;
1021	bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1022
1023	/*
1024	 * Last Descriptor of Packet needs End Of Packet (EOP)
1025	 */
1026	current_tx_desc->cmd_type_len |= (IXGB_TX_DESC_CMD_EOP);
1027
1028	/*
	 * Advance the Transmit Descriptor Tail (Tdt); this tells the hardware
1030	 * that this frame is available to transmit.
1031	 */
1032	IXGB_WRITE_REG(&adapter->hw, TDT, i);
1033
1034	return (0);
1035}
1036
1037static void
1038ixgb_set_promisc(struct adapter * adapter)
1039{
1040
1041	u_int32_t       reg_rctl;
1042	struct ifnet   *ifp = adapter->ifp;
1043
1044	reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1045
1046	if (ifp->if_flags & IFF_PROMISC) {
1047		reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1048		IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1049	} else if (ifp->if_flags & IFF_ALLMULTI) {
1050		reg_rctl |= IXGB_RCTL_MPE;
1051		reg_rctl &= ~IXGB_RCTL_UPE;
1052		IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1053	}
1054	return;
1055}
1056
1057static void
1058ixgb_disable_promisc(struct adapter * adapter)
1059{
1060	u_int32_t       reg_rctl;
1061
1062	reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1063
1064	reg_rctl &= (~IXGB_RCTL_UPE);
1065	reg_rctl &= (~IXGB_RCTL_MPE);
1066	IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1067
1068	return;
1069}
1070
1071
1072/*********************************************************************
1073 *  Multicast Update
1074 *
1075 *  This routine is called whenever multicast address list is updated.
1076 *
1077 **********************************************************************/
1078
1079static void
1080ixgb_set_multi(struct adapter * adapter)
1081{
1082	u_int32_t       reg_rctl = 0;
1083	u_int8_t        *mta;
1084	struct ifmultiaddr *ifma;
1085	int             mcnt = 0;
1086	struct ifnet   *ifp = adapter->ifp;
1087
1088	IOCTL_DEBUGOUT("ixgb_set_multi: begin");
1089
1090	mta = adapter->mta;
1091	bzero(mta, sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS *
1092	    MAX_NUM_MULTICAST_ADDRESSES);
1093
1094	if_maddr_rlock(ifp);
1095#if __FreeBSD_version < 500000
1096	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1097#else
1098	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1099#endif
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		/* Don't overflow the mta array; excess addresses force MPE below */
		if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
			bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
			    &mta[mcnt * IXGB_ETH_LENGTH_OF_ADDRESS],
			    IXGB_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
1106	}
1107	if_maddr_runlock(ifp);
1108
1109	if (mcnt > MAX_NUM_MULTICAST_ADDRESSES) {
1110		reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1111		reg_rctl |= IXGB_RCTL_MPE;
1112		IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1113	} else
1114		ixgb_mc_addr_list_update(&adapter->hw, mta, mcnt, 0);
1115
1116	return;
1117}
1118
1119
1120/*********************************************************************
1121 *  Timer routine
1122 *
1123 *  This routine checks for link status and updates statistics.
1124 *
1125 **********************************************************************/
1126
1127static void
1128ixgb_local_timer(void *arg)
1129{
1130	struct ifnet   *ifp;
1131	struct adapter *adapter = arg;
1132	ifp = adapter->ifp;
1133
1134	IXGB_LOCK_ASSERT(adapter);
1135
1136	ixgb_check_for_link(&adapter->hw);
1137	ixgb_print_link_status(adapter);
1138	ixgb_update_stats_counters(adapter);
1139	if (ixgb_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1140		ixgb_print_hw_stats(adapter);
1141	}
1142	if (adapter->tx_timer != 0 && --adapter->tx_timer == 0)
1143		ixgb_watchdog(adapter);
1144	callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter);
1145}
1146
1147static void
1148ixgb_print_link_status(struct adapter * adapter)
1149{
1150	if (adapter->hw.link_up) {
1151		if (!adapter->link_active) {
1152			if_printf(adapter->ifp, "Link is up %d Mbps %s \n",
1153			       10000,
1154			       "Full Duplex");
1155			adapter->link_active = 1;
1156		}
1157	} else {
1158		if (adapter->link_active) {
1159			if_printf(adapter->ifp, "Link is Down \n");
1160			adapter->link_active = 0;
1161		}
1162	}
1163
1164	return;
1165}
1166
1167
1168
1169/*********************************************************************
1170 *
1171 *  This routine disables all traffic on the adapter by issuing a
1172 *  global reset on the MAC and deallocates TX/RX buffers.
1173 *
1174 **********************************************************************/
1175
1176static void
1177ixgb_stop(void *arg)
1178{
1179	struct ifnet   *ifp;
1180	struct adapter *adapter = arg;
1181	ifp = adapter->ifp;
1182
1183	IXGB_LOCK_ASSERT(adapter);
1184
1185	INIT_DEBUGOUT("ixgb_stop: begin\n");
1186	ixgb_disable_intr(adapter);
1187	adapter->hw.adapter_stopped = FALSE;
1188	ixgb_adapter_stop(&adapter->hw);
1189	callout_stop(&adapter->timer);
1190	ixgb_free_transmit_structures(adapter);
1191	ixgb_free_receive_structures(adapter);
1192
1193	/* Tell the stack that the interface is no longer active */
1194	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1195	adapter->tx_timer = 0;
1196
1197	return;
1198}
1199
1200
1201/*********************************************************************
1202 *
1203 *  Determine hardware revision.
1204 *
1205 **********************************************************************/
1206static void
1207ixgb_identify_hardware(struct adapter * adapter)
1208{
1209	device_t        dev = adapter->dev;
1210
1211	/* Make sure our PCI config space has the necessary stuff set */
1212	adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1213	if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1214	      (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1215		device_printf(dev,
1216		    "Memory Access and/or Bus Master bits were not set!\n");
1217		adapter->hw.pci_cmd_word |=
1218			(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1219		pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
1220	}
1221	/* Save off the information about this board */
1222	adapter->hw.vendor_id = pci_get_vendor(dev);
1223	adapter->hw.device_id = pci_get_device(dev);
1224	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1225	adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
1226	adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
1227
1228	/* Set MacType, etc. based on this PCI info */
1229	switch (adapter->hw.device_id) {
1230	case IXGB_DEVICE_ID_82597EX:
1231	case IXGB_DEVICE_ID_82597EX_SR:
1232		adapter->hw.mac_type = ixgb_82597;
1233		break;
1234	default:
		INIT_DEBUGOUT1("Unknown device id 0x%x", adapter->hw.device_id);
1236		device_printf(dev, "unsupported device id 0x%x\n",
1237		    adapter->hw.device_id);
1238	}
1239
1240	return;
1241}
1242
1243static int
1244ixgb_allocate_pci_resources(struct adapter * adapter)
1245{
1246	int             rid;
1247	device_t        dev = adapter->dev;
1248
1249	rid = IXGB_MMBA;
1250	adapter->res_memory = bus_alloc_resource(dev, SYS_RES_MEMORY,
1251						 &rid, 0, ~0, 1,
1252						 RF_ACTIVE);
1253	if (!(adapter->res_memory)) {
1254		device_printf(dev, "Unable to allocate bus resource: memory\n");
1255		return (ENXIO);
1256	}
1257	adapter->osdep.mem_bus_space_tag =
1258		rman_get_bustag(adapter->res_memory);
1259	adapter->osdep.mem_bus_space_handle =
1260		rman_get_bushandle(adapter->res_memory);
1261	adapter->hw.hw_addr = (uint8_t *) & adapter->osdep.mem_bus_space_handle;
1262
1263	rid = 0x0;
1264	adapter->res_interrupt = bus_alloc_resource(dev, SYS_RES_IRQ,
1265						    &rid, 0, ~0, 1,
1266						  RF_SHAREABLE | RF_ACTIVE);
1267	if (!(adapter->res_interrupt)) {
1268		device_printf(dev,
1269		    "Unable to allocate bus resource: interrupt\n");
1270		return (ENXIO);
1271	}
1272	if (bus_setup_intr(dev, adapter->res_interrupt,
1273			   INTR_TYPE_NET | INTR_MPSAFE,
1274			   NULL, (void (*) (void *))ixgb_intr, adapter,
1275			   &adapter->int_handler_tag)) {
1276		device_printf(dev, "Error registering interrupt handler!\n");
1277		return (ENXIO);
1278	}
1279	adapter->hw.back = &adapter->osdep;
1280
1281	return (0);
1282}
1283
1284static void
1285ixgb_free_pci_resources(struct adapter * adapter)
1286{
1287	device_t        dev = adapter->dev;
1288
1289	if (adapter->res_interrupt != NULL) {
1290		bus_teardown_intr(dev, adapter->res_interrupt,
1291				  adapter->int_handler_tag);
1292		bus_release_resource(dev, SYS_RES_IRQ, 0,
1293				     adapter->res_interrupt);
1294	}
1295	if (adapter->res_memory != NULL) {
1296		bus_release_resource(dev, SYS_RES_MEMORY, IXGB_MMBA,
1297				     adapter->res_memory);
1298	}
1299	if (adapter->res_ioport != NULL) {
1300		bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
1301				     adapter->res_ioport);
1302	}
1303	return;
1304}
1305
1306/*********************************************************************
1307 *
1308 *  Initialize the hardware to a configuration as specified by the
1309 *  adapter structure. The controller is reset, the EEPROM is
1310 *  verified, the MAC address is set, then the shared initialization
1311 *  routines are called.
1312 *
1313 **********************************************************************/
1314static int
1315ixgb_hardware_init(struct adapter * adapter)
1316{
1317	/* Issue a global reset */
1318	adapter->hw.adapter_stopped = FALSE;
1319	ixgb_adapter_stop(&adapter->hw);
1320
1321	/* Make sure we have a good EEPROM before we read from it */
1322	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
1323		device_printf(adapter->dev,
1324		    "The EEPROM Checksum Is Not Valid\n");
1325		return (EIO);
1326	}
1327	if (!ixgb_init_hw(&adapter->hw)) {
1328		device_printf(adapter->dev, "Hardware Initialization Failed");
1329		return (EIO);
1330	}
1331
1332	return (0);
1333}
1334
1335/*********************************************************************
1336 *
1337 *  Setup networking device structure and register an interface.
1338 *
1339 **********************************************************************/
1340static int
1341ixgb_setup_interface(device_t dev, struct adapter * adapter)
1342{
1343	struct ifnet   *ifp;
1344	INIT_DEBUGOUT("ixgb_setup_interface: begin");
1345
1346	ifp = adapter->ifp = if_alloc(IFT_ETHER);
1347	if (ifp == NULL) {
1348		device_printf(dev, "can not allocate ifnet structure\n");
1349		return (-1);
1350	}
1351#if __FreeBSD_version >= 502000
1352	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1353#else
1354	ifp->if_unit = device_get_unit(dev);
1355	ifp->if_name = "ixgb";
1356#endif
1357	ifp->if_baudrate = 1000000000;
1358	ifp->if_init = ixgb_init;
1359	ifp->if_softc = adapter;
1360	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1361	ifp->if_ioctl = ixgb_ioctl;
1362	ifp->if_start = ixgb_start;
1363	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;
1364
1365#if __FreeBSD_version < 500000
1366	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
1367#else
1368	ether_ifattach(ifp, adapter->hw.curr_mac_addr);
1369#endif
1370
1371	ifp->if_capabilities = IFCAP_HWCSUM;
1372
1373	/*
1374	 * Tell the upper layer(s) we support long frames.
1375	 */
1376	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1377
1378#if __FreeBSD_version >= 500000
1379	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1380#endif
1381
1382	ifp->if_capenable = ifp->if_capabilities;
1383
1384#ifdef DEVICE_POLLING
1385	ifp->if_capabilities |= IFCAP_POLLING;
1386#endif
1387
1388	/*
1389	 * Specify the media types supported by this adapter and register
1390	 * callbacks to update media and link information
1391	 */
1392	ifmedia_init(&adapter->media, IFM_IMASK, ixgb_media_change,
1393		     ixgb_media_status);
1394	ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
1395		    0, NULL);
1396	ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
1397		    0, NULL);
1398	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1399	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1400
1401	return (0);
1402}
1403
1404/********************************************************************
1405 * Manage DMA'able memory.
1406 *******************************************************************/
1407static void
1408ixgb_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
1409{
1410	if (error)
1411		return;
1412	*(bus_addr_t *) arg = segs->ds_addr;
1413	return;
1414}
1415
1416static int
1417ixgb_dma_malloc(struct adapter * adapter, bus_size_t size,
1418		struct ixgb_dma_alloc * dma, int mapflags)
1419{
1420	device_t dev;
1421	int             r;
1422
1423	dev = adapter->dev;
1424	r = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
1425			       PAGE_SIZE, 0,	/* alignment, bounds */
1426			       BUS_SPACE_MAXADDR,	/* lowaddr */
1427			       BUS_SPACE_MAXADDR,	/* highaddr */
1428			       NULL, NULL,	/* filter, filterarg */
1429			       size,	/* maxsize */
1430			       1,	/* nsegments */
1431			       size,	/* maxsegsize */
1432			       BUS_DMA_ALLOCNOW,	/* flags */
1433#if __FreeBSD_version >= 502000
1434			       NULL,	/* lockfunc */
1435			       NULL,	/* lockfuncarg */
1436#endif
1437			       &dma->dma_tag);
1438	if (r != 0) {
1439		device_printf(dev, "ixgb_dma_malloc: bus_dma_tag_create failed; "
1440		       "error %u\n", r);
1441		goto fail_0;
1442	}
1443	r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
1444			     BUS_DMA_NOWAIT, &dma->dma_map);
1445	if (r != 0) {
1446		device_printf(dev, "ixgb_dma_malloc: bus_dmamem_alloc failed; "
1447		       "error %u\n", r);
1448		goto fail_1;
1449	}
1450	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1451			    size,
1452			    ixgb_dmamap_cb,
1453			    &dma->dma_paddr,
1454			    mapflags | BUS_DMA_NOWAIT);
1455	if (r != 0) {
1456		device_printf(dev, "ixgb_dma_malloc: bus_dmamap_load failed; "
1457		       "error %u\n", r);
1458		goto fail_2;
1459	}
1460	dma->dma_size = size;
1461	return (0);
1462fail_2:
1463	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1464fail_1:
1465	bus_dma_tag_destroy(dma->dma_tag);
1466fail_0:
1467	dma->dma_map = NULL;
1468	dma->dma_tag = NULL;
1469	return (r);
1470}
1471
1472
1473
1474static void
1475ixgb_dma_free(struct adapter * adapter, struct ixgb_dma_alloc * dma)
1476{
1477	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1478	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1479	bus_dma_tag_destroy(dma->dma_tag);
1480}
1481
1482/*********************************************************************
1483 *
1484 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1485 *  the information needed to transmit a packet on the wire.
1486 *
1487 **********************************************************************/
1488static int
1489ixgb_allocate_transmit_structures(struct adapter * adapter)
1490{
1491	if (!(adapter->tx_buffer_area =
1492	      (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
1493					    adapter->num_tx_desc, M_DEVBUF,
1494					    M_NOWAIT | M_ZERO))) {
1495		device_printf(adapter->dev,
1496		    "Unable to allocate tx_buffer memory\n");
1497		return ENOMEM;
1498	}
1499	bzero(adapter->tx_buffer_area,
1500	      sizeof(struct ixgb_buffer) * adapter->num_tx_desc);
1501
1502	return 0;
1503}
1504
1505/*********************************************************************
1506 *
1507 *  Allocate and initialize transmit structures.
1508 *
1509 **********************************************************************/
1510static int
1511ixgb_setup_transmit_structures(struct adapter * adapter)
1512{
1513	/*
1514	 * Setup DMA descriptor areas.
1515	 */
1516	if (bus_dma_tag_create(bus_get_dma_tag(adapter->dev),	/* parent */
1517			       PAGE_SIZE, 0,	/* alignment, bounds */
1518			       BUS_SPACE_MAXADDR,	/* lowaddr */
1519			       BUS_SPACE_MAXADDR,	/* highaddr */
1520			       NULL, NULL,	/* filter, filterarg */
1521			       MCLBYTES * IXGB_MAX_SCATTER,	/* maxsize */
1522			       IXGB_MAX_SCATTER,	/* nsegments */
1523			       MCLBYTES,	/* maxsegsize */
1524			       BUS_DMA_ALLOCNOW,	/* flags */
1525#if __FreeBSD_version >= 502000
1526			       NULL,	/* lockfunc */
1527			       NULL,	/* lockfuncarg */
1528#endif
1529			       &adapter->txtag)) {
1530		device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
1531		return (ENOMEM);
1532	}
1533	if (ixgb_allocate_transmit_structures(adapter))
1534		return ENOMEM;
1535
1536	bzero((void *)adapter->tx_desc_base,
1537	      (sizeof(struct ixgb_tx_desc)) * adapter->num_tx_desc);
1538
1539	adapter->next_avail_tx_desc = 0;
1540	adapter->oldest_used_tx_desc = 0;
1541
1542	/* Set number of descriptors available */
1543	adapter->num_tx_desc_avail = adapter->num_tx_desc;
1544
1545	/* Set checksum context */
1546	adapter->active_checksum_context = OFFLOAD_NONE;
1547
1548	return 0;
1549}
1550
1551/*********************************************************************
1552 *
1553 *  Enable transmit unit.
1554 *
1555 **********************************************************************/
1556static void
1557ixgb_initialize_transmit_unit(struct adapter * adapter)
1558{
1559	u_int32_t       reg_tctl;
1560	u_int64_t       tdba = adapter->txdma.dma_paddr;
1561
1562	/* Setup the Base and Length of the Tx Descriptor Ring */
1563	IXGB_WRITE_REG(&adapter->hw, TDBAL,
1564		       (tdba & 0x00000000ffffffffULL));
1565	IXGB_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));
1566	IXGB_WRITE_REG(&adapter->hw, TDLEN,
1567		       adapter->num_tx_desc *
1568		       sizeof(struct ixgb_tx_desc));
1569
1570	/* Setup the HW Tx Head and Tail descriptor pointers */
1571	IXGB_WRITE_REG(&adapter->hw, TDH, 0);
1572	IXGB_WRITE_REG(&adapter->hw, TDT, 0);
1573
1574
1575	HW_DEBUGOUT2("Base = %x, Length = %x\n",
1576		     IXGB_READ_REG(&adapter->hw, TDBAL),
1577		     IXGB_READ_REG(&adapter->hw, TDLEN));
1578
1579	IXGB_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);
1580
1581
1582	/* Program the Transmit Control Register */
1583	reg_tctl = IXGB_READ_REG(&adapter->hw, TCTL);
1584	reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
1585	IXGB_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
1586
1587	/* Setup Transmit Descriptor Settings for this adapter */
1588	adapter->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS;
1589
1590	if (adapter->tx_int_delay > 0)
1591		adapter->txd_cmd |= IXGB_TX_DESC_CMD_IDE;
1592	return;
1593}
1594
1595/*********************************************************************
1596 *
1597 *  Free all transmit related data structures.
1598 *
1599 **********************************************************************/
1600static void
1601ixgb_free_transmit_structures(struct adapter * adapter)
1602{
1603	struct ixgb_buffer *tx_buffer;
1604	int             i;
1605
1606	INIT_DEBUGOUT("free_transmit_structures: begin");
1607
1608	if (adapter->tx_buffer_area != NULL) {
1609		tx_buffer = adapter->tx_buffer_area;
1610		for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
1611			if (tx_buffer->m_head != NULL) {
1612				bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1613				bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1614				m_freem(tx_buffer->m_head);
1615			}
1616			tx_buffer->m_head = NULL;
1617		}
1618	}
1619	if (adapter->tx_buffer_area != NULL) {
1620		free(adapter->tx_buffer_area, M_DEVBUF);
1621		adapter->tx_buffer_area = NULL;
1622	}
1623	if (adapter->txtag != NULL) {
1624		bus_dma_tag_destroy(adapter->txtag);
1625		adapter->txtag = NULL;
1626	}
1627	return;
1628}
1629
1630/*********************************************************************
1631 *
1632 *  The offload context needs to be set when we transfer the first
1633 *  packet of a particular protocol (TCP/UDP). We change the
1634 *  context only if the protocol type changes.
1635 *
1636 **********************************************************************/
1637static void
1638ixgb_transmit_checksum_setup(struct adapter * adapter,
1639			     struct mbuf * mp,
1640			     u_int8_t * txd_popts)
1641{
1642	struct ixgb_context_desc *TXD;
1643	struct ixgb_buffer *tx_buffer;
1644	int             curr_txd;
1645
1646	if (mp->m_pkthdr.csum_flags) {
1647
1648		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
1649			*txd_popts = IXGB_TX_DESC_POPTS_TXSM;
1650			if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
1651				return;
1652			else
1653				adapter->active_checksum_context = OFFLOAD_TCP_IP;
1654		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
1655			*txd_popts = IXGB_TX_DESC_POPTS_TXSM;
1656			if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
1657				return;
1658			else
1659				adapter->active_checksum_context = OFFLOAD_UDP_IP;
1660		} else {
1661			*txd_popts = 0;
1662			return;
1663		}
1664	} else {
1665		*txd_popts = 0;
1666		return;
1667	}
1668
1669	/*
1670	 * If we reach this point, the checksum offload context needs to be
1671	 * reset.
1672	 */
1673	curr_txd = adapter->next_avail_tx_desc;
1674	tx_buffer = &adapter->tx_buffer_area[curr_txd];
1675	TXD = (struct ixgb_context_desc *) & adapter->tx_desc_base[curr_txd];
1676
1677
1678	TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip);
1679	TXD->tucse = 0;
1680
1681	TXD->mss = 0;
1682
1683	if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
1684		TXD->tucso =
1685			ENET_HEADER_SIZE + sizeof(struct ip) +
1686			offsetof(struct tcphdr, th_sum);
1687	} else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
1688		TXD->tucso =
1689			ENET_HEADER_SIZE + sizeof(struct ip) +
1690			offsetof(struct udphdr, uh_sum);
1691	}
1692	TXD->cmd_type_len = IXGB_CONTEXT_DESC_CMD_TCP | IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE;
1693
1694	tx_buffer->m_head = NULL;
1695
1696	if (++curr_txd == adapter->num_tx_desc)
1697		curr_txd = 0;
1698
1699	adapter->num_tx_desc_avail--;
1700	adapter->next_avail_tx_desc = curr_txd;
1701	return;
1702}
1703
1704/**********************************************************************
1705 *
1706 *  Examine each tx_buffer in the used queue. If the hardware is done
1707 *  processing the packet then free associated resources. The
1708 *  tx_buffer is put back on the free queue.
1709 *
1710 **********************************************************************/
1711static void
1712ixgb_clean_transmit_interrupts(struct adapter * adapter)
1713{
1714	int             i, num_avail;
1715	struct ixgb_buffer *tx_buffer;
1716	struct ixgb_tx_desc *tx_desc;
1717
1718	IXGB_LOCK_ASSERT(adapter);
1719
1720	if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
1721		return;
1722
1723#ifdef _SV_
1724	adapter->clean_tx_interrupts++;
1725#endif
1726	num_avail = adapter->num_tx_desc_avail;
1727	i = adapter->oldest_used_tx_desc;
1728
1729	tx_buffer = &adapter->tx_buffer_area[i];
1730	tx_desc = &adapter->tx_desc_base[i];
1731
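	/* Reclaim every descriptor the hardware has marked done (DD) */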
1732	while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) {
1733
1734		tx_desc->status = 0;
1735		num_avail++;
1736
1737		if (tx_buffer->m_head) {
1738			bus_dmamap_sync(adapter->txtag, tx_buffer->map,
1739					BUS_DMASYNC_POSTWRITE);
1740			bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1741			bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1742			m_freem(tx_buffer->m_head);
1743			tx_buffer->m_head = NULL;
1744		}
1745		if (++i == adapter->num_tx_desc)
1746			i = 0;
1747
1748		tx_buffer = &adapter->tx_buffer_area[i];
1749		tx_desc = &adapter->tx_desc_base[i];
1750	}
1751
1752	adapter->oldest_used_tx_desc = i;
1753
1754	/*
1755	 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
1756	 * it is OK to send packets. If there are no pending descriptors,
1757	 * clear the timeout. Otherwise, if some descriptors have been freed,
1758	 * restart the timeout.
1759	 */
1760	if (num_avail > IXGB_TX_CLEANUP_THRESHOLD) {
1761		struct ifnet   *ifp = adapter->ifp;
1762
1763		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1764		if (num_avail == adapter->num_tx_desc)
1765			adapter->tx_timer = 0;
1766		else if (num_avail == adapter->num_tx_desc_avail)
1767			adapter->tx_timer = IXGB_TX_TIMEOUT;
1768	}
1769	adapter->num_tx_desc_avail = num_avail;
1770	return;
1771}
1772
1773
1774/*********************************************************************
1775 *
1776 *  Get a buffer from system mbuf buffer pool.
1777 *
1778 **********************************************************************/
1779static int
1780ixgb_get_buf(int i, struct adapter * adapter,
1781	     struct mbuf * nmp)
1782{
1783	register struct mbuf *mp = nmp;
1784	struct ixgb_buffer *rx_buffer;
1785	struct ifnet   *ifp;
1786	bus_addr_t      paddr;
1787	int             error;
1788
1789	ifp = adapter->ifp;
1790
1791	if (mp == NULL) {
1792
1793		mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1794
1795		if (mp == NULL) {
1796			adapter->mbuf_alloc_failed++;
1797			return (ENOBUFS);
1798		}
1799		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1800	} else {
1801		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1802		mp->m_data = mp->m_ext.ext_buf;
1803		mp->m_next = NULL;
1804	}
1805
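	/*
	 * For standard-MTU frames, shift the data pointer by ETHER_ALIGN
	 * (2 bytes) so the IP header that follows the 14-byte Ethernet
	 * header ends up 32-bit aligned.
	 */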
1806	if (ifp->if_mtu <= ETHERMTU) {
1807		m_adj(mp, ETHER_ALIGN);
1808	}
1809	rx_buffer = &adapter->rx_buffer_area[i];
1810
1811	/*
1812	 * Using memory from the mbuf cluster pool, invoke the bus_dma
1813	 * machinery to arrange the memory mapping.
1814	 */
1815	error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
1816				mtod(mp, void *), mp->m_len,
1817				ixgb_dmamap_cb, &paddr, 0);
1818	if (error) {
1819		m_free(mp);
1820		return (error);
1821	}
1822	rx_buffer->m_head = mp;
1823	adapter->rx_desc_base[i].buff_addr = htole64(paddr);
1824	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
1825
1826	return (0);
1827}
1828
1829/*********************************************************************
1830 *
1831 *  Allocate memory for rx_buffer structures. Since we use one
1832 *  rx_buffer per received packet, the maximum number of rx_buffers
1833 *  that we'll need is equal to the number of receive descriptors
1834 *  that we've allocated.
1835 *
1836 **********************************************************************/
1837static int
1838ixgb_allocate_receive_structures(struct adapter * adapter)
1839{
1840	int             i, error;
1841	struct ixgb_buffer *rx_buffer;
1842
1843	if (!(adapter->rx_buffer_area =
1844	      (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
1845					    adapter->num_rx_desc, M_DEVBUF,
1846					    M_NOWAIT | M_ZERO))) {
1847		device_printf(adapter->dev,
1848		    "Unable to allocate rx_buffer memory\n");
1849		return (ENOMEM);
1850	}
1851	bzero(adapter->rx_buffer_area,
1852	      sizeof(struct ixgb_buffer) * adapter->num_rx_desc);
1853
1854	error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),/* parent */
1855				   PAGE_SIZE, 0,	/* alignment, bounds */
1856				   BUS_SPACE_MAXADDR,	/* lowaddr */
1857				   BUS_SPACE_MAXADDR,	/* highaddr */
1858				   NULL, NULL,	/* filter, filterarg */
1859				   MCLBYTES,	/* maxsize */
1860				   1,	/* nsegments */
1861				   MCLBYTES,	/* maxsegsize */
1862				   BUS_DMA_ALLOCNOW,	/* flags */
1863#if __FreeBSD_version >= 502000
1864				   NULL,	/* lockfunc */
1865				   NULL,	/* lockfuncarg */
1866#endif
1867				   &adapter->rxtag);
1868	if (error != 0) {
1869		device_printf(adapter->dev, "ixgb_allocate_receive_structures: "
1870		       "bus_dma_tag_create failed; error %u\n",
1871		       error);
1872		goto fail_0;
1873	}
1874	rx_buffer = adapter->rx_buffer_area;
1875	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
1876		error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
1877					  &rx_buffer->map);
1878		if (error != 0) {
1879			device_printf(adapter->dev,
1880			       "ixgb_allocate_receive_structures: "
1881			       "bus_dmamap_create failed; error %u\n",
1882			       error);
1883			goto fail_1;
1884		}
1885	}
1886
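	/* Pre-load every receive descriptor with an mbuf cluster. */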
1887	for (i = 0; i < adapter->num_rx_desc; i++) {
1888		if (ixgb_get_buf(i, adapter, NULL) == ENOBUFS) {
1889			adapter->rx_buffer_area[i].m_head = NULL;
1890			adapter->rx_desc_base[i].buff_addr = 0;
1891			return (ENOBUFS);
1892		}
1893	}
1894
1895	return (0);
1896fail_1:
1897	bus_dma_tag_destroy(adapter->rxtag);
1898fail_0:
1899	adapter->rxtag = NULL;
1900	free(adapter->rx_buffer_area, M_DEVBUF);
1901	adapter->rx_buffer_area = NULL;
1902	return (error);
1903}
1904
1905/*********************************************************************
1906 *
1907 *  Allocate and initialize receive structures.
1908 *
1909 **********************************************************************/
1910static int
1911ixgb_setup_receive_structures(struct adapter * adapter)
1912{
1913	bzero((void *)adapter->rx_desc_base,
1914	      (sizeof(struct ixgb_rx_desc)) * adapter->num_rx_desc);
1915
1916	if (ixgb_allocate_receive_structures(adapter))
1917		return ENOMEM;
1918
1919	/* Setup our descriptor pointers */
1920	adapter->next_rx_desc_to_check = 0;
1921	adapter->next_rx_desc_to_use = 0;
1922	return (0);
1923}
1924
1925/*********************************************************************
1926 *
1927 *  Enable receive unit.
1928 *
1929 **********************************************************************/
1930static void
1931ixgb_initialize_receive_unit(struct adapter * adapter)
1932{
1933	u_int32_t       reg_rctl;
1934	u_int32_t       reg_rxcsum;
1935	u_int32_t       reg_rxdctl;
1936	struct ifnet   *ifp;
1937	u_int64_t       rdba = adapter->rxdma.dma_paddr;
1938
1939	ifp = adapter->ifp;
1940
1941	/*
1942	 * Make sure receives are disabled while setting up the descriptor
1943	 * ring
1944	 */
1945	reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1946	IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN);
1947
1948	/* Set the Receive Delay Timer Register */
1949	IXGB_WRITE_REG(&adapter->hw, RDTR,
1950		       adapter->rx_int_delay);
1951
1952
1953	/* Setup the Base and Length of the Rx Descriptor Ring */
1954	IXGB_WRITE_REG(&adapter->hw, RDBAL,
1955		       (rdba & 0x00000000ffffffffULL));
1956	IXGB_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
1957	IXGB_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
1958		       sizeof(struct ixgb_rx_desc));
1959
1960	/* Setup the HW Rx Head and Tail Descriptor Pointers */
1961	IXGB_WRITE_REG(&adapter->hw, RDH, 0);
1962
1963	IXGB_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
1964
1965
1966
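	/* Program the descriptor write-back, host and prefetch thresholds. */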
1967	reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
1968		| RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
1969		| RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
1970	IXGB_WRITE_REG(&adapter->hw, RXDCTL, reg_rxdctl);
1971
1972
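	/*
	 * Enable receive interrupt moderation (RAIDC): gate the receive
	 * timer interrupt and derive the poll threshold from the ring size
	 * and the configured receive interrupt delay.
	 */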
1973	adapter->raidc = 1;
1974	if (adapter->raidc) {
1975		uint32_t        raidc;
1976		uint8_t         poll_threshold;
1977#define IXGB_RAIDC_POLL_DEFAULT 120
1978
1979		poll_threshold = ((adapter->num_rx_desc - 1) >> 3);
1980		poll_threshold >>= 1;
1981		poll_threshold &= 0x3F;
1982		raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE |
1983			(IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) |
1984			(adapter->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) |
1985			poll_threshold;
1986		IXGB_WRITE_REG(&adapter->hw, RAIDC, raidc);
1987	}
1988	/* Enable Receive Checksum Offload for TCP and UDP if requested */
1989	if (ifp->if_capenable & IFCAP_RXCSUM) {
1990		reg_rxcsum = IXGB_READ_REG(&adapter->hw, RXCSUM);
1991		reg_rxcsum |= IXGB_RXCSUM_TUOFL;
1992		IXGB_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
1993	}
1994	/* Setup the Receive Control Register */
1995	reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1996	reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
1997	reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC |
1998		IXGB_RCTL_CFF |
1999		(adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
2000
2001	switch (adapter->rx_buffer_len) {
2002	default:
2003	case IXGB_RXBUFFER_2048:
2004		reg_rctl |= IXGB_RCTL_BSIZE_2048;
2005		break;
2006	case IXGB_RXBUFFER_4096:
2007		reg_rctl |= IXGB_RCTL_BSIZE_4096;
2008		break;
2009	case IXGB_RXBUFFER_8192:
2010		reg_rctl |= IXGB_RCTL_BSIZE_8192;
2011		break;
2012	case IXGB_RXBUFFER_16384:
2013		reg_rctl |= IXGB_RCTL_BSIZE_16384;
2014		break;
2015	}
2016
2017	reg_rctl |= IXGB_RCTL_RXEN;
2018
2019
2020	/* Enable Receives */
2021	IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2022
2023	return;
2024}
2025
2026/*********************************************************************
2027 *
2028 *  Free receive-related data structures.
2029 *
2030 **********************************************************************/
2031static void
2032ixgb_free_receive_structures(struct adapter * adapter)
2033{
2034	struct ixgb_buffer *rx_buffer;
2035	int             i;
2036
2037	INIT_DEBUGOUT("free_receive_structures: begin");
2038
2039	if (adapter->rx_buffer_area != NULL) {
2040		rx_buffer = adapter->rx_buffer_area;
2041		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2042			if (rx_buffer->map != NULL) {
2043				bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2044				bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2045			}
2046			if (rx_buffer->m_head != NULL)
2047				m_freem(rx_buffer->m_head);
2048			rx_buffer->m_head = NULL;
2049		}
2050	}
2051	if (adapter->rx_buffer_area != NULL) {
2052		free(adapter->rx_buffer_area, M_DEVBUF);
2053		adapter->rx_buffer_area = NULL;
2054	}
2055	if (adapter->rxtag != NULL) {
2056		bus_dma_tag_destroy(adapter->rxtag);
2057		adapter->rxtag = NULL;
2058	}
2059	return;
2060}
2061
2062/*********************************************************************
2063 *
2064 *  This routine executes in interrupt context. It replenishes
2065 *  the mbufs in the descriptor ring and passes data that has been
2066 *  DMA'd into host memory up to the upper layer.
2067 *
2068 *  We loop at most count times if count is > 0, or until done if
2069 *  count < 0.
2070 *
2071 *********************************************************************/
2072static int
2073ixgb_process_receive_interrupts(struct adapter * adapter, int count)
2074{
2075	struct ifnet   *ifp;
2076	struct mbuf    *mp;
2077#if __FreeBSD_version < 500000
2078	struct ether_header *eh;
2079#endif
2080	int             eop = 0;
2081	int             len;
2082	u_int8_t        accept_frame = 0;
2083	int             i;
2084	int             next_to_use = 0;
2085	int             eop_desc;
2086	int		rx_npkts = 0;
2087	/* Pointer to the receive descriptor being examined. */
2088	struct ixgb_rx_desc *current_desc;
2089
2090	IXGB_LOCK_ASSERT(adapter);
2091
2092	ifp = adapter->ifp;
2093	i = adapter->next_rx_desc_to_check;
2094	next_to_use = adapter->next_rx_desc_to_use;
2095	eop_desc = adapter->next_rx_desc_to_check;
2096	current_desc = &adapter->rx_desc_base[i];
2097
2098	if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD)) {
2099#ifdef _SV_
2100		adapter->no_pkts_avail++;
2101#endif
2102		return (rx_npkts);
2103	}
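	/*
	 * Walk the ring while descriptors have the DD bit set. Fragments of
	 * a multi-descriptor frame are chained through fmp/lmp; once the EOP
	 * descriptor is seen, the completed packet is handed to the stack
	 * with the adapter lock dropped.
	 */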
2104	while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) && (count != 0)) {
2105
2106		mp = adapter->rx_buffer_area[i].m_head;
2107		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2108				BUS_DMASYNC_POSTREAD);
2109		accept_frame = 1;
2110		if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) {
2111			count--;
2112			eop = 1;
2113		} else {
2114			eop = 0;
2115		}
2116		len = current_desc->length;
2117
2118		if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
2119			    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
2120					    IXGB_RX_DESC_ERRORS_RXE)) {
2121			accept_frame = 0;
2122		}
2123		if (accept_frame) {
2124
2125			/* Assign correct length to the current fragment */
2126			mp->m_len = len;
2127
2128			if (adapter->fmp == NULL) {
2129				mp->m_pkthdr.len = len;
2130				adapter->fmp = mp;	/* Store the first mbuf */
2131				adapter->lmp = mp;
2132			} else {
2133				/* Chain mbufs together */
2134				mp->m_flags &= ~M_PKTHDR;
2135				adapter->lmp->m_next = mp;
2136				adapter->lmp = adapter->lmp->m_next;
2137				adapter->fmp->m_pkthdr.len += len;
2138			}
2139
2140			if (eop) {
2141				eop_desc = i;
2142				adapter->fmp->m_pkthdr.rcvif = ifp;
2143
2144#if __FreeBSD_version < 500000
2145				eh = mtod(adapter->fmp, struct ether_header *);
2146
2147				/* Remove ethernet header from mbuf */
2148				m_adj(adapter->fmp, sizeof(struct ether_header));
2149				ixgb_receive_checksum(adapter, current_desc,
2150						      adapter->fmp);
2151
2152				if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
2153					VLAN_INPUT_TAG(eh, adapter->fmp,
2154						     current_desc->special);
2155				else
2156					ether_input(ifp, eh, adapter->fmp);
2157#else
2158				ixgb_receive_checksum(adapter, current_desc,
2159						      adapter->fmp);
2160#if __FreeBSD_version < 700000
2161				if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
2162					VLAN_INPUT_TAG(ifp, adapter->fmp,
2163						       current_desc->special);
2164#else
2165				if (current_desc->status & IXGB_RX_DESC_STATUS_VP) {
2166					adapter->fmp->m_pkthdr.ether_vtag =
2167					    current_desc->special;
2168					adapter->fmp->m_flags |= M_VLANTAG;
2169				}
2170#endif
2171
2172				if (adapter->fmp != NULL) {
2173					IXGB_UNLOCK(adapter);
2174					(*ifp->if_input) (ifp, adapter->fmp);
2175					IXGB_LOCK(adapter);
2176					rx_npkts++;
2177				}
2178#endif
2179				adapter->fmp = NULL;
2180				adapter->lmp = NULL;
2181			}
2182			adapter->rx_buffer_area[i].m_head = NULL;
2183		} else {
2184			adapter->dropped_pkts++;
2185			if (adapter->fmp != NULL)
2186				m_freem(adapter->fmp);
2187			adapter->fmp = NULL;
2188			adapter->lmp = NULL;
2189		}
2190
2191	/* Zero out the receive descriptor's status */
2192		current_desc->status = 0;
2193
2194		/* Advance our pointers to the next descriptor */
2195		if (++i == adapter->num_rx_desc) {
2196			i = 0;
2197			current_desc = adapter->rx_desc_base;
2198		} else
2199			current_desc++;
2200	}
2201	adapter->next_rx_desc_to_check = i;
2202
2203	if (--i < 0)
2204		i = (adapter->num_rx_desc - 1);
2205
2206	/*
2207	 * 82597EX: Workaround for a redundant write back in the receive
2208	 * descriptor ring (causes memory corruption). Avoid using and
2209	 * re-submitting the most recently received RX descriptor back to
2210	 * hardware.
2211	 *
2212	 * If the last written-back descriptor has the EOP bit set, avoid
2213	 * re-submitting that most recently received RX descriptor back to
2214	 * hardware.
2215	 * If it does not, avoid re-submitting the most recently received RX
2216	 * descriptors back to the last descriptor with the EOP bit set.
2217	 */
2218	if (eop_desc != i) {
2219		if (++eop_desc == adapter->num_rx_desc)
2220			eop_desc = 0;
2221		i = eop_desc;
2222	}
2223	/* Replenish the descriptors with new mbufs up to the last descriptor with the EOP bit set */
2224	while (next_to_use != i) {
2225		current_desc = &adapter->rx_desc_base[next_to_use];
2226		if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
2227			    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
2228					     IXGB_RX_DESC_ERRORS_RXE))) {
2229			mp = adapter->rx_buffer_area[next_to_use].m_head;
2230			ixgb_get_buf(next_to_use, adapter, mp);
2231		} else {
2232			if (ixgb_get_buf(next_to_use, adapter, NULL) == ENOBUFS)
2233				break;
2234		}
2235		/* Advance our pointers to the next descriptor */
2236		if (++next_to_use == adapter->num_rx_desc) {
2237			next_to_use = 0;
2238			current_desc = adapter->rx_desc_base;
2239		} else
2240			current_desc++;
2241	}
2242	adapter->next_rx_desc_to_use = next_to_use;
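	/*
	 * The tail register is left one descriptor behind next_to_use, so
	 * back up by one (with wraparound) before writing RDT.
	 */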
2243	if (--next_to_use < 0)
2244		next_to_use = (adapter->num_rx_desc - 1);
2245	/* Advance the IXGB's Receive Queue #0  "Tail Pointer" */
2246	IXGB_WRITE_REG(&adapter->hw, RDT, next_to_use);
2247
2248	return (rx_npkts);
2249}
2250
2251/*********************************************************************
2252 *
2253 *  Verify that the hardware indicated that the checksum is valid.
2254 *  Inform the stack of the checksum status so that the stack
2255 *  doesn't spend time verifying it.
2256 *
2257 *********************************************************************/
2258static void
2259ixgb_receive_checksum(struct adapter * adapter,
2260		      struct ixgb_rx_desc * rx_desc,
2261		      struct mbuf * mp)
2262{
2263	if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) {
2264		mp->m_pkthdr.csum_flags = 0;
2265		return;
2266	}
2267	if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) {
2268		/* Did it pass? */
2269		if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) {
2270			/* IP Checksum Good */
2271			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2272			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2273
2274		} else {
2275			mp->m_pkthdr.csum_flags = 0;
2276		}
2277	}
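	/*
	 * A TCP/UDP checksum verified by hardware is reported by setting
	 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR with a pseudo-checksum of
	 * 0xffff, which tells the stack to skip its own verification.
	 */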
2278	if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) {
2279		/* Did it pass? */
2280		if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) {
2281			mp->m_pkthdr.csum_flags |=
2282				(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2283			mp->m_pkthdr.csum_data = htons(0xffff);
2284		}
2285	}
2286	return;
2287}
2288
2289
2290static void
2291ixgb_enable_vlans(struct adapter * adapter)
2292{
2293	uint32_t        ctrl;
2294
2295	ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2296	ctrl |= IXGB_CTRL0_VME;
2297	IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2298
2299	return;
2300}
2301
2302
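/*
 * Unmask the interrupt causes serviced by the driver; ixgb_disable_intr()
 * masks every cause by writing all ones to IMC.
 */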
2303static void
2304ixgb_enable_intr(struct adapter * adapter)
2305{
2306	IXGB_WRITE_REG(&adapter->hw, IMS, (IXGB_INT_RXT0 | IXGB_INT_TXDW |
2307			    IXGB_INT_RXDMT0 | IXGB_INT_LSC | IXGB_INT_RXO));
2308	return;
2309}
2310
2311static void
2312ixgb_disable_intr(struct adapter * adapter)
2313{
2314	IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
2315	return;
2316}
2317
2318void
2319ixgb_write_pci_cfg(struct ixgb_hw * hw,
2320		   uint32_t reg,
2321		   uint16_t * value)
2322{
2323	pci_write_config(((struct ixgb_osdep *) hw->back)->dev, reg,
2324			 *value, 2);
2325}
2326
2327/**********************************************************************
2328 *
2329 *  Update the board statistics counters.
2330 *
2331 **********************************************************************/
2332static void
2333ixgb_update_stats_counters(struct adapter * adapter)
2334{
2335	struct ifnet   *ifp;
2336
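	/* Accumulate the hardware statistics registers into the soft counters. */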
2337	adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
2338	adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
2339	adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
2340	adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
2341	adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
2342	adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
2343	adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
2344	adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
2345	adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
2346	adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
2347
2348	adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
2349	adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
2350	adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
2351	adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
2352	adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
2353	adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
2354	adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
2355	adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
2356	adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
2357	adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
2358	adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
2359	adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
2360	adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
2361	adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
2362	adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
2363	adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
2364	adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
2365	adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
2366	adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
2367	adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
2368	adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
2369	adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
2370	adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
2371	adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
2372	adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
2373	adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
2374	adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
2375
2376	adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
2377	adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
2378	adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
2379	adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
2380	adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
2381	adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
2382	adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
2383	adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
2384	adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
2385	adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
2386	adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
2387	adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
2388	adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
2389	adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
2390	adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
2391	adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
2392	adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
2393	adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
2394	adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
2395	adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
2396	adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
2397	adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
2398
2399	ifp = adapter->ifp;
2400
2401	/* Fill out the OS statistics structure */
2402	ifp->if_ipackets = adapter->stats.gprcl;
2403	ifp->if_opackets = adapter->stats.gptcl;
2404	ifp->if_ibytes = adapter->stats.gorcl;
2405	ifp->if_obytes = adapter->stats.gotcl;
2406	ifp->if_imcasts = adapter->stats.mprcl;
2407	ifp->if_collisions = 0;
2408
2409	/* Rx Errors */
2410	ifp->if_ierrors =
2411		adapter->dropped_pkts +
2412		adapter->stats.crcerrs +
2413		adapter->stats.rnbc +
2414		adapter->stats.mpc +
2415		adapter->stats.rlec;
2416
2417
2418}
2419
2420
2421/**********************************************************************
2422 *
2423 *  This routine is called only when ixgb_display_debug_stats is enabled.
2424 *  It provides a way to examine important statistics maintained by
2425 *  the driver and hardware.
2426 *
2427 **********************************************************************/
2428static void
2429ixgb_print_hw_stats(struct adapter * adapter)
2430{
2431	char            buf_speed[100], buf_type[100];
2432	ixgb_bus_speed  bus_speed;
2433	ixgb_bus_type   bus_type;
2434	device_t dev;
2435
2436	dev = adapter->dev;
2437#ifdef _SV_
2438	device_printf(dev, "Packets not Avail = %ld\n",
2439	       adapter->no_pkts_avail);
2440	device_printf(dev, "CleanTxInterrupts = %ld\n",
2441	       adapter->clean_tx_interrupts);
2442	device_printf(dev, "ICR RXDMT0 = %lld\n",
2443	       (long long)adapter->sv_stats.icr_rxdmt0);
2444	device_printf(dev, "ICR RXO = %lld\n",
2445	       (long long)adapter->sv_stats.icr_rxo);
2446	device_printf(dev, "ICR RXT0 = %lld\n",
2447	       (long long)adapter->sv_stats.icr_rxt0);
2448	device_printf(dev, "ICR TXDW = %lld\n",
2449	       (long long)adapter->sv_stats.icr_TXDW);
2450#endif				/* _SV_ */
2451
2452	bus_speed = adapter->hw.bus.speed;
2453	bus_type = adapter->hw.bus.type;
2454	sprintf(buf_speed,
2455		bus_speed == ixgb_bus_speed_33 ? "33MHz" :
2456		bus_speed == ixgb_bus_speed_66 ? "66MHz" :
2457		bus_speed == ixgb_bus_speed_100 ? "100MHz" :
2458		bus_speed == ixgb_bus_speed_133 ? "133MHz" :
2459		"UNKNOWN");
2460	device_printf(dev, "PCI_Bus_Speed = %s\n",
2461	       buf_speed);
2462
2463	sprintf(buf_type,
2464		bus_type == ixgb_bus_type_pci ? "PCI" :
2465		bus_type == ixgb_bus_type_pcix ? "PCI-X" :
2466		"UNKNOWN");
2467	device_printf(dev, "PCI_Bus_Type = %s\n",
2468	       buf_type);
2469
2470	device_printf(dev, "Tx Descriptors not Avail1 = %ld\n",
2471	       adapter->no_tx_desc_avail1);
2472	device_printf(dev, "Tx Descriptors not Avail2 = %ld\n",
2473	       adapter->no_tx_desc_avail2);
2474	device_printf(dev, "Std Mbuf Failed = %ld\n",
2475	       adapter->mbuf_alloc_failed);
2476	device_printf(dev, "Std Cluster Failed = %ld\n",
2477	       adapter->mbuf_cluster_failed);
2478
2479	device_printf(dev, "Defer count = %lld\n",
2480	       (long long)adapter->stats.dc);
2481	device_printf(dev, "Missed Packets = %lld\n",
2482	       (long long)adapter->stats.mpc);
2483	device_printf(dev, "Receive No Buffers = %lld\n",
2484	       (long long)adapter->stats.rnbc);
2485	device_printf(dev, "Receive length errors = %lld\n",
2486	       (long long)adapter->stats.rlec);
2487	device_printf(dev, "Crc errors = %lld\n",
2488	       (long long)adapter->stats.crcerrs);
2489	device_printf(dev, "Driver dropped packets = %ld\n",
2490	       adapter->dropped_pkts);
2491
2492	device_printf(dev, "XON Rcvd = %lld\n",
2493	       (long long)adapter->stats.xonrxc);
2494	device_printf(dev, "XON Xmtd = %lld\n",
2495	       (long long)adapter->stats.xontxc);
2496	device_printf(dev, "XOFF Rcvd = %lld\n",
2497	       (long long)adapter->stats.xoffrxc);
2498	device_printf(dev, "XOFF Xmtd = %lld\n",
2499	       (long long)adapter->stats.xofftxc);
2500
2501	device_printf(dev, "Good Packets Rcvd = %lld\n",
2502	       (long long)adapter->stats.gprcl);
2503	device_printf(dev, "Good Packets Xmtd = %lld\n",
2504	       (long long)adapter->stats.gptcl);
2505
2506	device_printf(dev, "Jumbo frames recvd = %lld\n",
2507	       (long long)adapter->stats.jprcl);
2508	device_printf(dev, "Jumbo frames Xmtd = %lld\n",
2509	       (long long)adapter->stats.jptcl);
2510
2511	return;
2512
2513}
2514
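/*
 * Sysctl handler: writing 1 to the node dumps the driver and hardware
 * statistics via ixgb_print_hw_stats().
 */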
2515static int
2516ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS)
2517{
2518	int             error;
2519	int             result;
2520	struct adapter *adapter;
2521
2522	result = -1;
2523	error = sysctl_handle_int(oidp, &result, 0, req);
2524
2525	if (error || !req->newptr)
2526		return (error);
2527
2528	if (result == 1) {
2529		adapter = (struct adapter *) arg1;
2530		ixgb_print_hw_stats(adapter);
2531	}
2532	return error;
2533}
2534