1/*******************************************************************************
2
3Copyright (c) 2001-2004, Intel Corporation
4All rights reserved.
5
6Redistribution and use in source and binary forms, with or without
7modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10    this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13    notice, this list of conditions and the following disclaimer in the
14    documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17    contributors may be used to endorse or promote products derived from
18    this software without specific prior written permission.
19
20THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30POSSIBILITY OF SUCH DAMAGE.
31
32***************************************************************************/
33
34/*$FreeBSD: stable/11/sys/dev/ixgb/if_ixgb.c 333171 2018-05-02 15:21:51Z sbruno $*/
35
36#ifdef HAVE_KERNEL_OPTION_HEADERS
37#include "opt_device_polling.h"
38#endif
39
40#include <dev/ixgb/if_ixgb.h>
41
42/*********************************************************************
43 *  Set this to one to display debug statistics
44 *********************************************************************/
45int             ixgb_display_debug_stats = 0;
46
47/*********************************************************************
48 *  Linked list of board private structures for all NICs found
49 *********************************************************************/
50
51struct adapter *ixgb_adapter_list = NULL;
52
53
54
55/*********************************************************************
56 *  Driver version
57 *********************************************************************/
58
59char            ixgb_driver_version[] = "1.0.6";
60char            ixgb_copyright[] = "Copyright (c) 2001-2004 Intel Corporation.";
61
62/*********************************************************************
63 *  PCI Device ID Table
64 *
65 *  Used by probe to select devices to load on
66 *  Last field stores an index into ixgb_strings
67 *  Last entry must be all 0s
68 *
69 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
70 *********************************************************************/
71
72static ixgb_vendor_info_t ixgb_vendor_info_array[] =
73{
74	/* Intel(R) PRO/10000 Network Connection */
75	{IXGB_VENDOR_ID, IXGB_DEVICE_ID_82597EX, PCI_ANY_ID, PCI_ANY_ID, 0},
76	{IXGB_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, PCI_ANY_ID, PCI_ANY_ID, 0},
77	/* required last entry */
78	{0, 0, 0, 0, 0}
79};
80
81/*********************************************************************
82 *  Table of branding strings for all supported NICs.
83 *********************************************************************/
84
85static char    *ixgb_strings[] = {
86	"Intel(R) PRO/10GbE Network Driver"
87};
88
89/*********************************************************************
90 *  Function prototypes
91 *********************************************************************/
92static int      ixgb_probe(device_t);
93static int      ixgb_attach(device_t);
94static int      ixgb_detach(device_t);
95static int      ixgb_shutdown(device_t);
96static void     ixgb_intr(void *);
97static void     ixgb_start(struct ifnet *);
98static void     ixgb_start_locked(struct ifnet *);
99static int      ixgb_ioctl(struct ifnet *, IOCTL_CMD_TYPE, caddr_t);
100static uint64_t	ixgb_get_counter(struct ifnet *, ift_counter);
101static void     ixgb_watchdog(struct adapter *);
102static void     ixgb_init(void *);
103static void     ixgb_init_locked(struct adapter *);
104static void     ixgb_stop(void *);
105static void     ixgb_media_status(struct ifnet *, struct ifmediareq *);
106static int      ixgb_media_change(struct ifnet *);
107static void     ixgb_identify_hardware(struct adapter *);
108static int      ixgb_allocate_pci_resources(struct adapter *);
109static void     ixgb_free_pci_resources(struct adapter *);
110static void     ixgb_local_timer(void *);
111static int      ixgb_hardware_init(struct adapter *);
112static int      ixgb_setup_interface(device_t, struct adapter *);
113static int      ixgb_setup_transmit_structures(struct adapter *);
114static void     ixgb_initialize_transmit_unit(struct adapter *);
115static int      ixgb_setup_receive_structures(struct adapter *);
116static void     ixgb_initialize_receive_unit(struct adapter *);
117static void     ixgb_enable_intr(struct adapter *);
118static void     ixgb_disable_intr(struct adapter *);
119static void     ixgb_free_transmit_structures(struct adapter *);
120static void     ixgb_free_receive_structures(struct adapter *);
121static void     ixgb_update_stats_counters(struct adapter *);
122static void     ixgb_clean_transmit_interrupts(struct adapter *);
123static int      ixgb_allocate_receive_structures(struct adapter *);
124static int      ixgb_allocate_transmit_structures(struct adapter *);
125static int      ixgb_process_receive_interrupts(struct adapter *, int);
126static void
127ixgb_receive_checksum(struct adapter *,
128		      struct ixgb_rx_desc * rx_desc,
129		      struct mbuf *);
130static void
131ixgb_transmit_checksum_setup(struct adapter *,
132			     struct mbuf *,
133			     u_int8_t *);
134static void     ixgb_set_promisc(struct adapter *);
135static void     ixgb_disable_promisc(struct adapter *);
136static void     ixgb_set_multi(struct adapter *);
137static void     ixgb_print_hw_stats(struct adapter *);
138static void     ixgb_print_link_status(struct adapter *);
139static int
140ixgb_get_buf(int i, struct adapter *,
141	     struct mbuf *);
142static void     ixgb_enable_vlans(struct adapter * adapter);
143static int      ixgb_encap(struct adapter * adapter, struct mbuf * m_head);
144static int      ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS);
145static int
146ixgb_dma_malloc(struct adapter *, bus_size_t,
147		struct ixgb_dma_alloc *, int);
148static void     ixgb_dma_free(struct adapter *, struct ixgb_dma_alloc *);
149#ifdef DEVICE_POLLING
150static poll_handler_t ixgb_poll;
151#endif
152
153/*********************************************************************
154 *  FreeBSD Device Interface Entry Points
155 *********************************************************************/
156
157static device_method_t ixgb_methods[] = {
158	/* Device interface */
159	DEVMETHOD(device_probe, ixgb_probe),
160	DEVMETHOD(device_attach, ixgb_attach),
161	DEVMETHOD(device_detach, ixgb_detach),
162	DEVMETHOD(device_shutdown, ixgb_shutdown),
163
164	DEVMETHOD_END
165};
166
167static driver_t ixgb_driver = {
168	"ixgb", ixgb_methods, sizeof(struct adapter),
169};
170
171static devclass_t ixgb_devclass;
172DRIVER_MODULE(ixgb, pci, ixgb_driver, ixgb_devclass, 0, 0);
173
174MODULE_DEPEND(ixgb, pci, 1, 1, 1);
175MODULE_DEPEND(ixgb, ether, 1, 1, 1);
176
177/* some defines for controlling descriptor fetches in h/w */
178#define RXDCTL_PTHRESH_DEFAULT 128	/* chip considers prefech below this */
179#define RXDCTL_HTHRESH_DEFAULT 16	/* chip will only prefetch if tail is
180					 * pushed this many descriptors from
181					 * head */
182#define RXDCTL_WTHRESH_DEFAULT 0	/* chip writes back at this many or RXT0 */
183
184
185/*********************************************************************
186 *  Device identification routine
187 *
188 *  ixgb_probe determines if the driver should be loaded on
189 *  adapter based on PCI vendor/device id of the adapter.
190 *
191 *  return 0 on success, positive on failure
192 *********************************************************************/
193
194static int
195ixgb_probe(device_t dev)
196{
197	ixgb_vendor_info_t *ent;
198
199	u_int16_t       pci_vendor_id = 0;
200	u_int16_t       pci_device_id = 0;
201	u_int16_t       pci_subvendor_id = 0;
202	u_int16_t       pci_subdevice_id = 0;
203	char            adapter_name[60];
204
205	INIT_DEBUGOUT("ixgb_probe: begin");
206
207	pci_vendor_id = pci_get_vendor(dev);
208	if (pci_vendor_id != IXGB_VENDOR_ID)
209		return (ENXIO);
210
211	pci_device_id = pci_get_device(dev);
212	pci_subvendor_id = pci_get_subvendor(dev);
213	pci_subdevice_id = pci_get_subdevice(dev);
214
215	ent = ixgb_vendor_info_array;
216	while (ent->vendor_id != 0) {
217		if ((pci_vendor_id == ent->vendor_id) &&
218		    (pci_device_id == ent->device_id) &&
219
220		    ((pci_subvendor_id == ent->subvendor_id) ||
221		     (ent->subvendor_id == PCI_ANY_ID)) &&
222
223		    ((pci_subdevice_id == ent->subdevice_id) ||
224		     (ent->subdevice_id == PCI_ANY_ID))) {
225			sprintf(adapter_name, "%s, Version - %s",
226				ixgb_strings[ent->index],
227				ixgb_driver_version);
228			device_set_desc_copy(dev, adapter_name);
229			return (BUS_PROBE_DEFAULT);
230		}
231		ent++;
232	}
233
234	return (ENXIO);
235}
236
237/*********************************************************************
238 *  Device initialization routine
239 *
240 *  The attach entry point is called when the driver is being loaded.
241 *  This routine identifies the type of hardware, allocates all resources
242 *  and initializes the hardware.
243 *
244 *  return 0 on success, positive on failure
245 *********************************************************************/
246
247static int
248ixgb_attach(device_t dev)
249{
250	struct adapter *adapter;
251	int             tsize, rsize;
252	int             error = 0;
253
254	device_printf(dev, "%s\n", ixgb_copyright);
255	INIT_DEBUGOUT("ixgb_attach: begin");
256
257	/* Allocate, clear, and link in our adapter structure */
258	if (!(adapter = device_get_softc(dev))) {
259		device_printf(dev, "adapter structure allocation failed\n");
260		return (ENOMEM);
261	}
262	bzero(adapter, sizeof(struct adapter));
263	adapter->dev = dev;
264	adapter->osdep.dev = dev;
265	IXGB_LOCK_INIT(adapter, device_get_nameunit(dev));
266
267	if (ixgb_adapter_list != NULL)
268		ixgb_adapter_list->prev = adapter;
269	adapter->next = ixgb_adapter_list;
270	ixgb_adapter_list = adapter;
271
272	/* SYSCTL APIs */
273	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
274			SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
275			OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
276			(void *)adapter, 0,
277			ixgb_sysctl_stats, "I", "Statistics");
278
279	callout_init_mtx(&adapter->timer, &adapter->mtx, 0);
280
281	/* Determine hardware revision */
282	ixgb_identify_hardware(adapter);
283
284	/* Parameters (to be read from user) */
285	adapter->num_tx_desc = IXGB_MAX_TXD;
286	adapter->num_rx_desc = IXGB_MAX_RXD;
287	adapter->tx_int_delay = TIDV;
288	adapter->rx_int_delay = RDTR;
289	adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
290
291	adapter->hw.fc.high_water = FCRTH;
292	adapter->hw.fc.low_water = FCRTL;
293	adapter->hw.fc.pause_time = FCPAUSE;
294	adapter->hw.fc.send_xon = TRUE;
295	adapter->hw.fc.type = FLOW_CONTROL;
296
297
298	/* Set the max frame size assuming standard ethernet sized frames */
299	adapter->hw.max_frame_size =
300		ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
301
302	if (ixgb_allocate_pci_resources(adapter)) {
303		device_printf(dev, "Allocation of PCI resources failed\n");
304		error = ENXIO;
305		goto err_pci;
306	}
307	tsize = IXGB_ROUNDUP(adapter->num_tx_desc *
308			     sizeof(struct ixgb_tx_desc), 4096);
309
310	/* Allocate Transmit Descriptor ring */
311	if (ixgb_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
312		device_printf(dev, "Unable to allocate TxDescriptor memory\n");
313		error = ENOMEM;
314		goto err_tx_desc;
315	}
316	adapter->tx_desc_base = (struct ixgb_tx_desc *) adapter->txdma.dma_vaddr;
317
318	rsize = IXGB_ROUNDUP(adapter->num_rx_desc *
319			     sizeof(struct ixgb_rx_desc), 4096);
320
321	/* Allocate Receive Descriptor ring */
322	if (ixgb_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
323		device_printf(dev, "Unable to allocate rx_desc memory\n");
324		error = ENOMEM;
325		goto err_rx_desc;
326	}
327	adapter->rx_desc_base = (struct ixgb_rx_desc *) adapter->rxdma.dma_vaddr;
328
329	/* Allocate multicast array memory. */
330	adapter->mta = malloc(sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS *
331	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
332	if (adapter->mta == NULL) {
333		device_printf(dev, "Can not allocate multicast setup array\n");
334		error = ENOMEM;
335		goto err_hw_init;
336	}
337
338	/* Initialize the hardware */
339	if (ixgb_hardware_init(adapter)) {
340		device_printf(dev, "Unable to initialize the hardware\n");
341		error = EIO;
342		goto err_hw_init;
343	}
344	/* Setup OS specific network interface */
345	if (ixgb_setup_interface(dev, adapter) != 0)
346		goto err_hw_init;
347
348	/* Initialize statistics */
349	ixgb_clear_hw_cntrs(&adapter->hw);
350	ixgb_update_stats_counters(adapter);
351
352	gone_in_dev(dev, 12, "ixgb(4) driver");
353	INIT_DEBUGOUT("ixgb_attach: end");
354	return (0);
355
356err_hw_init:
357	ixgb_dma_free(adapter, &adapter->rxdma);
358err_rx_desc:
359	ixgb_dma_free(adapter, &adapter->txdma);
360err_tx_desc:
361err_pci:
362	if (adapter->ifp != NULL)
363		if_free(adapter->ifp);
364	ixgb_free_pci_resources(adapter);
365	sysctl_ctx_free(&adapter->sysctl_ctx);
366	free(adapter->mta, M_DEVBUF);
367	return (error);
368
369}
370
371/*********************************************************************
372 *  Device removal routine
373 *
374 *  The detach entry point is called when the driver is being removed.
375 *  This routine stops the adapter and deallocates all the resources
376 *  that were allocated for driver operation.
377 *
378 *  return 0 on success, positive on failure
379 *********************************************************************/
380
381static int
382ixgb_detach(device_t dev)
383{
384	struct adapter *adapter = device_get_softc(dev);
385	struct ifnet   *ifp = adapter->ifp;
386
387	INIT_DEBUGOUT("ixgb_detach: begin");
388
389#ifdef DEVICE_POLLING
390	if (ifp->if_capenable & IFCAP_POLLING)
391		ether_poll_deregister(ifp);
392#endif
393
394	IXGB_LOCK(adapter);
395	adapter->in_detach = 1;
396
397	ixgb_stop(adapter);
398	IXGB_UNLOCK(adapter);
399
400#if __FreeBSD_version < 500000
401	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
402#else
403	ether_ifdetach(ifp);
404#endif
405	callout_drain(&adapter->timer);
406	ixgb_free_pci_resources(adapter);
407#if __FreeBSD_version >= 500000
408	if_free(ifp);
409#endif
410
411	/* Free Transmit Descriptor ring */
412	if (adapter->tx_desc_base) {
413		ixgb_dma_free(adapter, &adapter->txdma);
414		adapter->tx_desc_base = NULL;
415	}
416	/* Free Receive Descriptor ring */
417	if (adapter->rx_desc_base) {
418		ixgb_dma_free(adapter, &adapter->rxdma);
419		adapter->rx_desc_base = NULL;
420	}
421	/* Remove from the adapter list */
422	if (ixgb_adapter_list == adapter)
423		ixgb_adapter_list = adapter->next;
424	if (adapter->next != NULL)
425		adapter->next->prev = adapter->prev;
426	if (adapter->prev != NULL)
427		adapter->prev->next = adapter->next;
428	free(adapter->mta, M_DEVBUF);
429
430	IXGB_LOCK_DESTROY(adapter);
431	return (0);
432}
433
434/*********************************************************************
435 *
436 *  Shutdown entry point
437 *
438 **********************************************************************/
439
440static int
441ixgb_shutdown(device_t dev)
442{
443	struct adapter *adapter = device_get_softc(dev);
444	IXGB_LOCK(adapter);
445	ixgb_stop(adapter);
446	IXGB_UNLOCK(adapter);
447	return (0);
448}
449
450
451/*********************************************************************
452 *  Transmit entry point
453 *
454 *  ixgb_start is called by the stack to initiate a transmit.
455 *  The driver will remain in this routine as long as there are
456 *  packets to transmit and transmit resources are available.
457 *  In case resources are not available stack is notified and
458 *  the packet is requeued.
459 **********************************************************************/
460
461static void
462ixgb_start_locked(struct ifnet * ifp)
463{
464	struct mbuf    *m_head;
465	struct adapter *adapter = ifp->if_softc;
466
467	IXGB_LOCK_ASSERT(adapter);
468
469	if (!adapter->link_active)
470		return;
471
472	while (ifp->if_snd.ifq_head != NULL) {
473		IF_DEQUEUE(&ifp->if_snd, m_head);
474
475		if (m_head == NULL)
476			break;
477
478		if (ixgb_encap(adapter, m_head)) {
479			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
480			IF_PREPEND(&ifp->if_snd, m_head);
481			break;
482		}
483		/* Send a copy of the frame to the BPF listener */
484#if __FreeBSD_version < 500000
485		if (ifp->if_bpf)
486			bpf_mtap(ifp, m_head);
487#else
488		ETHER_BPF_MTAP(ifp, m_head);
489#endif
490		/* Set timeout in case hardware has problems transmitting */
491		adapter->tx_timer = IXGB_TX_TIMEOUT;
492
493	}
494	return;
495}
496
497static void
498ixgb_start(struct ifnet *ifp)
499{
500	struct adapter *adapter = ifp->if_softc;
501
502	IXGB_LOCK(adapter);
503	ixgb_start_locked(ifp);
504	IXGB_UNLOCK(adapter);
505	return;
506}
507
508/*********************************************************************
509 *  Ioctl entry point
510 *
511 *  ixgb_ioctl is called when the user wants to configure the
512 *  interface.
513 *
514 *  return 0 on success, positive on failure
515 **********************************************************************/
516
517static int
518ixgb_ioctl(struct ifnet * ifp, IOCTL_CMD_TYPE command, caddr_t data)
519{
520	int             mask, error = 0;
521	struct ifreq   *ifr = (struct ifreq *) data;
522	struct adapter *adapter = ifp->if_softc;
523
524	if (adapter->in_detach)
525		goto out;
526
527	switch (command) {
528	case SIOCSIFADDR:
529	case SIOCGIFADDR:
530		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
531		ether_ioctl(ifp, command, data);
532		break;
533	case SIOCSIFMTU:
534		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
535		if (ifr->ifr_mtu > IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
536			error = EINVAL;
537		} else {
538			IXGB_LOCK(adapter);
539			ifp->if_mtu = ifr->ifr_mtu;
540			adapter->hw.max_frame_size =
541				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
542
543			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
544				ixgb_init_locked(adapter);
545			IXGB_UNLOCK(adapter);
546		}
547		break;
548	case SIOCSIFFLAGS:
549		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
550		IXGB_LOCK(adapter);
551		if (ifp->if_flags & IFF_UP) {
552			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
553				ixgb_init_locked(adapter);
554			}
555			ixgb_disable_promisc(adapter);
556			ixgb_set_promisc(adapter);
557		} else {
558			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
559				ixgb_stop(adapter);
560			}
561		}
562		IXGB_UNLOCK(adapter);
563		break;
564	case SIOCADDMULTI:
565	case SIOCDELMULTI:
566		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
567		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
568			IXGB_LOCK(adapter);
569			ixgb_disable_intr(adapter);
570			ixgb_set_multi(adapter);
571			ixgb_enable_intr(adapter);
572			IXGB_UNLOCK(adapter);
573		}
574		break;
575	case SIOCSIFMEDIA:
576	case SIOCGIFMEDIA:
577		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
578		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
579		break;
580	case SIOCSIFCAP:
581		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
582		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
583#ifdef DEVICE_POLLING
584		if (mask & IFCAP_POLLING) {
585			if (ifr->ifr_reqcap & IFCAP_POLLING) {
586				error = ether_poll_register(ixgb_poll, ifp);
587				if (error)
588					return(error);
589				IXGB_LOCK(adapter);
590				ixgb_disable_intr(adapter);
591				ifp->if_capenable |= IFCAP_POLLING;
592				IXGB_UNLOCK(adapter);
593			} else {
594				error = ether_poll_deregister(ifp);
595				/* Enable interrupt even in error case */
596				IXGB_LOCK(adapter);
597				ixgb_enable_intr(adapter);
598				ifp->if_capenable &= ~IFCAP_POLLING;
599				IXGB_UNLOCK(adapter);
600			}
601		}
602#endif /* DEVICE_POLLING */
603		if (mask & IFCAP_HWCSUM) {
604			if (IFCAP_HWCSUM & ifp->if_capenable)
605				ifp->if_capenable &= ~IFCAP_HWCSUM;
606			else
607				ifp->if_capenable |= IFCAP_HWCSUM;
608			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
609				ixgb_init(adapter);
610		}
611		break;
612	default:
613		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%X)\n", (int)command);
614		error = EINVAL;
615	}
616
617out:
618	return (error);
619}
620
621/*********************************************************************
622 *  Watchdog entry point
623 *
624 *  This routine is called whenever hardware quits transmitting.
625 *
626 **********************************************************************/
627
628static void
629ixgb_watchdog(struct adapter *adapter)
630{
631	struct ifnet *ifp;
632
633	ifp = adapter->ifp;
634
635	/*
636	 * If we are in this routine because of pause frames, then don't
637	 * reset the hardware.
638	 */
639	if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_TXOFF) {
640		adapter->tx_timer = IXGB_TX_TIMEOUT;
641		return;
642	}
643	if_printf(ifp, "watchdog timeout -- resetting\n");
644
645	ixgb_stop(adapter);
646	ixgb_init_locked(adapter);
647
648
649	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
650
651	return;
652}
653
654/*********************************************************************
655 *  Init entry point
656 *
657 *  This routine is used in two ways. It is used by the stack as
658 *  init entry point in network interface structure. It is also used
659 *  by the driver as a hw/sw initialization routine to get to a
660 *  consistent state.
661 *
662 *  return 0 on success, positive on failure
663 **********************************************************************/
664
665static void
666ixgb_init_locked(struct adapter *adapter)
667{
668	struct ifnet   *ifp;
669
670	INIT_DEBUGOUT("ixgb_init: begin");
671
672	IXGB_LOCK_ASSERT(adapter);
673
674	ixgb_stop(adapter);
675	ifp = adapter->ifp;
676
677	/* Get the latest mac address, User can use a LAA */
678	bcopy(IF_LLADDR(ifp), adapter->hw.curr_mac_addr,
679	    IXGB_ETH_LENGTH_OF_ADDRESS);
680
681	/* Initialize the hardware */
682	if (ixgb_hardware_init(adapter)) {
683		if_printf(ifp, "Unable to initialize the hardware\n");
684		return;
685	}
686	ixgb_enable_vlans(adapter);
687
688	/* Prepare transmit descriptors and buffers */
689	if (ixgb_setup_transmit_structures(adapter)) {
690		if_printf(ifp, "Could not setup transmit structures\n");
691		ixgb_stop(adapter);
692		return;
693	}
694	ixgb_initialize_transmit_unit(adapter);
695
696	/* Setup Multicast table */
697	ixgb_set_multi(adapter);
698
699	/* Prepare receive descriptors and buffers */
700	if (ixgb_setup_receive_structures(adapter)) {
701		if_printf(ifp, "Could not setup receive structures\n");
702		ixgb_stop(adapter);
703		return;
704	}
705	ixgb_initialize_receive_unit(adapter);
706
707	/* Don't lose promiscuous settings */
708	ixgb_set_promisc(adapter);
709
710	ifp = adapter->ifp;
711	ifp->if_drv_flags |= IFF_DRV_RUNNING;
712	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
713
714
715	if (ifp->if_capenable & IFCAP_TXCSUM)
716		ifp->if_hwassist = IXGB_CHECKSUM_FEATURES;
717	else
718		ifp->if_hwassist = 0;
719
720
721	/* Enable jumbo frames */
722	if (ifp->if_mtu > ETHERMTU) {
723		uint32_t        temp_reg;
724		IXGB_WRITE_REG(&adapter->hw, MFS,
725			       adapter->hw.max_frame_size << IXGB_MFS_SHIFT);
726		temp_reg = IXGB_READ_REG(&adapter->hw, CTRL0);
727		temp_reg |= IXGB_CTRL0_JFE;
728		IXGB_WRITE_REG(&adapter->hw, CTRL0, temp_reg);
729	}
730	callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter);
731	ixgb_clear_hw_cntrs(&adapter->hw);
732#ifdef DEVICE_POLLING
733	/*
734	 * Only disable interrupts if we are polling, make sure they are on
735	 * otherwise.
736	 */
737	if (ifp->if_capenable & IFCAP_POLLING)
738		ixgb_disable_intr(adapter);
739	else
740#endif
741		ixgb_enable_intr(adapter);
742
743	return;
744}
745
746static void
747ixgb_init(void *arg)
748{
749	struct adapter *adapter = arg;
750
751	IXGB_LOCK(adapter);
752	ixgb_init_locked(adapter);
753	IXGB_UNLOCK(adapter);
754	return;
755}
756
757#ifdef DEVICE_POLLING
758static int
759ixgb_poll_locked(struct ifnet * ifp, enum poll_cmd cmd, int count)
760{
761	struct adapter *adapter = ifp->if_softc;
762	u_int32_t       reg_icr;
763	int		rx_npkts;
764
765	IXGB_LOCK_ASSERT(adapter);
766
767	if (cmd == POLL_AND_CHECK_STATUS) {
768		reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
769		if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
770			ixgb_check_for_link(&adapter->hw);
771			ixgb_print_link_status(adapter);
772		}
773	}
774	rx_npkts = ixgb_process_receive_interrupts(adapter, count);
775	ixgb_clean_transmit_interrupts(adapter);
776
777	if (ifp->if_snd.ifq_head != NULL)
778		ixgb_start_locked(ifp);
779	return (rx_npkts);
780}
781
782static int
783ixgb_poll(struct ifnet * ifp, enum poll_cmd cmd, int count)
784{
785	struct adapter *adapter = ifp->if_softc;
786	int rx_npkts = 0;
787
788	IXGB_LOCK(adapter);
789	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
790		rx_npkts = ixgb_poll_locked(ifp, cmd, count);
791	IXGB_UNLOCK(adapter);
792	return (rx_npkts);
793}
794#endif /* DEVICE_POLLING */
795
796/*********************************************************************
797 *
798 *  Interrupt Service routine
799 *
800 **********************************************************************/
801
802static void
803ixgb_intr(void *arg)
804{
805	u_int32_t       loop_cnt = IXGB_MAX_INTR;
806	u_int32_t       reg_icr;
807	struct ifnet   *ifp;
808	struct adapter *adapter = arg;
809	boolean_t       rxdmt0 = FALSE;
810
811	IXGB_LOCK(adapter);
812
813	ifp = adapter->ifp;
814
815#ifdef DEVICE_POLLING
816	if (ifp->if_capenable & IFCAP_POLLING) {
817		IXGB_UNLOCK(adapter);
818		return;
819	}
820#endif
821
822	reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
823	if (reg_icr == 0) {
824		IXGB_UNLOCK(adapter);
825		return;
826	}
827
828	if (reg_icr & IXGB_INT_RXDMT0)
829		rxdmt0 = TRUE;
830
831#ifdef _SV_
832	if (reg_icr & IXGB_INT_RXDMT0)
833		adapter->sv_stats.icr_rxdmt0++;
834	if (reg_icr & IXGB_INT_RXO)
835		adapter->sv_stats.icr_rxo++;
836	if (reg_icr & IXGB_INT_RXT0)
837		adapter->sv_stats.icr_rxt0++;
838	if (reg_icr & IXGB_INT_TXDW)
839		adapter->sv_stats.icr_TXDW++;
840#endif				/* _SV_ */
841
842	/* Link status change */
843	if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
844		ixgb_check_for_link(&adapter->hw);
845		ixgb_print_link_status(adapter);
846	}
847	while (loop_cnt > 0) {
848		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
849			ixgb_process_receive_interrupts(adapter, -1);
850			ixgb_clean_transmit_interrupts(adapter);
851		}
852		loop_cnt--;
853	}
854
855	if (rxdmt0 && adapter->raidc) {
856		IXGB_WRITE_REG(&adapter->hw, IMC, IXGB_INT_RXDMT0);
857		IXGB_WRITE_REG(&adapter->hw, IMS, IXGB_INT_RXDMT0);
858	}
859	if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_snd.ifq_head != NULL)
860		ixgb_start_locked(ifp);
861
862	IXGB_UNLOCK(adapter);
863	return;
864}
865
866
867/*********************************************************************
868 *
869 *  Media Ioctl callback
870 *
871 *  This routine is called whenever the user queries the status of
872 *  the interface using ifconfig.
873 *
874 **********************************************************************/
875static void
876ixgb_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
877{
878	struct adapter *adapter = ifp->if_softc;
879
880	INIT_DEBUGOUT("ixgb_media_status: begin");
881
882	ixgb_check_for_link(&adapter->hw);
883	ixgb_print_link_status(adapter);
884
885	ifmr->ifm_status = IFM_AVALID;
886	ifmr->ifm_active = IFM_ETHER;
887
888	if (!adapter->hw.link_up)
889		return;
890
891	ifmr->ifm_status |= IFM_ACTIVE;
892	ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
893
894	return;
895}
896
897/*********************************************************************
898 *
899 *  Media Ioctl callback
900 *
901 *  This routine is called when the user changes speed/duplex using
902 *  media/mediopt option with ifconfig.
903 *
904 **********************************************************************/
905static int
906ixgb_media_change(struct ifnet * ifp)
907{
908	struct adapter *adapter = ifp->if_softc;
909	struct ifmedia *ifm = &adapter->media;
910
911	INIT_DEBUGOUT("ixgb_media_change: begin");
912
913	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
914		return (EINVAL);
915
916	return (0);
917}
918
919/*********************************************************************
920 *
921 *  This routine maps the mbufs to tx descriptors.
922 *
923 *  return 0 on success, positive on failure
924 **********************************************************************/
925
926static int
927ixgb_encap(struct adapter * adapter, struct mbuf * m_head)
928{
929	u_int8_t        txd_popts;
930	int             i, j, error, nsegs;
931
932#if __FreeBSD_version < 500000
933	struct ifvlan  *ifv = NULL;
934#endif
935	bus_dma_segment_t segs[IXGB_MAX_SCATTER];
936	bus_dmamap_t	map;
937	struct ixgb_buffer *tx_buffer = NULL;
938	struct ixgb_tx_desc *current_tx_desc = NULL;
939	struct ifnet   *ifp = adapter->ifp;
940
941	/*
942	 * Force a cleanup if number of TX descriptors available hits the
943	 * threshold
944	 */
945	if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
946		ixgb_clean_transmit_interrupts(adapter);
947	}
948	if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
949		adapter->no_tx_desc_avail1++;
950		return (ENOBUFS);
951	}
952	/*
953	 * Map the packet for DMA.
954	 */
955	if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) {
956		adapter->no_tx_map_avail++;
957		return (ENOMEM);
958	}
959	error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs,
960					&nsegs, BUS_DMA_NOWAIT);
961	if (error != 0) {
962		adapter->no_tx_dma_setup++;
963		if_printf(ifp, "ixgb_encap: bus_dmamap_load_mbuf failed; "
964		       "error %u\n", error);
965		bus_dmamap_destroy(adapter->txtag, map);
966		return (error);
967	}
968	KASSERT(nsegs != 0, ("ixgb_encap: empty packet"));
969
970	if (nsegs > adapter->num_tx_desc_avail) {
971		adapter->no_tx_desc_avail2++;
972		bus_dmamap_destroy(adapter->txtag, map);
973		return (ENOBUFS);
974	}
975	if (ifp->if_hwassist > 0) {
976		ixgb_transmit_checksum_setup(adapter, m_head,
977					     &txd_popts);
978	} else
979		txd_popts = 0;
980
981	/* Find out if we are in vlan mode */
982#if __FreeBSD_version < 500000
983	if ((m_head->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
984	    m_head->m_pkthdr.rcvif != NULL &&
985	    m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
986		ifv = m_head->m_pkthdr.rcvif->if_softc;
987#elseif __FreeBSD_version < 700000
988	mtag = VLAN_OUTPUT_TAG(ifp, m_head);
989#endif
990	i = adapter->next_avail_tx_desc;
991	for (j = 0; j < nsegs; j++) {
992		tx_buffer = &adapter->tx_buffer_area[i];
993		current_tx_desc = &adapter->tx_desc_base[i];
994
995		current_tx_desc->buff_addr = htole64(segs[j].ds_addr);
996		current_tx_desc->cmd_type_len = (adapter->txd_cmd | segs[j].ds_len);
997		current_tx_desc->popts = txd_popts;
998		if (++i == adapter->num_tx_desc)
999			i = 0;
1000
1001		tx_buffer->m_head = NULL;
1002	}
1003
1004	adapter->num_tx_desc_avail -= nsegs;
1005	adapter->next_avail_tx_desc = i;
1006
1007#if __FreeBSD_version < 500000
1008	if (ifv != NULL) {
1009		/* Set the vlan id */
1010		current_tx_desc->vlan = ifv->ifv_tag;
1011#elseif __FreeBSD_version < 700000
1012	if (mtag != NULL) {
1013		/* Set the vlan id */
1014		current_tx_desc->vlan = VLAN_TAG_VALUE(mtag);
1015#else
1016	if (m_head->m_flags & M_VLANTAG) {
1017		current_tx_desc->vlan = m_head->m_pkthdr.ether_vtag;
1018#endif
1019
1020		/* Tell hardware to add tag */
1021		current_tx_desc->cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
1022	}
1023	tx_buffer->m_head = m_head;
1024	tx_buffer->map = map;
1025	bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1026
1027	/*
1028	 * Last Descriptor of Packet needs End Of Packet (EOP)
1029	 */
1030	current_tx_desc->cmd_type_len |= (IXGB_TX_DESC_CMD_EOP);
1031
1032	/*
1033	 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
1034	 * that this frame is available to transmit.
1035	 */
1036	IXGB_WRITE_REG(&adapter->hw, TDT, i);
1037
1038	return (0);
1039}
1040
1041static void
1042ixgb_set_promisc(struct adapter * adapter)
1043{
1044
1045	u_int32_t       reg_rctl;
1046	struct ifnet   *ifp = adapter->ifp;
1047
1048	reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1049
1050	if (ifp->if_flags & IFF_PROMISC) {
1051		reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1052		IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1053	} else if (ifp->if_flags & IFF_ALLMULTI) {
1054		reg_rctl |= IXGB_RCTL_MPE;
1055		reg_rctl &= ~IXGB_RCTL_UPE;
1056		IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1057	}
1058	return;
1059}
1060
1061static void
1062ixgb_disable_promisc(struct adapter * adapter)
1063{
1064	u_int32_t       reg_rctl;
1065
1066	reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1067
1068	reg_rctl &= (~IXGB_RCTL_UPE);
1069	reg_rctl &= (~IXGB_RCTL_MPE);
1070	IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1071
1072	return;
1073}
1074
1075
1076/*********************************************************************
1077 *  Multicast Update
1078 *
1079 *  This routine is called whenever multicast address list is updated.
1080 *
1081 **********************************************************************/
1082
1083static void
1084ixgb_set_multi(struct adapter * adapter)
1085{
1086	u_int32_t       reg_rctl = 0;
1087	u_int8_t        *mta;
1088	struct ifmultiaddr *ifma;
1089	int             mcnt = 0;
1090	struct ifnet   *ifp = adapter->ifp;
1091
1092	IOCTL_DEBUGOUT("ixgb_set_multi: begin");
1093
1094	mta = adapter->mta;
1095	bzero(mta, sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS *
1096	    MAX_NUM_MULTICAST_ADDRESSES);
1097
1098	if_maddr_rlock(ifp);
1099#if __FreeBSD_version < 500000
1100	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1101#else
1102	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1103#endif
1104		if (ifma->ifma_addr->sa_family != AF_LINK)
1105			continue;
1106
1107		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1108		      &mta[mcnt * IXGB_ETH_LENGTH_OF_ADDRESS], IXGB_ETH_LENGTH_OF_ADDRESS);
1109		mcnt++;
1110	}
1111	if_maddr_runlock(ifp);
1112
1113	if (mcnt > MAX_NUM_MULTICAST_ADDRESSES) {
1114		reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1115		reg_rctl |= IXGB_RCTL_MPE;
1116		IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1117	} else
1118		ixgb_mc_addr_list_update(&adapter->hw, mta, mcnt, 0);
1119
1120	return;
1121}
1122
1123
1124/*********************************************************************
1125 *  Timer routine
1126 *
1127 *  This routine checks for link status and updates statistics.
1128 *
1129 **********************************************************************/
1130
1131static void
1132ixgb_local_timer(void *arg)
1133{
1134	struct ifnet   *ifp;
1135	struct adapter *adapter = arg;
1136	ifp = adapter->ifp;
1137
1138	IXGB_LOCK_ASSERT(adapter);
1139
1140	ixgb_check_for_link(&adapter->hw);
1141	ixgb_print_link_status(adapter);
1142	ixgb_update_stats_counters(adapter);
1143	if (ixgb_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1144		ixgb_print_hw_stats(adapter);
1145	}
1146	if (adapter->tx_timer != 0 && --adapter->tx_timer == 0)
1147		ixgb_watchdog(adapter);
1148	callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter);
1149}
1150
1151static void
1152ixgb_print_link_status(struct adapter * adapter)
1153{
1154	if (adapter->hw.link_up) {
1155		if (!adapter->link_active) {
1156			if_printf(adapter->ifp, "Link is up %d Mbps %s \n",
1157			       10000,
1158			       "Full Duplex");
1159			adapter->link_active = 1;
1160		}
1161	} else {
1162		if (adapter->link_active) {
1163			if_printf(adapter->ifp, "Link is Down \n");
1164			adapter->link_active = 0;
1165		}
1166	}
1167
1168	return;
1169}
1170
1171
1172
1173/*********************************************************************
1174 *
1175 *  This routine disables all traffic on the adapter by issuing a
1176 *  global reset on the MAC and deallocates TX/RX buffers.
1177 *
1178 **********************************************************************/
1179
1180static void
1181ixgb_stop(void *arg)
1182{
1183	struct ifnet   *ifp;
1184	struct adapter *adapter = arg;
1185	ifp = adapter->ifp;
1186
1187	IXGB_LOCK_ASSERT(adapter);
1188
1189	INIT_DEBUGOUT("ixgb_stop: begin\n");
1190	ixgb_disable_intr(adapter);
1191	adapter->hw.adapter_stopped = FALSE;
1192	ixgb_adapter_stop(&adapter->hw);
1193	callout_stop(&adapter->timer);
1194	ixgb_free_transmit_structures(adapter);
1195	ixgb_free_receive_structures(adapter);
1196
1197	/* Tell the stack that the interface is no longer active */
1198	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1199	adapter->tx_timer = 0;
1200
1201	return;
1202}
1203
1204
1205/*********************************************************************
1206 *
1207 *  Determine hardware revision.
1208 *
1209 **********************************************************************/
1210static void
1211ixgb_identify_hardware(struct adapter * adapter)
1212{
1213	device_t        dev = adapter->dev;
1214
1215	/* Make sure our PCI config space has the necessary stuff set */
1216	pci_enable_busmaster(dev);
1217	adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1218
1219	/* Save off the information about this board */
1220	adapter->hw.vendor_id = pci_get_vendor(dev);
1221	adapter->hw.device_id = pci_get_device(dev);
1222	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1223	adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
1224	adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
1225
1226	/* Set MacType, etc. based on this PCI info */
1227	switch (adapter->hw.device_id) {
1228	case IXGB_DEVICE_ID_82597EX:
1229	case IXGB_DEVICE_ID_82597EX_SR:
1230		adapter->hw.mac_type = ixgb_82597;
1231		break;
1232	default:
1233		INIT_DEBUGOUT1("Unknown device if 0x%x", adapter->hw.device_id);
1234		device_printf(dev, "unsupported device id 0x%x\n",
1235		    adapter->hw.device_id);
1236	}
1237
1238	return;
1239}
1240
1241static int
1242ixgb_allocate_pci_resources(struct adapter * adapter)
1243{
1244	int             rid;
1245	device_t        dev = adapter->dev;
1246
1247	rid = IXGB_MMBA;
1248	adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1249						 &rid,
1250						 RF_ACTIVE);
1251	if (!(adapter->res_memory)) {
1252		device_printf(dev, "Unable to allocate bus resource: memory\n");
1253		return (ENXIO);
1254	}
1255	adapter->osdep.mem_bus_space_tag =
1256		rman_get_bustag(adapter->res_memory);
1257	adapter->osdep.mem_bus_space_handle =
1258		rman_get_bushandle(adapter->res_memory);
1259	adapter->hw.hw_addr = (uint8_t *) & adapter->osdep.mem_bus_space_handle;
1260
1261	rid = 0x0;
1262	adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1263							&rid,
1264							RF_SHAREABLE | RF_ACTIVE);
1265	if (!(adapter->res_interrupt)) {
1266		device_printf(dev,
1267		    "Unable to allocate bus resource: interrupt\n");
1268		return (ENXIO);
1269	}
1270	if (bus_setup_intr(dev, adapter->res_interrupt,
1271			   INTR_TYPE_NET | INTR_MPSAFE,
1272			   NULL, (void (*) (void *))ixgb_intr, adapter,
1273			   &adapter->int_handler_tag)) {
1274		device_printf(dev, "Error registering interrupt handler!\n");
1275		return (ENXIO);
1276	}
1277	adapter->hw.back = &adapter->osdep;
1278
1279	return (0);
1280}
1281
1282static void
1283ixgb_free_pci_resources(struct adapter * adapter)
1284{
1285	device_t        dev = adapter->dev;
1286
1287	if (adapter->res_interrupt != NULL) {
1288		bus_teardown_intr(dev, adapter->res_interrupt,
1289				  adapter->int_handler_tag);
1290		bus_release_resource(dev, SYS_RES_IRQ, 0,
1291				     adapter->res_interrupt);
1292	}
1293	if (adapter->res_memory != NULL) {
1294		bus_release_resource(dev, SYS_RES_MEMORY, IXGB_MMBA,
1295				     adapter->res_memory);
1296	}
1297	if (adapter->res_ioport != NULL) {
1298		bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
1299				     adapter->res_ioport);
1300	}
1301	return;
1302}
1303
1304/*********************************************************************
1305 *
1306 *  Initialize the hardware to a configuration as specified by the
1307 *  adapter structure. The controller is reset, the EEPROM is
1308 *  verified, the MAC address is set, then the shared initialization
1309 *  routines are called.
1310 *
1311 **********************************************************************/
1312static int
1313ixgb_hardware_init(struct adapter * adapter)
1314{
1315	/* Issue a global reset */
1316	adapter->hw.adapter_stopped = FALSE;
1317	ixgb_adapter_stop(&adapter->hw);
1318
1319	/* Make sure we have a good EEPROM before we read from it */
1320	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
1321		device_printf(adapter->dev,
1322		    "The EEPROM Checksum Is Not Valid\n");
1323		return (EIO);
1324	}
1325	if (!ixgb_init_hw(&adapter->hw)) {
1326		device_printf(adapter->dev, "Hardware Initialization Failed");
1327		return (EIO);
1328	}
1329
1330	return (0);
1331}
1332
1333/*********************************************************************
1334 *
1335 *  Setup networking device structure and register an interface.
1336 *
1337 **********************************************************************/
1338static int
1339ixgb_setup_interface(device_t dev, struct adapter * adapter)
1340{
1341	struct ifnet   *ifp;
1342	INIT_DEBUGOUT("ixgb_setup_interface: begin");
1343
1344	ifp = adapter->ifp = if_alloc(IFT_ETHER);
1345	if (ifp == NULL) {
1346		device_printf(dev, "can not allocate ifnet structure\n");
1347		return (-1);
1348	}
1349#if __FreeBSD_version >= 502000
1350	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1351#else
1352	ifp->if_unit = device_get_unit(dev);
1353	ifp->if_name = "ixgb";
1354#endif
1355	ifp->if_baudrate = 1000000000;
1356	ifp->if_init = ixgb_init;
1357	ifp->if_softc = adapter;
1358	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1359	ifp->if_ioctl = ixgb_ioctl;
1360	ifp->if_start = ixgb_start;
1361	ifp->if_get_counter = ixgb_get_counter;
1362	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;
1363
1364#if __FreeBSD_version < 500000
1365	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
1366#else
1367	ether_ifattach(ifp, adapter->hw.curr_mac_addr);
1368#endif
1369
1370	ifp->if_capabilities = IFCAP_HWCSUM;
1371
1372	/*
1373	 * Tell the upper layer(s) we support long frames.
1374	 */
1375	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1376
1377#if __FreeBSD_version >= 500000
1378	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1379#endif
1380
1381	ifp->if_capenable = ifp->if_capabilities;
1382
1383#ifdef DEVICE_POLLING
1384	ifp->if_capabilities |= IFCAP_POLLING;
1385#endif
1386
1387	/*
1388	 * Specify the media types supported by this adapter and register
1389	 * callbacks to update media and link information
1390	 */
1391	ifmedia_init(&adapter->media, IFM_IMASK, ixgb_media_change,
1392		     ixgb_media_status);
1393	ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
1394		    0, NULL);
1395	ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
1396		    0, NULL);
1397	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1398	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1399
1400	return (0);
1401}
1402
1403/********************************************************************
1404 * Manage DMA'able memory.
1405 *******************************************************************/
1406static void
1407ixgb_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
1408{
1409	if (error)
1410		return;
1411	*(bus_addr_t *) arg = segs->ds_addr;
1412	return;
1413}
1414
1415static int
1416ixgb_dma_malloc(struct adapter * adapter, bus_size_t size,
1417		struct ixgb_dma_alloc * dma, int mapflags)
1418{
1419	device_t dev;
1420	int             r;
1421
1422	dev = adapter->dev;
1423	r = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
1424			       PAGE_SIZE, 0,	/* alignment, bounds */
1425			       BUS_SPACE_MAXADDR,	/* lowaddr */
1426			       BUS_SPACE_MAXADDR,	/* highaddr */
1427			       NULL, NULL,	/* filter, filterarg */
1428			       size,	/* maxsize */
1429			       1,	/* nsegments */
1430			       size,	/* maxsegsize */
1431			       BUS_DMA_ALLOCNOW,	/* flags */
1432#if __FreeBSD_version >= 502000
1433			       NULL,	/* lockfunc */
1434			       NULL,	/* lockfuncarg */
1435#endif
1436			       &dma->dma_tag);
1437	if (r != 0) {
1438		device_printf(dev, "ixgb_dma_malloc: bus_dma_tag_create failed; "
1439		       "error %u\n", r);
1440		goto fail_0;
1441	}
1442	r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
1443			     BUS_DMA_NOWAIT, &dma->dma_map);
1444	if (r != 0) {
1445		device_printf(dev, "ixgb_dma_malloc: bus_dmamem_alloc failed; "
1446		       "error %u\n", r);
1447		goto fail_1;
1448	}
1449	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1450			    size,
1451			    ixgb_dmamap_cb,
1452			    &dma->dma_paddr,
1453			    mapflags | BUS_DMA_NOWAIT);
1454	if (r != 0) {
1455		device_printf(dev, "ixgb_dma_malloc: bus_dmamap_load failed; "
1456		       "error %u\n", r);
1457		goto fail_2;
1458	}
1459	dma->dma_size = size;
1460	return (0);
1461fail_2:
1462	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1463fail_1:
1464	bus_dma_tag_destroy(dma->dma_tag);
1465fail_0:
1466	dma->dma_tag = NULL;
1467	return (r);
1468}
1469
1470
1471
1472static void
1473ixgb_dma_free(struct adapter * adapter, struct ixgb_dma_alloc * dma)
1474{
1475	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1476	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1477	bus_dma_tag_destroy(dma->dma_tag);
1478}
1479
1480/*********************************************************************
1481 *
1482 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1483 *  the information needed to transmit a packet on the wire.
1484 *
1485 **********************************************************************/
1486static int
1487ixgb_allocate_transmit_structures(struct adapter * adapter)
1488{
1489	if (!(adapter->tx_buffer_area =
1490	      (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
1491					    adapter->num_tx_desc, M_DEVBUF,
1492					    M_NOWAIT | M_ZERO))) {
1493		device_printf(adapter->dev,
1494		    "Unable to allocate tx_buffer memory\n");
1495		return ENOMEM;
1496	}
1497	bzero(adapter->tx_buffer_area,
1498	      sizeof(struct ixgb_buffer) * adapter->num_tx_desc);
1499
1500	return 0;
1501}
1502
1503/*********************************************************************
1504 *
1505 *  Allocate and initialize transmit structures.
1506 *
1507 **********************************************************************/
1508static int
1509ixgb_setup_transmit_structures(struct adapter * adapter)
1510{
1511	/*
1512	 * Setup DMA descriptor areas.
1513	 */
1514	if (bus_dma_tag_create(bus_get_dma_tag(adapter->dev),	/* parent */
1515			       PAGE_SIZE, 0,	/* alignment, bounds */
1516			       BUS_SPACE_MAXADDR,	/* lowaddr */
1517			       BUS_SPACE_MAXADDR,	/* highaddr */
1518			       NULL, NULL,	/* filter, filterarg */
1519			       MCLBYTES * IXGB_MAX_SCATTER,	/* maxsize */
1520			       IXGB_MAX_SCATTER,	/* nsegments */
1521			       MCLBYTES,	/* maxsegsize */
1522			       BUS_DMA_ALLOCNOW,	/* flags */
1523#if __FreeBSD_version >= 502000
1524			       NULL,	/* lockfunc */
1525			       NULL,	/* lockfuncarg */
1526#endif
1527			       &adapter->txtag)) {
1528		device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
1529		return (ENOMEM);
1530	}
1531	if (ixgb_allocate_transmit_structures(adapter))
1532		return ENOMEM;
1533
1534	bzero((void *)adapter->tx_desc_base,
1535	      (sizeof(struct ixgb_tx_desc)) * adapter->num_tx_desc);
1536
1537	adapter->next_avail_tx_desc = 0;
1538	adapter->oldest_used_tx_desc = 0;
1539
1540	/* Set number of descriptors available */
1541	adapter->num_tx_desc_avail = adapter->num_tx_desc;
1542
1543	/* Set checksum context */
1544	adapter->active_checksum_context = OFFLOAD_NONE;
1545
1546	return 0;
1547}
1548
1549/*********************************************************************
1550 *
1551 *  Enable transmit unit.
1552 *
1553 **********************************************************************/
1554static void
1555ixgb_initialize_transmit_unit(struct adapter * adapter)
1556{
1557	u_int32_t       reg_tctl;
1558	u_int64_t       tdba = adapter->txdma.dma_paddr;
1559
1560	/* Setup the Base and Length of the Tx Descriptor Ring */
1561	IXGB_WRITE_REG(&adapter->hw, TDBAL,
1562		       (tdba & 0x00000000ffffffffULL));
1563	IXGB_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));
1564	IXGB_WRITE_REG(&adapter->hw, TDLEN,
1565		       adapter->num_tx_desc *
1566		       sizeof(struct ixgb_tx_desc));
1567
1568	/* Setup the HW Tx Head and Tail descriptor pointers */
1569	IXGB_WRITE_REG(&adapter->hw, TDH, 0);
1570	IXGB_WRITE_REG(&adapter->hw, TDT, 0);
1571
1572
1573	HW_DEBUGOUT2("Base = %x, Length = %x\n",
1574		     IXGB_READ_REG(&adapter->hw, TDBAL),
1575		     IXGB_READ_REG(&adapter->hw, TDLEN));
1576
1577	IXGB_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);
1578
1579
1580	/* Program the Transmit Control Register */
1581	reg_tctl = IXGB_READ_REG(&adapter->hw, TCTL);
1582	reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
1583	IXGB_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
1584
1585	/* Setup Transmit Descriptor Settings for this adapter */
1586	adapter->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS;
1587
1588	if (adapter->tx_int_delay > 0)
1589		adapter->txd_cmd |= IXGB_TX_DESC_CMD_IDE;
1590	return;
1591}
1592
1593/*********************************************************************
1594 *
1595 *  Free all transmit related data structures.
1596 *
1597 **********************************************************************/
1598static void
1599ixgb_free_transmit_structures(struct adapter * adapter)
1600{
1601	struct ixgb_buffer *tx_buffer;
1602	int             i;
1603
1604	INIT_DEBUGOUT("free_transmit_structures: begin");
1605
1606	if (adapter->tx_buffer_area != NULL) {
1607		tx_buffer = adapter->tx_buffer_area;
1608		for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
1609			if (tx_buffer->m_head != NULL) {
1610				bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1611				bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1612				m_freem(tx_buffer->m_head);
1613			}
1614			tx_buffer->m_head = NULL;
1615		}
1616	}
1617	if (adapter->tx_buffer_area != NULL) {
1618		free(adapter->tx_buffer_area, M_DEVBUF);
1619		adapter->tx_buffer_area = NULL;
1620	}
1621	if (adapter->txtag != NULL) {
1622		bus_dma_tag_destroy(adapter->txtag);
1623		adapter->txtag = NULL;
1624	}
1625	return;
1626}
1627
1628/*********************************************************************
1629 *
1630 *  The offload context needs to be set when we transfer the first
1631 *  packet of a particular protocol (TCP/UDP). We change the
1632 *  context only if the protocol type changes.
1633 *
1634 **********************************************************************/
1635static void
1636ixgb_transmit_checksum_setup(struct adapter * adapter,
1637			     struct mbuf * mp,
1638			     u_int8_t * txd_popts)
1639{
1640	struct ixgb_context_desc *TXD;
1641	struct ixgb_buffer *tx_buffer;
1642	int             curr_txd;
1643
1644	if (mp->m_pkthdr.csum_flags) {
1645
1646		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
1647			*txd_popts = IXGB_TX_DESC_POPTS_TXSM;
1648			if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
1649				return;
1650			else
1651				adapter->active_checksum_context = OFFLOAD_TCP_IP;
1652		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
1653			*txd_popts = IXGB_TX_DESC_POPTS_TXSM;
1654			if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
1655				return;
1656			else
1657				adapter->active_checksum_context = OFFLOAD_UDP_IP;
1658		} else {
1659			*txd_popts = 0;
1660			return;
1661		}
1662	} else {
1663		*txd_popts = 0;
1664		return;
1665	}
1666
1667	/*
1668	 * If we reach this point, the checksum offload context needs to be
1669	 * reset.
1670	 */
1671	curr_txd = adapter->next_avail_tx_desc;
1672	tx_buffer = &adapter->tx_buffer_area[curr_txd];
1673	TXD = (struct ixgb_context_desc *) & adapter->tx_desc_base[curr_txd];
1674
1675
1676	TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip);
1677	TXD->tucse = 0;
1678
1679	TXD->mss = 0;
1680
1681	if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
1682		TXD->tucso =
1683			ENET_HEADER_SIZE + sizeof(struct ip) +
1684			offsetof(struct tcphdr, th_sum);
1685	} else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
1686		TXD->tucso =
1687			ENET_HEADER_SIZE + sizeof(struct ip) +
1688			offsetof(struct udphdr, uh_sum);
1689	}
1690	TXD->cmd_type_len = IXGB_CONTEXT_DESC_CMD_TCP | IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE;
1691
1692	tx_buffer->m_head = NULL;
1693
1694	if (++curr_txd == adapter->num_tx_desc)
1695		curr_txd = 0;
1696
1697	adapter->num_tx_desc_avail--;
1698	adapter->next_avail_tx_desc = curr_txd;
1699	return;
1700}
1701
1702/**********************************************************************
1703 *
1704 *  Examine each tx_buffer in the used queue. If the hardware is done
1705 *  processing the packet then free associated resources. The
1706 *  tx_buffer is put back on the free queue.
1707 *
1708 **********************************************************************/
1709static void
1710ixgb_clean_transmit_interrupts(struct adapter * adapter)
1711{
1712	int             i, num_avail;
1713	struct ixgb_buffer *tx_buffer;
1714	struct ixgb_tx_desc *tx_desc;
1715
1716	IXGB_LOCK_ASSERT(adapter);
1717
1718	if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
1719		return;
1720
1721#ifdef _SV_
1722	adapter->clean_tx_interrupts++;
1723#endif
1724	num_avail = adapter->num_tx_desc_avail;
1725	i = adapter->oldest_used_tx_desc;
1726
1727	tx_buffer = &adapter->tx_buffer_area[i];
1728	tx_desc = &adapter->tx_desc_base[i];
1729
1730	while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) {
1731
1732		tx_desc->status = 0;
1733		num_avail++;
1734
1735		if (tx_buffer->m_head) {
1736			bus_dmamap_sync(adapter->txtag, tx_buffer->map,
1737					BUS_DMASYNC_POSTWRITE);
1738			bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1739			bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1740			m_freem(tx_buffer->m_head);
1741			tx_buffer->m_head = NULL;
1742		}
1743		if (++i == adapter->num_tx_desc)
1744			i = 0;
1745
1746		tx_buffer = &adapter->tx_buffer_area[i];
1747		tx_desc = &adapter->tx_desc_base[i];
1748	}
1749
1750	adapter->oldest_used_tx_desc = i;
1751
1752	/*
1753	 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
1754	 * it is OK to send packets. If there are no pending descriptors,
1755	 * clear the timeout. Otherwise, if some descriptors have been freed,
1756	 * restart the timeout.
1757	 */
1758	if (num_avail > IXGB_TX_CLEANUP_THRESHOLD) {
1759		struct ifnet   *ifp = adapter->ifp;
1760
1761		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1762		if (num_avail == adapter->num_tx_desc)
1763			adapter->tx_timer = 0;
1764		else if (num_avail == adapter->num_tx_desc_avail)
1765			adapter->tx_timer = IXGB_TX_TIMEOUT;
1766	}
1767	adapter->num_tx_desc_avail = num_avail;
1768	return;
1769}
1770
1771
1772/*********************************************************************
1773 *
1774 *  Get a buffer from system mbuf buffer pool.
1775 *
1776 **********************************************************************/
1777static int
1778ixgb_get_buf(int i, struct adapter * adapter,
1779	     struct mbuf * nmp)
1780{
1781	struct mbuf    *mp = nmp;
1782	struct ixgb_buffer *rx_buffer;
1783	struct ifnet   *ifp;
1784	bus_addr_t      paddr;
1785	int             error;
1786
1787	ifp = adapter->ifp;
1788
1789	if (mp == NULL) {
1790
1791		mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1792
1793		if (mp == NULL) {
1794			adapter->mbuf_alloc_failed++;
1795			return (ENOBUFS);
1796		}
1797		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1798	} else {
1799		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1800		mp->m_data = mp->m_ext.ext_buf;
1801		mp->m_next = NULL;
1802	}
1803
1804	if (ifp->if_mtu <= ETHERMTU) {
1805		m_adj(mp, ETHER_ALIGN);
1806	}
1807	rx_buffer = &adapter->rx_buffer_area[i];
1808
1809	/*
1810	 * Using memory from the mbuf cluster pool, invoke the bus_dma
1811	 * machinery to arrange the memory mapping.
1812	 */
1813	error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
1814				mtod(mp, void *), mp->m_len,
1815				ixgb_dmamap_cb, &paddr, 0);
1816	if (error) {
1817		m_free(mp);
1818		return (error);
1819	}
1820	rx_buffer->m_head = mp;
1821	adapter->rx_desc_base[i].buff_addr = htole64(paddr);
1822	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
1823
1824	return (0);
1825}
1826
1827/*********************************************************************
1828 *
1829 *  Allocate memory for rx_buffer structures. Since we use one
1830 *  rx_buffer per received packet, the maximum number of rx_buffer's
1831 *  that we'll need is equal to the number of receive descriptors
1832 *  that we've allocated.
1833 *
1834 **********************************************************************/
1835static int
1836ixgb_allocate_receive_structures(struct adapter * adapter)
1837{
1838	int             i, error;
1839	struct ixgb_buffer *rx_buffer;
1840
1841	if (!(adapter->rx_buffer_area =
1842	      (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
1843					    adapter->num_rx_desc, M_DEVBUF,
1844					    M_NOWAIT | M_ZERO))) {
1845		device_printf(adapter->dev,
1846		    "Unable to allocate rx_buffer memory\n");
1847		return (ENOMEM);
1848	}
1849	bzero(adapter->rx_buffer_area,
1850	      sizeof(struct ixgb_buffer) * adapter->num_rx_desc);
1851
1852	error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),/* parent */
1853				   PAGE_SIZE, 0,	/* alignment, bounds */
1854				   BUS_SPACE_MAXADDR,	/* lowaddr */
1855				   BUS_SPACE_MAXADDR,	/* highaddr */
1856				   NULL, NULL,	/* filter, filterarg */
1857				   MCLBYTES,	/* maxsize */
1858				   1,	/* nsegments */
1859				   MCLBYTES,	/* maxsegsize */
1860				   BUS_DMA_ALLOCNOW,	/* flags */
1861#if __FreeBSD_version >= 502000
1862				   NULL,	/* lockfunc */
1863				   NULL,	/* lockfuncarg */
1864#endif
1865				   &adapter->rxtag);
1866	if (error != 0) {
1867		device_printf(adapter->dev, "ixgb_allocate_receive_structures: "
1868		       "bus_dma_tag_create failed; error %u\n",
1869		       error);
1870		goto fail_0;
1871	}
1872	rx_buffer = adapter->rx_buffer_area;
1873	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
1874		error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
1875					  &rx_buffer->map);
1876		if (error != 0) {
1877			device_printf(adapter->dev,
1878			       "ixgb_allocate_receive_structures: "
1879			       "bus_dmamap_create failed; error %u\n",
1880			       error);
1881			goto fail_1;
1882		}
1883	}
1884
1885	for (i = 0; i < adapter->num_rx_desc; i++) {
1886		if (ixgb_get_buf(i, adapter, NULL) == ENOBUFS) {
1887			adapter->rx_buffer_area[i].m_head = NULL;
1888			adapter->rx_desc_base[i].buff_addr = 0;
1889			return (ENOBUFS);
1890		}
1891	}
1892
1893	return (0);
1894fail_1:
1895	bus_dma_tag_destroy(adapter->rxtag);
1896fail_0:
1897	adapter->rxtag = NULL;
1898	free(adapter->rx_buffer_area, M_DEVBUF);
1899	adapter->rx_buffer_area = NULL;
1900	return (error);
1901}
1902
1903/*********************************************************************
1904 *
1905 *  Allocate and initialize receive structures.
1906 *
1907 **********************************************************************/
1908static int
1909ixgb_setup_receive_structures(struct adapter * adapter)
1910{
1911	bzero((void *)adapter->rx_desc_base,
1912	      (sizeof(struct ixgb_rx_desc)) * adapter->num_rx_desc);
1913
1914	if (ixgb_allocate_receive_structures(adapter))
1915		return ENOMEM;
1916
1917	/* Setup our descriptor pointers */
1918	adapter->next_rx_desc_to_check = 0;
1919	adapter->next_rx_desc_to_use = 0;
1920	return (0);
1921}
1922
1923/*********************************************************************
1924 *
1925 *  Enable receive unit.
1926 *
1927 **********************************************************************/
1928static void
1929ixgb_initialize_receive_unit(struct adapter * adapter)
1930{
1931	u_int32_t       reg_rctl;
1932	u_int32_t       reg_rxcsum;
1933	u_int32_t       reg_rxdctl;
1934	struct ifnet   *ifp;
1935	u_int64_t       rdba = adapter->rxdma.dma_paddr;
1936
1937	ifp = adapter->ifp;
1938
1939	/*
1940	 * Make sure receives are disabled while setting up the descriptor
1941	 * ring
1942	 */
1943	reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1944	IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN);
1945
1946	/* Set the Receive Delay Timer Register */
1947	IXGB_WRITE_REG(&adapter->hw, RDTR,
1948		       adapter->rx_int_delay);
1949
1950
1951	/* Setup the Base and Length of the Rx Descriptor Ring */
1952	IXGB_WRITE_REG(&adapter->hw, RDBAL,
1953		       (rdba & 0x00000000ffffffffULL));
1954	IXGB_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
1955	IXGB_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
1956		       sizeof(struct ixgb_rx_desc));
1957
1958	/* Setup the HW Rx Head and Tail Descriptor Pointers */
1959	IXGB_WRITE_REG(&adapter->hw, RDH, 0);
1960
1961	IXGB_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
1962
1963
1964
1965	reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
1966		| RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
1967		| RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
1968	IXGB_WRITE_REG(&adapter->hw, RXDCTL, reg_rxdctl);
1969
1970
1971	adapter->raidc = 1;
1972	if (adapter->raidc) {
1973		uint32_t        raidc;
1974		uint8_t         poll_threshold;
1975#define IXGB_RAIDC_POLL_DEFAULT 120
1976
1977		poll_threshold = ((adapter->num_rx_desc - 1) >> 3);
1978		poll_threshold >>= 1;
1979		poll_threshold &= 0x3F;
1980		raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE |
1981			(IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) |
1982			(adapter->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) |
1983			poll_threshold;
1984		IXGB_WRITE_REG(&adapter->hw, RAIDC, raidc);
1985	}
1986	/* Enable Receive Checksum Offload for TCP and UDP ? */
1987	if (ifp->if_capenable & IFCAP_RXCSUM) {
1988		reg_rxcsum = IXGB_READ_REG(&adapter->hw, RXCSUM);
1989		reg_rxcsum |= IXGB_RXCSUM_TUOFL;
1990		IXGB_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
1991	}
1992	/* Setup the Receive Control Register */
1993	reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1994	reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
1995	reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC |
1996		IXGB_RCTL_CFF |
1997		(adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
1998
1999	switch (adapter->rx_buffer_len) {
2000	default:
2001	case IXGB_RXBUFFER_2048:
2002		reg_rctl |= IXGB_RCTL_BSIZE_2048;
2003		break;
2004	case IXGB_RXBUFFER_4096:
2005		reg_rctl |= IXGB_RCTL_BSIZE_4096;
2006		break;
2007	case IXGB_RXBUFFER_8192:
2008		reg_rctl |= IXGB_RCTL_BSIZE_8192;
2009		break;
2010	case IXGB_RXBUFFER_16384:
2011		reg_rctl |= IXGB_RCTL_BSIZE_16384;
2012		break;
2013	}
2014
2015	reg_rctl |= IXGB_RCTL_RXEN;
2016
2017
2018	/* Enable Receives */
2019	IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2020
2021	return;
2022}
2023
2024/*********************************************************************
2025 *
2026 *  Free receive related data structures.
2027 *
2028 **********************************************************************/
2029static void
2030ixgb_free_receive_structures(struct adapter * adapter)
2031{
2032	struct ixgb_buffer *rx_buffer;
2033	int             i;
2034
2035	INIT_DEBUGOUT("free_receive_structures: begin");
2036
2037	if (adapter->rx_buffer_area != NULL) {
2038		rx_buffer = adapter->rx_buffer_area;
2039		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2040			if (rx_buffer->map != NULL) {
2041				bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2042				bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2043			}
2044			if (rx_buffer->m_head != NULL)
2045				m_freem(rx_buffer->m_head);
2046			rx_buffer->m_head = NULL;
2047		}
2048	}
2049	if (adapter->rx_buffer_area != NULL) {
2050		free(adapter->rx_buffer_area, M_DEVBUF);
2051		adapter->rx_buffer_area = NULL;
2052	}
2053	if (adapter->rxtag != NULL) {
2054		bus_dma_tag_destroy(adapter->rxtag);
2055		adapter->rxtag = NULL;
2056	}
2057	return;
2058}
2059
2060/*********************************************************************
2061 *
2062 *  This routine executes in interrupt context. It replenishes
2063 *  the mbufs in the descriptor and sends data which has been
2064 *  dma'ed into host memory to upper layer.
2065 *
2066 *  We loop at most count times if count is > 0, or until done if
2067 *  count < 0.
2068 *
2069 *********************************************************************/
2070static int
2071ixgb_process_receive_interrupts(struct adapter * adapter, int count)
2072{
2073	struct ifnet   *ifp;
2074	struct mbuf    *mp;
2075#if __FreeBSD_version < 500000
2076	struct ether_header *eh;
2077#endif
2078	int             eop = 0;
2079	int             len;
2080	u_int8_t        accept_frame = 0;
2081	int             i;
2082	int             next_to_use = 0;
2083	int             eop_desc;
2084	int		rx_npkts = 0;
2085	/* Pointer to the receive descriptor being examined. */
2086	struct ixgb_rx_desc *current_desc;
2087
2088	IXGB_LOCK_ASSERT(adapter);
2089
2090	ifp = adapter->ifp;
2091	i = adapter->next_rx_desc_to_check;
2092	next_to_use = adapter->next_rx_desc_to_use;
2093	eop_desc = adapter->next_rx_desc_to_check;
2094	current_desc = &adapter->rx_desc_base[i];
2095
2096	if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD)) {
2097#ifdef _SV_
2098		adapter->no_pkts_avail++;
2099#endif
2100		return (rx_npkts);
2101	}
2102	while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) && (count != 0)) {
2103
2104		mp = adapter->rx_buffer_area[i].m_head;
2105		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2106				BUS_DMASYNC_POSTREAD);
2107		accept_frame = 1;
2108		if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) {
2109			count--;
2110			eop = 1;
2111		} else {
2112			eop = 0;
2113		}
2114		len = current_desc->length;
2115
2116		if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
2117			    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
2118					    IXGB_RX_DESC_ERRORS_RXE)) {
2119			accept_frame = 0;
2120		}
2121		if (accept_frame) {
2122
2123			/* Assign correct length to the current fragment */
2124			mp->m_len = len;
2125
2126			if (adapter->fmp == NULL) {
2127				mp->m_pkthdr.len = len;
2128				adapter->fmp = mp;	/* Store the first mbuf */
2129				adapter->lmp = mp;
2130			} else {
2131				/* Chain mbuf's together */
2132				mp->m_flags &= ~M_PKTHDR;
2133				adapter->lmp->m_next = mp;
2134				adapter->lmp = adapter->lmp->m_next;
2135				adapter->fmp->m_pkthdr.len += len;
2136			}
2137
2138			if (eop) {
2139				eop_desc = i;
2140				adapter->fmp->m_pkthdr.rcvif = ifp;
2141
2142#if __FreeBSD_version < 500000
2143				eh = mtod(adapter->fmp, struct ether_header *);
2144
2145				/* Remove ethernet header from mbuf */
2146				m_adj(adapter->fmp, sizeof(struct ether_header));
2147				ixgb_receive_checksum(adapter, current_desc,
2148						      adapter->fmp);
2149
2150				if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
2151					VLAN_INPUT_TAG(eh, adapter->fmp,
2152						     current_desc->special);
2153				else
2154					ether_input(ifp, eh, adapter->fmp);
2155#else
2156				ixgb_receive_checksum(adapter, current_desc,
2157						      adapter->fmp);
2158#if __FreeBSD_version < 700000
2159				if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
2160					VLAN_INPUT_TAG(ifp, adapter->fmp,
2161						       current_desc->special);
2162#else
2163				if (current_desc->status & IXGB_RX_DESC_STATUS_VP) {
2164					adapter->fmp->m_pkthdr.ether_vtag =
2165					    current_desc->special;
2166					adapter->fmp->m_flags |= M_VLANTAG;
2167				}
2168#endif
2169
2170				if (adapter->fmp != NULL) {
2171					IXGB_UNLOCK(adapter);
2172					(*ifp->if_input) (ifp, adapter->fmp);
2173					IXGB_LOCK(adapter);
2174					rx_npkts++;
2175				}
2176#endif
2177				adapter->fmp = NULL;
2178				adapter->lmp = NULL;
2179			}
2180			adapter->rx_buffer_area[i].m_head = NULL;
2181		} else {
2182			adapter->dropped_pkts++;
2183			if (adapter->fmp != NULL)
2184				m_freem(adapter->fmp);
2185			adapter->fmp = NULL;
2186			adapter->lmp = NULL;
2187		}
2188
2189		/* Zero out the receive descriptors status  */
2190		current_desc->status = 0;
2191
2192		/* Advance our pointers to the next descriptor */
2193		if (++i == adapter->num_rx_desc) {
2194			i = 0;
2195			current_desc = adapter->rx_desc_base;
2196		} else
2197			current_desc++;
2198	}
2199	adapter->next_rx_desc_to_check = i;
2200
2201	if (--i < 0)
2202		i = (adapter->num_rx_desc - 1);
2203
2204	/*
2205	 * 82597EX: Workaround for redundent write back in receive descriptor ring (causes
2206 	 * memory corruption). Avoid using and re-submitting the most recently received RX
2207	 * descriptor back to hardware.
2208	 *
2209	 * if(Last written back descriptor == EOP bit set descriptor)
2210	 * 	then avoid re-submitting the most recently received RX descriptor
2211	 *	back to hardware.
2212	 * if(Last written back descriptor != EOP bit set descriptor)
2213	 *	then avoid re-submitting the most recently received RX descriptors
2214	 * 	till last EOP bit set descriptor.
2215	 */
2216	if (eop_desc != i) {
2217		if (++eop_desc == adapter->num_rx_desc)
2218			eop_desc = 0;
2219		i = eop_desc;
2220	}
2221	/* Replenish the descriptors with new mbufs till last EOP bit set descriptor */
2222	while (next_to_use != i) {
2223		current_desc = &adapter->rx_desc_base[next_to_use];
2224		if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
2225			    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
2226					     IXGB_RX_DESC_ERRORS_RXE))) {
2227			mp = adapter->rx_buffer_area[next_to_use].m_head;
2228			ixgb_get_buf(next_to_use, adapter, mp);
2229		} else {
2230			if (ixgb_get_buf(next_to_use, adapter, NULL) == ENOBUFS)
2231				break;
2232		}
2233		/* Advance our pointers to the next descriptor */
2234		if (++next_to_use == adapter->num_rx_desc) {
2235			next_to_use = 0;
2236			current_desc = adapter->rx_desc_base;
2237		} else
2238			current_desc++;
2239	}
2240	adapter->next_rx_desc_to_use = next_to_use;
2241	if (--next_to_use < 0)
2242		next_to_use = (adapter->num_rx_desc - 1);
2243	/* Advance the IXGB's Receive Queue #0  "Tail Pointer" */
2244	IXGB_WRITE_REG(&adapter->hw, RDT, next_to_use);
2245
2246	return (rx_npkts);
2247}
2248
2249/*********************************************************************
2250 *
2251 *  Verify that the hardware indicated that the checksum is valid.
2252 *  Inform the stack about the status of checksum so that stack
2253 *  doesn't spend time verifying the checksum.
2254 *
2255 *********************************************************************/
2256static void
2257ixgb_receive_checksum(struct adapter * adapter,
2258		      struct ixgb_rx_desc * rx_desc,
2259		      struct mbuf * mp)
2260{
2261	if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) {
2262		mp->m_pkthdr.csum_flags = 0;
2263		return;
2264	}
2265	if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) {
2266		/* Did it pass? */
2267		if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) {
2268			/* IP Checksum Good */
2269			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2270			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2271
2272		} else {
2273			mp->m_pkthdr.csum_flags = 0;
2274		}
2275	}
2276	if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) {
2277		/* Did it pass? */
2278		if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) {
2279			mp->m_pkthdr.csum_flags |=
2280				(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2281			mp->m_pkthdr.csum_data = htons(0xffff);
2282		}
2283	}
2284	return;
2285}
2286
2287
2288static void
2289ixgb_enable_vlans(struct adapter * adapter)
2290{
2291	uint32_t        ctrl;
2292
2293	ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2294	ctrl |= IXGB_CTRL0_VME;
2295	IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2296
2297	return;
2298}
2299
2300
2301static void
2302ixgb_enable_intr(struct adapter * adapter)
2303{
2304	IXGB_WRITE_REG(&adapter->hw, IMS, (IXGB_INT_RXT0 | IXGB_INT_TXDW |
2305			    IXGB_INT_RXDMT0 | IXGB_INT_LSC | IXGB_INT_RXO));
2306	return;
2307}
2308
2309static void
2310ixgb_disable_intr(struct adapter * adapter)
2311{
2312	IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
2313	return;
2314}
2315
2316void
2317ixgb_write_pci_cfg(struct ixgb_hw * hw,
2318		   uint32_t reg,
2319		   uint16_t * value)
2320{
2321	pci_write_config(((struct ixgb_osdep *) hw->back)->dev, reg,
2322			 *value, 2);
2323}
2324
2325/**********************************************************************
2326 *
2327 *  Update the board statistics counters.
2328 *
2329 **********************************************************************/
2330static void
2331ixgb_update_stats_counters(struct adapter * adapter)
2332{
2333
2334	adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
2335	adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
2336	adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
2337	adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
2338	adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
2339	adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
2340	adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
2341	adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
2342	adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
2343	adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
2344
2345	adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
2346	adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
2347	adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
2348	adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
2349	adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
2350	adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
2351	adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
2352	adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
2353	adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
2354	adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
2355	adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
2356	adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
2357	adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
2358	adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
2359	adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
2360	adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
2361	adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
2362	adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
2363	adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
2364	adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
2365	adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
2366	adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
2367	adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
2368	adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
2369	adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
2370	adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
2371	adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
2372
2373	adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
2374	adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
2375	adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
2376	adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
2377	adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
2378	adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
2379	adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
2380	adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
2381	adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
2382	adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
2383	adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
2384	adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
2385	adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
2386	adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
2387	adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
2388	adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
2389	adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
2390	adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
2391	adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
2392	adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
2393	adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
2394	adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
2395}
2396
2397static uint64_t
2398ixgb_get_counter(struct ifnet *ifp, ift_counter cnt)
2399{
2400	struct adapter *adapter;
2401
2402	adapter = if_getsoftc(ifp);
2403
2404	switch (cnt) {
2405	case IFCOUNTER_IPACKETS:
2406		return (adapter->stats.gprcl);
2407	case IFCOUNTER_OPACKETS:
2408		return ( adapter->stats.gptcl);
2409	case IFCOUNTER_IBYTES:
2410		return (adapter->stats.gorcl);
2411	case IFCOUNTER_OBYTES:
2412		return (adapter->stats.gotcl);
2413	case IFCOUNTER_IMCASTS:
2414		return ( adapter->stats.mprcl);
2415	case IFCOUNTER_COLLISIONS:
2416		return (0);
2417	case IFCOUNTER_IERRORS:
2418		return (adapter->dropped_pkts + adapter->stats.crcerrs +
2419		    adapter->stats.rnbc + adapter->stats.mpc +
2420		    adapter->stats.rlec);
2421	default:
2422		return (if_get_counter_default(ifp, cnt));
2423	}
2424}
2425
2426/**********************************************************************
2427 *
2428 *  This routine is called only when ixgb_display_debug_stats is enabled.
2429 *  This routine provides a way to take a look at important statistics
2430 *  maintained by the driver and hardware.
2431 *
2432 **********************************************************************/
2433static void
2434ixgb_print_hw_stats(struct adapter * adapter)
2435{
2436	char            buf_speed[100], buf_type[100];
2437	ixgb_bus_speed  bus_speed;
2438	ixgb_bus_type   bus_type;
2439	device_t dev;
2440
2441	dev = adapter->dev;
2442#ifdef _SV_
2443	device_printf(dev, "Packets not Avail = %ld\n",
2444	       adapter->no_pkts_avail);
2445	device_printf(dev, "CleanTxInterrupts = %ld\n",
2446	       adapter->clean_tx_interrupts);
2447	device_printf(dev, "ICR RXDMT0 = %lld\n",
2448	       (long long)adapter->sv_stats.icr_rxdmt0);
2449	device_printf(dev, "ICR RXO = %lld\n",
2450	       (long long)adapter->sv_stats.icr_rxo);
2451	device_printf(dev, "ICR RXT0 = %lld\n",
2452	       (long long)adapter->sv_stats.icr_rxt0);
2453	device_printf(dev, "ICR TXDW = %lld\n",
2454	       (long long)adapter->sv_stats.icr_TXDW);
2455#endif				/* _SV_ */
2456
2457	bus_speed = adapter->hw.bus.speed;
2458	bus_type = adapter->hw.bus.type;
2459	sprintf(buf_speed,
2460		bus_speed == ixgb_bus_speed_33 ? "33MHz" :
2461		bus_speed == ixgb_bus_speed_66 ? "66MHz" :
2462		bus_speed == ixgb_bus_speed_100 ? "100MHz" :
2463		bus_speed == ixgb_bus_speed_133 ? "133MHz" :
2464		"UNKNOWN");
2465	device_printf(dev, "PCI_Bus_Speed = %s\n",
2466	       buf_speed);
2467
2468	sprintf(buf_type,
2469		bus_type == ixgb_bus_type_pci ? "PCI" :
2470		bus_type == ixgb_bus_type_pcix ? "PCI-X" :
2471		"UNKNOWN");
2472	device_printf(dev, "PCI_Bus_Type = %s\n",
2473	       buf_type);
2474
2475	device_printf(dev, "Tx Descriptors not Avail1 = %ld\n",
2476	       adapter->no_tx_desc_avail1);
2477	device_printf(dev, "Tx Descriptors not Avail2 = %ld\n",
2478	       adapter->no_tx_desc_avail2);
2479	device_printf(dev, "Std Mbuf Failed = %ld\n",
2480	       adapter->mbuf_alloc_failed);
2481	device_printf(dev, "Std Cluster Failed = %ld\n",
2482	       adapter->mbuf_cluster_failed);
2483
2484	device_printf(dev, "Defer count = %lld\n",
2485	       (long long)adapter->stats.dc);
2486	device_printf(dev, "Missed Packets = %lld\n",
2487	       (long long)adapter->stats.mpc);
2488	device_printf(dev, "Receive No Buffers = %lld\n",
2489	       (long long)adapter->stats.rnbc);
2490	device_printf(dev, "Receive length errors = %lld\n",
2491	       (long long)adapter->stats.rlec);
2492	device_printf(dev, "Crc errors = %lld\n",
2493	       (long long)adapter->stats.crcerrs);
2494	device_printf(dev, "Driver dropped packets = %ld\n",
2495	       adapter->dropped_pkts);
2496
2497	device_printf(dev, "XON Rcvd = %lld\n",
2498	       (long long)adapter->stats.xonrxc);
2499	device_printf(dev, "XON Xmtd = %lld\n",
2500	       (long long)adapter->stats.xontxc);
2501	device_printf(dev, "XOFF Rcvd = %lld\n",
2502	       (long long)adapter->stats.xoffrxc);
2503	device_printf(dev, "XOFF Xmtd = %lld\n",
2504	       (long long)adapter->stats.xofftxc);
2505
2506	device_printf(dev, "Good Packets Rcvd = %lld\n",
2507	       (long long)adapter->stats.gprcl);
2508	device_printf(dev, "Good Packets Xmtd = %lld\n",
2509	       (long long)adapter->stats.gptcl);
2510
2511	device_printf(dev, "Jumbo frames recvd = %lld\n",
2512	       (long long)adapter->stats.jprcl);
2513	device_printf(dev, "Jumbo frames Xmtd = %lld\n",
2514	       (long long)adapter->stats.jptcl);
2515
2516	return;
2517
2518}
2519
2520static int
2521ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS)
2522{
2523	int             error;
2524	int             result;
2525	struct adapter *adapter;
2526
2527	result = -1;
2528	error = sysctl_handle_int(oidp, &result, 0, req);
2529
2530	if (error || !req->newptr)
2531		return (error);
2532
2533	if (result == 1) {
2534		adapter = (struct adapter *) arg1;
2535		ixgb_print_hw_stats(adapter);
2536	}
2537	return error;
2538}
2539