/*******************************************************************************

Copyright (c) 2001-2004, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

/*$FreeBSD: releng/10.3/sys/dev/ixgb/if_ixgb.c 294958 2016-01-27 22:31:08Z marius $*/

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <dev/ixgb/if_ixgb.h>

/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
int             ixgb_display_debug_stats = 0;

/*********************************************************************
 *  Linked list of board private structures for all NICs found
 *********************************************************************/

struct adapter *ixgb_adapter_list = NULL;



/*********************************************************************
 *  Driver version
 *********************************************************************/

char            ixgb_driver_version[] = "1.0.6";
char            ixgb_copyright[] = "Copyright (c) 2001-2004 Intel Corporation.";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixgb_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixgb_vendor_info_t ixgb_vendor_info_array[] =
{
	/* Intel(R) PRO/10000 Network Connection */
	{IXGB_VENDOR_ID, IXGB_DEVICE_ID_82597EX, PCI_ANY_ID, PCI_ANY_ID, 0},
	{IXGB_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, PCI_ANY_ID, PCI_ANY_ID, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings for all supported NICs.
 *********************************************************************/

static char    *ixgb_strings[] = {
	"Intel(R) PRO/10GbE Network Driver"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixgb_probe(device_t);
static int      ixgb_attach(device_t);
static int      ixgb_detach(device_t);
static int      ixgb_shutdown(device_t);
static void     ixgb_intr(void *);
static void     ixgb_start(struct ifnet *);
static void     ixgb_start_locked(struct ifnet *);
static int      ixgb_ioctl(struct ifnet *, IOCTL_CMD_TYPE, caddr_t);
static void     ixgb_watchdog(struct adapter *);
static void     ixgb_init(void *);
static void     ixgb_init_locked(struct adapter *);
static void     ixgb_stop(void *);
static void     ixgb_media_status(struct ifnet *, struct ifmediareq *);
static int      ixgb_media_change(struct ifnet *);
static void     ixgb_identify_hardware(struct adapter *);
static int      ixgb_allocate_pci_resources(struct adapter *);
static void     ixgb_free_pci_resources(struct adapter *);
static void     ixgb_local_timer(void *);
static int      ixgb_hardware_init(struct adapter *);
static int      ixgb_setup_interface(device_t, struct adapter *);
static int      ixgb_setup_transmit_structures(struct adapter *);
static void     ixgb_initialize_transmit_unit(struct adapter *);
static int      ixgb_setup_receive_structures(struct adapter *);
static void     ixgb_initialize_receive_unit(struct adapter *);
static void     ixgb_enable_intr(struct adapter *);
static void     ixgb_disable_intr(struct adapter *);
static void     ixgb_free_transmit_structures(struct adapter *);
static void     ixgb_free_receive_structures(struct adapter *);
static void     ixgb_update_stats_counters(struct adapter *);
static void     ixgb_clean_transmit_interrupts(struct adapter *);
static int      ixgb_allocate_receive_structures(struct adapter *);
static int      ixgb_allocate_transmit_structures(struct adapter *);
static int      ixgb_process_receive_interrupts(struct adapter *, int);
static void
ixgb_receive_checksum(struct adapter *,
		      struct ixgb_rx_desc * rx_desc,
		      struct mbuf *);
static void
ixgb_transmit_checksum_setup(struct adapter *,
			     struct mbuf *,
			     u_int8_t *);
static void     ixgb_set_promisc(struct adapter *);
static void     ixgb_disable_promisc(struct adapter *);
static void     ixgb_set_multi(struct adapter *);
static void     ixgb_print_hw_stats(struct adapter *);
static void     ixgb_print_link_status(struct adapter *);
static int
ixgb_get_buf(int i, struct adapter *,
	     struct mbuf *);
static void     ixgb_enable_vlans(struct adapter * adapter);
static int      ixgb_encap(struct adapter * adapter, struct mbuf * m_head);
static int      ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int
ixgb_dma_malloc(struct adapter *, bus_size_t,
		struct ixgb_dma_alloc *, int);
static void     ixgb_dma_free(struct adapter *, struct ixgb_dma_alloc *);
#ifdef DEVICE_POLLING
static poll_handler_t ixgb_poll;
#endif

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixgb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixgb_probe),
	DEVMETHOD(device_attach, ixgb_attach),
	DEVMETHOD(device_detach, ixgb_detach),
	DEVMETHOD(device_shutdown, ixgb_shutdown),

	DEVMETHOD_END
};

static driver_t ixgb_driver = {
	"ixgb", ixgb_methods, sizeof(struct adapter),
};

static devclass_t ixgb_devclass;
DRIVER_MODULE(ixgb, pci, ixgb_driver, ixgb_devclass, 0, 0);

MODULE_DEPEND(ixgb, pci, 1, 1, 1);
MODULE_DEPEND(ixgb, ether, 1, 1, 1);

/* some defines for controlling descriptor fetches in h/w */
#define RXDCTL_PTHRESH_DEFAULT 128	/* chip considers prefetch below this */
#define RXDCTL_HTHRESH_DEFAULT 16	/* chip will only prefetch if tail is
					 * pushed this many descriptors from
					 * head */
#define RXDCTL_WTHRESH_DEFAULT 0	/* chip writes back at this many or RXT0 */
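/*
 * Note: these defaults are presumably packed into the PTHRESH/HTHRESH/
 * WTHRESH fields of the RXDCTL register when the receive unit is brought
 * up; the exact field layout lives in the shared ixgb hardware header.
 */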


/*********************************************************************
 *  Device identification routine
 *
 *  ixgb_probe determines whether the driver should be loaded on the
 *  adapter, based on the adapter's PCI vendor/device ID.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixgb_probe(device_t dev)
{
	ixgb_vendor_info_t *ent;

	u_int16_t       pci_vendor_id = 0;
	u_int16_t       pci_device_id = 0;
	u_int16_t       pci_subvendor_id = 0;
	u_int16_t       pci_subdevice_id = 0;
	char            adapter_name[60];

	INIT_DEBUGOUT("ixgb_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != IXGB_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixgb_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&

		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == PCI_ANY_ID)) &&

		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == PCI_ANY_ID))) {
			sprintf(adapter_name, "%s, Version - %s",
				ixgb_strings[ent->index],
				ixgb_driver_version);
			device_set_desc_copy(dev, adapter_name);
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}

	return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgb_attach(device_t dev)
{
	struct adapter *adapter;
	int             tsize, rsize;
	int             error = 0;

	device_printf(dev, "%s\n", ixgb_copyright);
	INIT_DEBUGOUT("ixgb_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	if (!(adapter = device_get_softc(dev))) {
		device_printf(dev, "adapter structure allocation failed\n");
		return (ENOMEM);
	}
	bzero(adapter, sizeof(struct adapter));
	adapter->dev = dev;
	adapter->osdep.dev = dev;
	IXGB_LOCK_INIT(adapter, device_get_nameunit(dev));

	if (ixgb_adapter_list != NULL)
		ixgb_adapter_list->prev = adapter;
	adapter->next = ixgb_adapter_list;
	ixgb_adapter_list = adapter;

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
			SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
			OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
			(void *)adapter, 0,
			ixgb_sysctl_stats, "I", "Statistics");

	callout_init_mtx(&adapter->timer, &adapter->mtx, 0);

	/* Determine hardware revision */
	ixgb_identify_hardware(adapter);

	/* Parameters (to be read from user) */
	adapter->num_tx_desc = IXGB_MAX_TXD;
	adapter->num_rx_desc = IXGB_MAX_RXD;
	adapter->tx_int_delay = TIDV;
	adapter->rx_int_delay = RDTR;
	adapter->rx_buffer_len = IXGB_RXBUFFER_2048;

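	/*
	 * Receive flow-control watermarks and pause timer.  FCRTH, FCRTL
	 * and FCPAUSE are compile-time defaults (defined alongside TIDV
	 * and RDTR in if_ixgb.h); when the RX FIFO fills past the high
	 * watermark the hardware sends XOFF pause frames, and it sends
	 * XON again once it drains below the low one.
	 */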
	adapter->hw.fc.high_water = FCRTH;
	adapter->hw.fc.low_water = FCRTL;
	adapter->hw.fc.pause_time = FCPAUSE;
	adapter->hw.fc.send_xon = TRUE;
	adapter->hw.fc.type = FLOW_CONTROL;


	/* Set the max frame size assuming standard ethernet sized frames */
	adapter->hw.max_frame_size =
		ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	if (ixgb_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci;
	}
	tsize = IXGB_ROUNDUP(adapter->num_tx_desc *
			     sizeof(struct ixgb_tx_desc), 4096);

	/* Allocate Transmit Descriptor ring */
	if (ixgb_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
		device_printf(dev, "Unable to allocate TxDescriptor memory\n");
		error = ENOMEM;
		goto err_tx_desc;
	}
	adapter->tx_desc_base = (struct ixgb_tx_desc *) adapter->txdma.dma_vaddr;

	rsize = IXGB_ROUNDUP(adapter->num_rx_desc *
			     sizeof(struct ixgb_rx_desc), 4096);

	/* Allocate Receive Descriptor ring */
	if (ixgb_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
		device_printf(dev, "Unable to allocate rx_desc memory\n");
		error = ENOMEM;
		goto err_rx_desc;
	}
	adapter->rx_desc_base = (struct ixgb_rx_desc *) adapter->rxdma.dma_vaddr;

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err_hw_init;
	}

	/* Initialize the hardware */
	if (ixgb_hardware_init(adapter)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		error = EIO;
		goto err_hw_init;
	}
	/* Setup OS specific network interface */
	if (ixgb_setup_interface(dev, adapter) != 0) {
		error = ENOMEM;
		goto err_hw_init;
	}

	/* Initialize statistics */
	ixgb_clear_hw_cntrs(&adapter->hw);
	ixgb_update_stats_counters(adapter);

	INIT_DEBUGOUT("ixgb_attach: end");
	return (0);

err_hw_init:
	ixgb_dma_free(adapter, &adapter->rxdma);
err_rx_desc:
	ixgb_dma_free(adapter, &adapter->txdma);
err_tx_desc:
err_pci:
	if (adapter->ifp != NULL)
		if_free(adapter->ifp);
	ixgb_free_pci_resources(adapter);
	sysctl_ctx_free(&adapter->sysctl_ctx);
	free(adapter->mta, M_DEVBUF);
	return (error);

}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgb_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet   *ifp = adapter->ifp;

	INIT_DEBUGOUT("ixgb_detach: begin");

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	IXGB_LOCK(adapter);
	adapter->in_detach = 1;

	ixgb_stop(adapter);
	IXGB_UNLOCK(adapter);

#if __FreeBSD_version < 500000
	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
#else
	ether_ifdetach(ifp);
#endif
	callout_drain(&adapter->timer);
	ixgb_free_pci_resources(adapter);
#if __FreeBSD_version >= 500000
	if_free(ifp);
#endif

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base) {
		ixgb_dma_free(adapter, &adapter->txdma);
		adapter->tx_desc_base = NULL;
	}
	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base) {
		ixgb_dma_free(adapter, &adapter->rxdma);
		adapter->rx_desc_base = NULL;
	}
	/* Remove from the adapter list */
	if (ixgb_adapter_list == adapter)
		ixgb_adapter_list = adapter->next;
	if (adapter->next != NULL)
		adapter->next->prev = adapter->prev;
	if (adapter->prev != NULL)
		adapter->prev->next = adapter->next;
	free(adapter->mta, M_DEVBUF);

	IXGB_LOCK_DESTROY(adapter);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixgb_shutdown(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	IXGB_LOCK(adapter);
	ixgb_stop(adapter);
	IXGB_UNLOCK(adapter);
	return (0);
}


/*********************************************************************
 *  Transmit entry point
 *
 *  ixgb_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  If resources are not available, the stack is notified and
 *  the packet is requeued.
 **********************************************************************/

static void
ixgb_start_locked(struct ifnet * ifp)
{
	struct mbuf    *m_head;
	struct adapter *adapter = ifp->if_softc;

	IXGB_LOCK_ASSERT(adapter);

	if (!adapter->link_active)
		return;

	while (ifp->if_snd.ifq_head != NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);

		if (m_head == NULL)
			break;

		if (ixgb_encap(adapter, m_head)) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IF_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
#if __FreeBSD_version < 500000
		if (ifp->if_bpf)
			bpf_mtap(ifp, m_head);
#else
		ETHER_BPF_MTAP(ifp, m_head);
#endif
		/* Set timeout in case hardware has problems transmitting */
		adapter->tx_timer = IXGB_TX_TIMEOUT;

	}
	return;
}

static void
ixgb_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;

	IXGB_LOCK(adapter);
	ixgb_start_locked(ifp);
	IXGB_UNLOCK(adapter);
	return;
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixgb_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixgb_ioctl(struct ifnet * ifp, IOCTL_CMD_TYPE command, caddr_t data)
{
	int             mask, error = 0;
	struct ifreq   *ifr = (struct ifreq *) data;
	struct adapter *adapter = ifp->if_softc;

	if (adapter->in_detach)
		goto out;

	switch (command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
		ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
			error = EINVAL;
		} else {
			IXGB_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->hw.max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

			ixgb_init_locked(adapter);
			IXGB_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
		IXGB_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				ixgb_init_locked(adapter);
			}
			ixgb_disable_promisc(adapter);
			ixgb_set_promisc(adapter);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				ixgb_stop(adapter);
			}
		}
		IXGB_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGB_LOCK(adapter);
			ixgb_disable_intr(adapter);
			ixgb_set_multi(adapter);
			ixgb_enable_intr(adapter);
			IXGB_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(ixgb_poll, ifp);
				if (error)
					return(error);
				IXGB_LOCK(adapter);
				ixgb_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				IXGB_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				IXGB_LOCK(adapter);
				ixgb_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				IXGB_UNLOCK(adapter);
			}
		}
#endif /* DEVICE_POLLING */
		if (mask & IFCAP_HWCSUM) {
			if (IFCAP_HWCSUM & ifp->if_capenable)
				ifp->if_capenable &= ~IFCAP_HWCSUM;
			else
				ifp->if_capenable |= IFCAP_HWCSUM;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixgb_init(adapter);
		}
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%X)\n", (int)command);
		error = EINVAL;
	}

out:
	return (error);
}

/*********************************************************************
 *  Watchdog entry point
 *
 *  This routine is called whenever the hardware quits transmitting.
 *
 **********************************************************************/

static void
ixgb_watchdog(struct adapter *adapter)
{
	struct ifnet *ifp;

	ifp = adapter->ifp;

	/*
	 * If we are in this routine because of pause frames, then don't
	 * reset the hardware.
	 */
	if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_TXOFF) {
		adapter->tx_timer = IXGB_TX_TIMEOUT;
		return;
	}
	if_printf(ifp, "watchdog timeout -- resetting\n");

	ixgb_stop(adapter);
	ixgb_init_locked(adapter);


	ifp->if_oerrors++;

	return;
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as the
 *  init entry point in the network interface structure. It is also
 *  used by the driver as a hw/sw initialization routine to bring the
 *  adapter to a consistent state.
 *
 **********************************************************************/

static void
ixgb_init_locked(struct adapter *adapter)
{
	struct ifnet   *ifp;

	INIT_DEBUGOUT("ixgb_init: begin");

	IXGB_LOCK_ASSERT(adapter);

	ixgb_stop(adapter);
	ifp = adapter->ifp;

	/* Get the latest MAC address; the user may have configured an LAA */
	bcopy(IF_LLADDR(ifp), adapter->hw.curr_mac_addr,
	    IXGB_ETH_LENGTH_OF_ADDRESS);

	/* Initialize the hardware */
	if (ixgb_hardware_init(adapter)) {
		if_printf(ifp, "Unable to initialize the hardware\n");
		return;
	}
	ixgb_enable_vlans(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgb_setup_transmit_structures(adapter)) {
		if_printf(ifp, "Could not setup transmit structures\n");
		ixgb_stop(adapter);
		return;
	}
	ixgb_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	ixgb_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (ixgb_setup_receive_structures(adapter)) {
		if_printf(ifp, "Could not setup receive structures\n");
		ixgb_stop(adapter);
		return;
	}
	ixgb_initialize_receive_unit(adapter);

	/* Don't lose promiscuous settings */
	ixgb_set_promisc(adapter);

	ifp = adapter->ifp;
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;


	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = IXGB_CHECKSUM_FEATURES;
	else
		ifp->if_hwassist = 0;


	/* Enable jumbo frames */
	if (ifp->if_mtu > ETHERMTU) {
		uint32_t        temp_reg;
		IXGB_WRITE_REG(&adapter->hw, MFS,
			       adapter->hw.max_frame_size << IXGB_MFS_SHIFT);
		temp_reg = IXGB_READ_REG(&adapter->hw, CTRL0);
		temp_reg |= IXGB_CTRL0_JFE;
		IXGB_WRITE_REG(&adapter->hw, CTRL0, temp_reg);
	}
	callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter);
	ixgb_clear_hw_cntrs(&adapter->hw);
#ifdef DEVICE_POLLING
	/*
	 * Only disable interrupts if we are polling, make sure they are on
	 * otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		ixgb_disable_intr(adapter);
	else
#endif
		ixgb_enable_intr(adapter);

	return;
}

static void
ixgb_init(void *arg)
{
	struct adapter *adapter = arg;

	IXGB_LOCK(adapter);
	ixgb_init_locked(adapter);
	IXGB_UNLOCK(adapter);
	return;
}

#ifdef DEVICE_POLLING
static int
ixgb_poll_locked(struct ifnet * ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	u_int32_t       reg_icr;
	int		rx_npkts;

	IXGB_LOCK_ASSERT(adapter);

	if (cmd == POLL_AND_CHECK_STATUS) {
		reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
		if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
			ixgb_check_for_link(&adapter->hw);
			ixgb_print_link_status(adapter);
		}
	}
	rx_npkts = ixgb_process_receive_interrupts(adapter, count);
	ixgb_clean_transmit_interrupts(adapter);

	if (ifp->if_snd.ifq_head != NULL)
		ixgb_start_locked(ifp);
	return (rx_npkts);
}

static int
ixgb_poll(struct ifnet * ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	int rx_npkts = 0;

	IXGB_LOCK(adapter);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		rx_npkts = ixgb_poll_locked(ifp, cmd, count);
	IXGB_UNLOCK(adapter);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

/*********************************************************************
 *
 *  Interrupt Service routine
 *
 **********************************************************************/

static void
ixgb_intr(void *arg)
{
	u_int32_t       loop_cnt = IXGB_MAX_INTR;
	u_int32_t       reg_icr;
	struct ifnet   *ifp;
	struct adapter *adapter = arg;
	boolean_t       rxdmt0 = FALSE;

	IXGB_LOCK(adapter);

	ifp = adapter->ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		IXGB_UNLOCK(adapter);
		return;
	}
#endif

	reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
	if (reg_icr == 0) {
		IXGB_UNLOCK(adapter);
		return;
	}

	if (reg_icr & IXGB_INT_RXDMT0)
		rxdmt0 = TRUE;

#ifdef _SV_
	if (reg_icr & IXGB_INT_RXDMT0)
		adapter->sv_stats.icr_rxdmt0++;
	if (reg_icr & IXGB_INT_RXO)
		adapter->sv_stats.icr_rxo++;
	if (reg_icr & IXGB_INT_RXT0)
		adapter->sv_stats.icr_rxt0++;
	if (reg_icr & IXGB_INT_TXDW)
		adapter->sv_stats.icr_TXDW++;
#endif				/* _SV_ */

	/* Link status change */
	if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
		ixgb_check_for_link(&adapter->hw);
		ixgb_print_link_status(adapter);
	}
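	/*
	 * Bounded cleanup loop: make at most IXGB_MAX_INTR passes over the
	 * RX/TX rings per interrupt, presumably so a busy ring cannot keep
	 * us in the handler indefinitely.
	 */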
	while (loop_cnt > 0) {
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ixgb_process_receive_interrupts(adapter, -1);
			ixgb_clean_transmit_interrupts(adapter);
		}
		loop_cnt--;
	}

	if (rxdmt0 && adapter->raidc) {
		IXGB_WRITE_REG(&adapter->hw, IMC, IXGB_INT_RXDMT0);
		IXGB_WRITE_REG(&adapter->hw, IMS, IXGB_INT_RXDMT0);
	}
	if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_snd.ifq_head != NULL)
		ixgb_start_locked(ifp);

	IXGB_UNLOCK(adapter);
	return;
}


/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixgb_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("ixgb_media_status: begin");

	ixgb_check_for_link(&adapter->hw);
	ixgb_print_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->hw.link_up)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;

	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixgb_media_change(struct ifnet * ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;

	INIT_DEBUGOUT("ixgb_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	return (0);
}

/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixgb_encap(struct adapter * adapter, struct mbuf * m_head)
{
	u_int8_t        txd_popts;
	int             i, j, error, nsegs;

#if __FreeBSD_version < 500000
	struct ifvlan  *ifv = NULL;
#elif __FreeBSD_version < 700000
	struct m_tag   *mtag = NULL;
#endif
	bus_dma_segment_t segs[IXGB_MAX_SCATTER];
	bus_dmamap_t	map;
	struct ixgb_buffer *tx_buffer = NULL;
	struct ixgb_tx_desc *current_tx_desc = NULL;
	struct ifnet   *ifp = adapter->ifp;

	/*
	 * Force a cleanup if number of TX descriptors available hits the
	 * threshold
	 */
	if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
		ixgb_clean_transmit_interrupts(adapter);
	}
	if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
		adapter->no_tx_desc_avail1++;
		return (ENOBUFS);
	}
	/*
	 * Map the packet for DMA.
	 */
	if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) {
		adapter->no_tx_map_avail++;
		return (ENOMEM);
	}
	error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs,
					&nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		adapter->no_tx_dma_setup++;
		if_printf(ifp, "ixgb_encap: bus_dmamap_load_mbuf_sg failed; "
		       "error %u\n", error);
		bus_dmamap_destroy(adapter->txtag, map);
		return (error);
	}
	KASSERT(nsegs != 0, ("ixgb_encap: empty packet"));

	if (nsegs > adapter->num_tx_desc_avail) {
		adapter->no_tx_desc_avail2++;
		bus_dmamap_destroy(adapter->txtag, map);
		return (ENOBUFS);
	}
	if (ifp->if_hwassist > 0) {
		ixgb_transmit_checksum_setup(adapter, m_head,
					     &txd_popts);
	} else
		txd_popts = 0;

	/* Find out if we are in vlan mode */
#if __FreeBSD_version < 500000
	if ((m_head->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
	    m_head->m_pkthdr.rcvif != NULL &&
	    m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
		ifv = m_head->m_pkthdr.rcvif->if_softc;
#elif __FreeBSD_version < 700000
	mtag = VLAN_OUTPUT_TAG(ifp, m_head);
#endif
	i = adapter->next_avail_tx_desc;
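	/*
	 * Walk the DMA segment list and fill one TX descriptor per segment.
	 * Only the final descriptor's tx_buffer records the mbuf (set after
	 * the loop), so the chain is freed exactly once when that last
	 * descriptor is reported done.
	 */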
	for (j = 0; j < nsegs; j++) {
		tx_buffer = &adapter->tx_buffer_area[i];
		current_tx_desc = &adapter->tx_desc_base[i];

		current_tx_desc->buff_addr = htole64(segs[j].ds_addr);
		current_tx_desc->cmd_type_len = (adapter->txd_cmd | segs[j].ds_len);
		current_tx_desc->popts = txd_popts;
		if (++i == adapter->num_tx_desc)
			i = 0;

		tx_buffer->m_head = NULL;
	}

	adapter->num_tx_desc_avail -= nsegs;
	adapter->next_avail_tx_desc = i;

#if __FreeBSD_version < 500000
	if (ifv != NULL) {
		/* Set the vlan id */
		current_tx_desc->vlan = ifv->ifv_tag;
#elif __FreeBSD_version < 700000
	if (mtag != NULL) {
		/* Set the vlan id */
		current_tx_desc->vlan = VLAN_TAG_VALUE(mtag);
#else
	if (m_head->m_flags & M_VLANTAG) {
		current_tx_desc->vlan = m_head->m_pkthdr.ether_vtag;
#endif

		/* Tell hardware to add tag */
		current_tx_desc->cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
	}
	tx_buffer->m_head = m_head;
	tx_buffer->map = map;
	bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	current_tx_desc->cmd_type_len |= (IXGB_TX_DESC_CMD_EOP);

	/*
	 * Advance the Transmit Descriptor Tail (TDT); this tells the
	 * hardware that this frame is available to transmit.
	 */
	IXGB_WRITE_REG(&adapter->hw, TDT, i);

	return (0);
}

static void
ixgb_set_promisc(struct adapter * adapter)
{

	u_int32_t       reg_rctl;
	struct ifnet   *ifp = adapter->ifp;

	reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
		IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= IXGB_RCTL_MPE;
		reg_rctl &= ~IXGB_RCTL_UPE;
		IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
	}
	return;
}

static void
ixgb_disable_promisc(struct adapter * adapter)
{
	u_int32_t       reg_rctl;

	reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);

	reg_rctl &= (~IXGB_RCTL_UPE);
	reg_rctl &= (~IXGB_RCTL_MPE);
	IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);

	return;
}


/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever the multicast address list is updated.
 *
 **********************************************************************/

static void
ixgb_set_multi(struct adapter * adapter)
{
	u_int32_t       reg_rctl = 0;
	u_int8_t        *mta;
	struct ifmultiaddr *ifma;
	int             mcnt = 0;
	struct ifnet   *ifp = adapter->ifp;

	IOCTL_DEBUGOUT("ixgb_set_multi: begin");

	mta = adapter->mta;
	bzero(mta, sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS *
	    MAX_NUM_MULTICAST_ADDRESSES);

	if_maddr_rlock(ifp);
#if __FreeBSD_version < 500000
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
#else
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
#endif
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
		      &mta[mcnt * IXGB_ETH_LENGTH_OF_ADDRESS], IXGB_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}
	if_maddr_runlock(ifp);

	if (mcnt > MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
		reg_rctl |= IXGB_RCTL_MPE;
		IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
	} else
		ixgb_mc_addr_list_update(&adapter->hw, mta, mcnt, 0);

	return;
}


/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status and updates statistics.
 *
 **********************************************************************/

static void
ixgb_local_timer(void *arg)
{
	struct ifnet   *ifp;
	struct adapter *adapter = arg;
	ifp = adapter->ifp;

	IXGB_LOCK_ASSERT(adapter);

	ixgb_check_for_link(&adapter->hw);
	ixgb_print_link_status(adapter);
	ixgb_update_stats_counters(adapter);
	if (ixgb_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
		ixgb_print_hw_stats(adapter);
	}
	if (adapter->tx_timer != 0 && --adapter->tx_timer == 0)
		ixgb_watchdog(adapter);
	callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter);
}

static void
ixgb_print_link_status(struct adapter * adapter)
{
	if (adapter->hw.link_up) {
		if (!adapter->link_active) {
			if_printf(adapter->ifp, "Link is up %d Mbps %s \n",
			       10000,
			       "Full Duplex");
			adapter->link_active = 1;
		}
	} else {
		if (adapter->link_active) {
			if_printf(adapter->ifp, "Link is Down \n");
			adapter->link_active = 0;
		}
	}

	return;
}



/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

static void
ixgb_stop(void *arg)
{
	struct ifnet   *ifp;
	struct adapter *adapter = arg;
	ifp = adapter->ifp;

	IXGB_LOCK_ASSERT(adapter);

	INIT_DEBUGOUT("ixgb_stop: begin\n");
	ixgb_disable_intr(adapter);
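	/*
	 * Clear adapter_stopped first so ixgb_adapter_stop() always issues
	 * the global reset, even if the shared code believes the adapter is
	 * already stopped.
	 */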
	adapter->hw.adapter_stopped = FALSE;
	ixgb_adapter_stop(&adapter->hw);
	callout_stop(&adapter->timer);
	ixgb_free_transmit_structures(adapter);
	ixgb_free_receive_structures(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	adapter->tx_timer = 0;

	return;
}


/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
static void
ixgb_identify_hardware(struct adapter * adapter)
{
	device_t        dev = adapter->dev;

	/* Make sure our PCI config space has the necessary stuff set */
	pci_enable_busmaster(dev);
	adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);

	/* Save off the information about this board */
	adapter->hw.vendor_id = pci_get_vendor(dev);
	adapter->hw.device_id = pci_get_device(dev);
	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	/* Set MacType, etc. based on this PCI info */
	switch (adapter->hw.device_id) {
	case IXGB_DEVICE_ID_82597EX:
	case IXGB_DEVICE_ID_82597EX_SR:
		adapter->hw.mac_type = ixgb_82597;
		break;
	default:
		INIT_DEBUGOUT1("Unknown device id 0x%x", adapter->hw.device_id);
		device_printf(dev, "unsupported device id 0x%x\n",
		    adapter->hw.device_id);
	}

	return;
}

static int
ixgb_allocate_pci_resources(struct adapter * adapter)
{
	int             rid;
	device_t        dev = adapter->dev;

	rid = IXGB_MMBA;
	adapter->res_memory = bus_alloc_resource(dev, SYS_RES_MEMORY,
						 &rid, 0, ~0, 1,
						 RF_ACTIVE);
	if (!(adapter->res_memory)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
		rman_get_bustag(adapter->res_memory);
	adapter->osdep.mem_bus_space_handle =
		rman_get_bushandle(adapter->res_memory);
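	/*
	 * The shared code's hw_addr is pointed at the bus space handle
	 * rather than at a mapped virtual address; the osdep register
	 * access macros are expected to go through bus_space_*() using
	 * this handle.
	 */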
	adapter->hw.hw_addr = (uint8_t *) & adapter->osdep.mem_bus_space_handle;

	rid = 0x0;
	adapter->res_interrupt = bus_alloc_resource(dev, SYS_RES_IRQ,
						    &rid, 0, ~0, 1,
						  RF_SHAREABLE | RF_ACTIVE);
	if (!(adapter->res_interrupt)) {
		device_printf(dev,
		    "Unable to allocate bus resource: interrupt\n");
		return (ENXIO);
	}
	if (bus_setup_intr(dev, adapter->res_interrupt,
			   INTR_TYPE_NET | INTR_MPSAFE,
			   NULL, (void (*) (void *))ixgb_intr, adapter,
			   &adapter->int_handler_tag)) {
		device_printf(dev, "Error registering interrupt handler!\n");
		return (ENXIO);
	}
	adapter->hw.back = &adapter->osdep;

	return (0);
}

static void
ixgb_free_pci_resources(struct adapter * adapter)
{
	device_t        dev = adapter->dev;

	if (adapter->res_interrupt != NULL) {
		bus_teardown_intr(dev, adapter->res_interrupt,
				  adapter->int_handler_tag);
		bus_release_resource(dev, SYS_RES_IRQ, 0,
				     adapter->res_interrupt);
	}
	if (adapter->res_memory != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, IXGB_MMBA,
				     adapter->res_memory);
	}
	if (adapter->res_ioport != NULL) {
		bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
				     adapter->res_ioport);
	}
	return;
}

/*********************************************************************
 *
 *  Initialize the hardware to a configuration as specified by the
 *  adapter structure. The controller is reset, the EEPROM is
 *  verified, the MAC address is set, then the shared initialization
 *  routines are called.
 *
 **********************************************************************/
static int
ixgb_hardware_init(struct adapter * adapter)
{
	/* Issue a global reset */
	adapter->hw.adapter_stopped = FALSE;
	ixgb_adapter_stop(&adapter->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
		device_printf(adapter->dev,
		    "The EEPROM Checksum Is Not Valid\n");
		return (EIO);
	}
	if (!ixgb_init_hw(&adapter->hw)) {
		device_printf(adapter->dev, "Hardware Initialization Failed\n");
		return (EIO);
	}

	return (0);
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static int
ixgb_setup_interface(device_t dev, struct adapter * adapter)
{
	struct ifnet   *ifp;
	INIT_DEBUGOUT("ixgb_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		return (-1);
	}
#if __FreeBSD_version >= 502000
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
#else
	ifp->if_unit = device_get_unit(dev);
	ifp->if_name = "ixgb";
#endif
	ifp->if_baudrate = 1000000000;
	ifp->if_init = ixgb_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixgb_ioctl;
	ifp->if_start = ixgb_start;
	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;

#if __FreeBSD_version < 500000
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
#else
	ether_ifattach(ifp, adapter->hw.curr_mac_addr);
#endif

	ifp->if_capabilities = IFCAP_HWCSUM;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

#if __FreeBSD_version >= 500000
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
#endif

	ifp->if_capenable = ifp->if_capabilities;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgb_media_change,
		     ixgb_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
		    0, NULL);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return (0);
}

/********************************************************************
 * Manage DMA'able memory.
 *******************************************************************/
static void
ixgb_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
{
	if (error)
		return;
	*(bus_addr_t *) arg = segs->ds_addr;
	return;
}

static int
ixgb_dma_malloc(struct adapter * adapter, bus_size_t size,
		struct ixgb_dma_alloc * dma, int mapflags)
{
	device_t dev;
	int             r;

	dev = adapter->dev;
	r = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
			       PAGE_SIZE, 0,	/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,	/* filter, filterarg */
			       size,	/* maxsize */
			       1,	/* nsegments */
			       size,	/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
#if __FreeBSD_version >= 502000
			       NULL,	/* lockfunc */
			       NULL,	/* lockfuncarg */
#endif
			       &dma->dma_tag);
	if (r != 0) {
		device_printf(dev, "ixgb_dma_malloc: bus_dma_tag_create failed; "
		       "error %u\n", r);
		goto fail_0;
	}
	r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
			     BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		device_printf(dev, "ixgb_dma_malloc: bus_dmamem_alloc failed; "
		       "error %u\n", r);
		goto fail_1;
	}
	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
			    size,
			    ixgb_dmamap_cb,
			    &dma->dma_paddr,
			    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		device_printf(dev, "ixgb_dma_malloc: bus_dmamap_load failed; "
		       "error %u\n", r);
		goto fail_2;
	}
	dma->dma_size = size;
	return (0);
fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_tag = NULL;
	return (r);
}



static void
ixgb_dma_free(struct adapter * adapter, struct ixgb_dma_alloc * dma)
{
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
}

/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire.
 *
 **********************************************************************/
static int
ixgb_allocate_transmit_structures(struct adapter * adapter)
{
	if (!(adapter->tx_buffer_area =
	      (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
					    adapter->num_tx_desc, M_DEVBUF,
					    M_NOWAIT | M_ZERO))) {
		device_printf(adapter->dev,
		    "Unable to allocate tx_buffer memory\n");
		return ENOMEM;
	}
	bzero(adapter->tx_buffer_area,
	      sizeof(struct ixgb_buffer) * adapter->num_tx_desc);

	return 0;
}

/*********************************************************************
 *
 *  Allocate and initialize transmit structures.
 *
 **********************************************************************/
static int
ixgb_setup_transmit_structures(struct adapter * adapter)
{
	/*
	 * Setup DMA descriptor areas.
	 */
	if (bus_dma_tag_create(bus_get_dma_tag(adapter->dev),	/* parent */
			       PAGE_SIZE, 0,	/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,	/* filter, filterarg */
			       MCLBYTES * IXGB_MAX_SCATTER,	/* maxsize */
			       IXGB_MAX_SCATTER,	/* nsegments */
			       MCLBYTES,	/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
#if __FreeBSD_version >= 502000
			       NULL,	/* lockfunc */
			       NULL,	/* lockfuncarg */
#endif
			       &adapter->txtag)) {
		device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
		return (ENOMEM);
	}
	if (ixgb_allocate_transmit_structures(adapter))
		return ENOMEM;

	bzero((void *)adapter->tx_desc_base,
	      (sizeof(struct ixgb_tx_desc)) * adapter->num_tx_desc);

	adapter->next_avail_tx_desc = 0;
	adapter->oldest_used_tx_desc = 0;

	/* Set number of descriptors available */
	adapter->num_tx_desc_avail = adapter->num_tx_desc;

	/* Set checksum context */
	adapter->active_checksum_context = OFFLOAD_NONE;

	return 0;
}

/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
static void
ixgb_initialize_transmit_unit(struct adapter * adapter)
{
	u_int32_t       reg_tctl;
	u_int64_t       tdba = adapter->txdma.dma_paddr;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	IXGB_WRITE_REG(&adapter->hw, TDBAL,
		       (tdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));
	IXGB_WRITE_REG(&adapter->hw, TDLEN,
		       adapter->num_tx_desc *
		       sizeof(struct ixgb_tx_desc));

	/* Setup the HW Tx Head and Tail descriptor pointers */
	IXGB_WRITE_REG(&adapter->hw, TDH, 0);
	IXGB_WRITE_REG(&adapter->hw, TDT, 0);


	HW_DEBUGOUT2("Base = %x, Length = %x\n",
		     IXGB_READ_REG(&adapter->hw, TDBAL),
		     IXGB_READ_REG(&adapter->hw, TDLEN));

	IXGB_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);


	/* Program the Transmit Control Register */
	reg_tctl = IXGB_READ_REG(&adapter->hw, TCTL);
	reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
	IXGB_WRITE_REG(&adapter->hw, TCTL, reg_tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS;

	if (adapter->tx_int_delay > 0)
		adapter->txd_cmd |= IXGB_TX_DESC_CMD_IDE;
	return;
}

/*********************************************************************
 *
 *  Free all transmit related data structures.
 *
 **********************************************************************/
static void
ixgb_free_transmit_structures(struct adapter * adapter)
{
	struct ixgb_buffer *tx_buffer;
	int             i;

	INIT_DEBUGOUT("free_transmit_structures: begin");

	if (adapter->tx_buffer_area != NULL) {
		tx_buffer = adapter->tx_buffer_area;
		for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
			if (tx_buffer->m_head != NULL) {
				bus_dmamap_unload(adapter->txtag, tx_buffer->map);
				bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
				m_freem(tx_buffer->m_head);
			}
			tx_buffer->m_head = NULL;
		}
	}
	if (adapter->tx_buffer_area != NULL) {
		free(adapter->tx_buffer_area, M_DEVBUF);
		adapter->tx_buffer_area = NULL;
	}
	if (adapter->txtag != NULL) {
		bus_dma_tag_destroy(adapter->txtag);
		adapter->txtag = NULL;
	}
	return;
}

/*********************************************************************
 *
 *  The offload context needs to be set when we transfer the first
 *  packet of a particular protocol (TCP/UDP). We change the
 *  context only if the protocol type changes.
 *
 **********************************************************************/
static void
ixgb_transmit_checksum_setup(struct adapter * adapter,
			     struct mbuf * mp,
			     u_int8_t * txd_popts)
{
	struct ixgb_context_desc *TXD;
	struct ixgb_buffer *tx_buffer;
	int             curr_txd;

	if (mp->m_pkthdr.csum_flags) {

		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
			*txd_popts = IXGB_TX_DESC_POPTS_TXSM;
			if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
				return;
			else
				adapter->active_checksum_context = OFFLOAD_TCP_IP;
		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
			*txd_popts = IXGB_TX_DESC_POPTS_TXSM;
			if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
				return;
			else
				adapter->active_checksum_context = OFFLOAD_UDP_IP;
		} else {
			*txd_popts = 0;
			return;
		}
	} else {
		*txd_popts = 0;
		return;
	}

	/*
	 * If we reach this point, the checksum offload context needs to be
	 * reset.
	 */
	curr_txd = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[curr_txd];
	TXD = (struct ixgb_context_desc *) & adapter->tx_desc_base[curr_txd];

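	/*
	 * Context descriptor layout as used here: tucss is where the
	 * checksummed region starts (just past the Ethernet and IP
	 * headers), tucso is the byte offset at which the hardware stores
	 * the computed checksum, and a tucse of zero means "checksum to
	 * the end of the packet".
	 */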
	TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip);
	TXD->tucse = 0;

	TXD->mss = 0;

	if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
		TXD->tucso =
			ENET_HEADER_SIZE + sizeof(struct ip) +
			offsetof(struct tcphdr, th_sum);
	} else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
		TXD->tucso =
			ENET_HEADER_SIZE + sizeof(struct ip) +
			offsetof(struct udphdr, uh_sum);
	}
	TXD->cmd_type_len = IXGB_CONTEXT_DESC_CMD_TCP | IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE;

	tx_buffer->m_head = NULL;

	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;
	return;
}

/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 **********************************************************************/
static void
ixgb_clean_transmit_interrupts(struct adapter * adapter)
{
	int             i, num_avail;
	struct ixgb_buffer *tx_buffer;
	struct ixgb_tx_desc *tx_desc;

	IXGB_LOCK_ASSERT(adapter);

	if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
		return;

#ifdef _SV_
	adapter->clean_tx_interrupts++;
#endif
	num_avail = adapter->num_tx_desc_avail;
	i = adapter->oldest_used_tx_desc;

	tx_buffer = &adapter->tx_buffer_area[i];
	tx_desc = &adapter->tx_desc_base[i];

	while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) {

		tx_desc->status = 0;
		num_avail++;

		if (tx_buffer->m_head) {
			bus_dmamap_sync(adapter->txtag, tx_buffer->map,
					BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(adapter->txtag, tx_buffer->map);
			bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
		}
		if (++i == adapter->num_tx_desc)
			i = 0;

		tx_buffer = &adapter->tx_buffer_area[i];
		tx_desc = &adapter->tx_desc_base[i];
	}

	adapter->oldest_used_tx_desc = i;

	/*
	 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
	 * it is OK to send packets. If there are no pending descriptors,
	 * clear the timeout. Otherwise, if some descriptors have been freed,
	 * restart the timeout.
	 */
	if (num_avail > IXGB_TX_CLEANUP_THRESHOLD) {
		struct ifnet   *ifp = adapter->ifp;

		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (num_avail == adapter->num_tx_desc)
			adapter->tx_timer = 0;
		else if (num_avail == adapter->num_tx_desc_avail)
			adapter->tx_timer = IXGB_TX_TIMEOUT;
	}
	adapter->num_tx_desc_avail = num_avail;
	return;
}


/*********************************************************************
 *
 *  Get a buffer from system mbuf buffer pool.
 *
 **********************************************************************/
static int
ixgb_get_buf(int i, struct adapter * adapter,
	     struct mbuf * nmp)
{
	register struct mbuf *mp = nmp;
	struct ixgb_buffer *rx_buffer;
	struct ifnet   *ifp;
	bus_addr_t      paddr;
	int             error;

	ifp = adapter->ifp;

	if (mp == NULL) {

		mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);

		if (mp == NULL) {
			adapter->mbuf_alloc_failed++;
			return (ENOBUFS);
		}
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
	} else {
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

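	/*
	 * For standard-size frames, shift the payload by ETHER_ALIGN (2
	 * bytes) so the IP header ends up 32-bit aligned after the 14-byte
	 * Ethernet header.
	 */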
	if (ifp->if_mtu <= ETHERMTU) {
		m_adj(mp, ETHER_ALIGN);
	}
	rx_buffer = &adapter->rx_buffer_area[i];

	/*
	 * Using memory from the mbuf cluster pool, invoke the bus_dma
	 * machinery to arrange the memory mapping.
	 */
	error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
				mtod(mp, void *), mp->m_len,
				ixgb_dmamap_cb, &paddr, 0);
	if (error) {
		m_free(mp);
		return (error);
	}
	rx_buffer->m_head = mp;
	adapter->rx_desc_base[i].buff_addr = htole64(paddr);
	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);

	return (0);
}

/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
1826 *  rx_buffer per received packet, the maximum number of rx_buffer's
1827 *  that we'll need is equal to the number of receive descriptors
1828 *  that we've allocated.
1829 *
1830 **********************************************************************/
1831static int
1832ixgb_allocate_receive_structures(struct adapter * adapter)
1833{
1834	int             i, error;
1835	struct ixgb_buffer *rx_buffer;
1836
1837	if (!(adapter->rx_buffer_area =
1838	      (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
1839					    adapter->num_rx_desc, M_DEVBUF,
1840					    M_NOWAIT | M_ZERO))) {
1841		device_printf(adapter->dev,
1842		    "Unable to allocate rx_buffer memory\n");
1843		return (ENOMEM);
1844	}
1845	bzero(adapter->rx_buffer_area,
1846	      sizeof(struct ixgb_buffer) * adapter->num_rx_desc);
1847
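	/*
	 * One DMA tag is shared by all receive buffers; each mapping is a
	 * single contiguous segment of at most MCLBYTES (one mbuf cluster).
	 */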
	error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),/* parent */
				   PAGE_SIZE, 0,	/* alignment, bounds */
				   BUS_SPACE_MAXADDR,	/* lowaddr */
				   BUS_SPACE_MAXADDR,	/* highaddr */
				   NULL, NULL,	/* filter, filterarg */
				   MCLBYTES,	/* maxsize */
				   1,	/* nsegments */
				   MCLBYTES,	/* maxsegsize */
				   BUS_DMA_ALLOCNOW,	/* flags */
#if __FreeBSD_version >= 502000
				   NULL,	/* lockfunc */
				   NULL,	/* lockfuncarg */
#endif
				   &adapter->rxtag);
	if (error != 0) {
		device_printf(adapter->dev, "ixgb_allocate_receive_structures: "
		       "bus_dma_tag_create failed; error %u\n",
		       error);
		goto fail_0;
	}
	rx_buffer = adapter->rx_buffer_area;
	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
		error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
					  &rx_buffer->map);
		if (error != 0) {
			device_printf(adapter->dev,
			       "ixgb_allocate_receive_structures: "
			       "bus_dmamap_create failed; error %u\n",
			       error);
			goto fail_1;
		}
	}

	for (i = 0; i < adapter->num_rx_desc; i++) {
		if (ixgb_get_buf(i, adapter, NULL) == ENOBUFS) {
			adapter->rx_buffer_area[i].m_head = NULL;
			adapter->rx_desc_base[i].buff_addr = 0;
			return (ENOBUFS);
		}
	}

	return (0);
fail_1:
	bus_dma_tag_destroy(adapter->rxtag);
fail_0:
	adapter->rxtag = NULL;
	free(adapter->rx_buffer_area, M_DEVBUF);
	adapter->rx_buffer_area = NULL;
	return (error);
}

/*********************************************************************
 *
 *  Allocate and initialize receive structures.
 *
 **********************************************************************/
static int
ixgb_setup_receive_structures(struct adapter * adapter)
{
	bzero((void *)adapter->rx_desc_base,
	      (sizeof(struct ixgb_rx_desc)) * adapter->num_rx_desc);

	if (ixgb_allocate_receive_structures(adapter))
		return ENOMEM;

	/* Setup our descriptor pointers */
	adapter->next_rx_desc_to_check = 0;
	adapter->next_rx_desc_to_use = 0;
	return (0);
}

/*********************************************************************
 *
 *  Enable receive unit.
 *
 **********************************************************************/
static void
ixgb_initialize_receive_unit(struct adapter * adapter)
{
	u_int32_t       reg_rctl;
	u_int32_t       reg_rxcsum;
	u_int32_t       reg_rxdctl;
	struct ifnet   *ifp;
	u_int64_t       rdba = adapter->rxdma.dma_paddr;

	ifp = adapter->ifp;

	/*
	 * Make sure receives are disabled while setting up the descriptor
	 * ring
	 */
	reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
	IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN);

	/* Set the Receive Delay Timer Register */
	IXGB_WRITE_REG(&adapter->hw, RDTR,
		       adapter->rx_int_delay);

	/* Setup the Base and Length of the Rx Descriptor Ring */
	IXGB_WRITE_REG(&adapter->hw, RDBAL,
		       (rdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
	IXGB_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
		       sizeof(struct ixgb_rx_desc));

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	IXGB_WRITE_REG(&adapter->hw, RDH, 0);

	IXGB_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);

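	/*
	 * Program the receive descriptor control thresholds: prefetch
	 * (PTHRESH), host (HTHRESH) and write-back (WTHRESH).
	 */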
	reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
		| RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
		| RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
	IXGB_WRITE_REG(&adapter->hw, RXDCTL, reg_rxdctl);

	adapter->raidc = 1;
	if (adapter->raidc) {
		uint32_t        raidc;
		uint8_t         poll_threshold;
#define IXGB_RAIDC_POLL_DEFAULT 120

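		/*
		 * Derive the RAIDC poll threshold from the ring size,
		 * roughly num_rx_desc / 16, clamped to the 6-bit field;
		 * e.g. with 256 descriptors: (255 >> 3) = 31, 31 >> 1 = 15.
		 */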
		poll_threshold = ((adapter->num_rx_desc - 1) >> 3);
		poll_threshold >>= 1;
		poll_threshold &= 0x3F;
		raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE |
			(IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) |
			(adapter->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) |
			poll_threshold;
		IXGB_WRITE_REG(&adapter->hw, RAIDC, raidc);
	}
	/* Enable Receive Checksum Offload for TCP and UDP */
	if (ifp->if_capenable & IFCAP_RXCSUM) {
		reg_rxcsum = IXGB_READ_REG(&adapter->hw, RXCSUM);
		reg_rxcsum |= IXGB_RXCSUM_TUOFL;
		IXGB_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
	}
	/* Setup the Receive Control Register */
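	/*
	 * BAM accepts broadcast frames, RDMTS_1_2 sets the receive
	 * descriptor minimum threshold to half the ring, and SECRC strips
	 * the Ethernet CRC from received frames (bit names follow the
	 * usual Intel register naming).
	 */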
	reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
	reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
	reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC |
		IXGB_RCTL_CFF |
		(adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

	switch (adapter->rx_buffer_len) {
	default:
	case IXGB_RXBUFFER_2048:
		reg_rctl |= IXGB_RCTL_BSIZE_2048;
		break;
	case IXGB_RXBUFFER_4096:
		reg_rctl |= IXGB_RCTL_BSIZE_4096;
		break;
	case IXGB_RXBUFFER_8192:
		reg_rctl |= IXGB_RCTL_BSIZE_8192;
		break;
	case IXGB_RXBUFFER_16384:
		reg_rctl |= IXGB_RCTL_BSIZE_16384;
		break;
	}

	reg_rctl |= IXGB_RCTL_RXEN;

	/* Enable Receives */
	IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);

	return;
}

/*********************************************************************
 *
 *  Free receive related data structures.
 *
 **********************************************************************/
static void
ixgb_free_receive_structures(struct adapter * adapter)
{
	struct ixgb_buffer *rx_buffer;
	int             i;

	INIT_DEBUGOUT("free_receive_structures: begin");

	if (adapter->rx_buffer_area != NULL) {
		rx_buffer = adapter->rx_buffer_area;
		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
			if (rx_buffer->map != NULL) {
				bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
				bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
			}
			if (rx_buffer->m_head != NULL)
				m_freem(rx_buffer->m_head);
			rx_buffer->m_head = NULL;
		}
	}
	if (adapter->rx_buffer_area != NULL) {
		free(adapter->rx_buffer_area, M_DEVBUF);
		adapter->rx_buffer_area = NULL;
	}
	if (adapter->rxtag != NULL) {
		bus_dma_tag_destroy(adapter->rxtag);
		adapter->rxtag = NULL;
	}
	return;
}

/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor ring and passes data that has been
 *  DMA'ed into host memory up to the stack.
 *
 *  We loop at most count times if count is > 0, or until we are
 *  done if count < 0.
 *
 *********************************************************************/
static int
ixgb_process_receive_interrupts(struct adapter * adapter, int count)
{
	struct ifnet   *ifp;
	struct mbuf    *mp;
#if __FreeBSD_version < 500000
	struct ether_header *eh;
#endif
	int             eop = 0;
	int             len;
	u_int8_t        accept_frame = 0;
	int             i;
	int             next_to_use = 0;
	int             eop_desc;
	int		rx_npkts = 0;
	/* Pointer to the receive descriptor being examined. */
	struct ixgb_rx_desc *current_desc;

	IXGB_LOCK_ASSERT(adapter);

	ifp = adapter->ifp;
	i = adapter->next_rx_desc_to_check;
	next_to_use = adapter->next_rx_desc_to_use;
	eop_desc = adapter->next_rx_desc_to_check;
	current_desc = &adapter->rx_desc_base[i];

	if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD)) {
#ifdef _SV_
		adapter->no_pkts_avail++;
#endif
		return (rx_npkts);
	}
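	/*
	 * Walk the ring while the hardware has written back completed
	 * descriptors (DD set). A packet may span several descriptors;
	 * fragments are chained on adapter->fmp until the descriptor with
	 * EOP set is reached and the chain is handed up to the stack.
	 */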
	while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) && (count != 0)) {

		mp = adapter->rx_buffer_area[i].m_head;
		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
				BUS_DMASYNC_POSTREAD);
		accept_frame = 1;
		if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) {
			count--;
			eop = 1;
		} else {
			eop = 0;
		}
		len = current_desc->length;

		if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
			    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
					    IXGB_RX_DESC_ERRORS_RXE)) {
			accept_frame = 0;
		}
		if (accept_frame) {

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (adapter->fmp == NULL) {
				mp->m_pkthdr.len = len;
				adapter->fmp = mp;	/* Store the first mbuf */
				adapter->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				eop_desc = i;
				adapter->fmp->m_pkthdr.rcvif = ifp;

#if __FreeBSD_version < 500000
				eh = mtod(adapter->fmp, struct ether_header *);

				/* Remove ethernet header from mbuf */
				m_adj(adapter->fmp, sizeof(struct ether_header));
				ixgb_receive_checksum(adapter, current_desc,
						      adapter->fmp);

				if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
					VLAN_INPUT_TAG(eh, adapter->fmp,
						     current_desc->special);
				else
					ether_input(ifp, eh, adapter->fmp);
#else
				ixgb_receive_checksum(adapter, current_desc,
						      adapter->fmp);
#if __FreeBSD_version < 700000
				if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
					VLAN_INPUT_TAG(ifp, adapter->fmp,
						       current_desc->special);
#else
				if (current_desc->status & IXGB_RX_DESC_STATUS_VP) {
					adapter->fmp->m_pkthdr.ether_vtag =
					    current_desc->special;
					adapter->fmp->m_flags |= M_VLANTAG;
				}
#endif

				if (adapter->fmp != NULL) {
					IXGB_UNLOCK(adapter);
					(*ifp->if_input) (ifp, adapter->fmp);
					IXGB_LOCK(adapter);
					rx_npkts++;
				}
#endif
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
			adapter->rx_buffer_area[i].m_head = NULL;
		} else {
			adapter->dropped_pkts++;
			if (adapter->fmp != NULL)
				m_freem(adapter->fmp);
			adapter->fmp = NULL;
			adapter->lmp = NULL;
		}

		/* Zero out the receive descriptor's status */
		current_desc->status = 0;

		/* Advance our pointers to the next descriptor */
		if (++i == adapter->num_rx_desc) {
			i = 0;
			current_desc = adapter->rx_desc_base;
		} else
			current_desc++;
	}
	adapter->next_rx_desc_to_check = i;

	if (--i < 0)
		i = (adapter->num_rx_desc - 1);

	/*
	 * 82597EX: Workaround for a redundant write-back in the receive
	 * descriptor ring (causes memory corruption). Avoid re-using and
	 * re-submitting the most recently received RX descriptor(s) to the
	 * hardware.
	 *
	 * If the last written-back descriptor is the descriptor with the EOP
	 * bit set, hold back only that descriptor; otherwise, hold back every
	 * descriptor up to and including the last one with the EOP bit set.
	 */
	if (eop_desc != i) {
		if (++eop_desc == adapter->num_rx_desc)
			eop_desc = 0;
		i = eop_desc;
	}
	/*
	 * Replenish the descriptors with new mbufs up to the last descriptor
	 * with the EOP bit set.
	 */
	while (next_to_use != i) {
		current_desc = &adapter->rx_desc_base[next_to_use];
		if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
			    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
					     IXGB_RX_DESC_ERRORS_RXE))) {
			mp = adapter->rx_buffer_area[next_to_use].m_head;
			ixgb_get_buf(next_to_use, adapter, mp);
		} else {
			if (ixgb_get_buf(next_to_use, adapter, NULL) == ENOBUFS)
				break;
		}
		/* Advance our pointers to the next descriptor */
		if (++next_to_use == adapter->num_rx_desc) {
			next_to_use = 0;
			current_desc = adapter->rx_desc_base;
		} else
			current_desc++;
	}
	adapter->next_rx_desc_to_use = next_to_use;
	if (--next_to_use < 0)
		next_to_use = (adapter->num_rx_desc - 1);
	/* Advance the IXGB's Receive Queue #0  "Tail Pointer" */
	IXGB_WRITE_REG(&adapter->hw, RDT, next_to_use);

	return (rx_npkts);
}

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack of the checksum status so that it does not
 *  spend time verifying the checksum itself.
 *
 *********************************************************************/
static void
ixgb_receive_checksum(struct adapter * adapter,
		      struct ixgb_rx_desc * rx_desc,
		      struct mbuf * mp)
{
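	/*
	 * When IXSM is set the hardware reports no checksum information
	 * for this frame, so tell the stack nothing was checked.
	 */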
	if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) {
		mp->m_pkthdr.csum_flags = 0;
		return;
	}
	if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) {
		/* Did it pass? */
		if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) {
			/* IP Checksum Good */
			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;

		} else {
			mp->m_pkthdr.csum_flags = 0;
		}
	}
	if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) {
		/* Did it pass? */
		if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) {
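			/*
			 * csum_data of 0xffff together with CSUM_DATA_VALID |
			 * CSUM_PSEUDO_HDR is the stack's convention for a
			 * fully verified TCP/UDP checksum.
			 */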
			mp->m_pkthdr.csum_flags |=
				(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
			mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
	return;
}

static void
ixgb_enable_vlans(struct adapter * adapter)
{
	uint32_t        ctrl;

	ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
	ctrl |= IXGB_CTRL0_VME;
	IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);

	return;
}

static void
ixgb_enable_intr(struct adapter * adapter)
{
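	/*
	 * Unmask the interrupt causes the driver services: receiver timer
	 * (RXT0), transmit descriptor written back (TXDW), receive
	 * descriptor minimum threshold (RXDMT0), link status change (LSC)
	 * and receive overrun (RXO).
	 */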
	IXGB_WRITE_REG(&adapter->hw, IMS, (IXGB_INT_RXT0 | IXGB_INT_TXDW |
			    IXGB_INT_RXDMT0 | IXGB_INT_LSC | IXGB_INT_RXO));
	return;
}

static void
ixgb_disable_intr(struct adapter * adapter)
{
	IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
	return;
}

void
ixgb_write_pci_cfg(struct ixgb_hw * hw,
		   uint32_t reg,
		   uint16_t * value)
{
	pci_write_config(((struct ixgb_osdep *) hw->back)->dev, reg,
			 *value, 2);
}

/**********************************************************************
 *
 *  Update the board statistics counters.
 *
 **********************************************************************/
static void
ixgb_update_stats_counters(struct adapter * adapter)
{
	struct ifnet   *ifp;

	adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
	adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
	adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
	adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
	adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
	adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
	adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
	adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
	adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
	adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);

	adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
	adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
	adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
	adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
	adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
	adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
	adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
	adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
	adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
	adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
	adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
	adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
	adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
	adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
	adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
	adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
	adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
	adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
	adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
	adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
	adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
	adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
	adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
	adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
	adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
	adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
	adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);

	adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
	adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
	adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
	adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
	adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
	adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
	adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
	adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
	adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
	adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
	adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
	adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
	adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
	adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
	adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
	adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
	adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
	adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
	adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
	adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
	adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
	adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);

	ifp = adapter->ifp;

	/* Fill out the OS statistics structure */
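	/* Only the low 32 bits of the 64-bit hardware counters are used here. */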
	ifp->if_ipackets = adapter->stats.gprcl;
	ifp->if_opackets = adapter->stats.gptcl;
	ifp->if_ibytes = adapter->stats.gorcl;
	ifp->if_obytes = adapter->stats.gotcl;
	ifp->if_imcasts = adapter->stats.mprcl;
	ifp->if_collisions = 0;

	/* Rx Errors */
	ifp->if_ierrors =
		adapter->dropped_pkts +
		adapter->stats.crcerrs +
		adapter->stats.rnbc +
		adapter->stats.mpc +
		adapter->stats.rlec;
}

/**********************************************************************
 *
 *  This routine is invoked from the debug statistics sysctl handler.
 *  It provides a way to take a look at important statistics
 *  maintained by the driver and hardware.
 *
 **********************************************************************/
static void
ixgb_print_hw_stats(struct adapter * adapter)
{
	char            buf_speed[100], buf_type[100];
	ixgb_bus_speed  bus_speed;
	ixgb_bus_type   bus_type;
	device_t dev;

	dev = adapter->dev;
#ifdef _SV_
	device_printf(dev, "Packets not Avail = %ld\n",
	       adapter->no_pkts_avail);
	device_printf(dev, "CleanTxInterrupts = %ld\n",
	       adapter->clean_tx_interrupts);
	device_printf(dev, "ICR RXDMT0 = %lld\n",
	       (long long)adapter->sv_stats.icr_rxdmt0);
	device_printf(dev, "ICR RXO = %lld\n",
	       (long long)adapter->sv_stats.icr_rxo);
	device_printf(dev, "ICR RXT0 = %lld\n",
	       (long long)adapter->sv_stats.icr_rxt0);
	device_printf(dev, "ICR TXDW = %lld\n",
	       (long long)adapter->sv_stats.icr_TXDW);
#endif				/* _SV_ */

	bus_speed = adapter->hw.bus.speed;
	bus_type = adapter->hw.bus.type;
	sprintf(buf_speed,
		bus_speed == ixgb_bus_speed_33 ? "33MHz" :
		bus_speed == ixgb_bus_speed_66 ? "66MHz" :
		bus_speed == ixgb_bus_speed_100 ? "100MHz" :
		bus_speed == ixgb_bus_speed_133 ? "133MHz" :
		"UNKNOWN");
	device_printf(dev, "PCI_Bus_Speed = %s\n",
	       buf_speed);

	sprintf(buf_type,
		bus_type == ixgb_bus_type_pci ? "PCI" :
		bus_type == ixgb_bus_type_pcix ? "PCI-X" :
		"UNKNOWN");
	device_printf(dev, "PCI_Bus_Type = %s\n",
	       buf_type);

	device_printf(dev, "Tx Descriptors not Avail1 = %ld\n",
	       adapter->no_tx_desc_avail1);
	device_printf(dev, "Tx Descriptors not Avail2 = %ld\n",
	       adapter->no_tx_desc_avail2);
	device_printf(dev, "Std Mbuf Failed = %ld\n",
	       adapter->mbuf_alloc_failed);
	device_printf(dev, "Std Cluster Failed = %ld\n",
	       adapter->mbuf_cluster_failed);

	device_printf(dev, "Defer count = %lld\n",
	       (long long)adapter->stats.dc);
	device_printf(dev, "Missed Packets = %lld\n",
	       (long long)adapter->stats.mpc);
	device_printf(dev, "Receive No Buffers = %lld\n",
	       (long long)adapter->stats.rnbc);
	device_printf(dev, "Receive length errors = %lld\n",
	       (long long)adapter->stats.rlec);
	device_printf(dev, "Crc errors = %lld\n",
	       (long long)adapter->stats.crcerrs);
	device_printf(dev, "Driver dropped packets = %ld\n",
	       adapter->dropped_pkts);

	device_printf(dev, "XON Rcvd = %lld\n",
	       (long long)adapter->stats.xonrxc);
	device_printf(dev, "XON Xmtd = %lld\n",
	       (long long)adapter->stats.xontxc);
	device_printf(dev, "XOFF Rcvd = %lld\n",
	       (long long)adapter->stats.xoffrxc);
	device_printf(dev, "XOFF Xmtd = %lld\n",
	       (long long)adapter->stats.xofftxc);

	device_printf(dev, "Good Packets Rcvd = %lld\n",
	       (long long)adapter->stats.gprcl);
	device_printf(dev, "Good Packets Xmtd = %lld\n",
	       (long long)adapter->stats.gptcl);

	device_printf(dev, "Jumbo frames recvd = %lld\n",
	       (long long)adapter->stats.jprcl);
	device_printf(dev, "Jumbo frames Xmtd = %lld\n",
	       (long long)adapter->stats.jptcl);

	return;
}

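/*
 * Sysctl handler: writing 1 to the stats sysctl node dumps the hardware
 * statistics via ixgb_print_hw_stats().
 */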
static int
ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	int             error;
	int             result;
	struct adapter *adapter;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		adapter = (struct adapter *) arg1;
		ixgb_print_hw_stats(adapter);
	}
	return (error);
}