/*******************************************************************************

Copyright (c) 2001-2004, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

/*$FreeBSD: head/sys/dev/ixgb/if_ixgb.c 192147 2009-05-15 17:02:11Z imp $*/

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <dev/ixgb/if_ixgb.h>

/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
int             ixgb_display_debug_stats = 0;

/*********************************************************************
 *  Linked list of board private structures for all NICs found
 *********************************************************************/

struct adapter *ixgb_adapter_list = NULL;



/*********************************************************************
 *  Driver version
 *********************************************************************/

char            ixgb_driver_version[] = "1.0.6";
char            ixgb_copyright[] = "Copyright (c) 2001-2004 Intel Corporation.";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixgb_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixgb_vendor_info_t ixgb_vendor_info_array[] =
{
	/* Intel(R) PRO/10000 Network Connection */
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX, PCI_ANY_ID, PCI_ANY_ID, 0},
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, PCI_ANY_ID, PCI_ANY_ID, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings for all supported NICs.
 *********************************************************************/

static char    *ixgb_strings[] = {
	"Intel(R) PRO/10GbE Network Driver"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixgb_probe(device_t);
static int      ixgb_attach(device_t);
static int      ixgb_detach(device_t);
static int      ixgb_shutdown(device_t);
static void     ixgb_intr(void *);
static void     ixgb_start(struct ifnet *);
static void     ixgb_start_locked(struct ifnet *);
static int      ixgb_ioctl(struct ifnet *, IOCTL_CMD_TYPE, caddr_t);
static void     ixgb_watchdog(struct ifnet *);
static void     ixgb_init(void *);
static void     ixgb_init_locked(struct adapter *);
static void     ixgb_stop(void *);
static void     ixgb_media_status(struct ifnet *, struct ifmediareq *);
static int      ixgb_media_change(struct ifnet *);
static void     ixgb_identify_hardware(struct adapter *);
static int      ixgb_allocate_pci_resources(struct adapter *);
static void     ixgb_free_pci_resources(struct adapter *);
static void     ixgb_local_timer(void *);
static int      ixgb_hardware_init(struct adapter *);
static void     ixgb_setup_interface(device_t, struct adapter *);
static int      ixgb_setup_transmit_structures(struct adapter *);
static void     ixgb_initialize_transmit_unit(struct adapter *);
static int      ixgb_setup_receive_structures(struct adapter *);
static void     ixgb_initialize_receive_unit(struct adapter *);
static void     ixgb_enable_intr(struct adapter *);
static void     ixgb_disable_intr(struct adapter *);
static void     ixgb_free_transmit_structures(struct adapter *);
static void     ixgb_free_receive_structures(struct adapter *);
static void     ixgb_update_stats_counters(struct adapter *);
static void     ixgb_clean_transmit_interrupts(struct adapter *);
static int      ixgb_allocate_receive_structures(struct adapter *);
static int      ixgb_allocate_transmit_structures(struct adapter *);
static void     ixgb_process_receive_interrupts(struct adapter *, int);
static void
ixgb_receive_checksum(struct adapter *,
		      struct ixgb_rx_desc * rx_desc,
		      struct mbuf *);
static void
ixgb_transmit_checksum_setup(struct adapter *,
			     struct mbuf *,
			     u_int8_t *);
static void     ixgb_set_promisc(struct adapter *);
static void     ixgb_disable_promisc(struct adapter *);
static void     ixgb_set_multi(struct adapter *);
static void     ixgb_print_hw_stats(struct adapter *);
static void     ixgb_print_link_status(struct adapter *);
static int
ixgb_get_buf(int i, struct adapter *,
	     struct mbuf *);
static void     ixgb_enable_vlans(struct adapter * adapter);
static int      ixgb_encap(struct adapter * adapter, struct mbuf * m_head);
static int      ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int
ixgb_dma_malloc(struct adapter *, bus_size_t,
		struct ixgb_dma_alloc *, int);
static void     ixgb_dma_free(struct adapter *, struct ixgb_dma_alloc *);
#ifdef DEVICE_POLLING
static poll_handler_t ixgb_poll;
#endif

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixgb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixgb_probe),
	DEVMETHOD(device_attach, ixgb_attach),
	DEVMETHOD(device_detach, ixgb_detach),
	DEVMETHOD(device_shutdown, ixgb_shutdown),
	{0, 0}
};

static driver_t ixgb_driver = {
	"ixgb", ixgb_methods, sizeof(struct adapter),
};

static devclass_t ixgb_devclass;
DRIVER_MODULE(ixgb, pci, ixgb_driver, ixgb_devclass, 0, 0);

MODULE_DEPEND(ixgb, pci, 1, 1, 1);
MODULE_DEPEND(ixgb, ether, 1, 1, 1);

/* some defines for controlling descriptor fetches in h/w */
#define RXDCTL_PTHRESH_DEFAULT 128	/* chip considers prefetch below this */
#define RXDCTL_HTHRESH_DEFAULT 16	/* chip will only prefetch if tail is
					 * pushed this many descriptors from
					 * head */
#define RXDCTL_WTHRESH_DEFAULT 0	/* chip writes back at this many or RXT0 */
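
/*
 * A sketch of how these defaults are typically combined into the RXDCTL
 * register when the receive unit is initialized (the shift macro names are
 * assumed to come from the shared ixgb header):
 *
 *	reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT |
 *	    RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT |
 *	    RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
 *	IXGB_WRITE_REG(&adapter->hw, RXDCTL, reg_rxdctl);
 */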


/*********************************************************************
 *  Device identification routine
 *
 *  ixgb_probe determines if the driver should be loaded on
 *  adapter based on PCI vendor/device id of the adapter.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgb_probe(device_t dev)
{
	ixgb_vendor_info_t *ent;

	u_int16_t       pci_vendor_id = 0;
	u_int16_t       pci_device_id = 0;
	u_int16_t       pci_subvendor_id = 0;
	u_int16_t       pci_subdevice_id = 0;
	char            adapter_name[60];

	INIT_DEBUGOUT("ixgb_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != IXGB_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixgb_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&

		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == PCI_ANY_ID)) &&

		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == PCI_ANY_ID))) {
			sprintf(adapter_name, "%s, Version - %s",
				ixgb_strings[ent->index],
				ixgb_driver_version);
			device_set_desc_copy(dev, adapter_name);
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}

	return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgb_attach(device_t dev)
{
	struct adapter *adapter;
	int             tsize, rsize;
	int             error = 0;

	printf("ixgb%d: %s\n", device_get_unit(dev), ixgb_copyright);
	INIT_DEBUGOUT("ixgb_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	if (!(adapter = device_get_softc(dev))) {
		printf("ixgb: adapter structure allocation failed\n");
		return (ENOMEM);
	}
	bzero(adapter, sizeof(struct adapter));
	adapter->dev = dev;
	adapter->osdep.dev = dev;
	adapter->unit = device_get_unit(dev);
	IXGB_LOCK_INIT(adapter, device_get_nameunit(dev));

	if (ixgb_adapter_list != NULL)
		ixgb_adapter_list->prev = adapter;
	adapter->next = ixgb_adapter_list;
	ixgb_adapter_list = adapter;

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
			SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
			OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
			(void *)adapter, 0,
			ixgb_sysctl_stats, "I", "Statistics");

	callout_init(&adapter->timer, CALLOUT_MPSAFE);

	/* Determine hardware revision */
	ixgb_identify_hardware(adapter);

	/* Parameters (to be read from user) */
	adapter->num_tx_desc = IXGB_MAX_TXD;
	adapter->num_rx_desc = IXGB_MAX_RXD;
	adapter->tx_int_delay = TIDV;
	adapter->rx_int_delay = RDTR;
	adapter->rx_buffer_len = IXGB_RXBUFFER_2048;

	adapter->hw.fc.high_water = FCRTH;
	adapter->hw.fc.low_water = FCRTL;
	adapter->hw.fc.pause_time = FCPAUSE;
	adapter->hw.fc.send_xon = TRUE;
	adapter->hw.fc.type = FLOW_CONTROL;


	/* Set the max frame size assuming standard ethernet sized frames */
	adapter->hw.max_frame_size =
		ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	if (ixgb_allocate_pci_resources(adapter)) {
		printf("ixgb%d: Allocation of PCI resources failed\n",
		       adapter->unit);
		error = ENXIO;
		goto err_pci;
	}
	tsize = IXGB_ROUNDUP(adapter->num_tx_desc *
			     sizeof(struct ixgb_tx_desc), 4096);

	/* Allocate Transmit Descriptor ring */
	if (ixgb_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
		printf("ixgb%d: Unable to allocate TxDescriptor memory\n",
		       adapter->unit);
		error = ENOMEM;
		goto err_tx_desc;
	}
	adapter->tx_desc_base = (struct ixgb_tx_desc *) adapter->txdma.dma_vaddr;

	rsize = IXGB_ROUNDUP(adapter->num_rx_desc *
			     sizeof(struct ixgb_rx_desc), 4096);

	/* Allocate Receive Descriptor ring */
	if (ixgb_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
		printf("ixgb%d: Unable to allocate rx_desc memory\n",
		       adapter->unit);
		error = ENOMEM;
		goto err_rx_desc;
	}
	adapter->rx_desc_base = (struct ixgb_rx_desc *) adapter->rxdma.dma_vaddr;

	/* Initialize the hardware */
	if (ixgb_hardware_init(adapter)) {
		printf("ixgb%d: Unable to initialize the hardware\n",
		       adapter->unit);
		error = EIO;
		goto err_hw_init;
	}
	/* Setup OS specific network interface */
	ixgb_setup_interface(dev, adapter);

	/* Initialize statistics */
	ixgb_clear_hw_cntrs(&adapter->hw);
	ixgb_update_stats_counters(adapter);

	INIT_DEBUGOUT("ixgb_attach: end");
	return (0);

err_hw_init:
	ixgb_dma_free(adapter, &adapter->rxdma);
err_rx_desc:
	ixgb_dma_free(adapter, &adapter->txdma);
err_tx_desc:
err_pci:
	ixgb_free_pci_resources(adapter);
	sysctl_ctx_free(&adapter->sysctl_ctx);
	return (error);

}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgb_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet   *ifp = adapter->ifp;

	INIT_DEBUGOUT("ixgb_detach: begin");

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	IXGB_LOCK(adapter);
	adapter->in_detach = 1;

	ixgb_stop(adapter);
	IXGB_UNLOCK(adapter);

#if __FreeBSD_version < 500000
	ether_ifdetach(adapter->ifp, ETHER_BPF_SUPPORTED);
#else
	ether_ifdetach(adapter->ifp);
#endif
	ixgb_free_pci_resources(adapter);
#if __FreeBSD_version >= 500000
	if_free(adapter->ifp);
#endif

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base) {
		ixgb_dma_free(adapter, &adapter->txdma);
		adapter->tx_desc_base = NULL;
	}
	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base) {
		ixgb_dma_free(adapter, &adapter->rxdma);
		adapter->rx_desc_base = NULL;
	}
	/* Remove from the adapter list */
	if (ixgb_adapter_list == adapter)
		ixgb_adapter_list = adapter->next;
	if (adapter->next != NULL)
		adapter->next->prev = adapter->prev;
	if (adapter->prev != NULL)
		adapter->prev->next = adapter->next;

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	ifp->if_timer = 0;

	IXGB_LOCK_DESTROY(adapter);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixgb_shutdown(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	IXGB_LOCK(adapter);
	ixgb_stop(adapter);
	IXGB_UNLOCK(adapter);
	return (0);
}


/*********************************************************************
 *  Transmit entry point
 *
 *  ixgb_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available stack is notified and
 *  the packet is requeued.
 **********************************************************************/

static void
ixgb_start_locked(struct ifnet * ifp)
{
	struct mbuf    *m_head;
	struct adapter *adapter = ifp->if_softc;

	IXGB_LOCK_ASSERT(adapter);

	if (!adapter->link_active)
		return;

	while (ifp->if_snd.ifq_head != NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);

		if (m_head == NULL)
			break;

		if (ixgb_encap(adapter, m_head)) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IF_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
#if __FreeBSD_version < 500000
		if (ifp->if_bpf)
			bpf_mtap(ifp, m_head);
#else
		ETHER_BPF_MTAP(ifp, m_head);
#endif
		/* Set timeout in case hardware has problems transmitting */
		ifp->if_timer = IXGB_TX_TIMEOUT;

	}
	return;
}

static void
ixgb_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;

	IXGB_LOCK(adapter);
	ixgb_start_locked(ifp);
	IXGB_UNLOCK(adapter);
	return;
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixgb_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixgb_ioctl(struct ifnet * ifp, IOCTL_CMD_TYPE command, caddr_t data)
{
	int             mask, error = 0;
	struct ifreq   *ifr = (struct ifreq *) data;
	struct adapter *adapter = ifp->if_softc;

	if (adapter->in_detach)
		goto out;

	switch (command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
		ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
			error = EINVAL;
		} else {
			IXGB_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->hw.max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

			ixgb_init_locked(adapter);
			IXGB_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
		IXGB_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				ixgb_init_locked(adapter);
			}
			ixgb_disable_promisc(adapter);
			ixgb_set_promisc(adapter);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				ixgb_stop(adapter);
			}
		}
		IXGB_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGB_LOCK(adapter);
			ixgb_disable_intr(adapter);
			ixgb_set_multi(adapter);
			ixgb_enable_intr(adapter);
			IXGB_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(ixgb_poll, ifp);
				if (error)
					return(error);
				IXGB_LOCK(adapter);
				ixgb_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				IXGB_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				IXGB_LOCK(adapter);
				ixgb_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				IXGB_UNLOCK(adapter);
			}
		}
#endif /* DEVICE_POLLING */
		if (mask & IFCAP_HWCSUM) {
			if (IFCAP_HWCSUM & ifp->if_capenable)
				ifp->if_capenable &= ~IFCAP_HWCSUM;
			else
				ifp->if_capenable |= IFCAP_HWCSUM;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixgb_init(adapter);
		}
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%X)\n", (int)command);
		error = EINVAL;
	}

out:
	return (error);
}

/*********************************************************************
 *  Watchdog entry point
 *
 *  This routine is called whenever hardware quits transmitting.
 *
 **********************************************************************/

static void
ixgb_watchdog(struct ifnet * ifp)
{
	struct adapter *adapter;
	adapter = ifp->if_softc;

	/*
	 * If we are in this routine because of pause frames, then don't
	 * reset the hardware.
	 */
	if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_TXOFF) {
		ifp->if_timer = IXGB_TX_TIMEOUT;
		return;
	}
	printf("ixgb%d: watchdog timeout -- resetting\n", adapter->unit);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;


	ixgb_stop(adapter);
	ixgb_init(adapter);


	ifp->if_oerrors++;

	return;
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  init entry point in network interface structure. It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static void
ixgb_init_locked(struct adapter *adapter)
{
	struct ifnet   *ifp;

	INIT_DEBUGOUT("ixgb_init: begin");

	IXGB_LOCK_ASSERT(adapter);

	ixgb_stop(adapter);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), adapter->hw.curr_mac_addr,
	      IXGB_ETH_LENGTH_OF_ADDRESS);

	/* Initialize the hardware */
	if (ixgb_hardware_init(adapter)) {
		printf("ixgb%d: Unable to initialize the hardware\n",
		       adapter->unit);
		return;
	}
	ixgb_enable_vlans(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgb_setup_transmit_structures(adapter)) {
		printf("ixgb%d: Could not setup transmit structures\n",
		       adapter->unit);
		ixgb_stop(adapter);
		return;
	}
	ixgb_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	ixgb_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (ixgb_setup_receive_structures(adapter)) {
		printf("ixgb%d: Could not setup receive structures\n",
		       adapter->unit);
		ixgb_stop(adapter);
		return;
	}
	ixgb_initialize_receive_unit(adapter);

	/* Don't lose promiscuous settings */
	ixgb_set_promisc(adapter);

	ifp = adapter->ifp;
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;


	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = IXGB_CHECKSUM_FEATURES;
	else
		ifp->if_hwassist = 0;


	/* Enable jumbo frames */
	if (ifp->if_mtu > ETHERMTU) {
		uint32_t        temp_reg;
		IXGB_WRITE_REG(&adapter->hw, MFS,
			       adapter->hw.max_frame_size << IXGB_MFS_SHIFT);
		temp_reg = IXGB_READ_REG(&adapter->hw, CTRL0);
		temp_reg |= IXGB_CTRL0_JFE;
		IXGB_WRITE_REG(&adapter->hw, CTRL0, temp_reg);
	}
	callout_reset(&adapter->timer, 2 * hz, ixgb_local_timer, adapter);
	ixgb_clear_hw_cntrs(&adapter->hw);
#ifdef DEVICE_POLLING
	/*
	 * Only disable interrupts if we are polling, make sure they are on
	 * otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		ixgb_disable_intr(adapter);
	else
#endif
		ixgb_enable_intr(adapter);

	return;
}

static void
ixgb_init(void *arg)
{
	struct adapter *adapter = arg;

	IXGB_LOCK(adapter);
	ixgb_init_locked(adapter);
	IXGB_UNLOCK(adapter);
	return;
}

#ifdef DEVICE_POLLING
static void
ixgb_poll_locked(struct ifnet * ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	u_int32_t       reg_icr;

	IXGB_LOCK_ASSERT(adapter);

	if (cmd == POLL_AND_CHECK_STATUS) {
		reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
		if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
			callout_stop(&adapter->timer);
			ixgb_check_for_link(&adapter->hw);
			ixgb_print_link_status(adapter);
			callout_reset(&adapter->timer, 2 * hz, ixgb_local_timer,
			    adapter);
		}
	}
	ixgb_process_receive_interrupts(adapter, count);
	ixgb_clean_transmit_interrupts(adapter);

	if (ifp->if_snd.ifq_head != NULL)
		ixgb_start_locked(ifp);
}

static void
ixgb_poll(struct ifnet * ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;

	IXGB_LOCK(adapter);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		ixgb_poll_locked(ifp, cmd, count);
	IXGB_UNLOCK(adapter);
}
#endif /* DEVICE_POLLING */

/*********************************************************************
 *
 *  Interrupt Service routine
 *
 **********************************************************************/

static void
ixgb_intr(void *arg)
{
	u_int32_t       loop_cnt = IXGB_MAX_INTR;
	u_int32_t       reg_icr;
	struct ifnet   *ifp;
	struct adapter *adapter = arg;
	boolean_t       rxdmt0 = FALSE;

	IXGB_LOCK(adapter);

	ifp = adapter->ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		IXGB_UNLOCK(adapter);
		return;
	}
#endif

	reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
	if (reg_icr == 0) {
		IXGB_UNLOCK(adapter);
		return;
	}

	if (reg_icr & IXGB_INT_RXDMT0)
		rxdmt0 = TRUE;

#ifdef _SV_
	if (reg_icr & IXGB_INT_RXDMT0)
		adapter->sv_stats.icr_rxdmt0++;
	if (reg_icr & IXGB_INT_RXO)
		adapter->sv_stats.icr_rxo++;
	if (reg_icr & IXGB_INT_RXT0)
		adapter->sv_stats.icr_rxt0++;
	if (reg_icr & IXGB_INT_TXDW)
		adapter->sv_stats.icr_TXDW++;
#endif				/* _SV_ */

	/* Link status change */
	if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
		callout_stop(&adapter->timer);
		ixgb_check_for_link(&adapter->hw);
		ixgb_print_link_status(adapter);
		callout_reset(&adapter->timer, 2 * hz, ixgb_local_timer,
		    adapter);
	}
	while (loop_cnt > 0) {
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ixgb_process_receive_interrupts(adapter, -1);
			ixgb_clean_transmit_interrupts(adapter);
		}
		loop_cnt--;
	}

	if (rxdmt0 && adapter->raidc) {
		IXGB_WRITE_REG(&adapter->hw, IMC, IXGB_INT_RXDMT0);
		IXGB_WRITE_REG(&adapter->hw, IMS, IXGB_INT_RXDMT0);
	}
	if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_snd.ifq_head != NULL)
		ixgb_start_locked(ifp);

	IXGB_UNLOCK(adapter);
	return;
}


/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixgb_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("ixgb_media_status: begin");

	ixgb_check_for_link(&adapter->hw);
	ixgb_print_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->hw.link_up)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;

	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixgb_media_change(struct ifnet * ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;

	INIT_DEBUGOUT("ixgb_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	return (0);
}

/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixgb_encap(struct adapter * adapter, struct mbuf * m_head)
{
	u_int8_t        txd_popts;
	int             i, j, error, nsegs;

#if __FreeBSD_version < 500000
	struct ifvlan  *ifv = NULL;
#endif
	bus_dma_segment_t segs[IXGB_MAX_SCATTER];
	bus_dmamap_t	map;
	struct ixgb_buffer *tx_buffer = NULL;
	struct ixgb_tx_desc *current_tx_desc = NULL;
	struct ifnet   *ifp = adapter->ifp;

	/*
	 * Force a cleanup if number of TX descriptors available hits the
	 * threshold
	 */
	if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
		ixgb_clean_transmit_interrupts(adapter);
	}
	if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
		adapter->no_tx_desc_avail1++;
		return (ENOBUFS);
	}
	/*
	 * Map the packet for DMA.
	 */
	if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) {
		adapter->no_tx_map_avail++;
		return (ENOMEM);
	}
	error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs,
					&nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		adapter->no_tx_dma_setup++;
		printf("ixgb%d: ixgb_encap: bus_dmamap_load_mbuf failed; "
		       "error %u\n", adapter->unit, error);
		bus_dmamap_destroy(adapter->txtag, map);
		return (error);
	}
	KASSERT(nsegs != 0, ("ixgb_encap: empty packet"));

	if (nsegs > adapter->num_tx_desc_avail) {
		adapter->no_tx_desc_avail2++;
		bus_dmamap_destroy(adapter->txtag, map);
		return (ENOBUFS);
	}
	if (ifp->if_hwassist > 0) {
		ixgb_transmit_checksum_setup(adapter, m_head,
					     &txd_popts);
	} else
		txd_popts = 0;

	/* Find out if we are in vlan mode */
#if __FreeBSD_version < 500000
	if ((m_head->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
	    m_head->m_pkthdr.rcvif != NULL &&
	    m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
		ifv = m_head->m_pkthdr.rcvif->if_softc;
#elseif __FreeBSD_version < 700000
	mtag = VLAN_OUTPUT_TAG(ifp, m_head);
#endif
	i = adapter->next_avail_tx_desc;
	for (j = 0; j < nsegs; j++) {
		tx_buffer = &adapter->tx_buffer_area[i];
		current_tx_desc = &adapter->tx_desc_base[i];

		current_tx_desc->buff_addr = htole64(segs[j].ds_addr);
		current_tx_desc->cmd_type_len = (adapter->txd_cmd | segs[j].ds_len);
		current_tx_desc->popts = txd_popts;
		if (++i == adapter->num_tx_desc)
			i = 0;

		tx_buffer->m_head = NULL;
	}

	adapter->num_tx_desc_avail -= nsegs;
	adapter->next_avail_tx_desc = i;

#if __FreeBSD_version < 500000
	if (ifv != NULL) {
		/* Set the vlan id */
		current_tx_desc->vlan = ifv->ifv_tag;
#elseif __FreeBSD_version < 700000
	if (mtag != NULL) {
		/* Set the vlan id */
		current_tx_desc->vlan = VLAN_TAG_VALUE(mtag);
#else
	if (m_head->m_flags & M_VLANTAG) {
		current_tx_desc->vlan = m_head->m_pkthdr.ether_vtag;
#endif

		/* Tell hardware to add tag */
		current_tx_desc->cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
	}
	tx_buffer->m_head = m_head;
	tx_buffer->map = map;
	bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	current_tx_desc->cmd_type_len |= (IXGB_TX_DESC_CMD_EOP);

	/*
	 * Advance the Transmit Descriptor Tail (TDT); this tells the
	 * hardware that this frame is available to transmit.
	 */
	IXGB_WRITE_REG(&adapter->hw, TDT, i);

	return (0);
}
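
/*
 * The BUS_DMASYNC_PREWRITE sync above needs to happen before the TDT write;
 * once the tail register is advanced, the hardware may fetch the descriptors
 * and the mapped frame data at any time.
 */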

static void
ixgb_set_promisc(struct adapter * adapter)
{

	u_int32_t       reg_rctl;
	struct ifnet   *ifp = adapter->ifp;

	reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
		IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= IXGB_RCTL_MPE;
		reg_rctl &= ~IXGB_RCTL_UPE;
		IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
	}
	return;
}

static void
ixgb_disable_promisc(struct adapter * adapter)
{
	u_int32_t       reg_rctl;

	reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);

	reg_rctl &= (~IXGB_RCTL_UPE);
	reg_rctl &= (~IXGB_RCTL_MPE);
	IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);

	return;
}


/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever multicast address list is updated.
 *
 **********************************************************************/

static void
ixgb_set_multi(struct adapter * adapter)
{
	u_int32_t       reg_rctl = 0;
	u_int8_t        mta[MAX_NUM_MULTICAST_ADDRESSES * IXGB_ETH_LENGTH_OF_ADDRESS];
	struct ifmultiaddr *ifma;
	int             mcnt = 0;
	struct ifnet   *ifp = adapter->ifp;

	IOCTL_DEBUGOUT("ixgb_set_multi: begin");

	IF_ADDR_LOCK(ifp);
#if __FreeBSD_version < 500000
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
#else
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
#endif
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
		      &mta[mcnt * IXGB_ETH_LENGTH_OF_ADDRESS], IXGB_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}
	IF_ADDR_UNLOCK(ifp);

	if (mcnt > MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
		reg_rctl |= IXGB_RCTL_MPE;
		IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
	} else
		ixgb_mc_addr_list_update(&adapter->hw, mta, mcnt, 0);

	return;
}


/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status and updates statistics.
 *
 **********************************************************************/

static void
ixgb_local_timer(void *arg)
{
	struct ifnet   *ifp;
	struct adapter *adapter = arg;
	ifp = adapter->ifp;

	IXGB_LOCK(adapter);

	ixgb_check_for_link(&adapter->hw);
	ixgb_print_link_status(adapter);
	ixgb_update_stats_counters(adapter);
	if (ixgb_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
		ixgb_print_hw_stats(adapter);
	}
	callout_reset(&adapter->timer, 2 * hz, ixgb_local_timer, adapter);

	IXGB_UNLOCK(adapter);
	return;
}

static void
ixgb_print_link_status(struct adapter * adapter)
{
	if (adapter->hw.link_up) {
		if (!adapter->link_active) {
			printf("ixgb%d: Link is up %d Mbps %s \n",
			       adapter->unit,
			       10000,
			       "Full Duplex");
			adapter->link_active = 1;
		}
	} else {
		if (adapter->link_active) {
			printf("ixgb%d: Link is Down \n", adapter->unit);
			adapter->link_active = 0;
		}
	}

	return;
}



/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

static void
ixgb_stop(void *arg)
{
	struct ifnet   *ifp;
	struct adapter *adapter = arg;
	ifp = adapter->ifp;

	IXGB_LOCK_ASSERT(adapter);

	INIT_DEBUGOUT("ixgb_stop: begin\n");
	ixgb_disable_intr(adapter);
	adapter->hw.adapter_stopped = FALSE;
	ixgb_adapter_stop(&adapter->hw);
	callout_stop(&adapter->timer);
	ixgb_free_transmit_structures(adapter);
	ixgb_free_receive_structures(adapter);


	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	return;
}


/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
static void
ixgb_identify_hardware(struct adapter * adapter)
{
	device_t        dev = adapter->dev;

	/* Make sure our PCI config space has the necessary stuff set */
	adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
	      (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
		printf("ixgb%d: Memory Access and/or Bus Master bits were not set!\n",
		       adapter->unit);
		adapter->hw.pci_cmd_word |=
			(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
		pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
	}
	/* Save off the information about this board */
	adapter->hw.vendor_id = pci_get_vendor(dev);
	adapter->hw.device_id = pci_get_device(dev);
	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	/* Set MacType, etc. based on this PCI info */
	switch (adapter->hw.device_id) {
	case IXGB_DEVICE_ID_82597EX:
	case IXGB_DEVICE_ID_82597EX_SR:
		adapter->hw.mac_type = ixgb_82597;
		break;
	default:
		INIT_DEBUGOUT1("Unknown device id 0x%x", adapter->hw.device_id);
		printf("ixgb%d: unsupported device id 0x%x\n", adapter->unit, adapter->hw.device_id);
	}

	return;
}

static int
ixgb_allocate_pci_resources(struct adapter * adapter)
{
	int             rid;
	device_t        dev = adapter->dev;

	rid = IXGB_MMBA;
	adapter->res_memory = bus_alloc_resource(dev, SYS_RES_MEMORY,
						 &rid, 0, ~0, 1,
						 RF_ACTIVE);
	if (!(adapter->res_memory)) {
		printf("ixgb%d: Unable to allocate bus resource: memory\n",
		       adapter->unit);
		return (ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
		rman_get_bustag(adapter->res_memory);
	adapter->osdep.mem_bus_space_handle =
		rman_get_bushandle(adapter->res_memory);
	adapter->hw.hw_addr = (uint8_t *) & adapter->osdep.mem_bus_space_handle;

	rid = 0x0;
	adapter->res_interrupt = bus_alloc_resource(dev, SYS_RES_IRQ,
						    &rid, 0, ~0, 1,
						  RF_SHAREABLE | RF_ACTIVE);
	if (!(adapter->res_interrupt)) {
		printf("ixgb%d: Unable to allocate bus resource: interrupt\n",
		       adapter->unit);
		return (ENXIO);
	}
	if (bus_setup_intr(dev, adapter->res_interrupt,
			   INTR_TYPE_NET | INTR_MPSAFE,
			   NULL, (void (*) (void *))ixgb_intr, adapter,
			   &adapter->int_handler_tag)) {
		printf("ixgb%d: Error registering interrupt handler!\n",
		       adapter->unit);
		return (ENXIO);
	}
	adapter->hw.back = &adapter->osdep;

	return (0);
}

static void
ixgb_free_pci_resources(struct adapter * adapter)
{
	device_t        dev = adapter->dev;

	if (adapter->res_interrupt != NULL) {
		bus_teardown_intr(dev, adapter->res_interrupt,
				  adapter->int_handler_tag);
		bus_release_resource(dev, SYS_RES_IRQ, 0,
				     adapter->res_interrupt);
	}
	if (adapter->res_memory != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, IXGB_MMBA,
				     adapter->res_memory);
	}
	if (adapter->res_ioport != NULL) {
		bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
				     adapter->res_ioport);
	}
	return;
}

/*********************************************************************
 *
 *  Initialize the hardware to a configuration as specified by the
 *  adapter structure. The controller is reset, the EEPROM is
 *  verified, the MAC address is set, then the shared initialization
 *  routines are called.
 *
 **********************************************************************/
static int
ixgb_hardware_init(struct adapter * adapter)
{
	/* Issue a global reset */
	adapter->hw.adapter_stopped = FALSE;
	ixgb_adapter_stop(&adapter->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
		printf("ixgb%d: The EEPROM Checksum Is Not Valid\n",
		       adapter->unit);
		return (EIO);
	}
	if (!ixgb_init_hw(&adapter->hw)) {
		printf("ixgb%d: Hardware Initialization Failed",
		       adapter->unit);
		return (EIO);
	}

	return (0);
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static void
ixgb_setup_interface(device_t dev, struct adapter * adapter)
{
	struct ifnet   *ifp;
	INIT_DEBUGOUT("ixgb_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
#if __FreeBSD_version >= 502000
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
#else
	ifp->if_unit = adapter->unit;
	ifp->if_name = "ixgb";
#endif
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = 1000000000;
	ifp->if_init = ixgb_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixgb_ioctl;
	ifp->if_start = ixgb_start;
	ifp->if_watchdog = ixgb_watchdog;
	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;

#if __FreeBSD_version < 500000
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
#else
	ether_ifattach(ifp, adapter->hw.curr_mac_addr);
#endif

	ifp->if_capabilities = IFCAP_HWCSUM;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

#if __FreeBSD_version >= 500000
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
#endif

	ifp->if_capenable = ifp->if_capabilities;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgb_media_change,
		     ixgb_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
		    0, NULL);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}

/********************************************************************
 * Manage DMA'able memory.
 *******************************************************************/
static void
ixgb_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
{
	if (error)
		return;
	*(bus_addr_t *) arg = segs->ds_addr;
	return;
}

static int
ixgb_dma_malloc(struct adapter * adapter, bus_size_t size,
		struct ixgb_dma_alloc * dma, int mapflags)
{
	int             r;

	r = bus_dma_tag_create(NULL,	/* parent */
			       PAGE_SIZE, 0,	/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,	/* filter, filterarg */
			       size,	/* maxsize */
			       1,	/* nsegments */
			       size,	/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
#if __FreeBSD_version >= 502000
			       NULL,	/* lockfunc */
			       NULL,	/* lockfuncarg */
#endif
			       &dma->dma_tag);
	if (r != 0) {
		printf("ixgb%d: ixgb_dma_malloc: bus_dma_tag_create failed; "
		       "error %u\n", adapter->unit, r);
		goto fail_0;
	}
	r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
			     BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		printf("ixgb%d: ixgb_dma_malloc: bus_dmamem_alloc failed; "
		       "error %u\n", adapter->unit, r);
		goto fail_1;
	}
	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
			    size,
			    ixgb_dmamap_cb,
			    &dma->dma_paddr,
			    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("ixgb%d: ixgb_dma_malloc: bus_dmamap_load failed; "
		       "error %u\n", adapter->unit, r);
		goto fail_2;
	}
	dma->dma_size = size;
	return (0);
fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return (r);
}
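
/*
 * Typical usage (see ixgb_attach and ixgb_detach above): ixgb_dma_malloc()
 * is called with BUS_DMA_NOWAIT to back each descriptor ring, and
 * ixgb_dma_free() releases the map, the memory, and the tag again on the
 * attach error paths and at detach time.
 */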



static void
ixgb_dma_free(struct adapter * adapter, struct ixgb_dma_alloc * dma)
{
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
}

/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire.
 *
 **********************************************************************/
static int
ixgb_allocate_transmit_structures(struct adapter * adapter)
{
	if (!(adapter->tx_buffer_area =
	      (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
					    adapter->num_tx_desc, M_DEVBUF,
					    M_NOWAIT | M_ZERO))) {
		printf("ixgb%d: Unable to allocate tx_buffer memory\n",
		       adapter->unit);
		return ENOMEM;
	}
	bzero(adapter->tx_buffer_area,
	      sizeof(struct ixgb_buffer) * adapter->num_tx_desc);

	return 0;
}

/*********************************************************************
 *
 *  Allocate and initialize transmit structures.
 *
 **********************************************************************/
static int
ixgb_setup_transmit_structures(struct adapter * adapter)
{
	/*
	 * Setup DMA descriptor areas.
	 */
	if (bus_dma_tag_create(NULL,	/* parent */
			       PAGE_SIZE, 0,	/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,	/* filter, filterarg */
			       MCLBYTES * IXGB_MAX_SCATTER,	/* maxsize */
			       IXGB_MAX_SCATTER,	/* nsegments */
			       MCLBYTES,	/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
#if __FreeBSD_version >= 502000
			       NULL,	/* lockfunc */
			       NULL,	/* lockfuncarg */
#endif
			       &adapter->txtag)) {
		printf("ixgb%d: Unable to allocate TX DMA tag\n", adapter->unit);
		return (ENOMEM);
	}
	if (ixgb_allocate_transmit_structures(adapter))
		return ENOMEM;

	bzero((void *)adapter->tx_desc_base,
	      (sizeof(struct ixgb_tx_desc)) * adapter->num_tx_desc);

	adapter->next_avail_tx_desc = 0;
	adapter->oldest_used_tx_desc = 0;

	/* Set number of descriptors available */
	adapter->num_tx_desc_avail = adapter->num_tx_desc;

	/* Set checksum context */
	adapter->active_checksum_context = OFFLOAD_NONE;

	return 0;
}

/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
static void
ixgb_initialize_transmit_unit(struct adapter * adapter)
{
	u_int32_t       reg_tctl;
	u_int64_t       tdba = adapter->txdma.dma_paddr;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	IXGB_WRITE_REG(&adapter->hw, TDBAL,
		       (tdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));
	IXGB_WRITE_REG(&adapter->hw, TDLEN,
		       adapter->num_tx_desc *
		       sizeof(struct ixgb_tx_desc));

	/* Setup the HW Tx Head and Tail descriptor pointers */
	IXGB_WRITE_REG(&adapter->hw, TDH, 0);
	IXGB_WRITE_REG(&adapter->hw, TDT, 0);


	HW_DEBUGOUT2("Base = %x, Length = %x\n",
		     IXGB_READ_REG(&adapter->hw, TDBAL),
		     IXGB_READ_REG(&adapter->hw, TDLEN));

	IXGB_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);


	/* Program the Transmit Control Register */
	reg_tctl = IXGB_READ_REG(&adapter->hw, TCTL);
	reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
	IXGB_WRITE_REG(&adapter->hw, TCTL, reg_tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS;

	if (adapter->tx_int_delay > 0)
		adapter->txd_cmd |= IXGB_TX_DESC_CMD_IDE;
	return;
}

/*********************************************************************
 *
 *  Free all transmit related data structures.
 *
 **********************************************************************/
static void
ixgb_free_transmit_structures(struct adapter * adapter)
{
	struct ixgb_buffer *tx_buffer;
	int             i;

	INIT_DEBUGOUT("free_transmit_structures: begin");

	if (adapter->tx_buffer_area != NULL) {
		tx_buffer = adapter->tx_buffer_area;
		for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
			if (tx_buffer->m_head != NULL) {
				bus_dmamap_unload(adapter->txtag, tx_buffer->map);
				bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
				m_freem(tx_buffer->m_head);
			}
			tx_buffer->m_head = NULL;
		}
	}
	if (adapter->tx_buffer_area != NULL) {
		free(adapter->tx_buffer_area, M_DEVBUF);
		adapter->tx_buffer_area = NULL;
	}
	if (adapter->txtag != NULL) {
		bus_dma_tag_destroy(adapter->txtag);
		adapter->txtag = NULL;
	}
	return;
}

/*********************************************************************
 *
 *  The offload context needs to be set when we transfer the first
 *  packet of a particular protocol (TCP/UDP). We change the
 *  context only if the protocol type changes.
 *
 **********************************************************************/
static void
ixgb_transmit_checksum_setup(struct adapter * adapter,
			     struct mbuf * mp,
			     u_int8_t * txd_popts)
{
	struct ixgb_context_desc *TXD;
	struct ixgb_buffer *tx_buffer;
	int             curr_txd;

	if (mp->m_pkthdr.csum_flags) {

		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
			*txd_popts = IXGB_TX_DESC_POPTS_TXSM;
			if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
				return;
			else
				adapter->active_checksum_context = OFFLOAD_TCP_IP;
		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
			*txd_popts = IXGB_TX_DESC_POPTS_TXSM;
			if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
				return;
			else
				adapter->active_checksum_context = OFFLOAD_UDP_IP;
		} else {
			*txd_popts = 0;
			return;
		}
	} else {
		*txd_popts = 0;
		return;
	}

	/*
	 * If we reach this point, the checksum offload context needs to be
	 * reset.
	 */
	curr_txd = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[curr_txd];
	TXD = (struct ixgb_context_desc *) & adapter->tx_desc_base[curr_txd];


	TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip);
	TXD->tucse = 0;

	TXD->mss = 0;

	if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
		TXD->tucso =
			ENET_HEADER_SIZE + sizeof(struct ip) +
			offsetof(struct tcphdr, th_sum);
	} else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
		TXD->tucso =
			ENET_HEADER_SIZE + sizeof(struct ip) +
			offsetof(struct udphdr, uh_sum);
	}
	TXD->cmd_type_len = IXGB_CONTEXT_DESC_CMD_TCP | IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE;

	tx_buffer->m_head = NULL;

	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;
	return;
}
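
/*
 * The checksum context descriptor written above occupies one slot in the
 * TX ring, which is why num_tx_desc_avail is decremented even though no
 * frame data is attached to that descriptor.
 */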
1700
1701/**********************************************************************
1702 *
1703 *  Examine each tx_buffer in the used queue. If the hardware is done
1704 *  processing the packet then free associated resources. The
1705 *  tx_buffer is put back on the free queue.
1706 *
1707 **********************************************************************/
1708static void
1709ixgb_clean_transmit_interrupts(struct adapter * adapter)
1710{
1711	int             i, num_avail;
1712	struct ixgb_buffer *tx_buffer;
1713	struct ixgb_tx_desc *tx_desc;
1714
1715	IXGB_LOCK_ASSERT(adapter);
1716
1717	if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
1718		return;
1719
1720#ifdef _SV_
1721	adapter->clean_tx_interrupts++;
1722#endif
1723	num_avail = adapter->num_tx_desc_avail;
1724	i = adapter->oldest_used_tx_desc;
1725
1726	tx_buffer = &adapter->tx_buffer_area[i];
1727	tx_desc = &adapter->tx_desc_base[i];
1728
1729	while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) {
1730
1731		tx_desc->status = 0;
1732		num_avail++;
1733
1734		if (tx_buffer->m_head) {
1735			bus_dmamap_sync(adapter->txtag, tx_buffer->map,
1736					BUS_DMASYNC_POSTWRITE);
1737			bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1738			bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1739			m_freem(tx_buffer->m_head);
1740			tx_buffer->m_head = NULL;
1741		}
1742		if (++i == adapter->num_tx_desc)
1743			i = 0;
1744
1745		tx_buffer = &adapter->tx_buffer_area[i];
1746		tx_desc = &adapter->tx_desc_base[i];
1747	}
1748
1749	adapter->oldest_used_tx_desc = i;
1750
1751	/*
1752	 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
1753	 * it is OK to send packets. If there are no pending descriptors,
1754	 * clear the timeout. Otherwise, if some descriptors have been freed,
1755	 * restart the timeout.
1756	 */
1757	if (num_avail > IXGB_TX_CLEANUP_THRESHOLD) {
1758		struct ifnet   *ifp = adapter->ifp;
1759
1760		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1761		if (num_avail == adapter->num_tx_desc)
1762			ifp->if_timer = 0;
1763		else if (num_avail == adapter->num_tx_desc_avail)
1764			ifp->if_timer = IXGB_TX_TIMEOUT;
1765	}
1766	adapter->num_tx_desc_avail = num_avail;
1767	return;
1768}
1769
1770
1771/*********************************************************************
1772 *
1773 *  Get a receive buffer: allocate a new mbuf cluster, or reuse the supplied mbuf.
1774 *
1775 **********************************************************************/
1776static int
1777ixgb_get_buf(int i, struct adapter * adapter,
1778	     struct mbuf * nmp)
1779{
1780	register struct mbuf *mp = nmp;
1781	struct ixgb_buffer *rx_buffer;
1782	struct ifnet   *ifp;
1783	bus_addr_t      paddr;
1784	int             error;
1785
1786	ifp = adapter->ifp;
1787
1788	if (mp == NULL) {
1789
1790		mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1791
1792		if (mp == NULL) {
1793			adapter->mbuf_alloc_failed++;
1794			return (ENOBUFS);
1795		}
1796		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1797	} else {
1798		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1799		mp->m_data = mp->m_ext.ext_buf;
1800		mp->m_next = NULL;
1801	}
1802
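	/*
	 * For standard-MTU frames, trim ETHER_ALIGN (2) bytes from the
	 * front of the cluster so that the IP header following the 14-byte
	 * Ethernet header ends up 32-bit aligned.
	 */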
1803	if (ifp->if_mtu <= ETHERMTU) {
1804		m_adj(mp, ETHER_ALIGN);
1805	}
1806	rx_buffer = &adapter->rx_buffer_area[i];
1807
1808	/*
1809	 * Using memory from the mbuf cluster pool, invoke the bus_dma
1810	 * machinery to arrange the memory mapping.
1811	 */
1812	error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
1813				mtod(mp, void *), mp->m_len,
1814				ixgb_dmamap_cb, &paddr, 0);
1815	if (error) {
1816		m_free(mp);
1817		return (error);
1818	}
1819	rx_buffer->m_head = mp;
1820	adapter->rx_desc_base[i].buff_addr = htole64(paddr);
1821	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
1822
1823	return (0);
1824}
1825
1826/*********************************************************************
1827 *
1828 *  Allocate memory for rx_buffer structures. Since we use one
1829 *  rx_buffer per received packet, the maximum number of rx_buffers
1830 *  that we'll need is equal to the number of receive descriptors
1831 *  that we've allocated.
1832 *
1833 **********************************************************************/
1834static int
1835ixgb_allocate_receive_structures(struct adapter * adapter)
1836{
1837	int             i, error;
1838	struct ixgb_buffer *rx_buffer;
1839
1840	if (!(adapter->rx_buffer_area =
1841	      (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
1842					    adapter->num_rx_desc, M_DEVBUF,
1843					    M_NOWAIT | M_ZERO))) {
1844		printf("ixgb%d: Unable to allocate rx_buffer memory\n",
1845		       adapter->unit);
1846		return (ENOMEM);
1847	}
1848	bzero(adapter->rx_buffer_area,
1849	      sizeof(struct ixgb_buffer) * adapter->num_rx_desc);
1850
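	/*
	 * This tag only describes how receive clusters may be mapped: one
	 * physically contiguous segment of at most MCLBYTES, anywhere in
	 * the bus address space.
	 */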
1851	error = bus_dma_tag_create(NULL,	/* parent */
1852				   PAGE_SIZE, 0,	/* alignment, bounds */
1853				   BUS_SPACE_MAXADDR,	/* lowaddr */
1854				   BUS_SPACE_MAXADDR,	/* highaddr */
1855				   NULL, NULL,	/* filter, filterarg */
1856				   MCLBYTES,	/* maxsize */
1857				   1,	/* nsegments */
1858				   MCLBYTES,	/* maxsegsize */
1859				   BUS_DMA_ALLOCNOW,	/* flags */
1860#if __FreeBSD_version >= 502000
1861				   NULL,	/* lockfunc */
1862				   NULL,	/* lockfuncarg */
1863#endif
1864				   &adapter->rxtag);
1865	if (error != 0) {
1866		printf("ixgb%d: ixgb_allocate_receive_structures: "
1867		       "bus_dma_tag_create failed; error %u\n",
1868		       adapter->unit, error);
1869		goto fail_0;
1870	}
1871	rx_buffer = adapter->rx_buffer_area;
1872	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
1873		error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
1874					  &rx_buffer->map);
1875		if (error != 0) {
1876			printf("ixgb%d: ixgb_allocate_receive_structures: "
1877			       "bus_dmamap_create failed; error %u\n",
1878			       adapter->unit, error);
1879			goto fail_1;
1880		}
1881	}
1882
1883	for (i = 0; i < adapter->num_rx_desc; i++) {
1884		if (ixgb_get_buf(i, adapter, NULL) == ENOBUFS) {
1885			adapter->rx_buffer_area[i].m_head = NULL;
1886			adapter->rx_desc_base[i].buff_addr = 0;
1887			return (ENOBUFS);
1888		}
1889	}
1890
1891	return (0);
1892fail_1:
1893	bus_dma_tag_destroy(adapter->rxtag);
1894fail_0:
1895	adapter->rxtag = NULL;
1896	free(adapter->rx_buffer_area, M_DEVBUF);
1897	adapter->rx_buffer_area = NULL;
1898	return (error);
1899}
1900
1901/*********************************************************************
1902 *
1903 *  Allocate and initialize receive structures.
1904 *
1905 **********************************************************************/
1906static int
1907ixgb_setup_receive_structures(struct adapter * adapter)
1908{
1909	bzero((void *)adapter->rx_desc_base,
1910	      (sizeof(struct ixgb_rx_desc)) * adapter->num_rx_desc);
1911
1912	if (ixgb_allocate_receive_structures(adapter))
1913		return ENOMEM;
1914
1915	/* Setup our descriptor pointers */
1916	adapter->next_rx_desc_to_check = 0;
1917	adapter->next_rx_desc_to_use = 0;
1918	return (0);
1919}
1920
1921/*********************************************************************
1922 *
1923 *  Enable receive unit.
1924 *
1925 **********************************************************************/
1926static void
1927ixgb_initialize_receive_unit(struct adapter * adapter)
1928{
1929	u_int32_t       reg_rctl;
1930	u_int32_t       reg_rxcsum;
1931	u_int32_t       reg_rxdctl;
1932	struct ifnet   *ifp;
1933	u_int64_t       rdba = adapter->rxdma.dma_paddr;
1934
1935	ifp = adapter->ifp;
1936
1937	/*
1938	 * Make sure receives are disabled while setting up the descriptor
1939	 * ring
1940	 */
1941	reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1942	IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN);
1943
1944	/* Set the Receive Delay Timer Register */
1945	IXGB_WRITE_REG(&adapter->hw, RDTR,
1946		       adapter->rx_int_delay);
1947
1948
1949	/* Setup the Base and Length of the Rx Descriptor Ring */
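	/*
	 * The 64-bit physical base address of the ring is programmed as two
	 * 32-bit halves (RDBAL low, RDBAH high); RDLEN is the ring size in
	 * bytes.
	 */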
1950	IXGB_WRITE_REG(&adapter->hw, RDBAL,
1951		       (rdba & 0x00000000ffffffffULL));
1952	IXGB_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
1953	IXGB_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
1954		       sizeof(struct ixgb_rx_desc));
1955
1956	/* Setup the HW Rx Head and Tail Descriptor Pointers */
1957	IXGB_WRITE_REG(&adapter->hw, RDH, 0);
1958
1959	IXGB_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
1960
1961
1962
1963	reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
1964		| RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
1965		| RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
1966	IXGB_WRITE_REG(&adapter->hw, RXDCTL, reg_rxdctl);
1967
1968
1969	adapter->raidc = 1;
1970	if (adapter->raidc) {
1971		uint32_t        raidc;
1972		uint8_t         poll_threshold;
1973#define IXGB_RAIDC_POLL_DEFAULT 120
1974
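		/*
		 * Derive the receive-descriptor poll threshold from the ring
		 * size: roughly 1/16th of the ring, masked to the 6-bit
		 * field.  For example, a (hypothetical) 512-descriptor ring
		 * gives (511 >> 3) >> 1 = 31.
		 */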
1975		poll_threshold = ((adapter->num_rx_desc - 1) >> 3);
1976		poll_threshold >>= 1;
1977		poll_threshold &= 0x3F;
1978		raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE |
1979			(IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) |
1980			(adapter->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) |
1981			poll_threshold;
1982		IXGB_WRITE_REG(&adapter->hw, RAIDC, raidc);
1983	}
1984	/* Enable Receive Checksum Offload for TCP and UDP if requested */
1985	if (ifp->if_capenable & IFCAP_RXCSUM) {
1986		reg_rxcsum = IXGB_READ_REG(&adapter->hw, RXCSUM);
1987		reg_rxcsum |= IXGB_RXCSUM_TUOFL;
1988		IXGB_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
1989	}
1990	/* Setup the Receive Control Register */
1991	reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1992	reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
1993	reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC |
1994		IXGB_RCTL_CFF |
1995		(adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
1996
1997	switch (adapter->rx_buffer_len) {
1998	default:
1999	case IXGB_RXBUFFER_2048:
2000		reg_rctl |= IXGB_RCTL_BSIZE_2048;
2001		break;
2002	case IXGB_RXBUFFER_4096:
2003		reg_rctl |= IXGB_RCTL_BSIZE_4096;
2004		break;
2005	case IXGB_RXBUFFER_8192:
2006		reg_rctl |= IXGB_RCTL_BSIZE_8192;
2007		break;
2008	case IXGB_RXBUFFER_16384:
2009		reg_rctl |= IXGB_RCTL_BSIZE_16384;
2010		break;
2011	}
2012
2013	reg_rctl |= IXGB_RCTL_RXEN;
2014
2015
2016	/* Enable Receives */
2017	IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2018
2019	return;
2020}
2021
2022/*********************************************************************
2023 *
2024 *  Free receive related data structures.
2025 *
2026 **********************************************************************/
2027static void
2028ixgb_free_receive_structures(struct adapter * adapter)
2029{
2030	struct ixgb_buffer *rx_buffer;
2031	int             i;
2032
2033	INIT_DEBUGOUT("free_receive_structures: begin");
2034
2035	if (adapter->rx_buffer_area != NULL) {
2036		rx_buffer = adapter->rx_buffer_area;
2037		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2038			if (rx_buffer->map != NULL) {
2039				bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2040				bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2041			}
2042			if (rx_buffer->m_head != NULL)
2043				m_freem(rx_buffer->m_head);
2044			rx_buffer->m_head = NULL;
2045		}
2046	}
2047	if (adapter->rx_buffer_area != NULL) {
2048		free(adapter->rx_buffer_area, M_DEVBUF);
2049		adapter->rx_buffer_area = NULL;
2050	}
2051	if (adapter->rxtag != NULL) {
2052		bus_dma_tag_destroy(adapter->rxtag);
2053		adapter->rxtag = NULL;
2054	}
2055	return;
2056}
2057
2058/*********************************************************************
2059 *
2060 *  This routine executes in interrupt context. It replenishes
2061 *  the mbufs in the receive descriptor ring and hands frames that
2062 *  have been DMA'ed into host memory to the upper layer.
2063 *
2064 *  We loop at most count times if count is > 0, or until done if
2065 *  count < 0.
2066 *
2067 *********************************************************************/
2068static void
2069ixgb_process_receive_interrupts(struct adapter * adapter, int count)
2070{
2071	struct ifnet   *ifp;
2072	struct mbuf    *mp;
2073#if __FreeBSD_version < 500000
2074	struct ether_header *eh;
2075#endif
2076	int             eop = 0;
2077	int             len;
2078	u_int8_t        accept_frame = 0;
2079	int             i;
2080	int             next_to_use = 0;
2081	int             eop_desc;
2082	/* Pointer to the receive descriptor being examined. */
2083	struct ixgb_rx_desc *current_desc;
2084
2085	IXGB_LOCK_ASSERT(adapter);
2086
2087	ifp = adapter->ifp;
2088	i = adapter->next_rx_desc_to_check;
2089	next_to_use = adapter->next_rx_desc_to_use;
2090	eop_desc = adapter->next_rx_desc_to_check;
2091	current_desc = &adapter->rx_desc_base[i];
2092
2093	if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD)) {
2094#ifdef _SV_
2095		adapter->no_pkts_avail++;
2096#endif
2097		return;
2098	}
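	/*
	 * Process the descriptors the hardware has written back (DD set).
	 * A frame larger than one buffer spans several descriptors; the
	 * fragments are chained into a single mbuf chain until a descriptor
	 * with EOP (end of packet) is seen, at which point the frame is
	 * handed up to the stack.
	 */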
2099	while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) && (count != 0)) {
2100
2101		mp = adapter->rx_buffer_area[i].m_head;
2102		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2103				BUS_DMASYNC_POSTREAD);
2104		accept_frame = 1;
2105		if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) {
2106			count--;
2107			eop = 1;
2108		} else {
2109			eop = 0;
2110		}
2111		len = current_desc->length;
2112
2113		if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
2114			    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
2115					    IXGB_RX_DESC_ERRORS_RXE)) {
2116			accept_frame = 0;
2117		}
2118		if (accept_frame) {
2119
2120			/* Assign correct length to the current fragment */
2121			mp->m_len = len;
2122
2123			if (adapter->fmp == NULL) {
2124				mp->m_pkthdr.len = len;
2125				adapter->fmp = mp;	/* Store the first mbuf */
2126				adapter->lmp = mp;
2127			} else {
2128				/* Chain mbuf's together */
2129				mp->m_flags &= ~M_PKTHDR;
2130				adapter->lmp->m_next = mp;
2131				adapter->lmp = adapter->lmp->m_next;
2132				adapter->fmp->m_pkthdr.len += len;
2133			}
2134
2135			if (eop) {
2136				eop_desc = i;
2137				adapter->fmp->m_pkthdr.rcvif = ifp;
2138
2139#if __FreeBSD_version < 500000
2140				eh = mtod(adapter->fmp, struct ether_header *);
2141
2142				/* Remove ethernet header from mbuf */
2143				m_adj(adapter->fmp, sizeof(struct ether_header));
2144				ixgb_receive_checksum(adapter, current_desc,
2145						      adapter->fmp);
2146
2147				if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
2148					VLAN_INPUT_TAG(eh, adapter->fmp,
2149						     current_desc->special);
2150				else
2151					ether_input(ifp, eh, adapter->fmp);
2152#else
2153				ixgb_receive_checksum(adapter, current_desc,
2154						      adapter->fmp);
2155#if __FreeBSD_version < 700000
2156				if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
2157					VLAN_INPUT_TAG(ifp, adapter->fmp,
2158						       current_desc->special);
2159#else
2160				if (current_desc->status & IXGB_RX_DESC_STATUS_VP) {
2161					adapter->fmp->m_pkthdr.ether_vtag =
2162					    current_desc->special;
2163					adapter->fmp->m_flags |= M_VLANTAG;
2164				}
2165#endif
2166
2167				if (adapter->fmp != NULL) {
2168					IXGB_UNLOCK(adapter);
2169					(*ifp->if_input) (ifp, adapter->fmp);
2170					IXGB_LOCK(adapter);
2171				}
2172#endif
2173				adapter->fmp = NULL;
2174				adapter->lmp = NULL;
2175			}
2176			adapter->rx_buffer_area[i].m_head = NULL;
2177		} else {
2178			adapter->dropped_pkts++;
2179			if (adapter->fmp != NULL)
2180				m_freem(adapter->fmp);
2181			adapter->fmp = NULL;
2182			adapter->lmp = NULL;
2183		}
2184
2185		/* Zero out the receive descriptor's status */
2186		current_desc->status = 0;
2187
2188		/* Advance our pointers to the next descriptor */
2189		if (++i == adapter->num_rx_desc) {
2190			i = 0;
2191			current_desc = adapter->rx_desc_base;
2192		} else
2193			current_desc++;
2194	}
2195	adapter->next_rx_desc_to_check = i;
2196
2197	if (--i < 0)
2198		i = (adapter->num_rx_desc - 1);
2199
2200	/*
2201	 * 82597EX: Work around a redundant write back in the receive
2202	 * descriptor ring (which can cause memory corruption): never
2203	 * re-submit the most recently written-back RX descriptor to the
2204	 * hardware.
2205	 *
2206	 * If the last written-back descriptor is the descriptor with the EOP
2207	 *	bit set, withhold only that descriptor.
2208	 * If the last written-back descriptor is not the EOP descriptor,
2209	 *	withhold every descriptor after the last one with the EOP bit
2210	 *	set until a later replenish pass.
2211	 */
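	/*
	 * Concretely: i is the last descriptor processed in this pass and
	 * eop_desc is the last descriptor seen with EOP set.  If they
	 * differ, stop the replenish loop below just past eop_desc, so the
	 * trailing descriptors (including the most recently written-back
	 * one) are not handed straight back to the hardware.
	 */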
2212	if (eop_desc != i) {
2213		if (++eop_desc == adapter->num_rx_desc)
2214			eop_desc = 0;
2215		i = eop_desc;
2216	}
2217	/* Replenish the descriptors with new mbufs, up to the last EOP descriptor */
2218	while (next_to_use != i) {
2219		current_desc = &adapter->rx_desc_base[next_to_use];
2220		if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
2221			    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
2222					     IXGB_RX_DESC_ERRORS_RXE))) {
2223			mp = adapter->rx_buffer_area[next_to_use].m_head;
2224			ixgb_get_buf(next_to_use, adapter, mp);
2225		} else {
2226			if (ixgb_get_buf(next_to_use, adapter, NULL) == ENOBUFS)
2227				break;
2228		}
2229		/* Advance our pointers to the next descriptor */
2230		if (++next_to_use == adapter->num_rx_desc) {
2231			next_to_use = 0;
2232			current_desc = adapter->rx_desc_base;
2233		} else
2234			current_desc++;
2235	}
2236	adapter->next_rx_desc_to_use = next_to_use;
2237	if (--next_to_use < 0)
2238		next_to_use = (adapter->num_rx_desc - 1);
2239	/* Advance the IXGB's Receive Queue #0 "Tail Pointer" */
2240	IXGB_WRITE_REG(&adapter->hw, RDT, next_to_use);
2241
2242	return;
2243}
2244
2245/*********************************************************************
2246 *
2247 *  Verify that the hardware indicated that the checksum is valid.
2248 *  Inform the stack about the status of the checksum so that the
2249 *  stack does not spend time re-verifying it.
2250 *
2251 *********************************************************************/
2252static void
2253ixgb_receive_checksum(struct adapter * adapter,
2254		      struct ixgb_rx_desc * rx_desc,
2255		      struct mbuf * mp)
2256{
2257	if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) {
2258		mp->m_pkthdr.csum_flags = 0;
2259		return;
2260	}
2261	if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) {
2262		/* Did it pass? */
2263		if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) {
2264			/* IP Checksum Good */
2265			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2266			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2267
2268		} else {
2269			mp->m_pkthdr.csum_flags = 0;
2270		}
2271	}
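	/*
	 * For a good TCP/UDP checksum, CSUM_DATA_VALID | CSUM_PSEUDO_HDR
	 * together with csum_data = 0xffff tell the stack that the payload
	 * checksum, including the pseudo-header, needs no further
	 * verification.
	 */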
2272	if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) {
2273		/* Did it pass? */
2274		if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) {
2275			mp->m_pkthdr.csum_flags |=
2276				(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2277			mp->m_pkthdr.csum_data = htons(0xffff);
2278		}
2279	}
2280	return;
2281}
2282
2283
2284static void
2285ixgb_enable_vlans(struct adapter * adapter)
2286{
2287	uint32_t        ctrl;
2288
2289	ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2290	ctrl |= IXGB_CTRL0_VME;
2291	IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2292
2293	return;
2294}
2295
2296
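/*
 * Interrupt sources are unmasked by setting bits in IMS (interrupt mask
 * set) and masked again by writing those bits (here all ones) to IMC
 * (interrupt mask clear).
 */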
2297static void
2298ixgb_enable_intr(struct adapter * adapter)
2299{
2300	IXGB_WRITE_REG(&adapter->hw, IMS, (IXGB_INT_RXT0 | IXGB_INT_TXDW |
2301			    IXGB_INT_RXDMT0 | IXGB_INT_LSC | IXGB_INT_RXO));
2302	return;
2303}
2304
2305static void
2306ixgb_disable_intr(struct adapter * adapter)
2307{
2308	IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
2309	return;
2310}
2311
2312void
2313ixgb_write_pci_cfg(struct ixgb_hw * hw,
2314		   uint32_t reg,
2315		   uint16_t * value)
2316{
2317	pci_write_config(((struct ixgb_osdep *) hw->back)->dev, reg,
2318			 *value, 2);
2319}
2320
2321/**********************************************************************
2322 *
2323 *  Update the board statistics counters.
2324 *
2325 **********************************************************************/
2326static void
2327ixgb_update_stats_counters(struct adapter * adapter)
2328{
2329	struct ifnet   *ifp;
2330
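	/*
	 * Accumulate the hardware statistics registers into the soft copy.
	 * The ...L/...H pairs are taken to be the low and high 32 bits of
	 * 64-bit counters, and the registers are assumed to clear on read,
	 * which is why each value is added rather than assigned.
	 */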
2331	adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
2332	adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
2333	adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
2334	adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
2335	adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
2336	adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
2337	adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
2338	adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
2339	adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
2340	adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
2341
2342	adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
2343	adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
2344	adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
2345	adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
2346	adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
2347	adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
2348	adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
2349	adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
2350	adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
2351	adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
2352	adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
2353	adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
2354	adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
2355	adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
2356	adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
2357	adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
2358	adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
2359	adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
2360	adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
2361	adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
2362	adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
2363	adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
2364	adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
2365	adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
2366	adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
2367	adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
2368	adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
2369
2370	adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
2371	adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
2372	adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
2373	adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
2374	adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
2375	adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
2376	adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
2377	adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
2378	adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
2379	adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
2380	adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
2381	adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
2382	adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
2383	adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
2384	adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
2385	adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
2386	adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
2387	adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
2388	adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
2389	adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
2390	adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
2391	adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
2392
2393	ifp = adapter->ifp;
2394
2395	/* Fill out the OS statistics structure */
2396	ifp->if_ipackets = adapter->stats.gprcl;
2397	ifp->if_opackets = adapter->stats.gptcl;
2398	ifp->if_ibytes = adapter->stats.gorcl;
2399	ifp->if_obytes = adapter->stats.gotcl;
2400	ifp->if_imcasts = adapter->stats.mprcl;
2401	ifp->if_collisions = 0;
2402
2403	/* Rx Errors */
2404	ifp->if_ierrors =
2405		adapter->dropped_pkts +
2406		adapter->stats.crcerrs +
2407		adapter->stats.rnbc +
2408		adapter->stats.mpc +
2409		adapter->stats.rlec;
2410
2411
2412}
2413
2414
2415/**********************************************************************
2416 *
2417 *  This routine is called only when ixgb_display_debug_stats is enabled.
2418 *  This routine provides a way to take a look at important statistics
2419 *  maintained by the driver and hardware.
2420 *
2421 **********************************************************************/
2422static void
2423ixgb_print_hw_stats(struct adapter * adapter)
2424{
2425	char            buf_speed[100], buf_type[100];
2426	ixgb_bus_speed  bus_speed;
2427	ixgb_bus_type   bus_type;
2428	int             unit = adapter->unit;
2429
2430#ifdef _SV_
2431	printf("ixgb%d: Packets not Avail = %ld\n", unit,
2432	       adapter->no_pkts_avail);
2433	printf("ixgb%d: CleanTxInterrupts = %ld\n", unit,
2434	       adapter->clean_tx_interrupts);
2435	printf("ixgb%d: ICR RXDMT0 = %lld\n", unit,
2436	       (long long)adapter->sv_stats.icr_rxdmt0);
2437	printf("ixgb%d: ICR RXO = %lld\n", unit,
2438	       (long long)adapter->sv_stats.icr_rxo);
2439	printf("ixgb%d: ICR RXT0 = %lld\n", unit,
2440	       (long long)adapter->sv_stats.icr_rxt0);
2441	printf("ixgb%d: ICR TXDW = %lld\n", unit,
2442	       (long long)adapter->sv_stats.icr_TXDW);
2443#endif				/* _SV_ */
2444
2445	bus_speed = adapter->hw.bus.speed;
2446	bus_type = adapter->hw.bus.type;
2447	sprintf(buf_speed,
2448		bus_speed == ixgb_bus_speed_33 ? "33MHz" :
2449		bus_speed == ixgb_bus_speed_66 ? "66MHz" :
2450		bus_speed == ixgb_bus_speed_100 ? "100MHz" :
2451		bus_speed == ixgb_bus_speed_133 ? "133MHz" :
2452		"UNKNOWN");
2453	printf("ixgb%d: PCI_Bus_Speed = %s\n", unit,
2454	       buf_speed);
2455
2456	sprintf(buf_type,
2457		bus_type == ixgb_bus_type_pci ? "PCI" :
2458		bus_type == ixgb_bus_type_pcix ? "PCI-X" :
2459		"UNKNOWN");
2460	printf("ixgb%d: PCI_Bus_Type = %s\n", unit,
2461	       buf_type);
2462
2463	printf("ixgb%d: Tx Descriptors not Avail1 = %ld\n", unit,
2464	       adapter->no_tx_desc_avail1);
2465	printf("ixgb%d: Tx Descriptors not Avail2 = %ld\n", unit,
2466	       adapter->no_tx_desc_avail2);
2467	printf("ixgb%d: Std Mbuf Failed = %ld\n", unit,
2468	       adapter->mbuf_alloc_failed);
2469	printf("ixgb%d: Std Cluster Failed = %ld\n", unit,
2470	       adapter->mbuf_cluster_failed);
2471
2472	printf("ixgb%d: Defer count = %lld\n", unit,
2473	       (long long)adapter->stats.dc);
2474	printf("ixgb%d: Missed Packets = %lld\n", unit,
2475	       (long long)adapter->stats.mpc);
2476	printf("ixgb%d: Receive No Buffers = %lld\n", unit,
2477	       (long long)adapter->stats.rnbc);
2478	printf("ixgb%d: Receive length errors = %lld\n", unit,
2479	       (long long)adapter->stats.rlec);
2480	printf("ixgb%d: Crc errors = %lld\n", unit,
2481	       (long long)adapter->stats.crcerrs);
2482	printf("ixgb%d: Driver dropped packets = %ld\n", unit,
2483	       adapter->dropped_pkts);
2484
2485	printf("ixgb%d: XON Rcvd = %lld\n", unit,
2486	       (long long)adapter->stats.xonrxc);
2487	printf("ixgb%d: XON Xmtd = %lld\n", unit,
2488	       (long long)adapter->stats.xontxc);
2489	printf("ixgb%d: XOFF Rcvd = %lld\n", unit,
2490	       (long long)adapter->stats.xoffrxc);
2491	printf("ixgb%d: XOFF Xmtd = %lld\n", unit,
2492	       (long long)adapter->stats.xofftxc);
2493
2494	printf("ixgb%d: Good Packets Rcvd = %lld\n", unit,
2495	       (long long)adapter->stats.gprcl);
2496	printf("ixgb%d: Good Packets Xmtd = %lld\n", unit,
2497	       (long long)adapter->stats.gptcl);
2498
2499	printf("ixgb%d: Jumbo frames recvd = %lld\n", unit,
2500	       (long long)adapter->stats.jprcl);
2501	printf("ixgb%d: Jumbo frames Xmtd = %lld\n", unit,
2502	       (long long)adapter->stats.jptcl);
2503
2504	return;
2505
2506}
2507
2508static int
2509ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS)
2510{
2511	int             error;
2512	int             result;
2513	struct adapter *adapter;
2514
2515	result = -1;
2516	error = sysctl_handle_int(oidp, &result, 0, req);
2517
2518	if (error || !req->newptr)
2519		return (error);
2520
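	/*
	 * Writing 1 to this sysctl node dumps the hardware statistics via
	 * ixgb_print_hw_stats(); any other value is ignored.
	 */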
2521	if (result == 1) {
2522		adapter = (struct adapter *) arg1;
2523		ixgb_print_hw_stats(adapter);
2524	}
2525	return error;
2526}
2527