1/*******************************************************************************
2
3Copyright (c) 2001-2004, Intel Corporation
4All rights reserved.
5
6Redistribution and use in source and binary forms, with or without
7modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30POSSIBILITY OF SUCH DAMAGE.
31
32***************************************************************************/
33
34/*$FreeBSD: head/sys/dev/ixgb/if_ixgb.c 253102 2013-07-09 18:15:59Z jkim $*/
35
36#ifdef HAVE_KERNEL_OPTION_HEADERS
37#include "opt_device_polling.h"
38#endif
39
40#include <dev/ixgb/if_ixgb.h>
41
/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
int ixgb_display_debug_stats = 0;

/*********************************************************************
 *  Linked list of board private structures for all NICs found
 *********************************************************************/

/* Head of the global adapter list; maintained by attach/detach. */
struct adapter *ixgb_adapter_list = NULL;



/*********************************************************************
 *  Driver version
 *********************************************************************/

char ixgb_driver_version[] = "1.0.6";
char ixgb_copyright[] = "Copyright (c) 2001-2004 Intel Corporation.";
61
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixgb_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixgb_vendor_info_t ixgb_vendor_info_array[] =
{
	/* Intel(R) PRO/10000 Network Connection */
	{IXGB_VENDOR_ID, IXGB_DEVICE_ID_82597EX, PCI_ANY_ID, PCI_ANY_ID, 0},
	{IXGB_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, PCI_ANY_ID, PCI_ANY_ID, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings for all supported NICs.
 *  Indexed by the last field of ixgb_vendor_info_t.
 *********************************************************************/

static char *ixgb_strings[] = {
	"Intel(R) PRO/10GbE Network Driver"
};
88
/*********************************************************************
 *  Function prototypes (all file-local; see definitions below)
 *********************************************************************/
static int	ixgb_probe(device_t);
static int	ixgb_attach(device_t);
static int	ixgb_detach(device_t);
static int	ixgb_shutdown(device_t);
static void	ixgb_intr(void *);
static void	ixgb_start(struct ifnet *);
static void	ixgb_start_locked(struct ifnet *);
static int	ixgb_ioctl(struct ifnet *, IOCTL_CMD_TYPE, caddr_t);
static void	ixgb_watchdog(struct adapter *);
static void	ixgb_init(void *);
static void	ixgb_init_locked(struct adapter *);
static void	ixgb_stop(void *);
static void	ixgb_media_status(struct ifnet *, struct ifmediareq *);
static int	ixgb_media_change(struct ifnet *);
static void	ixgb_identify_hardware(struct adapter *);
static int	ixgb_allocate_pci_resources(struct adapter *);
static void	ixgb_free_pci_resources(struct adapter *);
static void	ixgb_local_timer(void *);
static int	ixgb_hardware_init(struct adapter *);
static int	ixgb_setup_interface(device_t, struct adapter *);
static int	ixgb_setup_transmit_structures(struct adapter *);
static void	ixgb_initialize_transmit_unit(struct adapter *);
static int	ixgb_setup_receive_structures(struct adapter *);
static void	ixgb_initialize_receive_unit(struct adapter *);
static void	ixgb_enable_intr(struct adapter *);
static void	ixgb_disable_intr(struct adapter *);
static void	ixgb_free_transmit_structures(struct adapter *);
static void	ixgb_free_receive_structures(struct adapter *);
static void	ixgb_update_stats_counters(struct adapter *);
static void	ixgb_clean_transmit_interrupts(struct adapter *);
static int	ixgb_allocate_receive_structures(struct adapter *);
static int	ixgb_allocate_transmit_structures(struct adapter *);
static int	ixgb_process_receive_interrupts(struct adapter *, int);
static void
ixgb_receive_checksum(struct adapter *,
		      struct ixgb_rx_desc * rx_desc,
		      struct mbuf *);
static void
ixgb_transmit_checksum_setup(struct adapter *,
			     struct mbuf *,
			     u_int8_t *);
static void	ixgb_set_promisc(struct adapter *);
static void	ixgb_disable_promisc(struct adapter *);
static void	ixgb_set_multi(struct adapter *);
static void	ixgb_print_hw_stats(struct adapter *);
static void	ixgb_print_link_status(struct adapter *);
static int
ixgb_get_buf(int i, struct adapter *,
	     struct mbuf *);
static void	ixgb_enable_vlans(struct adapter * adapter);
static int	ixgb_encap(struct adapter * adapter, struct mbuf * m_head);
static int	ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int
ixgb_dma_malloc(struct adapter *, bus_size_t,
		struct ixgb_dma_alloc *, int);
static void	ixgb_dma_free(struct adapter *, struct ixgb_dma_alloc *);
#ifdef DEVICE_POLLING
static poll_handler_t ixgb_poll;
#endif
151
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

/* Newbus method table connecting the generic device framework to us. */
static device_method_t ixgb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixgb_probe),
	DEVMETHOD(device_attach, ixgb_attach),
	DEVMETHOD(device_detach, ixgb_detach),
	DEVMETHOD(device_shutdown, ixgb_shutdown),

	DEVMETHOD_END
};

static driver_t ixgb_driver = {
	"ixgb", ixgb_methods, sizeof(struct adapter),
};

static devclass_t ixgb_devclass;
DRIVER_MODULE(ixgb, pci, ixgb_driver, ixgb_devclass, 0, 0);

MODULE_DEPEND(ixgb, pci, 1, 1, 1);
MODULE_DEPEND(ixgb, ether, 1, 1, 1);

/* some defines for controlling descriptor fetches in h/w */
#define RXDCTL_PTHRESH_DEFAULT 128	/* chip considers prefech below this */
#define RXDCTL_HTHRESH_DEFAULT 16	/* chip will only prefetch if tail is
					 * pushed this many descriptors from
					 * head */
#define RXDCTL_WTHRESH_DEFAULT 0	/* chip writes back at this many or RXT0 */
182
183
184/*********************************************************************
185 * Device identification routine
186 *
187 * ixgb_probe determines if the driver should be loaded on
188 * adapter based on PCI vendor/device id of the adapter.
189 *
190 * return 0 on success, positive on failure
191 *********************************************************************/
192
193static int
194ixgb_probe(device_t dev)
195{
196 ixgb_vendor_info_t *ent;
197
198 u_int16_t pci_vendor_id = 0;
199 u_int16_t pci_device_id = 0;
200 u_int16_t pci_subvendor_id = 0;
201 u_int16_t pci_subdevice_id = 0;
202 char adapter_name[60];
203
204 INIT_DEBUGOUT("ixgb_probe: begin");
205
206 pci_vendor_id = pci_get_vendor(dev);
207 if (pci_vendor_id != IXGB_VENDOR_ID)
208 return (ENXIO);
209
210 pci_device_id = pci_get_device(dev);
211 pci_subvendor_id = pci_get_subvendor(dev);
212 pci_subdevice_id = pci_get_subdevice(dev);
213
214 ent = ixgb_vendor_info_array;
215 while (ent->vendor_id != 0) {
216 if ((pci_vendor_id == ent->vendor_id) &&
217 (pci_device_id == ent->device_id) &&
218
219 ((pci_subvendor_id == ent->subvendor_id) ||
220 (ent->subvendor_id == PCI_ANY_ID)) &&
221
222 ((pci_subdevice_id == ent->subdevice_id) ||
223 (ent->subdevice_id == PCI_ANY_ID))) {
224 sprintf(adapter_name, "%s, Version - %s",
225 ixgb_strings[ent->index],
226 ixgb_driver_version);
227 device_set_desc_copy(dev, adapter_name);
228 return (BUS_PROBE_DEFAULT);
229 }
230 ent++;
231 }
232
233 return (ENXIO);
234}
235
236/*********************************************************************
237 * Device initialization routine
238 *
239 * The attach entry point is called when the driver is being loaded.
240 * This routine identifies the type of hardware, allocates all resources
241 * and initializes the hardware.
242 *
243 * return 0 on success, positive on failure
244 *********************************************************************/
245
246static int
247ixgb_attach(device_t dev)
248{
249 struct adapter *adapter;
250 int tsize, rsize;
251 int error = 0;
252
253 device_printf(dev, "%s\n", ixgb_copyright);
254 INIT_DEBUGOUT("ixgb_attach: begin");
255
256 /* Allocate, clear, and link in our adapter structure */
257 if (!(adapter = device_get_softc(dev))) {
258 device_printf(dev, "adapter structure allocation failed\n");
259 return (ENOMEM);
260 }
261 bzero(adapter, sizeof(struct adapter));
262 adapter->dev = dev;
263 adapter->osdep.dev = dev;
264 IXGB_LOCK_INIT(adapter, device_get_nameunit(dev));
265
266 if (ixgb_adapter_list != NULL)
267 ixgb_adapter_list->prev = adapter;
268 adapter->next = ixgb_adapter_list;
269 ixgb_adapter_list = adapter;
270
271 /* SYSCTL APIs */
272 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
273 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
274 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
275 (void *)adapter, 0,
276 ixgb_sysctl_stats, "I", "Statistics");
277
278 callout_init_mtx(&adapter->timer, &adapter->mtx, 0);
279
280 /* Determine hardware revision */
281 ixgb_identify_hardware(adapter);
282
283 /* Parameters (to be read from user) */
284 adapter->num_tx_desc = IXGB_MAX_TXD;
285 adapter->num_rx_desc = IXGB_MAX_RXD;
286 adapter->tx_int_delay = TIDV;
287 adapter->rx_int_delay = RDTR;
288 adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
289
290 adapter->hw.fc.high_water = FCRTH;
291 adapter->hw.fc.low_water = FCRTL;
292 adapter->hw.fc.pause_time = FCPAUSE;
293 adapter->hw.fc.send_xon = TRUE;
294 adapter->hw.fc.type = FLOW_CONTROL;
295
296
297 /* Set the max frame size assuming standard ethernet sized frames */
298 adapter->hw.max_frame_size =
299 ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
300
301 if (ixgb_allocate_pci_resources(adapter)) {
302 device_printf(dev, "Allocation of PCI resources failed\n");
303 error = ENXIO;
304 goto err_pci;
305 }
306 tsize = IXGB_ROUNDUP(adapter->num_tx_desc *
307 sizeof(struct ixgb_tx_desc), 4096);
308
309 /* Allocate Transmit Descriptor ring */
310 if (ixgb_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
311 device_printf(dev, "Unable to allocate TxDescriptor memory\n");
312 error = ENOMEM;
313 goto err_tx_desc;
314 }
315 adapter->tx_desc_base = (struct ixgb_tx_desc *) adapter->txdma.dma_vaddr;
316
317 rsize = IXGB_ROUNDUP(adapter->num_rx_desc *
318 sizeof(struct ixgb_rx_desc), 4096);
319
320 /* Allocate Receive Descriptor ring */
321 if (ixgb_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
322 device_printf(dev, "Unable to allocate rx_desc memory\n");
323 error = ENOMEM;
324 goto err_rx_desc;
325 }
326 adapter->rx_desc_base = (struct ixgb_rx_desc *) adapter->rxdma.dma_vaddr;
327
328 /* Allocate multicast array memory. */
329 adapter->mta = malloc(sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS *
330 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
331 if (adapter->mta == NULL) {
332 device_printf(dev, "Can not allocate multicast setup array\n");
333 error = ENOMEM;
334 goto err_hw_init;
335 }
336
337 /* Initialize the hardware */
338 if (ixgb_hardware_init(adapter)) {
339 device_printf(dev, "Unable to initialize the hardware\n");
340 error = EIO;
341 goto err_hw_init;
342 }
343 /* Setup OS specific network interface */
344 if (ixgb_setup_interface(dev, adapter) != 0)
345 goto err_hw_init;
346
347 /* Initialize statistics */
348 ixgb_clear_hw_cntrs(&adapter->hw);
349 ixgb_update_stats_counters(adapter);
350
351 INIT_DEBUGOUT("ixgb_attach: end");
352 return (0);
353
354err_hw_init:
355 ixgb_dma_free(adapter, &adapter->rxdma);
356err_rx_desc:
357 ixgb_dma_free(adapter, &adapter->txdma);
358err_tx_desc:
359err_pci:
360 if (adapter->ifp != NULL)
361 if_free(adapter->ifp);
362 ixgb_free_pci_resources(adapter);
363 sysctl_ctx_free(&adapter->sysctl_ctx);
364 free(adapter->mta, M_DEVBUF);
365 return (error);
366
367}
368
369/*********************************************************************
370 * Device removal routine
371 *
372 * The detach entry point is called when the driver is being removed.
373 * This routine stops the adapter and deallocates all the resources
374 * that were allocated for driver operation.
375 *
376 * return 0 on success, positive on failure
377 *********************************************************************/
378
static int
ixgb_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = adapter->ifp;

	INIT_DEBUGOUT("ixgb_detach: begin");

#ifdef DEVICE_POLLING
	/* Polling must be torn down before the interface disappears. */
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	IXGB_LOCK(adapter);
	/* Flag detach so the ioctl path becomes a no-op from here on. */
	adapter->in_detach = 1;

	ixgb_stop(adapter);
	IXGB_UNLOCK(adapter);

#if __FreeBSD_version < 500000
	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
#else
	ether_ifdetach(ifp);
#endif
	/* Wait for any in-flight timer callout before freeing resources. */
	callout_drain(&adapter->timer);
	ixgb_free_pci_resources(adapter);
#if __FreeBSD_version >= 500000
	if_free(ifp);
#endif

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base) {
		ixgb_dma_free(adapter, &adapter->txdma);
		adapter->tx_desc_base = NULL;
	}
	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base) {
		ixgb_dma_free(adapter, &adapter->rxdma);
		adapter->rx_desc_base = NULL;
	}
	/* Remove from the adapter list (doubly-linked, head pointer case). */
	if (ixgb_adapter_list == adapter)
		ixgb_adapter_list = adapter->next;
	if (adapter->next != NULL)
		adapter->next->prev = adapter->prev;
	if (adapter->prev != NULL)
		adapter->prev->next = adapter->next;
	free(adapter->mta, M_DEVBUF);

	IXGB_LOCK_DESTROY(adapter);
	return (0);
}
431
432/*********************************************************************
433 *
434 * Shutdown entry point
435 *
436 **********************************************************************/
437
438static int
439ixgb_shutdown(device_t dev)
440{
441 struct adapter *adapter = device_get_softc(dev);
442 IXGB_LOCK(adapter);
443 ixgb_stop(adapter);
444 IXGB_UNLOCK(adapter);
445 return (0);
446}
447
448
449/*********************************************************************
450 * Transmit entry point
451 *
452 * ixgb_start is called by the stack to initiate a transmit.
453 * The driver will remain in this routine as long as there are
454 * packets to transmit and transmit resources are available.
455 * In case resources are not available stack is notified and
456 * the packet is requeued.
457 **********************************************************************/
458
static void
ixgb_start_locked(struct ifnet * ifp)
{
	struct mbuf *m_head;
	struct adapter *adapter = ifp->if_softc;

	IXGB_LOCK_ASSERT(adapter);

	/* Nothing can be sent until the link is up. */
	if (!adapter->link_active)
		return;

	/* Drain the send queue, one packet per iteration. */
	while (ifp->if_snd.ifq_head != NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);

		if (m_head == NULL)
			break;

		if (ixgb_encap(adapter, m_head)) {
			/*
			 * Out of descriptors or DMA maps: mark the
			 * interface busy and put the packet back at the
			 * head of the queue for a later retry.
			 */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IF_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
#if __FreeBSD_version < 500000
		if (ifp->if_bpf)
			bpf_mtap(ifp, m_head);
#else
		ETHER_BPF_MTAP(ifp, m_head);
#endif
		/* Set timeout in case hardware has problems transmitting */
		adapter->tx_timer = IXGB_TX_TIMEOUT;

	}
	return;
}
494
495static void
496ixgb_start(struct ifnet *ifp)
497{
498 struct adapter *adapter = ifp->if_softc;
499
500 IXGB_LOCK(adapter);
501 ixgb_start_locked(ifp);
502 IXGB_UNLOCK(adapter);
503 return;
504}
505
506/*********************************************************************
507 * Ioctl entry point
508 *
509 * ixgb_ioctl is called when the user wants to configure the
510 * interface.
511 *
512 * return 0 on success, positive on failure
513 **********************************************************************/
514
static int
ixgb_ioctl(struct ifnet * ifp, IOCTL_CMD_TYPE command, caddr_t data)
{
	int mask, error = 0;
	struct ifreq *ifr = (struct ifreq *) data;
	struct adapter *adapter = ifp->if_softc;

	/* Ignore requests that race with detach. */
	if (adapter->in_detach)
		goto out;

	switch (command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
		ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
		/*
		 * NOTE(review): only the upper bound is validated here;
		 * there is no minimum-MTU check -- confirm the stack
		 * rejects undersized values before this point.
		 */
		if (ifr->ifr_mtu > IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
			error = EINVAL;
		} else {
			IXGB_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->hw.max_frame_size =
			    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

			/* Reinitialize so the new frame size takes effect. */
			ixgb_init_locked(adapter);
			IXGB_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
		IXGB_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				ixgb_init_locked(adapter);
			}
			/* Re-sync promiscuous state with if_flags. */
			ixgb_disable_promisc(adapter);
			ixgb_set_promisc(adapter);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				ixgb_stop(adapter);
			}
		}
		IXGB_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			/* Reload the multicast filter with interrupts off. */
			IXGB_LOCK(adapter);
			ixgb_disable_intr(adapter);
			ixgb_set_multi(adapter);
			ixgb_enable_intr(adapter);
			IXGB_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(ixgb_poll, ifp);
				if (error)
					return(error);
				IXGB_LOCK(adapter);
				ixgb_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				IXGB_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				IXGB_LOCK(adapter);
				ixgb_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				IXGB_UNLOCK(adapter);
			}
		}
#endif				/* DEVICE_POLLING */
		if (mask & IFCAP_HWCSUM) {
			/* Toggle checksum offload; reinit if running. */
			if (IFCAP_HWCSUM & ifp->if_capenable)
				ifp->if_capenable &= ~IFCAP_HWCSUM;
			else
				ifp->if_capenable |= IFCAP_HWCSUM;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixgb_init(adapter);
		}
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%X)\n", (int)command);
		error = EINVAL;
	}

out:
	return (error);
}
617
618/*********************************************************************
619 * Watchdog entry point
620 *
621 * This routine is called whenever hardware quits transmitting.
622 *
623 **********************************************************************/
624
625static void
626ixgb_watchdog(struct adapter *adapter)
627{
628 struct ifnet *ifp;
629
630 ifp = adapter->ifp;
631
632 /*
633 * If we are in this routine because of pause frames, then don't
634 * reset the hardware.
635 */
636 if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_TXOFF) {
637 adapter->tx_timer = IXGB_TX_TIMEOUT;
638 return;
639 }
640 if_printf(ifp, "watchdog timeout -- resetting\n");
641
642 ixgb_stop(adapter);
643 ixgb_init_locked(adapter);
644
645
646 ifp->if_oerrors++;
647
648 return;
649}
650
651/*********************************************************************
652 * Init entry point
653 *
654 * This routine is used in two ways. It is used by the stack as
655 * init entry point in network interface structure. It is also used
656 * by the driver as a hw/sw initialization routine to get to a
657 * consistent state.
658 *
659 * return 0 on success, positive on failure
660 **********************************************************************/
661
static void
ixgb_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp;

	INIT_DEBUGOUT("ixgb_init: begin");

	IXGB_LOCK_ASSERT(adapter);

	/* Quiesce first so re-init from a running state is safe. */
	ixgb_stop(adapter);
	ifp = adapter->ifp;

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), adapter->hw.curr_mac_addr,
	    IXGB_ETH_LENGTH_OF_ADDRESS);

	/* Initialize the hardware */
	if (ixgb_hardware_init(adapter)) {
		if_printf(ifp, "Unable to initialize the hardware\n");
		return;
	}
	ixgb_enable_vlans(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgb_setup_transmit_structures(adapter)) {
		if_printf(ifp, "Could not setup transmit structures\n");
		ixgb_stop(adapter);
		return;
	}
	ixgb_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	ixgb_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (ixgb_setup_receive_structures(adapter)) {
		if_printf(ifp, "Could not setup receive structures\n");
		ixgb_stop(adapter);
		return;
	}
	ixgb_initialize_receive_unit(adapter);

	/* Don't lose promiscuous settings */
	ixgb_set_promisc(adapter);

	ifp = adapter->ifp;
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;


	/* Advertise checksum offload to the stack only if enabled. */
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = IXGB_CHECKSUM_FEATURES;
	else
		ifp->if_hwassist = 0;


	/* Enable jumbo frames */
	if (ifp->if_mtu > ETHERMTU) {
		uint32_t temp_reg;
		IXGB_WRITE_REG(&adapter->hw, MFS,
		    adapter->hw.max_frame_size << IXGB_MFS_SHIFT);
		temp_reg = IXGB_READ_REG(&adapter->hw, CTRL0);
		temp_reg |= IXGB_CTRL0_JFE;
		IXGB_WRITE_REG(&adapter->hw, CTRL0, temp_reg);
	}
	/* Start the periodic (1 Hz) local timer. */
	callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter);
	ixgb_clear_hw_cntrs(&adapter->hw);
#ifdef DEVICE_POLLING
	/*
	 * Only disable interrupts if we are polling, make sure they are on
	 * otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		ixgb_disable_intr(adapter);
	else
#endif
		ixgb_enable_intr(adapter);

	return;
}
742
/* if_init entry point: acquire the lock and run the locked init. */
static void
ixgb_init(void *arg)
{
	struct adapter *sc = arg;

	IXGB_LOCK(sc);
	ixgb_init_locked(sc);
	IXGB_UNLOCK(sc);
}
753
754#ifdef DEVICE_POLLING
755static int
756ixgb_poll_locked(struct ifnet * ifp, enum poll_cmd cmd, int count)
757{
758 struct adapter *adapter = ifp->if_softc;
759 u_int32_t reg_icr;
760 int rx_npkts;
761
762 IXGB_LOCK_ASSERT(adapter);
763
764 if (cmd == POLL_AND_CHECK_STATUS) {
765 reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
766 if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
767 ixgb_check_for_link(&adapter->hw);
768 ixgb_print_link_status(adapter);
769 }
770 }
771 rx_npkts = ixgb_process_receive_interrupts(adapter, count);
772 ixgb_clean_transmit_interrupts(adapter);
773
774 if (ifp->if_snd.ifq_head != NULL)
775 ixgb_start_locked(ifp);
776 return (rx_npkts);
777}
778
779static int
780ixgb_poll(struct ifnet * ifp, enum poll_cmd cmd, int count)
781{
782 struct adapter *adapter = ifp->if_softc;
783 int rx_npkts = 0;
784
785 IXGB_LOCK(adapter);
786 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
787 rx_npkts = ixgb_poll_locked(ifp, cmd, count);
788 IXGB_UNLOCK(adapter);
789 return (rx_npkts);
790}
791#endif /* DEVICE_POLLING */
792
793/*********************************************************************
794 *
795 * Interrupt Service routine
796 *
797 **********************************************************************/
798
static void
ixgb_intr(void *arg)
{
	u_int32_t loop_cnt = IXGB_MAX_INTR;
	u_int32_t reg_icr;
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	boolean_t rxdmt0 = FALSE;

	IXGB_LOCK(adapter);

	ifp = adapter->ifp;

#ifdef DEVICE_POLLING
	/* When polling is enabled the interrupt path does nothing. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		IXGB_UNLOCK(adapter);
		return;
	}
#endif

	/* Reading ICR returns (and acknowledges) the pending causes. */
	reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
	if (reg_icr == 0) {
		/* Not our interrupt. */
		IXGB_UNLOCK(adapter);
		return;
	}

	/* Remember a descriptor-minimum-threshold hit for re-arming below. */
	if (reg_icr & IXGB_INT_RXDMT0)
		rxdmt0 = TRUE;

#ifdef _SV_
	if (reg_icr & IXGB_INT_RXDMT0)
		adapter->sv_stats.icr_rxdmt0++;
	if (reg_icr & IXGB_INT_RXO)
		adapter->sv_stats.icr_rxo++;
	if (reg_icr & IXGB_INT_RXT0)
		adapter->sv_stats.icr_rxt0++;
	if (reg_icr & IXGB_INT_TXDW)
		adapter->sv_stats.icr_TXDW++;
#endif				/* _SV_ */

	/* Link status change */
	if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
		ixgb_check_for_link(&adapter->hw);
		ixgb_print_link_status(adapter);
	}
	/* Bounded number of cleanup passes over the RX and TX rings. */
	while (loop_cnt > 0) {
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ixgb_process_receive_interrupts(adapter, -1);
			ixgb_clean_transmit_interrupts(adapter);
		}
		loop_cnt--;
	}

	if (rxdmt0 && adapter->raidc) {
		/* Mask then re-enable RXDMT0 to re-arm the threshold. */
		IXGB_WRITE_REG(&adapter->hw, IMC, IXGB_INT_RXDMT0);
		IXGB_WRITE_REG(&adapter->hw, IMS, IXGB_INT_RXDMT0);
	}
	/* Restart transmission if packets were queued while busy. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_snd.ifq_head != NULL)
		ixgb_start_locked(ifp);

	IXGB_UNLOCK(adapter);
	return;
}
862
863
864/*********************************************************************
865 *
866 * Media Ioctl callback
867 *
868 * This routine is called whenever the user queries the status of
869 * the interface using ifconfig.
870 *
871 **********************************************************************/
static void
ixgb_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("ixgb_media_status: begin");

	/* Refresh link state from the hardware before reporting. */
	ixgb_check_for_link(&adapter->hw);
	ixgb_print_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report only "valid" status with no active media. */
	if (!adapter->hw.link_up)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;
	/*
	 * NOTE(review): IFM_1000_SX looks odd for a 10GbE (82597EX)
	 * part -- confirm against the media word registered in
	 * ixgb_setup_interface() before changing it.
	 */
	ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;

	return;
}
893
894/*********************************************************************
895 *
896 * Media Ioctl callback
897 *
898 * This routine is called when the user changes speed/duplex using
899 * media/mediopt option with ifconfig.
900 *
901 **********************************************************************/
902static int
903ixgb_media_change(struct ifnet * ifp)
904{
905 struct adapter *adapter = ifp->if_softc;
906 struct ifmedia *ifm = &adapter->media;
907
908 INIT_DEBUGOUT("ixgb_media_change: begin");
909
910 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
911 return (EINVAL);
912
913 return (0);
914}
915
916/*********************************************************************
917 *
918 * This routine maps the mbufs to tx descriptors.
919 *
920 * return 0 on success, positive on failure
921 **********************************************************************/
922
923static int
924ixgb_encap(struct adapter * adapter, struct mbuf * m_head)
925{
926 u_int8_t txd_popts;
927 int i, j, error, nsegs;
928
929#if __FreeBSD_version < 500000
930 struct ifvlan *ifv = NULL;
931#endif
932 bus_dma_segment_t segs[IXGB_MAX_SCATTER];
933 bus_dmamap_t map;
934 struct ixgb_buffer *tx_buffer = NULL;
935 struct ixgb_tx_desc *current_tx_desc = NULL;
936 struct ifnet *ifp = adapter->ifp;
937
938 /*
939 * Force a cleanup if number of TX descriptors available hits the
940 * threshold
941 */
942 if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
943 ixgb_clean_transmit_interrupts(adapter);
944 }
945 if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
946 adapter->no_tx_desc_avail1++;
947 return (ENOBUFS);
948 }
949 /*
950 * Map the packet for DMA.
951 */
952 if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) {
953 adapter->no_tx_map_avail++;
954 return (ENOMEM);
955 }
956 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs,
957 &nsegs, BUS_DMA_NOWAIT);
958 if (error != 0) {
959 adapter->no_tx_dma_setup++;
960 if_printf(ifp, "ixgb_encap: bus_dmamap_load_mbuf failed; "
961 "error %u\n", error);
962 bus_dmamap_destroy(adapter->txtag, map);
963 return (error);
964 }
965 KASSERT(nsegs != 0, ("ixgb_encap: empty packet"));
966
967 if (nsegs > adapter->num_tx_desc_avail) {
968 adapter->no_tx_desc_avail2++;
969 bus_dmamap_destroy(adapter->txtag, map);
970 return (ENOBUFS);
971 }
972 if (ifp->if_hwassist > 0) {
973 ixgb_transmit_checksum_setup(adapter, m_head,
974 &txd_popts);
975 } else
976 txd_popts = 0;
977
978 /* Find out if we are in vlan mode */
979#if __FreeBSD_version < 500000
980 if ((m_head->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
981 m_head->m_pkthdr.rcvif != NULL &&
982 m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
983 ifv = m_head->m_pkthdr.rcvif->if_softc;
984#elseif __FreeBSD_version < 700000
985 mtag = VLAN_OUTPUT_TAG(ifp, m_head);
986#endif
987 i = adapter->next_avail_tx_desc;
988 for (j = 0; j < nsegs; j++) {
989 tx_buffer = &adapter->tx_buffer_area[i];
990 current_tx_desc = &adapter->tx_desc_base[i];
991
992 current_tx_desc->buff_addr = htole64(segs[j].ds_addr);
993 current_tx_desc->cmd_type_len = (adapter->txd_cmd | segs[j].ds_len);
994 current_tx_desc->popts = txd_popts;
995 if (++i == adapter->num_tx_desc)
996 i = 0;
997
998 tx_buffer->m_head = NULL;
999 }
1000
1001 adapter->num_tx_desc_avail -= nsegs;
1002 adapter->next_avail_tx_desc = i;
1003
1004#if __FreeBSD_version < 500000
1005 if (ifv != NULL) {
1006 /* Set the vlan id */
1007 current_tx_desc->vlan = ifv->ifv_tag;
1008#elseif __FreeBSD_version < 700000
1009 if (mtag != NULL) {
1010 /* Set the vlan id */
1011 current_tx_desc->vlan = VLAN_TAG_VALUE(mtag);
1012#else
1013 if (m_head->m_flags & M_VLANTAG) {
1014 current_tx_desc->vlan = m_head->m_pkthdr.ether_vtag;
1015#endif
1016
1017 /* Tell hardware to add tag */
1018 current_tx_desc->cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
1019 }
1020 tx_buffer->m_head = m_head;
1021 tx_buffer->map = map;
1022 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1023
1024 /*
1025 * Last Descriptor of Packet needs End Of Packet (EOP)
1026 */
1027 current_tx_desc->cmd_type_len |= (IXGB_TX_DESC_CMD_EOP);
1028
1029 /*
1030 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
1031 * that this frame is available to transmit.
1032 */
1033 IXGB_WRITE_REG(&adapter->hw, TDT, i);
1034
1035 return (0);
1036}
1037
1038static void
1039ixgb_set_promisc(struct adapter * adapter)
1040{
1041
1042 u_int32_t reg_rctl;
1043 struct ifnet *ifp = adapter->ifp;
1044
1045 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1046
1047 if (ifp->if_flags & IFF_PROMISC) {
1048 reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1049 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1050 } else if (ifp->if_flags & IFF_ALLMULTI) {
1051 reg_rctl |= IXGB_RCTL_MPE;
1052 reg_rctl &= ~IXGB_RCTL_UPE;
1053 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1054 }
1055 return;
1056}
1057
1058static void
1059ixgb_disable_promisc(struct adapter * adapter)
1060{
1061 u_int32_t reg_rctl;
1062
1063 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1064
1065 reg_rctl &= (~IXGB_RCTL_UPE);
1066 reg_rctl &= (~IXGB_RCTL_MPE);
1067 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1068
1069 return;
1070}
1071
1072
1073/*********************************************************************
1074 * Multicast Update
1075 *
1076 * This routine is called whenever multicast address list is updated.
1077 *
1078 **********************************************************************/
1079
1080static void
1081ixgb_set_multi(struct adapter * adapter)
1082{
1083 u_int32_t reg_rctl = 0;
1084 u_int8_t *mta;
1085 struct ifmultiaddr *ifma;
1086 int mcnt = 0;
1087 struct ifnet *ifp = adapter->ifp;
1088
1089 IOCTL_DEBUGOUT("ixgb_set_multi: begin");
1090
1091 mta = adapter->mta;
1092 bzero(mta, sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS *
1093 MAX_NUM_MULTICAST_ADDRESSES);
1094
1095 if_maddr_rlock(ifp);
1096#if __FreeBSD_version < 500000
1097 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1098#else
1099 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1100#endif
1101 if (ifma->ifma_addr->sa_family != AF_LINK)
1102 continue;
1103
1104 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1105 &mta[mcnt * IXGB_ETH_LENGTH_OF_ADDRESS], IXGB_ETH_LENGTH_OF_ADDRESS);
1106 mcnt++;
1107 }
1108 if_maddr_runlock(ifp);
1109
1110 if (mcnt > MAX_NUM_MULTICAST_ADDRESSES) {
1111 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1112 reg_rctl |= IXGB_RCTL_MPE;
1113 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1114 } else
1115 ixgb_mc_addr_list_update(&adapter->hw, mta, mcnt, 0);
1116
1117 return;
1118}
1119
1120
1121/*********************************************************************
1122 * Timer routine
1123 *
1124 * This routine checks for link status and updates statistics.
1125 *
1126 **********************************************************************/
1127
1128static void
1129ixgb_local_timer(void *arg)
1130{
1131 struct ifnet *ifp;
1132 struct adapter *adapter = arg;
1133 ifp = adapter->ifp;
1134
1135 IXGB_LOCK_ASSERT(adapter);
1136
1137 ixgb_check_for_link(&adapter->hw);
1138 ixgb_print_link_status(adapter);
1139 ixgb_update_stats_counters(adapter);
1140 if (ixgb_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1141 ixgb_print_hw_stats(adapter);
1142 }
1143 if (adapter->tx_timer != 0 && --adapter->tx_timer == 0)
1144 ixgb_watchdog(adapter);
1145 callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter);
1146}
1147
1148static void
1149ixgb_print_link_status(struct adapter * adapter)
1150{
1151 if (adapter->hw.link_up) {
1152 if (!adapter->link_active) {
1153 if_printf(adapter->ifp, "Link is up %d Mbps %s \n",
1154 10000,
1155 "Full Duplex");
1156 adapter->link_active = 1;
1157 }
1158 } else {
1159 if (adapter->link_active) {
1160 if_printf(adapter->ifp, "Link is Down \n");
1161 adapter->link_active = 0;
1162 }
1163 }
1164
1165 return;
1166}
1167
1168
1169
1170/*********************************************************************
1171 *
1172 * This routine disables all traffic on the adapter by issuing a
1173 * global reset on the MAC and deallocates TX/RX buffers.
1174 *
1175 **********************************************************************/
1176
/*
 * Bring the interface down: mask interrupts, globally reset the MAC,
 * stop the timer callout, free TX/RX software state and clear the
 * RUNNING/OACTIVE flags.  Called with the adapter lock held.
 */
static void
ixgb_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	ifp = adapter->ifp;

	IXGB_LOCK_ASSERT(adapter);

	INIT_DEBUGOUT("ixgb_stop: begin\n");
	/* Mask interrupts before resetting the MAC. */
	ixgb_disable_intr(adapter);
	/*
	 * NOTE(review): adapter_stopped is cleared before the stop call,
	 * presumably so ixgb_adapter_stop() performs the full reset
	 * rather than short-circuiting -- confirm against its definition.
	 */
	adapter->hw.adapter_stopped = FALSE;
	ixgb_adapter_stop(&adapter->hw);
	callout_stop(&adapter->timer);
	ixgb_free_transmit_structures(adapter);
	ixgb_free_receive_structures(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	adapter->tx_timer = 0;

	return;
}
1200
1201
1202/*********************************************************************
1203 *
1204 * Determine hardware revision.
1205 *
1206 **********************************************************************/
/*
 * Read PCI configuration space to identify the board: re-enable bus
 * mastering / memory decoding if the BIOS left them off, capture the
 * vendor/device/revision/subsystem IDs, and set hw.mac_type for the
 * supported 82597 variants.
 */
static void
ixgb_identify_hardware(struct adapter * adapter)
{
	device_t dev = adapter->dev;

	/* Make sure our PCI config space has the necessary stuff set */
	adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
	      (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
		device_printf(dev,
		    "Memory Access and/or Bus Master bits were not set!\n");
		/* Re-enable bus mastering and memory-space decoding. */
		adapter->hw.pci_cmd_word |=
		    (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
		pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
	}
	/* Save off the information about this board */
	adapter->hw.vendor_id = pci_get_vendor(dev);
	adapter->hw.device_id = pci_get_device(dev);
	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	/* Set MacType, etc. based on this PCI info */
	switch (adapter->hw.device_id) {
	case IXGB_DEVICE_ID_82597EX:
	case IXGB_DEVICE_ID_82597EX_SR:
		adapter->hw.mac_type = ixgb_82597;
		break;
	default:
		/*
		 * Unknown id: complain but continue; mac_type is left
		 * unset here (presumably probe rejected such devices
		 * earlier -- confirm).
		 */
		INIT_DEBUGOUT1("Unknown device if 0x%x", adapter->hw.device_id);
		device_printf(dev, "unsupported device id 0x%x\n",
		    adapter->hw.device_id);
	}

	return;
}
1243
/*
 * Map the device register window (memory BAR at rid IXGB_MMBA),
 * allocate a shareable IRQ and register ixgb_intr as the handler.
 * Returns 0 on success or ENXIO on any failure; resources acquired
 * before a failure are presumably released by the caller via
 * ixgb_free_pci_resources() -- confirm against the attach path.
 */
static int
ixgb_allocate_pci_resources(struct adapter * adapter)
{
	int rid;
	device_t dev = adapter->dev;

	rid = IXGB_MMBA;
	adapter->res_memory = bus_alloc_resource(dev, SYS_RES_MEMORY,
						 &rid, 0, ~0, 1,
						 RF_ACTIVE);
	if (!(adapter->res_memory)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}
	/* Register accesses go through the bus-space tag/handle pair. */
	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->res_memory);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->res_memory);
	adapter->hw.hw_addr = (uint8_t *) & adapter->osdep.mem_bus_space_handle;

	rid = 0x0;
	adapter->res_interrupt = bus_alloc_resource(dev, SYS_RES_IRQ,
						    &rid, 0, ~0, 1,
						    RF_SHAREABLE | RF_ACTIVE);
	if (!(adapter->res_interrupt)) {
		device_printf(dev,
		    "Unable to allocate bus resource: interrupt\n");
		return (ENXIO);
	}
	/* NULL filter: ixgb_intr runs as a threaded (ithread) handler. */
	if (bus_setup_intr(dev, adapter->res_interrupt,
			   INTR_TYPE_NET | INTR_MPSAFE,
			   NULL, (void (*) (void *))ixgb_intr, adapter,
			   &adapter->int_handler_tag)) {
		device_printf(dev, "Error registering interrupt handler!\n");
		return (ENXIO);
	}
	adapter->hw.back = &adapter->osdep;

	return (0);
}
1284
1285static void
1286ixgb_free_pci_resources(struct adapter * adapter)
1287{
1288 device_t dev = adapter->dev;
1289
1290 if (adapter->res_interrupt != NULL) {
1291 bus_teardown_intr(dev, adapter->res_interrupt,
1292 adapter->int_handler_tag);
1293 bus_release_resource(dev, SYS_RES_IRQ, 0,
1294 adapter->res_interrupt);
1295 }
1296 if (adapter->res_memory != NULL) {
1297 bus_release_resource(dev, SYS_RES_MEMORY, IXGB_MMBA,
1298 adapter->res_memory);
1299 }
1300 if (adapter->res_ioport != NULL) {
1301 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
1302 adapter->res_ioport);
1303 }
1304 return;
1305}
1306
1307/*********************************************************************
1308 *
1309 * Initialize the hardware to a configuration as specified by the
1310 * adapter structure. The controller is reset, the EEPROM is
1311 * verified, the MAC address is set, then the shared initialization
1312 * routines are called.
1313 *
1314 **********************************************************************/
1315static int
1316ixgb_hardware_init(struct adapter * adapter)
1317{
1318 /* Issue a global reset */
1319 adapter->hw.adapter_stopped = FALSE;
1320 ixgb_adapter_stop(&adapter->hw);
1321
1322 /* Make sure we have a good EEPROM before we read from it */
1323 if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
1324 device_printf(adapter->dev,
1325 "The EEPROM Checksum Is Not Valid\n");
1326 return (EIO);
1327 }
1328 if (!ixgb_init_hw(&adapter->hw)) {
1329 device_printf(adapter->dev, "Hardware Initialization Failed");
1330 return (EIO);
1331 }
1332
1333 return (0);
1334}
1335
1336/*********************************************************************
1337 *
1338 * Setup networking device structure and register an interface.
1339 *
1340 **********************************************************************/
/*
 * Allocate and fill in the ifnet: driver entry points, queue length,
 * capabilities (HWCSUM, VLAN tagging/MTU, optional polling) and the
 * supported media list.  Returns 0 on success, -1 if if_alloc fails.
 */
static int
ixgb_setup_interface(device_t dev, struct adapter * adapter)
{
	struct ifnet *ifp;
	INIT_DEBUGOUT("ixgb_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		return (-1);
	}
#if __FreeBSD_version >= 502000
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
#else
	ifp->if_unit = device_get_unit(dev);
	ifp->if_name = "ixgb";
#endif
	/*
	 * NOTE(review): 1000000000 is 1Gbps but this is a 10Gb part;
	 * possibly constrained by the width of if_baudrate on older
	 * ABIs -- confirm before "fixing".
	 */
	ifp->if_baudrate = 1000000000;
	ifp->if_init = ixgb_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixgb_ioctl;
	ifp->if_start = ixgb_start;
	/* Keep one descriptor in reserve, hence num_tx_desc - 1. */
	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;

#if __FreeBSD_version < 500000
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
#else
	ether_ifattach(ifp, adapter->hw.curr_mac_addr);
#endif

	ifp->if_capabilities = IFCAP_HWCSUM;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

#if __FreeBSD_version >= 500000
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
#endif

	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Polling is added to capabilities only after capenable was
	 * copied above, so it is advertised but not enabled by default.
	 */
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgb_media_change,
	    ixgb_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
	    0, NULL);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
	    0, NULL);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return (0);
}
1404
1405/********************************************************************
1406 * Manage DMA'able memory.
1407 *******************************************************************/
1408static void
1409ixgb_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
1410{
1411 if (error)
1412 return;
1413 *(bus_addr_t *) arg = segs->ds_addr;
1414 return;
1415}
1416
/*
 * Allocate a page-aligned, single-segment DMA area of 'size' bytes and
 * fill in 'dma' (tag, map, kernel va, bus pa, size).  On failure the
 * partially created objects are unwound through the fail_* labels and
 * the bus_dma error code is returned; 0 on success.
 */
static int
ixgb_dma_malloc(struct adapter * adapter, bus_size_t size,
		struct ixgb_dma_alloc * dma, int mapflags)
{
	device_t dev;
	int r;

	dev = adapter->dev;
	/* Tag constrains the area to exactly one segment of 'size' bytes. */
	r = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
			       PAGE_SIZE, 0,	/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,	/* filter, filterarg */
			       size,	/* maxsize */
			       1,	/* nsegments */
			       size,	/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
#if __FreeBSD_version >= 502000
			       NULL,	/* lockfunc */
			       NULL,	/* lockfuncarg */
#endif
			       &dma->dma_tag);
	if (r != 0) {
		device_printf(dev, "ixgb_dma_malloc: bus_dma_tag_create failed; "
		    "error %u\n", r);
		goto fail_0;
	}
	r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
			     BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		device_printf(dev, "ixgb_dma_malloc: bus_dmamem_alloc failed; "
		    "error %u\n", r);
		goto fail_1;
	}
	/* ixgb_dmamap_cb writes the segment's bus address into dma_paddr. */
	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
			    size,
			    ixgb_dmamap_cb,
			    &dma->dma_paddr,
			    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		device_printf(dev, "ixgb_dma_malloc: bus_dmamap_load failed; "
		    "error %u\n", r);
		goto fail_2;
	}
	dma->dma_size = size;
	return (0);
fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return (r);
}
1472
1473
1474
/*
 * Tear down a DMA area created by ixgb_dma_malloc(): unload the map,
 * free the memory, destroy the tag -- in that order.
 */
static void
ixgb_dma_free(struct adapter * adapter, struct ixgb_dma_alloc * dma)
{
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
}
1482
1483/*********************************************************************
1484 *
1485 * Allocate memory for tx_buffer structures. The tx_buffer stores all
1486 * the information needed to transmit a packet on the wire.
1487 *
1488 **********************************************************************/
1489static int
1490ixgb_allocate_transmit_structures(struct adapter * adapter)
1491{
1492 if (!(adapter->tx_buffer_area =
1493 (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
1494 adapter->num_tx_desc, M_DEVBUF,
1495 M_NOWAIT | M_ZERO))) {
1496 device_printf(adapter->dev,
1497 "Unable to allocate tx_buffer memory\n");
1498 return ENOMEM;
1499 }
1500 bzero(adapter->tx_buffer_area,
1501 sizeof(struct ixgb_buffer) * adapter->num_tx_desc);
1502
1503 return 0;
1504}
1505
1506/*********************************************************************
1507 *
1508 * Allocate and initialize transmit structures.
1509 *
1510 **********************************************************************/
/*
 * Create the transmit DMA tag (up to IXGB_MAX_SCATTER segments of
 * MCLBYTES each per packet), allocate the tx_buffer array, zero the
 * descriptor ring and reset the ring bookkeeping.  Returns 0 or ENOMEM.
 */
static int
ixgb_setup_transmit_structures(struct adapter * adapter)
{
	/*
	 * Setup DMA descriptor areas.
	 */
	if (bus_dma_tag_create(bus_get_dma_tag(adapter->dev),	/* parent */
			       PAGE_SIZE, 0,	/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,	/* filter, filterarg */
			       MCLBYTES * IXGB_MAX_SCATTER,	/* maxsize */
			       IXGB_MAX_SCATTER,	/* nsegments */
			       MCLBYTES,	/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
#if __FreeBSD_version >= 502000
			       NULL,	/* lockfunc */
			       NULL,	/* lockfuncarg */
#endif
			       &adapter->txtag)) {
		device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
		return (ENOMEM);
	}
	if (ixgb_allocate_transmit_structures(adapter))
		return ENOMEM;

	bzero((void *)adapter->tx_desc_base,
	      (sizeof(struct ixgb_tx_desc)) * adapter->num_tx_desc);

	/* Ring starts empty with both indices at slot 0. */
	adapter->next_avail_tx_desc = 0;
	adapter->oldest_used_tx_desc = 0;

	/* Set number of descriptors available */
	adapter->num_tx_desc_avail = adapter->num_tx_desc;

	/* Set checksum context */
	adapter->active_checksum_context = OFFLOAD_NONE;

	return 0;
}
1551
1552/*********************************************************************
1553 *
1554 * Enable transmit unit.
1555 *
1556 **********************************************************************/
/*
 * Program the transmit half of the MAC: descriptor ring base/length,
 * head/tail pointers, interrupt delay, and the transmit control
 * register.  Also establishes the per-descriptor command template
 * (adapter->txd_cmd) used by the encap path.
 */
static void
ixgb_initialize_transmit_unit(struct adapter * adapter)
{
	u_int32_t reg_tctl;
	u_int64_t tdba = adapter->txdma.dma_paddr;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	IXGB_WRITE_REG(&adapter->hw, TDBAL,
		       (tdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));
	IXGB_WRITE_REG(&adapter->hw, TDLEN,
		       adapter->num_tx_desc *
		       sizeof(struct ixgb_tx_desc));

	/* Setup the HW Tx Head and Tail descriptor pointers */
	IXGB_WRITE_REG(&adapter->hw, TDH, 0);
	IXGB_WRITE_REG(&adapter->hw, TDT, 0);


	HW_DEBUGOUT2("Base = %x, Length = %x\n",
		     IXGB_READ_REG(&adapter->hw, TDBAL),
		     IXGB_READ_REG(&adapter->hw, TDLEN));

	/* Transmit interrupt delay value. */
	IXGB_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);


	/* Program the Transmit Control Register */
	/*
	 * NOTE(review): the value read here is immediately overwritten
	 * on the next line; the read may matter only as a register
	 * access side effect -- confirm before removing.
	 */
	reg_tctl = IXGB_READ_REG(&adapter->hw, TCTL);
	reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
	IXGB_WRITE_REG(&adapter->hw, TCTL, reg_tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS;

	/* Request delayed interrupts only when a delay is configured. */
	if (adapter->tx_int_delay > 0)
		adapter->txd_cmd |= IXGB_TX_DESC_CMD_IDE;
	return;
}
1595
1596/*********************************************************************
1597 *
1598 * Free all transmit related data structures.
1599 *
1600 **********************************************************************/
1601static void
1602ixgb_free_transmit_structures(struct adapter * adapter)
1603{
1604 struct ixgb_buffer *tx_buffer;
1605 int i;
1606
1607 INIT_DEBUGOUT("free_transmit_structures: begin");
1608
1609 if (adapter->tx_buffer_area != NULL) {
1610 tx_buffer = adapter->tx_buffer_area;
1611 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
1612 if (tx_buffer->m_head != NULL) {
1613 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1614 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1615 m_freem(tx_buffer->m_head);
1616 }
1617 tx_buffer->m_head = NULL;
1618 }
1619 }
1620 if (adapter->tx_buffer_area != NULL) {
1621 free(adapter->tx_buffer_area, M_DEVBUF);
1622 adapter->tx_buffer_area = NULL;
1623 }
1624 if (adapter->txtag != NULL) {
1625 bus_dma_tag_destroy(adapter->txtag);
1626 adapter->txtag = NULL;
1627 }
1628 return;
1629}
1630
1631/*********************************************************************
1632 *
1633 * The offload context needs to be set when we transfer the first
1634 * packet of a particular protocol (TCP/UDP). We change the
1635 * context only if the protocol type changes.
1636 *
1637 **********************************************************************/
/*
 * Set up (or reuse) the TCP/UDP checksum offload context.  On return
 * '*txd_popts' holds the POPTS bits for this packet's data
 * descriptors.  A new context descriptor is queued on the transmit
 * ring only when the offload protocol differs from the active
 * context; it consumes one ring entry.
 */
static void
ixgb_transmit_checksum_setup(struct adapter * adapter,
			     struct mbuf * mp,
			     u_int8_t * txd_popts)
{
	struct ixgb_context_desc *TXD;
	struct ixgb_buffer *tx_buffer;
	int curr_txd;

	if (mp->m_pkthdr.csum_flags) {

		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
			*txd_popts = IXGB_TX_DESC_POPTS_TXSM;
			/* Context already programmed for TCP: reuse it. */
			if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
				return;
			else
				adapter->active_checksum_context = OFFLOAD_TCP_IP;
		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
			*txd_popts = IXGB_TX_DESC_POPTS_TXSM;
			/* Context already programmed for UDP: reuse it. */
			if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
				return;
			else
				adapter->active_checksum_context = OFFLOAD_UDP_IP;
		} else {
			/* Flags set, but neither TCP nor UDP: no offload. */
			*txd_popts = 0;
			return;
		}
	} else {
		*txd_popts = 0;
		return;
	}

	/*
	 * If we reach this point, the checksum offload context needs to be
	 * reset.
	 */
	curr_txd = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[curr_txd];
	TXD = (struct ixgb_context_desc *) & adapter->tx_desc_base[curr_txd];

	/*
	 * Checksum coverage starts right after the IP header.
	 * NOTE(review): assumes a plain IPv4 header with no options --
	 * confirm against the encap path.
	 */
	TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip);
	TXD->tucse = 0;

	TXD->mss = 0;

	/* Offset of the L4 checksum field the hardware must fill in. */
	if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
		TXD->tucso =
			ENET_HEADER_SIZE + sizeof(struct ip) +
			offsetof(struct tcphdr, th_sum);
	} else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
		TXD->tucso =
			ENET_HEADER_SIZE + sizeof(struct ip) +
			offsetof(struct udphdr, uh_sum);
	}
	TXD->cmd_type_len = IXGB_CONTEXT_DESC_CMD_TCP | IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE;

	/* Context descriptors carry no mbuf. */
	tx_buffer->m_head = NULL;

	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;
	return;
}
1704
1705/**********************************************************************
1706 *
1707 * Examine each tx_buffer in the used queue. If the hardware is done
1708 * processing the packet then free associated resources. The
1709 * tx_buffer is put back on the free queue.
1710 *
1711 **********************************************************************/
/*
 * Reclaim transmit descriptors the hardware has completed (DD status
 * bit set), starting at oldest_used_tx_desc.  Completed mbufs and
 * their DMA maps are released, the free-descriptor count is updated,
 * and the OACTIVE flag / watchdog timer are adjusted.  Runs with the
 * adapter lock held.
 */
static void
ixgb_clean_transmit_interrupts(struct adapter * adapter)
{
	int i, num_avail;
	struct ixgb_buffer *tx_buffer;
	struct ixgb_tx_desc *tx_desc;

	IXGB_LOCK_ASSERT(adapter);

	/* Whole ring already free: nothing to reclaim. */
	if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
		return;

#ifdef _SV_
	adapter->clean_tx_interrupts++;
#endif
	num_avail = adapter->num_tx_desc_avail;
	i = adapter->oldest_used_tx_desc;

	tx_buffer = &adapter->tx_buffer_area[i];
	tx_desc = &adapter->tx_desc_base[i];

	/* Walk forward while the hardware reports Descriptor Done. */
	while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) {

		tx_desc->status = 0;
		num_avail++;

		if (tx_buffer->m_head) {
			/* Completed packet: unmap and free its mbuf chain. */
			bus_dmamap_sync(adapter->txtag, tx_buffer->map,
					BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(adapter->txtag, tx_buffer->map);
			bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
		}
		if (++i == adapter->num_tx_desc)
			i = 0;		/* wrap around the ring */

		tx_buffer = &adapter->tx_buffer_area[i];
		tx_desc = &adapter->tx_desc_base[i];
	}

	adapter->oldest_used_tx_desc = i;

	/*
	 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
	 * it is OK to send packets. If there are no pending descriptors,
	 * clear the timeout. Otherwise, if some descriptors have been freed,
	 * restart the timeout.
	 */
	if (num_avail > IXGB_TX_CLEANUP_THRESHOLD) {
		struct ifnet *ifp = adapter->ifp;

		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (num_avail == adapter->num_tx_desc)
			adapter->tx_timer = 0;
		else if (num_avail == adapter->num_tx_desc_avail)
			adapter->tx_timer = IXGB_TX_TIMEOUT;
	}
	adapter->num_tx_desc_avail = num_avail;
	return;
}
1773
1774
1775/*********************************************************************
1776 *
1777 * Get a buffer from system mbuf buffer pool.
1778 *
1779 **********************************************************************/
1780static int
1781ixgb_get_buf(int i, struct adapter * adapter,
1782 struct mbuf * nmp)
1783{
1784 register struct mbuf *mp = nmp;
1785 struct ixgb_buffer *rx_buffer;
1786 struct ifnet *ifp;
1787 bus_addr_t paddr;
1788 int error;
1789
1790 ifp = adapter->ifp;
1791
1792 if (mp == NULL) {
1793
1794 mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1795
1796 if (mp == NULL) {
1797 adapter->mbuf_alloc_failed++;
1798 return (ENOBUFS);
1799 }
1800 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1801 } else {
1802 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1803 mp->m_data = mp->m_ext.ext_buf;
1804 mp->m_next = NULL;
1805 }
1806
1807 if (ifp->if_mtu <= ETHERMTU) {
1808 m_adj(mp, ETHER_ALIGN);
1809 }
1810 rx_buffer = &adapter->rx_buffer_area[i];
1811
1812 /*
1813 * Using memory from the mbuf cluster pool, invoke the bus_dma
1814 * machinery to arrange the memory mapping.
1815 */
1816 error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
1817 mtod(mp, void *), mp->m_len,
1818 ixgb_dmamap_cb, &paddr, 0);
1819 if (error) {
1820 m_free(mp);
1821 return (error);
1822 }
1823 rx_buffer->m_head = mp;
1824 adapter->rx_desc_base[i].buff_addr = htole64(paddr);
1825 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
1826
1827 return (0);
1828}
1829
1830/*********************************************************************
1831 *
1832 * Allocate memory for rx_buffer structures. Since we use one
1833 * rx_buffer per received packet, the maximum number of rx_buffer's
1834 * that we'll need is equal to the number of receive descriptors
1835 * that we've allocated.
1836 *
1837 **********************************************************************/
1838static int
1839ixgb_allocate_receive_structures(struct adapter * adapter)
1840{
1841 int i, error;
1842 struct ixgb_buffer *rx_buffer;
1843
1844 if (!(adapter->rx_buffer_area =
1845 (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
1846 adapter->num_rx_desc, M_DEVBUF,
1847 M_NOWAIT | M_ZERO))) {
1848 device_printf(adapter->dev,
1849 "Unable to allocate rx_buffer memory\n");
1850 return (ENOMEM);
1851 }
1852 bzero(adapter->rx_buffer_area,
1853 sizeof(struct ixgb_buffer) * adapter->num_rx_desc);
1854
1855 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),/* parent */
1856 PAGE_SIZE, 0, /* alignment, bounds */
1857 BUS_SPACE_MAXADDR, /* lowaddr */
1858 BUS_SPACE_MAXADDR, /* highaddr */
1859 NULL, NULL, /* filter, filterarg */
1860 MCLBYTES, /* maxsize */
1861 1, /* nsegments */
1862 MCLBYTES, /* maxsegsize */
1863 BUS_DMA_ALLOCNOW, /* flags */
1864#if __FreeBSD_version >= 502000
1865 NULL, /* lockfunc */
1866 NULL, /* lockfuncarg */
1867#endif
1868 &adapter->rxtag);
1869 if (error != 0) {
1870 device_printf(adapter->dev, "ixgb_allocate_receive_structures: "
1871 "bus_dma_tag_create failed; error %u\n",
1872 error);
1873 goto fail_0;
1874 }
1875 rx_buffer = adapter->rx_buffer_area;
1876 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
1877 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
1878 &rx_buffer->map);
1879 if (error != 0) {
1880 device_printf(adapter->dev,
1881 "ixgb_allocate_receive_structures: "
1882 "bus_dmamap_create failed; error %u\n",
1883 error);
1884 goto fail_1;
1885 }
1886 }
1887
1888 for (i = 0; i < adapter->num_rx_desc; i++) {
1889 if (ixgb_get_buf(i, adapter, NULL) == ENOBUFS) {
1890 adapter->rx_buffer_area[i].m_head = NULL;
1891 adapter->rx_desc_base[i].buff_addr = 0;
1892 return (ENOBUFS);
1893 }
1894 }
1895
1896 return (0);
1897fail_1:
1898 bus_dma_tag_destroy(adapter->rxtag);
1899fail_0:
1900 adapter->rxtag = NULL;
1901 free(adapter->rx_buffer_area, M_DEVBUF);
1902 adapter->rx_buffer_area = NULL;
1903 return (error);
1904}
1905
1906/*********************************************************************
1907 *
1908 * Allocate and initialize receive structures.
1909 *
1910 **********************************************************************/
1911static int
1912ixgb_setup_receive_structures(struct adapter * adapter)
1913{
1914 bzero((void *)adapter->rx_desc_base,
1915 (sizeof(struct ixgb_rx_desc)) * adapter->num_rx_desc);
1916
1917 if (ixgb_allocate_receive_structures(adapter))
1918 return ENOMEM;
1919
1920 /* Setup our descriptor pointers */
1921 adapter->next_rx_desc_to_check = 0;
1922 adapter->next_rx_desc_to_use = 0;
1923 return (0);
1924}
1925
1926/*********************************************************************
1927 *
1928 * Enable receive unit.
1929 *
1930 **********************************************************************/
/*
 * Program the receive half of the MAC: descriptor ring base/length,
 * head/tail pointers, interrupt moderation (RDTR/RXDCTL/RAIDC),
 * optional TCP/UDP receive checksum offload, buffer sizing, and
 * finally the receive-enable bit.  Receives stay disabled until the
 * final RCTL write.
 */
static void
ixgb_initialize_receive_unit(struct adapter * adapter)
{
	u_int32_t reg_rctl;
	u_int32_t reg_rxcsum;
	u_int32_t reg_rxdctl;
	struct ifnet *ifp;
	u_int64_t rdba = adapter->rxdma.dma_paddr;

	ifp = adapter->ifp;

	/*
	 * Make sure receives are disabled while setting up the descriptor
	 * ring
	 */
	reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
	IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN);

	/* Set the Receive Delay Timer Register */
	IXGB_WRITE_REG(&adapter->hw, RDTR,
		       adapter->rx_int_delay);


	/* Setup the Base and Length of the Rx Descriptor Ring */
	IXGB_WRITE_REG(&adapter->hw, RDBAL,
		       (rdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
	IXGB_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
		       sizeof(struct ixgb_rx_desc));

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	IXGB_WRITE_REG(&adapter->hw, RDH, 0);

	/* Tail = last descriptor: the whole ring is available for RX. */
	IXGB_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);



	/* Descriptor-handling thresholds (write-back/host/prefetch). */
	reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
		| RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
		| RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
	IXGB_WRITE_REG(&adapter->hw, RXDCTL, reg_rxdctl);


	/*
	 * NOTE(review): raidc is unconditionally forced on here, so the
	 * 'if' below always runs -- confirm whether it was meant to be
	 * configurable.
	 */
	adapter->raidc = 1;
	if (adapter->raidc) {
		uint32_t raidc;
		uint8_t poll_threshold;
#define IXGB_RAIDC_POLL_DEFAULT 120

		/*
		 * Poll threshold derived from the ring size:
		 * (num_rx_desc - 1) / 16, masked to the 6-bit field.
		 */
		poll_threshold = ((adapter->num_rx_desc - 1) >> 3);
		poll_threshold >>= 1;
		poll_threshold &= 0x3F;
		raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE |
			(IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) |
			(adapter->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) |
			poll_threshold;
		IXGB_WRITE_REG(&adapter->hw, RAIDC, raidc);
	}
	/* Enable Receive Checksum Offload for TCP and UDP ? */
	if (ifp->if_capenable & IFCAP_RXCSUM) {
		reg_rxcsum = IXGB_READ_REG(&adapter->hw, RXCSUM);
		reg_rxcsum |= IXGB_RXCSUM_TUOFL;
		IXGB_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
	}
	/* Setup the Receive Control Register */
	reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
	reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
	reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC |
		IXGB_RCTL_CFF |
		(adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

	/* Translate the configured buffer length into RCTL's BSIZE field. */
	switch (adapter->rx_buffer_len) {
	default:
	case IXGB_RXBUFFER_2048:
		reg_rctl |= IXGB_RCTL_BSIZE_2048;
		break;
	case IXGB_RXBUFFER_4096:
		reg_rctl |= IXGB_RCTL_BSIZE_4096;
		break;
	case IXGB_RXBUFFER_8192:
		reg_rctl |= IXGB_RCTL_BSIZE_8192;
		break;
	case IXGB_RXBUFFER_16384:
		reg_rctl |= IXGB_RCTL_BSIZE_16384;
		break;
	}

	reg_rctl |= IXGB_RCTL_RXEN;


	/* Enable Receives */
	IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);

	return;
}
2026
2027/*********************************************************************
2028 *
2029 * Free receive related data structures.
2030 *
2031 **********************************************************************/
2032static void
2033ixgb_free_receive_structures(struct adapter * adapter)
2034{
2035 struct ixgb_buffer *rx_buffer;
2036 int i;
2037
2038 INIT_DEBUGOUT("free_receive_structures: begin");
2039
2040 if (adapter->rx_buffer_area != NULL) {
2041 rx_buffer = adapter->rx_buffer_area;
2042 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2043 if (rx_buffer->map != NULL) {
2044 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2045 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2046 }
2047 if (rx_buffer->m_head != NULL)
2048 m_freem(rx_buffer->m_head);
2049 rx_buffer->m_head = NULL;
2050 }
2051 }
2052 if (adapter->rx_buffer_area != NULL) {
2053 free(adapter->rx_buffer_area, M_DEVBUF);
2054 adapter->rx_buffer_area = NULL;
2055 }
2056 if (adapter->rxtag != NULL) {
2057 bus_dma_tag_destroy(adapter->rxtag);
2058 adapter->rxtag = NULL;
2059 }
2060 return;
2061}
2062
/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor and sends data which has been
 *  dma'ed into host memory to upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *  Returns the number of complete packets handed to the stack
 *  (used by the polling code to account for work done).
 *
 *  Must be called with the adapter lock held; the lock is dropped
 *  only around the call into the network stack (if_input).
 *
 *********************************************************************/
static int
ixgb_process_receive_interrupts(struct adapter * adapter, int count)
{
	struct ifnet *ifp;
	struct mbuf *mp;
#if __FreeBSD_version < 500000
	struct ether_header *eh;
#endif
	int eop = 0;		/* non-zero when current descriptor ends a packet */
	int len;
	u_int8_t accept_frame = 0;
	int i;			/* index of descriptor being examined */
	int next_to_use = 0;	/* first descriptor to replenish */
	int eop_desc;		/* index of the last EOP descriptor seen */
	int rx_npkts = 0;
	/* Pointer to the receive descriptor being examined. */
	struct ixgb_rx_desc *current_desc;

	IXGB_LOCK_ASSERT(adapter);

	ifp = adapter->ifp;
	i = adapter->next_rx_desc_to_check;
	next_to_use = adapter->next_rx_desc_to_use;
	eop_desc = adapter->next_rx_desc_to_check;
	current_desc = &adapter->rx_desc_base[i];

	/* Nothing to do if hardware has not written back any descriptor. */
	if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD)) {
#ifdef _SV_
		adapter->no_pkts_avail++;
#endif
		return (rx_npkts);
	}
	/*
	 * Walk the ring while descriptors have the Done bit set.  count
	 * is decremented once per *packet* (at EOP), so a negative count
	 * means "process until the ring is drained".
	 */
	while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) && (count != 0)) {

		mp = adapter->rx_buffer_area[i].m_head;
		/* Make the DMA'ed payload visible to the CPU before reading. */
		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
		    BUS_DMASYNC_POSTREAD);
		accept_frame = 1;
		if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) {
			count--;
			eop = 1;
		} else {
			eop = 0;
		}
		len = current_desc->length;

		/* Drop frames the hardware flagged with any receive error. */
		if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
		    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
		    IXGB_RX_DESC_ERRORS_RXE)) {
			accept_frame = 0;
		}
		if (accept_frame) {

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (adapter->fmp == NULL) {
				mp->m_pkthdr.len = len;
				adapter->fmp = mp;	/* Store the first mbuf */
				adapter->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				/* Remember where the packet ended (errata, below). */
				eop_desc = i;
				adapter->fmp->m_pkthdr.rcvif = ifp;

#if __FreeBSD_version < 500000
				eh = mtod(adapter->fmp, struct ether_header *);

				/* Remove ethernet header from mbuf */
				m_adj(adapter->fmp, sizeof(struct ether_header));
				ixgb_receive_checksum(adapter, current_desc,
				    adapter->fmp);

				if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
					VLAN_INPUT_TAG(eh, adapter->fmp,
					    current_desc->special);
				else
					ether_input(ifp, eh, adapter->fmp);
#else
				ixgb_receive_checksum(adapter, current_desc,
				    adapter->fmp);
#if __FreeBSD_version < 700000
				if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
					VLAN_INPUT_TAG(ifp, adapter->fmp,
					    current_desc->special);
#else
				/* Attach the hardware-supplied 802.1q tag. */
				if (current_desc->status & IXGB_RX_DESC_STATUS_VP) {
					adapter->fmp->m_pkthdr.ether_vtag =
					    current_desc->special;
					adapter->fmp->m_flags |= M_VLANTAG;
				}
#endif

				if (adapter->fmp != NULL) {
					/*
					 * Drop the adapter lock across the
					 * call up the stack; if_input may
					 * re-enter the driver.
					 */
					IXGB_UNLOCK(adapter);
					(*ifp->if_input) (ifp, adapter->fmp);
					IXGB_LOCK(adapter);
					rx_npkts++;
				}
#endif
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
			adapter->rx_buffer_area[i].m_head = NULL;
		} else {
			/* Bad frame: discard it and any partial chain. */
			adapter->dropped_pkts++;
			if (adapter->fmp != NULL)
				m_freem(adapter->fmp);
			adapter->fmp = NULL;
			adapter->lmp = NULL;
		}

		/* Zero out the receive descriptors status */
		current_desc->status = 0;

		/* Advance our pointers to the next descriptor */
		if (++i == adapter->num_rx_desc) {
			i = 0;
			current_desc = adapter->rx_desc_base;
		} else
			current_desc++;
	}
	adapter->next_rx_desc_to_check = i;

	/* i now indexes the most recently consumed descriptor. */
	if (--i < 0)
		i = (adapter->num_rx_desc - 1);

	/*
	 * 82597EX: Workaround for redundent write back in receive descriptor ring (causes
	 * memory corruption). Avoid using and re-submitting the most recently received RX
	 * descriptor back to hardware.
	 *
	 * if(Last written back descriptor == EOP bit set descriptor)
	 *      then avoid re-submitting the most recently received RX descriptor
	 *      back to hardware.
	 * if(Last written back descriptor != EOP bit set descriptor)
	 *      then avoid re-submitting the most recently received RX descriptors
	 *      till last EOP bit set descriptor.
	 */
	if (eop_desc != i) {
		if (++eop_desc == adapter->num_rx_desc)
			eop_desc = 0;
		i = eop_desc;
	}
	/* Replenish the descriptors with new mbufs till last EOP bit set descriptor */
	while (next_to_use != i) {
		current_desc = &adapter->rx_desc_base[next_to_use];
		if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
		    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
		    IXGB_RX_DESC_ERRORS_RXE))) {
			/* Errored slot: reuse the existing mbuf, just remap it. */
			mp = adapter->rx_buffer_area[next_to_use].m_head;
			ixgb_get_buf(next_to_use, adapter, mp);
		} else {
			/* Stop replenishing on mbuf exhaustion; retry later. */
			if (ixgb_get_buf(next_to_use, adapter, NULL) == ENOBUFS)
				break;
		}
		/* Advance our pointers to the next descriptor */
		if (++next_to_use == adapter->num_rx_desc) {
			next_to_use = 0;
			current_desc = adapter->rx_desc_base;
		} else
			current_desc++;
	}
	adapter->next_rx_desc_to_use = next_to_use;
	if (--next_to_use < 0)
		next_to_use = (adapter->num_rx_desc - 1);
	/* Advance the IXGB's Receive Queue #0 "Tail Pointer" */
	IXGB_WRITE_REG(&adapter->hw, RDT, next_to_use);

	return (rx_npkts);
}
2251
2252/*********************************************************************
2253 *
2254 * Verify that the hardware indicated that the checksum is valid.
2255 * Inform the stack about the status of checksum so that stack
2256 * doesn't spend time verifying the checksum.
2257 *
2258 *********************************************************************/
2259static void
2260ixgb_receive_checksum(struct adapter * adapter,
2261 struct ixgb_rx_desc * rx_desc,
2262 struct mbuf * mp)
2263{
2264 if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) {
2265 mp->m_pkthdr.csum_flags = 0;
2266 return;
2267 }
2268 if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) {
2269 /* Did it pass? */
2270 if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) {
2271 /* IP Checksum Good */
2272 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2273 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2274
2275 } else {
2276 mp->m_pkthdr.csum_flags = 0;
2277 }
2278 }
2279 if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) {
2280 /* Did it pass? */
2281 if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) {
2282 mp->m_pkthdr.csum_flags |=
2283 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2284 mp->m_pkthdr.csum_data = htons(0xffff);
2285 }
2286 }
2287 return;
2288}
2289
2290
2291static void
2292ixgb_enable_vlans(struct adapter * adapter)
2293{
2294 uint32_t ctrl;
2295
2296 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2297 ctrl |= IXGB_CTRL0_VME;
2298 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2299
2300 return;
2301}
2302
2303
2304static void
2305ixgb_enable_intr(struct adapter * adapter)
2306{
2307 IXGB_WRITE_REG(&adapter->hw, IMS, (IXGB_INT_RXT0 | IXGB_INT_TXDW |
2308 IXGB_INT_RXDMT0 | IXGB_INT_LSC | IXGB_INT_RXO));
2309 return;
2310}
2311
2312static void
2313ixgb_disable_intr(struct adapter * adapter)
2314{
2315 IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
2316 return;
2317}
2318
2319void
2320ixgb_write_pci_cfg(struct ixgb_hw * hw,
2321 uint32_t reg,
2322 uint16_t * value)
2323{
2324 pci_write_config(((struct ixgb_osdep *) hw->back)->dev, reg,
2325 *value, 2);
2326}
2327
/**********************************************************************
 *
 *  Update the board statistics counters.
 *
 *  Reads every hardware statistics register and accumulates it into
 *  the driver's soft copy, then refreshes the ifnet counters the OS
 *  exposes.  NOTE(review): these reads appear to be accumulate-on-read
 *  (each register is added, never assigned), which suggests the
 *  hardware counters clear on read — confirm against the 82597EX
 *  datasheet before reordering or skipping any read.
 *
 **********************************************************************/
static void
ixgb_update_stats_counters(struct adapter * adapter)
{
	struct ifnet *ifp;

	/* Receive-side counters. */
	adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
	adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
	adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
	adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
	adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
	adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
	adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
	adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
	adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
	adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);

	/* Error, flow-control and transmit counters. */
	adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
	adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
	adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
	adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
	adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
	adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
	adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
	adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
	adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
	adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
	adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
	adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
	adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
	adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
	adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
	adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
	adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
	adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
	adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
	adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
	adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
	adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
	adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
	adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
	adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
	adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
	adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);

	/* Per-class packet counters and miscellaneous counters. */
	adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
	adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
	adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
	adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
	adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
	adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
	adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
	adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
	adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
	adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
	adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
	adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
	adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
	adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
	adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
	adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
	adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
	adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
	adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
	adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
	adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
	adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);

	ifp = adapter->ifp;

	/* Fill out the OS statistics structure */
	ifp->if_ipackets = adapter->stats.gprcl;
	ifp->if_opackets = adapter->stats.gptcl;
	ifp->if_ibytes = adapter->stats.gorcl;
	ifp->if_obytes = adapter->stats.gotcl;
	ifp->if_imcasts = adapter->stats.mprcl;
	ifp->if_collisions = 0;	/* full-duplex 10GbE: no collisions */

	/* Rx Errors */
	ifp->if_ierrors =
	    adapter->dropped_pkts +
	    adapter->stats.crcerrs +
	    adapter->stats.rnbc +
	    adapter->stats.mpc +
	    adapter->stats.rlec;


}
2420
2421
2422/**********************************************************************
2423 *
2424 * This routine is called only when ixgb_display_debug_stats is enabled.
2425 * This routine provides a way to take a look at important statistics
2426 * maintained by the driver and hardware.
2427 *
2428 **********************************************************************/
2429static void
2430ixgb_print_hw_stats(struct adapter * adapter)
2431{
2432 char buf_speed[100], buf_type[100];
2433 ixgb_bus_speed bus_speed;
2434 ixgb_bus_type bus_type;
2435 device_t dev;
2436
2437 dev = adapter->dev;
2438#ifdef _SV_
2439 device_printf(dev, "Packets not Avail = %ld\n",
2440 adapter->no_pkts_avail);
2441 device_printf(dev, "CleanTxInterrupts = %ld\n",
2442 adapter->clean_tx_interrupts);
2443 device_printf(dev, "ICR RXDMT0 = %lld\n",
2444 (long long)adapter->sv_stats.icr_rxdmt0);
2445 device_printf(dev, "ICR RXO = %lld\n",
2446 (long long)adapter->sv_stats.icr_rxo);
2447 device_printf(dev, "ICR RXT0 = %lld\n",
2448 (long long)adapter->sv_stats.icr_rxt0);
2449 device_printf(dev, "ICR TXDW = %lld\n",
2450 (long long)adapter->sv_stats.icr_TXDW);
2451#endif /* _SV_ */
2452
2453 bus_speed = adapter->hw.bus.speed;
2454 bus_type = adapter->hw.bus.type;
2455 sprintf(buf_speed,
2456 bus_speed == ixgb_bus_speed_33 ? "33MHz" :
2457 bus_speed == ixgb_bus_speed_66 ? "66MHz" :
2458 bus_speed == ixgb_bus_speed_100 ? "100MHz" :
2459 bus_speed == ixgb_bus_speed_133 ? "133MHz" :
2460 "UNKNOWN");
2461 device_printf(dev, "PCI_Bus_Speed = %s\n",
2462 buf_speed);
2463
2464 sprintf(buf_type,
2465 bus_type == ixgb_bus_type_pci ? "PCI" :
2466 bus_type == ixgb_bus_type_pcix ? "PCI-X" :
2467 "UNKNOWN");
2468 device_printf(dev, "PCI_Bus_Type = %s\n",
2469 buf_type);
2470
2471 device_printf(dev, "Tx Descriptors not Avail1 = %ld\n",
2472 adapter->no_tx_desc_avail1);
2473 device_printf(dev, "Tx Descriptors not Avail2 = %ld\n",
2474 adapter->no_tx_desc_avail2);
2475 device_printf(dev, "Std Mbuf Failed = %ld\n",
2476 adapter->mbuf_alloc_failed);
2477 device_printf(dev, "Std Cluster Failed = %ld\n",
2478 adapter->mbuf_cluster_failed);
2479
2480 device_printf(dev, "Defer count = %lld\n",
2481 (long long)adapter->stats.dc);
2482 device_printf(dev, "Missed Packets = %lld\n",
2483 (long long)adapter->stats.mpc);
2484 device_printf(dev, "Receive No Buffers = %lld\n",
2485 (long long)adapter->stats.rnbc);
2486 device_printf(dev, "Receive length errors = %lld\n",
2487 (long long)adapter->stats.rlec);
2488 device_printf(dev, "Crc errors = %lld\n",
2489 (long long)adapter->stats.crcerrs);
2490 device_printf(dev, "Driver dropped packets = %ld\n",
2491 adapter->dropped_pkts);
2492
2493 device_printf(dev, "XON Rcvd = %lld\n",
2494 (long long)adapter->stats.xonrxc);
2495 device_printf(dev, "XON Xmtd = %lld\n",
2496 (long long)adapter->stats.xontxc);
2497 device_printf(dev, "XOFF Rcvd = %lld\n",
2498 (long long)adapter->stats.xoffrxc);
2499 device_printf(dev, "XOFF Xmtd = %lld\n",
2500 (long long)adapter->stats.xofftxc);
2501
2502 device_printf(dev, "Good Packets Rcvd = %lld\n",
2503 (long long)adapter->stats.gprcl);
2504 device_printf(dev, "Good Packets Xmtd = %lld\n",
2505 (long long)adapter->stats.gptcl);
2506
2507 device_printf(dev, "Jumbo frames recvd = %lld\n",
2508 (long long)adapter->stats.jprcl);
2509 device_printf(dev, "Jumbo frames Xmtd = %lld\n",
2510 (long long)adapter->stats.jptcl);
2511
2512 return;
2513
2514}
2515
2516static int
2517ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS)
2518{
2519 int error;
2520 int result;
2521 struct adapter *adapter;
2522
2523 result = -1;
2524 error = sysctl_handle_int(oidp, &result, 0, req);
2525
2526 if (error || !req->newptr)
2527 return (error);
2528
2529 if (result == 1) {
2530 adapter = (struct adapter *) arg1;
2531 ixgb_print_hw_stats(adapter);
2532 }
2533 return error;
2534}