33c33
< /*$FreeBSD: head/sys/dev/ixgbe/ixgbe.c 181003 2008-07-30 18:15:18Z jfv $*/
---
> /*$FreeBSD: head/sys/dev/ixgbe/ixgbe.c 185352 2008-11-26 23:41:18Z jfv $*/
39,41d38
< /* Undefine this if not using CURRENT */
< #define IXGBE_VLAN_EVENTS
<
52c49
< char ixgbe_driver_version[] = "1.4.7";
---
> char ixgbe_driver_version[] = "1.6.2";
68d64
< {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT_DUAL_PORT, 0, 0, 0},
69a66,68
> {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
> {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
> {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
72a72,73
> {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
> {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
131c132
< static void ixgbe_rx_checksum(struct adapter *, u32, struct mbuf *);
---
> static void ixgbe_rx_checksum(u32, struct mbuf *);
138c139
< static int ixgbe_get_buf(struct rx_ring *, int);
---
> static int ixgbe_get_buf(struct rx_ring *, int, u8);
150c151
< static void ixgbe_set_ivar(struct adapter *, u16, u8);
---
> static void ixgbe_set_ivar(struct adapter *, u16, u8, s8);
154c155
< #ifdef IXGBE_VLAN_EVENTS
---
> #ifdef IXGBE_HW_VLAN_SUPPORT
158a160,164
> static void ixgbe_update_aim(struct rx_ring *);
>
> /* Support for pluggable optic modules */
> static bool ixgbe_sfp_probe(struct adapter *);
>
171,173d176
< #ifndef NO_82598_A0_SUPPORT
< static void desc_flip(void *);
< #endif
201a205,220
> /*
> ** These parameters are used in Adaptive
> ** Interrupt Moderation. The values are
> ** written into EITR and control the
> ** interrupt frequency. They can be
> ** modified, but be careful when tuning them.
> */
> static int ixgbe_enable_aim = TRUE;
> TUNABLE_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim);
> static int ixgbe_low_latency = IXGBE_LOW_LATENCY;
> TUNABLE_INT("hw.ixgbe.low_latency", &ixgbe_low_latency);
> static int ixgbe_ave_latency = IXGBE_LOW_LATENCY;
> TUNABLE_INT("hw.ixgbe.ave_latency", &ixgbe_low_latency);
> static int ixgbe_bulk_latency = IXGBE_BULK_LATENCY;
> TUNABLE_INT("hw.ixgbe.bulk_latency", &ixgbe_bulk_latency);
>
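
Since these are declared with TUNABLE_INT(), they can also be set at boot
time from /boot/loader.conf; a minimal sketch (the values shown are
illustrative, not recommendations):

	hw.ixgbe.enable_aim="0"
	hw.ixgbe.low_latency="128"
	hw.ixgbe.bulk_latency="1200"
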
207c226
< static int ixgbe_flow_control = 3;
---
> static int ixgbe_flow_control = ixgbe_fc_none;
216c235
< static int ixgbe_enable_lro = 0;
---
> static int ixgbe_enable_lro = 1;
226a246,251
> * Enable RX Header Split
> */
> static int ixgbe_rx_hdr_split = 1;
> TUNABLE_INT("hw.ixgbe.rx_hdr_split", &ixgbe_rx_hdr_split);
>
> /*
232c257
< static int ixgbe_rx_queues = 4;
---
> static int ixgbe_rx_queues = 1;
246,248d270
< /* Optics type of this interface */
< static int ixgbe_optics;
<
263,267c285,289
< u_int16_t pci_vendor_id = 0;
< u_int16_t pci_device_id = 0;
< u_int16_t pci_subvendor_id = 0;
< u_int16_t pci_subdevice_id = 0;
< char adapter_name[128];
---
> u16 pci_vendor_id = 0;
> u16 pci_device_id = 0;
> u16 pci_subvendor_id = 0;
> u16 pci_subdevice_id = 0;
> char adapter_name[256];
292,320d313
< switch (pci_device_id) {
< case IXGBE_DEV_ID_82598AT_DUAL_PORT :
< ixgbe_total_ports += 2;
< break;
< case IXGBE_DEV_ID_82598_CX4_DUAL_PORT :
< ixgbe_optics = IFM_10G_CX4;
< ixgbe_total_ports += 2;
< break;
< case IXGBE_DEV_ID_82598AF_DUAL_PORT :
< ixgbe_optics = IFM_10G_SR;
< ixgbe_total_ports += 2;
< break;
< case IXGBE_DEV_ID_82598AF_SINGLE_PORT :
< ixgbe_optics = IFM_10G_SR;
< ixgbe_total_ports += 1;
< break;
< case IXGBE_DEV_ID_82598EB_XF_LR :
< ixgbe_optics = IFM_10G_LR;
< ixgbe_total_ports += 1;
< break;
< case IXGBE_DEV_ID_82598EB_CX4 :
< ixgbe_optics = IFM_10G_CX4;
< ixgbe_total_ports += 1;
< break;
< case IXGBE_DEV_ID_82598AT :
< ixgbe_total_ports += 1;
< default:
< break;
< }
326d318
<
345c337,338
< u32 ctrl_ext;
---
> u16 pci_device_id;
> u32 ctrl_ext;
355a349,379
> /* Keep track of number of ports and optics */
> pci_device_id = pci_get_device(dev);
> switch (pci_device_id) {
> case IXGBE_DEV_ID_82598_CX4_DUAL_PORT :
> adapter->optics = IFM_10G_CX4;
> ixgbe_total_ports += 2;
> break;
> case IXGBE_DEV_ID_82598AF_DUAL_PORT :
> adapter->optics = IFM_10G_SR;
> ixgbe_total_ports += 2;
> break;
> case IXGBE_DEV_ID_82598AF_SINGLE_PORT :
> adapter->optics = IFM_10G_SR;
> ixgbe_total_ports += 1;
> break;
> case IXGBE_DEV_ID_82598EB_XF_LR :
> adapter->optics = IFM_10G_LR;
> ixgbe_total_ports += 1;
> break;
> case IXGBE_DEV_ID_82598EB_CX4 :
> adapter->optics = IFM_10G_CX4;
> ixgbe_total_ports += 1;
> break;
> 	case IXGBE_DEV_ID_82598AT :
> 		ixgbe_total_ports += 1;
> 		break;
> 	case IXGBE_DEV_ID_82598_DA_DUAL_PORT :
> 		ixgbe_total_ports += 2;
> 		break;
> 	default:
> 		break;
> }
>
376a401,425
> SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
> SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
> OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
> &ixgbe_enable_aim, 1, "Interrupt Moderation");
>
> SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
> SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
> OID_AUTO, "low_latency", CTLTYPE_INT|CTLFLAG_RW,
> &ixgbe_low_latency, 1, "Low Latency");
>
> SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
> SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
> OID_AUTO, "ave_latency", CTLTYPE_INT|CTLFLAG_RW,
> &ixgbe_ave_latency, 1, "Average Latency");
>
> SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
> SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
> OID_AUTO, "bulk_latency", CTLTYPE_INT|CTLFLAG_RW,
> &ixgbe_bulk_latency, 1, "Bulk Latency");
>
> SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
> SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
> OID_AUTO, "hdr_split", CTLTYPE_INT|CTLFLAG_RW,
> &ixgbe_rx_hdr_split, 1, "RX Header Split");
>
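
Because these OIDs hang off the device sysctl tree, they should also be
adjustable per-port at runtime, e.g. (assuming the first port attaches
as ix0):

	sysctl dev.ix.0.enable_aim=0
	sysctl dev.ix.0.low_latency=128
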
383,385d431
< /* Indicate to RX setup to use Jumbo Clusters */
< adapter->bigbufs = TRUE;
<
431c477,490
< if (ixgbe_init_shared_code(&adapter->hw)) {
---
> error = ixgbe_init_shared_code(&adapter->hw);
> if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
> /*
> ** No optics in this port, set up
> ** so the timer routine will probe
> ** for later insertion.
> */
> adapter->sfp_probe = TRUE;
> error = 0;
> } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
> device_printf(dev,"Unsupported SFP+ module detected!\n");
> error = EIO;
> goto err_late;
> } else if (error) {
462c521
< #ifdef IXGBE_VLAN_EVENTS
---
> #ifdef IXGBE_HW_VLAN_SUPPORT
469c528
<
---
>
524d582
< txr->tq = NULL;
532d589
< rxr->tq = NULL;
536c593,598
< #ifdef IXGBE_VLAN_EVENTS
---
> /* let hardware know driver is unloading */
> ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
> ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
> IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
>
> #ifdef IXGBE_HW_VLAN_SUPPORT
542c604
< #endif
---
> #endif
544,548d605
< /* let hardware know driver is unloading */
< ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
< ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
<
850a908,909
> struct rx_ring *rxr = adapter->rx_rings;
> struct tx_ring *txr = adapter->tx_rings;
854c913,914
< u32 txdctl, rxdctl, mhadd, gpie;
---
> u32 k, txdctl, mhadd, gpie;
> u32 rxdctl, rxctrl;
875,876c935
< #ifndef IXGBE_VLAN_EVENTS
< /* With events this is done when a vlan registers */
---
> #ifndef IXGBE_HW_VLAN_SUPPORT
878c937,938
< u32 ctrl;
---
> u32 ctrl;
>
885d944
<
894a954,960
> /* TX irq moderation rate is fixed */
> for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
> IXGBE_WRITE_REG(&adapter->hw,
> IXGBE_EITR(txr->msix), ixgbe_ave_latency);
> txr->watchdog_timer = FALSE;
> }
>
899,900c965,966
< ** If we are resetting MTU smaller than 2K
< ** drop to small RX buffers
---
> ** Determine the correct mbuf pool
> ** for doing jumbo/headersplit
902,903c968,971
< if (adapter->max_frame_size <= MCLBYTES)
< adapter->bigbufs = FALSE;
---
> if (ifp->if_mtu > ETHERMTU)
> adapter->rx_mbuf_sz = MJUMPAGESIZE;
> else
> adapter->rx_mbuf_sz = MCLBYTES;
914a983,992
> /* RX moderation will be adapted over time, set default */
> for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
> IXGBE_WRITE_REG(&adapter->hw,
> IXGBE_EITR(rxr->msix), ixgbe_low_latency);
> }
>
> /* Set Link moderation */
> IXGBE_WRITE_REG(&adapter->hw,
> IXGBE_EITR(adapter->linkvec), IXGBE_LINK_ITR);
>
915a994
>
918a998
>
957a1038,1046
> for (k = 0; k < 10; k++) {
> if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
> IXGBE_RXDCTL_ENABLE)
> break;
> else
> msec_delay(1);
> }
> wmb();
> IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
959a1049,1055
> /* Enable Receive engine */
> rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
> if (adapter->hw.mac.type == ixgbe_mac_82598EB)
> rxctrl |= IXGBE_RXCTRL_DMBYPS;
> rxctrl |= IXGBE_RXCTRL_RXEN;
> IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
>
963c1059,1060
< ixgbe_configure_ivars(adapter);
---
> if (ixgbe_enable_msix)
> ixgbe_configure_ivars(adapter);
987c1084
< ** Legacy Deferred Interrupt Handlers
---
> ** MSIX Interrupt Handlers
995c1092,1093
< u32 loop = 0;
---
> u32 loop = MAX_LOOP;
> bool more;
997,999c1095,1099
< while (loop++ < MAX_INTR)
< if (ixgbe_rxeof(rxr, adapter->rx_process_limit) == 0)
< break;
---
> do {
> more = ixgbe_rxeof(rxr, -1);
> } while (loop-- && more);
> /* Reenable this interrupt */
> IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->eims);
1008c1108,1109
< u32 loop = 0;
---
> u32 loop = MAX_LOOP;
> bool more;
1010,1016c1111,1122
< IXGBE_TX_LOCK(txr);
< while (loop++ < MAX_INTR)
< if (ixgbe_txeof(txr) == 0)
< break;
< if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
< ixgbe_start_locked(txr, ifp);
< IXGBE_TX_UNLOCK(txr);
---
> IXGBE_TX_LOCK(txr);
> do {
> more = ixgbe_txeof(txr);
> } while (loop-- && more);
>
> if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
> ixgbe_start_locked(txr, ifp);
>
> IXGBE_TX_UNLOCK(txr);
>
> /* Reenable this interrupt */
> IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, txr->eims);
1029d1134
< u32 reg_eicr;
1030a1136
> struct ixgbe_hw *hw = &adapter->hw;
1033c1139
< struct ixgbe_hw *hw;
---
> u32 reg_eicr;
1035,1037c1141,1145
< hw = &adapter->hw;
< reg_eicr = IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
< if (reg_eicr == 0)
---
>
> reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
>
> if (reg_eicr == 0) {
> ixgbe_enable_intr(adapter);
1038a1147
> }
1040c1149
< if (ixgbe_rxeof(rxr, adapter->rx_process_limit) != 0)
---
> if (ixgbe_rxeof(rxr, adapter->rx_process_limit))
1042,1043c1151,1152
< if (ixgbe_txeof(txr) != 0)
< taskqueue_enqueue(txr->tq, &txr->tx_task);
---
> if (ixgbe_txeof(txr))
> taskqueue_enqueue(txr->tq, &txr->tx_task);
1050,1051c1159
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
< IXGBE_EICR_GPI_SDP1);
---
> IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1);
1052a1161
>
1056a1166
> ixgbe_enable_intr(adapter);
1070,1072c1180,1182
< struct tx_ring *txr = arg;
< struct adapter *adapter = txr->adapter;
< u32 loop = 0;
---
> struct tx_ring *txr = arg;
> struct adapter *adapter = txr->adapter;
> bool more;
1074d1183
< ++txr->tx_irq;
1076,1078c1185,1186
< while (loop++ < MAX_INTR)
< if (ixgbe_txeof(txr) == 0)
< break;
---
> ++txr->tx_irq;
> more = ixgbe_txeof(txr);
1080,1082c1188,1191
< /* Reenable this interrupt */
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, txr->eims);
<
---
> if (more)
> taskqueue_enqueue(txr->tq, &txr->tx_task);
> else /* Reenable this interrupt */
> IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, txr->eims);
1085a1195
>
1088c1198
< * MSI RX Interrupt Service routine
---
> * MSIX RX Interrupt Service routine
1096,1097c1206,1207
< struct adapter *adapter = rxr->adapter;
< u32 loop = 0;
---
> struct adapter *adapter = rxr->adapter;
> bool more;
1100,1104c1210,1217
< while (loop++ < MAX_INTR)
< if (ixgbe_rxeof(rxr, adapter->rx_process_limit) == 0)
< break;
< /* Reenable this interrupt */
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->eims);
---
> more = ixgbe_rxeof(rxr, -1);
> if (more)
> taskqueue_enqueue(rxr->tq, &rxr->rx_task);
> else
> IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->eims);
> /* Update interrupt rate */
> if (ixgbe_enable_aim == TRUE)
> ixgbe_update_aim(rxr);
1107a1221,1231
> /*
> ** Routine to adjust the RX EITR value based on traffic;
> ** it's a simple three-state model, but it seems to help.
> **
> ** Note that the three EITR values are tunable using
> ** sysctl in real time. The feature can be effectively
> ** nullified by setting them equal.
> */
> #define BULK_THRESHOLD 10000
> #define AVE_THRESHOLD 1600
>
1108a1233,1271
> ixgbe_update_aim(struct rx_ring *rxr)
> {
> struct adapter *adapter = rxr->adapter;
> u32 olditr, newitr;
>
> /* Update interrupt moderation based on traffic */
> olditr = rxr->eitr_setting;
> newitr = olditr;
>
> /* Idle, don't change setting */
> if (rxr->bytes == 0)
> return;
>
> if (olditr == ixgbe_low_latency) {
> if (rxr->bytes > AVE_THRESHOLD)
> newitr = ixgbe_ave_latency;
> } else if (olditr == ixgbe_ave_latency) {
> if (rxr->bytes < AVE_THRESHOLD)
> newitr = ixgbe_low_latency;
> else if (rxr->bytes > BULK_THRESHOLD)
> newitr = ixgbe_bulk_latency;
> } else if (olditr == ixgbe_bulk_latency) {
> if (rxr->bytes < BULK_THRESHOLD)
> newitr = ixgbe_ave_latency;
> }
>
> if (olditr != newitr) {
> /* Change interrupt rate */
> rxr->eitr_setting = newitr;
> IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rxr->me),
> newitr | (newitr << 16));
> }
>
> rxr->bytes = 0;
> return;
> }
>
>
> static void
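
As a worked example of the three-state model above, with the default
thresholds: an idle ring (bytes == 0) keeps its current setting; a ring
at ixgbe_low_latency that accumulates 2000 bytes between interrupts
crosses AVE_THRESHOLD (1600) and steps up to ixgbe_ave_latency; if it
later exceeds BULK_THRESHOLD (10000) it steps up again to
ixgbe_bulk_latency, and it steps back down the same way as traffic
falls off.
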
1167c1330
< ifmr->ifm_active |= ixgbe_optics | IFM_FDX;
---
> ifmr->ifm_active |= adapter->optics | IFM_FDX;
1223c1386
< u32 paylen;
---
> u32 paylen = 0;
1233d1395
< paylen = 0;
1277c1439
< adapter->mbuf_alloc_failed++;
---
> adapter->mbuf_defrag_failed++;
1328a1491,1495
> /* Record payload length */
> if (paylen == 0)
> olinfo_status |= m_head->m_pkthdr.len <<
> IXGBE_ADVTXD_PAYLEN_SHIFT;
>
1349,1359d1515
< /*
< ** we have to do this inside the loop right now
< ** because of the hardware workaround.
< */
< if (j == (nsegs -1)) /* Last descriptor gets EOP and RS */
< txd->read.cmd_type_len |=
< htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
< #ifndef NO_82598_A0_SUPPORT
< if (adapter->hw.revision_id == 0)
< desc_flip(txd);
< #endif
1361a1518,1519
> txd->read.cmd_type_len |=
> htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1377a1536
> ++txr->total_packets;
1379d1537
< ++txr->tx_packets;
1506a1665,1669
> /* Check for pluggable optics */
> if (adapter->sfp_probe)
> if (!ixgbe_sfp_probe(adapter))
> goto out; /* Nothing to do */
>
1513c1676
< * Each second we check the watchdog
---
> * Each tick we check the watchdog
1517a1681,1684
> out:
> /* Trigger an RX interrupt on all queues */
> IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, adapter->rx_mask);
>
1703a1871,1875
> TASK_INIT(&txr->tx_task, 0, ixgbe_handle_tx, txr);
> txr->tq = taskqueue_create_fast("ixgbe_txq", M_NOWAIT,
> taskqueue_thread_enqueue, &txr->tq);
> taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq",
> device_get_nameunit(adapter->dev));
1727a1900,1906
> /* used in local timer */
> adapter->rx_mask |= rxr->eims;
> TASK_INIT(&rxr->rx_task, 0, ixgbe_handle_rx, rxr);
> rxr->tq = taskqueue_create_fast("ixgbe_rxq", M_NOWAIT,
> taskqueue_thread_enqueue, &rxr->tq);
> taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq",
> device_get_nameunit(adapter->dev));
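
For reference, a minimal self-contained sketch of the taskqueue(9)
pattern the driver now uses for both the TX and RX rings (all my_*
names are hypothetical, not from the driver):

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/kernel.h>
	#include <sys/malloc.h>
	#include <sys/priority.h>
	#include <sys/taskqueue.h>

	struct my_softc {
		struct task		my_task;
		struct taskqueue	*my_tq;
	};

	/* Deferred work runs here, in a kernel thread */
	static void
	my_handler(void *context, int pending)
	{
		struct my_softc *sc = context;
		(void)sc;
	}

	static void
	my_setup(struct my_softc *sc)
	{
		TASK_INIT(&sc->my_task, 0, my_handler, sc);
		sc->my_tq = taskqueue_create_fast("my_tq", M_NOWAIT,
		    taskqueue_thread_enqueue, &sc->my_tq);
		taskqueue_start_threads(&sc->my_tq, 1, PI_NET, "my taskq");
	}

	/* An interrupt handler would then defer with:
	 *	taskqueue_enqueue(sc->my_tq, &sc->my_task);
	 */
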
1761a1941,1944
> /* Override by tunable */
> if (ixgbe_enable_msix == 0)
> goto msi;
>
1763c1946
< rid = PCIR_BAR(IXGBE_MSIX_BAR);
---
> rid = PCIR_BAR(MSIX_82598_BAR);
1766a1950,1954
> rid += 4; /* 82599 maps in higher BAR */
> adapter->msix_mem = bus_alloc_resource_any(dev,
> SYS_RES_MEMORY, &rid, RF_ACTIVE);
> }
> if (!adapter->msix_mem) {
1776c1964
< PCIR_BAR(IXGBE_MSIX_BAR), adapter->msix_mem);
---
> rid, adapter->msix_mem);
1856c2044,2045
< device_t dev = adapter->dev;
---
> device_t dev = adapter->dev;
> int rid;
1864a2054,2055
> rid = PCIR_BAR(MSIX_82598_BAR);
>
1888c2079
< PCIR_BAR(IXGBE_MSIX_BAR), adapter->msix_mem);
---
> rid, adapter->msix_mem);
1923c2114
< adapter->hw.fc.type = ixgbe_fc_full;
---
> adapter->hw.fc.requested_mode = ixgbe_fc_full;
1980,1981c2171
< if ((hw->device_id == IXGBE_DEV_ID_82598AT) ||
< (hw->device_id == IXGBE_DEV_ID_82598AT_DUAL_PORT))
---
> if (hw->device_id == IXGBE_DEV_ID_82598AT)
1994c2184
< ifmedia_add(&adapter->media, IFM_ETHER | ixgbe_optics |
---
> ifmedia_add(&adapter->media, IFM_ETHER | adapter->optics |
1996,1997c2186
< if ((hw->device_id == IXGBE_DEV_ID_82598AT) ||
< (hw->device_id == IXGBE_DEV_ID_82598AT_DUAL_PORT)) {
---
> if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2098d2286
< char name_string[16];
2137c2325
< snprintf(name_string, sizeof(name_string), "%s:tx(%d)",
---
> snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2139c2327
< mtx_init(&txr->tx_mtx, name_string, NULL, MTX_DEF);
---
> mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
2172,2173c2360,2361
< /* Initialize the TX side lock */
< snprintf(name_string, sizeof(name_string), "%s:rx(%d)",
---
> /* Initialize the RX side lock */
> snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2175c2363
< mtx_init(&rxr->rx_mtx, name_string, NULL, MTX_DEF);
---
> mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
2557,2561d2744
< #ifndef NO_82598_A0_SUPPORT
< if (adapter->hw.revision_id == 0)
< desc_flip(TXD);
< #endif
<
2655,2659d2837
< #ifndef NO_82598_A0_SUPPORT
< if (adapter->hw.revision_id == 0)
< desc_flip(TXD);
< #endif
<
2781c2959
< ixgbe_get_buf(struct rx_ring *rxr, int i)
---
> ixgbe_get_buf(struct rx_ring *rxr, int i, u8 clean)
2783,2790c2961,2962
< struct adapter *adapter = rxr->adapter;
< struct mbuf *mp;
< bus_dmamap_t map;
< int nsegs, error, old, s = 0;
< int size = MCLBYTES;
<
<
< bus_dma_segment_t segs[1];
---
> struct adapter *adapter = rxr->adapter;
> bus_dma_segment_t seg[2];
2791a2964,2967
> struct mbuf *mh, *mp;
> bus_dmamap_t map;
> int nsegs, error;
> int merr = 0;
2793,2803d2968
< /* Are we going to Jumbo clusters? */
< if (adapter->bigbufs) {
< size = MJUMPAGESIZE;
< s = 1;
< };
<
< mp = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, size);
< if (mp == NULL) {
< adapter->mbuf_alloc_failed++;
< return (ENOBUFS);
< }
2805c2970
< mp->m_len = mp->m_pkthdr.len = size;
---
> rxbuf = &rxr->rx_buffers[i];
2807,2808c2972,2978
< if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
< m_adj(mp, ETHER_ALIGN);
---
> /* First get our header and payload mbuf */
> if (clean & IXGBE_CLEAN_HDR) {
> mh = m_gethdr(M_DONTWAIT, MT_DATA);
> if (mh == NULL)
> goto remap;
> } else /* reuse */
> mh = rxr->rx_buffers[i].m_head;
2809a2980,2994
> mh->m_len = MHLEN;
> mh->m_flags |= M_PKTHDR;
>
> if (clean & IXGBE_CLEAN_PKT) {
> mp = m_getjcl(M_DONTWAIT, MT_DATA,
> M_PKTHDR, adapter->rx_mbuf_sz);
> if (mp == NULL)
> goto remap;
> mp->m_len = adapter->rx_mbuf_sz;
> mp->m_flags &= ~M_PKTHDR;
> } else { /* reusing */
> mp = rxr->rx_buffers[i].m_pack;
> mp->m_len = adapter->rx_mbuf_sz;
> mp->m_flags &= ~M_PKTHDR;
> }
2811,2817c2996,3007
< * Using memory from the mbuf cluster pool, invoke the bus_dma
< * machinery to arrange the memory mapping.
< */
< error = bus_dmamap_load_mbuf_sg(rxr->rxtag[s], rxr->spare_map[s],
< mp, segs, &nsegs, BUS_DMA_NOWAIT);
< if (error) {
< m_free(mp);
---
> ** Need to create a chain for the following
> ** dmamap call at this point.
> */
> mh->m_next = mp;
> mh->m_pkthdr.len = mh->m_len + mp->m_len;
>
> /* Get the memory mapping */
> error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
> rxr->spare_map, mh, seg, &nsegs, BUS_DMA_NOWAIT);
> if (error != 0) {
> printf("GET BUF: dmamap load failure - %d\n", error);
> m_free(mh);
2821,2823c3011
< /* Now check our target buffer for existing mapping */
< rxbuf = &rxr->rx_buffers[i];
< old = rxbuf->bigbuf;
---
> /* Unload old mapping and update buffer struct */
2825c3013,3020
< bus_dmamap_unload(rxr->rxtag[old], rxbuf->map[old]);
---
> bus_dmamap_unload(rxr->rxtag, rxbuf->map);
> map = rxbuf->map;
> rxbuf->map = rxr->spare_map;
> rxr->spare_map = map;
> rxbuf->m_head = mh;
> rxbuf->m_pack = mp;
> bus_dmamap_sync(rxr->rxtag,
> rxbuf->map, BUS_DMASYNC_PREREAD);
2827,2832c3022,3024
< map = rxbuf->map[old];
< rxbuf->map[s] = rxr->spare_map[s];
< rxr->spare_map[old] = map;
< bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s], BUS_DMASYNC_PREREAD);
< rxbuf->m_head = mp;
< rxbuf->bigbuf = s;
---
> /* Update descriptor */
> rxr->rx_base[i].read.hdr_addr = htole64(seg[0].ds_addr);
> rxr->rx_base[i].read.pkt_addr = htole64(seg[1].ds_addr);
2834c3026
< rxr->rx_base[i].read.pkt_addr = htole64(segs[0].ds_addr);
---
> return (0);
2836,2844c3028,3053
< #ifndef NO_82598_A0_SUPPORT
< /* A0 needs to One's Compliment descriptors */
< if (adapter->hw.revision_id == 0) {
< struct dhack {u32 a1; u32 a2; u32 b1; u32 b2;};
< struct dhack *d;
<
< d = (struct dhack *)&rxr->rx_base[i];
< d->a1 = ~(d->a1);
< d->a2 = ~(d->a2);
---
> /*
> ** If we get here, we have an mbuf resource
> ** issue, so we discard the incoming packet
> ** and attempt to reuse existing mbufs on the
> ** next pass through the ring; to do so we must
> ** fix up the descriptor whose address was
> ** clobbered with writeback info.
> */
> remap:
> adapter->mbuf_header_failed++;
> merr = ENOBUFS;
> /* Is there a reusable buffer? */
> mh = rxr->rx_buffers[i].m_head;
> if (mh == NULL) /* Nope, init error */
> return (merr);
> mp = rxr->rx_buffers[i].m_pack;
> if (mp == NULL) /* Nope, init error */
> return (merr);
> /* Get our old mapping */
> rxbuf = &rxr->rx_buffers[i];
> error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
> rxbuf->map, mh, seg, &nsegs, BUS_DMA_NOWAIT);
> if (error != 0) {
> /* We really have a problem */
> m_free(mh);
> return (error);
2846c3055,3057
< #endif
---
> /* Now fix the descriptor as needed */
> rxr->rx_base[i].read.hdr_addr = htole64(seg[0].ds_addr);
> rxr->rx_base[i].read.pkt_addr = htole64(seg[1].ds_addr);
2848c3059
< return (0);
---
> return (merr);
2850a3062
>
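
The remap path above works because the advanced RX descriptor is a
two-format union: software writes the read (fetch) format, and on
completion the hardware overwrites the same 16 bytes with writeback
status, clobbering both addresses. An abbreviated sketch of the layout
assumed here (see the shared-code ixgbe_type.h for the real one):

	union ixgbe_adv_rx_desc {
		struct {
			u64 pkt_addr;	/* packet buffer address */
			u64 hdr_addr;	/* header buffer address */
		} read;
		struct {
			u64 lower;	/* hdr_info, RSS hash, etc. */
			u64 upper;	/* status_error, length, vlan */
		} wb;			/* hardware writeback */
	};
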
2876c3088,3092
< /* First make the small (2K) tag/map */
---
> /*
> ** The tag is made to accommodate the largest buffer size
> ** with packet split (hence the two segments), even though
> ** it may not always be used.
> */
2882,2900c3098,3099
< MCLBYTES, /* maxsize */
< 1, /* nsegments */
< MCLBYTES, /* maxsegsize */
< 0, /* flags */
< NULL, /* lockfunc */
< NULL, /* lockfuncarg */
< &rxr->rxtag[0]))) {
< device_printf(dev, "Unable to create RX Small DMA tag\n");
< goto fail;
< }
<
< /* Next make the large (4K) tag/map */
< if ((error = bus_dma_tag_create(NULL, /* parent */
< PAGE_SIZE, 0, /* alignment, bounds */
< BUS_SPACE_MAXADDR, /* lowaddr */
< BUS_SPACE_MAXADDR, /* highaddr */
< NULL, NULL, /* filter, filterarg */
< MJUMPAGESIZE, /* maxsize */
< 1, /* nsegments */
---
> MJUM16BYTES, /* maxsize */
> 2, /* nsegments */
2905,2906c3104,3105
< &rxr->rxtag[1]))) {
< device_printf(dev, "Unable to create RX Large DMA tag\n");
---
> &rxr->rxtag))) {
> device_printf(dev, "Unable to create RX DMA tag\n");
2910,2914c3109,3111
< /* Create the spare maps (used by getbuf) */
< error = bus_dmamap_create(rxr->rxtag[0], BUS_DMA_NOWAIT,
< &rxr->spare_map[0]);
< error = bus_dmamap_create(rxr->rxtag[1], BUS_DMA_NOWAIT,
< &rxr->spare_map[1]);
---
> /* Create the spare map (used by getbuf) */
> error = bus_dmamap_create(rxr->rxtag, BUS_DMA_NOWAIT,
> &rxr->spare_map);
2923,2924c3120,3121
< error = bus_dmamap_create(rxr->rxtag[0],
< BUS_DMA_NOWAIT, &rxbuf->map[0]);
---
> error = bus_dmamap_create(rxr->rxtag,
> BUS_DMA_NOWAIT, &rxbuf->map);
2926c3123
< device_printf(dev, "Unable to create Small RX DMA map\n");
---
> device_printf(dev, "Unable to create RX DMA map\n");
2929,2934d3125
< error = bus_dmamap_create(rxr->rxtag[1],
< BUS_DMA_NOWAIT, &rxbuf->map[1]);
< if (error) {
< device_printf(dev, "Unable to create Large RX DMA map\n");
< goto fail;
< }
2957c3148
< int j, rsize, s = 0;
---
> int j, rsize;
2961,2962c3152
< rsize = roundup2(adapter->num_rx_desc *
< sizeof(union ixgbe_adv_rx_desc), 4096);
---
>
2963a3154,3155
> rsize = roundup2(adapter->num_rx_desc *
> sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
2967,2969c3159
< ** Free current RX buffers: the size buffer
< ** that is loaded is indicated by the buffer
< ** bigbuf value.
---
> ** Free current RX buffer structs and their mbufs
2973d3162
< s = rxbuf->bigbuf;
2975c3164
< bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s],
---
> bus_dmamap_sync(rxr->rxtag, rxbuf->map,
2977,2978c3166,3170
< bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
< m_freem(rxbuf->m_head);
---
> bus_dmamap_unload(rxr->rxtag, rxbuf->map);
> if (rxbuf->m_head) {
> rxbuf->m_head->m_next = rxbuf->m_pack;
> m_freem(rxbuf->m_head);
> }
2979a3172
> rxbuf->m_pack = NULL;
2982a3176
> /* Now refresh the mbufs */
2984c3178
< if (ixgbe_get_buf(rxr, j) == ENOBUFS) {
---
> if (ixgbe_get_buf(rxr, j, IXGBE_CLEAN_ALL) == ENOBUFS) {
2985a3180,3181
> rxr->rx_buffers[j].m_pack = NULL;
> rxr->rx_base[j].read.hdr_addr = 0;
2987,2988d3182
< /* If we fail some may have change size */
< s = adapter->bigbufs;
3004c3198
< device_printf(dev,"LRO Initialization failed!\n");
---
> INIT_DEBUGOUT("LRO Initialization failed!\n");
3007c3201
< device_printf(dev,"RX LRO Initialized\n");
---
> INIT_DEBUGOUT("RX LRO Initialized\n");
3011d3204
<
3012a3206
>
3015,3017c3209,3210
< * We need to clean up any buffers allocated so far
< * 'j' is the failing index, decrement it to get the
< * last success.
---
> * We need to clean up any buffers allocated
> * so far; 'j' is the failing index.
3019,3020c3212,3213
< for (--j; j < 0; j--) {
< rxbuf = &rxr->rx_buffers[j];
---
> for (int i = 0; i < j; i++) {
> rxbuf = &rxr->rx_buffers[i];
3022c3215
< bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s],
---
> bus_dmamap_sync(rxr->rxtag, rxbuf->map,
3024c3217
< bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
---
> bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3041c3234
< int i, j, s;
---
> int j;
3043c3236
< for (i = 0; i < adapter->num_rx_queues; i++, rxr++)
---
> for (j = 0; j < adapter->num_rx_queues; j++, rxr++)
3052,3053c3245
< * cleaned up for itself. The value of 'i' will be the
< * failed ring so we must pre-decrement it.
---
> * cleaned up for itself. 'j' failed, so it's the terminus.
3055,3057c3247,3249
< rxr = adapter->rx_rings;
< for (--i; i > 0; i--, rxr++) {
< for (j = 0; j < adapter->num_rx_desc; j++) {
---
> for (int i = 0; i < j; ++i) {
> rxr = &adapter->rx_rings[i];
> for (int n = 0; n < adapter->num_rx_desc; n++) {
3059,3060c3251
< rxbuf = &rxr->rx_buffers[j];
< s = rxbuf->bigbuf;
---
> rxbuf = &rxr->rx_buffers[n];
3062c3253
< bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s],
---
> bus_dmamap_sync(rxr->rxtag, rxbuf->map,
3064c3255
< bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
---
> bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3076c3267
< * Enable receive unit.
---
> * Setup receive registers and features.
3078a3270,3271
> #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
>
3082a3276
> struct ixgbe_hw *hw = &adapter->hw;
3085,3091c3279
< u32 mrqc, hlreg, linkvec;
< u32 random[10];
< int i,j;
< union {
< u8 c[128];
< u32 i[32];
< } reta;
---
> u32 reta, mrqc = 0, hlreg, random[10];
3098,3099c3286,3287
< rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL,
---
> rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
> IXGBE_WRITE_REG(hw, IXGBE_RXCTRL,
3103c3291
< fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
---
> fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3105c3293,3295
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
---
> fctrl |= IXGBE_FCTRL_DPF;
> fctrl |= IXGBE_FCTRL_PMCF;
> IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3107,3114c3297
< hlreg = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
< if (ifp->if_mtu > ETHERMTU)
< hlreg |= IXGBE_HLREG0_JUMBOEN;
< else
< hlreg &= ~IXGBE_HLREG0_JUMBOEN;
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, hlreg);
<
< srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(0));
---
> srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(0));
3117c3300,3304
< if (adapter->bigbufs)
---
>
> hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
> /* Set for Jumbo Frames? */
> if (ifp->if_mtu > ETHERMTU) {
> hlreg |= IXGBE_HLREG0_JUMBOEN;
3119c3306,3307
< else
---
> } else {
> hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3121,3122c3309,3310
< srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl);
---
> }
> IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
3124,3126c3312,3318
< /* Set Queue moderation rate */
< for (i = 0; i < IXGBE_MSGS; i++)
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(i), DEFAULT_ITR);
---
> if (ixgbe_rx_hdr_split) {
> /* Use a standard mbuf for the header */
> srrctl |= ((IXGBE_RX_HDR << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
> & IXGBE_SRRCTL_BSIZEHDR_MASK);
> srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
> } else
> srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3128,3130c3320
< /* Set Link moderation lower */
< linkvec = adapter->num_tx_queues + adapter->num_rx_queues;
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(linkvec), LINK_ITR);
---
> IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(0), srrctl);
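
The shift of 2 works out because the BSIZEHDR field holds the header
buffer size in 64-byte units starting at bit 8, and for a byte count n,
(n / 64) << 8 == n << 2. For example, assuming IXGBE_RX_HDR is 128:
128 << 2 = 0x200, i.e. a field value of 2, or 2 * 64 = 128 bytes.
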
3135c3325
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAL(i),
---
> IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
3137,3138c3327,3328
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAH(i), (rdba >> 32));
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDLEN(i),
---
> IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
> IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
3142,3144c3332,3333
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDH(i), 0);
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(i),
< adapter->num_rx_desc - 1);
---
> IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
> IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
3147c3336
< rxcsum = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCSUM);
---
> rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3148a3338
> /* Setup RSS */
3149a3340,3342
> int i, j;
> reta = 0;
>
3153,3158d3345
< /* Create reta data */
< for (i = 0; i < 128; )
< for (j = 0; j < adapter->num_rx_queues &&
< i < 128; j++, i++)
< reta.c[i] = j;
<
3160,3161c3347,3352
< for (i = 0; i < 32; i++)
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_RETA(i), reta.i[i]);
---
> for (i = 0, j = 0; i < 128; i++, j++) {
> if (j == adapter->num_rx_queues) j = 0;
> reta = (reta << 8) | (j * 0x11);
> if ((i & 3) == 3)
> IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
> }
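
As a worked example, with num_rx_queues == 2 the entry bytes alternate
0x00, 0x11 (the multiply by 0x11 replicates the queue index into both
4-bit nibbles of an entry), so every fourth iteration writes a RETA
register of 0x00110011, spreading the 128 table entries evenly across
queues 0 and 1.
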
3165,3166c3356
< IXGBE_WRITE_REG_ARRAY(&adapter->hw,
< IXGBE_RSSRK(0), i, random[i]);
---
> IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);
3168,3179c3358,3369
< mrqc = IXGBE_MRQC_RSSEN
< /* Perform hash on these packet types */
< | IXGBE_MRQC_RSS_FIELD_IPV4
< | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
< | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
< | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
< | IXGBE_MRQC_RSS_FIELD_IPV6_EX
< | IXGBE_MRQC_RSS_FIELD_IPV6
< | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
< | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
< | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_MRQC, mrqc);
---
> /* Perform hash on these packet types */
> mrqc |= IXGBE_MRQC_RSSEN
> | IXGBE_MRQC_RSS_FIELD_IPV4
> | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
> | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
> | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
> | IXGBE_MRQC_RSS_FIELD_IPV6_EX
> | IXGBE_MRQC_RSS_FIELD_IPV6
> | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
> | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
> | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
> IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3191c3381
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCSUM, rxcsum);
---
> IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3193,3196d3382
< /* Enable Receive engine */
< rxctrl |= (IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS);
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rxctrl);
<
3238d3423
< int s = rxbuf->bigbuf;
3240,3241c3425,3428
< bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
< bus_dmamap_destroy(rxr->rxtag[s], rxbuf->map[s]);
---
> bus_dmamap_sync(rxr->rxtag, rxbuf->map,
> BUS_DMASYNC_POSTREAD);
> bus_dmamap_unload(rxr->rxtag, rxbuf->map);
> bus_dmamap_destroy(rxr->rxtag, rxbuf->map);
3254,3258c3441,3443
< for (int s = 0; s < 2; s++) {
< if (rxr->rxtag[s] != NULL) {
< bus_dma_tag_destroy(rxr->rxtag[s]);
< rxr->rxtag[s] = NULL;
< }
---
> if (rxr->rxtag != NULL) {
> bus_dma_tag_destroy(rxr->rxtag);
> rxr->rxtag = NULL;
3271a3457
> * Return TRUE for more work, FALSE for all clean.
3280,3283c3466,3467
< struct mbuf *mp;
< int len, i, eop = 0;
< u8 accept_frame = 0;
< u32 staterr;
---
> int i;
> u32 staterr;
3296a3481,3484
> /* Sync the ring */
> bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
> BUS_DMASYNC_POSTREAD);
>
3299,3300c3487,3489
< struct mbuf *m = NULL;
< int s;
---
> struct mbuf *sendmp, *mh, *mp;
> u16 hlen, plen, hdr;
> u8 dopayload, accept_frame, eop;
3302,3305c3491
< mp = rxr->rx_buffers[i].m_head;
< s = rxr->rx_buffers[i].bigbuf;
< bus_dmamap_sync(rxr->rxtag[s], rxr->rx_buffers[i].map[s],
< BUS_DMASYNC_POSTREAD);
---
>
3306a3493,3552
> hlen = plen = 0;
> sendmp = mh = mp = NULL;
>
> /* Sync the buffers */
> bus_dmamap_sync(rxr->rxtag, rxr->rx_buffers[i].map,
> BUS_DMASYNC_POSTREAD);
>
> /*
> ** The way the hardware is configured to
> ** split, it will ONLY use the header buffer
> ** when header split is enabled; otherwise we
> ** get normal behavior, i.e., both header and
> ** payload are DMA'd into the payload buffer.
> **
> ** The fmp test catches the case where a
> ** packet spans multiple descriptors; in that
> ** case only the first header is valid.
> */
> if ((ixgbe_rx_hdr_split) && (rxr->fmp == NULL)) {
> hdr = le16toh(cur->
> wb.lower.lo_dword.hs_rss.hdr_info);
> hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
> IXGBE_RXDADV_HDRBUFLEN_SHIFT;
> if (hlen > IXGBE_RX_HDR)
> hlen = IXGBE_RX_HDR;
> plen = le16toh(cur->wb.upper.length);
> /* Handle the header mbuf */
> mh = rxr->rx_buffers[i].m_head;
> mh->m_len = hlen;
> dopayload = IXGBE_CLEAN_HDR;
> /*
> ** Get the payload length, this
> ** could be zero if its a small
> ** packet.
> */
> if (plen) {
> mp = rxr->rx_buffers[i].m_pack;
> mp->m_len = plen;
> mp->m_next = NULL;
> mp->m_flags &= ~M_PKTHDR;
> mh->m_next = mp;
> mh->m_flags |= M_PKTHDR;
> dopayload = IXGBE_CLEAN_ALL;
> rxr->rx_split_packets++;
> } else { /* small packets */
> mh->m_flags &= ~M_PKTHDR;
> mh->m_next = NULL;
> }
> } else {
> /*
> ** Either no header split, or a
> ** secondary piece of a fragmented
> ** split packet.
> */
> mh = rxr->rx_buffers[i].m_pack;
> mh->m_flags |= M_PKTHDR;
> mh->m_len = le16toh(cur->wb.upper.length);
> dopayload = IXGBE_CLEAN_PKT;
> }
>
3310c3556
< } else {
---
> } else
3312,3313d3557
< }
< len = cur->wb.upper.length;
3319,3320c3563
< /* Get a fresh buffer first */
< if (ixgbe_get_buf(rxr, i) != 0) {
---
> if (ixgbe_get_buf(rxr, i, dopayload) != 0) {
3324,3327c3567
<
< /* Assign correct length to the current fragment */
< mp->m_len = len;
<
---
> /* Initial frame - setup */
3329,3331c3569,3576
< mp->m_pkthdr.len = len;
< rxr->fmp = mp; /* Store the first mbuf */
< rxr->lmp = mp;
---
> mh->m_flags |= M_PKTHDR;
> mh->m_pkthdr.len = mh->m_len;
> rxr->fmp = mh; /* Store the first mbuf */
> rxr->lmp = mh;
> if (mp) { /* Add payload if split */
> mh->m_pkthdr.len += mp->m_len;
> rxr->lmp = mh->m_next;
> }
3334,3335c3579,3580
< mp->m_flags &= ~M_PKTHDR;
< rxr->lmp->m_next = mp;
---
> mh->m_flags &= ~M_PKTHDR;
> rxr->lmp->m_next = mh;
3337c3582
< rxr->fmp->m_pkthdr.len += len;
---
> rxr->fmp->m_pkthdr.len += mh->m_len;
3343,3348c3588,3595
< rxr->packet_count++;
< rxr->byte_count += rxr->fmp->m_pkthdr.len;
<
< ixgbe_rx_checksum(adapter,
< staterr, rxr->fmp);
<
---
> rxr->rx_packets++;
> /* capture data for AIM */
> rxr->bytes += rxr->fmp->m_pkthdr.len;
> rxr->rx_bytes += rxr->bytes;
> if (ifp->if_capenable & IFCAP_RXCSUM)
> ixgbe_rx_checksum(staterr, rxr->fmp);
> else
> rxr->fmp->m_pkthdr.csum_flags = 0;
3350,3354d3596
< #if __FreeBSD_version < 700000
< VLAN_INPUT_TAG_NEW(ifp, rxr->fmp,
< (le16toh(cur->wb.upper.vlan) &
< IXGBE_RX_DESC_SPECIAL_VLAN_MASK));
< #else
3356,3358c3598,3599
< le16toh(cur->wb.upper.vlan);
< rxr->fmp->m_flags |= M_VLANTAG;
< #endif
---
> le16toh(cur->wb.upper.vlan);
> rxr->fmp->m_flags |= M_VLANTAG;
3360c3601
< m = rxr->fmp;
---
> sendmp = rxr->fmp;
3368,3370c3609,3615
< mp = rxr->rx_buffers[i].m_head;
< mp->m_len = mp->m_pkthdr.len =
< (rxr->rx_buffers[i].bigbuf ? MJUMPAGESIZE:MCLBYTES);
---
> if (hlen) {
> mh = rxr->rx_buffers[i].m_head;
> mh->m_len = MHLEN;
> mh->m_next = NULL;
> }
> mp = rxr->rx_buffers[i].m_pack;
> mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
3373c3618,3619
< if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
---
> if (adapter->max_frame_size <=
> (MCLBYTES - ETHER_ALIGN))
3375a3622
> /* handles the whole chain */
3380c3627
< m = NULL;
---
> sendmp = NULL;
3382,3384d3628
<
< /* Zero out the receive descriptors status */
< cur->wb.upper.status_error = 0;
3393,3395c3637,3643
< /* Now send up to the stack */
< if (m != NULL) {
< rxr->next_to_check = i;
---
> /*
> ** Now send up to the stack,
> ** note that the value of next_to_check
> ** is safe because we hold the RX lock
> ** through this call.
> */
> if (sendmp != NULL) {
3397,3402c3645,3646
< if ((!lro->lro_cnt) || (tcp_lro_rx(lro, m, 0))) {
< IXGBE_RX_UNLOCK(rxr);
< (*ifp->if_input)(ifp, m);
< IXGBE_RX_LOCK(rxr);
< i = rxr->next_to_check;
< }
---
> if ((!lro->lro_cnt) || (tcp_lro_rx(lro, sendmp, 0)))
> (*ifp->if_input)(ifp, sendmp);
3403a3648
>
3412d3656
< IXGBE_RX_UNLOCK(rxr);
3415,3419c3659,3661
< ** Flush any outstanding LRO work
< ** this may call into the stack and
< ** must not hold a driver lock.
< */
< while(!SLIST_EMPTY(&lro->lro_active)) {
---
> * Flush any outstanding LRO work
> */
> while (!SLIST_EMPTY(&lro->lro_active)) {
3425,3426c3667
< if (!(staterr & IXGBE_RXD_STAT_DD))
< return FALSE;
---
> IXGBE_RX_UNLOCK(rxr);
3428c3669,3678
< return TRUE;
---
> /*
> ** Leaving with more to clean?
> ** then schedule another interrupt.
> */
> if (staterr & IXGBE_RXD_STAT_DD) {
> IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, rxr->eims);
> return TRUE;
> }
>
> return FALSE;
3439,3440c3689
< ixgbe_rx_checksum(struct adapter *adapter,
< u32 staterr, struct mbuf * mp)
---
> ixgbe_rx_checksum(u32 staterr, struct mbuf * mp)
3442d3690
< struct ifnet *ifp = adapter->ifp;
3446,3451d3693
< /* Not offloading */
< if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) {
< mp->m_pkthdr.csum_flags = 0;
< return;
< }
<
3473c3715,3716
< #ifdef IXGBE_VLAN_EVENTS
---
>
> #ifdef IXGBE_HW_VLAN_SUPPORT
3482c3725
< u32 ctrl;
---
> u32 ctrl, rctl, index, vfta;
3500a3744
> u32 index, vfta;
3516c3760
< #endif /* IXGBE_VLAN_EVENTS */
---
> #endif
3526a3771,3773
>
> IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
>
3532,3533c3779
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC,
< adapter->eims_mask | mask);
---
> IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3536d3781
< IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3562a3808,3814
> /*
> ** Setup the correct IVAR register for a particular MSIX interrupt
> ** (yes this is all very magic and confusing :)
> ** - entry is the register array entry
> ** - vector is the MSIX vector for this queue
> ** - type is RX/TX/MISC
> */
3564c3816
< ixgbe_set_ivar(struct adapter *adapter, u16 entry, u8 vector)
---
> ixgbe_set_ivar(struct adapter *adapter, u16 entry, u8 vector, s8 type)
3565a3818
> struct ixgbe_hw *hw = &adapter->hw;
3569,3573c3822,3839
< index = (entry >> 2) & 0x1F;
< ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index));
< ivar &= ~(0xFF << (8 * (entry & 0x3)));
< ivar |= (vector << (8 * (entry & 0x3)));
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
---
>
> switch (hw->mac.type) {
>
> case ixgbe_mac_82598EB:
> if (type == -1)
> entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
> else
> entry += (type * 64);
> index = (entry >> 2) & 0x1F;
> ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
> ivar &= ~(0xFF << (8 * (entry & 0x3)));
> ivar |= (vector << (8 * (entry & 0x3)));
> IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
> break;
>
> default:
> break;
> }
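
To make the 82598 arithmetic concrete (illustrative numbers): mapping
RX queue 5 (type 0) to MSIX vector 2 leaves entry = 5, so index =
(5 >> 2) & 0x1F = 1 and byte lane 5 & 3 = 1, i.e. bits 15:8 of IVAR(1)
receive vector 2. The TX queue with the same number gets entry
5 + 64 = 69, landing in IVAR(17) at the same byte lane.
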
3582,3585c3848,3849
< for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
< ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(i), rxr->msix);
< adapter->eims_mask |= rxr->eims;
< }
---
> for (int i = 0; i < adapter->num_rx_queues; i++, rxr++)
> ixgbe_set_ivar(adapter, i, rxr->msix, 0);
3587,3590c3851,3852
< for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
< ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i), txr->msix);
< adapter->eims_mask |= txr->eims;
< }
---
> for (int i = 0; i < adapter->num_tx_queues; i++, txr++)
> ixgbe_set_ivar(adapter, i, txr->msix, 1);
3593,3595c3855
< ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX,
< adapter->linkvec);
< adapter->eims_mask |= IXGBE_IVAR_OTHER_CAUSES_INDEX;
---
> ixgbe_set_ivar(adapter, 1, adapter->linkvec, -1);
3597a3858,3889
> /*
> ** ixgbe_sfp_probe - called from the local timer to
> ** determine if a port has had optics inserted.
> */
> static bool ixgbe_sfp_probe(struct adapter *adapter)
> {
> struct ixgbe_hw *hw = &adapter->hw;
> device_t dev = adapter->dev;
> bool result = FALSE;
>
> if ((hw->phy.type == ixgbe_phy_nl) &&
> (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
> s32 ret = hw->phy.ops.identify_sfp(hw);
> if (ret)
> goto out;
> ret = hw->phy.ops.reset(hw);
> if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
> device_printf(dev,"Unsupported SFP+ module detected!");
> printf(" Reload driver with supported module.\n");
> adapter->sfp_probe = FALSE;
> goto out;
> } else
> device_printf(dev,"SFP+ module detected!\n");
> /* We now have supported optics */
> adapter->sfp_probe = FALSE;
> result = TRUE;
> }
> out:
> return (result);
> }
>
>
3646,3648d3937
< adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
< adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
<
3703,3706c3992
< adapter->mbuf_alloc_failed);
< device_printf(dev,"Std Cluster Failed = %lu\n",
< adapter->mbuf_cluster_failed);
<
---
> adapter->mbuf_defrag_failed);
3763,3764c4049,4052
< device_printf(dev,"RX(%d) Packets Received: %lu\n",
< rxr->me, (long)rxr->packet_count);
---
> device_printf(dev,"RX(%d) Packets Received: %lld\n",
> rxr->me, (long long)rxr->rx_packets);
> device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
> rxr->me, (long long)rxr->rx_split_packets);
3766c4054
< rxr->me, (long)rxr->byte_count);
---
> rxr->me, (long)rxr->rx_bytes);
3780c4068
< txr->me, (long)txr->tx_packets);
---
> txr->me, (long)txr->total_packets);
3855c4143
< adapter->hw.fc.type = ixgbe_flow_control;
---
> adapter->hw.fc.requested_mode = ixgbe_flow_control;
3859c4147
< adapter->hw.fc.type = ixgbe_fc_none;
---
> adapter->hw.fc.requested_mode = ixgbe_fc_none;
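
For reference when setting this: the values come from the shared-code
flow control enum, whose ordering is assumed here to be (verify
against ixgbe_type.h):

	enum ixgbe_fc_mode {		/* assumed ordering */
		ixgbe_fc_none = 0,
		ixgbe_fc_rx_pause,	/* 1 */
		ixgbe_fc_tx_pause,	/* 2 */
		ixgbe_fc_full		/* 3 */
	};

so a tunable/sysctl value of 3 requests full (TX and RX pause) flow
control.
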
3875,3897d4162
<
< #ifndef NO_82598_A0_SUPPORT
< /*
< * A0 Workaround: invert descriptor for hardware
< */
< void
< desc_flip(void *desc)
< {
< struct dhack {u32 a1; u32 a2; u32 b1; u32 b2;};
< struct dhack *d;
<
< d = (struct dhack *)desc;
< d->a1 = ~(d->a1);
< d->a2 = ~(d->a2);
< d->b1 = ~(d->b1);
< d->b2 = ~(d->b2);
< d->b2 &= 0xFFFFFFF0;
< d->b1 &= ~IXGBE_ADVTXD_DCMD_RS;
< }
< #endif
<
<
<