Lines matching refs:ss in drivers/net/ethernet/myricom/myri10ge/myri10ge.c; in this driver, ss is the per-slice state pointer (struct myri10ge_slice_state *).

198 struct myri10ge_slice_state *ss;
917 struct myri10ge_slice_state *ss;
945 bytes = mgp->max_intr_slots * sizeof(*mgp->ss[0].rx_done.entry);
1002 ss = &mgp->ss[i];
1003 cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->rx_done.bus);
1004 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->rx_done.bus);
1013 ss = &mgp->ss[i];
1014 ss->irq_claim =
1034 ss = &mgp->ss[i];
1036 ss->dca_tag = (__iomem __be32 *)
1039 ss->dca_tag = NULL;
1048 ss = &mgp->ss[i];
1050 memset(ss->rx_done.entry, 0, bytes);
1051 ss->tx.req = 0;
1052 ss->tx.done = 0;
1053 ss->tx.pkt_start = 0;
1054 ss->tx.pkt_done = 0;
1055 ss->rx_big.cnt = 0;
1056 ss->rx_small.cnt = 0;
1057 ss->rx_done.idx = 0;
1058 ss->rx_done.cnt = 0;
1059 ss->tx.wake_queue = 0;
1060 ss->tx.stop_queue = 0;
1087 myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
1089 ss->cached_dca_tag = tag;
1090 put_be32(htonl(tag), ss->dca_tag);
1093 static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss)
1098 if (cpu != ss->cpu) {
1099 tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu);
1100 if (ss->cached_dca_tag != tag)
1101 myri10ge_write_dca(ss, cpu, tag);
1102 ss->cpu = cpu;
1112 if (mgp->ss[0].dca_tag == NULL || mgp->dca_enabled)
1128 mgp->ss[i].cpu = -1;
1129 mgp->ss[i].cached_dca_tag = -1;
1130 myri10ge_update_dca(&mgp->ss[i]);
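
The put_be32() calls at lines 1090, 1535 and 1596 store a value that has already been converted to big-endian with htonl(). As best the driver source can be recalled, put_be32() is a thin wrapper around a raw 32-bit MMIO store; a sketch of its shape (paraphrased, not a verbatim quote):

    /* Store an already-big-endian value with a raw 32-bit MMIO write.
     * writel() performs a cpu-to-little-endian swap, which would corrupt
     * the value on big-endian hosts; htonl() has already done the
     * conversion, so the store must be byte-order neutral. */
    static inline void put_be32(__be32 val, __be32 __iomem *p)
    {
            __raw_writel((__force __u32)val, (__force void __iomem *)p);
    }
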
1307 myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
1309 struct myri10ge_priv *mgp = ss->mgp;
1319 rx = &ss->rx_small;
1322 rx = &ss->rx_big;
1331 skb = napi_get_frags(&ss->napi);
1333 ss->stats.rx_dropped++;
1369 skb_record_rx_queue(skb, ss - &mgp->ss[0]);
1371 napi_gro_frags(&ss->napi);
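
The expression ss - &mgp->ss[0] at line 1369 (and ss - ss->mgp->ss at line 1416 below) recovers the slice index from the slice pointer by pointer subtraction. A minimal standalone illustration with a hypothetical struct:

    /* Pointer subtraction on an array element yields its index, already
     * scaled by sizeof(struct slice); this is the idiom behind
     * skb_record_rx_queue(skb, ss - &mgp->ss[0]) above. */
    #include <assert.h>

    struct slice { int dummy; };

    static int slice_index(const struct slice *base, const struct slice *ss)
    {
            return (int)(ss - base);
    }

    int main(void)
    {
            struct slice slices[4];
            assert(slice_index(slices, &slices[2]) == 2);
            return 0;
    }
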
1377 myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
1379 struct pci_dev *pdev = ss->mgp->pdev;
1380 struct myri10ge_tx_buf *tx = &ss->tx;
1399 ss->stats.tx_bytes += skb->len;
1400 ss->stats.tx_packets++;
1416 dev_queue = netdev_get_tx_queue(ss->dev, ss - ss->mgp->ss);
1426 if ((ss->mgp->dev->real_num_tx_queues > 1) &&
1439 ss->mgp->running == MYRI10GE_ETH_RUNNING) {
1446 myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
1448 struct myri10ge_rx_done *rx_done = &ss->rx_done;
1449 struct myri10ge_priv *mgp = ss->mgp;
1463 rx_ok = myri10ge_rx_done(ss, length, checksum);
1472 ss->stats.rx_packets += rx_packets;
1473 ss->stats.rx_bytes += rx_bytes;
1476 if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh)
1477 myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
1479 if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh)
1480 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
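
The refill tests at lines 1476-1480 rely on free-running counters: fill_cnt counts receive buffers ever posted to the NIC, cnt counts buffers consumed, so their unsigned difference is the number of buffers still available to receive into. A minimal sketch of the check, with hypothetical names:

    /* Refill when fewer than `thresh` posted-but-unconsumed buffers
     * remain; unsigned subtraction keeps this correct across counter
     * wraparound. */
    static int needs_refill(unsigned int fill_cnt, unsigned int cnt,
                            unsigned int thresh)
    {
            return fill_cnt - cnt < thresh;
    }
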
1487 struct mcp_irq_data *stats = mgp->ss[0].fw_stats;
1522 struct myri10ge_slice_state *ss =
1527 if (ss->mgp->dca_enabled)
1528 myri10ge_update_dca(ss);
1531 work_done = myri10ge_clean_rx_done(ss, budget);
1535 put_be32(htonl(3), ss->irq_claim);
1542 struct myri10ge_slice_state *ss = arg;
1543 struct myri10ge_priv *mgp = ss->mgp;
1544 struct mcp_irq_data *stats = ss->fw_stats;
1545 struct myri10ge_tx_buf *tx = &ss->tx;
1551 if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) {
1552 napi_schedule(&ss->napi);
1563 napi_schedule(&ss->napi);
1580 myri10ge_tx_done(ss, (int)send_done_count);
1593 if (ss == mgp->ss)
1596 put_be32(htonl(3), ss->irq_claim + 1);
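
Lines 1542-1596 are the hard interrupt handler: as the matched lines suggest, a receive-only slice (ss != mgp->ss with a single tx queue) simply schedules NAPI and returns, while slice 0 additionally handles the firmware stats block and transmit completions. The hardirq-to-NAPI handoff, paired with the poll routine at lines 1522-1535, follows the standard pattern; a hedged sketch with hypothetical demo_* names, not the myri10ge register layout:

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>

    struct demo_slice {
            struct napi_struct napi;
            /* ... per-slice rings, stats ... */
    };

    int demo_clean_rx(struct demo_slice *ds, int budget);   /* hypothetical */
    void demo_rearm_irq(struct demo_slice *ds);             /* hypothetical */

    static irqreturn_t demo_intr(int irq, void *arg)
    {
            struct demo_slice *ds = arg;

            napi_schedule(&ds->napi);   /* defer the real work to softirq */
            return IRQ_HANDLED;
    }

    static int demo_poll(struct napi_struct *napi, int budget)
    {
            struct demo_slice *ds = container_of(napi, struct demo_slice, napi);
            int done = demo_clean_rx(ds, budget);

            /* Re-enable the interrupt only once the ring is drained. */
            if (done < budget && napi_complete_done(napi, done))
                    demo_rearm_irq(ds);
            return done;
    }
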
1713 ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1;
1714 ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1;
1716 ring->tx_max_pending = mgp->ss[0].tx.mask + 1;
1794 struct myri10ge_slice_state *ss;
1815 data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL);
1821 ss = &mgp->ss[0];
1822 data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up);
1823 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow);
1825 (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered);
1826 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause);
1827 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy);
1828 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32);
1829 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered);
1831 (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered);
1832 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt);
1833 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun);
1834 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer);
1835 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer);
1838 ss = &mgp->ss[slice];
1840 data[i++] = (unsigned int)ss->tx.pkt_start;
1841 data[i++] = (unsigned int)ss->tx.pkt_done;
1842 data[i++] = (unsigned int)ss->tx.req;
1843 data[i++] = (unsigned int)ss->tx.done;
1844 data[i++] = (unsigned int)ss->rx_small.cnt;
1845 data[i++] = (unsigned int)ss->rx_big.cnt;
1846 data[i++] = (unsigned int)ss->tx.wake_queue;
1847 data[i++] = (unsigned int)ss->tx.stop_queue;
1848 data[i++] = (unsigned int)ss->tx.linearized;
1936 static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
1938 struct myri10ge_priv *mgp = ss->mgp;
1947 slice = ss - mgp->ss;
1959 ss->tx.mask = tx_ring_entries - 1;
1960 ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1;
1967 * sizeof(*ss->tx.req_list);
1968 ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL);
1969 if (ss->tx.req_bytes == NULL)
1973 ss->tx.req_list = (struct mcp_kreq_ether_send *)
1974 ALIGN((unsigned long)ss->tx.req_bytes, 8);
1975 ss->tx.queue_active = 0;
1977 bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow);
1978 ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
1979 if (ss->rx_small.shadow == NULL)
1982 bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow);
1983 ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL);
1984 if (ss->rx_big.shadow == NULL)
1989 bytes = tx_ring_entries * sizeof(*ss->tx.info);
1990 ss->tx.info = kzalloc(bytes, GFP_KERNEL);
1991 if (ss->tx.info == NULL)
1994 bytes = rx_ring_entries * sizeof(*ss->rx_small.info);
1995 ss->rx_small.info = kzalloc(bytes, GFP_KERNEL);
1996 if (ss->rx_small.info == NULL)
1999 bytes = rx_ring_entries * sizeof(*ss->rx_big.info);
2000 ss->rx_big.info = kzalloc(bytes, GFP_KERNEL);
2001 if (ss->rx_big.info == NULL)
2005 ss->rx_big.cnt = 0;
2006 ss->rx_small.cnt = 0;
2007 ss->rx_big.fill_cnt = 0;
2008 ss->rx_small.fill_cnt = 0;
2009 ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE;
2010 ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE;
2011 ss->rx_small.watchdog_needed = 0;
2012 ss->rx_big.watchdog_needed = 0;
2014 ss->rx_small.fill_cnt = ss->rx_small.mask + 1;
2016 myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
2020 if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) {
2022 slice, ss->rx_small.fill_cnt);
2026 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
2027 if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) {
2029 slice, ss->rx_big.fill_cnt);
2036 for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
2037 int idx = i & ss->rx_big.mask;
2038 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
2040 put_page(ss->rx_big.info[idx].page);
2045 ss->rx_small.fill_cnt = ss->rx_small.cnt;
2046 for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
2047 int idx = i & ss->rx_small.mask;
2048 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
2050 put_page(ss->rx_small.info[idx].page);
2053 kfree(ss->rx_big.info);
2056 kfree(ss->rx_small.info);
2059 kfree(ss->tx.info);
2062 kfree(ss->rx_big.shadow);
2065 kfree(ss->rx_small.shadow);
2068 kfree(ss->tx.req_bytes);
2069 ss->tx.req_bytes = NULL;
2070 ss->tx.req_list = NULL;
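
Two idioms in myri10ge_allocate_rings() are worth unpacking. Ring sizes are powers of two, so mask = entries - 1 (lines 1959-1960) reduces free-running counters to ring slots with a cheap AND, as in the idx computations at lines 2037 and 2047. And the transmit request list is over-allocated and rounded up (lines 1968-1974) so that tx.req_list starts on an 8-byte boundary while tx.req_bytes keeps the original pointer for kfree() (line 2068). A sketch of that alignment idiom, with hypothetical names:

    #include <linux/kernel.h>
    #include <linux/slab.h>

    /* Allocate `bytes` of zeroed memory whose returned pointer is 8-byte
     * aligned; *raw receives the address to pass to kfree(). kzalloc()
     * already guarantees at least this alignment on common configs, so
     * the extra 8 bytes are defensive. */
    static void *demo_alloc_aligned8(size_t bytes, void **raw)
    {
            *raw = kzalloc(bytes + 8, GFP_KERNEL);
            if (!*raw)
                    return NULL;
            return (void *)ALIGN((unsigned long)*raw, 8);
    }
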
2076 static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
2078 struct myri10ge_priv *mgp = ss->mgp;
2084 if (ss->tx.req_list == NULL)
2087 for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
2088 idx = i & ss->rx_big.mask;
2089 if (i == ss->rx_big.fill_cnt - 1)
2090 ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE;
2091 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
2093 put_page(ss->rx_big.info[idx].page);
2097 ss->rx_small.fill_cnt = ss->rx_small.cnt;
2098 for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
2099 idx = i & ss->rx_small.mask;
2100 if (i == ss->rx_small.fill_cnt - 1)
2101 ss->rx_small.info[idx].page_offset =
2103 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
2105 put_page(ss->rx_small.info[idx].page);
2107 tx = &ss->tx;
2118 ss->stats.tx_dropped++;
2133 kfree(ss->rx_big.info);
2135 kfree(ss->rx_small.info);
2137 kfree(ss->tx.info);
2139 kfree(ss->rx_big.shadow);
2141 kfree(ss->rx_small.shadow);
2143 kfree(ss->tx.req_bytes);
2144 ss->tx.req_bytes = NULL;
2145 ss->tx.req_list = NULL;
2151 struct myri10ge_slice_state *ss;
2183 ss = &mgp->ss[i];
2184 snprintf(ss->irq_desc, sizeof(ss->irq_desc),
2187 myri10ge_intr, 0, ss->irq_desc,
2188 ss);
2195 &mgp->ss[i]);
2204 mgp->dev->name, &mgp->ss[0]);
2221 free_irq(mgp->msix_vectors[i].vector, &mgp->ss[i]);
2223 free_irq(pdev->irq, &mgp->ss[0]);
2234 struct myri10ge_slice_state *ss;
2237 ss = &mgp->ss[slice];
2243 ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *)
2249 ss->rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *)
2254 ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *)
2257 ss->tx.send_go = (__iomem __be32 *)
2259 ss->tx.send_stop = (__iomem __be32 *)
2268 struct myri10ge_slice_state *ss;
2271 ss = &mgp->ss[slice];
2272 cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->fw_stats_bus);
2273 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->fw_stats_bus);
2277 dma_addr_t bus = ss->fw_stats_bus;
2296 struct myri10ge_slice_state *ss;
2390 ss = &mgp->ss[slice];
2397 status = myri10ge_allocate_rings(ss);
2412 napi_enable(&(ss)->napi);
2460 napi_disable(&mgp->ss[slice].napi);
2463 myri10ge_free_rings(&mgp->ss[i]);
2482 if (mgp->ss[0].tx.req_bytes == NULL)
2488 napi_disable(&mgp->ss[i].napi);
2509 myri10ge_free_rings(&mgp->ss[i]);
2625 struct myri10ge_slice_state *ss;
2640 ss = &mgp->ss[queue];
2642 tx = &ss->tx;
2719 ss->stats.tx_dropped += 1;
2884 ss->stats.tx_dropped += 1;
2894 struct myri10ge_slice_state *ss;
2917 ss = &mgp->ss[skb_get_queue_mapping(skb)];
2919 ss->stats.tx_dropped += 1;
2931 slice_stats = &mgp->ss[i].stats;
3342 myri10ge_check_slice(struct myri10ge_slice_state *ss, int *reset_needed,
3345 struct myri10ge_priv *mgp = ss->mgp;
3346 int slice = ss - mgp->ss;
3348 if (ss->tx.req != ss->tx.done &&
3349 ss->tx.done == ss->watchdog_tx_done &&
3350 ss->watchdog_tx_req != ss->watchdog_tx_done) {
3359 slice, ss->tx.queue_active, ss->tx.req,
3360 ss->tx.done, ss->tx.pkt_start,
3361 ss->tx.pkt_done,
3362 (int)ntohl(mgp->ss[slice].fw_stats->
3365 ss->stuck = 1;
3368 if (ss->watchdog_tx_done != ss->tx.done ||
3369 ss->watchdog_rx_done != ss->rx_done.cnt) {
3372 ss->watchdog_tx_done = ss->tx.done;
3373 ss->watchdog_tx_req = ss->tx.req;
3374 ss->watchdog_rx_done = ss->rx_done.cnt;
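
myri10ge_check_slice() declares a transmit queue stuck (lines 3348-3365) when requests are outstanding (req != done), completions have not advanced since the previous watchdog tick (done == watchdog_tx_done), and work was already pending at that previous tick; when either counter moved, the slice counts as busy (lines 3368-3369), and the snapshots are refreshed each tick (lines 3372-3374). A condensed sketch of the snapshot-compare idiom, with a hypothetical struct:

    struct demo_watch {
            unsigned int req, done;             /* live counters */
            unsigned int last_req, last_done;   /* previous tick's copies */
    };

    /* Returns nonzero if work was pending across two ticks with no
     * completions in between, i.e. the queue looks hung. */
    static int demo_stuck(struct demo_watch *w)
    {
            int hung = w->req != w->done &&
                       w->done == w->last_done &&
                       w->last_req != w->last_done;

            w->last_req = w->req;
            w->last_done = w->done;
            return hung;
    }
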
3385 struct myri10ge_slice_state *ss;
3436 rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
3438 ss = mgp->ss;
3439 if (ss->stuck) {
3440 myri10ge_check_slice(ss, &reset_needed,
3443 ss->stuck = 0;
3476 struct myri10ge_slice_state *ss;
3483 rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
3488 ss = &mgp->ss[i];
3489 if (ss->rx_small.watchdog_needed) {
3490 myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
3493 if (ss->rx_small.fill_cnt - ss->rx_small.cnt >=
3495 ss->rx_small.watchdog_needed = 0;
3497 if (ss->rx_big.watchdog_needed) {
3498 myri10ge_alloc_rx_pages(mgp, &ss->rx_big,
3500 if (ss->rx_big.fill_cnt - ss->rx_big.cnt >=
3502 ss->rx_big.watchdog_needed = 0;
3504 myri10ge_check_slice(ss, &reset_needed, &busy_slice_cnt,
3529 struct myri10ge_slice_state *ss;
3534 if (mgp->ss == NULL)
3538 ss = &mgp->ss[i];
3539 if (ss->rx_done.entry != NULL) {
3541 sizeof(*ss->rx_done.entry);
3543 ss->rx_done.entry, ss->rx_done.bus);
3544 ss->rx_done.entry = NULL;
3546 if (ss->fw_stats != NULL) {
3547 bytes = sizeof(*ss->fw_stats);
3549 ss->fw_stats, ss->fw_stats_bus);
3550 ss->fw_stats = NULL;
3552 __netif_napi_del(&ss->napi);
3554 /* Wait till napi structs are no longer used, and then free ss. */
3556 kfree(mgp->ss);
3557 mgp->ss = NULL;
3562 struct myri10ge_slice_state *ss;
3567 bytes = sizeof(*mgp->ss) * mgp->num_slices;
3568 mgp->ss = kzalloc(bytes, GFP_KERNEL);
3569 if (mgp->ss == NULL) {
3574 ss = &mgp->ss[i];
3575 bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
3576 ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
3577 &ss->rx_done.bus,
3579 if (ss->rx_done.entry == NULL)
3581 bytes = sizeof(*ss->fw_stats);
3582 ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes,
3583 &ss->fw_stats_bus,
3585 if (ss->fw_stats == NULL)
3587 ss->mgp = mgp;
3588 ss->dev = mgp->dev;
3589 netif_napi_add_weight(ss->dev, &ss->napi, myri10ge_poll,
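
myri10ge_alloc_slices() (lines 3562-3589 above) and myri10ge_free_slices() (lines 3529-3557) pair each dma_alloc_coherent() with a dma_free_coherent() using the same device, size, CPU address and bus handle, and NULL the pointers after freeing (lines 3544, 3550) so a partial-failure path can be unwound safely. A minimal sketch of that pairing, with hypothetical names:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    struct demo_ring {
            void *cpu;        /* kernel virtual address */
            dma_addr_t bus;   /* device-visible bus address */
            size_t bytes;
    };

    static int demo_ring_alloc(struct device *dev, struct demo_ring *r,
                               size_t bytes)
    {
            r->bytes = bytes;
            r->cpu = dma_alloc_coherent(dev, bytes, &r->bus, GFP_KERNEL);
            return r->cpu ? 0 : -ENOMEM;
    }

    static void demo_ring_free(struct device *dev, struct demo_ring *r)
    {
            if (r->cpu)
                    dma_free_coherent(dev, r->bytes, r->cpu, r->bus);
            r->cpu = NULL;    /* safe to call again on an unwind path */
    }
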