Lines matching defs:nic — definitions and uses of the struct nicvf *nic parameter. The matches come from the queue-management code of the Cavium ThunderX nicvf driver (nicvf_queues.c); each match carries its line number in that file.

16 #include "nic.h"
22 static void nicvf_get_page(struct nicvf *nic)
24 if (!nic->rb_pageref || !nic->rb_page)
27 page_ref_add(nic->rb_page, nic->rb_pageref);
28 nic->rb_pageref = 0;
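
The nicvf_get_page() block above (file lines 22-28) batches page reference counting: every sub-page receive buffer carved out of nic->rb_page bumps the cheap software counter rb_pageref, and the accumulated count is applied to the struct page in a single page_ref_add() call. A minimal user-space sketch of that batching idea, with all names (take_buffer, flush_refs, page_refcount) hypothetical:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Model of a page's shared refcount (atomic, costly to touch often). */
    static atomic_int page_refcount = 1;

    /* Cheap local counter, bumped once per buffer carved from the page. */
    static int pending_refs;

    static void take_buffer(void)
    {
        pending_refs++;                 /* no atomic op per buffer */
    }

    static void flush_refs(void)
    {
        if (!pending_refs)
            return;
        /* One atomic add covers every buffer taken since the last flush. */
        atomic_fetch_add(&page_refcount, pending_refs);
        pending_refs = 0;
    }

    int main(void)
    {
        for (int i = 0; i < 4; i++)
            take_buffer();
        flush_refs();
        printf("page refcount: %d\n", atomic_load(&page_refcount)); /* 5 */
        return 0;
    }
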
32 static int nicvf_poll_reg(struct nicvf *nic, int qidx,
43 reg_val = nicvf_queue_reg_read(nic, reg, qidx);
49 netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
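
nicvf_poll_reg() (file lines 32-49) spins on a queue register until a bit field reaches an expected value and logs "Poll on reg ... failed" on timeout. A compilable sketch of that poll-with-timeout pattern; read_reg(), the delay, and the register offset are stand-ins, not the driver's API:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Fake device read; the field of interest already holds 1 here. */
    static uint64_t read_reg(uint64_t reg)
    {
        (void)reg;
        return 0x1;
    }

    /* Wait until ((reg >> bit_pos) & mask) == expected, or give up. */
    static bool poll_reg(uint64_t reg, int bit_pos, uint64_t mask,
                         uint64_t expected, int max_tries)
    {
        while (max_tries--) {
            if (((read_reg(reg) >> bit_pos) & mask) == expected)
                return true;
            usleep(2000);               /* back off between reads */
        }
        return false;                   /* caller logs the failure */
    }

    int main(void)
    {
        if (!poll_reg(0x830, 0, 0x01, 0x01, 10))
            fprintf(stderr, "Poll on reg 0x830 failed\n");
        return 0;
    }
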
54 static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
60 dmem->unalign_base = dma_alloc_coherent(&nic->pdev->dev, dmem->size,
72 static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
77 dma_free_coherent(&nic->pdev->dev, dmem->size,
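
nicvf_alloc_q_desc_mem() (file lines 54-72) allocates descriptor rings with dma_alloc_coherent() and keeps both the raw pointer (unalign_base) and an aligned pointer, so the hardware sees a properly aligned ring while the free path (file line 77) still releases the original allocation. A user-space sketch of the over-allocate-then-align idea, with hypothetical names:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct desc_mem {
        void   *unalign_base;   /* what the allocator actually returned */
        void   *base;           /* aligned pointer handed to the "hardware" */
        size_t  size;
    };

    static int alloc_desc_mem(struct desc_mem *dmem, size_t q_len,
                              size_t desc_size, size_t align)
    {
        dmem->size = q_len * desc_size + align;  /* slack for alignment */
        dmem->unalign_base = calloc(1, dmem->size);
        if (!dmem->unalign_base)
            return -1;
        /* Round the base up to the required alignment. */
        dmem->base = (void *)(((uintptr_t)dmem->unalign_base + align - 1) &
                              ~(uintptr_t)(align - 1));
        return 0;
    }

    static void free_desc_mem(struct desc_mem *dmem)
    {
        free(dmem->unalign_base);  /* free the raw pointer, not the aligned one */
        dmem->unalign_base = dmem->base = NULL;
    }

    int main(void)
    {
        struct desc_mem dmem;

        if (!alloc_desc_mem(&dmem, 128, 64, 512))
            printf("ring at %p (raw %p)\n", dmem.base, dmem.unalign_base);
        free_desc_mem(&dmem);
        return 0;
    }
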
93 static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
126 this_cpu_inc(nic->pnicvf->drv_stats->page_alloc);
131 nic->rb_page = page;
176 static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
184 if (!rbdr->is_xdp && nic->rb_page &&
185 ((nic->rb_page_offset + buf_len) <= PAGE_SIZE)) {
186 nic->rb_pageref++;
190 nicvf_get_page(nic);
191 nic->rb_page = NULL;
194 pgcache = nicvf_alloc_page(nic, rbdr, gfp);
195 if (!pgcache && !nic->rb_page) {
196 this_cpu_inc(nic->pnicvf->drv_stats->rcv_buffer_alloc_failures);
200 nic->rb_page_offset = 0;
208 nic->rb_page = pgcache->page;
214 *rbuf = (u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
215 nic->rb_page_offset, buf_len,
218 if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
219 if (!nic->rb_page_offset)
220 __free_pages(nic->rb_page, 0);
221 nic->rb_page = NULL;
226 nic->rb_page_offset += buf_len;
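
nicvf_alloc_rcv_buffer() (file lines 176-226) carves fixed-size receive buffers out of a single page: while rb_page_offset + buf_len still fits in PAGE_SIZE it reuses the current page and just advances the offset; otherwise it flushes the batched refs and starts a fresh page. A sketch of the carving logic alone (refcounting, DMA mapping, and page freeing omitted); BUF_LEN and the struct names are hypothetical:

    #include <stdlib.h>

    #define PAGE_SIZE   4096
    #define BUF_LEN     1536    /* hypothetical per-buffer length */

    struct rx_state {
        char   *page;       /* current backing page, if any */
        size_t  offset;     /* next free byte within the page */
    };

    /* Hand out the next buffer, reusing the current page when it fits. */
    static void *alloc_rcv_buffer(struct rx_state *rx)
    {
        if (!rx->page || rx->offset + BUF_LEN > PAGE_SIZE) {
            rx->page = malloc(PAGE_SIZE);   /* fresh page on overflow */
            if (!rx->page)
                return NULL;                /* allocation-failure path */
            rx->offset = 0;
        }
        void *buf = rx->page + rx->offset;
        rx->offset += BUF_LEN;              /* advance for the next caller */
        return buf;
    }

    int main(void)
    {
        struct rx_state rx = { 0 };

        /* With these sizes, two buffers share each page before a new one
         * is allocated; old pages are deliberately leaked in this sketch. */
        for (int i = 0; i < 5; i++)
            alloc_rcv_buffer(&rx);
        return 0;
    }
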
233 static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
253 static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
261 err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
284 if (!nic->pnicvf->xdp_prog) {
299 nic->rb_page = NULL;
301 err = nicvf_alloc_rcv_buffer(nic, rbdr, GFP_KERNEL,
313 nicvf_get_page(nic);
319 static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
340 phys_addr = nicvf_iova_to_phys(nic, buf_addr);
341 dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
351 phys_addr = nicvf_iova_to_phys(nic, buf_addr);
352 dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
375 nicvf_free_q_desc_mem(nic, &rbdr->dmem);
380 static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
382 struct queue_set *qs = nic->qs;
401 qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
413 tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
418 if (nicvf_alloc_rcv_buffer(nic, rbdr, gfp, RCV_FRAG_LEN, &rbuf))
427 nicvf_get_page(nic);
434 nic->rb_alloc_fail = true;
436 nic->rb_alloc_fail = false;
439 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
443 if (!nic->rb_alloc_fail && rbdr->enable &&
444 netif_running(nic->pnicvf->netdev))
445 nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
454 struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);
456 nicvf_refill_rbdr(nic, GFP_KERNEL);
457 if (nic->rb_alloc_fail)
458 schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
460 nic->rb_work_scheduled = false;
466 struct nicvf *nic = from_tasklet(nic, t, rbdr_task);
468 nicvf_refill_rbdr(nic, GFP_ATOMIC);
469 if (nic->rb_alloc_fail) {
470 nic->rb_work_scheduled = true;
471 schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
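
The two refill entry points above (file lines 454-471) form a two-tier retry: the tasklet refills with GFP_ATOMIC because it cannot sleep, and on allocation failure it schedules delayed work that retries 10 ms later with GFP_KERNEL. A small model of that fallback policy; refill(), alloc_failed, and schedule_retry_in_ms() are invented stand-ins for the driver's machinery:

    #include <stdbool.h>
    #include <stdio.h>

    enum refill_ctx { REFILL_ATOMIC, REFILL_SLEEPABLE };

    static bool alloc_failed;

    /* Pretend atomic allocations fail under pressure, sleepable ones work. */
    static void refill(enum refill_ctx ctx)
    {
        alloc_failed = (ctx == REFILL_ATOMIC);
    }

    /* Stands in for schedule_delayed_work(..., msecs_to_jiffies(ms)). */
    static void schedule_retry_in_ms(unsigned int ms)
    {
        printf("retry scheduled in %u ms\n", ms);
    }

    /* Softirq context (tasklet): must not sleep, so try the atomic path. */
    static void refill_tasklet(void)
    {
        refill(REFILL_ATOMIC);
        if (alloc_failed)
            schedule_retry_in_ms(10);   /* defer to a context that may sleep */
    }

    /* Workqueue context: sleeping allocations are fine here. */
    static void refill_work(void)
    {
        refill(REFILL_SLEEPABLE);
        if (alloc_failed)
            schedule_retry_in_ms(10);   /* keep retrying until the ring fills */
    }

    int main(void)
    {
        refill_tasklet();   /* fails, defers to the work item */
        refill_work();      /* succeeds */
        return 0;
    }
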
476 static int nicvf_init_cmp_queue(struct nicvf *nic,
481 err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
487 cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
488 nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
493 static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
500 nicvf_free_q_desc_mem(nic, &cq->dmem);
504 static int nicvf_init_snd_queue(struct nicvf *nic,
509 err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
524 if (nic->sqs_mode)
525 qidx += ((nic->sqs_id + 1) * MAX_SND_QUEUES_PER_QS);
526 if (qidx < nic->pnicvf->xdp_tx_queues) {
543 sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
554 void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
566 dma_unmap_page_attrs(&nic->pdev->dev, gather->addr,
572 static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
585 dma_free_coherent(&nic->pdev->dev,
610 nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
613 nicvf_unmap_sndq_buffers(nic, sq, sq->head,
624 nicvf_free_q_desc_mem(nic, &sq->dmem);
627 static void nicvf_reclaim_snd_queue(struct nicvf *nic,
631 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
633 if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
636 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
639 static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
646 nicvf_send_msg_to_pf(nic, &mbx);
649 static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
653 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
655 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
657 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
660 static void nicvf_reclaim_rbdr(struct nicvf *nic,
667 rbdr->head = nicvf_queue_reg_read(nic,
670 rbdr->tail = nicvf_queue_reg_read(nic,
677 fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
679 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
683 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
684 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
687 tmp = nicvf_queue_reg_read(nic,
695 netdev_err(nic->netdev,
700 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
703 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
705 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
706 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
710 void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
715 rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);
722 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
725 for (sqs = 0; sqs < nic->sqs_count; sqs++)
726 if (nic->snicvf[sqs])
727 nicvf_queue_reg_write(nic->snicvf[sqs],
731 static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
741 nicvf_send_msg_to_pf(nic, &mbx);
745 static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
756 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
759 nicvf_reclaim_rcv_queue(nic, qs, qidx);
774 WARN_ON(xdp_rxq_info_reg(&rq->xdp_rxq, nic->netdev, qidx, 0) < 0);
784 nicvf_send_msg_to_pf(nic, &mbx);
790 nicvf_send_msg_to_pf(nic, &mbx);
799 nicvf_send_msg_to_pf(nic, &mbx);
801 if (!nic->sqs_mode && (qidx == 0)) {
805 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
807 nicvf_config_vlan_stripping(nic, nic->netdev->features);
814 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
818 void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
828 nicvf_reclaim_cmp_queue(nic, qs, qidx);
833 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
840 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
850 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);
853 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
854 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
859 static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
870 nicvf_reclaim_snd_queue(nic, qs, qidx);
875 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
884 mbx.sq.sqs_mode = nic->sqs_mode;
886 nicvf_send_msg_to_pf(nic, &mbx);
889 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
903 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
906 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
911 netif_set_xps_queue(nic->netdev,
917 static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
924 nicvf_reclaim_rbdr(nic, rbdr, qidx);
929 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
941 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
945 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
949 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
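
nicvf_rbdr_config() (file lines 917-949) follows the usual ring bring-up order: program the descriptor base address, write the config word, ring the doorbell with the count of buffers already filled, then set the interrupt threshold. A sketch of that sequence; the register indices and the CFG_ENA bit are hypothetical, not the ThunderX encodings:

    #include <stdint.h>
    #include <stdio.h>

    enum { REG_BASE, REG_CFG, REG_DOOR, REG_THRESH };

    #define CFG_ENA (1ULL << 44)    /* hypothetical enable bit */

    /* Stands in for nicvf_queue_reg_write(). */
    static void write_reg(int reg, int qidx, uint64_t val)
    {
        printf("q%d reg%d <= 0x%llx\n", qidx, reg, (unsigned long long)val);
    }

    static void ring_config(int qidx, uint64_t base_pa, unsigned int ring_len,
                            uint64_t qsize_code, uint64_t thresh)
    {
        write_reg(REG_BASE, qidx, base_pa);      /* DMA address of the ring */
        write_reg(REG_CFG, qidx, CFG_ENA | qsize_code);
        write_reg(REG_DOOR, qidx, ring_len - 1); /* buffers made available */
        write_reg(REG_THRESH, qidx, thresh);     /* IRQ when running low */
    }

    int main(void)
    {
        ring_config(0, 0x10000000ULL, 8192, 4, 250);
        return 0;
    }
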
954 void nicvf_qset_config(struct nicvf *nic, bool enable)
957 struct queue_set *qs = nic->qs;
961 netdev_warn(nic->netdev,
967 qs->vnic_id = nic->vf_id;
972 mbx.qs.sqs_count = nic->sqs_count;
983 if (nic->ptp_clock)
986 nicvf_send_msg_to_pf(nic, &mbx);
989 static void nicvf_free_resources(struct nicvf *nic)
992 struct queue_set *qs = nic->qs;
996 nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
1000 nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
1004 nicvf_free_snd_queue(nic, &qs->sq[qidx]);
1007 static int nicvf_alloc_resources(struct nicvf *nic)
1010 struct queue_set *qs = nic->qs;
1014 if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
1021 if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
1027 if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
1033 nicvf_free_resources(nic);
1037 int nicvf_set_qset_resources(struct nicvf *nic)
1041 qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
1044 nic->qs = qs;
1057 nic->rx_queues = qs->rq_cnt;
1058 nic->tx_queues = qs->sq_cnt;
1059 nic->xdp_tx_queues = 0;
1064 int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
1067 struct queue_set *qs = nic->qs;
1068 struct queue_set *pqs = nic->pnicvf->qs;
1078 if (nic->sqs_mode && pqs) {
1084 if (nicvf_alloc_resources(nic))
1088 nicvf_snd_queue_config(nic, qs, qidx, enable);
1090 nicvf_cmp_queue_config(nic, qs, qidx, enable);
1092 nicvf_rbdr_config(nic, qs, qidx, enable);
1094 nicvf_rcv_queue_config(nic, qs, qidx, enable);
1097 nicvf_rcv_queue_config(nic, qs, qidx, disable);
1099 nicvf_rbdr_config(nic, qs, qidx, disable);
1101 nicvf_snd_queue_config(nic, qs, qidx, disable);
1103 nicvf_cmp_queue_config(nic, qs, qidx, disable);
1105 nicvf_free_resources(nic);
1111 nicvf_reset_rcv_queue_stats(nic);
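
nicvf_config_data_transfer() (file lines 1064-1111) is careful about ordering: on enable the send, completion, and buffer rings come up before the receive queues (file lines 1088-1094), presumably so no packet can arrive before its buffers and completion queue exist; on disable the receive queues go down first (file lines 1097-1103). A toy model of that ordering, with stub helpers in place of the real per-queue config functions:

    #include <stdbool.h>
    #include <stdio.h>

    static void config_send_queues(bool on)       { printf("SQ   %s\n", on ? "on" : "off"); }
    static void config_completion_queues(bool on) { printf("CQ   %s\n", on ? "on" : "off"); }
    static void config_buffer_rings(bool on)      { printf("RBDR %s\n", on ? "on" : "off"); }
    static void config_receive_queues(bool on)    { printf("RQ   %s\n", on ? "on" : "off"); }

    static void data_transfer_config(bool enable)
    {
        if (enable) {
            /* Receive path last: everything it depends on already runs. */
            config_send_queues(true);
            config_completion_queues(true);
            config_buffer_rings(true);
            config_receive_queues(true);
        } else {
            /* Receive path first: nothing new lands during teardown. */
            config_receive_queues(false);
            config_buffer_rings(false);
            config_send_queues(false);
            config_completion_queues(false);
        }
    }

    int main(void)
    {
        data_transfer_config(true);
        data_transfer_config(false);
        return 0;
    }
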
1160 void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
1164 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
1166 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
1168 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
1171 void nicvf_sq_disable(struct nicvf *nic, int qidx)
1175 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
1177 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
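
nicvf_sq_enable()/nicvf_sq_disable() (file lines 1160-1177) flip the enable bit with a read-modify-write of the SQ CFG register so the rest of the configuration survives. A sketch of that pattern; the bit position in SQ_CFG_ENA is made up, not the hardware's:

    #include <stdint.h>
    #include <stdio.h>

    #define SQ_CFG_ENA (1ULL << 19)     /* hypothetical enable-bit position */

    static uint64_t sq_cfg_reg;         /* stands in for the MMIO CFG register */

    static void sq_enable(void)
    {
        uint64_t cfg = sq_cfg_reg;      /* read */
        cfg |= SQ_CFG_ENA;              /* modify: set the enable bit only */
        sq_cfg_reg = cfg;               /* write back */
    }

    static void sq_disable(void)
    {
        uint64_t cfg = sq_cfg_reg;
        cfg &= ~SQ_CFG_ENA;             /* clear the enable bit, keep the rest */
        sq_cfg_reg = cfg;
    }

    int main(void)
    {
        sq_enable();
        printf("cfg after enable:  0x%llx\n", (unsigned long long)sq_cfg_reg);
        sq_disable();
        printf("cfg after disable: 0x%llx\n", (unsigned long long)sq_cfg_reg);
        return 0;
    }
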
1185 struct nicvf *nic = netdev_priv(netdev);
1188 head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
1206 void nicvf_xdp_sq_doorbell(struct nicvf *nic,
1216 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
1236 int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
1307 static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
1311 if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
1317 if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
1330 nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
1346 if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
1383 if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
1389 this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
1403 if (!atomic_add_unless(&nic->pnicvf->tx_ptp_skbs, 1, 1))
1465 static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
1470 txq = netdev_get_tx_queue(nic->pnicvf->netdev,
1479 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
1486 static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
1533 nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
1543 nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);
1545 this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
1550 int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
1558 subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
1565 if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
1566 return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);
1569 nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
1577 dma_addr = dma_map_page_attrs(&nic->pdev->dev, virt_to_page(skb->data),
1580 if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
1596 dma_addr = dma_map_page_attrs(&nic->pdev->dev,
1601 if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
1605 nicvf_unmap_sndq_buffers(nic, sq, hdr_sqe, i);
1613 if (nic->t88 && skb_shinfo(skb)->gso_size) {
1618 nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);
1624 nic = nic->pnicvf;
1625 netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
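
nicvf_sq_append_skb() (file lines 1550-1625) maps the linear part of the skb and every page fragment separately; if mapping fragment i fails, the i buffers already mapped are released via nicvf_unmap_sndq_buffers() (file line 1605) before the descriptors are rolled back. A compilable sketch of that map-all-or-unwind pattern, with invented dma_map()/dma_unmap() stubs:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define MAX_FRAGS 18    /* linear part plus page fragments */

    /* Fake DMA helpers; mapping fails on fragment 2 to show the unwind. */
    static bool dma_map(int frag, size_t *handle)
    {
        if (frag == 2)
            return false;
        *handle = 0x1000 + (size_t)frag;
        return true;
    }

    static void dma_unmap(size_t handle)
    {
        printf("unmapped 0x%zx\n", handle);
    }

    static size_t handles[MAX_FRAGS];

    /* Map every fragment, or unmap everything mapped so far and fail. */
    static bool map_all(int nr_frags)
    {
        for (int i = 0; i < nr_frags; i++) {
            if (!dma_map(i, &handles[i])) {
                while (i--)
                    dma_unmap(handles[i]);  /* unwind in reverse order */
                return false;
            }
        }
        return true;
    }

    int main(void)
    {
        if (!map_all(4))
            fprintf(stderr, "mapping failed, state rolled back\n");
        return 0;
    }
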
1638 static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr,
1658 dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, len,
1663 struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic,
1683 if (!nic->hw_tso)
1690 phys_addr = nicvf_iova_to_phys(nic, *rb_ptrs);
1699 nicvf_unmap_rcv_buffer(nic,
1702 skb = nicvf_rb_ptr_to_skb(nic,
1711 nicvf_unmap_rcv_buffer(nic, *rb_ptrs, phys_addr, xdp);
1757 void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
1762 netdev_dbg(nic->netdev,
1766 nicvf_reg_write(nic, NIC_VF_ENA_W1S,
1767 nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask);
1771 void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
1776 netdev_dbg(nic->netdev,
1781 nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask);
1785 void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
1790 netdev_dbg(nic->netdev,
1795 nicvf_reg_write(nic, NIC_VF_INT, mask);
1799 int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
1804 netdev_dbg(nic->netdev,
1809 return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S);
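
The interrupt helpers (file lines 1757-1809) go through dedicated write-1-to-set (NIC_VF_ENA_W1S) and write-1-to-clear (NIC_VF_ENA_W1C) registers, and acknowledge pending interrupts by writing 1s to NIC_VF_INT, so touching one queue's bit never disturbs another's. A tiny model of W1S/W1C semantics over a single enable mask:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t irq_enable_mask;    /* the state behind both registers */

    static void write_w1s(uint64_t val) /* writing 1 sets that bit */
    {
        irq_enable_mask |= val;
    }

    static void write_w1c(uint64_t val) /* writing 1 clears that bit */
    {
        irq_enable_mask &= ~val;
    }

    int main(void)
    {
        write_w1s(1ULL << 3);   /* enable queue 3's interrupt */
        write_w1s(1ULL << 5);   /* enable queue 5 without touching queue 3 */
        write_w1c(1ULL << 3);   /* disable queue 3 only */
        printf("mask: 0x%llx\n", (unsigned long long)irq_enable_mask); /* 0x20 */
        return 0;
    }
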
1812 void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
1817 nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
1820 rq = &nic->qs->rq[rq_idx];
1825 void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
1830 nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
1833 sq = &nic->qs->sq[sq_idx];
1839 int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1841 netif_err(nic, rx_err, nic->netdev,
1847 this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts);
1850 this_cpu_inc(nic->drv_stats->rx_jabber_errs);
1853 this_cpu_inc(nic->drv_stats->rx_fcs_errs);
1856 this_cpu_inc(nic->drv_stats->rx_bgx_errs);
1859 this_cpu_inc(nic->drv_stats->rx_prel2_errs);
1862 this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed);
1865 this_cpu_inc(nic->drv_stats->rx_oversize);
1868 this_cpu_inc(nic->drv_stats->rx_undersize);
1871 this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch);
1874 this_cpu_inc(nic->drv_stats->rx_l2_pclp);
1877 this_cpu_inc(nic->drv_stats->rx_ip_ver_errs);
1880 this_cpu_inc(nic->drv_stats->rx_ip_csum_errs);
1883 this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed);
1886 this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed);
1889 this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs);
1892 this_cpu_inc(nic->drv_stats->rx_l3_pclp);
1895 this_cpu_inc(nic->drv_stats->rx_l4_malformed);
1898 this_cpu_inc(nic->drv_stats->rx_l4_csum_errs);
1901 this_cpu_inc(nic->drv_stats->rx_udp_len_errs);
1904 this_cpu_inc(nic->drv_stats->rx_l4_port_errs);
1907 this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs);
1910 this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs);
1913 this_cpu_inc(nic->drv_stats->rx_l4_pclp);
1916 this_cpu_inc(nic->drv_stats->rx_truncated_pkts);
1924 int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
1928 this_cpu_inc(nic->drv_stats->tx_desc_fault);
1931 this_cpu_inc(nic->drv_stats->tx_hdr_cons_err);
1934 this_cpu_inc(nic->drv_stats->tx_subdesc_err);
1937 this_cpu_inc(nic->drv_stats->tx_max_size_exceeded);
1940 this_cpu_inc(nic->drv_stats->tx_imm_size_oflow);
1943 this_cpu_inc(nic->drv_stats->tx_data_seq_err);
1946 this_cpu_inc(nic->drv_stats->tx_mem_seq_err);
1949 this_cpu_inc(nic->drv_stats->tx_lock_viol);
1952 this_cpu_inc(nic->drv_stats->tx_data_fault);
1955 this_cpu_inc(nic->drv_stats->tx_tstmp_conflict);
1958 this_cpu_inc(nic->drv_stats->tx_tstmp_timeout);
1961 this_cpu_inc(nic->drv_stats->tx_mem_fault);
1964 this_cpu_inc(nic->drv_stats->tx_csum_overlap);
1967 this_cpu_inc(nic->drv_stats->tx_csum_overflow);
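
Both error checkers (file lines 1839 onward) translate a hardware error code into a this_cpu_inc() on a per-CPU driver counter, so the hot path takes no atomic operations or locks; the per-CPU copies are summed when statistics are read. A user-space analogue using thread-local counters; the error codes and counter names here are invented:

    #include <stdint.h>
    #include <stdio.h>

    /* Each thread bumps its own copy, like this_cpu_inc(); a reader would
     * sum across threads, which this sketch omits. */
    static _Thread_local uint64_t rx_fcs_errs;
    static _Thread_local uint64_t rx_l2_hdr_malformed;

    enum { ERR_FCS = 1, ERR_L2_MALFORMED = 2 }; /* hypothetical codes */

    static void count_rx_error(int err)
    {
        switch (err) {
        case ERR_FCS:
            rx_fcs_errs++;
            break;
        case ERR_L2_MALFORMED:
            rx_l2_hdr_malformed++;
            break;
        default:
            break;      /* unknown codes go uncounted */
        }
    }

    int main(void)
    {
        count_rx_error(ERR_FCS);
        count_rx_error(ERR_FCS);
        printf("fcs errors on this thread: %llu\n",
               (unsigned long long)rx_fcs_errs);    /* prints 2 */
        return 0;
    }
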