Lines Matching refs:pfvf (source lines referencing the pfvf pointer, each shown with its line number in the driver file)

29 static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
35 static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
41 status = otx2_atomic64_fetch_add(incr, pfvf->cq_op_addr);
45 dev_err(pfvf->dev, "CQ stopped due to error");
83 static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
101 return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
104 static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
109 otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
115 static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
126 pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]);
127 otx2_dma_unmap_page(pfvf, sg->dma_addr[0],
133 static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
146 if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
148 pfvf->netdev->name, cq->cint_idx,
159 timestamp = pfvf->ptp->convert_tx_ptp_tstmp(timestamp);
160 err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
171 otx2_dma_unmap_skb_frags(pfvf, sg);
176 static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
182 if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
185 timestamp = pfvf->ptp->convert_rx_ptp_tstmp(*(u64 *)data);
187 err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
194 static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
202 va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));
211 otx2_set_rxtstamp(pfvf, skb, va);
220 len - off, pfvf->rbsize);
227 pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL);
232 static void otx2_set_rxhash(struct otx2_nic *pfvf,
239 if (!(pfvf->netdev->features & NETIF_F_RXHASH))
242 rss = &pfvf->hw.rss_info;
254 static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,
268 pfvf->hw_ops->aura_freeptr(pfvf, qidx,
274 static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
277 struct otx2_drv_stats *stats = &pfvf->hw.drv_stats;
280 if (netif_msg_rx_err(pfvf))
281 netdev_err(pfvf->netdev,
329 if (pfvf->netdev->features & NETIF_F_RXALL)
334 otx2_free_rcv_seg(pfvf, cqe, qidx);
338 static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
352 if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
356 if (pfvf->xdp_prog)
357 if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush))
371 if (otx2_skb_add_frag(pfvf, skb, *seg_addr,
377 otx2_set_rxhash(pfvf, cqe, skb);
380 if (pfvf->netdev->features & NETIF_F_RXCSUM)
388 static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
399 if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
414 otx2_rcv_pkt_handler(pfvf, napi, cq, cqe, &need_xdp_flush);
425 otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
433 struct otx2_nic *pfvf = dev;
438 if (otx2_alloc_buffer(pfvf, cq, &bufptr))
440 otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
447 static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
458 if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
462 qidx = cq->cq_idx - pfvf->hw.rx_queues;
463 sq = &pfvf->qset.sq[qidx];
473 qidx = cq->cq_idx - pfvf->hw.rx_queues;
476 otx2_xdp_snd_pkt_handler(pfvf, sq, cqe);
478 otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[qidx],
490 otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
496 qidx = cq->cq_idx - pfvf->hw.rx_queues;
498 if (qidx >= pfvf->hw.tx_queues)
499 qidx -= pfvf->hw.xdp_queues;
500 txq = netdev_get_tx_queue(pfvf->netdev, qidx);
505 netif_carrier_ok(pfvf->netdev))
511 static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll)
523 dim_update_sample(pfvf->napi_events,
537 struct otx2_nic *pfvf;
541 pfvf = (struct otx2_nic *)cq_poll->dev;
542 qset = &pfvf->qset;
551 workdone += otx2_rx_napi_handler(pfvf, napi,
554 workdone += otx2_tx_napi_handler(pfvf, cq, budget);
559 filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
561 otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
565 if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
569 if (pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED)
570 otx2_adjust_adaptive_coalese(pfvf, cq_poll);
576 work = &pfvf->refill_wrk[cq->cq_idx];
587 otx2_write64(pfvf,
614 static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
640 dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
641 if (dma_mapping_error(pfvf->dev, dma_addr))
659 static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
673 ext->lso_format = pfvf->hw.lso_tsov4_idx;
682 ext->lso_format = pfvf->hw.lso_tsov6_idx;
699 ext->lso_format = pfvf->hw.lso_udpv4_idx;
702 ext->lso_format = pfvf->hw.lso_udpv6_idx;
750 static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
765 sqe_hdr->sq = (qidx >= pfvf->hw.tx_queues) ?
766 qidx + pfvf->hw.xdp_queues : qidx;
798 static int otx2_dma_map_tso_skb(struct otx2_nic *pfvf,
816 dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
817 if (dma_mapping_error(pfvf->dev, dma_addr))
827 otx2_dma_unmap_skb_frags(pfvf, sg);
880 static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
883 struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx);
895 if (otx2_dma_map_tso_skb(pfvf, sq, skb, first_sqe, hdr_len)) {
912 otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
961 pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
965 static bool is_hw_tso_supported(struct otx2_nic *pfvf,
970 if (test_bit(HW_TSO, &pfvf->hw.cap_flag))
974 if (!is_96xx_B0(pfvf->pdev))
990 static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
996 if (is_hw_tso_supported(pfvf, skb))
1078 static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
1092 if (unlikely(pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC &&
1097 ts = ns_to_timespec64(pfvf->ptp->tstamp);
1135 ptp_offset, pfvf->ptp->base_ns, udp_csum_crt);
1145 struct otx2_nic *pfvf = netdev_priv(netdev);
1156 if (free_desc < otx2_get_sqe_count(pfvf, skb))
1172 if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
1176 otx2_sq_append_tso(pfvf, sq, skb, qidx);
1185 otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
1189 otx2_sqe_add_ext(pfvf, sq, skb, &offset);
1192 if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
1193 otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
1197 otx2_set_txtstamp(pfvf, skb, sq, &offset);
1204 pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
1210 void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx)
1218 if (pfvf->xdp_prog)
1221 if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
1224 pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
1225 pool = &pfvf->qset.pool[pool_id];
1235 otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);
1240 otx2_free_bufs(pfvf, pool, iova, pfvf->rbsize);
1244 otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
1248 void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
1259 qidx = cq->cq_idx - pfvf->hw.rx_queues;
1260 sq = &pfvf->qset.sq[qidx];
1262 if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
1277 otx2_dma_unmap_skb_frags(pfvf, sg);
1284 if (qidx >= pfvf->hw.tx_queues)
1285 qidx -= pfvf->hw.xdp_queues;
1286 txq = netdev_get_tx_queue(pfvf->netdev, qidx);
1290 otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
1294 int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
1299 mutex_lock(&pfvf->mbox.lock);
1301 msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
1303 msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox);
1306 mutex_unlock(&pfvf->mbox.lock);
1310 err = otx2_sync_mbox_msg(&pfvf->mbox);
1311 mutex_unlock(&pfvf->mbox.lock);
1315 void otx2_free_pending_sqe(struct otx2_nic *pfvf)
1324 for (sq_idx = 0; sq_idx < pfvf->hw.tx_queues; sq_idx++) {
1325 sq = &pfvf->qset.sq[sq_idx];
1332 otx2_dma_unmap_skb_frags(pfvf, sg);
1340 txq = netdev_get_tx_queue(pfvf->netdev, sq_idx);
1367 bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
1373 sq = &pfvf->qset.sq[qidx];
1395 pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
1400 static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
1415 pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
1418 xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq);
1430 qidx += pfvf->hw.tx_queues;
1432 return otx2_xdp_sq_append_pkt(pfvf, iova,
1436 err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
1438 otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
1447 bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act);
1450 trace_xdp_exception(pfvf->netdev, prog, act);
1453 otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,