Lines matching refs: sq (mlx5 Ethernet TX datapath)

46 static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
52 mlx5e_dma_get(sq, --sq->dma_fifo_pc);
54 mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
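The hits at lines 46-54 are the error unwind: DMA mappings that were pushed onto the per-SQ DMA FIFO while a WQE was being built are popped back off, newest first, and unmapped. Below is a minimal userspace sketch of that pattern under simplified assumptions; the names (dma_fifo, dma_entry, dma_fifo_unwind) and sizes are invented for illustration, not the driver's.

    /* Minimal model of the unwind in mlx5e_dma_unmap_wqe_err(): mappings go
     * onto a FIFO while a WQE is built; on failure the producer counter is
     * walked backwards to undo only this WQE's mappings. Names are invented. */
    #include <stdint.h>
    #include <stdio.h>

    #define FIFO_SIZE 64 /* power of two so the counter can wrap via a mask */

    struct dma_entry { uint64_t addr; uint32_t len; };

    struct dma_fifo {
        struct dma_entry ring[FIFO_SIZE];
        uint32_t pc; /* producer counter; only its low bits index the ring */
    };

    static struct dma_entry *dma_fifo_get(struct dma_fifo *f, uint32_t i)
    {
        return &f->ring[i & (FIFO_SIZE - 1)];
    }

    static void dma_fifo_push(struct dma_fifo *f, uint64_t addr, uint32_t len)
    {
        *dma_fifo_get(f, f->pc++) = (struct dma_entry){ .addr = addr, .len = len };
    }

    /* Undo the last num_dma mappings of a failed WQE, newest first. */
    static void dma_fifo_unwind(struct dma_fifo *f, unsigned int num_dma)
    {
        while (num_dma--) {
            struct dma_entry *e = dma_fifo_get(f, --f->pc);

            printf("unmap addr=0x%llx len=%u\n",
                   (unsigned long long)e->addr, (unsigned)e->len);
        }
    }

    int main(void)
    {
        struct dma_fifo fifo = { .pc = 0 };

        dma_fifo_push(&fifo, 0x1000, 128);  /* headlen mapping */
        dma_fifo_push(&fifo, 0x2000, 4096); /* one fragment mapping */
        dma_fifo_unwind(&fifo, 2);          /* WQE build failed: undo both */
        return 0;
    }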
119 mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
123 if (unlikely(mlx5e_ipsec_txwqe_build_eseg_csum(sq, skb, eseg)))
131 sq->stats->csum_partial_inner++;
134 sq->stats->csum_partial++;
139 sq->stats->csum_partial++;
142 sq->stats->csum_none++;
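Lines 119-142 pick the checksum-offload treatment for the WQE's Ethernet segment and bump one of three counters, depending on whether the skb asks for checksum offload and whether it carries inner (encapsulated) headers. A rough standalone model of just that branching, with invented names and without the IPsec and GRE special cases, might look like:

    /* Rough model of the branching in mlx5e_txwqe_build_eseg_csum(): the real
     * function sets bits in the WQE Ethernet segment; this just returns which
     * statistics counter would be incremented. Enum names are invented. */
    #include <stdbool.h>
    #include <stdio.h>

    enum csum_stat { CSUM_PARTIAL_INNER, CSUM_PARTIAL, CSUM_NONE };

    static enum csum_stat classify_csum(bool needs_csum, bool has_inner_headers)
    {
        if (!needs_csum)
            return CSUM_NONE;          /* sq->stats->csum_none++ */
        if (has_inner_headers)
            return CSUM_PARTIAL_INNER; /* inner L3/L4 checksum offload */
        return CSUM_PARTIAL;           /* outer L3/L4 checksum offload */
    }

    int main(void)
    {
        printf("%d %d %d\n",
               classify_csum(true, true),    /* encapsulated packet */
               classify_csum(true, false),   /* plain TCP/UDP packet */
               classify_csum(false, false)); /* no offload requested */
        return 0;
    }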
149 mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
151 struct mlx5e_sq_stats *stats = sq->stats;
177 mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
186 dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
188 if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
192 dseg->lkey = sq->mkey_be;
195 mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
204 dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
206 if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
210 dseg->lkey = sq->mkey_be;
213 mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
221 mlx5e_dma_unmap_wqe_err(sq, num_dma);
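Lines 177-221 build the gather list: the skb's linear part is mapped with dma_map_single(), each page fragment with skb_frag_dma_map(), one data segment is written per mapping, and every mapping is pushed onto the DMA FIFO so the error path at line 221 can unwind exactly num_dma entries. A simplified, non-kernel sketch of the same shape (all names invented):

    /* Simplified model of mlx5e_txwqe_build_dsegs(): map the linear part, then
     * each fragment, producing one scatter entry per mapping and returning how
     * many were created so the caller can unwind on error. Names are invented. */
    #include <stdint.h>
    #include <stdio.h>

    struct frag { uint64_t addr; uint32_t len; };
    struct dseg { uint64_t addr; uint32_t byte_count; };

    static int build_dsegs(uint64_t head_addr, uint32_t headlen,
                           const struct frag *frags, int nr_frags,
                           struct dseg *out)
    {
        int num_dma = 0;

        if (headlen) {                         /* linear part of the skb */
            out[num_dma].addr = head_addr;     /* driver: dma_map_single() */
            out[num_dma].byte_count = headlen;
            num_dma++;
        }
        for (int i = 0; i < nr_frags; i++) {   /* paged fragments */
            out[num_dma].addr = frags[i].addr; /* driver: skb_frag_dma_map() */
            out[num_dma].byte_count = frags[i].len;
            num_dma++;
        }
        return num_dma; /* on a mapping error the driver unwinds these via the DMA FIFO */
    }

    int main(void)
    {
        struct frag frags[2] = { { 0x2000, 4096 }, { 0x3000, 1500 } };
        struct dseg segs[8];
        int n = build_dsegs(0x1000, 128, frags, 2, segs);

        for (int i = 0; i < n; i++)
            printf("dseg %d: addr=0x%llx len=%u\n", i,
                   (unsigned long long)segs[i].addr, (unsigned)segs[i].byte_count);
        return 0;
    }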
243 mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct sk_buff *skb,
253 mode = sq->min_inline_mode;
256 test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
262 static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
266 struct mlx5e_sq_stats *stats = sq->stats;
270 u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb, &hopbyhop);
283 u8 mode = mlx5e_tx_wqe_inline_mode(sq, skb, accel);
297 attr->insz = mlx5e_accel_tx_ids_len(sq, accel);
342 static void mlx5e_tx_check_stop(struct mlx5e_txqsq *sq)
344 if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room))) {
345 netif_tx_stop_queue(sq->txq);
346 sq->stats->stopped++;
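Lines 342-346 stop the netdev TX queue when the cyclic work queue no longer has stop_room WQEBBs free; the free space falls out of the free-running producer (pc) and consumer (cc) counters. A standalone sketch of that counter arithmetic, with an arbitrary ring size:

    /* Model of the "has room" test behind mlx5e_tx_check_stop(): with
     * free-running counters, used = pc - cc, free = size - used, and the
     * queue is stopped once free < stop_room. Size and names are illustrative. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define WQ_SIZE 1024u /* WQEBBs in the ring, power of two */

    static bool wq_has_room_for(uint16_t cc, uint16_t pc, uint16_t n)
    {
        uint16_t used = (uint16_t)(pc - cc); /* wrap-safe with unsigned math */

        return (uint16_t)(WQ_SIZE - used) >= n;
    }

    int main(void)
    {
        uint16_t cc = 100, pc = 1100, stop_room = 32;

        /* 1000 WQEBBs in flight, only 24 free: the driver would stop the queue. */
        printf("room=%d\n", wq_has_room_for(cc, pc, stop_room));
        return 0;
    }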
350 static void mlx5e_tx_flush(struct mlx5e_txqsq *sq)
357 mlx5e_tx_mpwqe_ensure_complete(sq);
359 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
360 wi = &sq->db.wqe_info[pi];
366 wqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
367 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
371 mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
377 struct mlx5_wq_cyc *wq = &sq->wq;
388 cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | attr->opcode);
389 cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | wqe_attr->ds_cnt);
393 sq->pc += wi->num_wqebbs;
395 mlx5e_tx_check_stop(sq);
397 if (unlikely(sq->ptpsq &&
401 mlx5e_ptp_metadata_fifo_pop(&sq->ptpsq->metadata_freelist);
404 mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
408 mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
409 if (!netif_tx_queue_stopped(sq->txq) &&
410 mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq)) {
411 netif_tx_stop_queue(sq->txq);
412 sq->stats->stopped++;
417 send_doorbell = __netdev_tx_sent_queue(sq->txq, attr->num_bytes, xmit_more);
419 mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
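Lines 371-419 finish a WQE: the control segment packs the producer counter and opcode into opmod_idx_opcode and the SQ number plus data-segment count into qpn_ds, sq->pc advances by the WQE's size in WQEBBs, and the hardware doorbell is rung only when __netdev_tx_sent_queue() reports that the xmit_more batch should be flushed. A sketch of that doorbell-batching decision (simplified; the helper names and the opcode value are stand-ins):

    /* Sketch of the batching at the end of mlx5e_txwqe_complete(): descriptors
     * are posted for every packet, but the doorbell write is skipped while the
     * stack promises more packets (xmit_more) and done once per batch. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct sq_model { uint16_t pc; unsigned int doorbells; };

    /* Stand-in for __netdev_tx_sent_queue(): flush when xmit_more is false. */
    static bool tx_sent_queue(unsigned int bytes, bool xmit_more)
    {
        (void)bytes;
        return !xmit_more;
    }

    static void post_wqe(struct sq_model *sq, uint8_t num_wqebbs,
                         unsigned int bytes, bool xmit_more)
    {
        uint32_t opmod_idx_opcode = ((uint32_t)sq->pc << 8) | 0x0a; /* e.g. SEND */

        (void)opmod_idx_opcode;  /* would be written into the control segment */
        sq->pc += num_wqebbs;    /* advance the producer counter */
        if (tx_sent_queue(bytes, xmit_more))
            sq->doorbells++;     /* mlx5e_notify_hw(): ring the doorbell */
    }

    int main(void)
    {
        struct sq_model sq = { 0, 0 };

        post_wqe(&sq, 2, 1500, true);  /* more packets coming: no doorbell yet */
        post_wqe(&sq, 2, 1500, true);
        post_wqe(&sq, 2, 1500, false); /* end of the batch: one doorbell for all */
        printf("pc=%u doorbells=%u\n", (unsigned)sq.pc, sq.doorbells);
        return 0;
    }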
423 mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
433 struct mlx5e_sq_stats *stats = sq->stats;
439 wi = &sq->db.wqe_info[pi];
490 num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs + attr->hopbyhop,
495 mlx5e_txwqe_complete(sq, skb, attr, wqe_attr, num_dma, wi, cseg, eseg, xmit_more);
502 mlx5e_tx_flush(sq);
511 static bool mlx5e_tx_mpwqe_same_eseg(struct mlx5e_txqsq *sq, struct mlx5_wqe_eth_seg *eseg)
513 struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
519 static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
522 struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
526 pi = mlx5e_txqsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs);
527 wqe = MLX5E_TX_FETCH_WQE(sq, pi);
540 sq->stats->mpwqe_blks++;
543 static bool mlx5e_tx_mpwqe_session_is_active(struct mlx5e_txqsq *sq)
545 return sq->mpwqe.wqe;
548 static void mlx5e_tx_mpwqe_add_dseg(struct mlx5e_txqsq *sq, struct mlx5e_xmit_data *txd)
550 struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
560 dseg->lkey = sq->mkey_be;
563 sq->stats->mpwqe_pkts++;
566 static struct mlx5_wqe_ctrl_seg *mlx5e_tx_mpwqe_session_complete(struct mlx5e_txqsq *sq)
568 struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
575 cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW);
576 cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);
578 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
579 wi = &sq->db.wqe_info[pi];
588 sq->pc += wi->num_wqebbs;
592 mlx5e_tx_check_stop(sq);
598 mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
607 txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE);
608 if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr)))
611 if (!mlx5e_tx_mpwqe_session_is_active(sq)) {
612 mlx5e_tx_mpwqe_session_start(sq, eseg);
613 } else if (!mlx5e_tx_mpwqe_same_eseg(sq, eseg)) {
614 mlx5e_tx_mpwqe_session_complete(sq);
615 mlx5e_tx_mpwqe_session_start(sq, eseg);
618 sq->stats->xmit_more += xmit_more;
620 mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE);
621 mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb);
622 mlx5e_tx_mpwqe_add_dseg(sq, &txd);
625 if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe, sq->max_sq_mpw_wqebbs))) {
627 cseg = mlx5e_tx_mpwqe_session_complete(sq);
629 if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more))
630 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
631 } else if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more)) {
633 cseg = mlx5e_tx_mpwqe_session_complete(sq);
635 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
641 mlx5e_dma_unmap_wqe_err(sq, 1);
642 sq->stats->dropped++;
644 mlx5e_tx_flush(sq);
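Lines 511-644 implement the enhanced multi-packet WQE (MPWQE) path: a session is opened for the first packet, later packets are appended as data segments while their Ethernet segments match the cached one, and the session is completed when the headers change, the WQE fills up, or the doorbell has to be rung. A compact model of that state machine (names and the packet cap are invented):

    /* Model of the session logic in mlx5e_sq_xmit_mpwqe(): open a session if
     * none is active, restart it when the new packet's header segment differs,
     * append a data segment otherwise, and close it when full. */
    #include <stdbool.h>
    #include <stdio.h>

    #define MPWQE_MAX_PKTS 8 /* arbitrary cap standing in for max_sq_mpw_wqebbs */

    struct mpwqe_session {
        bool active;
        unsigned int eseg_key; /* stands in for the cached Ethernet segment */
        unsigned int pkts;
    };

    static void session_start(struct mpwqe_session *s, unsigned int eseg_key)
    {
        s->active = true;
        s->eseg_key = eseg_key;
        s->pkts = 0;
    }

    static void session_complete(struct mpwqe_session *s)
    {
        if (s->active)
            printf("close MPWQE with %u packets\n", s->pkts);
        s->active = false;
    }

    static void xmit_mpwqe(struct mpwqe_session *s, unsigned int eseg_key)
    {
        if (!s->active) {
            session_start(s, eseg_key);
        } else if (s->eseg_key != eseg_key) { /* headers changed */
            session_complete(s);
            session_start(s, eseg_key);
        }
        s->pkts++;                            /* add this packet's data segment */
        if (s->pkts == MPWQE_MAX_PKTS)        /* WQE full */
            session_complete(s);
    }

    int main(void)
    {
        struct mpwqe_session s = { false, 0, 0 };

        for (int i = 0; i < 10; i++)
            xmit_mpwqe(&s, 42); /* same flow: packets aggregate */
        xmit_mpwqe(&s, 99);     /* different headers: previous session closes */
        session_complete(&s);   /* mlx5e_tx_mpwqe_ensure_complete() equivalent */
        return 0;
    }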
647 void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
650 if (unlikely(mlx5e_tx_mpwqe_session_is_active(sq)))
651 mlx5e_tx_mpwqe_session_complete(sq);
662 static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
667 mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
668 if (unlikely(sq->ptpsq))
669 mlx5e_cqe_ts_id_eseg(sq->ptpsq, skb, eseg);
679 struct mlx5e_txqsq *sq;
689 sq = priv->txq2sq[skb_get_queue_mapping(skb)];
690 if (unlikely(!sq)) {
691 /* Two cases when sq can be NULL:
702 if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel)))
705 mlx5e_sq_xmit_prepare(sq, skb, &accel, &attr);
707 if (test_bit(MLX5E_SQ_STATE_MPWQE, &sq->state)) {
711 mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg, attr.ihs);
712 mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
716 mlx5e_tx_mpwqe_ensure_complete(sq);
720 pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
721 wqe = MLX5E_TX_FETCH_WQE(sq, pi);
724 mlx5e_accel_tx_finish(sq, wqe, &accel,
726 mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs);
727 mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());
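Lines 679-727 are the ndo_start_xmit entry point: the skb's queue mapping selects the SQ (which can be NULL, per the comment at line 691), the accel and prepare steps fill the transmit attributes, and the packet then either joins the MPWQE session when MLX5E_SQ_STATE_MPWQE is set (and, in the full code, the packet qualifies for it) or gets a standalone WQE. A bare-bones schematic of that dispatch, with stand-in types only:

    /* Schematic of the dispatch at the top of mlx5e_xmit(): map the skb's
     * queue index to an SQ, then pick the MPWQE or plain WQE path. */
    #include <stdbool.h>
    #include <stdio.h>

    struct sq_stub { bool mpwqe_mode; const char *name; };

    static void xmit_mpwqe_path(struct sq_stub *sq) { printf("%s: MPWQE path\n", sq->name); }
    static void xmit_wqe_path(struct sq_stub *sq)   { printf("%s: plain WQE path\n", sq->name); }

    static void xmit(struct sq_stub *txq2sq[], unsigned int queue_index)
    {
        struct sq_stub *sq = txq2sq[queue_index]; /* skb_get_queue_mapping() analogue */

        if (!sq)             /* the driver drops here; see the comment at line 691 */
            return;
        if (sq->mpwqe_mode)  /* MLX5E_SQ_STATE_MPWQE analogue */
            xmit_mpwqe_path(sq);
        else
            xmit_wqe_path(sq);
    }

    int main(void)
    {
        struct sq_stub a = { true, "sq0" }, b = { false, "sq1" };
        struct sq_stub *txq2sq[2] = { &a, &b };

        xmit(txq2sq, 0);
        xmit(txq2sq, 1);
        return 0;
    }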
732 static void mlx5e_tx_wi_dma_unmap(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
738 struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
740 mlx5e_tx_dma_unmap(sq->pdev, dma);
744 static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb,
751 hwts.hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, ts);
752 if (sq->ptpsq)
754 hwts.hwtstamp, sq->ptpsq->cq_stats);
762 static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
768 struct sk_buff *skb = mlx5e_skb_fifo_pop(&sq->db.skb_fifo);
770 mlx5e_consume_skb(sq, skb, cqe, napi_budget);
774 void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq)
776 if (netif_tx_queue_stopped(sq->txq) &&
777 mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
778 !mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq) &&
779 !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
780 netif_tx_wake_queue(sq->txq);
781 sq->stats->wake++;
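Lines 774-781 are the wake half of the stop/wake hysteresis: a queue stopped by mlx5e_tx_check_stop() is restarted from the completion path once enough WQEBBs are free again (and, for PTP SQs, metadata entries are available and the SQ is not recovering). A sketch pairing the two checks, reusing the invented counters from the room-check sketch above:

    /* Stop/wake pairing around the cyclic SQ: the xmit path stops the queue
     * when fewer than stop_room WQEBBs are free, and the completion path
     * wakes it once room is back. Counters and sizes are illustrative. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define WQ_SIZE   1024u
    #define STOP_ROOM 32u

    struct sq_ctrs { uint16_t cc, pc; bool stopped; };

    static bool has_room(const struct sq_ctrs *sq, uint16_t n)
    {
        return (uint16_t)(WQ_SIZE - (uint16_t)(sq->pc - sq->cc)) >= n;
    }

    static void tx_check_stop(struct sq_ctrs *sq)   /* xmit path */
    {
        if (!has_room(sq, STOP_ROOM))
            sq->stopped = true;                     /* netif_tx_stop_queue() */
    }

    static void txqsq_wake(struct sq_ctrs *sq)      /* completion path */
    {
        if (sq->stopped && has_room(sq, STOP_ROOM))
            sq->stopped = false;                    /* netif_tx_wake_queue() */
    }

    int main(void)
    {
        struct sq_ctrs sq = { 0, 1000, false };

        tx_check_stop(&sq); /* only 24 WQEBBs free -> stop */
        sq.cc += 500;       /* completions free 500 WQEBBs */
        txqsq_wake(&sq);
        printf("stopped=%d\n", sq.stopped);
        return 0;
    }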
788 struct mlx5e_txqsq *sq;
796 sq = container_of(cq, struct mlx5e_txqsq, cq);
798 if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
805 stats = sq->stats;
810 /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
813 sqcc = sq->cc;
815 /* avoid dirtying sq cache line every cqe */
816 dma_fifo_cc = sq->dma_fifo_cc;
832 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
833 wi = &sq->db.wqe_info[ci];
838 mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
839 mlx5e_consume_skb(sq, wi->skb, cqe, napi_budget);
846 if (unlikely(mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi,
851 mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
852 mlx5e_tx_wi_consume_fifo_skbs(sq, wi, cqe, napi_budget);
861 &sq->state)) {
862 mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
864 mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
865 queue_work(cq->workqueue, &sq->recover_work);
879 sq->dma_fifo_cc = dma_fifo_cc;
880 sq->cc = sqcc;
882 netdev_tx_completed_queue(sq->txq, npkts, nbytes);
884 mlx5e_txqsq_wake(sq);
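Lines 788-884 are the completion side: the TX CQ poller walks the finished WQEs with local copies of the consumer counters (sqcc, dma_fifo_cc) so the SQ cache line is not dirtied per CQE, unmaps the DMA entries, consumes or timestamps the skbs, and only then publishes the counters and reports the totals to BQL via netdev_tx_completed_queue(). A reduced model of that loop (structures invented, no CQE parsing):

    /* Reduced model of the TX completion loop: advance a local consumer
     * counter over the completed WQEs, accumulate packet/byte totals, and
     * write the counter back once at the end. Structures are invented. */
    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 256u

    struct wqe_info { uint8_t num_wqebbs; uint32_t num_bytes; };

    struct sq_model {
        struct wqe_info wqe_info[RING_SIZE];
        uint16_t cc, pc;
    };

    static void poll_tx_cq(struct sq_model *sq, uint16_t completed_up_to)
    {
        uint16_t sqcc = sq->cc;         /* local copy, as in the driver */
        unsigned int npkts = 0, nbytes = 0;

        while (sqcc != completed_up_to) {
            struct wqe_info *wi = &sq->wqe_info[sqcc & (RING_SIZE - 1)];

            /* here the driver unmaps wi's DMA entries and consumes its skb(s) */
            npkts++;
            nbytes += wi->num_bytes;
            sqcc += wi->num_wqebbs;     /* a WQE may span several WQEBBs */
        }

        sq->cc = sqcc;                  /* publish the consumer counter once */
        printf("completed %u pkts, %u bytes\n", npkts, nbytes);
        /* driver: netdev_tx_completed_queue(sq->txq, npkts, nbytes); then wake */
    }

    int main(void)
    {
        struct sq_model sq = { .cc = 0, .pc = 4 };

        sq.wqe_info[0] = (struct wqe_info){ .num_wqebbs = 2, .num_bytes = 1500 };
        sq.wqe_info[2] = (struct wqe_info){ .num_wqebbs = 2, .num_bytes = 9000 };
        poll_tx_cq(&sq, sq.pc); /* pretend everything up to pc has completed */
        return 0;
    }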
889 static void mlx5e_tx_wi_kfree_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi)
894 dev_kfree_skb_any(mlx5e_skb_fifo_pop(&sq->db.skb_fifo));
897 void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
903 sqcc = sq->cc;
904 dma_fifo_cc = sq->dma_fifo_cc;
906 while (sqcc != sq->pc) {
907 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
908 wi = &sq->db.wqe_info[ci];
913 mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
921 if (unlikely(mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi, &dma_fifo_cc)))
925 mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
926 mlx5e_tx_wi_kfree_fifo_skbs(sq, wi);
933 sq->dma_fifo_cc = dma_fifo_cc;
934 sq->cc = sqcc;
936 netdev_tx_completed_queue(sq->txq, npkts, nbytes);
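Lines 889-936 (mlx5e_free_txqsq_descs) drain the SQ at teardown: the loop walks everything still outstanding between cc and pc exactly like the completion-loop sketch above, but there is no CQE to consult and the skbs are dropped with dev_kfree_skb_any() instead of being consumed on the NAPI path; the counters and BQL accounting are still published at the end.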
972 void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
985 struct mlx5e_sq_stats *stats = sq->stats;
989 mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
992 pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
993 wqe = MLX5I_SQ_FETCH_WQE(sq, pi);
998 wi = &sq->db.wqe_info[pi];
1006 mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, eseg);
1038 num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs + attr.hopbyhop,
1043 mlx5e_txwqe_complete(sq, skb, &attr, &wqe_attr, num_dma, wi, cseg, eseg, xmit_more);
1050 mlx5e_tx_flush(sq);
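Lines 972-1050 are mlx5i_sq_xmit(), the IPoIB (mlx5i) flavor of the same send path: it reuses mlx5e_sq_xmit_prepare(), mlx5e_txwqe_build_eseg_csum() (with a NULL accel state, line 1006), mlx5e_txwqe_build_dsegs() and mlx5e_txwqe_complete(), and differs mainly in fetching its WQE layout through MLX5I_SQ_FETCH_WQE().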