Lines Matching defs:sq

35 mlx5e_do_send_cqe_inline(struct mlx5e_sq *sq)
37 sq->cev_counter++;
39 if (sq->cev_counter >= sq->cev_factor) {
40 sq->cev_counter = 0;
47 mlx5e_do_send_cqe(struct mlx5e_sq *sq)
50 return (mlx5e_do_send_cqe_inline(sq));
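
These matches appear to come from the FreeBSD mlx5en(4) transmit path. The two fragments above show completion-event moderation: every posted WQE bumps a counter, and a completion is requested only when the counter reaches sq->cev_factor, at which point it resets. A minimal userland model of that policy, with illustrative names (struct moderation, do_send_cqe), might look like this:

#include <stdbool.h>
#include <stdio.h>

struct moderation {
        unsigned counter;       /* WQEs posted since last CQE request */
        unsigned factor;        /* request a CQE every "factor" WQEs */
};

static bool
do_send_cqe(struct moderation *m)
{
        m->counter++;
        if (m->counter >= m->factor) {
                m->counter = 0;
                return (true);  /* mark this WQE to generate a CQE */
        }
        return (false);
}

int
main(void)
{
        struct moderation m = { 0, 4 };

        for (int i = 0; i < 8; i++)
                printf("wqe %d -> request cqe: %d\n", i, do_send_cqe(&m));
        return (0);
}
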
54 mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt)
56 u16 pi = sq->pc & sq->wq.sz_m1;
57 struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
61 wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
62 wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
63 if (mlx5e_do_send_cqe_inline(sq))
69 memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));
71 sq->mbuf[pi].mbuf = NULL;
72 sq->mbuf[pi].num_bytes = 0;
73 sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
74 sq->pc += sq->mbuf[pi].num_wqebbs;
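
mlx5e_send_nop derives the slot index by masking the free-running producer counter with the ring size, and charges the NOP's size in whole WQEBBs (64-byte basic blocks holding four 16-byte data segments each, per MLX5_SEND_WQEBB_NUM_DS). A standalone sketch of that index and size arithmetic, with illustrative values:

#include <stdint.h>
#include <stdio.h>

#define WQEBB_NUM_DS    4       /* 16-byte segments per 64-byte WQEBB */
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int
main(void)
{
        uint16_t pc = 1021;     /* producer counter, free-running */
        uint16_t sz_m1 = 1023;  /* ring size minus one (power of two) */
        uint32_t ds_cnt = 6;    /* data segments in this WQE */

        unsigned pi = pc & sz_m1;       /* slot in the cyclic work queue */
        unsigned wqebbs = DIV_ROUND_UP(ds_cnt, WQEBB_NUM_DS);

        printf("pi=%u wqebbs=%u next pc=%u\n", pi, wqebbs, pc + wqebbs);
        return (0);
}
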
92 struct mlx5e_sq *sq;
103 sq = container_of(mb_tag,
104 struct mlx5e_rl_channel, tag)->sq;
113 sq = &container_of(mb_tag,
114 struct mlx5e_channel, tag)->sq[0];
124 sq = NULL;
129 if (sq != NULL && READ_ONCE(sq->running) != 0)
130 return (sq);
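
Both send-tag branches above recover the owning channel from the embedded tag with container_of(). A self-contained model of that pointer arithmetic (struct channel and struct snd_tag here are stand-ins, not the driver's types):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct snd_tag { int type; };

struct channel {
        int sq;                 /* stand-in for struct mlx5e_sq sq[] */
        struct snd_tag tag;     /* embedded tag handed out to the stack */
};

int
main(void)
{
        struct channel c = { .sq = 42 };
        struct snd_tag *t = &c.tag;

        /* walk back from the member to the enclosing structure */
        struct channel *owner = container_of(t, struct channel, tag);

        printf("sq=%d\n", owner->sq);
        return (0);
}
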
139 struct mlx5e_sq *sq;
171 sq = &priv->channel[ch].sq[tc];
172 if (likely(READ_ONCE(sq->running) != 0))
173 return (sq);
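
Without a send tag, the SQ is picked per channel and traffic class and then checked for sq->running, as the lines above show. A toy version of that selection, assuming the RSS flowid modulo the channel count picks the channel and the VLAN priority picks the TC (the real policy handles more cases):

#include <stdio.h>

int
main(void)
{
        unsigned num_channels = 8, num_tc = 1;
        unsigned flowid = 0xdeadbeef;   /* RSS hash from the NIC */
        unsigned prio = 0;              /* VLAN PCP, default if untagged */

        unsigned ch = flowid % num_channels;
        unsigned tc = prio % num_tc;

        printf("channel=%u tc=%u\n", ch, tc);
        return (0);
}
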
178 mlx5e_get_l2_header_size(struct mlx5e_sq *sq, struct mbuf *mb)
220 return (MIN(mb->m_pkthdr.len, sq->max_inline));
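
The fallback at the end of mlx5e_get_l2_header_size bounds the inline size by both the packet length and sq->max_inline. A compact sketch of that bounding, assuming only a plain Ethernet header with an optional 802.1Q tag (the driver also parses higher-layer headers):

#include <stdio.h>

#define ETHER_HDR_LEN           14
#define ETHER_VLAN_ENCAP_LEN    4
#define MIN(a, b)               ((a) < (b) ? (a) : (b))

static unsigned
l2_header_size(int has_vlan, unsigned pkt_len, unsigned max_inline)
{
        unsigned hdr = ETHER_HDR_LEN +
            (has_vlan ? ETHER_VLAN_ENCAP_LEN : 0);

        /* never inline past the packet or the SQ's inline budget */
        return (MIN(MIN(hdr, pkt_len), max_inline));
}

int
main(void)
{
        printf("%u\n", l2_header_size(1, 1500, 18));    /* -> 18 */
        return (0);
}
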
547 mlx5e_sq_dump_xmit(struct mlx5e_sq *sq, struct mlx5e_xmit_args *parg, struct mbuf **mbp)
567 pi = sq->pc & sq->wq.sz_m1;
569 sq->mbuf[pi].num_bytes = mb->m_pkthdr.len;
570 sq->mbuf[pi].num_wqebbs = 0;
573 err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
577 sq->stats.defragged++;
585 err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
593 bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map,
597 msb = sq->priv->params_ethtool.hw_mtu_msb;
603 bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);
610 if (unlikely(!mlx5e_sq_has_room_for(sq, xsegs))) {
611 sq->stats.enobuf++;
612 bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);
618 wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
619 wqe_last = mlx5_wq_cyc_get_wqe(&sq->wq, sq->wq.sz_m1);
632 wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
633 wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
638 wqe->data.lkey = sq->mkey_be;
643 wqe = mlx5_wq_cyc_get_wqe(&sq->wq, 0);
647 sq->mbuf[pi].num_wqebbs++;
648 sq->pc++;
652 wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
653 wqe_last = mlx5_wq_cyc_get_wqe(&sq->wq, (sq->pc - 1) & sq->wq.sz_m1);
659 if (mlx5e_do_send_cqe_inline(sq))
663 memcpy(sq->doorbell.d32, wqe_last, sizeof(sq->doorbell.d32));
666 sq->mbuf[pi].mbuf = mb;
667 sq->mbuf[pi].mst = m_snd_tag_ref(parg->mst);
670 sq->stats.packets++;
671 sq->stats.bytes += sq->mbuf[pi].num_bytes;
677 sq->stats.dropped++;
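
The dump-WQE path loads the mbuf for DMA and, when the first load fails because the chain has too many scatter/gather segments, collapses the chain and retries once, counting the event in stats.defragged. A stub model of that retry pattern (load_mbuf_sg and collapse_chain stand in for bus_dmamap_load_mbuf_sg() and the mbuf defrag helpers):

#include <errno.h>
#include <stdio.h>

#define MAX_SEGS        3

static int
load_mbuf_sg(int nsegs)
{
        return (nsegs > MAX_SEGS ? EFBIG : 0);  /* too many S/G entries */
}

static int
collapse_chain(void)
{
        return (2);     /* defragging yields a shorter chain */
}

int
main(void)
{
        int nsegs = 5;
        int err = load_mbuf_sg(nsegs);

        if (err == EFBIG) {
                printf("defragged (stats.defragged++)\n");
                nsegs = collapse_chain();
                err = load_mbuf_sg(nsegs);
        }
        printf("err=%d nsegs=%d\n", err, nsegs);
        return (0);
}
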
684 mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
703 if (unlikely(!mlx5e_sq_has_room_for(sq, 2 * MLX5_SEND_WQE_MAX_WQEBBS))) {
704 sq->stats.enobuf++;
709 pi = ((~sq->pc) & sq->wq.sz_m1);
712 mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS);
713 pi = ((~sq->pc) & sq->wq.sz_m1);
715 sq->stats.enobuf++;
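
The (~sq->pc) & sq->wq.sz_m1 expression yields the number of slots left after the current one before the ring wraps; if a maximum-size WQE would not fit contiguously, a single NOP spanning (pi + 1) slots pads to the ring end so the next WQE starts at slot 0. A sketch of that arithmetic (the MAX_WQEBBS value is illustrative):

#include <stdint.h>
#include <stdio.h>

#define MAX_WQEBBS      16      /* illustrative maximum WQE footprint */

int
main(void)
{
        uint16_t sz_m1 = 63;    /* 64-slot ring */
        uint16_t pc = 55;       /* producer counter */

        unsigned pi = (~pc) & sz_m1;    /* slots left before wrap: 8 */
        if (pi < (MAX_WQEBBS - 1)) {
                printf("pad with a %u-slot NOP\n", pi + 1);
                pc += pi + 1;   /* NOP advances pc to a ring boundary */
        }
        printf("next WQE starts at slot %u\n", (unsigned)(pc & sz_m1));
        return (0);
}
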
722 switch (mlx5e_sq_tls_xmit(sq, &args, mbp)) {
738 pi = sq->pc & sq->wq.sz_m1;
739 wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
740 ifp = sq->ifp;
758 sq->stats.csum_offload_none++;
778 sq->mbuf[pi].num_bytes = payload_len + (num_pkts * args.ihs);
781 sq->stats.tso_packets++;
782 sq->stats.tso_bytes += payload_len;
812 sq->mbuf[pi].num_bytes = payload_len +
815 sq->stats.tso_packets++;
816 sq->stats.tso_bytes += payload_len;
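
For TSO the hardware replicates the inlined headers into every generated segment, so the byte accounting above adds num_pkts * ihs to the payload length. Worked through with illustrative numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int
main(void)
{
        unsigned mss = 1448;            /* TCP maximum segment size */
        unsigned payload_len = 64000;   /* TCP payload in the TSO frame */
        unsigned ihs = 66;              /* inlined L2+L3+L4 header size */

        unsigned num_pkts = DIV_ROUND_UP(payload_len, mss);
        unsigned num_bytes = payload_len + num_pkts * ihs;

        printf("pkts=%u wire_bytes=%u\n", num_pkts, num_bytes);
        return (0);
}
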
849 sq->mbuf[pi].num_bytes = max_t(unsigned int,
856 switch (sq->min_inline_mode) {
861 args.ihs = mlx5e_get_l2_header_size(sq, mb);
864 args.ihs = mlx5e_get_l2_header_size(sq, mb);
870 (sq->min_insert_caps & MLX5E_INSERT_VLAN) != 0) {
876 (sq->min_insert_caps & MLX5E_INSERT_NON_VLAN) != 0) {
881 args.ihs = mlx5e_get_l2_header_size(sq, mb);
886 sq->mbuf[pi].num_bytes = max_t(unsigned int,
897 if (unlikely(args.ihs > (sq->max_inline - ETHER_VLAN_ENCAP_LEN))) {
902 args.ihs = (sq->max_inline - ETHER_VLAN_ENCAP_LEN);
921 if (unlikely(args.ihs > sq->max_inline)) {
927 args.ihs = sq->max_inline;
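
When software must splice a VLAN tag into the inlined headers, the inline budget shrinks by ETHER_VLAN_ENCAP_LEN before the clamp is applied; for TSO the clamp cannot apply, since headers must be inlined whole. A sketch of that clamping with illustrative values:

#include <stdio.h>

#define ETHER_VLAN_ENCAP_LEN    4

int
main(void)
{
        unsigned max_inline = 18;       /* sq->max_inline */
        unsigned ihs = 22;              /* requested inline header size */
        int sw_vlan = 1;                /* software VLAN insertion needed */

        unsigned budget = max_inline -
            (sw_vlan ? ETHER_VLAN_ENCAP_LEN : 0);

        if (ihs > budget)
                ihs = budget;   /* non-TSO only; TSO fails instead */
        printf("ihs=%u\n", ihs);
        return (0);
}
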
941 err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
945 sq->stats.defragged++;
953 err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
962 bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map,
966 bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);
975 dseg->lkey = sq->mkey_be;
982 wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
983 wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
986 if (mlx5e_do_send_cqe_inline(sq))
992 memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));
995 sq->mbuf[pi].mbuf = mb;
996 sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
998 sq->mbuf[pi].mst = m_snd_tag_ref(args.mst);
1000 MPASS(sq->mbuf[pi].mst == NULL);
1002 sq->pc += sq->mbuf[pi].num_wqebbs;
1005 sq->stats.packets++;
1006 sq->stats.bytes += sq->mbuf[pi].num_bytes;
1012 sq->stats.dropped++;
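
After the WQE is built, the first 64 bits of its control segment are staged in sq->doorbell.d32 so a later mlx5e_tx_notify_hw() can tell the hardware which WQE is last. A byte-copy sketch (the field layout below is a simplification, not the exact PRM format, and the driver stores these big-endian):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ctrl_seg {
        uint32_t opmod_idx_opcode;      /* (pc << 8) | opcode */
        uint32_t qpn_ds;                /* (sqn << 8) | ds_cnt */
        uint32_t rest[14];              /* remainder of the 64-byte WQEBB */
};

int
main(void)
{
        struct ctrl_seg ctrl = {
                .opmod_idx_opcode = 0x12345600,
                .qpn_ds = 0x00abcd04,
        };
        uint32_t doorbell[2];

        /* stage only the first 64 bits for the doorbell write */
        memcpy(doorbell, &ctrl, sizeof(doorbell));
        printf("%08x %08x\n", doorbell[0], doorbell[1]);
        return (0);
}
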
1019 mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
1024 * sq->cc must be updated only after mlx5_cqwq_update_db_record(),
1027 sqcc = sq->cc;
1039 cqe = mlx5e_get_cqe(&sq->cq);
1043 mlx5_cqwq_pop(&sq->cq.wq);
1047 mlx5e_dump_err_cqe(&sq->cq, sq->sqn, (const void *)cqe);
1048 sq->stats.cqe_err++;
1056 budget -= sq->cev_factor;
1061 } else if (unlikely(x == sq->cev_factor)) {
1063 sq->stats.cqe_err++;
1066 ci = sqcc & sq->wq.sz_m1;
1068 match = (delta < sq->mbuf[ci].num_wqebbs);
1069 mb = sq->mbuf[ci].mbuf;
1070 sq->mbuf[ci].mbuf = NULL;
1071 mst = sq->mbuf[ci].mst;
1072 sq->mbuf[ci].mst = NULL;
1075 if (unlikely(sq->mbuf[ci].num_bytes == 0))
1076 sq->stats.nop++;
1078 bus_dmamap_sync(sq->dma_tag, sq->mbuf[ci].dma_map,
1080 bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);
1089 sqcc += sq->mbuf[ci].num_wqebbs;
1093 mlx5_cqwq_update_db_record(&sq->cq.wq);
1098 sq->cc = sqcc;
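
Completion reaping keeps a software consumer counter: each slot records how many WQEBBs its WQE occupied, the counter advances by that amount, and the slot containing the CQE's reported wqe_counter (found via a wrap-safe 16-bit delta, as the match test above suggests) is the last one freed. A standalone model with a fabricated ring:

#include <stdint.h>
#include <stdio.h>

struct slot {
        int mbuf;               /* stand-in for the mbuf pointer */
        uint16_t num_wqebbs;    /* ring slots this WQE occupied */
};

int
main(void)
{
        struct slot ring[8] = {
                { 101, 1 }, { 102, 2 }, { 0, 0 }, { 103, 1 },
        };
        uint16_t sz_m1 = 7;
        uint16_t sqcc = 0;              /* software consumer counter */
        uint16_t wqe_counter = 3;       /* from the CQE: last completed */

        for (;;) {
                uint16_t ci = sqcc & sz_m1;
                uint16_t delta = (uint16_t)(wqe_counter - sqcc);
                int last = (delta < ring[ci].num_wqebbs);

                printf("complete slot %u (mbuf %d)\n",
                    (unsigned)ci, ring[ci].mbuf);
                ring[ci].mbuf = 0;      /* driver frees the mbuf here */
                sqcc += ring[ci].num_wqebbs;
                if (last)
                        break;
        }
        printf("sqcc=%u\n", (unsigned)sqcc);
        return (0);
}
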
1102 mlx5e_xmit_locked(if_t ifp, struct mlx5e_sq *sq, struct mbuf *mb)
1107 READ_ONCE(sq->running) == 0)) {
1113 if (mlx5e_sq_xmit(sq, &mb) != 0) {
1120 mlx5e_tx_notify_hw(sq, false);
1126 if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
1127 sq->cev_factor != 1)) {
1129 mlx5e_sq_cev_timeout(sq);
1132 sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
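
With cev_factor > 1, the first transmit arms the moderation timeout and moves the state to HOLD_NOPS so completions can be batched. A toy version of that transition (the enum names mirror MLX5E_CEV_STATE_*, and the callout is a stub):

#include <stdio.h>

enum cev_state {
        CEV_STATE_INITIAL,
        CEV_STATE_HOLD_NOPS,
        CEV_STATE_SEND_NOPS,
};

struct sq_model {
        enum cev_state cev_next_state;
        unsigned cev_factor;
};

static void
cev_timeout_arm(struct sq_model *sq)
{
        (void)sq;
        printf("timeout armed\n");      /* a callout in the driver */
}

int
main(void)
{
        struct sq_model sq = { CEV_STATE_INITIAL, 4 };

        if (sq.cev_next_state == CEV_STATE_INITIAL && sq.cev_factor != 1) {
                cev_timeout_arm(&sq);
                sq.cev_next_state = CEV_STATE_HOLD_NOPS;
        }
        printf("state=%d\n", sq.cev_next_state);
        return (0);
}
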
1140 struct mlx5e_sq *sq;
1145 sq = mlx5e_select_queue_by_send_tag(ifp, mb);
1146 if (unlikely(sq == NULL)) {
1151 sq = mlx5e_select_queue(ifp, mb);
1152 if (unlikely(sq == NULL)) {
1161 mtx_lock(&sq->lock);
1162 ret = mlx5e_xmit_locked(ifp, sq, mb);
1163 mtx_unlock(&sq->lock);
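
The entry point serializes producers on the per-SQ lock around mlx5e_xmit_locked(). Modeled with a pthread mutex standing in for sq->lock:

#include <pthread.h>
#include <stdio.h>

struct sq {
        pthread_mutex_t lock;
        unsigned pc;
};

static int
xmit_locked(struct sq *sq)
{
        sq->pc++;               /* post a WQE; details elided */
        return (0);
}

static int
xmit(struct sq *sq)
{
        int ret;

        pthread_mutex_lock(&sq->lock);
        ret = xmit_locked(sq);
        pthread_mutex_unlock(&sq->lock);
        return (ret);
}

int
main(void)
{
        struct sq sq = { .lock = PTHREAD_MUTEX_INITIALIZER };

        printf("ret=%d pc=%u\n", xmit(&sq), sq.pc);
        return (0);
}
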
1171 struct mlx5e_sq *sq = container_of(mcq, struct mlx5e_sq, cq.mcq);
1173 mtx_lock(&sq->comp_lock);
1174 mlx5e_poll_tx_cq(sq, MLX5E_BUDGET_MAX);
1175 mlx5e_cq_arm(&sq->cq, MLX5_GET_DOORBELL_LOCK(&sq->priv->doorbell_lock));
1176 mtx_unlock(&sq->comp_lock);
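
The CQ event handler drains completions under the completion lock with a fixed budget and then re-arms the CQ so the next completion raises a fresh event. A stub sketch of that drain-then-rearm order (every call below is a stand-in for the driver/firmware interface):

#include <stdio.h>

#define BUDGET_MAX      256

static int
poll_tx_cq(int budget)
{
        (void)budget;
        return (3);     /* CQEs consumed this pass, illustrative */
}

static void
cq_arm(void)
{
        printf("CQ armed\n");   /* doorbell requesting the next event */
}

int
main(void)
{
        /* comp_lock acquisition elided; a mutex serializes pollers */
        int n = poll_tx_cq(BUDGET_MAX);

        cq_arm();
        printf("polled %d\n", n);
        return (0);
}
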