mlx5_en_tx.c revision 359853
/*-
 * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c 359853 2020-04-13 08:58:35Z hselasky $
 */

#include "en.h"
#include <machine/atomic.h>

/*
 * Return true when a completion event (CQE) should be requested for
 * this WQE. Completions are only requested for every "cev_factor"th
 * WQE, to reduce completion processing overhead.
 */
static inline bool
mlx5e_do_send_cqe(struct mlx5e_sq *sq)
{
	sq->cev_counter++;
	/* interleave the CQEs */
	if (sq->cev_counter >= sq->cev_factor) {
		sq->cev_counter = 0;
		return (1);
	}
	return (0);
}

/*
 * Post a NOP WQE spanning "ds_cnt" data segments and advance the
 * producer counter; used, among else, to pad the send queue up to the
 * ring edge so that a following WQE does not wrap.
 */
void
mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt)
{
	u16 pi = sq->pc & sq->wq.sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	else
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;
}

#if (__FreeBSD_version >= 1100000)
static uint32_t mlx5e_hash_value;

static void
mlx5e_hash_init(void *arg)
{
	mlx5e_hash_value = m_ether_tcpip_hash_init();
}

/* Make the kernel call mlx5e_hash_init() after the random stack has finished initializing */
SYSINIT(mlx5e_hash_init, SI_SUB_RANDOM, SI_ORDER_ANY, &mlx5e_hash_init, NULL);
#endif

static struct mlx5e_sq *
mlx5e_select_queue(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_priv *priv = ifp->if_softc;
	struct mlx5e_sq *sq;
	u32 ch;
	u32 tc;

	/* obtain VLAN information if present */
	if (mb->m_flags & M_VLANTAG) {
		tc = (mb->m_pkthdr.ether_vtag >> 13);
		if (tc >= priv->num_tc)
			tc = priv->default_vlan_prio;
	} else {
		tc = priv->default_vlan_prio;
	}

	ch = priv->params.num_channels;

	/* check if flowid is set */
	if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE) {
#ifdef RSS
		u32 temp;

		if (rss_hash2bucket(mb->m_pkthdr.flowid,
		    M_HASHTYPE_GET(mb), &temp) == 0)
			ch = temp % ch;
		else
#endif
			ch = (mb->m_pkthdr.flowid % 128) % ch;
	} else {
#if (__FreeBSD_version >= 1100000)
		ch = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 |
		    MBUF_HASHFLAG_L4, mb, mlx5e_hash_value) % ch;
#else
		/*
		 * m_ether_tcpip_hash is not present in stable, so just
		 * throw unhashed mbufs on queue 0
		 */
		ch = 0;
#endif
	}

	/* check if the send queue is running */
	sq = &priv->channel[ch].sq[tc];
	if (likely(READ_ONCE(sq->running) != 0))
		return (sq);
	return (NULL);
}
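/*
 * Compute how many header bytes must be inlined into the SQE to cover
 * the L2 (and VLAN) header plus the first four bytes of the L3 header,
 * so that the TOS (IPv4) or traffic class (IPv6) field is included.
 * Falls back to inlining as much as fits when the headers cannot be
 * parsed.
 */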
static inline u16
mlx5e_get_l2_header_size(struct mlx5e_sq *sq, struct mbuf *mb)
{
	struct ether_vlan_header *eh;
	uint16_t eth_type;
	int min_inline;

	eh = mtod(mb, struct ether_vlan_header *);
	if (unlikely(mb->m_len < ETHER_HDR_LEN)) {
		goto max_inline;
	} else if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		if (unlikely(mb->m_len < (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)))
			goto max_inline;
		eth_type = ntohs(eh->evl_proto);
		min_inline = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		min_inline = ETHER_HDR_LEN;
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		/*
		 * Make sure the TOS (IPv4) or traffic class (IPv6)
		 * field gets inlined. Else the SQ may stall.
		 */
		min_inline += 4;
		break;
	default:
		goto max_inline;
	}

	/*
	 * m_copydata() will be used on the remaining header which
	 * does not need to reside within the first m_len bytes of
	 * data:
	 */
	if (mb->m_pkthdr.len < min_inline)
		goto max_inline;
	return (min_inline);

max_inline:
	return (MIN(mb->m_pkthdr.len, sq->max_inline));
}

/*
 * Return the total number of Ethernet + IP + TCP/UDP header bytes,
 * or zero when the headers cannot be parsed from the first mbuf.
 */
static int
mlx5e_get_full_header_size(struct mbuf *mb)
{
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	struct ip *ip;
	int ip_hlen, tcp_hlen;
	struct ip6_hdr *ip6;
	uint16_t eth_type;
	int eth_hdr_len;

	eh = mtod(mb, struct ether_vlan_header *);
	if (mb->m_len < ETHER_HDR_LEN)
		return (0);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		if (mb->m_len < (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN))
			return (0);
		eth_type = ntohs(eh->evl_proto);
		eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		eth_hdr_len = ETHER_HDR_LEN;
	}
	switch (eth_type) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip))
			return (0);
		switch (ip->ip_p) {
		case IPPROTO_TCP:
			ip_hlen = ip->ip_hl << 2;
			eth_hdr_len += ip_hlen;
			break;
		case IPPROTO_UDP:
			ip_hlen = ip->ip_hl << 2;
			eth_hdr_len += ip_hlen + 8;
			goto done;
		default:
			return (0);
		}
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip6))
			return (0);
		switch (ip6->ip6_nxt) {
		case IPPROTO_TCP:
			eth_hdr_len += sizeof(*ip6);
			break;
		case IPPROTO_UDP:
			eth_hdr_len += sizeof(*ip6) + 8;
			goto done;
		default:
			return (0);
		}
		break;
	default:
		return (0);
	}
	if (mb->m_len < eth_hdr_len + sizeof(*th))
		return (0);
	th = (struct tcphdr *)(mb->m_data + eth_hdr_len);
	tcp_hlen = th->th_off << 2;
	eth_hdr_len += tcp_hlen;
done:
	/*
	 * m_copydata() will be used on the remaining header which
	 * does not need to reside within the first m_len bytes of
	 * data:
	 */
	if (mb->m_pkthdr.len < eth_hdr_len)
		return (0);
	return (eth_hdr_len);
}
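/*
 * Queue a single mbuf chain for transmission on the given send queue.
 * Returns zero on success. ENOBUFS and ENOMEM are returned without
 * consuming the mbuf when the ring lacks space; other errors drop and
 * free the mbuf. In both the success and drop cases *mbp is cleared.
 */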
static int
mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
{
	bus_dma_segment_t segs[MLX5E_MAX_TX_MBUF_FRAGS];
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe *wqe;
	struct ifnet *ifp;
	int nsegs;
	int err;
	int x;
	struct mbuf *mb = *mbp;
	u16 ds_cnt;
	u16 ihs;
	u16 pi;
	u8 opcode;

	/* Return ENOBUFS if the queue is full */
	if (unlikely(!mlx5e_sq_has_room_for(sq, 2 * MLX5_SEND_WQE_MAX_WQEBBS))) {
		sq->stats.enobuf++;
		return (ENOBUFS);
	}

	/* Align SQ edge with NOPs to avoid WQE wrap around */
	pi = ((~sq->pc) & sq->wq.sz_m1);
	if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
		/* Send one multi NOP message instead of many */
		mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS);
		pi = ((~sq->pc) & sq->wq.sz_m1);
		if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
			sq->stats.enobuf++;
			return (ENOMEM);
		}
	}

	/* Setup local variables */
	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
	ifp = sq->ifp;

	memset(wqe, 0, sizeof(*wqe));

	/* Send a copy of the frame to the BPF listener, if any */
	if (ifp != NULL && ifp->if_bpf != NULL)
		ETHER_BPF_MTAP(ifp, mb);

	if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L3_CSUM;
	}
	if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L4_CSUM;
	}
	if (wqe->eth.cs_flags == 0) {
		sq->stats.csum_offload_none++;
	}
	if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
		u32 payload_len;
		u32 mss = mb->m_pkthdr.tso_segsz;
		u32 num_pkts;

		wqe->eth.mss = cpu_to_be16(mss);
		opcode = MLX5_OPCODE_LSO;
		ihs = mlx5e_get_full_header_size(mb);
		if (unlikely(ihs == 0)) {
			err = EINVAL;
			goto tx_drop;
		}
		payload_len = mb->m_pkthdr.len - ihs;
		if (payload_len == 0)
			num_pkts = 1;
		else
			num_pkts = DIV_ROUND_UP(payload_len, mss);
		sq->mbuf[pi].num_bytes = payload_len + (num_pkts * ihs);

		sq->stats.tso_packets++;
		sq->stats.tso_bytes += payload_len;
	} else {
		opcode = MLX5_OPCODE_SEND;

		switch (sq->min_inline_mode) {
		case MLX5_INLINE_MODE_IP:
		case MLX5_INLINE_MODE_TCP_UDP:
			ihs = mlx5e_get_full_header_size(mb);
			if (unlikely(ihs == 0))
				ihs = mlx5e_get_l2_header_size(sq, mb);
			break;
		case MLX5_INLINE_MODE_L2:
			ihs = mlx5e_get_l2_header_size(sq, mb);
			break;
		case MLX5_INLINE_MODE_NONE:
			/* FALLTHROUGH */
		default:
			if ((mb->m_flags & M_VLANTAG) != 0 &&
			    (sq->min_insert_caps & MLX5E_INSERT_VLAN) != 0) {
				/* inlining VLAN data is not required */
				wqe->eth.vlan_cmd = htons(0x8000); /* bit 0 CVLAN */
				wqe->eth.vlan_hdr = htons(mb->m_pkthdr.ether_vtag);
				ihs = 0;
			} else if ((mb->m_flags & M_VLANTAG) == 0 &&
			    (sq->min_insert_caps & MLX5E_INSERT_NON_VLAN) != 0) {
				/* inlining non-VLAN data is not required */
				ihs = 0;
			} else {
				/* we are forced to inline the L2 header, if any */
				ihs = mlx5e_get_l2_header_size(sq, mb);
			}
			break;
		}
		sq->mbuf[pi].num_bytes = max_t(unsigned int,
		    mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
	}
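	/*
	 * Inline the packet headers into the WQE as computed above.
	 * When a VLAN tag must be inserted in software, the inlined
	 * header is rebuilt with the 4-byte 802.1Q encapsulation
	 * spliced in between the Ethernet header and the payload.
	 */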
	if (likely(ihs == 0)) {
		/* nothing to inline */
	} else if ((mb->m_flags & M_VLANTAG) != 0) {
		struct ether_vlan_header *eh = (struct ether_vlan_header *)
		    wqe->eth.inline_hdr_start;

		/* Range checks */
		if (unlikely(ihs > (sq->max_inline - ETHER_VLAN_ENCAP_LEN))) {
			if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
				err = EINVAL;
				goto tx_drop;
			}
			ihs = (sq->max_inline - ETHER_VLAN_ENCAP_LEN);
		} else if (unlikely(ihs < ETHER_HDR_LEN)) {
			err = EINVAL;
			goto tx_drop;
		}
		m_copydata(mb, 0, ETHER_HDR_LEN, (caddr_t)eh);
		m_adj(mb, ETHER_HDR_LEN);
		/* Insert a 4-byte VLAN tag into the data stream */
		eh->evl_proto = eh->evl_encap_proto;
		eh->evl_encap_proto = htons(ETHERTYPE_VLAN);
		eh->evl_tag = htons(mb->m_pkthdr.ether_vtag);
		/* Copy rest of header data, if any */
		m_copydata(mb, 0, ihs - ETHER_HDR_LEN, (caddr_t)(eh + 1));
		m_adj(mb, ihs - ETHER_HDR_LEN);
		/* Extend header by 4 bytes */
		ihs += ETHER_VLAN_ENCAP_LEN;
		wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);
	} else {
		/* check if the inline header size is too big */
		if (unlikely(ihs > sq->max_inline)) {
			if (unlikely(mb->m_pkthdr.csum_flags & CSUM_TSO)) {
				err = EINVAL;
				goto tx_drop;
			}
			ihs = sq->max_inline;
		}
		m_copydata(mb, 0, ihs, wqe->eth.inline_hdr_start);
		m_adj(mb, ihs);
		wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);
	}

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (ihs > sizeof(wqe->eth.inline_hdr_start)) {
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(wqe->eth.inline_hdr_start),
		    MLX5_SEND_WQE_DS);
	}
	dseg = ((struct mlx5_wqe_data_seg *)&wqe->ctrl) + ds_cnt;

	err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err == EFBIG) {
		/* Update statistics */
		sq->stats.defragged++;
		/* Too many mbuf fragments */
		mb = m_defrag(*mbp, M_NOWAIT);
		if (mb == NULL) {
			mb = *mbp;
			goto tx_drop;
		}
		/* Try again */
		err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
		    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	}
	/* Catch errors */
	if (err != 0)
		goto tx_drop;

	/* Make sure all mbuf data, if any, is written to RAM */
	if (nsegs != 0) {
		bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map,
		    BUS_DMASYNC_PREWRITE);
	} else {
		/* All data was inlined, free the mbuf. */
		bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);
		m_freem(mb);
		mb = NULL;
	}

	for (x = 0; x != nsegs; x++) {
		if (segs[x].ds_len == 0)
			continue;
		dseg->addr = cpu_to_be64((uint64_t)segs[x].ds_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32((uint32_t)segs[x].ds_len);
		dseg++;
	}

	ds_cnt = (dseg - ((struct mlx5_wqe_data_seg *)&wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	else
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	/* Store pointer to mbuf */
	sq->mbuf[pi].mbuf = mb;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;

	/* Count all traffic going out */
	sq->stats.packets++;
	sq->stats.bytes += sq->mbuf[pi].num_bytes;

	*mbp = NULL;	/* safety clear */
	return (0);

tx_drop:
	sq->stats.dropped++;
	*mbp = NULL;
	m_freem(mb);
	return (err);
}
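/*
 * Reclaim completed transmit work from the completion queue, freeing
 * the associated mbufs and DMA mappings. Each CQE completes a batch of
 * "cev_factor" send operations, because completion events are
 * interleaved by mlx5e_do_send_cqe().
 */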
static void
mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
{
	u16 sqcc;

	/*
	 * sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	while (budget > 0) {
		struct mlx5_cqe64 *cqe;
		struct mbuf *mb;
		u16 x;
		u16 ci;

		cqe = mlx5e_get_cqe(&sq->cq);
		if (!cqe)
			break;

		mlx5_cqwq_pop(&sq->cq.wq);

		/* update budget according to the event factor */
		budget -= sq->cev_factor;

		for (x = 0; x != sq->cev_factor; x++) {
			ci = sqcc & sq->wq.sz_m1;
			mb = sq->mbuf[ci].mbuf;
			sq->mbuf[ci].mbuf = NULL;

			if (mb == NULL) {
				if (sq->mbuf[ci].num_bytes == 0) {
					/* NOP */
					sq->stats.nop++;
				}
			} else {
				bus_dmamap_sync(sq->dma_tag, sq->mbuf[ci].dma_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);

				/* Free transmitted mbuf */
				m_freem(mb);
			}
			sqcc += sq->mbuf[ci].num_wqebbs;
		}
	}

	mlx5_cqwq_update_db_record(&sq->cq.wq);

	/* Ensure cq space is freed before enabling more cqes */
	atomic_thread_fence_rel();

	sq->cc = sqcc;
}

/*
 * Transmit an mbuf on a send queue which is already locked; writes the
 * doorbell when needed and manages the completion event timer.
 */
static int
mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
{
	int err = 0;

	if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    READ_ONCE(sq->running) == 0)) {
		m_freem(mb);
		return (ENETDOWN);
	}

	/* Do transmit */
	if (mlx5e_sq_xmit(sq, &mb) != 0) {
		/* NOTE: m_freem() is NULL safe */
		m_freem(mb);
		err = ENOBUFS;
	}

	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
	}

	/*
	 * Check if we need to start the event timer which flushes the
	 * transmit ring on timeout:
	 */
	if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
	    sq->cev_factor != 1)) {
		/* start the timer */
		mlx5e_sq_cev_timeout(sq);
	} else {
		/* don't send NOPs yet */
		sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	}
	return (err);
}
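/*
 * Network stack entry point for transmitting a single mbuf; selects a
 * send queue based on VLAN priority and flow hash, then transmits
 * under the queue lock.
 */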
int
mlx5e_xmit(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_sq *sq;
	int ret;

	sq = mlx5e_select_queue(ifp, mb);
	if (unlikely(sq == NULL)) {
		/* Invalid send queue */
		m_freem(mb);
		return (ENXIO);
	}

	mtx_lock(&sq->lock);
	ret = mlx5e_xmit_locked(ifp, sq, mb);
	mtx_unlock(&sq->lock);

	return (ret);
}

/*
 * Completion callback for the transmit CQ: reclaims finished transmit
 * work and re-arms the completion queue.
 */
void
mlx5e_tx_cq_comp(struct mlx5_core_cq *mcq)
{
	struct mlx5e_sq *sq = container_of(mcq, struct mlx5e_sq, cq.mcq);

	mtx_lock(&sq->comp_lock);
	mlx5e_poll_tx_cq(sq, MLX5E_BUDGET_MAX);
	mlx5e_cq_arm(&sq->cq, MLX5_GET_DOORBELL_LOCK(&sq->priv->doorbell_lock));
	mtx_unlock(&sq->comp_lock);
}