mlx5_en_tx.c revision 341972
/*-
 * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c 341972 2018-12-12 13:00:56Z hselasky $
 */

#include "en.h"
#include <machine/atomic.h>

/*
 * Request a completion event for at most one out of every "cev_factor"
 * work queue entries, to moderate the rate of transmit completions.
 */
static inline bool
mlx5e_do_send_cqe(struct mlx5e_sq *sq)
{
        sq->cev_counter++;
        /* interleave the CQEs */
        if (sq->cev_counter >= sq->cev_factor) {
                sq->cev_counter = 0;
                return (1);
        }
        return (0);
}

void
mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt)
{
        u16 pi = sq->pc & sq->wq.sz_m1;
        struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

        memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));

        wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
        wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
        if (mlx5e_do_send_cqe(sq))
                wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
        else
                wqe->ctrl.fm_ce_se = 0;

        /* Copy data for doorbell */
        memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

        sq->mbuf[pi].mbuf = NULL;
        sq->mbuf[pi].num_bytes = 0;
        sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
        sq->pc += sq->mbuf[pi].num_wqebbs;
}

#if (__FreeBSD_version >= 1100000)
static uint32_t mlx5e_hash_value;

static void
mlx5e_hash_init(void *arg)
{
        mlx5e_hash_value = m_ether_tcpip_hash_init();
}

/* Make the kernel call mlx5e_hash_init() after the random stack has finished initializing */
SYSINIT(mlx5e_hash_init, SI_SUB_RANDOM, SI_ORDER_ANY, &mlx5e_hash_init, NULL);
#endif
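
/*
 * Select a send queue for the given outgoing mbuf. The traffic class
 * is taken from the VLAN priority bits, when present, and the channel
 * is derived from the mbuf flow ID, when valid, or else from a hash
 * over the L3/L4 header fields. Returns NULL when no usable send
 * queue exists, in which case the caller must free the mbuf.
 */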
static struct mlx5e_sq *
mlx5e_select_queue(struct ifnet *ifp, struct mbuf *mb)
{
        struct mlx5e_priv *priv = ifp->if_softc;
        struct mlx5e_channel * volatile *ppch;
        struct mlx5e_channel *pch;
        u32 ch;
        u32 tc;

        ppch = priv->channel;

        /* check if channels are successfully opened */
        if (unlikely(ppch == NULL))
                return (NULL);

        /* obtain VLAN information if present */
        if (mb->m_flags & M_VLANTAG) {
                tc = (mb->m_pkthdr.ether_vtag >> 13);
                if (tc >= priv->num_tc)
                        tc = priv->default_vlan_prio;
        } else {
                tc = priv->default_vlan_prio;
        }

        ch = priv->params.num_channels;

        /* check if flowid is set */
        if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE) {
#ifdef RSS
                u32 temp;

                if (rss_hash2bucket(mb->m_pkthdr.flowid,
                    M_HASHTYPE_GET(mb), &temp) == 0)
                        ch = temp % ch;
                else
#endif
                        ch = (mb->m_pkthdr.flowid % 128) % ch;
        } else {
#if (__FreeBSD_version >= 1100000)
                ch = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 |
                    MBUF_HASHFLAG_L4, mb, mlx5e_hash_value) % ch;
#else
                /*
                 * m_ether_tcpip_hash not present in stable, so just
                 * throw unhashed mbufs on queue 0
                 */
                ch = 0;
#endif
        }

        /* check if channel is allocated and not stopped */
        pch = ppch[ch];
        if (likely(pch != NULL && pch->sq[tc].stopped == 0))
                return (&pch->sq[tc]);
        return (NULL);
}

static inline u16
mlx5e_get_l2_header_size(struct mlx5e_sq *sq, struct mbuf *mb)
{
        struct ether_vlan_header *eh;
        uint16_t eth_type;
        int min_inline;

        eh = mtod(mb, struct ether_vlan_header *);
        if (unlikely(mb->m_len < ETHER_HDR_LEN)) {
                goto max_inline;
        } else if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                if (unlikely(mb->m_len < (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)))
                        goto max_inline;
                eth_type = ntohs(eh->evl_proto);
                min_inline = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
        } else {
                eth_type = ntohs(eh->evl_encap_proto);
                min_inline = ETHER_HDR_LEN;
        }

        switch (eth_type) {
        case ETHERTYPE_IP:
        case ETHERTYPE_IPV6:
                /*
                 * Make sure the TOS(IPv4) or traffic class(IPv6)
                 * field gets inlined. Else the SQ may stall.
                 */
                min_inline += 4;
                break;
        default:
                goto max_inline;
        }

        /*
         * m_copydata() will be used on the remaining header which
         * does not need to reside within the first m_len bytes of
         * data:
         */
        if (mb->m_pkthdr.len < min_inline)
                goto max_inline;
        return (min_inline);

max_inline:
        return (MIN(mb->m_pkthdr.len, sq->max_inline));
}

static int
mlx5e_get_full_header_size(struct mbuf *mb)
{
        struct ether_vlan_header *eh;
        struct tcphdr *th;
        struct ip *ip;
        int ip_hlen, tcp_hlen;
        struct ip6_hdr *ip6;
        uint16_t eth_type;
        int eth_hdr_len;

        eh = mtod(mb, struct ether_vlan_header *);
        if (mb->m_len < ETHER_HDR_LEN)
                return (0);
        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                if (mb->m_len < (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN))
                        return (0);
                eth_type = ntohs(eh->evl_proto);
                eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
        } else {
                eth_type = ntohs(eh->evl_encap_proto);
                eth_hdr_len = ETHER_HDR_LEN;
        }
        switch (eth_type) {
        case ETHERTYPE_IP:
                ip = (struct ip *)(mb->m_data + eth_hdr_len);
                if (mb->m_len < eth_hdr_len + sizeof(*ip))
                        return (0);
                switch (ip->ip_p) {
                case IPPROTO_TCP:
                        ip_hlen = ip->ip_hl << 2;
                        eth_hdr_len += ip_hlen;
                        break;
                case IPPROTO_UDP:
                        ip_hlen = ip->ip_hl << 2;
                        eth_hdr_len += ip_hlen + 8;
                        goto done;
                default:
                        return (0);
                }
                break;
        case ETHERTYPE_IPV6:
                ip6 = (struct ip6_hdr *)(mb->m_data + eth_hdr_len);
                if (mb->m_len < eth_hdr_len + sizeof(*ip6))
                        return (0);
                switch (ip6->ip6_nxt) {
                case IPPROTO_TCP:
                        eth_hdr_len += sizeof(*ip6);
                        break;
                case IPPROTO_UDP:
                        eth_hdr_len += sizeof(*ip6) + 8;
                        goto done;
                default:
                        return (0);
                }
                break;
        default:
                return (0);
        }
        if (mb->m_len < eth_hdr_len + sizeof(*th))
                return (0);
        th = (struct tcphdr *)(mb->m_data + eth_hdr_len);
        tcp_hlen = th->th_off << 2;
        eth_hdr_len += tcp_hlen;
done:
        /*
         * m_copydata() will be used on the remaining header which
         * does not need to reside within the first m_len bytes of
         * data:
         */
        if (mb->m_pkthdr.len < eth_hdr_len)
                return (0);
        return (eth_hdr_len);
}
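
/*
 * Transmit a single mbuf on the given send queue: builds the work
 * queue entry, inlines or DMA-maps the packet data and advances the
 * producer counter. *mbp is cleared whenever the mbuf has been
 * consumed; otherwise ownership of the mbuf stays with the caller.
 */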
static int
mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
{
        bus_dma_segment_t segs[MLX5E_MAX_TX_MBUF_FRAGS];
        struct mlx5_wqe_data_seg *dseg;
        struct mlx5e_tx_wqe *wqe;
        struct ifnet *ifp;
        int nsegs;
        int err;
        int x;
        struct mbuf *mb = *mbp;
        u16 ds_cnt;
        u16 ihs;
        u16 pi;
        u8 opcode;

        /* Return ENOBUFS if the queue is full */
        if (unlikely(!mlx5e_sq_has_room_for(sq, 2 * MLX5_SEND_WQE_MAX_WQEBBS)))
                return (ENOBUFS);

        /* Align SQ edge with NOPs to avoid WQE wrap around */
        pi = ((~sq->pc) & sq->wq.sz_m1);
        if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
                /* Send one multi NOP message instead of many */
                mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS);
                pi = ((~sq->pc) & sq->wq.sz_m1);
                if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1))
                        return (ENOMEM);
        }

        /* Setup local variables */
        pi = sq->pc & sq->wq.sz_m1;
        wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
        ifp = sq->ifp;

        memset(wqe, 0, sizeof(*wqe));

        /* Send a copy of the frame to the BPF listener, if any */
        if (ifp != NULL && ifp->if_bpf != NULL)
                ETHER_BPF_MTAP(ifp, mb);

        if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) {
                wqe->eth.cs_flags |= MLX5_ETH_WQE_L3_CSUM;
        }
        if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 |
            CSUM_TCP_IPV6 | CSUM_TSO)) {
                wqe->eth.cs_flags |= MLX5_ETH_WQE_L4_CSUM;
        }
        if (wqe->eth.cs_flags == 0) {
                sq->stats.csum_offload_none++;
        }
        if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
                u32 payload_len;
                u32 mss = mb->m_pkthdr.tso_segsz;
                u32 num_pkts;

                wqe->eth.mss = cpu_to_be16(mss);
                opcode = MLX5_OPCODE_LSO;
                ihs = mlx5e_get_full_header_size(mb);
                if (unlikely(ihs == 0)) {
                        err = EINVAL;
                        goto tx_drop;
                }
                payload_len = mb->m_pkthdr.len - ihs;
                if (payload_len == 0)
                        num_pkts = 1;
                else
                        num_pkts = DIV_ROUND_UP(payload_len, mss);
                sq->mbuf[pi].num_bytes = payload_len + (num_pkts * ihs);

                sq->stats.tso_packets++;
                sq->stats.tso_bytes += payload_len;
        } else {
                opcode = MLX5_OPCODE_SEND;

                switch (sq->min_inline_mode) {
                case MLX5_INLINE_MODE_IP:
                case MLX5_INLINE_MODE_TCP_UDP:
                        ihs = mlx5e_get_full_header_size(mb);
                        if (unlikely(ihs == 0))
                                ihs = mlx5e_get_l2_header_size(sq, mb);
                        break;
                case MLX5_INLINE_MODE_L2:
                        ihs = mlx5e_get_l2_header_size(sq, mb);
                        break;
                case MLX5_INLINE_MODE_NONE:
                        /* FALLTHROUGH */
                default:
                        if ((mb->m_flags & M_VLANTAG) != 0 &&
                            (sq->min_insert_caps & MLX5E_INSERT_VLAN) != 0) {
                                /* inlining VLAN data is not required */
                                wqe->eth.vlan_cmd = htons(0x8000); /* bit 0 CVLAN */
                                wqe->eth.vlan_hdr = htons(mb->m_pkthdr.ether_vtag);
                                ihs = 0;
                        } else if ((mb->m_flags & M_VLANTAG) == 0 &&
                            (sq->min_insert_caps & MLX5E_INSERT_NON_VLAN) != 0) {
                                /* inlining non-VLAN data is not required */
                                ihs = 0;
                        } else {
                                /* we are forced to inline the L2 header, if any */
                                ihs = mlx5e_get_l2_header_size(sq, mb);
                        }
                        break;
                }
                sq->mbuf[pi].num_bytes = max_t(unsigned int,
                    mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
        }
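
        /*
         * Copy the computed inline header, if any, into the WQE. When a
         * VLAN tag must be inserted in software, the Ethernet header is
         * rebuilt inside the WQE with the 4-byte 802.1Q encapsulation
         * placed after the source MAC address.
         */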
        if (likely(ihs == 0)) {
                /* nothing to inline */
        } else if (unlikely(ihs > sq->max_inline)) {
                /* inline header size is too big */
                err = EINVAL;
                goto tx_drop;
        } else if ((mb->m_flags & M_VLANTAG) != 0) {
                struct ether_vlan_header *eh = (struct ether_vlan_header *)
                    wqe->eth.inline_hdr_start;

                /* Range checks */
                if (unlikely(ihs > (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN)))
                        ihs = (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN);
                else if (unlikely(ihs < ETHER_HDR_LEN)) {
                        err = EINVAL;
                        goto tx_drop;
                }
                m_copydata(mb, 0, ETHER_HDR_LEN, (caddr_t)eh);
                m_adj(mb, ETHER_HDR_LEN);
                /* Insert 4 bytes VLAN tag into data stream */
                eh->evl_proto = eh->evl_encap_proto;
                eh->evl_encap_proto = htons(ETHERTYPE_VLAN);
                eh->evl_tag = htons(mb->m_pkthdr.ether_vtag);
                /* Copy rest of header data, if any */
                m_copydata(mb, 0, ihs - ETHER_HDR_LEN, (caddr_t)(eh + 1));
                m_adj(mb, ihs - ETHER_HDR_LEN);
                /* Extend header by 4 bytes */
                ihs += ETHER_VLAN_ENCAP_LEN;
                wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);
        } else {
                m_copydata(mb, 0, ihs, wqe->eth.inline_hdr_start);
                m_adj(mb, ihs);
                wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);
        }

        ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
        if (ihs > sizeof(wqe->eth.inline_hdr_start)) {
                ds_cnt += DIV_ROUND_UP(ihs - sizeof(wqe->eth.inline_hdr_start),
                    MLX5_SEND_WQE_DS);
        }
        dseg = ((struct mlx5_wqe_data_seg *)&wqe->ctrl) + ds_cnt;

        err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
            mb, segs, &nsegs, BUS_DMA_NOWAIT);
        if (err == EFBIG) {
                /* Update statistics */
                sq->stats.defragged++;
                /* Too many mbuf fragments */
                mb = m_defrag(*mbp, M_NOWAIT);
                if (mb == NULL) {
                        mb = *mbp;
                        goto tx_drop;
                }
                /* Try again */
                err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
                    mb, segs, &nsegs, BUS_DMA_NOWAIT);
        }
        /* Catch errors */
        if (err != 0)
                goto tx_drop;

        /* Make sure all mbuf data, if any, is written to RAM */
        if (nsegs != 0) {
                bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map,
                    BUS_DMASYNC_PREWRITE);
        } else {
                /* All data was inlined, free the mbuf. */
                bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);
                m_freem(mb);
                mb = NULL;
        }
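
        /*
         * Append one data segment per DMA segment, skipping zero-length
         * entries, and recompute the total DS count from the distance
         * between the control segment and the last data segment written.
         */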
        for (x = 0; x != nsegs; x++) {
                if (segs[x].ds_len == 0)
                        continue;
                dseg->addr = cpu_to_be64((uint64_t)segs[x].ds_addr);
                dseg->lkey = sq->mkey_be;
                dseg->byte_count = cpu_to_be32((uint32_t)segs[x].ds_len);
                dseg++;
        }

        ds_cnt = (dseg - ((struct mlx5_wqe_data_seg *)&wqe->ctrl));

        wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
        wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
        if (mlx5e_do_send_cqe(sq))
                wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
        else
                wqe->ctrl.fm_ce_se = 0;

        /* Copy data for doorbell */
        memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

        /* Store pointer to mbuf */
        sq->mbuf[pi].mbuf = mb;
        sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
        sq->pc += sq->mbuf[pi].num_wqebbs;

        sq->stats.packets++;
        *mbp = NULL;    /* safety clear */
        return (0);

tx_drop:
        sq->stats.dropped++;
        *mbp = NULL;
        m_freem(mb);
        return (err);
}

static void
mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
{
        u16 sqcc;

        /*
         * sq->cc must be updated only after mlx5_cqwq_update_db_record(),
         * otherwise a cq overrun may occur
         */
        sqcc = sq->cc;

        while (budget > 0) {
                struct mlx5_cqe64 *cqe;
                struct mbuf *mb;
                u16 x;
                u16 ci;

                cqe = mlx5e_get_cqe(&sq->cq);
                if (!cqe)
                        break;

                mlx5_cqwq_pop(&sq->cq.wq);

                /* update budget according to the event factor */
                budget -= sq->cev_factor;

                for (x = 0; x != sq->cev_factor; x++) {
                        ci = sqcc & sq->wq.sz_m1;
                        mb = sq->mbuf[ci].mbuf;
                        sq->mbuf[ci].mbuf = NULL;       /* Safety clear */

                        if (mb == NULL) {
                                if (sq->mbuf[ci].num_bytes == 0) {
                                        /* NOP */
                                        sq->stats.nop++;
                                }
                        } else {
                                bus_dmamap_sync(sq->dma_tag, sq->mbuf[ci].dma_map,
                                    BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);

                                /* Free transmitted mbuf */
                                m_freem(mb);
                        }
                        sqcc += sq->mbuf[ci].num_wqebbs;
                }
        }

        mlx5_cqwq_update_db_record(&sq->cq.wq);

        /* Ensure cq space is freed before enabling more cqes */
        atomic_thread_fence_rel();

        sq->cc = sqcc;
}

static int
mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
{
        int err = 0;

        if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
            sq->stopped != 0)) {
                m_freem(mb);
                return (ENETDOWN);
        }

        /* Do transmit */
        if (mlx5e_sq_xmit(sq, &mb) != 0) {
                /* NOTE: m_freem() is NULL safe */
                m_freem(mb);
                err = ENOBUFS;
        }

        /* Check if we need to write the doorbell */
        if (likely(sq->doorbell.d64 != 0)) {
                mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
                sq->doorbell.d64 = 0;
        }

        /*
         * Check if we need to start the event timer which flushes the
         * transmit ring on timeout:
         */
        if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
            sq->cev_factor != 1)) {
                /* start the timer */
                mlx5e_sq_cev_timeout(sq);
        } else {
                /* don't send NOPs yet */
                sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
        }
        return (err);
}
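
/*
 * Top-level transmit routine: selects a send queue for the mbuf and
 * transmits it while holding the per-queue lock. The mbuf is always
 * consumed, either by the hardware or by m_freem() on failure.
 */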
int
mlx5e_xmit(struct ifnet *ifp, struct mbuf *mb)
{
        struct mlx5e_sq *sq;
        int ret;

        sq = mlx5e_select_queue(ifp, mb);
        if (unlikely(sq == NULL)) {
                /* Invalid send queue */
                m_freem(mb);
                return (ENXIO);
        }

        mtx_lock(&sq->lock);
        ret = mlx5e_xmit_locked(ifp, sq, mb);
        mtx_unlock(&sq->lock);

        return (ret);
}

void
mlx5e_tx_cq_comp(struct mlx5_core_cq *mcq)
{
        struct mlx5e_sq *sq = container_of(mcq, struct mlx5e_sq, cq.mcq);

        mtx_lock(&sq->comp_lock);
        mlx5e_poll_tx_cq(sq, MLX5E_BUDGET_MAX);
        mlx5e_cq_arm(&sq->cq, MLX5_GET_DOORBELL_LOCK(&sq->priv->doorbell_lock));
        mtx_unlock(&sq->comp_lock);
}