mlx5_en_tx.c revision 341981
/*-
 * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c 341981 2018-12-12 13:12:30Z hselasky $
 */

#include "en.h"
#include <machine/atomic.h>

static inline bool
mlx5e_do_send_cqe(struct mlx5e_sq *sq)
{
	sq->cev_counter++;
	/* interleave the CQEs */
	if (sq->cev_counter >= sq->cev_factor) {
		sq->cev_counter = 0;
		return (1);
	}
	return (0);
}

void
mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt)
{
	u16 pi = sq->pc & sq->wq.sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	else
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;
}

#if (__FreeBSD_version >= 1100000)
static uint32_t mlx5e_hash_value;

static void
mlx5e_hash_init(void *arg)
{
	mlx5e_hash_value = m_ether_tcpip_hash_init();
}

/* Make the kernel call mlx5e_hash_init() after the random stack has finished initializing */
SYSINIT(mlx5e_hash_init, SI_SUB_RANDOM, SI_ORDER_ANY, &mlx5e_hash_init, NULL);
#endif

static struct mlx5e_sq *
mlx5e_select_queue(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_priv *priv = ifp->if_softc;
	struct mlx5e_sq *sq;
	u32 ch;
	u32 tc;

	/* obtain VLAN information if present */
	if (mb->m_flags & M_VLANTAG) {
		tc = (mb->m_pkthdr.ether_vtag >> 13);
		if (tc >= priv->num_tc)
			tc = priv->default_vlan_prio;
	} else {
		tc = priv->default_vlan_prio;
	}

	ch = priv->params.num_channels;

	/* check if flowid is set */
	if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE) {
#ifdef RSS
		u32 temp;

		if (rss_hash2bucket(mb->m_pkthdr.flowid,
		    M_HASHTYPE_GET(mb), &temp) == 0)
			ch = temp % ch;
		else
#endif
			ch = (mb->m_pkthdr.flowid % 128) % ch;
	} else {
#if (__FreeBSD_version >= 1100000)
		ch = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 |
		    MBUF_HASHFLAG_L4, mb, mlx5e_hash_value) % ch;
#else
		/*
		 * m_ether_tcpip_hash not present in stable, so just
		 * throw unhashed mbufs on queue 0
		 */
		ch = 0;
#endif
	}

	/* check if send queue is running */
	sq = &priv->channel[ch].sq[tc];
	if (likely(READ_ONCE(sq->running) != 0))
		return (sq);
	return (NULL);
}
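
/*
 * Compute how many packet header bytes must be inlined into the WQE
 * when only an L2 inline is required. For IPv4 and IPv6 frames the
 * first four bytes past the Ethernet header are inlined as well, so
 * that the TOS or traffic class field reaches the hardware; without
 * them the send queue may stall. When the header cannot be parsed,
 * fall back to inlining up to the queue's max_inline limit.
 */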
static inline u16
mlx5e_get_l2_header_size(struct mlx5e_sq *sq, struct mbuf *mb)
{
	struct ether_vlan_header *eh;
	uint16_t eth_type;
	int min_inline;

	eh = mtod(mb, struct ether_vlan_header *);
	if (unlikely(mb->m_len < ETHER_HDR_LEN)) {
		goto max_inline;
	} else if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		if (unlikely(mb->m_len < (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)))
			goto max_inline;
		eth_type = ntohs(eh->evl_proto);
		min_inline = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		min_inline = ETHER_HDR_LEN;
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		/*
		 * Make sure the TOS(IPv4) or traffic class(IPv6)
		 * field gets inlined. Else the SQ may stall.
		 */
		min_inline += 4;
		break;
	default:
		goto max_inline;
	}

	/*
	 * m_copydata() will be used on the remaining header which
	 * does not need to reside within the first m_len bytes of
	 * data:
	 */
	if (mb->m_pkthdr.len < min_inline)
		goto max_inline;
	return (min_inline);

max_inline:
	return (MIN(mb->m_pkthdr.len, sq->max_inline));
}

static int
mlx5e_get_full_header_size(struct mbuf *mb)
{
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	struct ip *ip;
	int ip_hlen, tcp_hlen;
	struct ip6_hdr *ip6;
	uint16_t eth_type;
	int eth_hdr_len;

	eh = mtod(mb, struct ether_vlan_header *);
	if (mb->m_len < ETHER_HDR_LEN)
		return (0);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		if (mb->m_len < (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN))
			return (0);
		eth_type = ntohs(eh->evl_proto);
		eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		eth_hdr_len = ETHER_HDR_LEN;
	}
	switch (eth_type) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip))
			return (0);
		switch (ip->ip_p) {
		case IPPROTO_TCP:
			ip_hlen = ip->ip_hl << 2;
			eth_hdr_len += ip_hlen;
			break;
		case IPPROTO_UDP:
			ip_hlen = ip->ip_hl << 2;
			eth_hdr_len += ip_hlen + 8;
			goto done;
		default:
			return (0);
		}
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip6))
			return (0);
		switch (ip6->ip6_nxt) {
		case IPPROTO_TCP:
			eth_hdr_len += sizeof(*ip6);
			break;
		case IPPROTO_UDP:
			eth_hdr_len += sizeof(*ip6) + 8;
			goto done;
		default:
			return (0);
		}
		break;
	default:
		return (0);
	}
	if (mb->m_len < eth_hdr_len + sizeof(*th))
		return (0);
	th = (struct tcphdr *)(mb->m_data + eth_hdr_len);
	tcp_hlen = th->th_off << 2;
	eth_hdr_len += tcp_hlen;
done:
	/*
	 * m_copydata() will be used on the remaining header which
	 * does not need to reside within the first m_len bytes of
	 * data:
	 */
	if (mb->m_pkthdr.len < eth_hdr_len)
		return (0);
	return (eth_hdr_len);
}
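
/*
 * Transmit a single mbuf on the given send queue. On success the
 * mbuf is consumed and *mbp is cleared. When the ring has no room
 * (ENOBUFS/ENOMEM) the mbuf is left untouched so the caller can
 * dispose of it; on any other error it is dropped and freed here and
 * *mbp is cleared. The caller holds sq->lock and writes the doorbell
 * record afterwards.
 */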
static int
mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
{
	bus_dma_segment_t segs[MLX5E_MAX_TX_MBUF_FRAGS];
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe *wqe;
	struct ifnet *ifp;
	int nsegs;
	int err;
	int x;
	struct mbuf *mb = *mbp;
	u16 ds_cnt;
	u16 ihs;
	u16 pi;
	u8 opcode;

	/* Return ENOBUFS if the queue is full */
	if (unlikely(!mlx5e_sq_has_room_for(sq, 2 * MLX5_SEND_WQE_MAX_WQEBBS)))
		return (ENOBUFS);

	/* Align SQ edge with NOPs to avoid WQE wrap around */
	pi = ((~sq->pc) & sq->wq.sz_m1);
	if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
		/* Send one multi NOP message instead of many */
		mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS);
		pi = ((~sq->pc) & sq->wq.sz_m1);
		if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1))
			return (ENOMEM);
	}

	/* Setup local variables */
	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
	ifp = sq->ifp;

	memset(wqe, 0, sizeof(*wqe));

	/* Send a copy of the frame to the BPF listener, if any */
	if (ifp != NULL && ifp->if_bpf != NULL)
		ETHER_BPF_MTAP(ifp, mb);

	if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L3_CSUM;
	}
	if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L4_CSUM;
	}
	if (wqe->eth.cs_flags == 0) {
		sq->stats.csum_offload_none++;
	}
	if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
		u32 payload_len;
		u32 mss = mb->m_pkthdr.tso_segsz;
		u32 num_pkts;

		wqe->eth.mss = cpu_to_be16(mss);
		opcode = MLX5_OPCODE_LSO;
		ihs = mlx5e_get_full_header_size(mb);
		if (unlikely(ihs == 0)) {
			err = EINVAL;
			goto tx_drop;
		}
		payload_len = mb->m_pkthdr.len - ihs;
		if (payload_len == 0)
			num_pkts = 1;
		else
			num_pkts = DIV_ROUND_UP(payload_len, mss);
		sq->mbuf[pi].num_bytes = payload_len + (num_pkts * ihs);

		sq->stats.tso_packets++;
		sq->stats.tso_bytes += payload_len;
	} else {
		opcode = MLX5_OPCODE_SEND;

		switch (sq->min_inline_mode) {
		case MLX5_INLINE_MODE_IP:
		case MLX5_INLINE_MODE_TCP_UDP:
			ihs = mlx5e_get_full_header_size(mb);
			if (unlikely(ihs == 0))
				ihs = mlx5e_get_l2_header_size(sq, mb);
			break;
		case MLX5_INLINE_MODE_L2:
			ihs = mlx5e_get_l2_header_size(sq, mb);
			break;
		case MLX5_INLINE_MODE_NONE:
			/* FALLTHROUGH */
		default:
			if ((mb->m_flags & M_VLANTAG) != 0 &&
			    (sq->min_insert_caps & MLX5E_INSERT_VLAN) != 0) {
				/* inlining VLAN data is not required */
				wqe->eth.vlan_cmd = htons(0x8000); /* bit 0 CVLAN */
				wqe->eth.vlan_hdr = htons(mb->m_pkthdr.ether_vtag);
				ihs = 0;
			} else if ((mb->m_flags & M_VLANTAG) == 0 &&
			    (sq->min_insert_caps & MLX5E_INSERT_NON_VLAN) != 0) {
				/* inlining non-VLAN data is not required */
				ihs = 0;
			} else {
				/* we are forced to inline the L2 header, if any */
				ihs = mlx5e_get_l2_header_size(sq, mb);
			}
			break;
		}
		sq->mbuf[pi].num_bytes = max_t(unsigned int,
		    mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
	}

	if (likely(ihs == 0)) {
		/* nothing to inline */
	} else if (unlikely(ihs > sq->max_inline)) {
		/* inline header size is too big */
		err = EINVAL;
		goto tx_drop;
	} else if ((mb->m_flags & M_VLANTAG) != 0) {
		struct ether_vlan_header *eh = (struct ether_vlan_header *)
		    wqe->eth.inline_hdr_start;

		/* Range checks */
		if (unlikely(ihs > (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN)))
			ihs = (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN);
		else if (unlikely(ihs < ETHER_HDR_LEN)) {
			err = EINVAL;
			goto tx_drop;
		}
		m_copydata(mb, 0, ETHER_HDR_LEN, (caddr_t)eh);
		m_adj(mb, ETHER_HDR_LEN);
		/* Insert 4-byte VLAN tag into data stream */
		eh->evl_proto = eh->evl_encap_proto;
		eh->evl_encap_proto = htons(ETHERTYPE_VLAN);
		eh->evl_tag = htons(mb->m_pkthdr.ether_vtag);
		/* Copy rest of header data, if any */
		m_copydata(mb, 0, ihs - ETHER_HDR_LEN, (caddr_t)(eh + 1));
		m_adj(mb, ihs - ETHER_HDR_LEN);
		/* Extend header by 4 bytes */
		ihs += ETHER_VLAN_ENCAP_LEN;
		wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);
	} else {
		m_copydata(mb, 0, ihs, wqe->eth.inline_hdr_start);
		m_adj(mb, ihs);
		wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);
	}
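
	/*
	 * Compute the number of 16-byte data segments (DS units) already
	 * consumed by the control and ethernet segments, including any
	 * inline header bytes which extend past "inline_hdr_start", and
	 * point "dseg" at the first free slot following them.
	 */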
	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (ihs > sizeof(wqe->eth.inline_hdr_start)) {
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(wqe->eth.inline_hdr_start),
		    MLX5_SEND_WQE_DS);
	}
	dseg = ((struct mlx5_wqe_data_seg *)&wqe->ctrl) + ds_cnt;

	err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err == EFBIG) {
		/* Update statistics */
		sq->stats.defragged++;
		/* Too many mbuf fragments */
		mb = m_defrag(*mbp, M_NOWAIT);
		if (mb == NULL) {
			mb = *mbp;
			goto tx_drop;
		}
		/* Try again */
		err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
		    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	}
	/* Catch errors */
	if (err != 0)
		goto tx_drop;

	/* Make sure all mbuf data, if any, is written to RAM */
	if (nsegs != 0) {
		bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map,
		    BUS_DMASYNC_PREWRITE);
	} else {
		/* All data was inlined, free the mbuf. */
		bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);
		m_freem(mb);
		mb = NULL;
	}
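
	/*
	 * Write one scatter entry per DMA segment, skipping zero-length
	 * segments. Address, memory key and byte count are stored in
	 * big-endian form as the hardware expects.
	 */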
	for (x = 0; x != nsegs; x++) {
		if (segs[x].ds_len == 0)
			continue;
		dseg->addr = cpu_to_be64((uint64_t)segs[x].ds_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32((uint32_t)segs[x].ds_len);
		dseg++;
	}

	ds_cnt = (dseg - ((struct mlx5_wqe_data_seg *)&wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	else
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	/* Store pointer to mbuf */
	sq->mbuf[pi].mbuf = mb;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;

	/* Count all traffic going out */
	sq->stats.packets++;
	sq->stats.bytes += sq->mbuf[pi].num_bytes;

	*mbp = NULL;	/* safety clear */
	return (0);

tx_drop:
	sq->stats.dropped++;
	*mbp = NULL;
	m_freem(mb);
	return (err);
}
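
/*
 * Reclaim completed transmit work. Completion events are interleaved
 * (see mlx5e_do_send_cqe), so each CQE accounts for "cev_factor"
 * transmitted work requests; for example, with a cev_factor of four a
 * single CQE releases four send ring entries.
 */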
static void
mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
{
	u16 sqcc;

	/*
	 * sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	while (budget > 0) {
		struct mlx5_cqe64 *cqe;
		struct mbuf *mb;
		u16 x;
		u16 ci;

		cqe = mlx5e_get_cqe(&sq->cq);
		if (!cqe)
			break;

		mlx5_cqwq_pop(&sq->cq.wq);

		/* update budget according to the event factor */
		budget -= sq->cev_factor;

		for (x = 0; x != sq->cev_factor; x++) {
			ci = sqcc & sq->wq.sz_m1;
			mb = sq->mbuf[ci].mbuf;
			sq->mbuf[ci].mbuf = NULL;	/* Safety clear */

			if (mb == NULL) {
				if (sq->mbuf[ci].num_bytes == 0) {
					/* NOP */
					sq->stats.nop++;
				}
			} else {
				bus_dmamap_sync(sq->dma_tag, sq->mbuf[ci].dma_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);

				/* Free transmitted mbuf */
				m_freem(mb);
			}
			sqcc += sq->mbuf[ci].num_wqebbs;
		}
	}

	mlx5_cqwq_update_db_record(&sq->cq.wq);

	/* Ensure cq space is freed before enabling more cqes */
	atomic_thread_fence_rel();

	sq->cc = sqcc;
}

static int
mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
{
	int err = 0;

	if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    READ_ONCE(sq->running) == 0)) {
		m_freem(mb);
		return (ENETDOWN);
	}

	/* Do transmit */
	if (mlx5e_sq_xmit(sq, &mb) != 0) {
		/* NOTE: m_freem() is NULL safe */
		m_freem(mb);
		err = ENOBUFS;
	}

	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
	}

	/*
	 * Check if we need to start the event timer which flushes the
	 * transmit ring on timeout:
	 */
	if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
	    sq->cev_factor != 1)) {
		/* start the timer */
		mlx5e_sq_cev_timeout(sq);
	} else {
		/* don't send NOPs yet */
		sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	}
	return (err);
}

int
mlx5e_xmit(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_sq *sq;
	int ret;

	sq = mlx5e_select_queue(ifp, mb);
	if (unlikely(sq == NULL)) {
		/* Invalid send queue */
		m_freem(mb);
		return (ENXIO);
	}

	mtx_lock(&sq->lock);
	ret = mlx5e_xmit_locked(ifp, sq, mb);
	mtx_unlock(&sq->lock);

	return (ret);
}

void
mlx5e_tx_cq_comp(struct mlx5_core_cq *mcq)
{
	struct mlx5e_sq *sq = container_of(mcq, struct mlx5e_sq, cq.mcq);

	mtx_lock(&sq->comp_lock);
	mlx5e_poll_tx_cq(sq, MLX5E_BUDGET_MAX);
	mlx5e_cq_arm(&sq->cq, MLX5_GET_DOORBELL_LOCK(&sq->priv->doorbell_lock));
	mtx_unlock(&sq->comp_lock);
}