mlx5_en_tx.c revision 362306
/*-
 * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c 362306 2020-06-18 10:12:17Z hselasky $
 */

#include "en.h"
#include <machine/atomic.h>

static inline bool
mlx5e_do_send_cqe(struct mlx5e_sq *sq)
{
	sq->cev_counter++;
	/* interleave the CQEs */
	if (sq->cev_counter >= sq->cev_factor) {
		sq->cev_counter = 0;
		return (1);
	}
	return (0);
}

void
mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt)
{
	u16 pi = sq->pc & sq->wq.sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	else
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;
}

#if (__FreeBSD_version >= 1100000)
static uint32_t mlx5e_hash_value;

static void
mlx5e_hash_init(void *arg)
{
	mlx5e_hash_value = m_ether_tcpip_hash_init();
}

/* Make the kernel call mlx5e_hash_init() after the random stack has finished initializing */
SYSINIT(mlx5e_hash_init, SI_SUB_RANDOM, SI_ORDER_ANY, &mlx5e_hash_init, NULL);
#endif
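
/*
 * Select the send queue for an outgoing mbuf. The traffic class is
 * taken from the VLAN PCP bits (the top three bits of the VLAN tag)
 * when present, and the channel from the mbuf's flow ID hash, so
 * packets belonging to one flow stay on one transmit ring.
 */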
static struct mlx5e_sq *
mlx5e_select_queue(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_priv *priv = ifp->if_softc;
	struct mlx5e_sq *sq;
	u32 ch;
	u32 tc;

	/* obtain VLAN information if present */
	if (mb->m_flags & M_VLANTAG) {
		tc = (mb->m_pkthdr.ether_vtag >> 13);
		if (tc >= priv->num_tc)
			tc = priv->default_vlan_prio;
	} else {
		tc = priv->default_vlan_prio;
	}

	ch = priv->params.num_channels;

	/* check if flowid is set */
	if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE) {
#ifdef RSS
		u32 temp;

		if (rss_hash2bucket(mb->m_pkthdr.flowid,
		    M_HASHTYPE_GET(mb), &temp) == 0)
			ch = temp % ch;
		else
#endif
			ch = (mb->m_pkthdr.flowid % 128) % ch;
	} else {
#if (__FreeBSD_version >= 1100000)
		ch = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 |
		    MBUF_HASHFLAG_L4, mb, mlx5e_hash_value) % ch;
#else
		/*
		 * m_ether_tcpip_hash not present in stable, so just
		 * throw unhashed mbufs on queue 0
		 */
		ch = 0;
#endif
	}

	/* check if send queue is running */
	sq = &priv->channel[ch].sq[tc];
	if (likely(READ_ONCE(sq->running) != 0))
		return (sq);
	return (NULL);
}
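
/*
 * Return the number of L2 header bytes which must be inlined into
 * the WQE. For IPv4 and IPv6 packets the first four bytes of the IP
 * header are included so the TOS/traffic-class field is part of the
 * inline data; in all other cases this falls back to inlining
 * min(packet length, sq->max_inline) bytes.
 */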
static inline u16
mlx5e_get_l2_header_size(struct mlx5e_sq *sq, struct mbuf *mb)
{
	struct ether_vlan_header *eh;
	uint16_t eth_type;
	int min_inline;

	eh = mtod(mb, struct ether_vlan_header *);
	if (unlikely(mb->m_len < ETHER_HDR_LEN)) {
		goto max_inline;
	} else if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		if (unlikely(mb->m_len < (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)))
			goto max_inline;
		eth_type = ntohs(eh->evl_proto);
		min_inline = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		min_inline = ETHER_HDR_LEN;
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		/*
		 * Make sure the TOS (IPv4) or traffic class (IPv6)
		 * field gets inlined, else the SQ may stall.
		 */
		min_inline += 4;
		break;
	default:
		goto max_inline;
	}

	/*
	 * m_copydata() will be used on the remaining header which
	 * does not need to reside within the first m_len bytes of
	 * data:
	 */
	if (mb->m_pkthdr.len < min_inline)
		goto max_inline;
	return (min_inline);

max_inline:
	return (MIN(mb->m_pkthdr.len, sq->max_inline));
}

/*
 * This function parses IPv4 and IPv6 packets looking for TCP and UDP
 * headers.
 *
 * The return value indicates the number of bytes from the beginning
 * of the packet until the first byte after the TCP or UDP header. If
 * this function returns zero, the parsing failed.
 */
static int
mlx5e_get_full_header_size(const struct mbuf *mb)
{
	const struct ether_vlan_header *eh;
	const struct tcphdr *th;
	const struct ip *ip;
	int ip_hlen, tcp_hlen;
	const struct ip6_hdr *ip6;
	uint16_t eth_type;
	int eth_hdr_len;

	eh = mtod(mb, const struct ether_vlan_header *);
	if (mb->m_len < ETHER_HDR_LEN)
		return (0);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		if (mb->m_len < (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN))
			return (0);
		eth_type = ntohs(eh->evl_proto);
		eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		eth_hdr_len = ETHER_HDR_LEN;
	}
	switch (eth_type) {
	case ETHERTYPE_IP:
		ip = (const struct ip *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip))
			return (0);
		switch (ip->ip_p) {
		case IPPROTO_TCP:
			ip_hlen = ip->ip_hl << 2;
			eth_hdr_len += ip_hlen;
			break;
		case IPPROTO_UDP:
			ip_hlen = ip->ip_hl << 2;
			eth_hdr_len += ip_hlen + 8;	/* UDP header is 8 bytes */
			goto done;
		default:
			return (0);
		}
		break;
	case ETHERTYPE_IPV6:
		ip6 = (const struct ip6_hdr *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip6))
			return (0);
		switch (ip6->ip6_nxt) {
		case IPPROTO_TCP:
			eth_hdr_len += sizeof(*ip6);
			break;
		case IPPROTO_UDP:
			eth_hdr_len += sizeof(*ip6) + 8;	/* UDP header is 8 bytes */
			goto done;
		default:
			return (0);
		}
		break;
	default:
		return (0);
	}
	if (mb->m_len < eth_hdr_len + sizeof(*th))
		return (0);
	th = (const struct tcphdr *)(mb->m_data + eth_hdr_len);
	tcp_hlen = th->th_off << 2;
	eth_hdr_len += tcp_hlen;
done:
	/*
	 * m_copydata() will be used on the remaining header which
	 * does not need to reside within the first m_len bytes of
	 * data:
	 */
	if (mb->m_pkthdr.len < eth_hdr_len)
		return (0);
	return (eth_hdr_len);
}
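
/*
 * Build and queue a single send WQE for the given mbuf. This routine
 * pads the ring with NOPs when a WQE would wrap, sets up checksum
 * and TSO offload flags, inlines headers according to
 * sq->min_inline_mode, DMA-maps the remaining data into scatter
 * segments and finally stages the doorbell record which the caller
 * writes to the device.
 */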
static int
mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
{
	bus_dma_segment_t segs[MLX5E_MAX_TX_MBUF_FRAGS];
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe *wqe;
	struct ifnet *ifp;
	int nsegs;
	int err;
	int x;
	struct mbuf *mb = *mbp;
	u16 ds_cnt;
	u16 ihs;
	u16 pi;
	u8 opcode;

	/* Return ENOBUFS if the queue is full */
	if (unlikely(!mlx5e_sq_has_room_for(sq, 2 * MLX5_SEND_WQE_MAX_WQEBBS))) {
		sq->stats.enobuf++;
		return (ENOBUFS);
	}

	/* Align SQ edge with NOPs to avoid WQE wrap around */
	pi = ((~sq->pc) & sq->wq.sz_m1);
	if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
		/* Send one multi NOP message instead of many */
		mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS);
		pi = ((~sq->pc) & sq->wq.sz_m1);
		if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
			sq->stats.enobuf++;
			return (ENOMEM);
		}
	}

	/* Setup local variables */
	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
	ifp = sq->ifp;

	memset(wqe, 0, sizeof(*wqe));

	/* Send a copy of the frame to the BPF listener, if any */
	if (ifp != NULL && ifp->if_bpf != NULL)
		ETHER_BPF_MTAP(ifp, mb);

	if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L3_CSUM;
	}
	if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L4_CSUM;
	}
	if (wqe->eth.cs_flags == 0) {
		sq->stats.csum_offload_none++;
	}
	if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
		u32 payload_len;
		u32 mss = mb->m_pkthdr.tso_segsz;
		u32 num_pkts;

		wqe->eth.mss = cpu_to_be16(mss);
		opcode = MLX5_OPCODE_LSO;
		ihs = mlx5e_get_full_header_size(mb);
		if (unlikely(ihs == 0)) {
			err = EINVAL;
			goto tx_drop;
		}
		payload_len = mb->m_pkthdr.len - ihs;
		if (payload_len == 0)
			num_pkts = 1;
		else
			num_pkts = DIV_ROUND_UP(payload_len, mss);
		sq->mbuf[pi].num_bytes = payload_len + (num_pkts * ihs);

		sq->stats.tso_packets++;
		sq->stats.tso_bytes += payload_len;
	} else {
		opcode = MLX5_OPCODE_SEND;

		switch (sq->min_inline_mode) {
		case MLX5_INLINE_MODE_IP:
		case MLX5_INLINE_MODE_TCP_UDP:
			ihs = mlx5e_get_full_header_size(mb);
			if (unlikely(ihs == 0))
				ihs = mlx5e_get_l2_header_size(sq, mb);
			break;
		case MLX5_INLINE_MODE_L2:
			ihs = mlx5e_get_l2_header_size(sq, mb);
			break;
		case MLX5_INLINE_MODE_NONE:
			/* FALLTHROUGH */
		default:
			if ((mb->m_flags & M_VLANTAG) != 0 &&
			    (sq->min_insert_caps & MLX5E_INSERT_VLAN) != 0) {
				/* inlining VLAN data is not required */
				wqe->eth.vlan_cmd = htons(0x8000); /* bit 0 CVLAN */
				wqe->eth.vlan_hdr = htons(mb->m_pkthdr.ether_vtag);
				ihs = 0;
			} else if ((mb->m_flags & M_VLANTAG) == 0 &&
			    (sq->min_insert_caps & MLX5E_INSERT_NON_VLAN) != 0) {
				/* inlining non-VLAN data is not required */
				ihs = 0;
			} else {
				/* we are forced to inline the L2 header, if any */
				ihs = mlx5e_get_l2_header_size(sq, mb);
			}
			break;
		}
		sq->mbuf[pi].num_bytes = max_t(unsigned int,
		    mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
	}

	if (likely(ihs == 0)) {
		/* nothing to inline */
	} else if ((mb->m_flags & M_VLANTAG) != 0) {
		struct ether_vlan_header *eh = (struct ether_vlan_header *)
		    wqe->eth.inline_hdr_start;

		/* Range checks */
		if (unlikely(ihs > (sq->max_inline - ETHER_VLAN_ENCAP_LEN))) {
			if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
				err = EINVAL;
				goto tx_drop;
			}
			ihs = (sq->max_inline - ETHER_VLAN_ENCAP_LEN);
		} else if (unlikely(ihs < ETHER_HDR_LEN)) {
			err = EINVAL;
			goto tx_drop;
		}
		m_copydata(mb, 0, ETHER_HDR_LEN, (caddr_t)eh);
		m_adj(mb, ETHER_HDR_LEN);
		/* Insert 4 bytes VLAN tag into data stream */
		eh->evl_proto = eh->evl_encap_proto;
		eh->evl_encap_proto = htons(ETHERTYPE_VLAN);
		eh->evl_tag = htons(mb->m_pkthdr.ether_vtag);
		/* Copy rest of header data, if any */
		m_copydata(mb, 0, ihs - ETHER_HDR_LEN, (caddr_t)(eh + 1));
		m_adj(mb, ihs - ETHER_HDR_LEN);
		/* Extend header by 4 bytes */
		ihs += ETHER_VLAN_ENCAP_LEN;
		wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);
	} else {
		/* check if inline header size is too big */
		if (unlikely(ihs > sq->max_inline)) {
			if (unlikely(mb->m_pkthdr.csum_flags & CSUM_TSO)) {
				err = EINVAL;
				goto tx_drop;
			}
			ihs = sq->max_inline;
		}
		m_copydata(mb, 0, ihs, wqe->eth.inline_hdr_start);
		m_adj(mb, ihs);
		wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);
	}

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (ihs > sizeof(wqe->eth.inline_hdr_start)) {
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(wqe->eth.inline_hdr_start),
		    MLX5_SEND_WQE_DS);
	}
	dseg = ((struct mlx5_wqe_data_seg *)&wqe->ctrl) + ds_cnt;

	err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err == EFBIG) {
		/* Update statistics */
		sq->stats.defragged++;
		/* Too many mbuf fragments */
		mb = m_defrag(*mbp, M_NOWAIT);
		if (mb == NULL) {
			mb = *mbp;
			goto tx_drop;
		}
		/* Try again */
		err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
		    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	}
	/* Catch errors */
	if (err != 0)
		goto tx_drop;

	/* Make sure all mbuf data, if any, is written to RAM */
	if (nsegs != 0) {
		bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map,
		    BUS_DMASYNC_PREWRITE);
	} else {
		/* All data was inlined, free the mbuf. */
		bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);
		m_freem(mb);
		mb = NULL;
	}

	for (x = 0; x != nsegs; x++) {
		if (segs[x].ds_len == 0)
			continue;
		dseg->addr = cpu_to_be64((uint64_t)segs[x].ds_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32((uint32_t)segs[x].ds_len);
		dseg++;
	}

	ds_cnt = (dseg - ((struct mlx5_wqe_data_seg *)&wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	else
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	/* Store pointer to mbuf */
	sq->mbuf[pi].mbuf = mb;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;

	/* Count all traffic going out */
	sq->stats.packets++;
	sq->stats.bytes += sq->mbuf[pi].num_bytes;

	*mbp = NULL;	/* safety clear */
	return (0);

tx_drop:
	sq->stats.dropped++;
	*mbp = NULL;
	m_freem(mb);
	return (err);
}
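
/*
 * Process transmit completions. Since only every sq->cev_factor'th
 * WQE requests a completion (see mlx5e_do_send_cqe()), a single CQE
 * accounts for cev_factor WQEs, and the inner loop releases that
 * many DMA maps and mbufs.
 */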
static void
mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
{
	u16 sqcc;

	/*
	 * sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	while (budget > 0) {
		struct mlx5_cqe64 *cqe;
		struct mbuf *mb;
		u16 x;
		u16 ci;

		cqe = mlx5e_get_cqe(&sq->cq);
		if (!cqe)
			break;

		mlx5_cqwq_pop(&sq->cq.wq);

		/* update budget according to the event factor */
		budget -= sq->cev_factor;

		for (x = 0; x != sq->cev_factor; x++) {
			ci = sqcc & sq->wq.sz_m1;
			mb = sq->mbuf[ci].mbuf;
			sq->mbuf[ci].mbuf = NULL;

			if (mb == NULL) {
				if (sq->mbuf[ci].num_bytes == 0) {
					/* NOP */
					sq->stats.nop++;
				}
			} else {
				bus_dmamap_sync(sq->dma_tag, sq->mbuf[ci].dma_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);

				/* Free transmitted mbuf */
				m_freem(mb);
			}
			sqcc += sq->mbuf[ci].num_wqebbs;
		}
	}

	mlx5_cqwq_update_db_record(&sq->cq.wq);

	/* Ensure cq space is freed before enabling more cqes */
	atomic_thread_fence_rel();

	sq->cc = sqcc;
}
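
/*
 * Transmit with the SQ lock held: enqueue the mbuf, write any staged
 * doorbell record to hardware, and start the completion-event timer
 * when CQEs are being interleaved so the ring still gets flushed on
 * timeout.
 */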
static int
mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
{
	int err = 0;

	if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    READ_ONCE(sq->running) == 0)) {
		m_freem(mb);
		return (ENETDOWN);
	}

	/* Do transmit */
	if (mlx5e_sq_xmit(sq, &mb) != 0) {
		/* NOTE: m_freem() is NULL safe */
		m_freem(mb);
		err = ENOBUFS;
	}

	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
	}

	/*
	 * Check if we need to start the event timer which flushes the
	 * transmit ring on timeout:
	 */
	if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
	    sq->cev_factor != 1)) {
		/* start the timer */
		mlx5e_sq_cev_timeout(sq);
	} else {
		/* don't send NOPs yet */
		sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	}
	return (err);
}

int
mlx5e_xmit(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_sq *sq;
	int ret;

	sq = mlx5e_select_queue(ifp, mb);
	if (unlikely(sq == NULL)) {
		/* Invalid send queue */
		m_freem(mb);
		return (ENXIO);
	}

	mtx_lock(&sq->lock);
	ret = mlx5e_xmit_locked(ifp, sq, mb);
	mtx_unlock(&sq->lock);

	return (ret);
}

void
mlx5e_tx_cq_comp(struct mlx5_core_cq *mcq)
{
	struct mlx5e_sq *sq = container_of(mcq, struct mlx5e_sq, cq.mcq);

	mtx_lock(&sq->comp_lock);
	mlx5e_poll_tx_cq(sq, MLX5E_BUDGET_MAX);
	mlx5e_cq_arm(&sq->cq, MLX5_GET_DOORBELL_LOCK(&sq->priv->doorbell_lock));
	mtx_unlock(&sq->comp_lock);
}