/*-
 * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c 362312 2020-06-18 10:40:16Z hselasky $
 */

#include "en.h"
#include <machine/atomic.h>

static inline bool
mlx5e_do_send_cqe(struct mlx5e_sq *sq)
{
	sq->cev_counter++;
	/* interleave the CQEs */
	if (sq->cev_counter >= sq->cev_factor) {
		sq->cev_counter = 0;
		return (1);
	}
	return (0);
}

void
mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt)
{
	u16 pi = sq->pc & sq->wq.sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	else
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;
}
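
/*
 * Illustrative sketch (assumed constants, not quoted from the mlx5
 * headers): a WQE is built from 16-byte data segments (DS) packed into
 * 64-byte basic blocks (WQEBBs), so MLX5_SEND_WQEBB_NUM_DS is
 * typically 4 and the producer counter advances by
 *
 *	num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
 *
 * e.g. ds_cnt = 9 -> DIV_ROUND_UP(9, 4) = 3 WQEBBs. This is also why
 * mlx5e_send_nop() above can pad out to the ring edge with a single
 * multi-WQEBB NOP: its ds_cnt argument covers all remaining slots.
 */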

#if (__FreeBSD_version >= 1100000)
static uint32_t mlx5e_hash_value;

static void
mlx5e_hash_init(void *arg)
{
	mlx5e_hash_value = m_ether_tcpip_hash_init();
}

/* Make the kernel call mlx5e_hash_init() after the random stack has finished initializing */
SYSINIT(mlx5e_hash_init, SI_SUB_RANDOM, SI_ORDER_ANY, &mlx5e_hash_init, NULL);
#endif

static struct mlx5e_sq *
mlx5e_select_queue(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_priv *priv = ifp->if_softc;
	struct mlx5e_sq *sq;
	u32 ch;
	u32 tc;

	/* obtain VLAN information if present */
	if (mb->m_flags & M_VLANTAG) {
		tc = (mb->m_pkthdr.ether_vtag >> 13);
		if (tc >= priv->num_tc)
			tc = priv->default_vlan_prio;
	} else {
		tc = priv->default_vlan_prio;
	}

	ch = priv->params.num_channels;

	/* check if flowid is set */
	if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE) {
#ifdef RSS
		u32 temp;

		if (rss_hash2bucket(mb->m_pkthdr.flowid,
		    M_HASHTYPE_GET(mb), &temp) == 0)
			ch = temp % ch;
		else
#endif
			ch = (mb->m_pkthdr.flowid % 128) % ch;
	} else {
#if (__FreeBSD_version >= 1100000)
		ch = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 |
		    MBUF_HASHFLAG_L4, mb, mlx5e_hash_value) % ch;
#else
		/*
		 * m_ether_tcpip_hash not present in stable, so just
		 * throw unhashed mbufs on queue 0
		 */
		ch = 0;
#endif
	}

	/* check if send queue is running */
	sq = &priv->channel[ch].sq[tc];
	if (likely(READ_ONCE(sq->running) != 0))
		return (sq);
	return (NULL);
}
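
/*
 * Worked example for the selection above (assumed values, not driver
 * state): with priv->params.num_channels = 8, a flow-ID hashed mbuf
 * without RSS maps as
 *
 *	ch = (mb->m_pkthdr.flowid % 128) % 8;
 *
 * so flowid = 300 -> 44 % 8 -> channel 4. The traffic class comes from
 * the VLAN PCP bits, e.g. ether_vtag = 0xA005 -> 0xA005 >> 13 = TC 5,
 * falling back to priv->default_vlan_prio when out of range.
 */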

static inline u16
mlx5e_get_l2_header_size(struct mlx5e_sq *sq, struct mbuf *mb)
{
	struct ether_vlan_header *eh;
	uint16_t eth_type;
	int min_inline;

	eh = mtod(mb, struct ether_vlan_header *);
	if (unlikely(mb->m_len < ETHER_HDR_LEN)) {
		goto max_inline;
	} else if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		if (unlikely(mb->m_len < (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)))
			goto max_inline;
		eth_type = ntohs(eh->evl_proto);
		min_inline = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		min_inline = ETHER_HDR_LEN;
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		/*
		 * Make sure the TOS (IPv4) or traffic class (IPv6)
		 * field gets inlined. Else the SQ may stall.
		 */
		min_inline += 4;
		break;
	default:
		goto max_inline;
	}

	/*
	 * m_copydata() will be used on the remaining header which
	 * does not need to reside within the first m_len bytes of
	 * data:
	 */
	if (mb->m_pkthdr.len < min_inline)
		goto max_inline;
	return (min_inline);

max_inline:
	return (MIN(mb->m_pkthdr.len, sq->max_inline));
}

/*
 * This function parses IPv4 and IPv6 packets looking for TCP and UDP
 * headers.
 *
 * The return value indicates the number of bytes from the beginning
 * of the packet until the first byte after the TCP or UDP header. If
 * this function returns zero, the parsing failed.
 */
static int
mlx5e_get_full_header_size(const struct mbuf *mb)
{
	const struct ether_vlan_header *eh;
	const struct tcphdr *th;
	const struct ip *ip;
	int ip_hlen, tcp_hlen;
	const struct ip6_hdr *ip6;
	uint16_t eth_type;
	int eth_hdr_len;

	eh = mtod(mb, const struct ether_vlan_header *);
	if (unlikely(mb->m_len < ETHER_HDR_LEN))
		return (0);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		if (unlikely(mb->m_len < (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)))
			return (0);
		eth_type = ntohs(eh->evl_proto);
		eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		eth_hdr_len = ETHER_HDR_LEN;
	}
	switch (eth_type) {
	case ETHERTYPE_IP:
		ip = (const struct ip *)(mb->m_data + eth_hdr_len);
		if (unlikely(mb->m_len < eth_hdr_len + sizeof(*ip)))
			return (0);
		switch (ip->ip_p) {
		case IPPROTO_TCP:
			ip_hlen = ip->ip_hl << 2;
			eth_hdr_len += ip_hlen;
			break;
		case IPPROTO_UDP:
			ip_hlen = ip->ip_hl << 2;
			eth_hdr_len += ip_hlen + 8;
			goto done;
		default:
			return (0);
		}
		break;
	case ETHERTYPE_IPV6:
		ip6 = (const struct ip6_hdr *)(mb->m_data + eth_hdr_len);
		if (unlikely(mb->m_len < eth_hdr_len + sizeof(*ip6)))
			return (0);
		switch (ip6->ip6_nxt) {
		case IPPROTO_TCP:
			eth_hdr_len += sizeof(*ip6);
			break;
		case IPPROTO_UDP:
			eth_hdr_len += sizeof(*ip6) + 8;
			goto done;
		default:
			return (0);
		}
		break;
	default:
		return (0);
	}
	if (unlikely(mb->m_len < eth_hdr_len + sizeof(*th))) {
		const struct mbuf *m_th = mb->m_next;

		if (unlikely(mb->m_len != eth_hdr_len ||
		    m_th == NULL || m_th->m_len < sizeof(*th)))
			return (0);
		th = (const struct tcphdr *)(m_th->m_data);
	} else {
		th = (const struct tcphdr *)(mb->m_data + eth_hdr_len);
	}
	tcp_hlen = th->th_off << 2;
	eth_hdr_len += tcp_hlen;
done:
	/*
	 * m_copydata() will be used on the remaining header which
	 * does not need to reside within the first m_len bytes of
	 * data:
	 */
	if (unlikely(mb->m_pkthdr.len < eth_hdr_len))
		return (0);
	return (eth_hdr_len);
}
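
/*
 * Worked example (assumed packet, not driver state): for an untagged
 * TCP/IPv4 frame with no IP or TCP options, mlx5e_get_full_header_size()
 * returns
 *
 *	ETHER_HDR_LEN + (ip->ip_hl << 2) + (th->th_off << 2)
 *	    = 14 + 20 + 20 = 54 bytes,
 *
 * i.e. everything up to and including the TCP header. For UDP the fixed
 * 8-byte header is added instead of a parsed TCP header length.
 */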

static int
mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
{
	bus_dma_segment_t segs[MLX5E_MAX_TX_MBUF_FRAGS];
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe *wqe;
	struct ifnet *ifp;
	int nsegs;
	int err;
	int x;
	struct mbuf *mb = *mbp;
	u16 ds_cnt;
	u16 ihs;
	u16 pi;
	u8 opcode;

	/* Return ENOBUFS if the queue is full */
	if (unlikely(!mlx5e_sq_has_room_for(sq, 2 * MLX5_SEND_WQE_MAX_WQEBBS))) {
		sq->stats.enobuf++;
		return (ENOBUFS);
	}

	/* Align SQ edge with NOPs to avoid WQE wrap around */
	pi = ((~sq->pc) & sq->wq.sz_m1);
	if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
		/* Send one multi NOP message instead of many */
		mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS);
		pi = ((~sq->pc) & sq->wq.sz_m1);
		if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
			sq->stats.enobuf++;
			return (ENOMEM);
		}
	}

	/* Setup local variables */
	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
	ifp = sq->ifp;

	memset(wqe, 0, sizeof(*wqe));

	/* Send a copy of the frame to the BPF listener, if any */
	if (ifp != NULL && ifp->if_bpf != NULL)
		ETHER_BPF_MTAP(ifp, mb);

	if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L3_CSUM;
	}
	if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 |
	    CSUM_TCP_IPV6 | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L4_CSUM;
	}
	if (wqe->eth.cs_flags == 0) {
		sq->stats.csum_offload_none++;
	}
	if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
		u32 payload_len;
		u32 mss = mb->m_pkthdr.tso_segsz;
		u32 num_pkts;

		wqe->eth.mss = cpu_to_be16(mss);
		opcode = MLX5_OPCODE_LSO;
		ihs = mlx5e_get_full_header_size(mb);
		if (unlikely(ihs == 0)) {
			err = EINVAL;
			goto tx_drop;
		}
		payload_len = mb->m_pkthdr.len - ihs;
		if (payload_len == 0)
			num_pkts = 1;
		else
			num_pkts = DIV_ROUND_UP(payload_len, mss);
		sq->mbuf[pi].num_bytes = payload_len + (num_pkts * ihs);

		sq->stats.tso_packets++;
		sq->stats.tso_bytes += payload_len;
	} else {
		opcode = MLX5_OPCODE_SEND;

		switch (sq->min_inline_mode) {
		case MLX5_INLINE_MODE_IP:
		case MLX5_INLINE_MODE_TCP_UDP:
			ihs = mlx5e_get_full_header_size(mb);
			if (unlikely(ihs == 0))
				ihs = mlx5e_get_l2_header_size(sq, mb);
			break;
		case MLX5_INLINE_MODE_L2:
			ihs = mlx5e_get_l2_header_size(sq, mb);
			break;
		case MLX5_INLINE_MODE_NONE:
			/* FALLTHROUGH */
		default:
			if ((mb->m_flags & M_VLANTAG) != 0 &&
			    (sq->min_insert_caps & MLX5E_INSERT_VLAN) != 0) {
				/* inlining VLAN data is not required */
				wqe->eth.vlan_cmd = htons(0x8000); /* bit 0 CVLAN */
				wqe->eth.vlan_hdr = htons(mb->m_pkthdr.ether_vtag);
				ihs = 0;
			} else if ((mb->m_flags & M_VLANTAG) == 0 &&
			    (sq->min_insert_caps & MLX5E_INSERT_NON_VLAN) != 0) {
				/* inlining non-VLAN data is not required */
				ihs = 0;
			} else {
				/* we are forced to inline the L2 header, if any */
				ihs = mlx5e_get_l2_header_size(sq, mb);
			}
			break;
		}
		sq->mbuf[pi].num_bytes = max_t(unsigned int,
		    mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
	}

	if (likely(ihs == 0)) {
		/* nothing to inline */
	} else if ((mb->m_flags & M_VLANTAG) != 0) {
		struct ether_vlan_header *eh = (struct ether_vlan_header *)
		    wqe->eth.inline_hdr_start;

		/* Range checks */
		if (unlikely(ihs > (sq->max_inline - ETHER_VLAN_ENCAP_LEN))) {
			if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
				err = EINVAL;
				goto tx_drop;
			}
			ihs = (sq->max_inline - ETHER_VLAN_ENCAP_LEN);
		} else if (unlikely(ihs < ETHER_HDR_LEN)) {
			err = EINVAL;
			goto tx_drop;
		}
		m_copydata(mb, 0, ETHER_HDR_LEN, (caddr_t)eh);
		m_adj(mb, ETHER_HDR_LEN);
		/* Insert 4 bytes VLAN tag into data stream */
		eh->evl_proto = eh->evl_encap_proto;
		eh->evl_encap_proto = htons(ETHERTYPE_VLAN);
		eh->evl_tag = htons(mb->m_pkthdr.ether_vtag);
		/* Copy rest of header data, if any */
		m_copydata(mb, 0, ihs - ETHER_HDR_LEN, (caddr_t)(eh + 1));
		m_adj(mb, ihs - ETHER_HDR_LEN);
		/* Extend header by 4 bytes */
		ihs += ETHER_VLAN_ENCAP_LEN;
		wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);
	} else {
		/* check if inline header size is too big */
		if (unlikely(ihs > sq->max_inline)) {
			if (unlikely(mb->m_pkthdr.csum_flags & CSUM_TSO)) {
				err = EINVAL;
				goto tx_drop;
			}
			ihs = sq->max_inline;
		}
		m_copydata(mb, 0, ihs, wqe->eth.inline_hdr_start);
		m_adj(mb, ihs);
		wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);
	}

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (ihs > sizeof(wqe->eth.inline_hdr_start)) {
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(wqe->eth.inline_hdr_start),
		    MLX5_SEND_WQE_DS);
	}
	dseg = ((struct mlx5_wqe_data_seg *)&wqe->ctrl) + ds_cnt;

	err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err == EFBIG) {
		/* Update statistics */
		sq->stats.defragged++;
		/* Too many mbuf fragments */
		mb = m_defrag(*mbp, M_NOWAIT);
		if (mb == NULL) {
			mb = *mbp;
			goto tx_drop;
		}
		/* Try again */
		err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
		    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	}
	/* Catch errors */
	if (err != 0)
		goto tx_drop;

	/* Make sure all mbuf data, if any, is written to RAM */
	if (nsegs != 0) {
		bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map,
		    BUS_DMASYNC_PREWRITE);
	} else {
		/* All data was inlined, free the mbuf. */
		bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);
		m_freem(mb);
		mb = NULL;
	}

	for (x = 0; x != nsegs; x++) {
		if (segs[x].ds_len == 0)
			continue;
		dseg->addr = cpu_to_be64((uint64_t)segs[x].ds_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32((uint32_t)segs[x].ds_len);
		dseg++;
	}

	ds_cnt = (dseg - ((struct mlx5_wqe_data_seg *)&wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	else
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	/* Store pointer to mbuf */
	sq->mbuf[pi].mbuf = mb;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;

	/* Count all traffic going out */
	sq->stats.packets++;
	sq->stats.bytes += sq->mbuf[pi].num_bytes;

	*mbp = NULL;	/* safety clear */
	return (0);

tx_drop:
	sq->stats.dropped++;
	*mbp = NULL;
	m_freem(mb);
	return (err);
}
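
/*
 * Illustrative arithmetic for the inline-header accounting above (the
 * sizes are assumptions for the example, not quoted from the headers):
 * with MLX5_SEND_WQE_DS = 16 bytes, a 2-byte inline_hdr_start and a
 * 54-byte inlined header,
 *
 *	ds_cnt += DIV_ROUND_UP(54 - 2, 16);	/- 4 extra segments -/
 *
 * one further data segment is then consumed per DMA segment loaded,
 * and ds_cnt is finally recomputed from the dseg pointer before being
 * written into the control segment.
 */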

static void
mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
{
	u16 sqcc;

	/*
	 * sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	while (budget > 0) {
		struct mlx5_cqe64 *cqe;
		struct mbuf *mb;
		u16 x;
		u16 ci;

		cqe = mlx5e_get_cqe(&sq->cq);
		if (cqe == NULL)
			break;

		mlx5_cqwq_pop(&sq->cq.wq);

		/* update budget according to the event factor */
		budget -= sq->cev_factor;

		for (x = 0; x != sq->cev_factor; x++) {
			ci = sqcc & sq->wq.sz_m1;
			mb = sq->mbuf[ci].mbuf;
			sq->mbuf[ci].mbuf = NULL;

			if (mb == NULL) {
				if (sq->mbuf[ci].num_bytes == 0) {
					/* NOP */
					sq->stats.nop++;
				}
			} else {
				bus_dmamap_sync(sq->dma_tag, sq->mbuf[ci].dma_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);

				/* Free transmitted mbuf */
				m_freem(mb);
			}
			sqcc += sq->mbuf[ci].num_wqebbs;
		}
	}

	mlx5_cqwq_update_db_record(&sq->cq.wq);

	/* Ensure cq space is freed before enabling more cqes */
	atomic_thread_fence_rel();

	sq->cc = sqcc;
}
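
/*
 * Sketch of the completion interleaving (assumed factor, not driver
 * state): with sq->cev_factor = 4, mlx5e_do_send_cqe() requests a CQE
 * only on every 4th WQE, so each CQE polled above retires four send
 * queue entries:
 *
 *	budget -= 4;
 *	for (x = 0; x != 4; x++)
 *		... unload the DMA map, free sq->mbuf[ci].mbuf,
 *		    advance sqcc by num_wqebbs ...
 *
 * This amortizes completion processing; the event timer armed in
 * mlx5e_xmit_locked() below ensures held-back completions are
 * eventually flushed.
 */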

static int
mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
{
	int err = 0;

	if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    READ_ONCE(sq->running) == 0)) {
		m_freem(mb);
		return (ENETDOWN);
	}

	/* Do transmit */
	if (mlx5e_sq_xmit(sq, &mb) != 0) {
		/* NOTE: m_freem() is NULL safe */
		m_freem(mb);
		err = ENOBUFS;
	}

	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
	}

	/*
	 * Check if we need to start the event timer which flushes the
	 * transmit ring on timeout:
	 */
	if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
	    sq->cev_factor != 1)) {
		/* start the timer */
		mlx5e_sq_cev_timeout(sq);
	} else {
		/* don't send NOPs yet */
		sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	}
	return (err);
}

int
mlx5e_xmit(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_sq *sq;
	int ret;

	sq = mlx5e_select_queue(ifp, mb);
	if (unlikely(sq == NULL)) {
		/* Invalid send queue */
		m_freem(mb);
		return (ENXIO);
	}

	mtx_lock(&sq->lock);
	ret = mlx5e_xmit_locked(ifp, sq, mb);
	mtx_unlock(&sq->lock);

	return (ret);
}

void
mlx5e_tx_cq_comp(struct mlx5_core_cq *mcq)
{
	struct mlx5e_sq *sq = container_of(mcq, struct mlx5e_sq, cq.mcq);

	mtx_lock(&sq->comp_lock);
	mlx5e_poll_tx_cq(sq, MLX5E_BUDGET_MAX);
	mlx5e_cq_arm(&sq->cq, MLX5_GET_DOORBELL_LOCK(&sq->priv->doorbell_lock));
	mtx_unlock(&sq->comp_lock);
}