mlx5_en_tx.c revision 337114
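Transmit path of the mlx5 Ethernet driver (mlx5_en): send-queue selection, WQE construction with checksum/TSO offload and header inlining, doorbell batching, and TX completion processing.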
/*-
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c 337114 2018-08-02 08:55:19Z hselasky $
 */

#include "en.h"
#include <machine/atomic.h>

static inline bool
mlx5e_do_send_cqe(struct mlx5e_sq *sq)
{
	sq->cev_counter++;
	/* interleave the CQEs */
	if (sq->cev_counter >= sq->cev_factor) {
		sq->cev_counter = 0;
		return (1);
	}
	return (0);
}

void
mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt)
{
	u16 pi = sq->pc & sq->wq.sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	else
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;
}

#if (__FreeBSD_version >= 1100000)
static uint32_t mlx5e_hash_value;

static void
mlx5e_hash_init(void *arg)
{
	mlx5e_hash_value = m_ether_tcpip_hash_init();
}

/* Make the kernel call mlx5e_hash_init() after the random stack has finished initializing. */
SYSINIT(mlx5e_hash_init, SI_SUB_RANDOM, SI_ORDER_ANY, &mlx5e_hash_init, NULL);
#endif
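
/*
 * Select a send queue for the given mbuf. The traffic class is taken
 * from the 3-bit VLAN PCP field (ether_vtag >> 13) when a VLAN tag is
 * present, and the channel is derived from the mbuf's flow ID hash,
 * falling back to a software L3/L4 hash for unhashed mbufs.
 */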
static struct mlx5e_sq *
mlx5e_select_queue(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_priv *priv = ifp->if_softc;
	struct mlx5e_channel * volatile *ppch;
	struct mlx5e_channel *pch;
	u32 ch;
	u32 tc;

	ppch = priv->channel;

	/* check if channels are successfully opened */
	if (unlikely(ppch == NULL))
		return (NULL);

	/* obtain VLAN information if present */
	if (mb->m_flags & M_VLANTAG) {
		tc = (mb->m_pkthdr.ether_vtag >> 13);
		if (tc >= priv->num_tc)
			tc = priv->default_vlan_prio;
	} else {
		tc = priv->default_vlan_prio;
	}

	ch = priv->params.num_channels;

	/* check if flowid is set */
	if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE) {
#ifdef RSS
		u32 temp;

		if (rss_hash2bucket(mb->m_pkthdr.flowid,
		    M_HASHTYPE_GET(mb), &temp) == 0)
			ch = temp % ch;
		else
#endif
			ch = (mb->m_pkthdr.flowid % 128) % ch;
	} else {
#if (__FreeBSD_version >= 1100000)
		ch = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 |
		    MBUF_HASHFLAG_L4, mb, mlx5e_hash_value) % ch;
#else
		/*
		 * m_ether_tcpip_hash not present in stable, so just
		 * throw unhashed mbufs on queue 0
		 */
		ch = 0;
#endif
	}

	/* check if channel is allocated and not stopped */
	pch = ppch[ch];
	if (likely(pch != NULL && pch->sq[tc].stopped == 0))
		return (&pch->sq[tc]);
	return (NULL);
}

static inline u16
mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, struct mbuf *mb)
{

	switch (sq->min_inline_mode) {
	case MLX5_INLINE_MODE_NONE:
		/*
		 * When inline mode is NONE, we do not need to copy
		 * headers into WQEs, except when VLAN tag framing is
		 * requested. Hardware might offload VLAN tagging on
		 * transmit. This is a separate capability, which is
		 * known to be disabled on ConnectX-5 due to a hardware
		 * bug RM 931383. If vlan_inline_cap is not present and
		 * the packet has a VLAN tag, fall back to inlining.
		 */
		if ((mb->m_flags & M_VLANTAG) != 0 &&
		    sq->vlan_inline_cap == 0)
			break;
		return (0);
	case MLX5_INLINE_MODE_L2:
		/*
		 * Due to hardware limitations, when trust mode is
		 * DSCP, the hardware may request MLX5_INLINE_MODE_L2
		 * while it really needs all L2 headers and the first 4
		 * bytes of the IP header (which include the
		 * TOS/traffic-class).
		 *
		 * To avoid doing a firmware command for querying the
		 * trust state and parsing the mbuf for doing
		 * unnecessary checks (VLAN/eth_type) in the fast path,
		 * we are going for the worst case (22 bytes) if
		 * the mb->m_pkthdr.len allows it.
		 */
		if (mb->m_pkthdr.len > ETHER_HDR_LEN +
		    ETHER_VLAN_ENCAP_LEN + 4)
			return (MIN(sq->max_inline, ETHER_HDR_LEN +
			    ETHER_VLAN_ENCAP_LEN + 4));
		break;
	}
	return (MIN(sq->max_inline, mb->m_pkthdr.len));
}
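
/*
 * Return the total number of Ethernet + IP + TCP header bytes at the
 * front of the mbuf, or 0 if the packet is not TCP/IP or the headers
 * are not contiguous in the first mbuf of the chain. Used to size the
 * inlined headers in the TSO path.
 */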
static int
mlx5e_get_header_size(struct mbuf *mb)
{
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	struct ip *ip;
	int ip_hlen, tcp_hlen;
	struct ip6_hdr *ip6;
	uint16_t eth_type;
	int eth_hdr_len;

	eh = mtod(mb, struct ether_vlan_header *);
	if (mb->m_len < ETHER_HDR_LEN)
		return (0);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		eth_type = ntohs(eh->evl_proto);
		eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		eth_hdr_len = ETHER_HDR_LEN;
	}
	if (mb->m_len < eth_hdr_len)
		return (0);
	switch (eth_type) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip))
			return (0);
		if (ip->ip_p != IPPROTO_TCP)
			return (0);
		ip_hlen = ip->ip_hl << 2;
		eth_hdr_len += ip_hlen;
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip6))
			return (0);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (0);
		eth_hdr_len += sizeof(*ip6);
		break;
	default:
		return (0);
	}
	if (mb->m_len < eth_hdr_len + sizeof(*th))
		return (0);
	th = (struct tcphdr *)(mb->m_data + eth_hdr_len);
	tcp_hlen = th->th_off << 2;
	eth_hdr_len += tcp_hlen;
	if (mb->m_len < eth_hdr_len)
		return (0);
	return (eth_hdr_len);
}

/*
 * The return value is not passed back to the network stack
 * because of the drbr.
 */
static int
mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
{
	bus_dma_segment_t segs[MLX5E_MAX_TX_MBUF_FRAGS];
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe *wqe;
	struct ifnet *ifp;
	int nsegs;
	int err;
	int x;
	struct mbuf *mb = *mbp;
	u16 ds_cnt;
	u16 ihs;
	u16 pi;
	u8 opcode;

	/*
	 * Return ENOBUFS if the queue is full, this may trigger reinsertion
	 * of the mbuf into the drbr (see mlx5e_xmit_locked)
	 */
	if (unlikely(!mlx5e_sq_has_room_for(sq, 2 * MLX5_SEND_WQE_MAX_WQEBBS))) {
		return (ENOBUFS);
	}

	/* Align SQ edge with NOPs to avoid WQE wrap around */
	pi = ((~sq->pc) & sq->wq.sz_m1);
	if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
		/* Send one multi NOP message instead of many */
		mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS);
		pi = ((~sq->pc) & sq->wq.sz_m1);
		if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1))
			return (ENOMEM);
	}

	/* Setup local variables */
	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
	ifp = sq->ifp;

	memset(wqe, 0, sizeof(*wqe));

	/* Send a copy of the frame to the BPF listener, if any */
	if (ifp != NULL && ifp->if_bpf != NULL)
		ETHER_BPF_MTAP(ifp, mb);

	if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L3_CSUM;
	}
	if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L4_CSUM;
	}
	if (wqe->eth.cs_flags == 0) {
		sq->stats.csum_offload_none++;
	}
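	/*
	 * For TSO the hardware replicates the inlined headers in front
	 * of every generated segment, so num_bytes accounts for the
	 * payload plus one copy of the headers per generated packet.
	 */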
	if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
		u32 payload_len;
		u32 mss = mb->m_pkthdr.tso_segsz;
		u32 num_pkts;

		wqe->eth.mss = cpu_to_be16(mss);
		opcode = MLX5_OPCODE_LSO;
		ihs = mlx5e_get_header_size(mb);
		payload_len = mb->m_pkthdr.len - ihs;
		if (payload_len == 0)
			num_pkts = 1;
		else
			num_pkts = DIV_ROUND_UP(payload_len, mss);
		sq->mbuf[pi].num_bytes = payload_len + (num_pkts * ihs);

		sq->stats.tso_packets++;
		sq->stats.tso_bytes += payload_len;
	} else {
		opcode = MLX5_OPCODE_SEND;
		ihs = mlx5e_get_inline_hdr_size(sq, mb);
		sq->mbuf[pi].num_bytes = max_t(unsigned int,
		    mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
	}
	if (ihs == 0) {
		if ((mb->m_flags & M_VLANTAG) != 0) {
			wqe->eth.vlan_cmd = htons(0x8000); /* bit 0 CVLAN */
			wqe->eth.vlan_hdr = htons(mb->m_pkthdr.ether_vtag);
		} else {
			wqe->eth.inline_hdr_sz = 0;
		}
	} else {
		if ((mb->m_flags & M_VLANTAG) != 0) {
			struct ether_vlan_header *eh =
			    (struct ether_vlan_header *)wqe->eth.inline_hdr_start;

			/* Range checks */
			if (ihs > (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN))
				ihs = (MLX5E_MAX_TX_INLINE -
				    ETHER_VLAN_ENCAP_LEN);
			else if (ihs < ETHER_HDR_LEN) {
				err = EINVAL;
				goto tx_drop;
			}
			m_copydata(mb, 0, ETHER_HDR_LEN, (caddr_t)eh);
			m_adj(mb, ETHER_HDR_LEN);
			/* Insert 4 bytes VLAN tag into data stream */
			eh->evl_proto = eh->evl_encap_proto;
			eh->evl_encap_proto = htons(ETHERTYPE_VLAN);
			eh->evl_tag = htons(mb->m_pkthdr.ether_vtag);
			/* Copy rest of header data, if any */
			m_copydata(mb, 0, ihs - ETHER_HDR_LEN,
			    (caddr_t)(eh + 1));
			m_adj(mb, ihs - ETHER_HDR_LEN);
			/* Extend header by 4 bytes */
			ihs += ETHER_VLAN_ENCAP_LEN;
		} else {
			m_copydata(mb, 0, ihs, wqe->eth.inline_hdr_start);
			m_adj(mb, ihs);
		}
		wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);
	}

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (ihs > sizeof(wqe->eth.inline_hdr_start)) {
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(wqe->eth.inline_hdr_start),
		    MLX5_SEND_WQE_DS);
	}
	dseg = ((struct mlx5_wqe_data_seg *)&wqe->ctrl) + ds_cnt;

	err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err == EFBIG) {
		/* Update statistics */
		sq->stats.defragged++;
		/* Too many mbuf fragments */
		mb = m_defrag(*mbp, M_NOWAIT);
		if (mb == NULL) {
			mb = *mbp;
			goto tx_drop;
		}
		/* Try again */
		err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
		    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	}
	/* Catch errors */
	if (err != 0)
		goto tx_drop;
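	/*
	 * If the whole frame fit into the inlined header portion of
	 * the WQE, the DMA load above reports zero segments and the
	 * mbuf can be freed right away.
	 */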
	/* Make sure all mbuf data, if any, is written to RAM */
	if (nsegs != 0) {
		bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map,
		    BUS_DMASYNC_PREWRITE);
	} else {
		/* All data was inlined, free the mbuf. */
		bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);
		m_freem(mb);
		mb = NULL;
	}

	for (x = 0; x != nsegs; x++) {
		if (segs[x].ds_len == 0)
			continue;
		dseg->addr = cpu_to_be64((uint64_t)segs[x].ds_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32((uint32_t)segs[x].ds_len);
		dseg++;
	}

	ds_cnt = (dseg - ((struct mlx5_wqe_data_seg *)&wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	else
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	/* Store pointer to mbuf */
	sq->mbuf[pi].mbuf = mb;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;

	sq->stats.packets++;
	*mbp = NULL;	/* safety clear */
	return (0);

tx_drop:
	sq->stats.dropped++;
	*mbp = NULL;
	m_freem(mb);
	return (err);
}

static void
mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
{
	u16 sqcc;

	/*
	 * sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	while (budget > 0) {
		struct mlx5_cqe64 *cqe;
		struct mbuf *mb;
		u16 x;
		u16 ci;

		cqe = mlx5e_get_cqe(&sq->cq);
		if (!cqe)
			break;

		mlx5_cqwq_pop(&sq->cq.wq);

		/* update budget according to the event factor */
		budget -= sq->cev_factor;

		for (x = 0; x != sq->cev_factor; x++) {
			ci = sqcc & sq->wq.sz_m1;
			mb = sq->mbuf[ci].mbuf;
			sq->mbuf[ci].mbuf = NULL;	/* Safety clear */

			if (mb == NULL) {
				if (sq->mbuf[ci].num_bytes == 0) {
					/* NOP */
					sq->stats.nop++;
				}
			} else {
				bus_dmamap_sync(sq->dma_tag, sq->mbuf[ci].dma_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);

				/* Free transmitted mbuf */
				m_freem(mb);
			}
			sqcc += sq->mbuf[ci].num_wqebbs;
		}
	}

	mlx5_cqwq_update_db_record(&sq->cq.wq);

	/* Ensure cq space is freed before enabling more cqes */
	atomic_thread_fence_rel();

	sq->cc = sqcc;

	if (sq->sq_tq != NULL &&
	    atomic_cmpset_int(&sq->queue_state, MLX5E_SQ_FULL, MLX5E_SQ_READY))
		taskqueue_enqueue(sq->sq_tq, &sq->sq_task);
}
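
/*
 * Transmit with the SQ lock held: drain the buf ring, then write the
 * doorbell once for the whole batch of posted WQEs instead of once
 * per packet.
 */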
static int
mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
{
	struct mbuf *next;
	int err = 0;

	if (likely(mb != NULL)) {
		/*
		 * If we can't insert the mbuf into the drbr, try to
		 * transmit anyway; keep the error so it can be
		 * returned after the xmit.
		 */
		err = drbr_enqueue(ifp, sq->br, mb);
	}

	/*
	 * Check if the network interface is closed or if the SQ is
	 * being stopped:
	 */
	if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    sq->stopped != 0))
		return (err);

	/* Process the queue */
	while ((next = drbr_peek(ifp, sq->br)) != NULL) {
		if (mlx5e_sq_xmit(sq, &next) != 0) {
			if (next == NULL) {
				drbr_advance(ifp, sq->br);
			} else {
				drbr_putback(ifp, sq->br, next);
				atomic_store_rel_int(&sq->queue_state, MLX5E_SQ_FULL);
			}
			break;
		}
		drbr_advance(ifp, sq->br);
	}
	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
	}
	/*
	 * Check if we need to start the event timer which flushes the
	 * transmit ring on timeout:
	 */
	if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
	    sq->cev_factor != 1)) {
		/* start the timer */
		mlx5e_sq_cev_timeout(sq);
	} else {
		/* don't send NOPs yet */
		sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	}
	return (err);
}

static int
mlx5e_xmit_locked_no_br(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
{
	int err = 0;

	if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    sq->stopped != 0)) {
		m_freem(mb);
		return (ENETDOWN);
	}

	/* Do transmit */
	if (mlx5e_sq_xmit(sq, &mb) != 0) {
		/* NOTE: m_freem() is NULL safe */
		m_freem(mb);
		err = ENOBUFS;
	}

	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
	}

	/*
	 * Check if we need to start the event timer which flushes the
	 * transmit ring on timeout:
	 */
	if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
	    sq->cev_factor != 1)) {
		/* start the timer */
		mlx5e_sq_cev_timeout(sq);
	} else {
		/* don't send NOPs yet */
		sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	}
	return (err);
}

int
mlx5e_xmit(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_sq *sq;
	int ret;

	sq = mlx5e_select_queue(ifp, mb);
	if (unlikely(sq == NULL)) {
		/* Invalid send queue */
		m_freem(mb);
		return (ENXIO);
	}

	if (unlikely(sq->br == NULL)) {
		/* rate limited traffic */
		mtx_lock(&sq->lock);
		ret = mlx5e_xmit_locked_no_br(ifp, sq, mb);
		mtx_unlock(&sq->lock);
	} else if (mtx_trylock(&sq->lock)) {
		ret = mlx5e_xmit_locked(ifp, sq, mb);
		mtx_unlock(&sq->lock);
	} else {
		ret = drbr_enqueue(ifp, sq->br, mb);
		taskqueue_enqueue(sq->sq_tq, &sq->sq_task);
	}

	return (ret);
}

void
mlx5e_tx_cq_comp(struct mlx5_core_cq *mcq)
{
	struct mlx5e_sq *sq = container_of(mcq, struct mlx5e_sq, cq.mcq);

	mtx_lock(&sq->comp_lock);
	mlx5e_poll_tx_cq(sq, MLX5E_BUDGET_MAX);
	mlx5e_cq_arm(&sq->cq, MLX5_GET_DOORBELL_LOCK(&sq->priv->doorbell_lock));
	mtx_unlock(&sq->comp_lock);
}

void
mlx5e_tx_que(void *context, int pending)
{
	struct mlx5e_sq *sq = context;
	struct ifnet *ifp = sq->ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		mtx_lock(&sq->lock);
		if (!drbr_empty(ifp, sq->br))
			mlx5e_xmit_locked(ifp, sq, NULL);
		mtx_unlock(&sq->lock);
	}
}