/*-
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c 362313 2020-06-18 10:41:51Z hselasky $
 */

#include "en.h"
#include <machine/atomic.h>

static inline bool
mlx5e_do_send_cqe(struct mlx5e_sq *sq)
{
	sq->cev_counter++;
	/* interleave the CQEs */
	if (sq->cev_counter >= sq->cev_factor) {
		sq->cev_counter = 0;
		return (1);
	}
	return (0);
}

void
mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt)
{
	u16 pi = sq->pc & sq->wq.sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	else
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;
}

#if (__FreeBSD_version >= 1100000)
static uint32_t mlx5e_hash_value;

static void
mlx5e_hash_init(void *arg)
{
	mlx5e_hash_value = m_ether_tcpip_hash_init();
}

/* Make kernel call mlx5e_hash_init after the random stack finished initializing */
SYSINIT(mlx5e_hash_init, SI_SUB_RANDOM, SI_ORDER_ANY, &mlx5e_hash_init, NULL);
#endif

static struct mlx5e_sq *
mlx5e_select_queue(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_priv *priv = ifp->if_softc;
	struct mlx5e_channel * volatile *ppch;
	struct mlx5e_channel *pch;
	u32 ch;
	u32 tc;

	ppch = priv->channel;

	/* check if channels are successfully opened */
	if (unlikely(ppch == NULL))
		return (NULL);

	/* obtain VLAN information if present */
	if (mb->m_flags & M_VLANTAG) {
		tc = (mb->m_pkthdr.ether_vtag >> 13);
		if (tc >= priv->num_tc)
			tc = priv->default_vlan_prio;
	} else {
		tc = priv->default_vlan_prio;
	}

	ch = priv->params.num_channels;

	/* check if flowid is set */
	if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE) {
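		/*
		 * Added note: the mbuf already carries a flow hash.  With
		 * RSS compiled in, map the flowid to an RSS bucket first;
		 * otherwise fold the flowid directly onto the number of
		 * configured channels.
		 */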
#ifdef RSS
		u32 temp;

		if (rss_hash2bucket(mb->m_pkthdr.flowid,
		    M_HASHTYPE_GET(mb), &temp) == 0)
			ch = temp % ch;
		else
#endif
			ch = (mb->m_pkthdr.flowid % 128) % ch;
	} else {
#if (__FreeBSD_version >= 1100000)
		ch = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 |
		    MBUF_HASHFLAG_L4, mb, mlx5e_hash_value) % ch;
#else
		/*
		 * m_ether_tcpip_hash not present in stable, so just
		 * throw unhashed mbufs on queue 0
		 */
		ch = 0;
#endif
	}

	/* check if channel is allocated and not stopped */
	pch = ppch[ch];
	if (likely(pch != NULL && pch->sq[tc].stopped == 0))
		return (&pch->sq[tc]);
	return (NULL);
}

static inline u16
mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, struct mbuf *mb)
{

	switch (sq->min_inline_mode) {
	case MLX5_INLINE_MODE_NONE:
		/*
		 * When inline mode is NONE, we do not need to copy
		 * headers into WQEs, except when vlan tag framing is
		 * requested. Hardware might offload vlan tagging on
		 * transmit. This is a separate capability, which is
		 * known to be disabled on ConnectX-5 due to a hardware
		 * bug RM 931383. If vlan_inline_cap is not present and
		 * the packet has a vlan tag, fall back to inlining.
		 */
		if ((mb->m_flags & M_VLANTAG) != 0 &&
		    sq->vlan_inline_cap == 0)
			break;
		return (0);
	case MLX5_INLINE_MODE_L2:
		/*
		 * Due to hardware limitations, when trust mode is
		 * DSCP, the hardware may request MLX5_INLINE_MODE_L2
		 * while it really needs all L2 headers and the first 4
		 * bytes of the IP header (which include the
		 * TOS/traffic-class).
		 *
		 * To avoid doing a firmware command for querying the
		 * trust state and parsing the mbuf for doing
		 * unnecessary checks (VLAN/eth_type) in the fast path,
		 * we go for the worst case (22 bytes) if
		 * mb->m_pkthdr.len allows it.
		 */
		if (mb->m_pkthdr.len > ETHER_HDR_LEN +
		    ETHER_VLAN_ENCAP_LEN + 4)
			return (MIN(sq->max_inline, ETHER_HDR_LEN +
			    ETHER_VLAN_ENCAP_LEN + 4));
		break;
	}
	return (MIN(sq->max_inline, mb->m_pkthdr.len));
}

/*
 * This function parses IPv4 and IPv6 packets looking for the TCP
 * header.
 *
 * The return value indicates the number of bytes from the beginning
 * of the packet until the first byte after the TCP header. If this
 * function returns zero, the parsing failed.
 */
static int
mlx5e_get_header_size(const struct mbuf *mb)
{
	const struct ether_vlan_header *eh;
	const struct tcphdr *th;
	const struct ip *ip;
	int ip_hlen, tcp_hlen;
	const struct ip6_hdr *ip6;
	uint16_t eth_type;
	int eth_hdr_len;

	eh = mtod(mb, const struct ether_vlan_header *);
	if (unlikely(mb->m_len < ETHER_HDR_LEN))
		return (0);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		if (unlikely(mb->m_len < (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)))
			return (0);
		eth_type = ntohs(eh->evl_proto);
		eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		eth_hdr_len = ETHER_HDR_LEN;
	}
	switch (eth_type) {
	case ETHERTYPE_IP:
		ip = (const struct ip *)(mb->m_data + eth_hdr_len);
		if (unlikely(mb->m_len < eth_hdr_len + sizeof(*ip)))
			return (0);
		if (ip->ip_p != IPPROTO_TCP)
			return (0);
		ip_hlen = ip->ip_hl << 2;
		eth_hdr_len += ip_hlen;
		break;
	case ETHERTYPE_IPV6:
		ip6 = (const struct ip6_hdr *)(mb->m_data + eth_hdr_len);
		if (unlikely(mb->m_len < eth_hdr_len + sizeof(*ip6)))
			return (0);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (0);
		eth_hdr_len += sizeof(*ip6);
		break;
	default:
		return (0);
	}
	if (unlikely(mb->m_len < eth_hdr_len + sizeof(*th))) {
		const struct mbuf *m_th = mb->m_next;

		if (unlikely(mb->m_len != eth_hdr_len ||
		    m_th == NULL || m_th->m_len < sizeof(*th)))
			return (0);
		th = (const struct tcphdr *)(m_th->m_data);
	} else {
		th = (const struct tcphdr *)(mb->m_data + eth_hdr_len);
	}
	tcp_hlen = th->th_off << 2;
	eth_hdr_len += tcp_hlen;
	/*
	 * m_copydata() will be used on the remaining header which
	 * does not need to reside within the first m_len bytes of
	 * data:
	 */
	if (unlikely(mb->m_pkthdr.len < eth_hdr_len))
		return (0);
	return (eth_hdr_len);
}

/*
 * The return value is not going back to the stack because of
 * the drbr
 */
static int
mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
{
	bus_dma_segment_t segs[MLX5E_MAX_TX_MBUF_FRAGS];
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe *wqe;
	struct ifnet *ifp;
	int nsegs;
	int err;
	int x;
	struct mbuf *mb = *mbp;
	u16 ds_cnt;
	u16 ihs;
	u16 pi;
	u8 opcode;

	/*
	 * Return ENOBUFS if the queue is full, this may trigger reinsertion
	 * of the mbuf into the drbr (see mlx5e_xmit_locked)
	 */
	if (unlikely(!mlx5e_sq_has_room_for(sq, 2 * MLX5_SEND_WQE_MAX_WQEBBS))) {
		sq->stats.enobuf++;
		return (ENOBUFS);
	}

	/* Align SQ edge with NOPs to avoid WQE wrap around */
	pi = ((~sq->pc) & sq->wq.sz_m1);
	if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
		/* Send one multi NOP message instead of many */
		mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS);
		pi = ((~sq->pc) & sq->wq.sz_m1);
		if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
			sq->stats.enobuf++;
			return (ENOMEM);
		}
	}

	/* Setup local variables */
	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
	ifp = sq->ifp;

	memset(wqe, 0, sizeof(*wqe));

	/* Send a copy of the frame to the BPF listener, if any */
	if (ifp != NULL && ifp->if_bpf != NULL)
		ETHER_BPF_MTAP(ifp, mb);

	if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L3_CSUM;
	}
	if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) {
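		/* Added note: TCP/UDP (IPv4 and IPv6) and TSO frames get L4 checksum offload */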
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L4_CSUM;
	}
	if (wqe->eth.cs_flags == 0) {
		sq->stats.csum_offload_none++;
	}
	if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
		u32 payload_len;
		u32 mss = mb->m_pkthdr.tso_segsz;
		u32 num_pkts;

		wqe->eth.mss = cpu_to_be16(mss);
		opcode = MLX5_OPCODE_LSO;
		ihs = mlx5e_get_header_size(mb);
		payload_len = mb->m_pkthdr.len - ihs;
		if (payload_len == 0)
			num_pkts = 1;
		else
			num_pkts = DIV_ROUND_UP(payload_len, mss);
		sq->mbuf[pi].num_bytes = payload_len + (num_pkts * ihs);

		sq->stats.tso_packets++;
		sq->stats.tso_bytes += payload_len;
	} else {
		opcode = MLX5_OPCODE_SEND;
		ihs = mlx5e_get_inline_hdr_size(sq, mb);
		sq->mbuf[pi].num_bytes = max_t(unsigned int,
		    mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
	}
	if (ihs == 0) {
		if ((mb->m_flags & M_VLANTAG) != 0) {
			wqe->eth.vlan_cmd = htons(0x8000); /* bit 0 CVLAN */
			wqe->eth.vlan_hdr = htons(mb->m_pkthdr.ether_vtag);
		} else {
			wqe->eth.inline_hdr_sz = 0;
		}
	} else {
		if ((mb->m_flags & M_VLANTAG) != 0) {
			struct ether_vlan_header *eh = (struct ether_vlan_header
			    *)wqe->eth.inline_hdr_start;

			/* Range checks */
			if (ihs > (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN))
				ihs = (MLX5E_MAX_TX_INLINE -
				    ETHER_VLAN_ENCAP_LEN);
			else if (ihs < ETHER_HDR_LEN) {
				err = EINVAL;
				goto tx_drop;
			}
			m_copydata(mb, 0, ETHER_HDR_LEN, (caddr_t)eh);
			m_adj(mb, ETHER_HDR_LEN);
			/* Insert 4 bytes VLAN tag into data stream */
			eh->evl_proto = eh->evl_encap_proto;
			eh->evl_encap_proto = htons(ETHERTYPE_VLAN);
			eh->evl_tag = htons(mb->m_pkthdr.ether_vtag);
			/* Copy rest of header data, if any */
			m_copydata(mb, 0, ihs - ETHER_HDR_LEN,
			    (caddr_t)(eh + 1));
			m_adj(mb, ihs - ETHER_HDR_LEN);
			/* Extend header by 4 bytes */
			ihs += ETHER_VLAN_ENCAP_LEN;
		} else {
			m_copydata(mb, 0, ihs, wqe->eth.inline_hdr_start);
			m_adj(mb, ihs);
		}
		wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);
	}

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (ihs > sizeof(wqe->eth.inline_hdr_start)) {
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(wqe->eth.inline_hdr_start),
		    MLX5_SEND_WQE_DS);
	}
	dseg = ((struct mlx5_wqe_data_seg *)&wqe->ctrl) + ds_cnt;

	/* Trim off empty mbufs */
	while (mb->m_len == 0) {
		mb = m_free(mb);
		/* Check if all data has been inlined */
		if (mb == NULL)
			goto skip_dma;
	}

	err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err == EFBIG) {
		/*
		 * Update *mbp before defrag in case it was trimmed in the
		 * loop above
		 */
		*mbp = mb;
		/* Update statistics */
		sq->stats.defragged++;
		/* Too many mbuf fragments */
		mb = m_defrag(*mbp, M_NOWAIT);
		if (mb == NULL) {
			mb = *mbp;
			goto tx_drop;
		}
		/* Try again */
		err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
		    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	}
	/* Catch errors */
	if (err != 0)
		goto tx_drop;

	for (x = 0; x != nsegs; x++) {
		if (segs[x].ds_len == 0)
			continue;
		dseg->addr = cpu_to_be64((uint64_t)segs[x].ds_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32((uint32_t)segs[x].ds_len);
		dseg++;
	}
skip_dma:
	ds_cnt = (dseg - ((struct mlx5_wqe_data_seg *)&wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
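	/*
	 * Added note: qpn_ds packs the SQ number together with the number
	 * of 16-byte data segments making up this WQE, and fm_ce_se
	 * requests a completion entry only every cev_factor-th WQE (see
	 * mlx5e_do_send_cqe() above) to reduce completion overhead.
	 */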
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	else
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	/* Store pointer to mbuf */
	sq->mbuf[pi].mbuf = mb;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;

	/* Make sure all mbuf data is written to RAM */
	if (mb != NULL)
		bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map, BUS_DMASYNC_PREWRITE);

	sq->stats.packets++;
	*mbp = NULL;	/* safety clear */
	return (0);

tx_drop:
	sq->stats.dropped++;
	*mbp = NULL;
	m_freem(mb);
	return (err);
}

static void
mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
{
	u16 sqcc;

	/*
	 * sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	while (budget > 0) {
		struct mlx5_cqe64 *cqe;
		struct mbuf *mb;
		u16 x;
		u16 ci;

		cqe = mlx5e_get_cqe(&sq->cq);
		if (!cqe)
			break;

		mlx5_cqwq_pop(&sq->cq.wq);

		/* update budget according to the event factor */
		budget -= sq->cev_factor;

		for (x = 0; x != sq->cev_factor; x++) {
			ci = sqcc & sq->wq.sz_m1;
			mb = sq->mbuf[ci].mbuf;
			sq->mbuf[ci].mbuf = NULL;	/* Safety clear */

			if (mb == NULL) {
				if (sq->mbuf[ci].num_bytes == 0) {
					/* NOP */
					sq->stats.nop++;
				}
			} else {
				bus_dmamap_sync(sq->dma_tag, sq->mbuf[ci].dma_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);

				/* Free transmitted mbuf */
				m_freem(mb);
			}
			sqcc += sq->mbuf[ci].num_wqebbs;
		}
	}

	mlx5_cqwq_update_db_record(&sq->cq.wq);

	/* Ensure cq space is freed before enabling more cqes */
	wmb();

	sq->cc = sqcc;

	if (sq->sq_tq != NULL &&
	    atomic_cmpset_int(&sq->queue_state, MLX5E_SQ_FULL, MLX5E_SQ_READY))
		taskqueue_enqueue(sq->sq_tq, &sq->sq_task);
}

static int
mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
{
	struct mbuf *next;
	int err = 0;

	if (likely(mb != NULL)) {
		/*
		 * If we can't insert the mbuf into the drbr, try to xmit
		 * anyway.  We keep the error we got so we can return it
		 * after the xmit attempt.
		 */
		err = drbr_enqueue(ifp, sq->br, mb);
	}

	/*
	 * Check if the network interface is closed or if the SQ is
	 * being stopped:
	 */
	if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    sq->stopped != 0))
		return (err);

	/* Process the queue */
	while ((next = drbr_peek(ifp, sq->br)) != NULL) {
		if (mlx5e_sq_xmit(sq, &next) != 0) {
			if (next != NULL) {
				drbr_putback(ifp, sq->br, next);
				atomic_store_rel_int(&sq->queue_state, MLX5E_SQ_FULL);
				break;
			}
		}
		drbr_advance(ifp, sq->br);
	}
	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
	}
	/*
	 * Check if we need to start the event timer which flushes the
	 * transmit ring on timeout:
	 */
	if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
	    sq->cev_factor != 1)) {
		/* start the timer */
		mlx5e_sq_cev_timeout(sq);
	} else {
		/* don't send NOPs yet */
		sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	}
	return (err);
}

static int
mlx5e_xmit_locked_no_br(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
{
	int err = 0;

	if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    sq->stopped != 0)) {
		m_freem(mb);
		return (ENETDOWN);
	}

	/* Do transmit */
	if (mlx5e_sq_xmit(sq, &mb) != 0) {
		/* NOTE: m_freem() is NULL safe */
		m_freem(mb);
		err = ENOBUFS;
	}

	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
	}

	/*
	 * Check if we need to start the event timer which flushes the
	 * transmit ring on timeout:
	 */
	if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
	    sq->cev_factor != 1)) {
		/* start the timer */
		mlx5e_sq_cev_timeout(sq);
	} else {
		/* don't send NOPs yet */
		sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	}
	return (err);
}

int
mlx5e_xmit(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_sq *sq;
	int ret;

	sq = mlx5e_select_queue(ifp, mb);
	if (unlikely(sq == NULL)) {
		/* Invalid send queue */
		m_freem(mb);
		return (ENXIO);
	}

	if (unlikely(sq->br == NULL)) {
		/* rate limited traffic */
		mtx_lock(&sq->lock);
		ret = mlx5e_xmit_locked_no_br(ifp, sq, mb);
		mtx_unlock(&sq->lock);
	} else if (mtx_trylock(&sq->lock)) {
		ret = mlx5e_xmit_locked(ifp, sq, mb);
		mtx_unlock(&sq->lock);
	} else {
		ret = drbr_enqueue(ifp, sq->br, mb);
		taskqueue_enqueue(sq->sq_tq, &sq->sq_task);
	}

	return (ret);
}

void
mlx5e_tx_cq_comp(struct mlx5_core_cq *mcq)
{
	struct mlx5e_sq *sq = container_of(mcq, struct mlx5e_sq, cq.mcq);

	mtx_lock(&sq->comp_lock);
	mlx5e_poll_tx_cq(sq, MLX5E_BUDGET_MAX);
	mlx5e_cq_arm(&sq->cq, MLX5_GET_DOORBELL_LOCK(&sq->priv->doorbell_lock));
	mtx_unlock(&sq->comp_lock);
}

void
mlx5e_tx_que(void *context, int pending)
{
	struct mlx5e_sq *sq = context;
	struct ifnet *ifp = sq->ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		mtx_lock(&sq->lock);
		if (!drbr_empty(ifp, sq->br))
			mlx5e_xmit_locked(ifp, sq, NULL);
		mtx_unlock(&sq->lock);
	}
}