sfxge_tx.c revision 279184
/*-
 * Copyright (c) 2010-2011 Solarflare Communications, Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Theory of operation:
 *
 * Tx queues allocation and mapping
 *
 * One Tx queue with enabled checksum offload is allocated per Rx channel
 * (event queue).  Also 2 Tx queues (one without checksum offload and one
 * with IP checksum offload only) are allocated and bound to event queue 0.
 * sfxge_txq_type is used as Tx queue label.
 *
 * So, the event queue plus label to Tx queue index mapping is:
 *	if event queue index is 0, TxQ-index = TxQ-label (in [0..SFXGE_TXQ_NTYPES))
 *	else TxQ-index = SFXGE_TXQ_NTYPES + EvQ-index - 1
 * See sfxge_get_txq_by_label() in sfxge_ev.c
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/sfxge/sfxge_tx.c 279184 2015-02-22 19:25:57Z arybchik $");

#include <sys/types.h>
#include <sys/mbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>

#include "common/efx.h"

#include "sfxge.h"
#include "sfxge_tx.h"

/*
 * Estimate maximum number of Tx descriptors required for TSO packet.
 * With minimum MSS and maximum mbuf length we might need more (even
 * than a ring-ful of descriptors), but this should not happen in
 * practice except due to deliberate attack.  In that case we will
 * truncate the output at a packet boundary.
 */
#define	SFXGE_TSO_MAX_DESC						\
	(SFXGE_TSO_MAX_SEGS * 2 + SFXGE_TX_MAPPING_MAX_SEG - 1)

/*
 * Set the block level to ensure there is space to generate a
 * large number of descriptors for TSO.
 */
#define	SFXGE_TXQ_BLOCK_LEVEL(_entries)					\
	(EFX_TXQ_LIMIT(_entries) - SFXGE_TSO_MAX_DESC)
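
/*
 * Illustrative sketch (not part of the driver) of the EvQ + label to
 * TxQ index mapping described in the theory-of-operation comment above.
 * The real lookup lives in sfxge_get_txq_by_label() in sfxge_ev.c; this
 * assumes only what that comment states and is kept out of the build.
 */
#if 0
static inline unsigned int
sfxge_txq_index_sketch(unsigned int evq_index, enum sfxge_txq_type label)
{
	/* Event queue 0 carries one Tx queue per label/type. */
	if (evq_index == 0)
		return ((unsigned int)label);

	/*
	 * Every other event queue carries exactly one Tx queue (with full
	 * checksum offload), placed after the SFXGE_TXQ_NTYPES queues that
	 * belong to event queue 0.
	 */
	return (SFXGE_TXQ_NTYPES + evq_index - 1);
}
#endif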

#ifdef SFXGE_HAVE_MQ

#define	SFXGE_PARAM_TX_DPL_GET_MAX	SFXGE_PARAM(tx_dpl_get_max)
static int sfxge_tx_dpl_get_max = SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_MAX, &sfxge_tx_dpl_get_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_get_max, 0,
	   "Maximum number of any packets in deferred packet get-list");

#define	SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX \
	SFXGE_PARAM(tx_dpl_get_non_tcp_max)
static int sfxge_tx_dpl_get_non_tcp_max =
	SFXGE_TX_DPL_GET_NON_TCP_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX, &sfxge_tx_dpl_get_non_tcp_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_non_tcp_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_get_non_tcp_max, 0,
	   "Maximum number of non-TCP packets in deferred packet get-list");

#define	SFXGE_PARAM_TX_DPL_PUT_MAX	SFXGE_PARAM(tx_dpl_put_max)
static int sfxge_tx_dpl_put_max = SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_PUT_MAX, &sfxge_tx_dpl_put_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_put_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_put_max, 0,
	   "Maximum number of any packets in deferred packet put-list");

#endif


/* Forward declarations. */
static void sfxge_tx_qdpl_service(struct sfxge_txq *txq);
static void sfxge_tx_qlist_post(struct sfxge_txq *txq);
static void sfxge_tx_qunblock(struct sfxge_txq *txq);
static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
			      const bus_dma_segment_t *dma_seg, int n_dma_seg);

void
sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq)
{
	unsigned int completed;

	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	completed = txq->completed;
	while (completed != txq->pending) {
		struct sfxge_tx_mapping *stmp;
		unsigned int id;

		id = completed++ & txq->ptr_mask;

		stmp = &txq->stmp[id];
		if (stmp->flags & TX_BUF_UNMAP) {
			bus_dmamap_unload(txq->packet_dma_tag, stmp->map);
			if (stmp->flags & TX_BUF_MBUF) {
				struct mbuf *m = stmp->u.mbuf;
				do
					m = m_free(m);
				while (m != NULL);
			} else {
				free(stmp->u.heap_buf, M_SFXGE);
			}
			stmp->flags = 0;
		}
	}
	txq->completed = completed;

	/* Check whether we need to unblock the queue. */
	mb();
	if (txq->blocked) {
		unsigned int level;

		level = txq->added - txq->completed;
		if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries))
			sfxge_tx_qunblock(txq);
	}
}

#ifdef SFXGE_HAVE_MQ

static unsigned int
sfxge_is_mbuf_non_tcp(struct mbuf *mbuf)
{
	/* Absence of TCP checksum flags does not mean that it is non-TCP,
	 * but it should be true if the user wants to achieve high throughput.
	 */
	return (!(mbuf->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)));
}

/*
 * Reorder the put list and append it to the get list.
 */
static void
sfxge_tx_qdpl_swizzle(struct sfxge_txq *txq)
{
	struct sfxge_tx_dpl *stdp;
	struct mbuf *mbuf, *get_next, **get_tailp;
	volatile uintptr_t *putp;
	uintptr_t put;
	unsigned int count;
	unsigned int non_tcp_count;

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	stdp = &txq->dpl;

	/* Acquire the put list. */
	putp = &stdp->std_put;
	put = atomic_readandclear_ptr(putp);
	mbuf = (void *)put;

	if (mbuf == NULL)
		return;

	/* Reverse the put list. */
	get_tailp = &mbuf->m_nextpkt;
	get_next = NULL;

	count = 0;
	non_tcp_count = 0;
	do {
		struct mbuf *put_next;

		non_tcp_count += sfxge_is_mbuf_non_tcp(mbuf);
		put_next = mbuf->m_nextpkt;
		mbuf->m_nextpkt = get_next;
		get_next = mbuf;
		mbuf = put_next;

		count++;
	} while (mbuf != NULL);

	/* Append the reversed put list to the get list. */
	KASSERT(*get_tailp == NULL, ("*get_tailp != NULL"));
	*stdp->std_getp = get_next;
	stdp->std_getp = get_tailp;
	stdp->std_get_count += count;
	stdp->std_get_non_tcp_count += non_tcp_count;
}

#endif /* SFXGE_HAVE_MQ */

static void
sfxge_tx_qreap(struct sfxge_txq *txq)
{
	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	txq->reaped = txq->completed;
}

static void
sfxge_tx_qlist_post(struct sfxge_txq *txq)
{
	unsigned int old_added;
	unsigned int level;
	int rc;

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	KASSERT(txq->n_pend_desc != 0, ("txq->n_pend_desc == 0"));
	KASSERT(txq->n_pend_desc <= SFXGE_TSO_MAX_DESC,
		("txq->n_pend_desc too large"));
	KASSERT(!txq->blocked, ("txq->blocked"));

	old_added = txq->added;

	/* Post the fragment list. */
	rc = efx_tx_qpost(txq->common, txq->pend_desc, txq->n_pend_desc,
			  txq->reaped, &txq->added);
	KASSERT(rc == 0, ("efx_tx_qpost() failed"));

	/* If efx_tx_qpost() had to refragment, our information about
	 * buffers to free may be associated with the wrong
	 * descriptors.
	 */
	KASSERT(txq->added - old_added == txq->n_pend_desc,
		("efx_tx_qpost() refragmented descriptors"));

	level = txq->added - txq->reaped;
	KASSERT(level <= txq->entries, ("overfilled TX queue"));

	/* Clear the fragment list. */
	txq->n_pend_desc = 0;

	/* Have we reached the block level? */
	if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries))
		return;

	/* Reap, and check again */
	sfxge_tx_qreap(txq);
	level = txq->added - txq->reaped;
	if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries))
		return;

	txq->blocked = 1;

	/*
	 * Avoid a race with completion interrupt handling that could leave
	 * the queue blocked.
	 */
	mb();
	sfxge_tx_qreap(txq);
	level = txq->added - txq->reaped;
	if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries)) {
		mb();
		txq->blocked = 0;
	}
}
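
/*
 * Editorial note (not upstream): the descriptor ring is tracked with four
 * monotonically increasing counters, masked by txq->ptr_mask when used as
 * indices, with roughly reaped <= completed <= pending <= added.  "added"
 * counts descriptors handed to efx_tx_qpost(), "pending" is advanced
 * elsewhere (presumably by the Tx event handling) to mark descriptors the
 * hardware has finished with, sfxge_tx_qcomplete() moves "completed" up to
 * "pending" while unmapping and freeing buffers, and sfxge_tx_qreap() lets
 * "reaped" catch up so that added - reaped measures ring occupancy for the
 * block/unblock decisions above.
 */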

static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
{
	bus_dmamap_t *used_map;
	bus_dmamap_t map;
	bus_dma_segment_t dma_seg[SFXGE_TX_MAPPING_MAX_SEG];
	unsigned int id;
	struct sfxge_tx_mapping *stmp;
	efx_buffer_t *desc;
	int n_dma_seg;
	int rc;
	int i;

	KASSERT(!txq->blocked, ("txq->blocked"));

	if (mbuf->m_pkthdr.csum_flags & CSUM_TSO)
		prefetch_read_many(mbuf->m_data);

	if (txq->init_state != SFXGE_TXQ_STARTED) {
		rc = EINTR;
		goto reject;
	}

	/* Load the packet for DMA. */
	id = txq->added & txq->ptr_mask;
	stmp = &txq->stmp[id];
	rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map,
				     mbuf, dma_seg, &n_dma_seg, 0);
	if (rc == EFBIG) {
		/* Try again. */
		struct mbuf *new_mbuf = m_collapse(mbuf, M_NOWAIT,
						   SFXGE_TX_MAPPING_MAX_SEG);
		if (new_mbuf == NULL)
			goto reject;
		++txq->collapses;
		mbuf = new_mbuf;
		rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag,
					     stmp->map, mbuf,
					     dma_seg, &n_dma_seg, 0);
	}
	if (rc != 0)
		goto reject;

	/* Make the packet visible to the hardware. */
	bus_dmamap_sync(txq->packet_dma_tag, stmp->map, BUS_DMASYNC_PREWRITE);

	used_map = &stmp->map;

	if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
		rc = sfxge_tx_queue_tso(txq, mbuf, dma_seg, n_dma_seg);
		if (rc < 0)
			goto reject_mapped;
		stmp = &txq->stmp[rc];
	} else {
		/* Add the mapping to the fragment list, and set flags
		 * for the buffer.
		 */
		i = 0;
		for (;;) {
			desc = &txq->pend_desc[i];
			desc->eb_addr = dma_seg[i].ds_addr;
			desc->eb_size = dma_seg[i].ds_len;
			if (i == n_dma_seg - 1) {
				desc->eb_eop = 1;
				break;
			}
			desc->eb_eop = 0;
			i++;

			stmp->flags = 0;
			if (__predict_false(stmp ==
					    &txq->stmp[txq->ptr_mask]))
				stmp = &txq->stmp[0];
			else
				stmp++;
		}
		txq->n_pend_desc = n_dma_seg;
	}

	/*
	 * If the mapping required more than one descriptor
	 * then we need to associate the DMA map with the last
	 * descriptor, not the first.
	 */
	if (used_map != &stmp->map) {
		map = stmp->map;
		stmp->map = *used_map;
		*used_map = map;
	}

	stmp->u.mbuf = mbuf;
	stmp->flags = TX_BUF_UNMAP | TX_BUF_MBUF;

	/* Post the fragment list. */
	sfxge_tx_qlist_post(txq);

	return (0);

reject_mapped:
	bus_dmamap_unload(txq->packet_dma_tag, *used_map);
reject:
	/* Drop the packet on the floor. */
	m_freem(mbuf);
	++txq->drops;

	return (rc);
}

#ifdef SFXGE_HAVE_MQ

/*
 * Drain the deferred packet list into the transmit queue.
 */
static void
sfxge_tx_qdpl_drain(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc;
	struct sfxge_tx_dpl *stdp;
	struct mbuf *mbuf, *next;
	unsigned int count;
	unsigned int non_tcp_count;
	unsigned int pushed;
	int rc;

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	sc = txq->sc;
	stdp = &txq->dpl;
	pushed = txq->added;

	prefetch_read_many(sc->enp);
	prefetch_read_many(txq->common);

	mbuf = stdp->std_get;
	count = stdp->std_get_count;
	non_tcp_count = stdp->std_get_non_tcp_count;

	if (count > stdp->std_get_hiwat)
		stdp->std_get_hiwat = count;

	while (count != 0) {
		KASSERT(mbuf != NULL, ("mbuf == NULL"));

		next = mbuf->m_nextpkt;
		mbuf->m_nextpkt = NULL;

		ETHER_BPF_MTAP(sc->ifnet, mbuf); /* packet capture */

		if (next != NULL)
			prefetch_read_many(next);

		rc = sfxge_tx_queue_mbuf(txq, mbuf);
		--count;
		non_tcp_count -= sfxge_is_mbuf_non_tcp(mbuf);
		mbuf = next;
		if (rc != 0)
			continue;

		if (txq->blocked)
			break;

		/* Push the fragments to the hardware in batches. */
		if (txq->added - pushed >= SFXGE_TX_BATCH) {
			efx_tx_qpush(txq->common, txq->added);
			pushed = txq->added;
		}
	}

	if (count == 0) {
		KASSERT(mbuf == NULL, ("mbuf != NULL"));
		KASSERT(non_tcp_count == 0,
			("inconsistent TCP/non-TCP detection"));
		stdp->std_get = NULL;
		stdp->std_get_count = 0;
		stdp->std_get_non_tcp_count = 0;
		stdp->std_getp = &stdp->std_get;
	} else {
		stdp->std_get = mbuf;
		stdp->std_get_count = count;
		stdp->std_get_non_tcp_count = non_tcp_count;
	}

	if (txq->added != pushed)
		efx_tx_qpush(txq->common, txq->added);

	KASSERT(txq->blocked || stdp->std_get_count == 0,
		("queue unblocked but count is non-zero"));
}

#define	SFXGE_TX_QDPL_PENDING(_txq)					\
	((_txq)->dpl.std_put != 0)

/*
 * Service the deferred packet list.
 *
 * NOTE: drops the txq mutex!
 */
static void
sfxge_tx_qdpl_service(struct sfxge_txq *txq)
{
	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	do {
		if (SFXGE_TX_QDPL_PENDING(txq))
			sfxge_tx_qdpl_swizzle(txq);

		if (!txq->blocked)
			sfxge_tx_qdpl_drain(txq);

		SFXGE_TXQ_UNLOCK(txq);
	} while (SFXGE_TX_QDPL_PENDING(txq) &&
		 SFXGE_TXQ_TRYLOCK(txq));
}

/*
 * Put a packet on the deferred packet list.
 *
 * If we are called with the txq lock held, we put the packet on the "get
 * list", otherwise we atomically push it on the "put list".  The swizzle
 * function takes care of ordering.
 *
 * The length of the put list is bounded by std_put_max (the tx_dpl_put_max
 * tunable).  We overload the csum_data field in the mbuf to keep track of
 * this length because there is no cheap alternative to avoid races.
 */
static int
sfxge_tx_qdpl_put(struct sfxge_txq *txq, struct mbuf *mbuf, int locked)
{
	struct sfxge_tx_dpl *stdp;

	stdp = &txq->dpl;

	KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL"));

	if (locked) {
		SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

		sfxge_tx_qdpl_swizzle(txq);

		if (stdp->std_get_count >= stdp->std_get_max) {
			txq->get_overflow++;
			return (ENOBUFS);
		}
		if (sfxge_is_mbuf_non_tcp(mbuf)) {
			if (stdp->std_get_non_tcp_count >=
			    stdp->std_get_non_tcp_max) {
				txq->get_non_tcp_overflow++;
				return (ENOBUFS);
			}
			stdp->std_get_non_tcp_count++;
		}

		*(stdp->std_getp) = mbuf;
		stdp->std_getp = &mbuf->m_nextpkt;
		stdp->std_get_count++;
	} else {
		volatile uintptr_t *putp;
		uintptr_t old;
		uintptr_t new;
		unsigned old_len;

		putp = &stdp->std_put;
		new = (uintptr_t)mbuf;

		do {
			old = *putp;
			if (old != 0) {
				struct mbuf *mp = (struct mbuf *)old;
				old_len = mp->m_pkthdr.csum_data;
			} else
				old_len = 0;
			if (old_len >= stdp->std_put_max) {
				atomic_add_long(&txq->put_overflow, 1);
				return (ENOBUFS);
			}
			mbuf->m_pkthdr.csum_data = old_len + 1;
			mbuf->m_nextpkt = (void *)old;
		} while (atomic_cmpset_ptr(putp, old, new) == 0);
	}

	return (0);
}
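
/*
 * Editorial note (not upstream): the usual multiqueue transmit path is
 * sfxge_if_transmit() -> sfxge_tx_packet_add() -> sfxge_tx_qdpl_put(),
 * which appends to the lock-protected "get list" when the txq lock could
 * be taken, or CAS-pushes onto the lock-free "put list" otherwise.
 * Whichever thread then wins the txq lock runs sfxge_tx_qdpl_service(),
 * which swizzles the put list onto the get list and drains it into the
 * hardware ring via sfxge_tx_queue_mbuf()/efx_tx_qpush().
 */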

/*
 * Called from if_transmit - will try to grab the txq lock and enqueue the
 * packet on the deferred packet list's "get list" if it succeeds; otherwise
 * the packet is pushed onto the lock-free "put list" if there is space.
 */
int
sfxge_tx_packet_add(struct sfxge_txq *txq, struct mbuf *m)
{
	int locked;
	int rc;

	if (!SFXGE_LINK_UP(txq->sc)) {
		rc = ENETDOWN;
		atomic_add_long(&txq->netdown_drops, 1);
		goto fail;
	}

	/*
	 * Try to grab the txq lock.  If we are able to get the lock,
	 * the packet will be appended to the "get list" of the deferred
	 * packet list.  Otherwise, it will be pushed on the "put list".
	 */
	locked = SFXGE_TXQ_TRYLOCK(txq);

	if (sfxge_tx_qdpl_put(txq, m, locked) != 0) {
		if (locked)
			SFXGE_TXQ_UNLOCK(txq);
		rc = ENOBUFS;
		goto fail;
	}

	/*
	 * Try to grab the lock again.
	 *
	 * If we are able to get the lock, we need to process the deferred
	 * packet list.  If we are not able to get the lock, another thread
	 * is processing the list.
	 */
	if (!locked)
		locked = SFXGE_TXQ_TRYLOCK(txq);

	if (locked) {
		/* Try to service the list. */
		sfxge_tx_qdpl_service(txq);
		/* Lock has been dropped. */
	}

	return (0);

fail:
	m_freem(m);
	return (rc);
}

static void
sfxge_tx_qdpl_flush(struct sfxge_txq *txq)
{
	struct sfxge_tx_dpl *stdp = &txq->dpl;
	struct mbuf *mbuf, *next;

	SFXGE_TXQ_LOCK(txq);

	sfxge_tx_qdpl_swizzle(txq);
	for (mbuf = stdp->std_get; mbuf != NULL; mbuf = next) {
		next = mbuf->m_nextpkt;
		m_freem(mbuf);
	}
	stdp->std_get = NULL;
	stdp->std_get_count = 0;
	stdp->std_get_non_tcp_count = 0;
	stdp->std_getp = &stdp->std_get;

	SFXGE_TXQ_UNLOCK(txq);
}

void
sfxge_if_qflush(struct ifnet *ifp)
{
	struct sfxge_softc *sc;
	int i;

	sc = ifp->if_softc;

	for (i = 0; i < sc->txq_count; i++)
		sfxge_tx_qdpl_flush(sc->txq[i]);
}

/*
 * TX start -- called by the stack.
 */
int
sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct sfxge_softc *sc;
	struct sfxge_txq *txq;
	int rc;

	sc = (struct sfxge_softc *)ifp->if_softc;

	KASSERT(ifp->if_flags & IFF_UP, ("interface not up"));

	/* Pick the desired transmit queue. */
	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TSO)) {
		int index = 0;

		/* check if flowid is set */
		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
			uint32_t hash = m->m_pkthdr.flowid;

			index = sc->rx_indir_table[hash % SFXGE_RX_SCALE_MAX];
		}
		txq = sc->txq[SFXGE_TXQ_IP_TCP_UDP_CKSUM + index];
	} else if (m->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
		txq = sc->txq[SFXGE_TXQ_IP_CKSUM];
	} else {
		txq = sc->txq[SFXGE_TXQ_NON_CKSUM];
	}

	rc = sfxge_tx_packet_add(txq, m);

	return (rc);
}
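
/*
 * Editorial note (not upstream): for traffic with TCP/UDP checksum or TSO
 * offload requested, sfxge_if_transmit() above spreads packets across the
 * per-event-queue Tx queues using the mbuf flowid and the same indirection
 * table as receive-side scaling, presumably so a flow's transmit completions
 * are serviced by the event queue (and CPU) that also handles its receive
 * traffic.  Plain IP-checksum and no-offload packets always use the two
 * queues bound to event queue 0.
 */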

#else /* !SFXGE_HAVE_MQ */

static void sfxge_if_start_locked(struct ifnet *ifp)
{
	struct sfxge_softc *sc = ifp->if_softc;
	struct sfxge_txq *txq;
	struct mbuf *mbuf;
	unsigned int pushed[SFXGE_TXQ_NTYPES];
	unsigned int q_index;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	if (!sc->port.link_up)
		return;

	for (q_index = 0; q_index < SFXGE_TXQ_NTYPES; q_index++) {
		txq = sc->txq[q_index];
		pushed[q_index] = txq->added;
	}

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, mbuf);
		if (mbuf == NULL)
			break;

		ETHER_BPF_MTAP(ifp, mbuf); /* packet capture */

		/* Pick the desired transmit queue. */
		if (mbuf->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TSO))
			q_index = SFXGE_TXQ_IP_TCP_UDP_CKSUM;
		else if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_IP)
			q_index = SFXGE_TXQ_IP_CKSUM;
		else
			q_index = SFXGE_TXQ_NON_CKSUM;
		txq = sc->txq[q_index];

		if (sfxge_tx_queue_mbuf(txq, mbuf) != 0)
			continue;

		if (txq->blocked) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		/* Push the fragments to the hardware in batches. */
		if (txq->added - pushed[q_index] >= SFXGE_TX_BATCH) {
			efx_tx_qpush(txq->common, txq->added);
			pushed[q_index] = txq->added;
		}
	}

	for (q_index = 0; q_index < SFXGE_TXQ_NTYPES; q_index++) {
		txq = sc->txq[q_index];
		if (txq->added != pushed[q_index])
			efx_tx_qpush(txq->common, txq->added);
	}
}

void sfxge_if_start(struct ifnet *ifp)
{
	struct sfxge_softc *sc = ifp->if_softc;

	SFXGE_TXQ_LOCK(sc->txq[0]);
	sfxge_if_start_locked(ifp);
	SFXGE_TXQ_UNLOCK(sc->txq[0]);
}

static void
sfxge_tx_qdpl_service(struct sfxge_txq *txq)
{
	struct ifnet *ifp = txq->sc->ifnet;

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sfxge_if_start_locked(ifp);
	SFXGE_TXQ_UNLOCK(txq);
}

#endif /* SFXGE_HAVE_MQ */

/*
 * Software "TSO".  Not quite as good as doing it in hardware, but
 * still faster than segmenting in the stack.
 */

struct sfxge_tso_state {
	/* Output position */
	unsigned out_len;	/* Remaining length in current segment */
	unsigned seqnum;	/* Current sequence number */
	unsigned packet_space;	/* Remaining space in current packet */

	/* Input position */
	uint64_t dma_addr;	/* DMA address of current position */
	unsigned in_len;	/* Remaining length in current mbuf */

	const struct mbuf *mbuf; /* Input mbuf (head of chain) */
	u_short protocol;	/* Network protocol (after VLAN decap) */
	ssize_t nh_off;		/* Offset of network header */
	ssize_t tcph_off;	/* Offset of TCP header */
	unsigned header_len;	/* Number of bytes of header */
	unsigned seg_size;	/* TCP segment size */
};

static const struct ip *tso_iph(const struct sfxge_tso_state *tso)
{
	KASSERT(tso->protocol == htons(ETHERTYPE_IP),
		("tso_iph() in non-IPv4 state"));
	return (const struct ip *)(tso->mbuf->m_data + tso->nh_off);
}
static __unused const struct ip6_hdr *tso_ip6h(const struct sfxge_tso_state *tso)
{
	KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
		("tso_ip6h() in non-IPv6 state"));
	return (const struct ip6_hdr *)(tso->mbuf->m_data + tso->nh_off);
}
static const struct tcphdr *tso_tcph(const struct sfxge_tso_state *tso)
{
	return (const struct tcphdr *)(tso->mbuf->m_data + tso->tcph_off);
}

/* Size of preallocated TSO header buffers.  Larger blocks must be
 * allocated from the heap.
 */
#define	TSOH_STD_SIZE	128

/* At most half the descriptors in the queue at any time will refer to
 * a TSO header buffer, since they must always be followed by a
 * payload descriptor referring to an mbuf.
 */
#define	TSOH_COUNT(_txq_entries)	((_txq_entries) / 2u)
#define	TSOH_PER_PAGE	(PAGE_SIZE / TSOH_STD_SIZE)
#define	TSOH_PAGE_COUNT(_txq_entries)	\
	((TSOH_COUNT(_txq_entries) + TSOH_PER_PAGE - 1) / TSOH_PER_PAGE)

static int tso_init(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc = txq->sc;
	unsigned int tsoh_page_count = TSOH_PAGE_COUNT(sc->txq_entries);
	int i, rc;

	/* Allocate TSO header buffers */
	txq->tsoh_buffer = malloc(tsoh_page_count * sizeof(txq->tsoh_buffer[0]),
				  M_SFXGE, M_WAITOK);

	for (i = 0; i < tsoh_page_count; i++) {
		rc = sfxge_dma_alloc(sc, PAGE_SIZE, &txq->tsoh_buffer[i]);
		if (rc != 0)
			goto fail;
	}

	return (0);

fail:
	while (i-- > 0)
		sfxge_dma_free(&txq->tsoh_buffer[i]);
	free(txq->tsoh_buffer, M_SFXGE);
	txq->tsoh_buffer = NULL;
	return (rc);
}

static void tso_fini(struct sfxge_txq *txq)
{
	int i;

	if (txq->tsoh_buffer != NULL) {
		for (i = 0; i < TSOH_PAGE_COUNT(txq->sc->txq_entries); i++)
			sfxge_dma_free(&txq->tsoh_buffer[i]);
		free(txq->tsoh_buffer, M_SFXGE);
	}
}

static void tso_start(struct sfxge_tso_state *tso, struct mbuf *mbuf)
{
	struct ether_header *eh = mtod(mbuf, struct ether_header *);
	const struct tcphdr *th;
	struct tcphdr th_copy;

	tso->mbuf = mbuf;

	/* Find network protocol and header */
	tso->protocol = eh->ether_type;
	if (tso->protocol == htons(ETHERTYPE_VLAN)) {
		struct ether_vlan_header *veh =
			mtod(mbuf, struct ether_vlan_header *);
		tso->protocol = veh->evl_proto;
		tso->nh_off = sizeof(*veh);
	} else {
		tso->nh_off = sizeof(*eh);
	}

	/* Find TCP header */
	if (tso->protocol == htons(ETHERTYPE_IP)) {
		KASSERT(tso_iph(tso)->ip_p == IPPROTO_TCP,
			("TSO required on non-TCP packet"));
		tso->tcph_off = tso->nh_off + 4 * tso_iph(tso)->ip_hl;
	} else {
		KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
			("TSO required on non-IP packet"));
		KASSERT(tso_ip6h(tso)->ip6_nxt == IPPROTO_TCP,
			("TSO required on non-TCP packet"));
		tso->tcph_off = tso->nh_off + sizeof(struct ip6_hdr);
	}

	KASSERT(mbuf->m_len >= tso->tcph_off,
		("network header is fragmented in mbuf"));
	/* We need TCP header including flags (window is the next) */
	if (mbuf->m_len < tso->tcph_off + offsetof(struct tcphdr, th_win)) {
		m_copydata(tso->mbuf, tso->tcph_off, sizeof(th_copy),
			   (caddr_t)&th_copy);
		th = &th_copy;
	} else {
		th = tso_tcph(tso);
	}

	tso->header_len = tso->tcph_off + 4 * th->th_off;
	tso->seg_size = mbuf->m_pkthdr.tso_segsz;

	tso->seqnum = ntohl(th->th_seq);

	/* These flags must not be duplicated */
	KASSERT(!(th->th_flags & (TH_URG | TH_SYN | TH_RST)),
		("incompatible TCP flag on TSO packet"));

	tso->out_len = mbuf->m_pkthdr.len - tso->header_len;
}
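
/*
 * Editorial note (not upstream), a worked example of the segmentation done
 * by sfxge_tx_queue_tso() below: with tso_segsz (MSS) of 1448 bytes and a
 * 4344-byte TCP payload, the driver emits three packets on the ring, each
 * consisting of one descriptor for a freshly built header (seqnum advanced
 * by 1448 each time, FIN/PUSH cleared on all but the last) followed by one
 * or more payload descriptors sliced out of the original mbuf's DMA
 * segments.  Only the final packet carries the original FIN/PUSH flags and
 * a possibly short payload.
 */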

/*
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.
 */
static void tso_fill_packet_with_fragment(struct sfxge_txq *txq,
					  struct sfxge_tso_state *tso)
{
	efx_buffer_t *desc;
	int n;

	if (tso->in_len == 0 || tso->packet_space == 0)
		return;

	KASSERT(tso->in_len > 0, ("TSO input length went negative"));
	KASSERT(tso->packet_space > 0, ("TSO packet space went negative"));

	n = min(tso->in_len, tso->packet_space);

	tso->packet_space -= n;
	tso->out_len -= n;
	tso->in_len -= n;

	desc = &txq->pend_desc[txq->n_pend_desc++];
	desc->eb_addr = tso->dma_addr;
	desc->eb_size = n;
	desc->eb_eop = tso->out_len == 0 || tso->packet_space == 0;

	tso->dma_addr += n;
}

/* Callback from bus_dmamap_load() for long TSO headers. */
static void tso_map_long_header(void *dma_addr_ret,
				bus_dma_segment_t *segs, int nseg,
				int error)
{
	*(uint64_t *)dma_addr_ret = ((__predict_true(error == 0) &&
				      __predict_true(nseg == 1)) ?
				     segs->ds_addr : 0);
}

/*
 * tso_start_new_packet - generate a new header and prepare for the new packet
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or an error code if failed to alloc header.
 */
static int tso_start_new_packet(struct sfxge_txq *txq,
				struct sfxge_tso_state *tso,
				unsigned int id)
{
	struct sfxge_tx_mapping *stmp = &txq->stmp[id];
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	caddr_t header;
	uint64_t dma_addr;
	bus_dmamap_t map;
	efx_buffer_t *desc;
	int rc;

	/* Allocate a DMA-mapped header buffer. */
	if (__predict_true(tso->header_len <= TSOH_STD_SIZE)) {
		unsigned int page_index = (id / 2) / TSOH_PER_PAGE;
		unsigned int buf_index = (id / 2) % TSOH_PER_PAGE;

		header = (txq->tsoh_buffer[page_index].esm_base +
			  buf_index * TSOH_STD_SIZE);
		dma_addr = (txq->tsoh_buffer[page_index].esm_addr +
			    buf_index * TSOH_STD_SIZE);
		map = txq->tsoh_buffer[page_index].esm_map;

		stmp->flags = 0;
	} else {
		/* We cannot use bus_dmamem_alloc() as that may sleep */
		header = malloc(tso->header_len, M_SFXGE, M_NOWAIT);
		if (__predict_false(!header))
			return (ENOMEM);
		rc = bus_dmamap_load(txq->packet_dma_tag, stmp->map,
				     header, tso->header_len,
				     tso_map_long_header, &dma_addr,
				     BUS_DMA_NOWAIT);
		if (__predict_false(dma_addr == 0)) {
			if (rc == 0) {
				/* Succeeded but got >1 segment */
				bus_dmamap_unload(txq->packet_dma_tag,
						  stmp->map);
				rc = EINVAL;
			}
			free(header, M_SFXGE);
			return (rc);
		}
		map = stmp->map;

		txq->tso_long_headers++;
		stmp->u.heap_buf = header;
		stmp->flags = TX_BUF_UNMAP;
	}

	tsoh_th = (struct tcphdr *)(header + tso->tcph_off);

	/* Copy and update the headers. */
	m_copydata(tso->mbuf, 0, tso->header_len, header);

	tsoh_th->th_seq = htonl(tso->seqnum);
	tso->seqnum += tso->seg_size;
	if (tso->out_len > tso->seg_size) {
		/* This packet will not finish the TSO burst. */
		ip_length = tso->header_len - tso->nh_off + tso->seg_size;
		tsoh_th->th_flags &= ~(TH_FIN | TH_PUSH);
	} else {
		/* This packet will be the last in the TSO burst. */
		ip_length = tso->header_len - tso->nh_off + tso->out_len;
	}

	if (tso->protocol == htons(ETHERTYPE_IP)) {
		struct ip *tsoh_iph = (struct ip *)(header + tso->nh_off);
		tsoh_iph->ip_len = htons(ip_length);
		/* XXX We should increment ip_id, but FreeBSD doesn't
		 * currently allocate extra IDs for multiple segments.
		 */
	} else {
		struct ip6_hdr *tsoh_iph =
			(struct ip6_hdr *)(header + tso->nh_off);
		tsoh_iph->ip6_plen = htons(ip_length - sizeof(*tsoh_iph));
	}

	/* Make the header visible to the hardware. */
	bus_dmamap_sync(txq->packet_dma_tag, map, BUS_DMASYNC_PREWRITE);

	tso->packet_space = tso->seg_size;
	txq->tso_packets++;

	/* Form a descriptor for this header. */
	desc = &txq->pend_desc[txq->n_pend_desc++];
	desc->eb_addr = dma_addr;
	desc->eb_size = tso->header_len;
	desc->eb_eop = 0;

	return (0);
}

static int
sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
		   const bus_dma_segment_t *dma_seg, int n_dma_seg)
{
	struct sfxge_tso_state tso;
	unsigned int id, next_id;
	unsigned skipped = 0;

	tso_start(&tso, mbuf);

	while (dma_seg->ds_len + skipped <= tso.header_len) {
		skipped += dma_seg->ds_len;
		--n_dma_seg;
		KASSERT(n_dma_seg, ("no payload found in TSO packet"));
		++dma_seg;
	}
	tso.in_len = dma_seg->ds_len - (tso.header_len - skipped);
	tso.dma_addr = dma_seg->ds_addr + (tso.header_len - skipped);

	id = txq->added & txq->ptr_mask;
	if (__predict_false(tso_start_new_packet(txq, &tso, id)))
		return (-1);

	while (1) {
		id = (id + 1) & txq->ptr_mask;
		tso_fill_packet_with_fragment(txq, &tso);

		/* Move onto the next fragment? */
		if (tso.in_len == 0) {
			--n_dma_seg;
			if (n_dma_seg == 0)
				break;
			++dma_seg;
			tso.in_len = dma_seg->ds_len;
			tso.dma_addr = dma_seg->ds_addr;
		}

		/* End of packet? */
		if (tso.packet_space == 0) {
			/* If the queue is now full due to tiny MSS,
			 * or we can't create another header, discard
			 * the remainder of the input mbuf but do not
			 * roll back the work we have done.
			 */
			if (txq->n_pend_desc + 1 /* header */ + n_dma_seg >
			    SFXGE_TSO_MAX_DESC) {
				txq->tso_pdrop_too_many++;
				break;
			}
			next_id = (id + 1) & txq->ptr_mask;
			if (__predict_false(tso_start_new_packet(txq, &tso,
								 next_id))) {
				txq->tso_pdrop_no_rsrc++;
				break;
			}
			id = next_id;
		}
	}

	txq->tso_bursts++;
	return (id);
}

static void
sfxge_tx_qunblock(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc;
	struct sfxge_evq *evq;

	sc = txq->sc;
	evq = sc->evq[txq->evq_index];

	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	if (txq->init_state != SFXGE_TXQ_STARTED)
		return;

	SFXGE_TXQ_LOCK(txq);

	if (txq->blocked) {
		unsigned int level;

		level = txq->added - txq->completed;
		if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries)) {
			/* reaped must be in sync with blocked */
			sfxge_tx_qreap(txq);
			txq->blocked = 0;
		}
	}

	sfxge_tx_qdpl_service(txq);
	/* note: lock has been dropped */
}

void
sfxge_tx_qflush_done(struct sfxge_txq *txq)
{

	txq->flush_state = SFXGE_FLUSH_DONE;
}

static void
sfxge_tx_qstop(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_txq *txq;
	struct sfxge_evq *evq;
	unsigned int count;

	txq = sc->txq[index];
	evq = sc->evq[txq->evq_index];

	SFXGE_TXQ_LOCK(txq);

	KASSERT(txq->init_state == SFXGE_TXQ_STARTED,
		("txq->init_state != SFXGE_TXQ_STARTED"));

	txq->init_state = SFXGE_TXQ_INITIALIZED;
	txq->flush_state = SFXGE_FLUSH_PENDING;

	/* Flush the transmit queue. */
	efx_tx_qflush(txq->common);

	SFXGE_TXQ_UNLOCK(txq);

	count = 0;
	do {
		/* Spin for 100ms. */
		DELAY(100000);

		if (txq->flush_state != SFXGE_FLUSH_PENDING)
			break;
	} while (++count < 20);

	SFXGE_EVQ_LOCK(evq);
	SFXGE_TXQ_LOCK(txq);

	KASSERT(txq->flush_state != SFXGE_FLUSH_FAILED,
		("txq->flush_state == SFXGE_FLUSH_FAILED"));

	txq->flush_state = SFXGE_FLUSH_DONE;

	txq->blocked = 0;
	txq->pending = txq->added;

	sfxge_tx_qcomplete(txq, evq);
	KASSERT(txq->completed == txq->added,
		("txq->completed != txq->added"));

	sfxge_tx_qreap(txq);
	KASSERT(txq->reaped == txq->completed,
		("txq->reaped != txq->completed"));

	txq->added = 0;
	txq->pending = 0;
	txq->completed = 0;
	txq->reaped = 0;

	/* Destroy the common code transmit queue. */
	efx_tx_qdestroy(txq->common);
	txq->common = NULL;

	efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
			       EFX_TXQ_NBUFS(sc->txq_entries));

	SFXGE_EVQ_UNLOCK(evq);
	SFXGE_TXQ_UNLOCK(txq);
}

static int
sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_txq *txq;
	efsys_mem_t *esmp;
	uint16_t flags;
	struct sfxge_evq *evq;
	int rc;

	txq = sc->txq[index];
	esmp = &txq->mem;
	evq = sc->evq[txq->evq_index];

	KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
		("txq->init_state != SFXGE_TXQ_INITIALIZED"));
	KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
		("evq->init_state != SFXGE_EVQ_STARTED"));

	/* Program the buffer table. */
	if ((rc = efx_sram_buf_tbl_set(sc->enp, txq->buf_base_id, esmp,
				       EFX_TXQ_NBUFS(sc->txq_entries))) != 0)
		return (rc);

	/* Determine the kind of queue we are creating. */
	switch (txq->type) {
	case SFXGE_TXQ_NON_CKSUM:
		flags = 0;
		break;
	case SFXGE_TXQ_IP_CKSUM:
		flags = EFX_CKSUM_IPV4;
		break;
	case SFXGE_TXQ_IP_TCP_UDP_CKSUM:
		flags = EFX_CKSUM_IPV4 | EFX_CKSUM_TCPUDP;
		break;
	default:
		KASSERT(0, ("Impossible TX queue"));
		flags = 0;
		break;
	}

	/* Create the common code transmit queue. */
	if ((rc = efx_tx_qcreate(sc->enp, index, txq->type, esmp,
	    sc->txq_entries, txq->buf_base_id, flags, evq->common,
	    &txq->common)) != 0)
		goto fail;

	SFXGE_TXQ_LOCK(txq);

	/* Enable the transmit queue. */
	efx_tx_qenable(txq->common);

	txq->init_state = SFXGE_TXQ_STARTED;

	SFXGE_TXQ_UNLOCK(txq);

	return (0);

fail:
	efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
			       EFX_TXQ_NBUFS(sc->txq_entries));
	return (rc);
}

void
sfxge_tx_stop(struct sfxge_softc *sc)
{
	int index;

	index = sc->txq_count;
	while (--index >= 0)
		sfxge_tx_qstop(sc, index);

	/* Tear down the transmit module */
	efx_tx_fini(sc->enp);
}

int
sfxge_tx_start(struct sfxge_softc *sc)
{
	int index;
	int rc;

	/* Initialize the common code transmit module. */
	if ((rc = efx_tx_init(sc->enp)) != 0)
		return (rc);

	for (index = 0; index < sc->txq_count; index++) {
		if ((rc = sfxge_tx_qstart(sc, index)) != 0)
			goto fail;
	}

	return (0);

fail:
	while (--index >= 0)
		sfxge_tx_qstop(sc, index);

	efx_tx_fini(sc->enp);

	return (rc);
}

/**
 * Destroy a transmit queue.
 */
static void
sfxge_tx_qfini(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_txq *txq;
	unsigned int nmaps;

	txq = sc->txq[index];

	KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
		("txq->init_state != SFXGE_TXQ_INITIALIZED"));

	if (txq->type == SFXGE_TXQ_IP_TCP_UDP_CKSUM)
		tso_fini(txq);

	/* Free the context arrays. */
	free(txq->pend_desc, M_SFXGE);
	nmaps = sc->txq_entries;
	while (nmaps-- != 0)
		bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
	free(txq->stmp, M_SFXGE);

	/* Release DMA memory mapping. */
	sfxge_dma_free(&txq->mem);

	sc->txq[index] = NULL;

#ifdef SFXGE_HAVE_MQ
	SFXGE_TXQ_LOCK_DESTROY(txq);
#endif

	free(txq, M_SFXGE);
}
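
/*
 * Editorial note (not upstream): sfxge_tx_qinit() below allocates, per Tx
 * queue, the DMA-able descriptor ring memory and matching buffer table
 * entries, a DMA tag plus one DMA map per ring entry for packet mappings,
 * the pend_desc staging array used to batch descriptor writes, the per-queue
 * sysctl node, TSO header pages for the full-offload queue type, and (with
 * SFXGE_HAVE_MQ) the deferred packet list limits validated from the
 * hw.sfxge.* tunables declared near the top of this file.
 */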

static int
sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index,
	       enum sfxge_txq_type type, unsigned int evq_index)
{
	char name[16];
	struct sysctl_oid *txq_node;
	struct sfxge_txq *txq;
	struct sfxge_evq *evq;
#ifdef SFXGE_HAVE_MQ
	struct sfxge_tx_dpl *stdp;
#endif
	efsys_mem_t *esmp;
	unsigned int nmaps;
	int rc;

	txq = malloc(sizeof(struct sfxge_txq), M_SFXGE, M_ZERO | M_WAITOK);
	txq->sc = sc;
	txq->entries = sc->txq_entries;
	txq->ptr_mask = txq->entries - 1;

	sc->txq[txq_index] = txq;
	esmp = &txq->mem;

	evq = sc->evq[evq_index];

	/* Allocate and zero DMA space for the descriptor ring. */
	if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(sc->txq_entries), esmp)) != 0)
		return (rc);

	/* Allocate buffer table entries. */
	sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(sc->txq_entries),
				 &txq->buf_base_id);

	/* Create a DMA tag for packet mappings. */
	if (bus_dma_tag_create(sc->parent_dma_tag, 1, 0x1000,
	    MIN(0x3FFFFFFFFFFFUL, BUS_SPACE_MAXADDR), BUS_SPACE_MAXADDR, NULL,
	    NULL, 0x11000, SFXGE_TX_MAPPING_MAX_SEG, 0x1000, 0, NULL, NULL,
	    &txq->packet_dma_tag) != 0) {
		device_printf(sc->dev, "Couldn't allocate txq DMA tag\n");
		rc = ENOMEM;
		goto fail;
	}

	/* Allocate pending descriptor array for batching writes. */
	txq->pend_desc = malloc(sizeof(efx_buffer_t) * sc->txq_entries,
				M_SFXGE, M_ZERO | M_WAITOK);

	/* Allocate and initialise mbuf DMA mapping array. */
	txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * sc->txq_entries,
			   M_SFXGE, M_ZERO | M_WAITOK);
	for (nmaps = 0; nmaps < sc->txq_entries; nmaps++) {
		rc = bus_dmamap_create(txq->packet_dma_tag, 0,
				       &txq->stmp[nmaps].map);
		if (rc != 0)
			goto fail2;
	}

	snprintf(name, sizeof(name), "%u", txq_index);
	txq_node = SYSCTL_ADD_NODE(
		device_get_sysctl_ctx(sc->dev),
		SYSCTL_CHILDREN(sc->txqs_node),
		OID_AUTO, name, CTLFLAG_RD, NULL, "");
	if (txq_node == NULL) {
		rc = ENOMEM;
		goto fail_txq_node;
	}

	if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM &&
	    (rc = tso_init(txq)) != 0)
		goto fail3;

#ifdef SFXGE_HAVE_MQ
	if (sfxge_tx_dpl_get_max <= 0) {
		log(LOG_ERR, "%s=%d must be greater than 0",
		    SFXGE_PARAM_TX_DPL_GET_MAX, sfxge_tx_dpl_get_max);
		rc = EINVAL;
		goto fail_tx_dpl_get_max;
	}
	if (sfxge_tx_dpl_get_non_tcp_max <= 0) {
		log(LOG_ERR, "%s=%d must be greater than 0",
		    SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX,
		    sfxge_tx_dpl_get_non_tcp_max);
		rc = EINVAL;
		goto fail_tx_dpl_get_max;
	}
	if (sfxge_tx_dpl_put_max < 0) {
		log(LOG_ERR, "%s=%d must be greater than or equal to 0",
		    SFXGE_PARAM_TX_DPL_PUT_MAX, sfxge_tx_dpl_put_max);
		rc = EINVAL;
		goto fail_tx_dpl_put_max;
	}

	/* Initialize the deferred packet list. */
	stdp = &txq->dpl;
	stdp->std_put_max = sfxge_tx_dpl_put_max;
	stdp->std_get_max = sfxge_tx_dpl_get_max;
	stdp->std_get_non_tcp_max = sfxge_tx_dpl_get_non_tcp_max;
	stdp->std_getp = &stdp->std_get;

	SFXGE_TXQ_LOCK_INIT(txq, device_get_nameunit(sc->dev), txq_index);

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(sc->dev),
			SYSCTL_CHILDREN(txq_node), OID_AUTO,
			"dpl_get_count", CTLFLAG_RD | CTLFLAG_STATS,
			&stdp->std_get_count, 0, "");
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(sc->dev),
			SYSCTL_CHILDREN(txq_node), OID_AUTO,
			"dpl_get_non_tcp_count", CTLFLAG_RD | CTLFLAG_STATS,
			&stdp->std_get_non_tcp_count, 0, "");
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(sc->dev),
			SYSCTL_CHILDREN(txq_node), OID_AUTO,
			"dpl_get_hiwat", CTLFLAG_RD | CTLFLAG_STATS,
			&stdp->std_get_hiwat, 0, "");
#endif

	txq->type = type;
	txq->evq_index = evq_index;
	txq->txq_index = txq_index;
	txq->init_state = SFXGE_TXQ_INITIALIZED;

	return (0);

fail_tx_dpl_put_max:
fail_tx_dpl_get_max:
fail3:
fail_txq_node:
	free(txq->pend_desc, M_SFXGE);
fail2:
	while (nmaps-- != 0)
		bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
	free(txq->stmp, M_SFXGE);
	bus_dma_tag_destroy(txq->packet_dma_tag);

fail:
	sfxge_dma_free(esmp);

	return (rc);
}

static const struct {
	const char *name;
	size_t offset;
} sfxge_tx_stats[] = {
#define	SFXGE_TX_STAT(name, member) \
	{ #name, offsetof(struct sfxge_txq, member) }
	SFXGE_TX_STAT(tso_bursts, tso_bursts),
	SFXGE_TX_STAT(tso_packets, tso_packets),
	SFXGE_TX_STAT(tso_long_headers, tso_long_headers),
	SFXGE_TX_STAT(tso_pdrop_too_many, tso_pdrop_too_many),
	SFXGE_TX_STAT(tso_pdrop_no_rsrc, tso_pdrop_no_rsrc),
	SFXGE_TX_STAT(tx_collapses, collapses),
	SFXGE_TX_STAT(tx_drops, drops),
	SFXGE_TX_STAT(tx_get_overflow, get_overflow),
	SFXGE_TX_STAT(tx_get_non_tcp_overflow, get_non_tcp_overflow),
	SFXGE_TX_STAT(tx_put_overflow, put_overflow),
	SFXGE_TX_STAT(tx_netdown_drops, netdown_drops),
};

static int
sfxge_tx_stat_handler(SYSCTL_HANDLER_ARGS)
{
	struct sfxge_softc *sc = arg1;
	unsigned int id = arg2;
	unsigned long sum;
	unsigned int index;

	/* Sum across all TX queues */
	sum = 0;
	for (index = 0; index < sc->txq_count; index++)
		sum += *(unsigned long *)((caddr_t)sc->txq[index] +
					  sfxge_tx_stats[id].offset);

	return (SYSCTL_OUT(req, &sum, sizeof(sum)));
}

static void
sfxge_tx_stat_init(struct sfxge_softc *sc)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
	struct sysctl_oid_list *stat_list;
	unsigned int id;

	stat_list = SYSCTL_CHILDREN(sc->stats_node);

	for (id = 0; id < nitems(sfxge_tx_stats); id++) {
		SYSCTL_ADD_PROC(
			ctx, stat_list,
			OID_AUTO, sfxge_tx_stats[id].name,
			CTLTYPE_ULONG|CTLFLAG_RD,
			sc, id, sfxge_tx_stat_handler, "LU",
			"");
	}
}

uint64_t
sfxge_tx_get_drops(struct sfxge_softc *sc)
{
	unsigned int index;
	uint64_t drops = 0;
	struct sfxge_txq *txq;

	/* Sum across all TX queues */
	for (index = 0; index < sc->txq_count; index++) {
		txq = sc->txq[index];
		/*
		 * In theory, txq->put_overflow and txq->netdown_drops
		 * should use atomic operations and the others should be
		 * read under the txq lock, but these are just statistics.
		 */
		drops += txq->drops + txq->get_overflow +
			 txq->get_non_tcp_overflow +
			 txq->put_overflow + txq->netdown_drops +
			 txq->tso_pdrop_too_many + txq->tso_pdrop_no_rsrc;
	}
	return (drops);
}

void
sfxge_tx_fini(struct sfxge_softc *sc)
{
	int index;

	index = sc->txq_count;
	while (--index >= 0)
		sfxge_tx_qfini(sc, index);

	sc->txq_count = 0;
}


int
sfxge_tx_init(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr;
	int index;
	int rc;

	intr = &sc->intr;

	KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
		("intr->state != SFXGE_INTR_INITIALIZED"));

#ifdef SFXGE_HAVE_MQ
	sc->txq_count = SFXGE_TXQ_NTYPES - 1 + sc->intr.n_alloc;
#else
	sc->txq_count = SFXGE_TXQ_NTYPES;
#endif

	sc->txqs_node = SYSCTL_ADD_NODE(
		device_get_sysctl_ctx(sc->dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)),
		OID_AUTO, "txq", CTLFLAG_RD, NULL, "Tx queues");
	if (sc->txqs_node == NULL) {
		rc = ENOMEM;
		goto fail_txq_node;
	}

	/* Initialize the transmit queues */
	if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NON_CKSUM,
	    SFXGE_TXQ_NON_CKSUM, 0)) != 0)
		goto fail;

	if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_CKSUM,
	    SFXGE_TXQ_IP_CKSUM, 0)) != 0)
		goto fail2;

	for (index = 0;
	     index < sc->txq_count - SFXGE_TXQ_NTYPES + 1;
	     index++) {
		if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NTYPES - 1 + index,
		    SFXGE_TXQ_IP_TCP_UDP_CKSUM, index)) != 0)
			goto fail3;
	}

	sfxge_tx_stat_init(sc);

	return (0);

fail3:
	while (--index >= 0)
		sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);

	sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM);

fail2:
	sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM);

fail:
fail_txq_node:
	sc->txq_count = 0;
	return (rc);
}