sfxge_tx.c revision 278836
/*-
 * Copyright (c) 2010-2011 Solarflare Communications, Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Theory of operation:
 *
 * Tx queues allocation and mapping
 *
 * One Tx queue with enabled checksum offload is allocated per Rx channel
 * (event queue). Also 2 Tx queues (one without checksum offload and one
 * with IP checksum offload only) are allocated and bound to event queue 0.
 * sfxge_txq_type is used as Tx queue label.
 *
 * So, event queue plus label mapping to Tx queue index is:
 * if event queue index is 0, TxQ-index = TxQ-label, where TxQ-label is
 * in [0..SFXGE_TXQ_NTYPES)
 * else TxQ-index = SFXGE_TXQ_NTYPES + EvQ-index - 1
 * See sfxge_get_txq_by_label() in sfxge_ev.c
 */

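/*
 * Editor's note (illustrative example, not in the original revision): with
 * the mapping above, a 4-channel configuration lays out Tx queues as
 *
 *   EvQ 0: TxQ 0 (SFXGE_TXQ_NON_CKSUM), TxQ 1 (SFXGE_TXQ_IP_CKSUM),
 *          TxQ 2 (SFXGE_TXQ_IP_TCP_UDP_CKSUM)
 *   EvQ 1: TxQ 3 = SFXGE_TXQ_NTYPES + 1 - 1
 *   EvQ 2: TxQ 4 = SFXGE_TXQ_NTYPES + 2 - 1
 *   EvQ 3: TxQ 5 = SFXGE_TXQ_NTYPES + 3 - 1
 *
 * assuming SFXGE_TXQ_NTYPES == 3, i.e. the three labels listed above.
 */
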
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/sfxge/sfxge_tx.c 278836 2015-02-16 06:07:01Z arybchik $");

#include <sys/types.h>
#include <sys/mbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>

#include "common/efx.h"

#include "sfxge.h"
#include "sfxge_tx.h"

/* Set the block level to ensure there is space to generate a
 * large number of descriptors for TSO.  With minimum MSS and
 * maximum mbuf length we might need more than a ring-ful of
 * descriptors, but this should not happen in practice except
 * due to deliberate attack.  In that case we will truncate
 * the output at a packet boundary.  Allow for a reasonable
 * minimum MSS of 512.
 */
#define	SFXGE_TSO_MAX_DESC ((65535 / 512) * 2 + SFXGE_TX_MAPPING_MAX_SEG - 1)
#define	SFXGE_TXQ_BLOCK_LEVEL(_entries)	((_entries) - SFXGE_TSO_MAX_DESC)
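
/*
 * Editor's note (illustrative arithmetic, not part of the original file):
 * the bound above works out as 65535 / 512 = 127 maximal-length TSO
 * packets, each needing a header descriptor and a payload descriptor
 * (254 total), plus up to SFXGE_TX_MAPPING_MAX_SEG - 1 further payload
 * descriptors where input DMA segment boundaries fall inside an output
 * packet.  If, for example, SFXGE_TX_MAPPING_MAX_SEG were 32 and the ring
 * had 1024 entries, SFXGE_TSO_MAX_DESC would be 285 and
 * SFXGE_TXQ_BLOCK_LEVEL() would be 739.
 */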

#ifdef SFXGE_HAVE_MQ

#define	SFXGE_PARAM_TX_DPL_GET_MAX	SFXGE_PARAM(tx_dpl_get_max)
static int sfxge_tx_dpl_get_max = SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_MAX, &sfxge_tx_dpl_get_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_get_max, 0,
	   "Maximum number of any packets in deferred packet get-list");

#define	SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX \
	SFXGE_PARAM(tx_dpl_get_non_tcp_max)
static int sfxge_tx_dpl_get_non_tcp_max =
	SFXGE_TX_DPL_GET_NON_TCP_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX, &sfxge_tx_dpl_get_non_tcp_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_non_tcp_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_get_non_tcp_max, 0,
	   "Maximum number of non-TCP packets in deferred packet get-list");

#define	SFXGE_PARAM_TX_DPL_PUT_MAX	SFXGE_PARAM(tx_dpl_put_max)
static int sfxge_tx_dpl_put_max = SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_PUT_MAX, &sfxge_tx_dpl_put_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_put_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_put_max, 0,
	   "Maximum number of any packets in deferred packet put-list");

#endif


/* Forward declarations. */
static inline void sfxge_tx_qdpl_service(struct sfxge_txq *txq);
static void sfxge_tx_qlist_post(struct sfxge_txq *txq);
static void sfxge_tx_qunblock(struct sfxge_txq *txq);
static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
			      const bus_dma_segment_t *dma_seg, int n_dma_seg);

void
sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq)
{
	unsigned int completed;

	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	completed = txq->completed;
	while (completed != txq->pending) {
		struct sfxge_tx_mapping *stmp;
		unsigned int id;

		id = completed++ & txq->ptr_mask;

		stmp = &txq->stmp[id];
		if (stmp->flags & TX_BUF_UNMAP) {
			bus_dmamap_unload(txq->packet_dma_tag, stmp->map);
			if (stmp->flags & TX_BUF_MBUF) {
				struct mbuf *m = stmp->u.mbuf;
				do
					m = m_free(m);
				while (m != NULL);
			} else {
				free(stmp->u.heap_buf, M_SFXGE);
			}
			stmp->flags = 0;
		}
	}
	txq->completed = completed;

	/* Check whether we need to unblock the queue. */
	mb();
	if (txq->blocked) {
		unsigned int level;

		level = txq->added - txq->completed;
		if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries))
			sfxge_tx_qunblock(txq);
	}
}

#ifdef SFXGE_HAVE_MQ

static inline unsigned int
sfxge_is_mbuf_non_tcp(struct mbuf *mbuf)
{
	/* Absence of TCP checksum flags does not necessarily mean that
	 * the packet is non-TCP, but it should hold if the user wants
	 * to achieve high throughput.
	 */
	return (!(mbuf->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)));
}

/*
 * Reorder the put list and append it to the get list.
 */
static void
sfxge_tx_qdpl_swizzle(struct sfxge_txq *txq)
{
	struct sfxge_tx_dpl *stdp;
	struct mbuf *mbuf, *get_next, **get_tailp;
	volatile uintptr_t *putp;
	uintptr_t put;
	unsigned int count;
	unsigned int non_tcp_count;

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	stdp = &txq->dpl;

	/* Acquire the put list. */
	putp = &stdp->std_put;
	put = atomic_readandclear_ptr(putp);
	mbuf = (void *)put;

	if (mbuf == NULL)
		return;

	/* Reverse the put list. */
	get_tailp = &mbuf->m_nextpkt;
	get_next = NULL;

	count = 0;
	non_tcp_count = 0;
	do {
		struct mbuf *put_next;

		non_tcp_count += sfxge_is_mbuf_non_tcp(mbuf);
		put_next = mbuf->m_nextpkt;
		mbuf->m_nextpkt = get_next;
		get_next = mbuf;
		mbuf = put_next;

		count++;
	} while (mbuf != NULL);

	/* Append the reversed put list to the get list. */
	KASSERT(*get_tailp == NULL, ("*get_tailp != NULL"));
	*stdp->std_getp = get_next;
	stdp->std_getp = get_tailp;
	stdp->std_get_count += count;
	stdp->std_get_non_tcp_count += non_tcp_count;
}

#endif /* SFXGE_HAVE_MQ */

static void
sfxge_tx_qreap(struct sfxge_txq *txq)
{
	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	txq->reaped = txq->completed;
}

static void
sfxge_tx_qlist_post(struct sfxge_txq *txq)
{
	unsigned int old_added;
	unsigned int level;
	int rc;

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	KASSERT(txq->n_pend_desc != 0, ("txq->n_pend_desc == 0"));
	KASSERT(txq->n_pend_desc <= SFXGE_TSO_MAX_DESC,
		("txq->n_pend_desc too large"));
	KASSERT(!txq->blocked, ("txq->blocked"));

	old_added = txq->added;

	/* Post the fragment list. */
	rc = efx_tx_qpost(txq->common, txq->pend_desc, txq->n_pend_desc,
			  txq->reaped, &txq->added);
	KASSERT(rc == 0, ("efx_tx_qpost() failed"));

	/* If efx_tx_qpost() had to refragment, our information about
	 * buffers to free may be associated with the wrong
	 * descriptors.
	 */
	KASSERT(txq->added - old_added == txq->n_pend_desc,
		("efx_tx_qpost() refragmented descriptors"));

	level = txq->added - txq->reaped;
	KASSERT(level <= txq->entries, ("overfilled TX queue"));

	/* Clear the fragment list. */
	txq->n_pend_desc = 0;

	/* Have we reached the block level? */
	if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries))
		return;

	/* Reap, and check again */
	sfxge_tx_qreap(txq);
	level = txq->added - txq->reaped;
	if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries))
		return;

	txq->blocked = 1;

	/*
	 * Avoid a race with completion interrupt handling that could leave
	 * the queue blocked.
	 */
	mb();
	sfxge_tx_qreap(txq);
	level = txq->added - txq->reaped;
	if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries)) {
		mb();
		txq->blocked = 0;
	}
}

static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
{
	bus_dmamap_t *used_map;
	bus_dmamap_t map;
	bus_dma_segment_t dma_seg[SFXGE_TX_MAPPING_MAX_SEG];
	unsigned int id;
	struct sfxge_tx_mapping *stmp;
	efx_buffer_t *desc;
	int n_dma_seg;
	int rc;
	int i;

	KASSERT(!txq->blocked, ("txq->blocked"));

	if (mbuf->m_pkthdr.csum_flags & CSUM_TSO)
		prefetch_read_many(mbuf->m_data);

	if (txq->init_state != SFXGE_TXQ_STARTED) {
		rc = EINTR;
		goto reject;
	}

	/* Load the packet for DMA. */
	id = txq->added & txq->ptr_mask;
	stmp = &txq->stmp[id];
	rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map,
				     mbuf, dma_seg, &n_dma_seg, 0);
	if (rc == EFBIG) {
		/* Try again. */
		struct mbuf *new_mbuf = m_collapse(mbuf, M_NOWAIT,
						   SFXGE_TX_MAPPING_MAX_SEG);
		if (new_mbuf == NULL)
			goto reject;
		++txq->collapses;
		mbuf = new_mbuf;
		rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag,
					     stmp->map, mbuf,
					     dma_seg, &n_dma_seg, 0);
	}
	if (rc != 0)
		goto reject;

	/* Make the packet visible to the hardware. */
	bus_dmamap_sync(txq->packet_dma_tag, stmp->map, BUS_DMASYNC_PREWRITE);

	used_map = &stmp->map;

	if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
		rc = sfxge_tx_queue_tso(txq, mbuf, dma_seg, n_dma_seg);
		if (rc < 0)
			goto reject_mapped;
		stmp = &txq->stmp[rc];
	} else {
		/* Add the mapping to the fragment list, and set flags
		 * for the buffer.
		 */
		i = 0;
		for (;;) {
			desc = &txq->pend_desc[i];
			desc->eb_addr = dma_seg[i].ds_addr;
			desc->eb_size = dma_seg[i].ds_len;
			if (i == n_dma_seg - 1) {
				desc->eb_eop = 1;
				break;
			}
			desc->eb_eop = 0;
			i++;

			stmp->flags = 0;
			if (__predict_false(stmp ==
					    &txq->stmp[txq->ptr_mask]))
				stmp = &txq->stmp[0];
			else
				stmp++;
		}
		txq->n_pend_desc = n_dma_seg;
	}

	/*
	 * If the mapping required more than one descriptor
	 * then we need to associate the DMA map with the last
	 * descriptor, not the first.
	 */
	if (used_map != &stmp->map) {
		map = stmp->map;
		stmp->map = *used_map;
		*used_map = map;
	}

	stmp->u.mbuf = mbuf;
	stmp->flags = TX_BUF_UNMAP | TX_BUF_MBUF;

	/* Post the fragment list. */
	sfxge_tx_qlist_post(txq);

	return (0);

reject_mapped:
	bus_dmamap_unload(txq->packet_dma_tag, *used_map);
reject:
	/* Drop the packet on the floor. */
	m_freem(mbuf);
	++txq->drops;

	return (rc);
}

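/*
 * Editor's note (illustrative walk-through, not in the original file):
 * suppose a non-TSO mbuf maps to three DMA segments and is loaded at ring
 * index 10.  sfxge_tx_queue_mbuf() above fills descriptors for ring
 * positions 10..12, then swaps the DMA map that was loaded via stmp[10]
 * into stmp[12] and marks stmp[12] TX_BUF_UNMAP | TX_BUF_MBUF.
 * sfxge_tx_qcomplete() walks completed indices in order, so the map is
 * unloaded and the mbuf freed only once descriptor 12 (i.e. the whole
 * packet) has completed.
 */
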
#ifdef SFXGE_HAVE_MQ

/*
 * Drain the deferred packet list into the transmit queue.
 */
static void
sfxge_tx_qdpl_drain(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc;
	struct sfxge_tx_dpl *stdp;
	struct mbuf *mbuf, *next;
	unsigned int count;
	unsigned int non_tcp_count;
	unsigned int pushed;
	int rc;

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	sc = txq->sc;
	stdp = &txq->dpl;
	pushed = txq->added;

	prefetch_read_many(sc->enp);
	prefetch_read_many(txq->common);

	mbuf = stdp->std_get;
	count = stdp->std_get_count;
	non_tcp_count = stdp->std_get_non_tcp_count;

	if (count > stdp->std_get_hiwat)
		stdp->std_get_hiwat = count;

	while (count != 0) {
		KASSERT(mbuf != NULL, ("mbuf == NULL"));

		next = mbuf->m_nextpkt;
		mbuf->m_nextpkt = NULL;

		ETHER_BPF_MTAP(sc->ifnet, mbuf); /* packet capture */

		if (next != NULL)
			prefetch_read_many(next);

		rc = sfxge_tx_queue_mbuf(txq, mbuf);
		--count;
		non_tcp_count -= sfxge_is_mbuf_non_tcp(mbuf);
		mbuf = next;
		if (rc != 0)
			continue;

		if (txq->blocked)
			break;

		/* Push the fragments to the hardware in batches. */
		if (txq->added - pushed >= SFXGE_TX_BATCH) {
			efx_tx_qpush(txq->common, txq->added);
			pushed = txq->added;
		}
	}

	if (count == 0) {
		KASSERT(mbuf == NULL, ("mbuf != NULL"));
		KASSERT(non_tcp_count == 0,
			("inconsistent TCP/non-TCP detection"));
		stdp->std_get = NULL;
		stdp->std_get_count = 0;
		stdp->std_get_non_tcp_count = 0;
		stdp->std_getp = &stdp->std_get;
	} else {
		stdp->std_get = mbuf;
		stdp->std_get_count = count;
		stdp->std_get_non_tcp_count = non_tcp_count;
	}

	if (txq->added != pushed)
		efx_tx_qpush(txq->common, txq->added);

	KASSERT(txq->blocked || stdp->std_get_count == 0,
		("queue unblocked but count is non-zero"));
}

#define	SFXGE_TX_QDPL_PENDING(_txq)	\
	((_txq)->dpl.std_put != 0)

/*
 * Service the deferred packet list.
 *
 * NOTE: drops the txq mutex!
 */
static inline void
sfxge_tx_qdpl_service(struct sfxge_txq *txq)
{
	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	do {
		if (SFXGE_TX_QDPL_PENDING(txq))
			sfxge_tx_qdpl_swizzle(txq);

		if (!txq->blocked)
			sfxge_tx_qdpl_drain(txq);

		SFXGE_TXQ_UNLOCK(txq);
	} while (SFXGE_TX_QDPL_PENDING(txq) &&
		 SFXGE_TXQ_TRYLOCK(txq));
}

/*
 * Put a packet on the deferred packet list.
 *
 * If we are called with the txq lock held, we put the packet on the "get
 * list", otherwise we atomically push it on the "put list".  The swizzle
 * function takes care of ordering.
 *
 * The length of the put list is bounded by stdp->std_put_max (see the
 * tx_dpl_put_max tunable above).  We overload the csum_data field in the
 * mbuf to keep track of this length because there is no cheap alternative
 * to avoid races.
 */
static inline int
sfxge_tx_qdpl_put(struct sfxge_txq *txq, struct mbuf *mbuf, int locked)
{
	struct sfxge_tx_dpl *stdp;

	stdp = &txq->dpl;

	KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL"));

	if (locked) {
		SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

		sfxge_tx_qdpl_swizzle(txq);

		if (stdp->std_get_count >= stdp->std_get_max) {
			txq->get_overflow++;
			return (ENOBUFS);
		}
		if (sfxge_is_mbuf_non_tcp(mbuf)) {
			if (stdp->std_get_non_tcp_count >=
			    stdp->std_get_non_tcp_max) {
				txq->get_non_tcp_overflow++;
				return (ENOBUFS);
			}
			stdp->std_get_non_tcp_count++;
		}

		*(stdp->std_getp) = mbuf;
		stdp->std_getp = &mbuf->m_nextpkt;
		stdp->std_get_count++;
	} else {
		volatile uintptr_t *putp;
		uintptr_t old;
		uintptr_t new;
		unsigned old_len;

		putp = &stdp->std_put;
		new = (uintptr_t)mbuf;

		do {
			old = *putp;
			if (old != 0) {
				struct mbuf *mp = (struct mbuf *)old;
				old_len = mp->m_pkthdr.csum_data;
			} else
				old_len = 0;
			if (old_len >= stdp->std_put_max) {
				atomic_add_long(&txq->put_overflow, 1);
				return (ENOBUFS);
			}
			mbuf->m_pkthdr.csum_data = old_len + 1;
			mbuf->m_nextpkt = (void *)old;
		} while (atomic_cmpset_ptr(putp, old, new) == 0);
	}

	return (0);
}

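/*
 * Editor's note (illustrative, not in the original file): on the lock-free
 * path above, the head mbuf of the put list carries the current list length
 * in its csum_data field.  For example, if the put list already holds three
 * packets, the head's csum_data is 3; a new packet records csum_data = 4,
 * links the old head behind itself via m_nextpkt, and becomes the new head
 * once atomic_cmpset_ptr() succeeds.  sfxge_tx_qdpl_swizzle() later reverses
 * this LIFO chain so packets are transmitted in arrival order.
 */
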
/*
 * Called from if_transmit - will try to grab the txq lock and, if
 * successful, append the packet to the "get list"; otherwise the packet
 * is pushed onto the "put list".
 */
int
sfxge_tx_packet_add(struct sfxge_txq *txq, struct mbuf *m)
{
	int locked;
	int rc;

	if (!SFXGE_LINK_UP(txq->sc)) {
		rc = ENETDOWN;
		atomic_add_long(&txq->netdown_drops, 1);
		goto fail;
	}

	/*
	 * Try to grab the txq lock.  If we are able to get the lock,
	 * the packet will be appended to the "get list" of the deferred
	 * packet list.  Otherwise, it will be pushed on the "put list".
	 */
	locked = SFXGE_TXQ_TRYLOCK(txq);

	if (sfxge_tx_qdpl_put(txq, m, locked) != 0) {
		if (locked)
			SFXGE_TXQ_UNLOCK(txq);
		rc = ENOBUFS;
		goto fail;
	}

	/*
	 * Try to grab the lock again.
	 *
	 * If we are able to get the lock, we need to process the deferred
	 * packet list.  If we are not able to get the lock, another thread
	 * is processing the list.
	 */
	if (!locked)
		locked = SFXGE_TXQ_TRYLOCK(txq);

	if (locked) {
		/* Try to service the list. */
		sfxge_tx_qdpl_service(txq);
		/* Lock has been dropped. */
	}

	return (0);

fail:
	m_freem(m);
	return (rc);
}

static void
sfxge_tx_qdpl_flush(struct sfxge_txq *txq)
{
	struct sfxge_tx_dpl *stdp = &txq->dpl;
	struct mbuf *mbuf, *next;

	SFXGE_TXQ_LOCK(txq);

	sfxge_tx_qdpl_swizzle(txq);
	for (mbuf = stdp->std_get; mbuf != NULL; mbuf = next) {
		next = mbuf->m_nextpkt;
		m_freem(mbuf);
	}
	stdp->std_get = NULL;
	stdp->std_get_count = 0;
	stdp->std_get_non_tcp_count = 0;
	stdp->std_getp = &stdp->std_get;

	SFXGE_TXQ_UNLOCK(txq);
}

void
sfxge_if_qflush(struct ifnet *ifp)
{
	struct sfxge_softc *sc;
	int i;

	sc = ifp->if_softc;

	for (i = 0; i < SFXGE_TX_SCALE(sc); i++)
		sfxge_tx_qdpl_flush(sc->txq[i]);
}

/*
 * TX start -- called by the stack.
 */
int
sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct sfxge_softc *sc;
	struct sfxge_txq *txq;
	int rc;

	sc = (struct sfxge_softc *)ifp->if_softc;

	KASSERT(ifp->if_flags & IFF_UP, ("interface not up"));

	/* Pick the desired transmit queue. */
	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TSO)) {
		int index = 0;

		/* check if flowid is set */
		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
			uint32_t hash = m->m_pkthdr.flowid;

			index = sc->rx_indir_table[hash % SFXGE_RX_SCALE_MAX];
		}
		txq = sc->txq[SFXGE_TXQ_IP_TCP_UDP_CKSUM + index];
	} else if (m->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
		txq = sc->txq[SFXGE_TXQ_IP_CKSUM];
	} else {
		txq = sc->txq[SFXGE_TXQ_NON_CKSUM];
	}

	rc = sfxge_tx_packet_add(txq, m);

	return (rc);
}

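/*
 * Editor's note (illustrative, not in the original file): for a TCP/UDP
 * packet carrying an RSS flowid, sfxge_if_transmit() above selects
 * sc->txq[SFXGE_TXQ_IP_TCP_UDP_CKSUM +
 *         sc->rx_indir_table[flowid % SFXGE_RX_SCALE_MAX]],
 * i.e. the checksum-offload Tx queue that shares an event queue with the
 * Rx channel serving the same flow (see the theory of operation at the
 * top of this file).
 */
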
#else /* !SFXGE_HAVE_MQ */

static void sfxge_if_start_locked(struct ifnet *ifp)
{
	struct sfxge_softc *sc = ifp->if_softc;
	struct sfxge_txq *txq;
	struct mbuf *mbuf;
	unsigned int pushed[SFXGE_TXQ_NTYPES];
	unsigned int q_index;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	if (!sc->port.link_up)
		return;

	for (q_index = 0; q_index < SFXGE_TXQ_NTYPES; q_index++) {
		txq = sc->txq[q_index];
		pushed[q_index] = txq->added;
	}

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, mbuf);
		if (mbuf == NULL)
			break;

		ETHER_BPF_MTAP(ifp, mbuf); /* packet capture */

		/* Pick the desired transmit queue. */
		if (mbuf->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TSO))
			q_index = SFXGE_TXQ_IP_TCP_UDP_CKSUM;
		else if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_IP)
			q_index = SFXGE_TXQ_IP_CKSUM;
		else
			q_index = SFXGE_TXQ_NON_CKSUM;
		txq = sc->txq[q_index];

		if (sfxge_tx_queue_mbuf(txq, mbuf) != 0)
			continue;

		if (txq->blocked) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		/* Push the fragments to the hardware in batches. */
		if (txq->added - pushed[q_index] >= SFXGE_TX_BATCH) {
			efx_tx_qpush(txq->common, txq->added);
			pushed[q_index] = txq->added;
		}
	}

	for (q_index = 0; q_index < SFXGE_TXQ_NTYPES; q_index++) {
		txq = sc->txq[q_index];
		if (txq->added != pushed[q_index])
			efx_tx_qpush(txq->common, txq->added);
	}
}

void sfxge_if_start(struct ifnet *ifp)
{
	struct sfxge_softc *sc = ifp->if_softc;

	SFXGE_TXQ_LOCK(sc->txq[0]);
	sfxge_if_start_locked(ifp);
	SFXGE_TXQ_UNLOCK(sc->txq[0]);
}

static inline void
sfxge_tx_qdpl_service(struct sfxge_txq *txq)
{
	struct ifnet *ifp = txq->sc->ifnet;

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sfxge_if_start_locked(ifp);
	SFXGE_TXQ_UNLOCK(txq);
}

#endif /* SFXGE_HAVE_MQ */

/*
 * Software "TSO".  Not quite as good as doing it in hardware, but
 * still faster than segmenting in the stack.
 */

struct sfxge_tso_state {
	/* Output position */
	unsigned out_len;	/* Remaining length in current segment */
	unsigned seqnum;	/* Current sequence number */
	unsigned packet_space;	/* Remaining space in current packet */

	/* Input position */
	uint64_t dma_addr;	/* DMA address of current position */
	unsigned in_len;	/* Remaining length in current mbuf */

	const struct mbuf *mbuf; /* Input mbuf (head of chain) */
	u_short protocol;	/* Network protocol (after VLAN decap) */
	ssize_t nh_off;		/* Offset of network header */
	ssize_t tcph_off;	/* Offset of TCP header */
	unsigned header_len;	/* Number of bytes of header */
};

static inline const struct ip *tso_iph(const struct sfxge_tso_state *tso)
{
	KASSERT(tso->protocol == htons(ETHERTYPE_IP),
		("tso_iph() in non-IPv4 state"));
	return (const struct ip *)(tso->mbuf->m_data + tso->nh_off);
}
static inline const struct ip6_hdr *tso_ip6h(const struct sfxge_tso_state *tso)
{
	KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
		("tso_ip6h() in non-IPv6 state"));
	return (const struct ip6_hdr *)(tso->mbuf->m_data + tso->nh_off);
}
static inline const struct tcphdr *tso_tcph(const struct sfxge_tso_state *tso)
{
	return (const struct tcphdr *)(tso->mbuf->m_data + tso->tcph_off);
}

/* Size of preallocated TSO header buffers.  Larger blocks must be
 * allocated from the heap.
 */
#define	TSOH_STD_SIZE	128

/* At most half the descriptors in the queue at any time will refer to
 * a TSO header buffer, since they must always be followed by a
 * payload descriptor referring to an mbuf.
 */
#define	TSOH_COUNT(_txq_entries)	((_txq_entries) / 2u)
#define	TSOH_PER_PAGE	(PAGE_SIZE / TSOH_STD_SIZE)
#define	TSOH_PAGE_COUNT(_txq_entries)	\
	((TSOH_COUNT(_txq_entries) + TSOH_PER_PAGE - 1) / TSOH_PER_PAGE)
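
/*
 * Editor's note (worked example, not in the original file): on a system
 * with 4 KiB pages, TSOH_PER_PAGE is 4096 / 128 = 32.  Assuming a ring of
 * 1024 entries, TSOH_COUNT(1024) = 512 header buffers and
 * TSOH_PAGE_COUNT(1024) = (512 + 31) / 32 = 16 pages.
 */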

static int tso_init(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc = txq->sc;
	unsigned int tsoh_page_count = TSOH_PAGE_COUNT(sc->txq_entries);
	int i, rc;

	/* Allocate TSO header buffers */
	txq->tsoh_buffer = malloc(tsoh_page_count * sizeof(txq->tsoh_buffer[0]),
				  M_SFXGE, M_WAITOK);

	for (i = 0; i < tsoh_page_count; i++) {
		rc = sfxge_dma_alloc(sc, PAGE_SIZE, &txq->tsoh_buffer[i]);
		if (rc != 0)
			goto fail;
	}

	return (0);

fail:
	while (i-- > 0)
		sfxge_dma_free(&txq->tsoh_buffer[i]);
	free(txq->tsoh_buffer, M_SFXGE);
	txq->tsoh_buffer = NULL;
	return (rc);
}

static void tso_fini(struct sfxge_txq *txq)
{
	int i;

	if (txq->tsoh_buffer != NULL) {
		for (i = 0; i < TSOH_PAGE_COUNT(txq->sc->txq_entries); i++)
			sfxge_dma_free(&txq->tsoh_buffer[i]);
		free(txq->tsoh_buffer, M_SFXGE);
	}
}

static void tso_start(struct sfxge_tso_state *tso, struct mbuf *mbuf)
{
	struct ether_header *eh = mtod(mbuf, struct ether_header *);

	tso->mbuf = mbuf;

	/* Find network protocol and header */
	tso->protocol = eh->ether_type;
	if (tso->protocol == htons(ETHERTYPE_VLAN)) {
		struct ether_vlan_header *veh =
			mtod(mbuf, struct ether_vlan_header *);
		tso->protocol = veh->evl_proto;
		tso->nh_off = sizeof(*veh);
	} else {
		tso->nh_off = sizeof(*eh);
	}

	/* Find TCP header */
	if (tso->protocol == htons(ETHERTYPE_IP)) {
		KASSERT(tso_iph(tso)->ip_p == IPPROTO_TCP,
			("TSO required on non-TCP packet"));
		tso->tcph_off = tso->nh_off + 4 * tso_iph(tso)->ip_hl;
	} else {
		KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
			("TSO required on non-IP packet"));
		KASSERT(tso_ip6h(tso)->ip6_nxt == IPPROTO_TCP,
			("TSO required on non-TCP packet"));
		tso->tcph_off = tso->nh_off + sizeof(struct ip6_hdr);
	}

	tso->header_len = tso->tcph_off + 4 * tso_tcph(tso)->th_off;

	tso->seqnum = ntohl(tso_tcph(tso)->th_seq);

	/* These flags must not be duplicated */
	KASSERT(!(tso_tcph(tso)->th_flags & (TH_URG | TH_SYN | TH_RST)),
		("incompatible TCP flag on TSO packet"));

	tso->out_len = mbuf->m_pkthdr.len - tso->header_len;
}

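/*
 * Editor's note (worked example, not in the original file): for an
 * untagged IPv4/TCP packet with no IP or TCP options, tso_start() above
 * yields nh_off = 14 (Ethernet header), tcph_off = 14 + 4 * 5 = 34 and
 * header_len = 34 + 4 * 5 = 54, so out_len is the packet length minus the
 * 54 bytes of headers that will be replicated for every segment.
 */
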
/*
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.
 */
static void tso_fill_packet_with_fragment(struct sfxge_txq *txq,
					  struct sfxge_tso_state *tso)
{
	efx_buffer_t *desc;
	int n;

	if (tso->in_len == 0 || tso->packet_space == 0)
		return;

	KASSERT(tso->in_len > 0, ("TSO input length went negative"));
	KASSERT(tso->packet_space > 0, ("TSO packet space went negative"));

	n = min(tso->in_len, tso->packet_space);

	tso->packet_space -= n;
	tso->out_len -= n;
	tso->in_len -= n;

	desc = &txq->pend_desc[txq->n_pend_desc++];
	desc->eb_addr = tso->dma_addr;
	desc->eb_size = n;
	desc->eb_eop = tso->out_len == 0 || tso->packet_space == 0;

	tso->dma_addr += n;
}

/* Callback from bus_dmamap_load() for long TSO headers. */
static void tso_map_long_header(void *dma_addr_ret,
				bus_dma_segment_t *segs, int nseg,
				int error)
{
	*(uint64_t *)dma_addr_ret = ((__predict_true(error == 0) &&
				      __predict_true(nseg == 1)) ?
				     segs->ds_addr : 0);
}

/*
 * tso_start_new_packet - generate a new header and prepare for the new packet
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or an error code if allocation of the header buffer failed.
 */
static int tso_start_new_packet(struct sfxge_txq *txq,
				struct sfxge_tso_state *tso,
				unsigned int id)
{
	struct sfxge_tx_mapping *stmp = &txq->stmp[id];
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	caddr_t header;
	uint64_t dma_addr;
	bus_dmamap_t map;
	efx_buffer_t *desc;
	int rc;

	/* Allocate a DMA-mapped header buffer. */
	if (__predict_true(tso->header_len <= TSOH_STD_SIZE)) {
		unsigned int page_index = (id / 2) / TSOH_PER_PAGE;
		unsigned int buf_index = (id / 2) % TSOH_PER_PAGE;

		header = (txq->tsoh_buffer[page_index].esm_base +
			  buf_index * TSOH_STD_SIZE);
		dma_addr = (txq->tsoh_buffer[page_index].esm_addr +
			    buf_index * TSOH_STD_SIZE);
		map = txq->tsoh_buffer[page_index].esm_map;

		stmp->flags = 0;
	} else {
		/* We cannot use bus_dmamem_alloc() as that may sleep */
		header = malloc(tso->header_len, M_SFXGE, M_NOWAIT);
		if (__predict_false(!header))
			return (ENOMEM);
		rc = bus_dmamap_load(txq->packet_dma_tag, stmp->map,
				     header, tso->header_len,
				     tso_map_long_header, &dma_addr,
				     BUS_DMA_NOWAIT);
		if (__predict_false(dma_addr == 0)) {
			if (rc == 0) {
				/* Succeeded but got >1 segment */
				bus_dmamap_unload(txq->packet_dma_tag,
						  stmp->map);
				rc = EINVAL;
			}
			free(header, M_SFXGE);
			return (rc);
		}
		map = stmp->map;

		txq->tso_long_headers++;
		stmp->u.heap_buf = header;
		stmp->flags = TX_BUF_UNMAP;
	}

	tsoh_th = (struct tcphdr *)(header + tso->tcph_off);

	/* Copy and update the headers. */
	m_copydata(tso->mbuf, 0, tso->header_len, header);

	tsoh_th->th_seq = htonl(tso->seqnum);
	tso->seqnum += tso->mbuf->m_pkthdr.tso_segsz;
	if (tso->out_len > tso->mbuf->m_pkthdr.tso_segsz) {
		/* This packet will not finish the TSO burst. */
		ip_length = tso->header_len - tso->nh_off +
		    tso->mbuf->m_pkthdr.tso_segsz;
		tsoh_th->th_flags &= ~(TH_FIN | TH_PUSH);
	} else {
		/* This packet will be the last in the TSO burst. */
		ip_length = tso->header_len - tso->nh_off + tso->out_len;
	}

	if (tso->protocol == htons(ETHERTYPE_IP)) {
		struct ip *tsoh_iph = (struct ip *)(header + tso->nh_off);
		tsoh_iph->ip_len = htons(ip_length);
		/* XXX We should increment ip_id, but FreeBSD doesn't
		 * currently allocate extra IDs for multiple segments.
		 */
	} else {
		struct ip6_hdr *tsoh_iph =
			(struct ip6_hdr *)(header + tso->nh_off);
		tsoh_iph->ip6_plen = htons(ip_length - sizeof(*tsoh_iph));
	}

	/* Make the header visible to the hardware. */
	bus_dmamap_sync(txq->packet_dma_tag, map, BUS_DMASYNC_PREWRITE);

	tso->packet_space = tso->mbuf->m_pkthdr.tso_segsz;
	txq->tso_packets++;

	/* Form a descriptor for this header. */
	desc = &txq->pend_desc[txq->n_pend_desc++];
	desc->eb_addr = dma_addr;
	desc->eb_size = tso->header_len;
	desc->eb_eop = 0;

	return (0);
}

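/*
 * Editor's note (worked example, not in the original file): continuing the
 * untagged IPv4/TCP case (header_len = 54, nh_off = 14) with an MSS of
 * 1448, every non-final segment produced by tso_start_new_packet() above
 * carries ip_len = 54 - 14 + 1448 = 1488 and has FIN/PUSH cleared, while
 * th_seq advances by 1448 from one segment to the next; the final segment
 * uses the remaining out_len in place of the MSS.
 */
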
static int
sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
		   const bus_dma_segment_t *dma_seg, int n_dma_seg)
{
	struct sfxge_tso_state tso;
	unsigned int id, next_id;
	unsigned skipped = 0;

	tso_start(&tso, mbuf);

	while (dma_seg->ds_len + skipped <= tso.header_len) {
		skipped += dma_seg->ds_len;
		--n_dma_seg;
		KASSERT(n_dma_seg, ("no payload found in TSO packet"));
		++dma_seg;
	}
	tso.in_len = dma_seg->ds_len - (tso.header_len - skipped);
	tso.dma_addr = dma_seg->ds_addr + (tso.header_len - skipped);

	id = txq->added & txq->ptr_mask;
	if (__predict_false(tso_start_new_packet(txq, &tso, id)))
		return (-1);

	while (1) {
		id = (id + 1) & txq->ptr_mask;
		tso_fill_packet_with_fragment(txq, &tso);

		/* Move onto the next fragment? */
		if (tso.in_len == 0) {
			--n_dma_seg;
			if (n_dma_seg == 0)
				break;
			++dma_seg;
			tso.in_len = dma_seg->ds_len;
			tso.dma_addr = dma_seg->ds_addr;
		}

		/* End of packet? */
		if (tso.packet_space == 0) {
			/* If the queue is now full due to tiny MSS,
			 * or we can't create another header, discard
			 * the remainder of the input mbuf but do not
			 * roll back the work we have done.
			 */
			if (txq->n_pend_desc >
			    SFXGE_TSO_MAX_DESC - (1 + SFXGE_TX_MAPPING_MAX_SEG)) {
				txq->tso_pdrop_too_many++;
				break;
			}
			next_id = (id + 1) & txq->ptr_mask;
			if (__predict_false(tso_start_new_packet(txq, &tso,
								 next_id))) {
				txq->tso_pdrop_no_rsrc++;
				break;
			}
			id = next_id;
		}
	}

	txq->tso_bursts++;
	return (id);
}

static void
sfxge_tx_qunblock(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc;
	struct sfxge_evq *evq;

	sc = txq->sc;
	evq = sc->evq[txq->evq_index];

	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	if (txq->init_state != SFXGE_TXQ_STARTED)
		return;

	SFXGE_TXQ_LOCK(txq);

	if (txq->blocked) {
		unsigned int level;

		level = txq->added - txq->completed;
		if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries))
			txq->blocked = 0;
	}

	sfxge_tx_qdpl_service(txq);
	/* note: lock has been dropped */
}

void
sfxge_tx_qflush_done(struct sfxge_txq *txq)
{

	txq->flush_state = SFXGE_FLUSH_DONE;
}

static void
sfxge_tx_qstop(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_txq *txq;
	struct sfxge_evq *evq;
	unsigned int count;

	txq = sc->txq[index];
	evq = sc->evq[txq->evq_index];

	SFXGE_TXQ_LOCK(txq);

	KASSERT(txq->init_state == SFXGE_TXQ_STARTED,
		("txq->init_state != SFXGE_TXQ_STARTED"));

	txq->init_state = SFXGE_TXQ_INITIALIZED;
	txq->flush_state = SFXGE_FLUSH_PENDING;

	/* Flush the transmit queue. */
	efx_tx_qflush(txq->common);

	SFXGE_TXQ_UNLOCK(txq);

	count = 0;
	do {
		/* Spin for 100ms. */
		DELAY(100000);

		if (txq->flush_state != SFXGE_FLUSH_PENDING)
			break;
	} while (++count < 20);

	SFXGE_EVQ_LOCK(evq);
	SFXGE_TXQ_LOCK(txq);

	KASSERT(txq->flush_state != SFXGE_FLUSH_FAILED,
		("txq->flush_state == SFXGE_FLUSH_FAILED"));

	txq->flush_state = SFXGE_FLUSH_DONE;

	txq->blocked = 0;
	txq->pending = txq->added;

	sfxge_tx_qcomplete(txq, evq);
	KASSERT(txq->completed == txq->added,
		("txq->completed != txq->added"));

	sfxge_tx_qreap(txq);
	KASSERT(txq->reaped == txq->completed,
		("txq->reaped != txq->completed"));

	txq->added = 0;
	txq->pending = 0;
	txq->completed = 0;
	txq->reaped = 0;

	/* Destroy the common code transmit queue. */
	efx_tx_qdestroy(txq->common);
	txq->common = NULL;

	efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
			       EFX_TXQ_NBUFS(sc->txq_entries));

	SFXGE_EVQ_UNLOCK(evq);
	SFXGE_TXQ_UNLOCK(txq);
}

static int
sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_txq *txq;
	efsys_mem_t *esmp;
	uint16_t flags;
	struct sfxge_evq *evq;
	int rc;

	txq = sc->txq[index];
	esmp = &txq->mem;
	evq = sc->evq[txq->evq_index];

	KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
		("txq->init_state != SFXGE_TXQ_INITIALIZED"));
	KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
		("evq->init_state != SFXGE_EVQ_STARTED"));

	/* Program the buffer table. */
	if ((rc = efx_sram_buf_tbl_set(sc->enp, txq->buf_base_id, esmp,
				       EFX_TXQ_NBUFS(sc->txq_entries))) != 0)
		return (rc);

	/* Determine the kind of queue we are creating. */
	switch (txq->type) {
	case SFXGE_TXQ_NON_CKSUM:
		flags = 0;
		break;
	case SFXGE_TXQ_IP_CKSUM:
		flags = EFX_CKSUM_IPV4;
		break;
	case SFXGE_TXQ_IP_TCP_UDP_CKSUM:
		flags = EFX_CKSUM_IPV4 | EFX_CKSUM_TCPUDP;
		break;
	default:
		KASSERT(0, ("Impossible TX queue"));
		flags = 0;
		break;
	}

	/* Create the common code transmit queue. */
	if ((rc = efx_tx_qcreate(sc->enp, index, txq->type, esmp,
	    sc->txq_entries, txq->buf_base_id, flags, evq->common,
	    &txq->common)) != 0)
		goto fail;

	SFXGE_TXQ_LOCK(txq);

	/* Enable the transmit queue. */
	efx_tx_qenable(txq->common);

	txq->init_state = SFXGE_TXQ_STARTED;

	SFXGE_TXQ_UNLOCK(txq);

	return (0);

fail:
	efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
			       EFX_TXQ_NBUFS(sc->txq_entries));
	return (rc);
}

void
sfxge_tx_stop(struct sfxge_softc *sc)
{
	int index;

	index = SFXGE_TX_SCALE(sc);
	while (--index >= 0)
		sfxge_tx_qstop(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);

	sfxge_tx_qstop(sc, SFXGE_TXQ_IP_CKSUM);

	sfxge_tx_qstop(sc, SFXGE_TXQ_NON_CKSUM);

	/* Tear down the transmit module */
	efx_tx_fini(sc->enp);
}

int
sfxge_tx_start(struct sfxge_softc *sc)
{
	int index;
	int rc;

	/* Initialize the common code transmit module. */
	if ((rc = efx_tx_init(sc->enp)) != 0)
		return (rc);

	if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_NON_CKSUM)) != 0)
		goto fail;

	if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_IP_CKSUM)) != 0)
		goto fail2;

	for (index = 0; index < SFXGE_TX_SCALE(sc); index++) {
		if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM +
		    index)) != 0)
			goto fail3;
	}

	return (0);

fail3:
	while (--index >= 0)
		sfxge_tx_qstop(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);

	sfxge_tx_qstop(sc, SFXGE_TXQ_IP_CKSUM);

fail2:
	sfxge_tx_qstop(sc, SFXGE_TXQ_NON_CKSUM);

fail:
	efx_tx_fini(sc->enp);

	return (rc);
}

/**
 * Destroy a transmit queue.
 */
static void
sfxge_tx_qfini(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_txq *txq;
	unsigned int nmaps;

	txq = sc->txq[index];

	KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
		("txq->init_state != SFXGE_TXQ_INITIALIZED"));

	if (txq->type == SFXGE_TXQ_IP_TCP_UDP_CKSUM)
		tso_fini(txq);

	/* Free the context arrays. */
	free(txq->pend_desc, M_SFXGE);
	nmaps = sc->txq_entries;
	while (nmaps-- != 0)
		bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
	free(txq->stmp, M_SFXGE);

	/* Release DMA memory mapping. */
	sfxge_dma_free(&txq->mem);

	sc->txq[index] = NULL;

#ifdef SFXGE_HAVE_MQ
	SFXGE_TXQ_LOCK_DESTROY(txq);
#endif

	free(txq, M_SFXGE);
}

static int
sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index,
	       enum sfxge_txq_type type, unsigned int evq_index)
{
	char name[16];
	struct sysctl_oid *txq_node;
	struct sfxge_txq *txq;
	struct sfxge_evq *evq;
#ifdef SFXGE_HAVE_MQ
	struct sfxge_tx_dpl *stdp;
#endif
	efsys_mem_t *esmp;
	unsigned int nmaps;
	int rc;

	txq = malloc(sizeof(struct sfxge_txq), M_SFXGE, M_ZERO | M_WAITOK);
	txq->sc = sc;
	txq->entries = sc->txq_entries;
	txq->ptr_mask = txq->entries - 1;

	sc->txq[txq_index] = txq;
	esmp = &txq->mem;

	evq = sc->evq[evq_index];

	/* Allocate and zero DMA space for the descriptor ring. */
	if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(sc->txq_entries), esmp)) != 0)
		return (rc);
	(void)memset(esmp->esm_base, 0, EFX_TXQ_SIZE(sc->txq_entries));

	/* Allocate buffer table entries. */
	sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(sc->txq_entries),
				 &txq->buf_base_id);

	/* Create a DMA tag for packet mappings. */
	if (bus_dma_tag_create(sc->parent_dma_tag, 1, 0x1000,
	    MIN(0x3FFFFFFFFFFFUL, BUS_SPACE_MAXADDR), BUS_SPACE_MAXADDR, NULL,
	    NULL, 0x11000, SFXGE_TX_MAPPING_MAX_SEG, 0x1000, 0, NULL, NULL,
	    &txq->packet_dma_tag) != 0) {
		device_printf(sc->dev, "Couldn't allocate txq DMA tag\n");
		rc = ENOMEM;
		goto fail;
	}

	/* Allocate pending descriptor array for batching writes. */
	txq->pend_desc = malloc(sizeof(efx_buffer_t) * sc->txq_entries,
				M_SFXGE, M_ZERO | M_WAITOK);

	/* Allocate and initialise mbuf DMA mapping array. */
	txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * sc->txq_entries,
			   M_SFXGE, M_ZERO | M_WAITOK);
	for (nmaps = 0; nmaps < sc->txq_entries; nmaps++) {
		rc = bus_dmamap_create(txq->packet_dma_tag, 0,
				       &txq->stmp[nmaps].map);
		if (rc != 0)
			goto fail2;
	}

	snprintf(name, sizeof(name), "%u", txq_index);
	txq_node = SYSCTL_ADD_NODE(
		device_get_sysctl_ctx(sc->dev),
		SYSCTL_CHILDREN(sc->txqs_node),
		OID_AUTO, name, CTLFLAG_RD, NULL, "");
	if (txq_node == NULL) {
		rc = ENOMEM;
		goto fail_txq_node;
	}

	if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM &&
	    (rc = tso_init(txq)) != 0)
		goto fail3;

#ifdef SFXGE_HAVE_MQ
	if (sfxge_tx_dpl_get_max <= 0) {
		log(LOG_ERR, "%s=%d must be greater than 0",
		    SFXGE_PARAM_TX_DPL_GET_MAX, sfxge_tx_dpl_get_max);
		rc = EINVAL;
		goto fail_tx_dpl_get_max;
	}
	if (sfxge_tx_dpl_get_non_tcp_max <= 0) {
		log(LOG_ERR, "%s=%d must be greater than 0",
		    SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX,
		    sfxge_tx_dpl_get_non_tcp_max);
		rc = EINVAL;
		goto fail_tx_dpl_get_max;
	}
	if (sfxge_tx_dpl_put_max < 0) {
		log(LOG_ERR, "%s=%d must be greater or equal to 0",
		    SFXGE_PARAM_TX_DPL_PUT_MAX, sfxge_tx_dpl_put_max);
		rc = EINVAL;
		goto fail_tx_dpl_put_max;
	}

	/* Initialize the deferred packet list. */
	stdp = &txq->dpl;
	stdp->std_put_max = sfxge_tx_dpl_put_max;
	stdp->std_get_max = sfxge_tx_dpl_get_max;
	stdp->std_get_non_tcp_max = sfxge_tx_dpl_get_non_tcp_max;
	stdp->std_getp = &stdp->std_get;

	SFXGE_TXQ_LOCK_INIT(txq, device_get_nameunit(sc->dev), txq_index);

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(sc->dev),
			SYSCTL_CHILDREN(txq_node), OID_AUTO,
			"dpl_get_count", CTLFLAG_RD | CTLFLAG_STATS,
			&stdp->std_get_count, 0, "");
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(sc->dev),
			SYSCTL_CHILDREN(txq_node), OID_AUTO,
			"dpl_get_non_tcp_count", CTLFLAG_RD | CTLFLAG_STATS,
			&stdp->std_get_non_tcp_count, 0, "");
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(sc->dev),
			SYSCTL_CHILDREN(txq_node), OID_AUTO,
			"dpl_get_hiwat", CTLFLAG_RD | CTLFLAG_STATS,
			&stdp->std_get_hiwat, 0, "");
#endif

	txq->type = type;
	txq->evq_index = evq_index;
	txq->txq_index = txq_index;
	txq->init_state = SFXGE_TXQ_INITIALIZED;

	return (0);

fail_tx_dpl_put_max:
fail_tx_dpl_get_max:
fail3:
fail_txq_node:
	free(txq->pend_desc, M_SFXGE);
fail2:
	while (nmaps-- != 0)
		bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
	free(txq->stmp, M_SFXGE);
	bus_dma_tag_destroy(txq->packet_dma_tag);

fail:
	sfxge_dma_free(esmp);

	return (rc);
}

static const struct {
	const char *name;
	size_t offset;
} sfxge_tx_stats[] = {
#define	SFXGE_TX_STAT(name, member) \
	{ #name, offsetof(struct sfxge_txq, member) }
	SFXGE_TX_STAT(tso_bursts, tso_bursts),
	SFXGE_TX_STAT(tso_packets, tso_packets),
	SFXGE_TX_STAT(tso_long_headers, tso_long_headers),
	SFXGE_TX_STAT(tso_pdrop_too_many, tso_pdrop_too_many),
	SFXGE_TX_STAT(tso_pdrop_no_rsrc, tso_pdrop_no_rsrc),
	SFXGE_TX_STAT(tx_collapses, collapses),
	SFXGE_TX_STAT(tx_drops, drops),
	SFXGE_TX_STAT(tx_get_overflow, get_overflow),
	SFXGE_TX_STAT(tx_get_non_tcp_overflow, get_non_tcp_overflow),
	SFXGE_TX_STAT(tx_put_overflow, put_overflow),
	SFXGE_TX_STAT(tx_netdown_drops, netdown_drops),
};

static int
sfxge_tx_stat_handler(SYSCTL_HANDLER_ARGS)
{
	struct sfxge_softc *sc = arg1;
	unsigned int id = arg2;
	unsigned long sum;
	unsigned int index;

	/* Sum across all TX queues */
	sum = 0;
	for (index = 0;
	     index < SFXGE_TXQ_IP_TCP_UDP_CKSUM + SFXGE_TX_SCALE(sc);
	     index++)
		sum += *(unsigned long *)((caddr_t)sc->txq[index] +
					  sfxge_tx_stats[id].offset);

	return (SYSCTL_OUT(req, &sum, sizeof(sum)));
}

static void
sfxge_tx_stat_init(struct sfxge_softc *sc)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
	struct sysctl_oid_list *stat_list;
	unsigned int id;

	stat_list = SYSCTL_CHILDREN(sc->stats_node);

	for (id = 0;
	     id < sizeof(sfxge_tx_stats) / sizeof(sfxge_tx_stats[0]);
	     id++) {
		SYSCTL_ADD_PROC(
			ctx, stat_list,
			OID_AUTO, sfxge_tx_stats[id].name,
			CTLTYPE_ULONG|CTLFLAG_RD,
			sc, id, sfxge_tx_stat_handler, "LU",
			"");
	}
}

void
sfxge_tx_fini(struct sfxge_softc *sc)
{
	int index;

	index = SFXGE_TX_SCALE(sc);
	while (--index >= 0)
		sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);

	sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM);
	sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM);
}


int
sfxge_tx_init(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr;
	int index;
	int rc;

	intr = &sc->intr;

	KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
		("intr->state != SFXGE_INTR_INITIALIZED"));

	sc->txqs_node = SYSCTL_ADD_NODE(
		device_get_sysctl_ctx(sc->dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)),
		OID_AUTO, "txq", CTLFLAG_RD, NULL, "Tx queues");
	if (sc->txqs_node == NULL) {
		rc = ENOMEM;
		goto fail_txq_node;
	}

	/* Initialize the transmit queues */
	if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NON_CKSUM,
	    SFXGE_TXQ_NON_CKSUM, 0)) != 0)
		goto fail;

	if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_CKSUM,
	    SFXGE_TXQ_IP_CKSUM, 0)) != 0)
		goto fail2;

	for (index = 0; index < SFXGE_TX_SCALE(sc); index++) {
		if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index,
		    SFXGE_TXQ_IP_TCP_UDP_CKSUM, index)) != 0)
			goto fail3;
	}

	sfxge_tx_stat_init(sc);

	return (0);

fail3:
	sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM);

	while (--index >= 0)
		sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);

fail2:
	sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM);

fail:
fail_txq_node:
	return (rc);
}