/*-
 * Copyright (c) 2010-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

/* Theory of operation:
 *
 * Tx queue allocation and mapping on Siena
 *
 * One Tx queue with checksum offload enabled is allocated per Rx channel
 * (event queue).  In addition, two Tx queues (one without checksum offload
 * and one with IP checksum offload only) are allocated and bound to event
 * queue 0.  sfxge_txq_type is used as the Tx queue label.
 *
 * So, the event queue plus label to Tx queue index mapping is:
 *	if event queue index is 0, TxQ-index = TxQ-label (in [0..SFXGE_TXQ_NTYPES))
 *	else TxQ-index = SFXGE_TXQ_NTYPES + EvQ-index - 1
 * See sfxge_get_txq_by_label() in sfxge_ev.c.
 *
 * Tx queue allocation and mapping on EF10
 *
 * One Tx queue with checksum offload enabled is allocated per Rx
 * channel (event queue).  Checksum offload on all Tx queues is enabled or
 * disabled dynamically by inserting option descriptors, so the additional
 * queues used on Siena are not required.
 *
 * The TxQ label is always set to zero on EF10 hardware.
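 * Option descriptors for checksum configuration changes are emitted by
 * sfxge_tx_maybe_toggle_cksum_offload() just before the packet's own
 * descriptors, whenever the offload required by a packet differs from the
 * queue's current hardware checksum setting.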
 * So, the event queue to Tx queue mapping is simple:
 *	TxQ-index = EvQ-index
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/sfxge/sfxge_tx.c 342455 2018-12-25 07:39:34Z arybchik $");

#include "opt_rss.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/limits.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>

#ifdef RSS
#include <net/rss_config.h>
#endif

#include "common/efx.h"

#include "sfxge.h"
#include "sfxge_tx.h"


#define	SFXGE_PARAM_TX_DPL_GET_MAX	SFXGE_PARAM(tx_dpl_get_max)
static int sfxge_tx_dpl_get_max = SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_MAX, &sfxge_tx_dpl_get_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_get_max, 0,
	   "Maximum total number of packets in the deferred packet get-list");

#define	SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX \
	SFXGE_PARAM(tx_dpl_get_non_tcp_max)
static int sfxge_tx_dpl_get_non_tcp_max =
	SFXGE_TX_DPL_GET_NON_TCP_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX, &sfxge_tx_dpl_get_non_tcp_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_non_tcp_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_get_non_tcp_max, 0,
	   "Maximum number of non-TCP packets in the deferred packet get-list");

#define	SFXGE_PARAM_TX_DPL_PUT_MAX	SFXGE_PARAM(tx_dpl_put_max)
static int sfxge_tx_dpl_put_max = SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_PUT_MAX, &sfxge_tx_dpl_put_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_put_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_put_max, 0,
	   "Maximum total number of packets in the deferred packet put-list");

#define	SFXGE_PARAM_TSO_FW_ASSISTED	SFXGE_PARAM(tso_fw_assisted)
static int sfxge_tso_fw_assisted = (SFXGE_FATSOV1 | SFXGE_FATSOV2);
TUNABLE_INT(SFXGE_PARAM_TSO_FW_ASSISTED, &sfxge_tso_fw_assisted);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tso_fw_assisted, CTLFLAG_RDTUN,
	   &sfxge_tso_fw_assisted, 0,
	   "Bitmask of FW-assisted TSO variants allowed if supported by NIC firmware");


static const struct {
	const char *name;
	size_t offset;
} sfxge_tx_stats[] = {
#define	SFXGE_TX_STAT(name, member) \
	{ #name, offsetof(struct sfxge_txq, member) }
	SFXGE_TX_STAT(tso_bursts, tso_bursts),
	SFXGE_TX_STAT(tso_packets, tso_packets),
	SFXGE_TX_STAT(tso_long_headers, tso_long_headers),
	SFXGE_TX_STAT(tso_pdrop_too_many, tso_pdrop_too_many),
	SFXGE_TX_STAT(tso_pdrop_no_rsrc, tso_pdrop_no_rsrc),
	SFXGE_TX_STAT(tx_collapses, collapses),
	SFXGE_TX_STAT(tx_drops, drops),
	SFXGE_TX_STAT(tx_get_overflow, get_overflow),
	SFXGE_TX_STAT(tx_get_non_tcp_overflow, get_non_tcp_overflow),
	SFXGE_TX_STAT(tx_put_overflow, put_overflow),
	SFXGE_TX_STAT(tx_netdown_drops, netdown_drops),
};


/* Forward declarations.
*/ 146static void sfxge_tx_qdpl_service(struct sfxge_txq *txq); 147static void sfxge_tx_qlist_post(struct sfxge_txq *txq); 148static void sfxge_tx_qunblock(struct sfxge_txq *txq); 149static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf, 150 const bus_dma_segment_t *dma_seg, int n_dma_seg, 151 int n_extra_descs); 152 153static inline void 154sfxge_next_stmp(struct sfxge_txq *txq, struct sfxge_tx_mapping **pstmp) 155{ 156 KASSERT((*pstmp)->flags == 0, ("stmp flags are not 0")); 157 if (__predict_false(*pstmp == 158 &txq->stmp[txq->ptr_mask])) 159 *pstmp = &txq->stmp[0]; 160 else 161 (*pstmp)++; 162} 163 164static int 165sfxge_tx_maybe_toggle_cksum_offload(struct sfxge_txq *txq, struct mbuf *mbuf, 166 struct sfxge_tx_mapping **pstmp) 167{ 168 uint16_t new_hw_cksum_flags; 169 efx_desc_t *desc; 170 171 if (mbuf->m_pkthdr.csum_flags & 172 (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6 | CSUM_TSO)) { 173 /* 174 * We always set EFX_TXQ_CKSUM_IPV4 here because this 175 * configuration is the most useful, and this won't 176 * cause any trouble in case of IPv6 traffic anyway. 177 */ 178 new_hw_cksum_flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP; 179 } else if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_IP) { 180 new_hw_cksum_flags = EFX_TXQ_CKSUM_IPV4; 181 } else { 182 new_hw_cksum_flags = 0; 183 } 184 185 if (new_hw_cksum_flags == txq->hw_cksum_flags) 186 return (0); 187 188 desc = &txq->pend_desc[txq->n_pend_desc]; 189 efx_tx_qdesc_checksum_create(txq->common, new_hw_cksum_flags, desc); 190 txq->hw_cksum_flags = new_hw_cksum_flags; 191 txq->n_pend_desc++; 192 193 sfxge_next_stmp(txq, pstmp); 194 195 return (1); 196} 197 198static int 199sfxge_tx_maybe_insert_tag(struct sfxge_txq *txq, struct mbuf *mbuf, 200 struct sfxge_tx_mapping **pstmp) 201{ 202 uint16_t this_tag = ((mbuf->m_flags & M_VLANTAG) ? 203 mbuf->m_pkthdr.ether_vtag : 204 0); 205 efx_desc_t *desc; 206 207 if (this_tag == txq->hw_vlan_tci) 208 return (0); 209 210 desc = &txq->pend_desc[txq->n_pend_desc]; 211 efx_tx_qdesc_vlantci_create(txq->common, bswap16(this_tag), desc); 212 txq->hw_vlan_tci = this_tag; 213 txq->n_pend_desc++; 214 215 sfxge_next_stmp(txq, pstmp); 216 217 return (1); 218} 219 220void 221sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq) 222{ 223 unsigned int completed; 224 225 SFXGE_EVQ_LOCK_ASSERT_OWNED(evq); 226 227 completed = txq->completed; 228 while (completed != txq->pending) { 229 struct sfxge_tx_mapping *stmp; 230 unsigned int id; 231 232 id = completed++ & txq->ptr_mask; 233 234 stmp = &txq->stmp[id]; 235 if (stmp->flags & TX_BUF_UNMAP) { 236 bus_dmamap_unload(txq->packet_dma_tag, stmp->map); 237 if (stmp->flags & TX_BUF_MBUF) { 238 struct mbuf *m = stmp->u.mbuf; 239 do 240 m = m_free(m); 241 while (m != NULL); 242 } else { 243 free(stmp->u.heap_buf, M_SFXGE); 244 } 245 stmp->flags = 0; 246 } 247 } 248 txq->completed = completed; 249 250 /* Check whether we need to unblock the queue. */ 251 mb(); 252 if (txq->blocked) { 253 unsigned int level; 254 255 level = txq->added - txq->completed; 256 if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries)) 257 sfxge_tx_qunblock(txq); 258 } 259} 260 261static unsigned int 262sfxge_is_mbuf_non_tcp(struct mbuf *mbuf) 263{ 264 /* Absence of TCP checksum flags does not mean that it is non-TCP 265 * but it should be true if user wants to achieve high throughput. 266 */ 267 return (!(mbuf->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP))); 268} 269 270/* 271 * Reorder the put list and append it to the get list. 
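 *
 * The put list is filled lock-free by sfxge_tx_qdpl_put_unlocked() and is
 * therefore in reverse (LIFO) order; reversing it here restores the
 * original submission order before it is appended to the get list.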
272 */ 273static void 274sfxge_tx_qdpl_swizzle(struct sfxge_txq *txq) 275{ 276 struct sfxge_tx_dpl *stdp; 277 struct mbuf *mbuf, *get_next, **get_tailp; 278 volatile uintptr_t *putp; 279 uintptr_t put; 280 unsigned int count; 281 unsigned int non_tcp_count; 282 283 SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); 284 285 stdp = &txq->dpl; 286 287 /* Acquire the put list. */ 288 putp = &stdp->std_put; 289 put = atomic_readandclear_ptr(putp); 290 mbuf = (void *)put; 291 292 if (mbuf == NULL) 293 return; 294 295 /* Reverse the put list. */ 296 get_tailp = &mbuf->m_nextpkt; 297 get_next = NULL; 298 299 count = 0; 300 non_tcp_count = 0; 301 do { 302 struct mbuf *put_next; 303 304 non_tcp_count += sfxge_is_mbuf_non_tcp(mbuf); 305 put_next = mbuf->m_nextpkt; 306 mbuf->m_nextpkt = get_next; 307 get_next = mbuf; 308 mbuf = put_next; 309 310 count++; 311 } while (mbuf != NULL); 312 313 if (count > stdp->std_put_hiwat) 314 stdp->std_put_hiwat = count; 315 316 /* Append the reversed put list to the get list. */ 317 KASSERT(*get_tailp == NULL, ("*get_tailp != NULL")); 318 *stdp->std_getp = get_next; 319 stdp->std_getp = get_tailp; 320 stdp->std_get_count += count; 321 stdp->std_get_non_tcp_count += non_tcp_count; 322} 323 324static void 325sfxge_tx_qreap(struct sfxge_txq *txq) 326{ 327 SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); 328 329 txq->reaped = txq->completed; 330} 331 332static void 333sfxge_tx_qlist_post(struct sfxge_txq *txq) 334{ 335 unsigned int old_added; 336 unsigned int block_level; 337 unsigned int level; 338 int rc; 339 340 SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); 341 342 KASSERT(txq->n_pend_desc != 0, ("txq->n_pend_desc == 0")); 343 KASSERT(txq->n_pend_desc <= txq->max_pkt_desc, 344 ("txq->n_pend_desc too large")); 345 KASSERT(!txq->blocked, ("txq->blocked")); 346 347 old_added = txq->added; 348 349 /* Post the fragment list. */ 350 rc = efx_tx_qdesc_post(txq->common, txq->pend_desc, txq->n_pend_desc, 351 txq->reaped, &txq->added); 352 KASSERT(rc == 0, ("efx_tx_qdesc_post() failed")); 353 354 /* If efx_tx_qdesc_post() had to refragment, our information about 355 * buffers to free may be associated with the wrong 356 * descriptors. 357 */ 358 KASSERT(txq->added - old_added == txq->n_pend_desc, 359 ("efx_tx_qdesc_post() refragmented descriptors")); 360 361 level = txq->added - txq->reaped; 362 KASSERT(level <= txq->entries, ("overfilled TX queue")); 363 364 /* Clear the fragment list. */ 365 txq->n_pend_desc = 0; 366 367 /* 368 * Set the block level to ensure there is space to generate a 369 * large number of descriptors for TSO. 370 */ 371 block_level = EFX_TXQ_LIMIT(txq->entries) - txq->max_pkt_desc; 372 373 /* Have we reached the block level? */ 374 if (level < block_level) 375 return; 376 377 /* Reap, and check again */ 378 sfxge_tx_qreap(txq); 379 level = txq->added - txq->reaped; 380 if (level < block_level) 381 return; 382 383 txq->blocked = 1; 384 385 /* 386 * Avoid a race with completion interrupt handling that could leave 387 * the queue blocked. 
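	 * sfxge_tx_qcomplete() re-checks the blocked flag only after it has
	 * updated the completed count, so after setting blocked we issue a
	 * memory barrier, reap again and re-check the fill level; if enough
	 * descriptors completed in the meantime we clear blocked ourselves.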
388 */ 389 mb(); 390 sfxge_tx_qreap(txq); 391 level = txq->added - txq->reaped; 392 if (level < block_level) { 393 mb(); 394 txq->blocked = 0; 395 } 396} 397 398static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf) 399{ 400 bus_dmamap_t *used_map; 401 bus_dmamap_t map; 402 bus_dma_segment_t dma_seg[SFXGE_TX_MAPPING_MAX_SEG]; 403 unsigned int id; 404 struct sfxge_tx_mapping *stmp; 405 efx_desc_t *desc; 406 int n_dma_seg; 407 int rc; 408 int i; 409 int eop; 410 uint16_t hw_cksum_flags_prev; 411 uint16_t hw_vlan_tci_prev; 412 int n_extra_descs; 413 414 KASSERT(!txq->blocked, ("txq->blocked")); 415 416#if SFXGE_TX_PARSE_EARLY 417 /* 418 * If software TSO is used, we still need to copy packet header, 419 * even if we have already parsed it early before enqueue. 420 */ 421 if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) && 422 (txq->tso_fw_assisted == 0)) 423 prefetch_read_many(mbuf->m_data); 424#else 425 /* 426 * Prefetch packet header since we need to parse it and extract 427 * IP ID, TCP sequence number and flags. 428 */ 429 if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) 430 prefetch_read_many(mbuf->m_data); 431#endif 432 433 if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED)) { 434 rc = EINTR; 435 goto reject; 436 } 437 438 /* Load the packet for DMA. */ 439 id = txq->added & txq->ptr_mask; 440 stmp = &txq->stmp[id]; 441 rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map, 442 mbuf, dma_seg, &n_dma_seg, 0); 443 if (rc == EFBIG) { 444 /* Try again. */ 445 struct mbuf *new_mbuf = m_collapse(mbuf, M_NOWAIT, 446 SFXGE_TX_MAPPING_MAX_SEG); 447 if (new_mbuf == NULL) 448 goto reject; 449 ++txq->collapses; 450 mbuf = new_mbuf; 451 rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, 452 stmp->map, mbuf, 453 dma_seg, &n_dma_seg, 0); 454 } 455 if (rc != 0) 456 goto reject; 457 458 /* Make the packet visible to the hardware. */ 459 bus_dmamap_sync(txq->packet_dma_tag, stmp->map, BUS_DMASYNC_PREWRITE); 460 461 used_map = &stmp->map; 462 463 hw_cksum_flags_prev = txq->hw_cksum_flags; 464 hw_vlan_tci_prev = txq->hw_vlan_tci; 465 466 /* 467 * The order of option descriptors, which are used to leverage VLAN tag 468 * and checksum offloads, might be important. Changing checksum offload 469 * between VLAN option and packet descriptors probably does not work. 470 */ 471 n_extra_descs = sfxge_tx_maybe_toggle_cksum_offload(txq, mbuf, &stmp); 472 n_extra_descs += sfxge_tx_maybe_insert_tag(txq, mbuf, &stmp); 473 474 if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) { 475 rc = sfxge_tx_queue_tso(txq, mbuf, dma_seg, n_dma_seg, 476 n_extra_descs); 477 if (rc < 0) 478 goto reject_mapped; 479 stmp = &txq->stmp[(rc - 1) & txq->ptr_mask]; 480 } else { 481 /* Add the mapping to the fragment list, and set flags 482 * for the buffer. 483 */ 484 485 i = 0; 486 for (;;) { 487 desc = &txq->pend_desc[i + n_extra_descs]; 488 eop = (i == n_dma_seg - 1); 489 efx_tx_qdesc_dma_create(txq->common, 490 dma_seg[i].ds_addr, 491 dma_seg[i].ds_len, 492 eop, 493 desc); 494 if (eop) 495 break; 496 i++; 497 sfxge_next_stmp(txq, &stmp); 498 } 499 txq->n_pend_desc = n_dma_seg + n_extra_descs; 500 } 501 502 /* 503 * If the mapping required more than one descriptor 504 * then we need to associate the DMA map with the last 505 * descriptor, not the first. 506 */ 507 if (used_map != &stmp->map) { 508 map = stmp->map; 509 stmp->map = *used_map; 510 *used_map = map; 511 } 512 513 stmp->u.mbuf = mbuf; 514 stmp->flags = TX_BUF_UNMAP | TX_BUF_MBUF; 515 516 /* Post the fragment list. 
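	 * The doorbell push to the hardware is batched separately by the
	 * caller (see sfxge_tx_qdpl_drain()).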
*/ 517 sfxge_tx_qlist_post(txq); 518 519 return (0); 520 521reject_mapped: 522 txq->hw_vlan_tci = hw_vlan_tci_prev; 523 txq->hw_cksum_flags = hw_cksum_flags_prev; 524 bus_dmamap_unload(txq->packet_dma_tag, *used_map); 525reject: 526 /* Drop the packet on the floor. */ 527 m_freem(mbuf); 528 ++txq->drops; 529 530 return (rc); 531} 532 533/* 534 * Drain the deferred packet list into the transmit queue. 535 */ 536static void 537sfxge_tx_qdpl_drain(struct sfxge_txq *txq) 538{ 539 struct sfxge_softc *sc; 540 struct sfxge_tx_dpl *stdp; 541 struct mbuf *mbuf, *next; 542 unsigned int count; 543 unsigned int non_tcp_count; 544 unsigned int pushed; 545 int rc; 546 547 SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); 548 549 sc = txq->sc; 550 stdp = &txq->dpl; 551 pushed = txq->added; 552 553 if (__predict_true(txq->init_state == SFXGE_TXQ_STARTED)) { 554 prefetch_read_many(sc->enp); 555 prefetch_read_many(txq->common); 556 } 557 558 mbuf = stdp->std_get; 559 count = stdp->std_get_count; 560 non_tcp_count = stdp->std_get_non_tcp_count; 561 562 if (count > stdp->std_get_hiwat) 563 stdp->std_get_hiwat = count; 564 565 while (count != 0) { 566 KASSERT(mbuf != NULL, ("mbuf == NULL")); 567 568 next = mbuf->m_nextpkt; 569 mbuf->m_nextpkt = NULL; 570 571 ETHER_BPF_MTAP(sc->ifnet, mbuf); /* packet capture */ 572 573 if (next != NULL) 574 prefetch_read_many(next); 575 576 rc = sfxge_tx_queue_mbuf(txq, mbuf); 577 --count; 578 non_tcp_count -= sfxge_is_mbuf_non_tcp(mbuf); 579 mbuf = next; 580 if (rc != 0) 581 continue; 582 583 if (txq->blocked) 584 break; 585 586 /* Push the fragments to the hardware in batches. */ 587 if (txq->added - pushed >= SFXGE_TX_BATCH) { 588 efx_tx_qpush(txq->common, txq->added, pushed); 589 pushed = txq->added; 590 } 591 } 592 593 if (count == 0) { 594 KASSERT(mbuf == NULL, ("mbuf != NULL")); 595 KASSERT(non_tcp_count == 0, 596 ("inconsistent TCP/non-TCP detection")); 597 stdp->std_get = NULL; 598 stdp->std_get_count = 0; 599 stdp->std_get_non_tcp_count = 0; 600 stdp->std_getp = &stdp->std_get; 601 } else { 602 stdp->std_get = mbuf; 603 stdp->std_get_count = count; 604 stdp->std_get_non_tcp_count = non_tcp_count; 605 } 606 607 if (txq->added != pushed) 608 efx_tx_qpush(txq->common, txq->added, pushed); 609 610 KASSERT(txq->blocked || stdp->std_get_count == 0, 611 ("queue unblocked but count is non-zero")); 612} 613 614#define SFXGE_TX_QDPL_PENDING(_txq) ((_txq)->dpl.std_put != 0) 615 616/* 617 * Service the deferred packet list. 618 * 619 * NOTE: drops the txq mutex! 620 */ 621static void 622sfxge_tx_qdpl_service(struct sfxge_txq *txq) 623{ 624 SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); 625 626 do { 627 if (SFXGE_TX_QDPL_PENDING(txq)) 628 sfxge_tx_qdpl_swizzle(txq); 629 630 if (!txq->blocked) 631 sfxge_tx_qdpl_drain(txq); 632 633 SFXGE_TXQ_UNLOCK(txq); 634 } while (SFXGE_TX_QDPL_PENDING(txq) && 635 SFXGE_TXQ_TRYLOCK(txq)); 636} 637 638/* 639 * Put a packet on the deferred packet get-list. 
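 *
 * Called with the txq lock held.  Returns ENOBUFS (and bumps the
 * corresponding overflow statistic) if the overall get-list limit or the
 * non-TCP sub-limit has already been reached.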
640 */ 641static int 642sfxge_tx_qdpl_put_locked(struct sfxge_txq *txq, struct mbuf *mbuf) 643{ 644 struct sfxge_tx_dpl *stdp; 645 646 stdp = &txq->dpl; 647 648 KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL")); 649 650 SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); 651 652 if (stdp->std_get_count >= stdp->std_get_max) { 653 txq->get_overflow++; 654 return (ENOBUFS); 655 } 656 if (sfxge_is_mbuf_non_tcp(mbuf)) { 657 if (stdp->std_get_non_tcp_count >= 658 stdp->std_get_non_tcp_max) { 659 txq->get_non_tcp_overflow++; 660 return (ENOBUFS); 661 } 662 stdp->std_get_non_tcp_count++; 663 } 664 665 *(stdp->std_getp) = mbuf; 666 stdp->std_getp = &mbuf->m_nextpkt; 667 stdp->std_get_count++; 668 669 return (0); 670} 671 672/* 673 * Put a packet on the deferred packet put-list. 674 * 675 * We overload the csum_data field in the mbuf to keep track of this length 676 * because there is no cheap alternative to avoid races. 677 */ 678static int 679sfxge_tx_qdpl_put_unlocked(struct sfxge_txq *txq, struct mbuf *mbuf) 680{ 681 struct sfxge_tx_dpl *stdp; 682 volatile uintptr_t *putp; 683 uintptr_t old; 684 uintptr_t new; 685 unsigned int put_count; 686 687 KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL")); 688 689 SFXGE_TXQ_LOCK_ASSERT_NOTOWNED(txq); 690 691 stdp = &txq->dpl; 692 putp = &stdp->std_put; 693 new = (uintptr_t)mbuf; 694 695 do { 696 old = *putp; 697 if (old != 0) { 698 struct mbuf *mp = (struct mbuf *)old; 699 put_count = mp->m_pkthdr.csum_data; 700 } else 701 put_count = 0; 702 if (put_count >= stdp->std_put_max) { 703 atomic_add_long(&txq->put_overflow, 1); 704 return (ENOBUFS); 705 } 706 mbuf->m_pkthdr.csum_data = put_count + 1; 707 mbuf->m_nextpkt = (void *)old; 708 } while (atomic_cmpset_ptr(putp, old, new) == 0); 709 710 return (0); 711} 712 713/* 714 * Called from if_transmit - will try to grab the txq lock and enqueue to the 715 * put list if it succeeds, otherwise try to push onto the defer list if space. 716 */ 717static int 718sfxge_tx_packet_add(struct sfxge_txq *txq, struct mbuf *m) 719{ 720 int rc; 721 722 if (!SFXGE_LINK_UP(txq->sc)) { 723 atomic_add_long(&txq->netdown_drops, 1); 724 return (ENETDOWN); 725 } 726 727 /* 728 * Try to grab the txq lock. If we are able to get the lock, 729 * the packet will be appended to the "get list" of the deferred 730 * packet list. Otherwise, it will be pushed on the "put list". 731 */ 732 if (SFXGE_TXQ_TRYLOCK(txq)) { 733 /* First swizzle put-list to get-list to keep order */ 734 sfxge_tx_qdpl_swizzle(txq); 735 736 rc = sfxge_tx_qdpl_put_locked(txq, m); 737 738 /* Try to service the list. */ 739 sfxge_tx_qdpl_service(txq); 740 /* Lock has been dropped. */ 741 } else { 742 rc = sfxge_tx_qdpl_put_unlocked(txq, m); 743 744 /* 745 * Try to grab the lock again. 746 * 747 * If we are able to get the lock, we need to process 748 * the deferred packet list. If we are not able to get 749 * the lock, another thread is processing the list. 750 */ 751 if ((rc == 0) && SFXGE_TXQ_TRYLOCK(txq)) { 752 sfxge_tx_qdpl_service(txq); 753 /* Lock has been dropped. 
*/ 754 } 755 } 756 757 SFXGE_TXQ_LOCK_ASSERT_NOTOWNED(txq); 758 759 return (rc); 760} 761 762static void 763sfxge_tx_qdpl_flush(struct sfxge_txq *txq) 764{ 765 struct sfxge_tx_dpl *stdp = &txq->dpl; 766 struct mbuf *mbuf, *next; 767 768 SFXGE_TXQ_LOCK(txq); 769 770 sfxge_tx_qdpl_swizzle(txq); 771 for (mbuf = stdp->std_get; mbuf != NULL; mbuf = next) { 772 next = mbuf->m_nextpkt; 773 m_freem(mbuf); 774 } 775 stdp->std_get = NULL; 776 stdp->std_get_count = 0; 777 stdp->std_get_non_tcp_count = 0; 778 stdp->std_getp = &stdp->std_get; 779 780 SFXGE_TXQ_UNLOCK(txq); 781} 782 783void 784sfxge_if_qflush(struct ifnet *ifp) 785{ 786 struct sfxge_softc *sc; 787 unsigned int i; 788 789 sc = ifp->if_softc; 790 791 for (i = 0; i < sc->txq_count; i++) 792 sfxge_tx_qdpl_flush(sc->txq[i]); 793} 794 795#if SFXGE_TX_PARSE_EARLY 796 797/* There is little space for user data in mbuf pkthdr, so we 798 * use l*hlen fields which are not used by the driver otherwise 799 * to store header offsets. 800 * The fields are 8-bit, but it's ok, no header may be longer than 255 bytes. 801 */ 802 803 804#define TSO_MBUF_PROTO(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.sixteen[0]) 805/* We abuse l5hlen here because PH_loc can hold only 64 bits of data */ 806#define TSO_MBUF_FLAGS(_mbuf) ((_mbuf)->m_pkthdr.l5hlen) 807#define TSO_MBUF_PACKETID(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.sixteen[1]) 808#define TSO_MBUF_SEQNUM(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.thirtytwo[1]) 809 810static void sfxge_parse_tx_packet(struct mbuf *mbuf) 811{ 812 struct ether_header *eh = mtod(mbuf, struct ether_header *); 813 const struct tcphdr *th; 814 struct tcphdr th_copy; 815 816 /* Find network protocol and header */ 817 TSO_MBUF_PROTO(mbuf) = eh->ether_type; 818 if (TSO_MBUF_PROTO(mbuf) == htons(ETHERTYPE_VLAN)) { 819 struct ether_vlan_header *veh = 820 mtod(mbuf, struct ether_vlan_header *); 821 TSO_MBUF_PROTO(mbuf) = veh->evl_proto; 822 mbuf->m_pkthdr.l2hlen = sizeof(*veh); 823 } else { 824 mbuf->m_pkthdr.l2hlen = sizeof(*eh); 825 } 826 827 /* Find TCP header */ 828 if (TSO_MBUF_PROTO(mbuf) == htons(ETHERTYPE_IP)) { 829 const struct ip *iph = (const struct ip *)mtodo(mbuf, mbuf->m_pkthdr.l2hlen); 830 831 KASSERT(iph->ip_p == IPPROTO_TCP, 832 ("TSO required on non-TCP packet")); 833 mbuf->m_pkthdr.l3hlen = mbuf->m_pkthdr.l2hlen + 4 * iph->ip_hl; 834 TSO_MBUF_PACKETID(mbuf) = iph->ip_id; 835 } else { 836 KASSERT(TSO_MBUF_PROTO(mbuf) == htons(ETHERTYPE_IPV6), 837 ("TSO required on non-IP packet")); 838 KASSERT(((const struct ip6_hdr *)mtodo(mbuf, mbuf->m_pkthdr.l2hlen))->ip6_nxt == 839 IPPROTO_TCP, 840 ("TSO required on non-TCP packet")); 841 mbuf->m_pkthdr.l3hlen = mbuf->m_pkthdr.l2hlen + sizeof(struct ip6_hdr); 842 TSO_MBUF_PACKETID(mbuf) = 0; 843 } 844 845 KASSERT(mbuf->m_len >= mbuf->m_pkthdr.l3hlen, 846 ("network header is fragmented in mbuf")); 847 848 /* We need TCP header including flags (window is the next) */ 849 if (mbuf->m_len < mbuf->m_pkthdr.l3hlen + offsetof(struct tcphdr, th_win)) { 850 m_copydata(mbuf, mbuf->m_pkthdr.l3hlen, sizeof(th_copy), 851 (caddr_t)&th_copy); 852 th = &th_copy; 853 } else { 854 th = (const struct tcphdr *)mtodo(mbuf, mbuf->m_pkthdr.l3hlen); 855 } 856 857 mbuf->m_pkthdr.l4hlen = mbuf->m_pkthdr.l3hlen + 4 * th->th_off; 858 TSO_MBUF_SEQNUM(mbuf) = ntohl(th->th_seq); 859 860 /* These flags must not be duplicated */ 861 /* 862 * RST should not be duplicated as well, but FreeBSD kernel 863 * generates TSO packets with RST flag. So, do not assert 864 * its absence. 
865 */ 866 KASSERT(!(th->th_flags & (TH_URG | TH_SYN)), 867 ("incompatible TCP flag 0x%x on TSO packet", 868 th->th_flags & (TH_URG | TH_SYN))); 869 TSO_MBUF_FLAGS(mbuf) = th->th_flags; 870} 871#endif 872 873/* 874 * TX start -- called by the stack. 875 */ 876int 877sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m) 878{ 879 struct sfxge_softc *sc; 880 struct sfxge_txq *txq; 881 int rc; 882 883 sc = (struct sfxge_softc *)ifp->if_softc; 884 885 /* 886 * Transmit may be called when interface is up from the kernel 887 * point of view, but not yet up (in progress) from the driver 888 * point of view. I.e. link aggregation bring up. 889 * Transmit may be called when interface is up from the driver 890 * point of view, but already down from the kernel point of 891 * view. I.e. Rx when interface shutdown is in progress. 892 */ 893 KASSERT((ifp->if_flags & IFF_UP) || (sc->if_flags & IFF_UP), 894 ("interface not up")); 895 896 /* Pick the desired transmit queue. */ 897 if (sc->txq_dynamic_cksum_toggle_supported | 898 (m->m_pkthdr.csum_flags & 899 (CSUM_DELAY_DATA | CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_TSO))) { 900 int index = 0; 901 902#ifdef RSS 903 uint32_t bucket_id; 904 905 /* 906 * Select a TX queue which matches the corresponding 907 * RX queue for the hash in order to assign both 908 * TX and RX parts of the flow to the same CPU 909 */ 910 if (rss_m2bucket(m, &bucket_id) == 0) 911 index = bucket_id % (sc->txq_count - (SFXGE_TXQ_NTYPES - 1)); 912#else 913 /* check if flowid is set */ 914 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { 915 uint32_t hash = m->m_pkthdr.flowid; 916 uint32_t idx = hash % nitems(sc->rx_indir_table); 917 918 index = sc->rx_indir_table[idx]; 919 } 920#endif 921#if SFXGE_TX_PARSE_EARLY 922 if (m->m_pkthdr.csum_flags & CSUM_TSO) 923 sfxge_parse_tx_packet(m); 924#endif 925 index += (sc->txq_dynamic_cksum_toggle_supported == B_FALSE) ? 926 SFXGE_TXQ_IP_TCP_UDP_CKSUM : 0; 927 txq = sc->txq[index]; 928 } else if (m->m_pkthdr.csum_flags & CSUM_DELAY_IP) { 929 txq = sc->txq[SFXGE_TXQ_IP_CKSUM]; 930 } else { 931 txq = sc->txq[SFXGE_TXQ_NON_CKSUM]; 932 } 933 934 rc = sfxge_tx_packet_add(txq, m); 935 if (rc != 0) 936 m_freem(m); 937 938 return (rc); 939} 940 941/* 942 * Software "TSO". Not quite as good as doing it in hardware, but 943 * still faster than segmenting in the stack. 
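 *
 * The code below walks the DMA segments of the mapped packet and, for each
 * output segment, emits a header descriptor followed by payload DMA
 * descriptors.  With firmware-assisted TSO the original header is reused
 * behind FATSO option descriptors; with software TSO the header is copied
 * into a preallocated buffer and its TCP sequence number, TCP flags and IP
 * length are rewritten per segment.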
944 */ 945 946struct sfxge_tso_state { 947 /* Output position */ 948 unsigned out_len; /* Remaining length in current segment */ 949 unsigned seqnum; /* Current sequence number */ 950 unsigned packet_space; /* Remaining space in current packet */ 951 unsigned segs_space; /* Remaining number of DMA segments 952 for the packet (FATSOv2 only) */ 953 954 /* Input position */ 955 uint64_t dma_addr; /* DMA address of current position */ 956 unsigned in_len; /* Remaining length in current mbuf */ 957 958 const struct mbuf *mbuf; /* Input mbuf (head of chain) */ 959 u_short protocol; /* Network protocol (after VLAN decap) */ 960 ssize_t nh_off; /* Offset of network header */ 961 ssize_t tcph_off; /* Offset of TCP header */ 962 unsigned header_len; /* Number of bytes of header */ 963 unsigned seg_size; /* TCP segment size */ 964 int fw_assisted; /* Use FW-assisted TSO */ 965 u_short packet_id; /* IPv4 packet ID from the original packet */ 966 uint8_t tcp_flags; /* TCP flags */ 967 efx_desc_t header_desc; /* Precomputed header descriptor for 968 * FW-assisted TSO */ 969}; 970 971#if !SFXGE_TX_PARSE_EARLY 972static const struct ip *tso_iph(const struct sfxge_tso_state *tso) 973{ 974 KASSERT(tso->protocol == htons(ETHERTYPE_IP), 975 ("tso_iph() in non-IPv4 state")); 976 return (const struct ip *)(tso->mbuf->m_data + tso->nh_off); 977} 978 979static __unused const struct ip6_hdr *tso_ip6h(const struct sfxge_tso_state *tso) 980{ 981 KASSERT(tso->protocol == htons(ETHERTYPE_IPV6), 982 ("tso_ip6h() in non-IPv6 state")); 983 return (const struct ip6_hdr *)(tso->mbuf->m_data + tso->nh_off); 984} 985 986static const struct tcphdr *tso_tcph(const struct sfxge_tso_state *tso) 987{ 988 return (const struct tcphdr *)(tso->mbuf->m_data + tso->tcph_off); 989} 990#endif 991 992 993/* Size of preallocated TSO header buffers. Larger blocks must be 994 * allocated from the heap. 995 */ 996#define TSOH_STD_SIZE 128 997 998/* At most half the descriptors in the queue at any time will refer to 999 * a TSO header buffer, since they must always be followed by a 1000 * payload descriptor referring to an mbuf. 
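 *
 * For example (illustrative numbers): with 1024 Tx ring entries and 4 KiB
 * pages, TSOH_COUNT is 512, TSOH_PER_PAGE is 4096/128 = 32, and so
 * TSOH_PAGE_COUNT is 16 preallocated pages.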
1001 */ 1002#define TSOH_COUNT(_txq_entries) ((_txq_entries) / 2u) 1003#define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE) 1004#define TSOH_PAGE_COUNT(_txq_entries) \ 1005 howmany(TSOH_COUNT(_txq_entries), TSOH_PER_PAGE) 1006 1007static int tso_init(struct sfxge_txq *txq) 1008{ 1009 struct sfxge_softc *sc = txq->sc; 1010 unsigned int tsoh_page_count = TSOH_PAGE_COUNT(sc->txq_entries); 1011 int i, rc; 1012 1013 /* Allocate TSO header buffers */ 1014 txq->tsoh_buffer = malloc(tsoh_page_count * sizeof(txq->tsoh_buffer[0]), 1015 M_SFXGE, M_WAITOK); 1016 1017 for (i = 0; i < tsoh_page_count; i++) { 1018 rc = sfxge_dma_alloc(sc, PAGE_SIZE, &txq->tsoh_buffer[i]); 1019 if (rc != 0) 1020 goto fail; 1021 } 1022 1023 return (0); 1024 1025fail: 1026 while (i-- > 0) 1027 sfxge_dma_free(&txq->tsoh_buffer[i]); 1028 free(txq->tsoh_buffer, M_SFXGE); 1029 txq->tsoh_buffer = NULL; 1030 return (rc); 1031} 1032 1033static void tso_fini(struct sfxge_txq *txq) 1034{ 1035 int i; 1036 1037 if (txq->tsoh_buffer != NULL) { 1038 for (i = 0; i < TSOH_PAGE_COUNT(txq->sc->txq_entries); i++) 1039 sfxge_dma_free(&txq->tsoh_buffer[i]); 1040 free(txq->tsoh_buffer, M_SFXGE); 1041 } 1042} 1043 1044static void tso_start(struct sfxge_txq *txq, struct sfxge_tso_state *tso, 1045 const bus_dma_segment_t *hdr_dma_seg, 1046 struct mbuf *mbuf) 1047{ 1048 const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->sc->enp); 1049#if !SFXGE_TX_PARSE_EARLY 1050 struct ether_header *eh = mtod(mbuf, struct ether_header *); 1051 const struct tcphdr *th; 1052 struct tcphdr th_copy; 1053#endif 1054 1055 tso->fw_assisted = txq->tso_fw_assisted; 1056 tso->mbuf = mbuf; 1057 1058 /* Find network protocol and header */ 1059#if !SFXGE_TX_PARSE_EARLY 1060 tso->protocol = eh->ether_type; 1061 if (tso->protocol == htons(ETHERTYPE_VLAN)) { 1062 struct ether_vlan_header *veh = 1063 mtod(mbuf, struct ether_vlan_header *); 1064 tso->protocol = veh->evl_proto; 1065 tso->nh_off = sizeof(*veh); 1066 } else { 1067 tso->nh_off = sizeof(*eh); 1068 } 1069#else 1070 tso->protocol = TSO_MBUF_PROTO(mbuf); 1071 tso->nh_off = mbuf->m_pkthdr.l2hlen; 1072 tso->tcph_off = mbuf->m_pkthdr.l3hlen; 1073 tso->packet_id = ntohs(TSO_MBUF_PACKETID(mbuf)); 1074#endif 1075 1076#if !SFXGE_TX_PARSE_EARLY 1077 /* Find TCP header */ 1078 if (tso->protocol == htons(ETHERTYPE_IP)) { 1079 KASSERT(tso_iph(tso)->ip_p == IPPROTO_TCP, 1080 ("TSO required on non-TCP packet")); 1081 tso->tcph_off = tso->nh_off + 4 * tso_iph(tso)->ip_hl; 1082 tso->packet_id = ntohs(tso_iph(tso)->ip_id); 1083 } else { 1084 KASSERT(tso->protocol == htons(ETHERTYPE_IPV6), 1085 ("TSO required on non-IP packet")); 1086 KASSERT(tso_ip6h(tso)->ip6_nxt == IPPROTO_TCP, 1087 ("TSO required on non-TCP packet")); 1088 tso->tcph_off = tso->nh_off + sizeof(struct ip6_hdr); 1089 tso->packet_id = 0; 1090 } 1091#endif 1092 1093 1094 if (tso->fw_assisted && 1095 __predict_false(tso->tcph_off > 1096 encp->enc_tx_tso_tcp_header_offset_limit)) { 1097 tso->fw_assisted = 0; 1098 } 1099 1100 1101#if !SFXGE_TX_PARSE_EARLY 1102 KASSERT(mbuf->m_len >= tso->tcph_off, 1103 ("network header is fragmented in mbuf")); 1104 /* We need TCP header including flags (window is the next) */ 1105 if (mbuf->m_len < tso->tcph_off + offsetof(struct tcphdr, th_win)) { 1106 m_copydata(tso->mbuf, tso->tcph_off, sizeof(th_copy), 1107 (caddr_t)&th_copy); 1108 th = &th_copy; 1109 } else { 1110 th = tso_tcph(tso); 1111 } 1112 tso->header_len = tso->tcph_off + 4 * th->th_off; 1113#else 1114 tso->header_len = mbuf->m_pkthdr.l4hlen; 1115#endif 1116 tso->seg_size = 
mbuf->m_pkthdr.tso_segsz; 1117 1118#if !SFXGE_TX_PARSE_EARLY 1119 tso->seqnum = ntohl(th->th_seq); 1120 1121 /* These flags must not be duplicated */ 1122 /* 1123 * RST should not be duplicated as well, but FreeBSD kernel 1124 * generates TSO packets with RST flag. So, do not assert 1125 * its absence. 1126 */ 1127 KASSERT(!(th->th_flags & (TH_URG | TH_SYN)), 1128 ("incompatible TCP flag 0x%x on TSO packet", 1129 th->th_flags & (TH_URG | TH_SYN))); 1130 tso->tcp_flags = th->th_flags; 1131#else 1132 tso->seqnum = TSO_MBUF_SEQNUM(mbuf); 1133 tso->tcp_flags = TSO_MBUF_FLAGS(mbuf); 1134#endif 1135 1136 tso->out_len = mbuf->m_pkthdr.len - tso->header_len; 1137 1138 if (tso->fw_assisted) { 1139 if (hdr_dma_seg->ds_len >= tso->header_len) 1140 efx_tx_qdesc_dma_create(txq->common, 1141 hdr_dma_seg->ds_addr, 1142 tso->header_len, 1143 B_FALSE, 1144 &tso->header_desc); 1145 else 1146 tso->fw_assisted = 0; 1147 } 1148} 1149 1150/* 1151 * tso_fill_packet_with_fragment - form descriptors for the current fragment 1152 * 1153 * Form descriptors for the current fragment, until we reach the end 1154 * of fragment or end-of-packet. Return 0 on success, 1 if not enough 1155 * space. 1156 */ 1157static void tso_fill_packet_with_fragment(struct sfxge_txq *txq, 1158 struct sfxge_tso_state *tso) 1159{ 1160 efx_desc_t *desc; 1161 int n; 1162 uint64_t dma_addr = tso->dma_addr; 1163 boolean_t eop; 1164 1165 if (tso->in_len == 0 || tso->packet_space == 0) 1166 return; 1167 1168 KASSERT(tso->in_len > 0, ("TSO input length went negative")); 1169 KASSERT(tso->packet_space > 0, ("TSO packet space went negative")); 1170 1171 if (tso->fw_assisted & SFXGE_FATSOV2) { 1172 n = tso->in_len; 1173 tso->out_len -= n; 1174 tso->seqnum += n; 1175 tso->in_len = 0; 1176 if (n < tso->packet_space) { 1177 tso->packet_space -= n; 1178 tso->segs_space--; 1179 } else { 1180 tso->packet_space = tso->seg_size - 1181 (n - tso->packet_space) % tso->seg_size; 1182 tso->segs_space = 1183 EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1 - 1184 (tso->packet_space != tso->seg_size); 1185 } 1186 } else { 1187 n = min(tso->in_len, tso->packet_space); 1188 tso->packet_space -= n; 1189 tso->out_len -= n; 1190 tso->dma_addr += n; 1191 tso->in_len -= n; 1192 } 1193 1194 /* 1195 * It is OK to use binary OR below to avoid extra branching 1196 * since all conditions may always be checked. 1197 */ 1198 eop = (tso->out_len == 0) | (tso->packet_space == 0) | 1199 (tso->segs_space == 0); 1200 1201 desc = &txq->pend_desc[txq->n_pend_desc++]; 1202 efx_tx_qdesc_dma_create(txq->common, dma_addr, n, eop, desc); 1203} 1204 1205/* Callback from bus_dmamap_load() for long TSO headers. */ 1206static void tso_map_long_header(void *dma_addr_ret, 1207 bus_dma_segment_t *segs, int nseg, 1208 int error) 1209{ 1210 *(uint64_t *)dma_addr_ret = ((__predict_true(error == 0) && 1211 __predict_true(nseg == 1)) ? 1212 segs->ds_addr : 0); 1213} 1214 1215/* 1216 * tso_start_new_packet - generate a new header and prepare for the new packet 1217 * 1218 * Generate a new header and prepare for the new packet. Return 0 on 1219 * success, or an error code if failed to alloc header. 
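 *
 * Per output packet this adds either two FATSOv2 option descriptors, one
 * FATSOv1 option descriptor, or no option descriptor (software TSO),
 * followed by one DMA descriptor for the TCP/IP header (which software TSO
 * copies into a separate buffer and rewrites).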
1220 */ 1221static int tso_start_new_packet(struct sfxge_txq *txq, 1222 struct sfxge_tso_state *tso, 1223 unsigned int *idp) 1224{ 1225 unsigned int id = *idp; 1226 struct tcphdr *tsoh_th; 1227 unsigned ip_length; 1228 caddr_t header; 1229 uint64_t dma_addr; 1230 bus_dmamap_t map; 1231 efx_desc_t *desc; 1232 int rc; 1233 1234 if (tso->fw_assisted) { 1235 if (tso->fw_assisted & SFXGE_FATSOV2) { 1236 /* Add 2 FATSOv2 option descriptors */ 1237 desc = &txq->pend_desc[txq->n_pend_desc]; 1238 efx_tx_qdesc_tso2_create(txq->common, 1239 tso->packet_id, 1240 tso->seqnum, 1241 tso->seg_size, 1242 desc, 1243 EFX_TX_FATSOV2_OPT_NDESCS); 1244 desc += EFX_TX_FATSOV2_OPT_NDESCS; 1245 txq->n_pend_desc += EFX_TX_FATSOV2_OPT_NDESCS; 1246 KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); 1247 id = (id + EFX_TX_FATSOV2_OPT_NDESCS) & txq->ptr_mask; 1248 1249 tso->segs_space = 1250 EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1; 1251 } else { 1252 uint8_t tcp_flags = tso->tcp_flags; 1253 1254 if (tso->out_len > tso->seg_size) 1255 tcp_flags &= ~(TH_FIN | TH_PUSH); 1256 1257 /* Add FATSOv1 option descriptor */ 1258 desc = &txq->pend_desc[txq->n_pend_desc++]; 1259 efx_tx_qdesc_tso_create(txq->common, 1260 tso->packet_id, 1261 tso->seqnum, 1262 tcp_flags, 1263 desc++); 1264 KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); 1265 id = (id + 1) & txq->ptr_mask; 1266 1267 tso->seqnum += tso->seg_size; 1268 tso->segs_space = UINT_MAX; 1269 } 1270 1271 /* Header DMA descriptor */ 1272 *desc = tso->header_desc; 1273 txq->n_pend_desc++; 1274 KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); 1275 id = (id + 1) & txq->ptr_mask; 1276 } else { 1277 /* Allocate a DMA-mapped header buffer. */ 1278 if (__predict_true(tso->header_len <= TSOH_STD_SIZE)) { 1279 unsigned int page_index = (id / 2) / TSOH_PER_PAGE; 1280 unsigned int buf_index = (id / 2) % TSOH_PER_PAGE; 1281 1282 header = (txq->tsoh_buffer[page_index].esm_base + 1283 buf_index * TSOH_STD_SIZE); 1284 dma_addr = (txq->tsoh_buffer[page_index].esm_addr + 1285 buf_index * TSOH_STD_SIZE); 1286 map = txq->tsoh_buffer[page_index].esm_map; 1287 1288 KASSERT(txq->stmp[id].flags == 0, 1289 ("stmp flags are not 0")); 1290 } else { 1291 struct sfxge_tx_mapping *stmp = &txq->stmp[id]; 1292 1293 /* We cannot use bus_dmamem_alloc() as that may sleep */ 1294 header = malloc(tso->header_len, M_SFXGE, M_NOWAIT); 1295 if (__predict_false(!header)) 1296 return (ENOMEM); 1297 rc = bus_dmamap_load(txq->packet_dma_tag, stmp->map, 1298 header, tso->header_len, 1299 tso_map_long_header, &dma_addr, 1300 BUS_DMA_NOWAIT); 1301 if (__predict_false(dma_addr == 0)) { 1302 if (rc == 0) { 1303 /* Succeeded but got >1 segment */ 1304 bus_dmamap_unload(txq->packet_dma_tag, 1305 stmp->map); 1306 rc = EINVAL; 1307 } 1308 free(header, M_SFXGE); 1309 return (rc); 1310 } 1311 map = stmp->map; 1312 1313 txq->tso_long_headers++; 1314 stmp->u.heap_buf = header; 1315 stmp->flags = TX_BUF_UNMAP; 1316 } 1317 1318 tsoh_th = (struct tcphdr *)(header + tso->tcph_off); 1319 1320 /* Copy and update the headers. */ 1321 m_copydata(tso->mbuf, 0, tso->header_len, header); 1322 1323 tsoh_th->th_seq = htonl(tso->seqnum); 1324 tso->seqnum += tso->seg_size; 1325 if (tso->out_len > tso->seg_size) { 1326 /* This packet will not finish the TSO burst. */ 1327 ip_length = tso->header_len - tso->nh_off + tso->seg_size; 1328 tsoh_th->th_flags &= ~(TH_FIN | TH_PUSH); 1329 } else { 1330 /* This packet will be the last in the TSO burst. 
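			 * The remaining payload (out_len) fits entirely in
			 * this segment, so FIN and PUSH are left intact here.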
*/ 1331 ip_length = tso->header_len - tso->nh_off + tso->out_len; 1332 } 1333 1334 if (tso->protocol == htons(ETHERTYPE_IP)) { 1335 struct ip *tsoh_iph = (struct ip *)(header + tso->nh_off); 1336 tsoh_iph->ip_len = htons(ip_length); 1337 /* XXX We should increment ip_id, but FreeBSD doesn't 1338 * currently allocate extra IDs for multiple segments. 1339 */ 1340 } else { 1341 struct ip6_hdr *tsoh_iph = 1342 (struct ip6_hdr *)(header + tso->nh_off); 1343 tsoh_iph->ip6_plen = htons(ip_length - sizeof(*tsoh_iph)); 1344 } 1345 1346 /* Make the header visible to the hardware. */ 1347 bus_dmamap_sync(txq->packet_dma_tag, map, BUS_DMASYNC_PREWRITE); 1348 1349 /* Form a descriptor for this header. */ 1350 desc = &txq->pend_desc[txq->n_pend_desc++]; 1351 efx_tx_qdesc_dma_create(txq->common, 1352 dma_addr, 1353 tso->header_len, 1354 0, 1355 desc); 1356 id = (id + 1) & txq->ptr_mask; 1357 1358 tso->segs_space = UINT_MAX; 1359 } 1360 tso->packet_space = tso->seg_size; 1361 txq->tso_packets++; 1362 *idp = id; 1363 1364 return (0); 1365} 1366 1367static int 1368sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf, 1369 const bus_dma_segment_t *dma_seg, int n_dma_seg, 1370 int n_extra_descs) 1371{ 1372 struct sfxge_tso_state tso; 1373 unsigned int id; 1374 unsigned skipped = 0; 1375 1376 tso_start(txq, &tso, dma_seg, mbuf); 1377 1378 while (dma_seg->ds_len + skipped <= tso.header_len) { 1379 skipped += dma_seg->ds_len; 1380 --n_dma_seg; 1381 KASSERT(n_dma_seg, ("no payload found in TSO packet")); 1382 ++dma_seg; 1383 } 1384 tso.in_len = dma_seg->ds_len - (tso.header_len - skipped); 1385 tso.dma_addr = dma_seg->ds_addr + (tso.header_len - skipped); 1386 1387 id = (txq->added + n_extra_descs) & txq->ptr_mask; 1388 if (__predict_false(tso_start_new_packet(txq, &tso, &id))) 1389 return (-1); 1390 1391 while (1) { 1392 tso_fill_packet_with_fragment(txq, &tso); 1393 /* Exactly one DMA descriptor is added */ 1394 KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); 1395 id = (id + 1) & txq->ptr_mask; 1396 1397 /* Move onto the next fragment? */ 1398 if (tso.in_len == 0) { 1399 --n_dma_seg; 1400 if (n_dma_seg == 0) 1401 break; 1402 ++dma_seg; 1403 tso.in_len = dma_seg->ds_len; 1404 tso.dma_addr = dma_seg->ds_addr; 1405 } 1406 1407 /* End of packet? */ 1408 if ((tso.packet_space == 0) | (tso.segs_space == 0)) { 1409 unsigned int n_fatso_opt_desc = 1410 (tso.fw_assisted & SFXGE_FATSOV2) ? 1411 EFX_TX_FATSOV2_OPT_NDESCS : 1412 (tso.fw_assisted & SFXGE_FATSOV1) ? 1 : 0; 1413 1414 /* If the queue is now full due to tiny MSS, 1415 * or we can't create another header, discard 1416 * the remainder of the input mbuf but do not 1417 * roll back the work we have done. 
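			 *
			 * Worst case the new packet needs the FATSO option
			 * descriptor(s), one header descriptor and one payload
			 * descriptor per remaining DMA segment, hence the
			 * bound checked below.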
1418 */ 1419 if (txq->n_pend_desc + n_fatso_opt_desc + 1420 1 /* header */ + n_dma_seg > txq->max_pkt_desc) { 1421 txq->tso_pdrop_too_many++; 1422 break; 1423 } 1424 if (__predict_false(tso_start_new_packet(txq, &tso, 1425 &id))) { 1426 txq->tso_pdrop_no_rsrc++; 1427 break; 1428 } 1429 } 1430 } 1431 1432 txq->tso_bursts++; 1433 return (id); 1434} 1435 1436static void 1437sfxge_tx_qunblock(struct sfxge_txq *txq) 1438{ 1439 struct sfxge_softc *sc; 1440 struct sfxge_evq *evq; 1441 1442 sc = txq->sc; 1443 evq = sc->evq[txq->evq_index]; 1444 1445 SFXGE_EVQ_LOCK_ASSERT_OWNED(evq); 1446 1447 if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED)) 1448 return; 1449 1450 SFXGE_TXQ_LOCK(txq); 1451 1452 if (txq->blocked) { 1453 unsigned int level; 1454 1455 level = txq->added - txq->completed; 1456 if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries)) { 1457 /* reaped must be in sync with blocked */ 1458 sfxge_tx_qreap(txq); 1459 txq->blocked = 0; 1460 } 1461 } 1462 1463 sfxge_tx_qdpl_service(txq); 1464 /* note: lock has been dropped */ 1465} 1466 1467void 1468sfxge_tx_qflush_done(struct sfxge_txq *txq) 1469{ 1470 1471 txq->flush_state = SFXGE_FLUSH_DONE; 1472} 1473 1474static void 1475sfxge_tx_qstop(struct sfxge_softc *sc, unsigned int index) 1476{ 1477 struct sfxge_txq *txq; 1478 struct sfxge_evq *evq; 1479 unsigned int count; 1480 1481 SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc); 1482 1483 txq = sc->txq[index]; 1484 evq = sc->evq[txq->evq_index]; 1485 1486 SFXGE_EVQ_LOCK(evq); 1487 SFXGE_TXQ_LOCK(txq); 1488 1489 KASSERT(txq->init_state == SFXGE_TXQ_STARTED, 1490 ("txq->init_state != SFXGE_TXQ_STARTED")); 1491 1492 txq->init_state = SFXGE_TXQ_INITIALIZED; 1493 1494 if (txq->flush_state != SFXGE_FLUSH_DONE) { 1495 txq->flush_state = SFXGE_FLUSH_PENDING; 1496 1497 SFXGE_EVQ_UNLOCK(evq); 1498 SFXGE_TXQ_UNLOCK(txq); 1499 1500 /* Flush the transmit queue. */ 1501 if (efx_tx_qflush(txq->common) != 0) { 1502 log(LOG_ERR, "%s: Flushing Tx queue %u failed\n", 1503 device_get_nameunit(sc->dev), index); 1504 txq->flush_state = SFXGE_FLUSH_DONE; 1505 } else { 1506 count = 0; 1507 do { 1508 /* Spin for 100ms. */ 1509 DELAY(100000); 1510 if (txq->flush_state != SFXGE_FLUSH_PENDING) 1511 break; 1512 } while (++count < 20); 1513 } 1514 SFXGE_EVQ_LOCK(evq); 1515 SFXGE_TXQ_LOCK(txq); 1516 1517 KASSERT(txq->flush_state != SFXGE_FLUSH_FAILED, 1518 ("txq->flush_state == SFXGE_FLUSH_FAILED")); 1519 1520 if (txq->flush_state != SFXGE_FLUSH_DONE) { 1521 /* Flush timeout */ 1522 log(LOG_ERR, "%s: Cannot flush Tx queue %u\n", 1523 device_get_nameunit(sc->dev), index); 1524 txq->flush_state = SFXGE_FLUSH_DONE; 1525 } 1526 } 1527 1528 txq->blocked = 0; 1529 txq->pending = txq->added; 1530 1531 sfxge_tx_qcomplete(txq, evq); 1532 KASSERT(txq->completed == txq->added, 1533 ("txq->completed != txq->added")); 1534 1535 sfxge_tx_qreap(txq); 1536 KASSERT(txq->reaped == txq->completed, 1537 ("txq->reaped != txq->completed")); 1538 1539 txq->added = 0; 1540 txq->pending = 0; 1541 txq->completed = 0; 1542 txq->reaped = 0; 1543 1544 /* Destroy the common code transmit queue. */ 1545 efx_tx_qdestroy(txq->common); 1546 txq->common = NULL; 1547 1548 efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id, 1549 EFX_TXQ_NBUFS(sc->txq_entries)); 1550 1551 txq->hw_cksum_flags = 0; 1552 1553 SFXGE_EVQ_UNLOCK(evq); 1554 SFXGE_TXQ_UNLOCK(txq); 1555} 1556 1557/* 1558 * Estimate maximum number of Tx descriptors required for TSO packet. 
1559 * With minimum MSS and maximum mbuf length we might need more (even 1560 * than a ring-ful of descriptors), but this should not happen in 1561 * practice except due to deliberate attack. In that case we will 1562 * truncate the output at a packet boundary. 1563 */ 1564static unsigned int 1565sfxge_tx_max_pkt_desc(const struct sfxge_softc *sc, enum sfxge_txq_type type, 1566 unsigned int tso_fw_assisted) 1567{ 1568 /* One descriptor for every input fragment */ 1569 unsigned int max_descs = SFXGE_TX_MAPPING_MAX_SEG; 1570 unsigned int sw_tso_max_descs; 1571 unsigned int fa_tso_v1_max_descs = 0; 1572 unsigned int fa_tso_v2_max_descs = 0; 1573 1574 /* Checksum offload Tx option descriptor may be required */ 1575 if (sc->txq_dynamic_cksum_toggle_supported) 1576 max_descs++; 1577 1578 /* VLAN tagging Tx option descriptor may be required */ 1579 if (efx_nic_cfg_get(sc->enp)->enc_hw_tx_insert_vlan_enabled) 1580 max_descs++; 1581 1582 if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM) { 1583 /* 1584 * Plus header and payload descriptor for each output segment. 1585 * Minus one since header fragment is already counted. 1586 * Even if FATSO is used, we should be ready to fallback 1587 * to do it in the driver. 1588 */ 1589 sw_tso_max_descs = SFXGE_TSO_MAX_SEGS * 2 - 1; 1590 1591 /* FW assisted TSOv1 requires one more descriptor per segment 1592 * in comparison to SW TSO */ 1593 if (tso_fw_assisted & SFXGE_FATSOV1) 1594 fa_tso_v1_max_descs = 1595 sw_tso_max_descs + SFXGE_TSO_MAX_SEGS; 1596 1597 /* FW assisted TSOv2 requires 3 (2 FATSO plus header) extra 1598 * descriptors per superframe limited by number of DMA fetches 1599 * per packet. The first packet header is already counted. 1600 */ 1601 if (tso_fw_assisted & SFXGE_FATSOV2) { 1602 fa_tso_v2_max_descs = 1603 howmany(SFXGE_TX_MAPPING_MAX_SEG, 1604 EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1) * 1605 (EFX_TX_FATSOV2_OPT_NDESCS + 1) - 1; 1606 } 1607 1608 max_descs += MAX(sw_tso_max_descs, 1609 MAX(fa_tso_v1_max_descs, fa_tso_v2_max_descs)); 1610 } 1611 1612 return (max_descs); 1613} 1614 1615static int 1616sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index) 1617{ 1618 struct sfxge_txq *txq; 1619 efsys_mem_t *esmp; 1620 uint16_t flags; 1621 unsigned int tso_fw_assisted; 1622 unsigned int label; 1623 struct sfxge_evq *evq; 1624 unsigned int desc_index; 1625 int rc; 1626 1627 SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc); 1628 1629 txq = sc->txq[index]; 1630 esmp = &txq->mem; 1631 evq = sc->evq[txq->evq_index]; 1632 1633 KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED, 1634 ("txq->init_state != SFXGE_TXQ_INITIALIZED")); 1635 KASSERT(evq->init_state == SFXGE_EVQ_STARTED, 1636 ("evq->init_state != SFXGE_EVQ_STARTED")); 1637 1638 /* Program the buffer table. */ 1639 if ((rc = efx_sram_buf_tbl_set(sc->enp, txq->buf_base_id, esmp, 1640 EFX_TXQ_NBUFS(sc->txq_entries))) != 0) 1641 return (rc); 1642 1643 /* Determine the kind of queue we are creating. */ 1644 tso_fw_assisted = 0; 1645 switch (txq->type) { 1646 case SFXGE_TXQ_NON_CKSUM: 1647 flags = 0; 1648 break; 1649 case SFXGE_TXQ_IP_CKSUM: 1650 flags = EFX_TXQ_CKSUM_IPV4; 1651 break; 1652 case SFXGE_TXQ_IP_TCP_UDP_CKSUM: 1653 flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP; 1654 tso_fw_assisted = sc->tso_fw_assisted; 1655 if (tso_fw_assisted & SFXGE_FATSOV2) 1656 flags |= EFX_TXQ_FATSOV2; 1657 break; 1658 default: 1659 KASSERT(0, ("Impossible TX queue")); 1660 flags = 0; 1661 break; 1662 } 1663 1664 label = (sc->txq_dynamic_cksum_toggle_supported) ? 0 : txq->type; 1665 1666 /* Create the common code transmit queue. 
*/ 1667 if ((rc = efx_tx_qcreate(sc->enp, index, label, esmp, 1668 sc->txq_entries, txq->buf_base_id, flags, evq->common, 1669 &txq->common, &desc_index)) != 0) { 1670 /* Retry if no FATSOv2 resources, otherwise fail */ 1671 if ((rc != ENOSPC) || (~flags & EFX_TXQ_FATSOV2)) 1672 goto fail; 1673 1674 /* Looks like all FATSOv2 contexts are used */ 1675 flags &= ~EFX_TXQ_FATSOV2; 1676 tso_fw_assisted &= ~SFXGE_FATSOV2; 1677 if ((rc = efx_tx_qcreate(sc->enp, index, label, esmp, 1678 sc->txq_entries, txq->buf_base_id, flags, evq->common, 1679 &txq->common, &desc_index)) != 0) 1680 goto fail; 1681 } 1682 1683 /* Initialise queue descriptor indexes */ 1684 txq->added = txq->pending = txq->completed = txq->reaped = desc_index; 1685 1686 SFXGE_TXQ_LOCK(txq); 1687 1688 /* Enable the transmit queue. */ 1689 efx_tx_qenable(txq->common); 1690 1691 txq->init_state = SFXGE_TXQ_STARTED; 1692 txq->flush_state = SFXGE_FLUSH_REQUIRED; 1693 txq->tso_fw_assisted = tso_fw_assisted; 1694 1695 txq->max_pkt_desc = sfxge_tx_max_pkt_desc(sc, txq->type, 1696 tso_fw_assisted); 1697 1698 txq->hw_vlan_tci = 0; 1699 1700 txq->hw_cksum_flags = flags & 1701 (EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP); 1702 1703 SFXGE_TXQ_UNLOCK(txq); 1704 1705 return (0); 1706 1707fail: 1708 efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id, 1709 EFX_TXQ_NBUFS(sc->txq_entries)); 1710 return (rc); 1711} 1712 1713void 1714sfxge_tx_stop(struct sfxge_softc *sc) 1715{ 1716 int index; 1717 1718 index = sc->txq_count; 1719 while (--index >= 0) 1720 sfxge_tx_qstop(sc, index); 1721 1722 /* Tear down the transmit module */ 1723 efx_tx_fini(sc->enp); 1724} 1725 1726int 1727sfxge_tx_start(struct sfxge_softc *sc) 1728{ 1729 int index; 1730 int rc; 1731 1732 /* Initialize the common code transmit module. */ 1733 if ((rc = efx_tx_init(sc->enp)) != 0) 1734 return (rc); 1735 1736 for (index = 0; index < sc->txq_count; index++) { 1737 if ((rc = sfxge_tx_qstart(sc, index)) != 0) 1738 goto fail; 1739 } 1740 1741 return (0); 1742 1743fail: 1744 while (--index >= 0) 1745 sfxge_tx_qstop(sc, index); 1746 1747 efx_tx_fini(sc->enp); 1748 1749 return (rc); 1750} 1751 1752static int 1753sfxge_txq_stat_init(struct sfxge_txq *txq, struct sysctl_oid *txq_node) 1754{ 1755 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(txq->sc->dev); 1756 struct sysctl_oid *stat_node; 1757 unsigned int id; 1758 1759 stat_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(txq_node), OID_AUTO, 1760 "stats", CTLFLAG_RD, NULL, 1761 "Tx queue statistics"); 1762 if (stat_node == NULL) 1763 return (ENOMEM); 1764 1765 for (id = 0; id < nitems(sfxge_tx_stats); id++) { 1766 SYSCTL_ADD_ULONG( 1767 ctx, SYSCTL_CHILDREN(stat_node), OID_AUTO, 1768 sfxge_tx_stats[id].name, CTLFLAG_RD | CTLFLAG_STATS, 1769 (unsigned long *)((caddr_t)txq + sfxge_tx_stats[id].offset), 1770 ""); 1771 } 1772 1773 return (0); 1774} 1775 1776/** 1777 * Destroy a transmit queue. 1778 */ 1779static void 1780sfxge_tx_qfini(struct sfxge_softc *sc, unsigned int index) 1781{ 1782 struct sfxge_txq *txq; 1783 unsigned int nmaps; 1784 1785 txq = sc->txq[index]; 1786 1787 KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED, 1788 ("txq->init_state != SFXGE_TXQ_INITIALIZED")); 1789 1790 if (txq->type == SFXGE_TXQ_IP_TCP_UDP_CKSUM) 1791 tso_fini(txq); 1792 1793 /* Free the context arrays. */ 1794 free(txq->pend_desc, M_SFXGE); 1795 nmaps = sc->txq_entries; 1796 while (nmaps-- != 0) 1797 bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map); 1798 free(txq->stmp, M_SFXGE); 1799 1800 /* Release DMA memory mapping. 
*/ 1801 sfxge_dma_free(&txq->mem); 1802 1803 sc->txq[index] = NULL; 1804 1805 SFXGE_TXQ_LOCK_DESTROY(txq); 1806 1807 free(txq, M_SFXGE); 1808} 1809 1810static int 1811sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index, 1812 enum sfxge_txq_type type, unsigned int evq_index) 1813{ 1814 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sc->enp); 1815 char name[16]; 1816 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev); 1817 struct sysctl_oid *txq_node; 1818 struct sfxge_txq *txq; 1819 struct sfxge_evq *evq; 1820 struct sfxge_tx_dpl *stdp; 1821 struct sysctl_oid *dpl_node; 1822 efsys_mem_t *esmp; 1823 unsigned int nmaps; 1824 int rc; 1825 1826 txq = malloc(sizeof(struct sfxge_txq), M_SFXGE, M_ZERO | M_WAITOK); 1827 txq->sc = sc; 1828 txq->entries = sc->txq_entries; 1829 txq->ptr_mask = txq->entries - 1; 1830 1831 sc->txq[txq_index] = txq; 1832 esmp = &txq->mem; 1833 1834 evq = sc->evq[evq_index]; 1835 1836 /* Allocate and zero DMA space for the descriptor ring. */ 1837 if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(sc->txq_entries), esmp)) != 0) 1838 return (rc); 1839 1840 /* Allocate buffer table entries. */ 1841 sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(sc->txq_entries), 1842 &txq->buf_base_id); 1843 1844 /* Create a DMA tag for packet mappings. */ 1845 if (bus_dma_tag_create(sc->parent_dma_tag, 1, 1846 encp->enc_tx_dma_desc_boundary, 1847 MIN(0x3FFFFFFFFFFFUL, BUS_SPACE_MAXADDR), BUS_SPACE_MAXADDR, NULL, 1848 NULL, 0x11000, SFXGE_TX_MAPPING_MAX_SEG, 1849 encp->enc_tx_dma_desc_size_max, 0, NULL, NULL, 1850 &txq->packet_dma_tag) != 0) { 1851 device_printf(sc->dev, "Couldn't allocate txq DMA tag\n"); 1852 rc = ENOMEM; 1853 goto fail; 1854 } 1855 1856 /* Allocate pending descriptor array for batching writes. */ 1857 txq->pend_desc = malloc(sizeof(efx_desc_t) * sc->txq_entries, 1858 M_SFXGE, M_ZERO | M_WAITOK); 1859 1860 /* Allocate and initialise mbuf DMA mapping array. */ 1861 txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * sc->txq_entries, 1862 M_SFXGE, M_ZERO | M_WAITOK); 1863 for (nmaps = 0; nmaps < sc->txq_entries; nmaps++) { 1864 rc = bus_dmamap_create(txq->packet_dma_tag, 0, 1865 &txq->stmp[nmaps].map); 1866 if (rc != 0) 1867 goto fail2; 1868 } 1869 1870 snprintf(name, sizeof(name), "%u", txq_index); 1871 txq_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->txqs_node), 1872 OID_AUTO, name, CTLFLAG_RD, NULL, ""); 1873 if (txq_node == NULL) { 1874 rc = ENOMEM; 1875 goto fail_txq_node; 1876 } 1877 1878 if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM && 1879 (rc = tso_init(txq)) != 0) 1880 goto fail3; 1881 1882 /* Initialize the deferred packet list. 
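	 * The get-list, non-TCP and put-list limits come from the
	 * sfxge_tx_dpl_* tunables, which are validated in sfxge_tx_init().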
*/ 1883 stdp = &txq->dpl; 1884 stdp->std_put_max = sfxge_tx_dpl_put_max; 1885 stdp->std_get_max = sfxge_tx_dpl_get_max; 1886 stdp->std_get_non_tcp_max = sfxge_tx_dpl_get_non_tcp_max; 1887 stdp->std_getp = &stdp->std_get; 1888 1889 SFXGE_TXQ_LOCK_INIT(txq, device_get_nameunit(sc->dev), txq_index); 1890 1891 dpl_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(txq_node), OID_AUTO, 1892 "dpl", CTLFLAG_RD, NULL, 1893 "Deferred packet list statistics"); 1894 if (dpl_node == NULL) { 1895 rc = ENOMEM; 1896 goto fail_dpl_node; 1897 } 1898 1899 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO, 1900 "get_count", CTLFLAG_RD | CTLFLAG_STATS, 1901 &stdp->std_get_count, 0, ""); 1902 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO, 1903 "get_non_tcp_count", CTLFLAG_RD | CTLFLAG_STATS, 1904 &stdp->std_get_non_tcp_count, 0, ""); 1905 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO, 1906 "get_hiwat", CTLFLAG_RD | CTLFLAG_STATS, 1907 &stdp->std_get_hiwat, 0, ""); 1908 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO, 1909 "put_hiwat", CTLFLAG_RD | CTLFLAG_STATS, 1910 &stdp->std_put_hiwat, 0, ""); 1911 1912 rc = sfxge_txq_stat_init(txq, txq_node); 1913 if (rc != 0) 1914 goto fail_txq_stat_init; 1915 1916 txq->type = type; 1917 txq->evq_index = evq_index; 1918 txq->init_state = SFXGE_TXQ_INITIALIZED; 1919 1920 return (0); 1921 1922fail_txq_stat_init: 1923fail_dpl_node: 1924fail3: 1925fail_txq_node: 1926 free(txq->pend_desc, M_SFXGE); 1927fail2: 1928 while (nmaps-- != 0) 1929 bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map); 1930 free(txq->stmp, M_SFXGE); 1931 bus_dma_tag_destroy(txq->packet_dma_tag); 1932 1933fail: 1934 sfxge_dma_free(esmp); 1935 1936 return (rc); 1937} 1938 1939static int 1940sfxge_tx_stat_handler(SYSCTL_HANDLER_ARGS) 1941{ 1942 struct sfxge_softc *sc = arg1; 1943 unsigned int id = arg2; 1944 unsigned long sum; 1945 unsigned int index; 1946 1947 /* Sum across all TX queues */ 1948 sum = 0; 1949 for (index = 0; index < sc->txq_count; index++) 1950 sum += *(unsigned long *)((caddr_t)sc->txq[index] + 1951 sfxge_tx_stats[id].offset); 1952 1953 return (SYSCTL_OUT(req, &sum, sizeof(sum))); 1954} 1955 1956static void 1957sfxge_tx_stat_init(struct sfxge_softc *sc) 1958{ 1959 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev); 1960 struct sysctl_oid_list *stat_list; 1961 unsigned int id; 1962 1963 stat_list = SYSCTL_CHILDREN(sc->stats_node); 1964 1965 for (id = 0; id < nitems(sfxge_tx_stats); id++) { 1966 SYSCTL_ADD_PROC( 1967 ctx, stat_list, 1968 OID_AUTO, sfxge_tx_stats[id].name, 1969 CTLTYPE_ULONG|CTLFLAG_RD, 1970 sc, id, sfxge_tx_stat_handler, "LU", 1971 ""); 1972 } 1973} 1974 1975uint64_t 1976sfxge_tx_get_drops(struct sfxge_softc *sc) 1977{ 1978 unsigned int index; 1979 uint64_t drops = 0; 1980 struct sfxge_txq *txq; 1981 1982 /* Sum across all TX queues */ 1983 for (index = 0; index < sc->txq_count; index++) { 1984 txq = sc->txq[index]; 1985 /* 1986 * In theory, txq->put_overflow and txq->netdown_drops 1987 * should use atomic operation and other should be 1988 * obtained under txq lock, but it is just statistics. 
1989 */ 1990 drops += txq->drops + txq->get_overflow + 1991 txq->get_non_tcp_overflow + 1992 txq->put_overflow + txq->netdown_drops + 1993 txq->tso_pdrop_too_many + txq->tso_pdrop_no_rsrc; 1994 } 1995 return (drops); 1996} 1997 1998void 1999sfxge_tx_fini(struct sfxge_softc *sc) 2000{ 2001 int index; 2002 2003 index = sc->txq_count; 2004 while (--index >= 0) 2005 sfxge_tx_qfini(sc, index); 2006 2007 sc->txq_count = 0; 2008} 2009 2010 2011int 2012sfxge_tx_init(struct sfxge_softc *sc) 2013{ 2014 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sc->enp); 2015 struct sfxge_intr *intr; 2016 int index; 2017 int rc; 2018 2019 intr = &sc->intr; 2020 2021 KASSERT(intr->state == SFXGE_INTR_INITIALIZED, 2022 ("intr->state != SFXGE_INTR_INITIALIZED")); 2023 2024 if (sfxge_tx_dpl_get_max <= 0) { 2025 log(LOG_ERR, "%s=%d must be greater than 0", 2026 SFXGE_PARAM_TX_DPL_GET_MAX, sfxge_tx_dpl_get_max); 2027 rc = EINVAL; 2028 goto fail_tx_dpl_get_max; 2029 } 2030 if (sfxge_tx_dpl_get_non_tcp_max <= 0) { 2031 log(LOG_ERR, "%s=%d must be greater than 0", 2032 SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX, 2033 sfxge_tx_dpl_get_non_tcp_max); 2034 rc = EINVAL; 2035 goto fail_tx_dpl_get_non_tcp_max; 2036 } 2037 if (sfxge_tx_dpl_put_max < 0) { 2038 log(LOG_ERR, "%s=%d must be greater or equal to 0", 2039 SFXGE_PARAM_TX_DPL_PUT_MAX, sfxge_tx_dpl_put_max); 2040 rc = EINVAL; 2041 goto fail_tx_dpl_put_max; 2042 } 2043 2044 sc->txq_count = SFXGE_EVQ0_N_TXQ(sc) - 1 + sc->intr.n_alloc; 2045 2046 sc->tso_fw_assisted = sfxge_tso_fw_assisted; 2047 if ((~encp->enc_features & EFX_FEATURE_FW_ASSISTED_TSO) || 2048 (!encp->enc_fw_assisted_tso_enabled)) 2049 sc->tso_fw_assisted &= ~SFXGE_FATSOV1; 2050 if ((~encp->enc_features & EFX_FEATURE_FW_ASSISTED_TSO_V2) || 2051 (!encp->enc_fw_assisted_tso_v2_enabled)) 2052 sc->tso_fw_assisted &= ~SFXGE_FATSOV2; 2053 2054 sc->txqs_node = SYSCTL_ADD_NODE( 2055 device_get_sysctl_ctx(sc->dev), 2056 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), 2057 OID_AUTO, "txq", CTLFLAG_RD, NULL, "Tx queues"); 2058 if (sc->txqs_node == NULL) { 2059 rc = ENOMEM; 2060 goto fail_txq_node; 2061 } 2062 2063 /* Initialize the transmit queues */ 2064 if (sc->txq_dynamic_cksum_toggle_supported == B_FALSE) { 2065 if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NON_CKSUM, 2066 SFXGE_TXQ_NON_CKSUM, 0)) != 0) 2067 goto fail; 2068 2069 if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_CKSUM, 2070 SFXGE_TXQ_IP_CKSUM, 0)) != 0) 2071 goto fail2; 2072 } 2073 2074 for (index = 0; 2075 index < sc->txq_count - SFXGE_EVQ0_N_TXQ(sc) + 1; 2076 index++) { 2077 if ((rc = sfxge_tx_qinit(sc, SFXGE_EVQ0_N_TXQ(sc) - 1 + index, 2078 SFXGE_TXQ_IP_TCP_UDP_CKSUM, index)) != 0) 2079 goto fail3; 2080 } 2081 2082 sfxge_tx_stat_init(sc); 2083 2084 return (0); 2085 2086fail3: 2087 while (--index >= 0) 2088 sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index); 2089 2090 sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM); 2091 2092fail2: 2093 sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM); 2094 2095fail: 2096fail_txq_node: 2097 sc->txq_count = 0; 2098fail_tx_dpl_put_max: 2099fail_tx_dpl_get_non_tcp_max: 2100fail_tx_dpl_get_max: 2101 return (rc); 2102} 2103