1/*- 2 * Copyright (c) 2010-2016 Solarflare Communications Inc. 3 * All rights reserved. 4 * 5 * This software was developed in part by Philip Paeps under contract for 6 * Solarflare Communications, Inc. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright notice, 12 * this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright notice, 14 * this list of conditions and the following disclaimer in the documentation 15 * and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 18 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 19 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 21 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 22 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 26 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, 27 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 * 29 * The views and conclusions contained in the software and documentation are 30 * those of the authors and should not be interpreted as representing official 31 * policies, either expressed or implied, of the FreeBSD Project. 32 */ 33 34/* Theory of operation: 35 * 36 * Tx queues allocation and mapping on Siena 37 * 38 * One Tx queue with enabled checksum offload is allocated per Rx channel 39 * (event queue). Also 2 Tx queues (one without checksum offload and one 40 * with IP checksum offload only) are allocated and bound to event queue 0. 41 * sfxge_txq_type is used as Tx queue label. 42 * 43 * So, event queue plus label mapping to Tx queue index is: 44 * if event queue index is 0, TxQ-index = TxQ-label * [0..SFXGE_TXQ_NTYPES) 45 * else TxQ-index = SFXGE_TXQ_NTYPES + EvQ-index - 1 46 * See sfxge_get_txq_by_label() sfxge_ev.c 47 * 48 * Tx queue allocation and mapping on EF10 49 * 50 * One Tx queue with enabled checksum offload is allocated per Rx 51 * channel (event queue). Checksum offload on all Tx queues is enabled or 52 * disabled dynamically by inserting option descriptors, so the additional 53 * queues used on Siena are not required. 54 * 55 * TxQ label is always set to zero on EF10 hardware. 
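 * A single label suffices because the checksum configuration is carried by
 * option descriptors (see sfxge_tx_maybe_toggle_cksum_offload() below) rather
 * than being a fixed property of the queue.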
56 * So, event queue to Tx queue mapping is simple: 57 * TxQ-index = EvQ-index 58 */ 59 60#include <sys/cdefs.h> 61__FBSDID("$FreeBSD: stable/10/sys/dev/sfxge/sfxge_tx.c 342529 2018-12-26 10:39:34Z arybchik $"); 62 63#include <sys/types.h> 64#include <sys/mbuf.h> 65#include <sys/smp.h> 66#include <sys/socket.h> 67#include <sys/sysctl.h> 68#include <sys/syslog.h> 69#include <sys/limits.h> 70 71#include <net/bpf.h> 72#include <net/ethernet.h> 73#include <net/if.h> 74#include <net/if_vlan_var.h> 75 76#include <netinet/in.h> 77#include <netinet/ip.h> 78#include <netinet/ip6.h> 79#include <netinet/tcp.h> 80 81#include "common/efx.h" 82 83#include "sfxge.h" 84#include "sfxge_tx.h" 85 86 87#define SFXGE_PARAM_TX_DPL_GET_MAX SFXGE_PARAM(tx_dpl_get_max) 88static int sfxge_tx_dpl_get_max = SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT; 89TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_MAX, &sfxge_tx_dpl_get_max); 90SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_max, CTLFLAG_RDTUN, 91 &sfxge_tx_dpl_get_max, 0, 92 "Maximum number of any packets in deferred packet get-list"); 93 94#define SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX \ 95 SFXGE_PARAM(tx_dpl_get_non_tcp_max) 96static int sfxge_tx_dpl_get_non_tcp_max = 97 SFXGE_TX_DPL_GET_NON_TCP_PKT_LIMIT_DEFAULT; 98TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX, &sfxge_tx_dpl_get_non_tcp_max); 99SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_non_tcp_max, CTLFLAG_RDTUN, 100 &sfxge_tx_dpl_get_non_tcp_max, 0, 101 "Maximum number of non-TCP packets in deferred packet get-list"); 102 103#define SFXGE_PARAM_TX_DPL_PUT_MAX SFXGE_PARAM(tx_dpl_put_max) 104static int sfxge_tx_dpl_put_max = SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT; 105TUNABLE_INT(SFXGE_PARAM_TX_DPL_PUT_MAX, &sfxge_tx_dpl_put_max); 106SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_put_max, CTLFLAG_RDTUN, 107 &sfxge_tx_dpl_put_max, 0, 108 "Maximum number of any packets in deferred packet put-list"); 109 110#define SFXGE_PARAM_TSO_FW_ASSISTED SFXGE_PARAM(tso_fw_assisted) 111static int sfxge_tso_fw_assisted = (SFXGE_FATSOV1 | SFXGE_FATSOV2); 112TUNABLE_INT(SFXGE_PARAM_TSO_FW_ASSISTED, &sfxge_tso_fw_assisted); 113SYSCTL_INT(_hw_sfxge, OID_AUTO, tso_fw_assisted, CTLFLAG_RDTUN, 114 &sfxge_tso_fw_assisted, 0, 115 "Bitmask of FW-assisted TSO allowed to use if supported by NIC firmware"); 116 117 118static const struct { 119 const char *name; 120 size_t offset; 121} sfxge_tx_stats[] = { 122#define SFXGE_TX_STAT(name, member) \ 123 { #name, offsetof(struct sfxge_txq, member) } 124 SFXGE_TX_STAT(tso_bursts, tso_bursts), 125 SFXGE_TX_STAT(tso_packets, tso_packets), 126 SFXGE_TX_STAT(tso_long_headers, tso_long_headers), 127 SFXGE_TX_STAT(tso_pdrop_too_many, tso_pdrop_too_many), 128 SFXGE_TX_STAT(tso_pdrop_no_rsrc, tso_pdrop_no_rsrc), 129 SFXGE_TX_STAT(tx_collapses, collapses), 130 SFXGE_TX_STAT(tx_drops, drops), 131 SFXGE_TX_STAT(tx_get_overflow, get_overflow), 132 SFXGE_TX_STAT(tx_get_non_tcp_overflow, get_non_tcp_overflow), 133 SFXGE_TX_STAT(tx_put_overflow, put_overflow), 134 SFXGE_TX_STAT(tx_netdown_drops, netdown_drops), 135}; 136 137 138/* Forward declarations. 
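 * (The statistics table above is consumed by sfxge_txq_stat_init() and
 * sfxge_tx_stat_handler(), which read an unsigned long at each entry's
 * offset within struct sfxge_txq.)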
*/ 139static void sfxge_tx_qdpl_service(struct sfxge_txq *txq); 140static void sfxge_tx_qlist_post(struct sfxge_txq *txq); 141static void sfxge_tx_qunblock(struct sfxge_txq *txq); 142static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf, 143 const bus_dma_segment_t *dma_seg, int n_dma_seg, 144 int n_extra_descs); 145 146static inline void 147sfxge_next_stmp(struct sfxge_txq *txq, struct sfxge_tx_mapping **pstmp) 148{ 149 KASSERT((*pstmp)->flags == 0, ("stmp flags are not 0")); 150 if (__predict_false(*pstmp == 151 &txq->stmp[txq->ptr_mask])) 152 *pstmp = &txq->stmp[0]; 153 else 154 (*pstmp)++; 155} 156 157static int 158sfxge_tx_maybe_toggle_cksum_offload(struct sfxge_txq *txq, struct mbuf *mbuf, 159 struct sfxge_tx_mapping **pstmp) 160{ 161 uint16_t new_hw_cksum_flags; 162 efx_desc_t *desc; 163 164 if (mbuf->m_pkthdr.csum_flags & 165 (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6 | CSUM_TSO)) { 166 /* 167 * We always set EFX_TXQ_CKSUM_IPV4 here because this 168 * configuration is the most useful, and this won't 169 * cause any trouble in case of IPv6 traffic anyway. 170 */ 171 new_hw_cksum_flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP; 172 } else if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_IP) { 173 new_hw_cksum_flags = EFX_TXQ_CKSUM_IPV4; 174 } else { 175 new_hw_cksum_flags = 0; 176 } 177 178 if (new_hw_cksum_flags == txq->hw_cksum_flags) 179 return (0); 180 181 desc = &txq->pend_desc[txq->n_pend_desc]; 182 efx_tx_qdesc_checksum_create(txq->common, new_hw_cksum_flags, desc); 183 txq->hw_cksum_flags = new_hw_cksum_flags; 184 txq->n_pend_desc++; 185 186 sfxge_next_stmp(txq, pstmp); 187 188 return (1); 189} 190 191static int 192sfxge_tx_maybe_insert_tag(struct sfxge_txq *txq, struct mbuf *mbuf, 193 struct sfxge_tx_mapping **pstmp) 194{ 195 uint16_t this_tag = ((mbuf->m_flags & M_VLANTAG) ? 196 mbuf->m_pkthdr.ether_vtag : 197 0); 198 efx_desc_t *desc; 199 200 if (this_tag == txq->hw_vlan_tci) 201 return (0); 202 203 desc = &txq->pend_desc[txq->n_pend_desc]; 204 efx_tx_qdesc_vlantci_create(txq->common, bswap16(this_tag), desc); 205 txq->hw_vlan_tci = this_tag; 206 txq->n_pend_desc++; 207 208 sfxge_next_stmp(txq, pstmp); 209 210 return (1); 211} 212 213void 214sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq) 215{ 216 unsigned int completed; 217 218 SFXGE_EVQ_LOCK_ASSERT_OWNED(evq); 219 220 completed = txq->completed; 221 while (completed != txq->pending) { 222 struct sfxge_tx_mapping *stmp; 223 unsigned int id; 224 225 id = completed++ & txq->ptr_mask; 226 227 stmp = &txq->stmp[id]; 228 if (stmp->flags & TX_BUF_UNMAP) { 229 bus_dmamap_unload(txq->packet_dma_tag, stmp->map); 230 if (stmp->flags & TX_BUF_MBUF) { 231 struct mbuf *m = stmp->u.mbuf; 232 do 233 m = m_free(m); 234 while (m != NULL); 235 } else { 236 free(stmp->u.heap_buf, M_SFXGE); 237 } 238 stmp->flags = 0; 239 } 240 } 241 txq->completed = completed; 242 243 /* Check whether we need to unblock the queue. */ 244 mb(); 245 if (txq->blocked) { 246 unsigned int level; 247 248 level = txq->added - txq->completed; 249 if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries)) 250 sfxge_tx_qunblock(txq); 251 } 252} 253 254static unsigned int 255sfxge_is_mbuf_non_tcp(struct mbuf *mbuf) 256{ 257 /* Absense of TCP checksum flags does not mean that it is non-TCP 258 * but it should be true if user wants to achieve high throughput. 259 */ 260 return (!(mbuf->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP))); 261} 262 263/* 264 * Reorder the put list and append it to the get list. 
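 * The put-list is a lock-free LIFO built by sfxge_tx_qdpl_put_unlocked()
 * with atomic compare-and-swap; it is claimed here in a single
 * atomic_readandclear_ptr(), reversed to restore submission order and then
 * spliced onto the tail of the get-list, which is only accessed with the
 * txq lock held.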
265 */ 266static void 267sfxge_tx_qdpl_swizzle(struct sfxge_txq *txq) 268{ 269 struct sfxge_tx_dpl *stdp; 270 struct mbuf *mbuf, *get_next, **get_tailp; 271 volatile uintptr_t *putp; 272 uintptr_t put; 273 unsigned int count; 274 unsigned int non_tcp_count; 275 276 SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); 277 278 stdp = &txq->dpl; 279 280 /* Acquire the put list. */ 281 putp = &stdp->std_put; 282 put = atomic_readandclear_ptr(putp); 283 mbuf = (void *)put; 284 285 if (mbuf == NULL) 286 return; 287 288 /* Reverse the put list. */ 289 get_tailp = &mbuf->m_nextpkt; 290 get_next = NULL; 291 292 count = 0; 293 non_tcp_count = 0; 294 do { 295 struct mbuf *put_next; 296 297 non_tcp_count += sfxge_is_mbuf_non_tcp(mbuf); 298 put_next = mbuf->m_nextpkt; 299 mbuf->m_nextpkt = get_next; 300 get_next = mbuf; 301 mbuf = put_next; 302 303 count++; 304 } while (mbuf != NULL); 305 306 if (count > stdp->std_put_hiwat) 307 stdp->std_put_hiwat = count; 308 309 /* Append the reversed put list to the get list. */ 310 KASSERT(*get_tailp == NULL, ("*get_tailp != NULL")); 311 *stdp->std_getp = get_next; 312 stdp->std_getp = get_tailp; 313 stdp->std_get_count += count; 314 stdp->std_get_non_tcp_count += non_tcp_count; 315} 316 317static void 318sfxge_tx_qreap(struct sfxge_txq *txq) 319{ 320 SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); 321 322 txq->reaped = txq->completed; 323} 324 325static void 326sfxge_tx_qlist_post(struct sfxge_txq *txq) 327{ 328 unsigned int old_added; 329 unsigned int block_level; 330 unsigned int level; 331 int rc; 332 333 SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); 334 335 KASSERT(txq->n_pend_desc != 0, ("txq->n_pend_desc == 0")); 336 KASSERT(txq->n_pend_desc <= txq->max_pkt_desc, 337 ("txq->n_pend_desc too large")); 338 KASSERT(!txq->blocked, ("txq->blocked")); 339 340 old_added = txq->added; 341 342 /* Post the fragment list. */ 343 rc = efx_tx_qdesc_post(txq->common, txq->pend_desc, txq->n_pend_desc, 344 txq->reaped, &txq->added); 345 KASSERT(rc == 0, ("efx_tx_qdesc_post() failed")); 346 347 /* If efx_tx_qdesc_post() had to refragment, our information about 348 * buffers to free may be associated with the wrong 349 * descriptors. 350 */ 351 KASSERT(txq->added - old_added == txq->n_pend_desc, 352 ("efx_tx_qdesc_post() refragmented descriptors")); 353 354 level = txq->added - txq->reaped; 355 KASSERT(level <= txq->entries, ("overfilled TX queue")); 356 357 /* Clear the fragment list. */ 358 txq->n_pend_desc = 0; 359 360 /* 361 * Set the block level to ensure there is space to generate a 362 * large number of descriptors for TSO. 363 */ 364 block_level = EFX_TXQ_LIMIT(txq->entries) - txq->max_pkt_desc; 365 366 /* Have we reached the block level? */ 367 if (level < block_level) 368 return; 369 370 /* Reap, and check again */ 371 sfxge_tx_qreap(txq); 372 level = txq->added - txq->reaped; 373 if (level < block_level) 374 return; 375 376 txq->blocked = 1; 377 378 /* 379 * Avoid a race with completion interrupt handling that could leave 380 * the queue blocked. 
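 * sfxge_tx_qcomplete() re-checks 'blocked' after advancing 'completed', and
 * we re-check the fill level after publishing 'blocked', so whichever side
 * runs last observes the other's update and the queue cannot remain blocked
 * while space is available.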
381 */ 382 mb(); 383 sfxge_tx_qreap(txq); 384 level = txq->added - txq->reaped; 385 if (level < block_level) { 386 mb(); 387 txq->blocked = 0; 388 } 389} 390 391static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf) 392{ 393 bus_dmamap_t *used_map; 394 bus_dmamap_t map; 395 bus_dma_segment_t dma_seg[SFXGE_TX_MAPPING_MAX_SEG]; 396 unsigned int id; 397 struct sfxge_tx_mapping *stmp; 398 efx_desc_t *desc; 399 int n_dma_seg; 400 int rc; 401 int i; 402 int eop; 403 uint16_t hw_cksum_flags_prev; 404 uint16_t hw_vlan_tci_prev; 405 int n_extra_descs; 406 407 KASSERT(!txq->blocked, ("txq->blocked")); 408 409#if SFXGE_TX_PARSE_EARLY 410 /* 411 * If software TSO is used, we still need to copy packet header, 412 * even if we have already parsed it early before enqueue. 413 */ 414 if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) && 415 (txq->tso_fw_assisted == 0)) 416 prefetch_read_many(mbuf->m_data); 417#else 418 /* 419 * Prefetch packet header since we need to parse it and extract 420 * IP ID, TCP sequence number and flags. 421 */ 422 if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) 423 prefetch_read_many(mbuf->m_data); 424#endif 425 426 if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED)) { 427 rc = EINTR; 428 goto reject; 429 } 430 431 /* Load the packet for DMA. */ 432 id = txq->added & txq->ptr_mask; 433 stmp = &txq->stmp[id]; 434 rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map, 435 mbuf, dma_seg, &n_dma_seg, 0); 436 if (rc == EFBIG) { 437 /* Try again. */ 438 struct mbuf *new_mbuf = m_collapse(mbuf, M_NOWAIT, 439 SFXGE_TX_MAPPING_MAX_SEG); 440 if (new_mbuf == NULL) 441 goto reject; 442 ++txq->collapses; 443 mbuf = new_mbuf; 444 rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, 445 stmp->map, mbuf, 446 dma_seg, &n_dma_seg, 0); 447 } 448 if (rc != 0) 449 goto reject; 450 451 /* Make the packet visible to the hardware. */ 452 bus_dmamap_sync(txq->packet_dma_tag, stmp->map, BUS_DMASYNC_PREWRITE); 453 454 used_map = &stmp->map; 455 456 hw_cksum_flags_prev = txq->hw_cksum_flags; 457 hw_vlan_tci_prev = txq->hw_vlan_tci; 458 459 /* 460 * The order of option descriptors, which are used to leverage VLAN tag 461 * and checksum offloads, might be important. Changing checksum offload 462 * between VLAN option and packet descriptors probably does not work. 463 */ 464 n_extra_descs = sfxge_tx_maybe_toggle_cksum_offload(txq, mbuf, &stmp); 465 n_extra_descs += sfxge_tx_maybe_insert_tag(txq, mbuf, &stmp); 466 467 if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) { 468 rc = sfxge_tx_queue_tso(txq, mbuf, dma_seg, n_dma_seg, 469 n_extra_descs); 470 if (rc < 0) 471 goto reject_mapped; 472 stmp = &txq->stmp[(rc - 1) & txq->ptr_mask]; 473 } else { 474 /* Add the mapping to the fragment list, and set flags 475 * for the buffer. 476 */ 477 478 i = 0; 479 for (;;) { 480 desc = &txq->pend_desc[i + n_extra_descs]; 481 eop = (i == n_dma_seg - 1); 482 efx_tx_qdesc_dma_create(txq->common, 483 dma_seg[i].ds_addr, 484 dma_seg[i].ds_len, 485 eop, 486 desc); 487 if (eop) 488 break; 489 i++; 490 sfxge_next_stmp(txq, &stmp); 491 } 492 txq->n_pend_desc = n_dma_seg + n_extra_descs; 493 } 494 495 /* 496 * If the mapping required more than one descriptor 497 * then we need to associate the DMA map with the last 498 * descriptor, not the first. 499 */ 500 if (used_map != &stmp->map) { 501 map = stmp->map; 502 stmp->map = *used_map; 503 *used_map = map; 504 } 505 506 stmp->u.mbuf = mbuf; 507 stmp->flags = TX_BUF_UNMAP | TX_BUF_MBUF; 508 509 /* Post the fragment list. 
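 * This only appends descriptors to the ring; the doorbell write
 * (efx_tx_qpush()) is batched by the caller, sfxge_tx_qdpl_drain().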
*/ 510 sfxge_tx_qlist_post(txq); 511 512 return (0); 513 514reject_mapped: 515 txq->hw_vlan_tci = hw_vlan_tci_prev; 516 txq->hw_cksum_flags = hw_cksum_flags_prev; 517 bus_dmamap_unload(txq->packet_dma_tag, *used_map); 518reject: 519 /* Drop the packet on the floor. */ 520 m_freem(mbuf); 521 ++txq->drops; 522 523 return (rc); 524} 525 526/* 527 * Drain the deferred packet list into the transmit queue. 528 */ 529static void 530sfxge_tx_qdpl_drain(struct sfxge_txq *txq) 531{ 532 struct sfxge_softc *sc; 533 struct sfxge_tx_dpl *stdp; 534 struct mbuf *mbuf, *next; 535 unsigned int count; 536 unsigned int non_tcp_count; 537 unsigned int pushed; 538 int rc; 539 540 SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); 541 542 sc = txq->sc; 543 stdp = &txq->dpl; 544 pushed = txq->added; 545 546 if (__predict_true(txq->init_state == SFXGE_TXQ_STARTED)) { 547 prefetch_read_many(sc->enp); 548 prefetch_read_many(txq->common); 549 } 550 551 mbuf = stdp->std_get; 552 count = stdp->std_get_count; 553 non_tcp_count = stdp->std_get_non_tcp_count; 554 555 if (count > stdp->std_get_hiwat) 556 stdp->std_get_hiwat = count; 557 558 while (count != 0) { 559 KASSERT(mbuf != NULL, ("mbuf == NULL")); 560 561 next = mbuf->m_nextpkt; 562 mbuf->m_nextpkt = NULL; 563 564 ETHER_BPF_MTAP(sc->ifnet, mbuf); /* packet capture */ 565 566 if (next != NULL) 567 prefetch_read_many(next); 568 569 rc = sfxge_tx_queue_mbuf(txq, mbuf); 570 --count; 571 non_tcp_count -= sfxge_is_mbuf_non_tcp(mbuf); 572 mbuf = next; 573 if (rc != 0) 574 continue; 575 576 if (txq->blocked) 577 break; 578 579 /* Push the fragments to the hardware in batches. */ 580 if (txq->added - pushed >= SFXGE_TX_BATCH) { 581 efx_tx_qpush(txq->common, txq->added, pushed); 582 pushed = txq->added; 583 } 584 } 585 586 if (count == 0) { 587 KASSERT(mbuf == NULL, ("mbuf != NULL")); 588 KASSERT(non_tcp_count == 0, 589 ("inconsistent TCP/non-TCP detection")); 590 stdp->std_get = NULL; 591 stdp->std_get_count = 0; 592 stdp->std_get_non_tcp_count = 0; 593 stdp->std_getp = &stdp->std_get; 594 } else { 595 stdp->std_get = mbuf; 596 stdp->std_get_count = count; 597 stdp->std_get_non_tcp_count = non_tcp_count; 598 } 599 600 if (txq->added != pushed) 601 efx_tx_qpush(txq->common, txq->added, pushed); 602 603 KASSERT(txq->blocked || stdp->std_get_count == 0, 604 ("queue unblocked but count is non-zero")); 605} 606 607#define SFXGE_TX_QDPL_PENDING(_txq) ((_txq)->dpl.std_put != 0) 608 609/* 610 * Service the deferred packet list. 611 * 612 * NOTE: drops the txq mutex! 613 */ 614static void 615sfxge_tx_qdpl_service(struct sfxge_txq *txq) 616{ 617 SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); 618 619 do { 620 if (SFXGE_TX_QDPL_PENDING(txq)) 621 sfxge_tx_qdpl_swizzle(txq); 622 623 if (!txq->blocked) 624 sfxge_tx_qdpl_drain(txq); 625 626 SFXGE_TXQ_UNLOCK(txq); 627 } while (SFXGE_TX_QDPL_PENDING(txq) && 628 SFXGE_TXQ_TRYLOCK(txq)); 629} 630 631/* 632 * Put a packet on the deferred packet get-list. 
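 * The get-list length is bounded by the tx_dpl_get_max tunable (and by
 * tx_dpl_get_non_tcp_max for non-TCP packets); on overflow the packet is
 * rejected with ENOBUFS and the corresponding overflow counter is bumped.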
633 */ 634static int 635sfxge_tx_qdpl_put_locked(struct sfxge_txq *txq, struct mbuf *mbuf) 636{ 637 struct sfxge_tx_dpl *stdp; 638 639 stdp = &txq->dpl; 640 641 KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL")); 642 643 SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); 644 645 if (stdp->std_get_count >= stdp->std_get_max) { 646 txq->get_overflow++; 647 return (ENOBUFS); 648 } 649 if (sfxge_is_mbuf_non_tcp(mbuf)) { 650 if (stdp->std_get_non_tcp_count >= 651 stdp->std_get_non_tcp_max) { 652 txq->get_non_tcp_overflow++; 653 return (ENOBUFS); 654 } 655 stdp->std_get_non_tcp_count++; 656 } 657 658 *(stdp->std_getp) = mbuf; 659 stdp->std_getp = &mbuf->m_nextpkt; 660 stdp->std_get_count++; 661 662 return (0); 663} 664 665/* 666 * Put a packet on the deferred packet put-list. 667 * 668 * We overload the csum_data field in the mbuf to keep track of this length 669 * because there is no cheap alternative to avoid races. 670 */ 671static int 672sfxge_tx_qdpl_put_unlocked(struct sfxge_txq *txq, struct mbuf *mbuf) 673{ 674 struct sfxge_tx_dpl *stdp; 675 volatile uintptr_t *putp; 676 uintptr_t old; 677 uintptr_t new; 678 unsigned int put_count; 679 680 KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL")); 681 682 SFXGE_TXQ_LOCK_ASSERT_NOTOWNED(txq); 683 684 stdp = &txq->dpl; 685 putp = &stdp->std_put; 686 new = (uintptr_t)mbuf; 687 688 do { 689 old = *putp; 690 if (old != 0) { 691 struct mbuf *mp = (struct mbuf *)old; 692 put_count = mp->m_pkthdr.csum_data; 693 } else 694 put_count = 0; 695 if (put_count >= stdp->std_put_max) { 696 atomic_add_long(&txq->put_overflow, 1); 697 return (ENOBUFS); 698 } 699 mbuf->m_pkthdr.csum_data = put_count + 1; 700 mbuf->m_nextpkt = (void *)old; 701 } while (atomic_cmpset_ptr(putp, old, new) == 0); 702 703 return (0); 704} 705 706/* 707 * Called from if_transmit - will try to grab the txq lock and enqueue to the 708 * put list if it succeeds, otherwise try to push onto the defer list if space. 709 */ 710static int 711sfxge_tx_packet_add(struct sfxge_txq *txq, struct mbuf *m) 712{ 713 int rc; 714 715 if (!SFXGE_LINK_UP(txq->sc)) { 716 atomic_add_long(&txq->netdown_drops, 1); 717 return (ENETDOWN); 718 } 719 720 /* 721 * Try to grab the txq lock. If we are able to get the lock, 722 * the packet will be appended to the "get list" of the deferred 723 * packet list. Otherwise, it will be pushed on the "put list". 724 */ 725 if (SFXGE_TXQ_TRYLOCK(txq)) { 726 /* First swizzle put-list to get-list to keep order */ 727 sfxge_tx_qdpl_swizzle(txq); 728 729 rc = sfxge_tx_qdpl_put_locked(txq, m); 730 731 /* Try to service the list. */ 732 sfxge_tx_qdpl_service(txq); 733 /* Lock has been dropped. */ 734 } else { 735 rc = sfxge_tx_qdpl_put_unlocked(txq, m); 736 737 /* 738 * Try to grab the lock again. 739 * 740 * If we are able to get the lock, we need to process 741 * the deferred packet list. If we are not able to get 742 * the lock, another thread is processing the list. 743 */ 744 if ((rc == 0) && SFXGE_TXQ_TRYLOCK(txq)) { 745 sfxge_tx_qdpl_service(txq); 746 /* Lock has been dropped. 
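 * (sfxge_tx_qdpl_service() always returns with the txq lock released,
 * which is what the assertion below relies on.)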
*/ 747 } 748 } 749 750 SFXGE_TXQ_LOCK_ASSERT_NOTOWNED(txq); 751 752 return (rc); 753} 754 755static void 756sfxge_tx_qdpl_flush(struct sfxge_txq *txq) 757{ 758 struct sfxge_tx_dpl *stdp = &txq->dpl; 759 struct mbuf *mbuf, *next; 760 761 SFXGE_TXQ_LOCK(txq); 762 763 sfxge_tx_qdpl_swizzle(txq); 764 for (mbuf = stdp->std_get; mbuf != NULL; mbuf = next) { 765 next = mbuf->m_nextpkt; 766 m_freem(mbuf); 767 } 768 stdp->std_get = NULL; 769 stdp->std_get_count = 0; 770 stdp->std_get_non_tcp_count = 0; 771 stdp->std_getp = &stdp->std_get; 772 773 SFXGE_TXQ_UNLOCK(txq); 774} 775 776void 777sfxge_if_qflush(struct ifnet *ifp) 778{ 779 struct sfxge_softc *sc; 780 unsigned int i; 781 782 sc = ifp->if_softc; 783 784 for (i = 0; i < sc->txq_count; i++) 785 sfxge_tx_qdpl_flush(sc->txq[i]); 786} 787 788#if SFXGE_TX_PARSE_EARLY 789 790/* There is little space for user data in mbuf pkthdr, so we 791 * use l*hlen fields which are not used by the driver otherwise 792 * to store header offsets. 793 * The fields are 8-bit, but it's ok, no header may be longer than 255 bytes. 794 */ 795 796 797#define TSO_MBUF_PROTO(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.sixteen[0]) 798/* We abuse l5hlen here because PH_loc can hold only 64 bits of data */ 799#define TSO_MBUF_FLAGS(_mbuf) ((_mbuf)->m_pkthdr.l5hlen) 800#define TSO_MBUF_PACKETID(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.sixteen[1]) 801#define TSO_MBUF_SEQNUM(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.thirtytwo[1]) 802 803static void sfxge_parse_tx_packet(struct mbuf *mbuf) 804{ 805 struct ether_header *eh = mtod(mbuf, struct ether_header *); 806 const struct tcphdr *th; 807 struct tcphdr th_copy; 808 809 /* Find network protocol and header */ 810 TSO_MBUF_PROTO(mbuf) = eh->ether_type; 811 if (TSO_MBUF_PROTO(mbuf) == htons(ETHERTYPE_VLAN)) { 812 struct ether_vlan_header *veh = 813 mtod(mbuf, struct ether_vlan_header *); 814 TSO_MBUF_PROTO(mbuf) = veh->evl_proto; 815 mbuf->m_pkthdr.l2hlen = sizeof(*veh); 816 } else { 817 mbuf->m_pkthdr.l2hlen = sizeof(*eh); 818 } 819 820 /* Find TCP header */ 821 if (TSO_MBUF_PROTO(mbuf) == htons(ETHERTYPE_IP)) { 822 const struct ip *iph = (const struct ip *)mtodo(mbuf, mbuf->m_pkthdr.l2hlen); 823 824 KASSERT(iph->ip_p == IPPROTO_TCP, 825 ("TSO required on non-TCP packet")); 826 mbuf->m_pkthdr.l3hlen = mbuf->m_pkthdr.l2hlen + 4 * iph->ip_hl; 827 TSO_MBUF_PACKETID(mbuf) = iph->ip_id; 828 } else { 829 KASSERT(TSO_MBUF_PROTO(mbuf) == htons(ETHERTYPE_IPV6), 830 ("TSO required on non-IP packet")); 831 KASSERT(((const struct ip6_hdr *)mtodo(mbuf, mbuf->m_pkthdr.l2hlen))->ip6_nxt == 832 IPPROTO_TCP, 833 ("TSO required on non-TCP packet")); 834 mbuf->m_pkthdr.l3hlen = mbuf->m_pkthdr.l2hlen + sizeof(struct ip6_hdr); 835 TSO_MBUF_PACKETID(mbuf) = 0; 836 } 837 838 KASSERT(mbuf->m_len >= mbuf->m_pkthdr.l3hlen, 839 ("network header is fragmented in mbuf")); 840 841 /* We need TCP header including flags (window is the next) */ 842 if (mbuf->m_len < mbuf->m_pkthdr.l3hlen + offsetof(struct tcphdr, th_win)) { 843 m_copydata(mbuf, mbuf->m_pkthdr.l3hlen, sizeof(th_copy), 844 (caddr_t)&th_copy); 845 th = &th_copy; 846 } else { 847 th = (const struct tcphdr *)mtodo(mbuf, mbuf->m_pkthdr.l3hlen); 848 } 849 850 mbuf->m_pkthdr.l4hlen = mbuf->m_pkthdr.l3hlen + 4 * th->th_off; 851 TSO_MBUF_SEQNUM(mbuf) = ntohl(th->th_seq); 852 853 /* These flags must not be duplicated */ 854 /* 855 * RST should not be duplicated as well, but FreeBSD kernel 856 * generates TSO packets with RST flag. So, do not assert 857 * its absence. 
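 * SYN and URG, however, cannot simply be replicated into every generated
 * segment, so their presence on a TSO packet is treated as an error.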
858 */ 859 KASSERT(!(th->th_flags & (TH_URG | TH_SYN)), 860 ("incompatible TCP flag 0x%x on TSO packet", 861 th->th_flags & (TH_URG | TH_SYN))); 862 TSO_MBUF_FLAGS(mbuf) = th->th_flags; 863} 864#endif 865 866/* 867 * TX start -- called by the stack. 868 */ 869int 870sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m) 871{ 872 struct sfxge_softc *sc; 873 struct sfxge_txq *txq; 874 int rc; 875 876 sc = (struct sfxge_softc *)ifp->if_softc; 877 878 /* 879 * Transmit may be called when interface is up from the kernel 880 * point of view, but not yet up (in progress) from the driver 881 * point of view. I.e. link aggregation bring up. 882 * Transmit may be called when interface is up from the driver 883 * point of view, but already down from the kernel point of 884 * view. I.e. Rx when interface shutdown is in progress. 885 */ 886 KASSERT((ifp->if_flags & IFF_UP) || (sc->if_flags & IFF_UP), 887 ("interface not up")); 888 889 /* Pick the desired transmit queue. */ 890 if (sc->txq_dynamic_cksum_toggle_supported | 891 (m->m_pkthdr.csum_flags & 892 (CSUM_DELAY_DATA | CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_TSO))) { 893 int index = 0; 894 895 /* check if flowid is set */ 896 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { 897 uint32_t hash = m->m_pkthdr.flowid; 898 uint32_t idx = hash % nitems(sc->rx_indir_table); 899 900 index = sc->rx_indir_table[idx]; 901 } 902#if SFXGE_TX_PARSE_EARLY 903 if (m->m_pkthdr.csum_flags & CSUM_TSO) 904 sfxge_parse_tx_packet(m); 905#endif 906 index += (sc->txq_dynamic_cksum_toggle_supported == B_FALSE) ? 907 SFXGE_TXQ_IP_TCP_UDP_CKSUM : 0; 908 txq = sc->txq[index]; 909 } else if (m->m_pkthdr.csum_flags & CSUM_DELAY_IP) { 910 txq = sc->txq[SFXGE_TXQ_IP_CKSUM]; 911 } else { 912 txq = sc->txq[SFXGE_TXQ_NON_CKSUM]; 913 } 914 915 rc = sfxge_tx_packet_add(txq, m); 916 if (rc != 0) 917 m_freem(m); 918 919 return (rc); 920} 921 922/* 923 * Software "TSO". Not quite as good as doing it in hardware, but 924 * still faster than segmenting in the stack. 
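 * When firmware assistance is not available, a fresh copy of the protocol
 * headers is written into a DMA-able buffer for each output segment
 * (tso_start_new_packet()), the TCP sequence number, IP length and FIN/PUSH
 * flags are patched, and the header descriptor is followed by DMA
 * descriptors pointing into the original payload mbuf data
 * (tso_fill_packet_with_fragment()).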
925 */ 926 927struct sfxge_tso_state { 928 /* Output position */ 929 unsigned out_len; /* Remaining length in current segment */ 930 unsigned seqnum; /* Current sequence number */ 931 unsigned packet_space; /* Remaining space in current packet */ 932 unsigned segs_space; /* Remaining number of DMA segments 933 for the packet (FATSOv2 only) */ 934 935 /* Input position */ 936 uint64_t dma_addr; /* DMA address of current position */ 937 unsigned in_len; /* Remaining length in current mbuf */ 938 939 const struct mbuf *mbuf; /* Input mbuf (head of chain) */ 940 u_short protocol; /* Network protocol (after VLAN decap) */ 941 ssize_t nh_off; /* Offset of network header */ 942 ssize_t tcph_off; /* Offset of TCP header */ 943 unsigned header_len; /* Number of bytes of header */ 944 unsigned seg_size; /* TCP segment size */ 945 int fw_assisted; /* Use FW-assisted TSO */ 946 u_short packet_id; /* IPv4 packet ID from the original packet */ 947 uint8_t tcp_flags; /* TCP flags */ 948 efx_desc_t header_desc; /* Precomputed header descriptor for 949 * FW-assisted TSO */ 950}; 951 952#if !SFXGE_TX_PARSE_EARLY 953static const struct ip *tso_iph(const struct sfxge_tso_state *tso) 954{ 955 KASSERT(tso->protocol == htons(ETHERTYPE_IP), 956 ("tso_iph() in non-IPv4 state")); 957 return (const struct ip *)(tso->mbuf->m_data + tso->nh_off); 958} 959 960static __unused const struct ip6_hdr *tso_ip6h(const struct sfxge_tso_state *tso) 961{ 962 KASSERT(tso->protocol == htons(ETHERTYPE_IPV6), 963 ("tso_ip6h() in non-IPv6 state")); 964 return (const struct ip6_hdr *)(tso->mbuf->m_data + tso->nh_off); 965} 966 967static const struct tcphdr *tso_tcph(const struct sfxge_tso_state *tso) 968{ 969 return (const struct tcphdr *)(tso->mbuf->m_data + tso->tcph_off); 970} 971#endif 972 973 974/* Size of preallocated TSO header buffers. Larger blocks must be 975 * allocated from the heap. 976 */ 977#define TSOH_STD_SIZE 128 978 979/* At most half the descriptors in the queue at any time will refer to 980 * a TSO header buffer, since they must always be followed by a 981 * payload descriptor referring to an mbuf. 
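 * TSOH_COUNT() below therefore reserves one standard header buffer per two
 * ring entries, packed TSOH_PER_PAGE to a DMA page.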
982 */ 983#define TSOH_COUNT(_txq_entries) ((_txq_entries) / 2u) 984#define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE) 985#define TSOH_PAGE_COUNT(_txq_entries) \ 986 ((TSOH_COUNT(_txq_entries) + TSOH_PER_PAGE - 1) / TSOH_PER_PAGE) 987 988static int tso_init(struct sfxge_txq *txq) 989{ 990 struct sfxge_softc *sc = txq->sc; 991 unsigned int tsoh_page_count = TSOH_PAGE_COUNT(sc->txq_entries); 992 int i, rc; 993 994 /* Allocate TSO header buffers */ 995 txq->tsoh_buffer = malloc(tsoh_page_count * sizeof(txq->tsoh_buffer[0]), 996 M_SFXGE, M_WAITOK); 997 998 for (i = 0; i < tsoh_page_count; i++) { 999 rc = sfxge_dma_alloc(sc, PAGE_SIZE, &txq->tsoh_buffer[i]); 1000 if (rc != 0) 1001 goto fail; 1002 } 1003 1004 return (0); 1005 1006fail: 1007 while (i-- > 0) 1008 sfxge_dma_free(&txq->tsoh_buffer[i]); 1009 free(txq->tsoh_buffer, M_SFXGE); 1010 txq->tsoh_buffer = NULL; 1011 return (rc); 1012} 1013 1014static void tso_fini(struct sfxge_txq *txq) 1015{ 1016 int i; 1017 1018 if (txq->tsoh_buffer != NULL) { 1019 for (i = 0; i < TSOH_PAGE_COUNT(txq->sc->txq_entries); i++) 1020 sfxge_dma_free(&txq->tsoh_buffer[i]); 1021 free(txq->tsoh_buffer, M_SFXGE); 1022 } 1023} 1024 1025static void tso_start(struct sfxge_txq *txq, struct sfxge_tso_state *tso, 1026 const bus_dma_segment_t *hdr_dma_seg, 1027 struct mbuf *mbuf) 1028{ 1029 const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->sc->enp); 1030#if !SFXGE_TX_PARSE_EARLY 1031 struct ether_header *eh = mtod(mbuf, struct ether_header *); 1032 const struct tcphdr *th; 1033 struct tcphdr th_copy; 1034#endif 1035 1036 tso->fw_assisted = txq->tso_fw_assisted; 1037 tso->mbuf = mbuf; 1038 1039 /* Find network protocol and header */ 1040#if !SFXGE_TX_PARSE_EARLY 1041 tso->protocol = eh->ether_type; 1042 if (tso->protocol == htons(ETHERTYPE_VLAN)) { 1043 struct ether_vlan_header *veh = 1044 mtod(mbuf, struct ether_vlan_header *); 1045 tso->protocol = veh->evl_proto; 1046 tso->nh_off = sizeof(*veh); 1047 } else { 1048 tso->nh_off = sizeof(*eh); 1049 } 1050#else 1051 tso->protocol = TSO_MBUF_PROTO(mbuf); 1052 tso->nh_off = mbuf->m_pkthdr.l2hlen; 1053 tso->tcph_off = mbuf->m_pkthdr.l3hlen; 1054 tso->packet_id = ntohs(TSO_MBUF_PACKETID(mbuf)); 1055#endif 1056 1057#if !SFXGE_TX_PARSE_EARLY 1058 /* Find TCP header */ 1059 if (tso->protocol == htons(ETHERTYPE_IP)) { 1060 KASSERT(tso_iph(tso)->ip_p == IPPROTO_TCP, 1061 ("TSO required on non-TCP packet")); 1062 tso->tcph_off = tso->nh_off + 4 * tso_iph(tso)->ip_hl; 1063 tso->packet_id = ntohs(tso_iph(tso)->ip_id); 1064 } else { 1065 KASSERT(tso->protocol == htons(ETHERTYPE_IPV6), 1066 ("TSO required on non-IP packet")); 1067 KASSERT(tso_ip6h(tso)->ip6_nxt == IPPROTO_TCP, 1068 ("TSO required on non-TCP packet")); 1069 tso->tcph_off = tso->nh_off + sizeof(struct ip6_hdr); 1070 tso->packet_id = 0; 1071 } 1072#endif 1073 1074 1075 if (tso->fw_assisted && 1076 __predict_false(tso->tcph_off > 1077 encp->enc_tx_tso_tcp_header_offset_limit)) { 1078 tso->fw_assisted = 0; 1079 } 1080 1081 1082#if !SFXGE_TX_PARSE_EARLY 1083 KASSERT(mbuf->m_len >= tso->tcph_off, 1084 ("network header is fragmented in mbuf")); 1085 /* We need TCP header including flags (window is the next) */ 1086 if (mbuf->m_len < tso->tcph_off + offsetof(struct tcphdr, th_win)) { 1087 m_copydata(tso->mbuf, tso->tcph_off, sizeof(th_copy), 1088 (caddr_t)&th_copy); 1089 th = &th_copy; 1090 } else { 1091 th = tso_tcph(tso); 1092 } 1093 tso->header_len = tso->tcph_off + 4 * th->th_off; 1094#else 1095 tso->header_len = mbuf->m_pkthdr.l4hlen; 1096#endif 1097 tso->seg_size = 
mbuf->m_pkthdr.tso_segsz; 1098 1099#if !SFXGE_TX_PARSE_EARLY 1100 tso->seqnum = ntohl(th->th_seq); 1101 1102 /* These flags must not be duplicated */ 1103 /* 1104 * RST should not be duplicated as well, but FreeBSD kernel 1105 * generates TSO packets with RST flag. So, do not assert 1106 * its absence. 1107 */ 1108 KASSERT(!(th->th_flags & (TH_URG | TH_SYN)), 1109 ("incompatible TCP flag 0x%x on TSO packet", 1110 th->th_flags & (TH_URG | TH_SYN))); 1111 tso->tcp_flags = th->th_flags; 1112#else 1113 tso->seqnum = TSO_MBUF_SEQNUM(mbuf); 1114 tso->tcp_flags = TSO_MBUF_FLAGS(mbuf); 1115#endif 1116 1117 tso->out_len = mbuf->m_pkthdr.len - tso->header_len; 1118 1119 if (tso->fw_assisted) { 1120 if (hdr_dma_seg->ds_len >= tso->header_len) 1121 efx_tx_qdesc_dma_create(txq->common, 1122 hdr_dma_seg->ds_addr, 1123 tso->header_len, 1124 B_FALSE, 1125 &tso->header_desc); 1126 else 1127 tso->fw_assisted = 0; 1128 } 1129} 1130 1131/* 1132 * tso_fill_packet_with_fragment - form descriptors for the current fragment 1133 * 1134 * Form descriptors for the current fragment, until we reach the end 1135 * of fragment or end-of-packet. Return 0 on success, 1 if not enough 1136 * space. 1137 */ 1138static void tso_fill_packet_with_fragment(struct sfxge_txq *txq, 1139 struct sfxge_tso_state *tso) 1140{ 1141 efx_desc_t *desc; 1142 int n; 1143 uint64_t dma_addr = tso->dma_addr; 1144 boolean_t eop; 1145 1146 if (tso->in_len == 0 || tso->packet_space == 0) 1147 return; 1148 1149 KASSERT(tso->in_len > 0, ("TSO input length went negative")); 1150 KASSERT(tso->packet_space > 0, ("TSO packet space went negative")); 1151 1152 if (tso->fw_assisted & SFXGE_FATSOV2) { 1153 n = tso->in_len; 1154 tso->out_len -= n; 1155 tso->seqnum += n; 1156 tso->in_len = 0; 1157 if (n < tso->packet_space) { 1158 tso->packet_space -= n; 1159 tso->segs_space--; 1160 } else { 1161 tso->packet_space = tso->seg_size - 1162 (n - tso->packet_space) % tso->seg_size; 1163 tso->segs_space = 1164 EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1 - 1165 (tso->packet_space != tso->seg_size); 1166 } 1167 } else { 1168 n = min(tso->in_len, tso->packet_space); 1169 tso->packet_space -= n; 1170 tso->out_len -= n; 1171 tso->dma_addr += n; 1172 tso->in_len -= n; 1173 } 1174 1175 /* 1176 * It is OK to use binary OR below to avoid extra branching 1177 * since all conditions may always be checked. 1178 */ 1179 eop = (tso->out_len == 0) | (tso->packet_space == 0) | 1180 (tso->segs_space == 0); 1181 1182 desc = &txq->pend_desc[txq->n_pend_desc++]; 1183 efx_tx_qdesc_dma_create(txq->common, dma_addr, n, eop, desc); 1184} 1185 1186/* Callback from bus_dmamap_load() for long TSO headers. */ 1187static void tso_map_long_header(void *dma_addr_ret, 1188 bus_dma_segment_t *segs, int nseg, 1189 int error) 1190{ 1191 *(uint64_t *)dma_addr_ret = ((__predict_true(error == 0) && 1192 __predict_true(nseg == 1)) ? 1193 segs->ds_addr : 0); 1194} 1195 1196/* 1197 * tso_start_new_packet - generate a new header and prepare for the new packet 1198 * 1199 * Generate a new header and prepare for the new packet. Return 0 on 1200 * success, or an error code if failed to alloc header. 
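 * With FW-assisted TSO this only emits the TSO option descriptor(s) plus the
 * precomputed header descriptor; otherwise the headers are copied into a
 * DMA-mapped header buffer, fixed up and described by a freshly built DMA
 * descriptor.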
1201 */ 1202static int tso_start_new_packet(struct sfxge_txq *txq, 1203 struct sfxge_tso_state *tso, 1204 unsigned int *idp) 1205{ 1206 unsigned int id = *idp; 1207 struct tcphdr *tsoh_th; 1208 unsigned ip_length; 1209 caddr_t header; 1210 uint64_t dma_addr; 1211 bus_dmamap_t map; 1212 efx_desc_t *desc; 1213 int rc; 1214 1215 if (tso->fw_assisted) { 1216 if (tso->fw_assisted & SFXGE_FATSOV2) { 1217 /* Add 2 FATSOv2 option descriptors */ 1218 desc = &txq->pend_desc[txq->n_pend_desc]; 1219 efx_tx_qdesc_tso2_create(txq->common, 1220 tso->packet_id, 1221 tso->seqnum, 1222 tso->seg_size, 1223 desc, 1224 EFX_TX_FATSOV2_OPT_NDESCS); 1225 desc += EFX_TX_FATSOV2_OPT_NDESCS; 1226 txq->n_pend_desc += EFX_TX_FATSOV2_OPT_NDESCS; 1227 KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); 1228 id = (id + EFX_TX_FATSOV2_OPT_NDESCS) & txq->ptr_mask; 1229 1230 tso->segs_space = 1231 EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1; 1232 } else { 1233 uint8_t tcp_flags = tso->tcp_flags; 1234 1235 if (tso->out_len > tso->seg_size) 1236 tcp_flags &= ~(TH_FIN | TH_PUSH); 1237 1238 /* Add FATSOv1 option descriptor */ 1239 desc = &txq->pend_desc[txq->n_pend_desc++]; 1240 efx_tx_qdesc_tso_create(txq->common, 1241 tso->packet_id, 1242 tso->seqnum, 1243 tcp_flags, 1244 desc++); 1245 KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); 1246 id = (id + 1) & txq->ptr_mask; 1247 1248 tso->seqnum += tso->seg_size; 1249 tso->segs_space = UINT_MAX; 1250 } 1251 1252 /* Header DMA descriptor */ 1253 *desc = tso->header_desc; 1254 txq->n_pend_desc++; 1255 KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); 1256 id = (id + 1) & txq->ptr_mask; 1257 } else { 1258 /* Allocate a DMA-mapped header buffer. */ 1259 if (__predict_true(tso->header_len <= TSOH_STD_SIZE)) { 1260 unsigned int page_index = (id / 2) / TSOH_PER_PAGE; 1261 unsigned int buf_index = (id / 2) % TSOH_PER_PAGE; 1262 1263 header = (txq->tsoh_buffer[page_index].esm_base + 1264 buf_index * TSOH_STD_SIZE); 1265 dma_addr = (txq->tsoh_buffer[page_index].esm_addr + 1266 buf_index * TSOH_STD_SIZE); 1267 map = txq->tsoh_buffer[page_index].esm_map; 1268 1269 KASSERT(txq->stmp[id].flags == 0, 1270 ("stmp flags are not 0")); 1271 } else { 1272 struct sfxge_tx_mapping *stmp = &txq->stmp[id]; 1273 1274 /* We cannot use bus_dmamem_alloc() as that may sleep */ 1275 header = malloc(tso->header_len, M_SFXGE, M_NOWAIT); 1276 if (__predict_false(!header)) 1277 return (ENOMEM); 1278 rc = bus_dmamap_load(txq->packet_dma_tag, stmp->map, 1279 header, tso->header_len, 1280 tso_map_long_header, &dma_addr, 1281 BUS_DMA_NOWAIT); 1282 if (__predict_false(dma_addr == 0)) { 1283 if (rc == 0) { 1284 /* Succeeded but got >1 segment */ 1285 bus_dmamap_unload(txq->packet_dma_tag, 1286 stmp->map); 1287 rc = EINVAL; 1288 } 1289 free(header, M_SFXGE); 1290 return (rc); 1291 } 1292 map = stmp->map; 1293 1294 txq->tso_long_headers++; 1295 stmp->u.heap_buf = header; 1296 stmp->flags = TX_BUF_UNMAP; 1297 } 1298 1299 tsoh_th = (struct tcphdr *)(header + tso->tcph_off); 1300 1301 /* Copy and update the headers. */ 1302 m_copydata(tso->mbuf, 0, tso->header_len, header); 1303 1304 tsoh_th->th_seq = htonl(tso->seqnum); 1305 tso->seqnum += tso->seg_size; 1306 if (tso->out_len > tso->seg_size) { 1307 /* This packet will not finish the TSO burst. */ 1308 ip_length = tso->header_len - tso->nh_off + tso->seg_size; 1309 tsoh_th->th_flags &= ~(TH_FIN | TH_PUSH); 1310 } else { 1311 /* This packet will be the last in the TSO burst. 
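 * The original FIN/PUSH flags are therefore left intact and ip_length is
 * sized for the remaining payload rather than a full seg_size.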
*/ 1312 ip_length = tso->header_len - tso->nh_off + tso->out_len; 1313 } 1314 1315 if (tso->protocol == htons(ETHERTYPE_IP)) { 1316 struct ip *tsoh_iph = (struct ip *)(header + tso->nh_off); 1317 tsoh_iph->ip_len = htons(ip_length); 1318 /* XXX We should increment ip_id, but FreeBSD doesn't 1319 * currently allocate extra IDs for multiple segments. 1320 */ 1321 } else { 1322 struct ip6_hdr *tsoh_iph = 1323 (struct ip6_hdr *)(header + tso->nh_off); 1324 tsoh_iph->ip6_plen = htons(ip_length - sizeof(*tsoh_iph)); 1325 } 1326 1327 /* Make the header visible to the hardware. */ 1328 bus_dmamap_sync(txq->packet_dma_tag, map, BUS_DMASYNC_PREWRITE); 1329 1330 /* Form a descriptor for this header. */ 1331 desc = &txq->pend_desc[txq->n_pend_desc++]; 1332 efx_tx_qdesc_dma_create(txq->common, 1333 dma_addr, 1334 tso->header_len, 1335 0, 1336 desc); 1337 id = (id + 1) & txq->ptr_mask; 1338 1339 tso->segs_space = UINT_MAX; 1340 } 1341 tso->packet_space = tso->seg_size; 1342 txq->tso_packets++; 1343 *idp = id; 1344 1345 return (0); 1346} 1347 1348static int 1349sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf, 1350 const bus_dma_segment_t *dma_seg, int n_dma_seg, 1351 int n_extra_descs) 1352{ 1353 struct sfxge_tso_state tso; 1354 unsigned int id; 1355 unsigned skipped = 0; 1356 1357 tso_start(txq, &tso, dma_seg, mbuf); 1358 1359 while (dma_seg->ds_len + skipped <= tso.header_len) { 1360 skipped += dma_seg->ds_len; 1361 --n_dma_seg; 1362 KASSERT(n_dma_seg, ("no payload found in TSO packet")); 1363 ++dma_seg; 1364 } 1365 tso.in_len = dma_seg->ds_len - (tso.header_len - skipped); 1366 tso.dma_addr = dma_seg->ds_addr + (tso.header_len - skipped); 1367 1368 id = (txq->added + n_extra_descs) & txq->ptr_mask; 1369 if (__predict_false(tso_start_new_packet(txq, &tso, &id))) 1370 return (-1); 1371 1372 while (1) { 1373 tso_fill_packet_with_fragment(txq, &tso); 1374 /* Exactly one DMA descriptor is added */ 1375 KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); 1376 id = (id + 1) & txq->ptr_mask; 1377 1378 /* Move onto the next fragment? */ 1379 if (tso.in_len == 0) { 1380 --n_dma_seg; 1381 if (n_dma_seg == 0) 1382 break; 1383 ++dma_seg; 1384 tso.in_len = dma_seg->ds_len; 1385 tso.dma_addr = dma_seg->ds_addr; 1386 } 1387 1388 /* End of packet? */ 1389 if ((tso.packet_space == 0) | (tso.segs_space == 0)) { 1390 unsigned int n_fatso_opt_desc = 1391 (tso.fw_assisted & SFXGE_FATSOV2) ? 1392 EFX_TX_FATSOV2_OPT_NDESCS : 1393 (tso.fw_assisted & SFXGE_FATSOV1) ? 1 : 0; 1394 1395 /* If the queue is now full due to tiny MSS, 1396 * or we can't create another header, discard 1397 * the remainder of the input mbuf but do not 1398 * roll back the work we have done. 
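 * The descriptors already queued describe complete TCP segments, so
 * truncating at a packet boundary keeps the output stream well formed; the
 * loss is only visible through the tso_pdrop_* counters.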
1399 */ 1400 if (txq->n_pend_desc + n_fatso_opt_desc + 1401 1 /* header */ + n_dma_seg > txq->max_pkt_desc) { 1402 txq->tso_pdrop_too_many++; 1403 break; 1404 } 1405 if (__predict_false(tso_start_new_packet(txq, &tso, 1406 &id))) { 1407 txq->tso_pdrop_no_rsrc++; 1408 break; 1409 } 1410 } 1411 } 1412 1413 txq->tso_bursts++; 1414 return (id); 1415} 1416 1417static void 1418sfxge_tx_qunblock(struct sfxge_txq *txq) 1419{ 1420 struct sfxge_softc *sc; 1421 struct sfxge_evq *evq; 1422 1423 sc = txq->sc; 1424 evq = sc->evq[txq->evq_index]; 1425 1426 SFXGE_EVQ_LOCK_ASSERT_OWNED(evq); 1427 1428 if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED)) 1429 return; 1430 1431 SFXGE_TXQ_LOCK(txq); 1432 1433 if (txq->blocked) { 1434 unsigned int level; 1435 1436 level = txq->added - txq->completed; 1437 if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries)) { 1438 /* reaped must be in sync with blocked */ 1439 sfxge_tx_qreap(txq); 1440 txq->blocked = 0; 1441 } 1442 } 1443 1444 sfxge_tx_qdpl_service(txq); 1445 /* note: lock has been dropped */ 1446} 1447 1448void 1449sfxge_tx_qflush_done(struct sfxge_txq *txq) 1450{ 1451 1452 txq->flush_state = SFXGE_FLUSH_DONE; 1453} 1454 1455static void 1456sfxge_tx_qstop(struct sfxge_softc *sc, unsigned int index) 1457{ 1458 struct sfxge_txq *txq; 1459 struct sfxge_evq *evq; 1460 unsigned int count; 1461 1462 SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc); 1463 1464 txq = sc->txq[index]; 1465 evq = sc->evq[txq->evq_index]; 1466 1467 SFXGE_EVQ_LOCK(evq); 1468 SFXGE_TXQ_LOCK(txq); 1469 1470 KASSERT(txq->init_state == SFXGE_TXQ_STARTED, 1471 ("txq->init_state != SFXGE_TXQ_STARTED")); 1472 1473 txq->init_state = SFXGE_TXQ_INITIALIZED; 1474 1475 if (txq->flush_state != SFXGE_FLUSH_DONE) { 1476 txq->flush_state = SFXGE_FLUSH_PENDING; 1477 1478 SFXGE_EVQ_UNLOCK(evq); 1479 SFXGE_TXQ_UNLOCK(txq); 1480 1481 /* Flush the transmit queue. */ 1482 if (efx_tx_qflush(txq->common) != 0) { 1483 log(LOG_ERR, "%s: Flushing Tx queue %u failed\n", 1484 device_get_nameunit(sc->dev), index); 1485 txq->flush_state = SFXGE_FLUSH_DONE; 1486 } else { 1487 count = 0; 1488 do { 1489 /* Spin for 100ms. */ 1490 DELAY(100000); 1491 if (txq->flush_state != SFXGE_FLUSH_PENDING) 1492 break; 1493 } while (++count < 20); 1494 } 1495 SFXGE_EVQ_LOCK(evq); 1496 SFXGE_TXQ_LOCK(txq); 1497 1498 KASSERT(txq->flush_state != SFXGE_FLUSH_FAILED, 1499 ("txq->flush_state == SFXGE_FLUSH_FAILED")); 1500 1501 if (txq->flush_state != SFXGE_FLUSH_DONE) { 1502 /* Flush timeout */ 1503 log(LOG_ERR, "%s: Cannot flush Tx queue %u\n", 1504 device_get_nameunit(sc->dev), index); 1505 txq->flush_state = SFXGE_FLUSH_DONE; 1506 } 1507 } 1508 1509 txq->blocked = 0; 1510 txq->pending = txq->added; 1511 1512 sfxge_tx_qcomplete(txq, evq); 1513 KASSERT(txq->completed == txq->added, 1514 ("txq->completed != txq->added")); 1515 1516 sfxge_tx_qreap(txq); 1517 KASSERT(txq->reaped == txq->completed, 1518 ("txq->reaped != txq->completed")); 1519 1520 txq->added = 0; 1521 txq->pending = 0; 1522 txq->completed = 0; 1523 txq->reaped = 0; 1524 1525 /* Destroy the common code transmit queue. */ 1526 efx_tx_qdestroy(txq->common); 1527 txq->common = NULL; 1528 1529 efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id, 1530 EFX_TXQ_NBUFS(sc->txq_entries)); 1531 1532 txq->hw_cksum_flags = 0; 1533 1534 SFXGE_EVQ_UNLOCK(evq); 1535 SFXGE_TXQ_UNLOCK(txq); 1536} 1537 1538/* 1539 * Estimate maximum number of Tx descriptors required for TSO packet. 
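 * (The result becomes txq->max_pkt_desc, which bounds n_pend_desc and sets
 * the queue block level in sfxge_tx_qlist_post().)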
1540 * With minimum MSS and maximum mbuf length we might need more (even 1541 * than a ring-ful of descriptors), but this should not happen in 1542 * practice except due to deliberate attack. In that case we will 1543 * truncate the output at a packet boundary. 1544 */ 1545static unsigned int 1546sfxge_tx_max_pkt_desc(const struct sfxge_softc *sc, enum sfxge_txq_type type, 1547 unsigned int tso_fw_assisted) 1548{ 1549 /* One descriptor for every input fragment */ 1550 unsigned int max_descs = SFXGE_TX_MAPPING_MAX_SEG; 1551 unsigned int sw_tso_max_descs; 1552 unsigned int fa_tso_v1_max_descs = 0; 1553 unsigned int fa_tso_v2_max_descs = 0; 1554 1555 /* Checksum offload Tx option descriptor may be required */ 1556 if (sc->txq_dynamic_cksum_toggle_supported) 1557 max_descs++; 1558 1559 /* VLAN tagging Tx option descriptor may be required */ 1560 if (efx_nic_cfg_get(sc->enp)->enc_hw_tx_insert_vlan_enabled) 1561 max_descs++; 1562 1563 if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM) { 1564 /* 1565 * Plus header and payload descriptor for each output segment. 1566 * Minus one since header fragment is already counted. 1567 * Even if FATSO is used, we should be ready to fallback 1568 * to do it in the driver. 1569 */ 1570 sw_tso_max_descs = SFXGE_TSO_MAX_SEGS * 2 - 1; 1571 1572 /* FW assisted TSOv1 requires one more descriptor per segment 1573 * in comparison to SW TSO */ 1574 if (tso_fw_assisted & SFXGE_FATSOV1) 1575 fa_tso_v1_max_descs = 1576 sw_tso_max_descs + SFXGE_TSO_MAX_SEGS; 1577 1578 /* FW assisted TSOv2 requires 3 (2 FATSO plus header) extra 1579 * descriptors per superframe limited by number of DMA fetches 1580 * per packet. The first packet header is already counted. 1581 */ 1582 if (tso_fw_assisted & SFXGE_FATSOV2) { 1583 fa_tso_v2_max_descs = 1584 howmany(SFXGE_TX_MAPPING_MAX_SEG, 1585 EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1) * 1586 (EFX_TX_FATSOV2_OPT_NDESCS + 1) - 1; 1587 } 1588 1589 max_descs += MAX(sw_tso_max_descs, 1590 MAX(fa_tso_v1_max_descs, fa_tso_v2_max_descs)); 1591 } 1592 1593 return (max_descs); 1594} 1595 1596static int 1597sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index) 1598{ 1599 struct sfxge_txq *txq; 1600 efsys_mem_t *esmp; 1601 uint16_t flags; 1602 unsigned int tso_fw_assisted; 1603 unsigned int label; 1604 struct sfxge_evq *evq; 1605 unsigned int desc_index; 1606 int rc; 1607 1608 SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc); 1609 1610 txq = sc->txq[index]; 1611 esmp = &txq->mem; 1612 evq = sc->evq[txq->evq_index]; 1613 1614 KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED, 1615 ("txq->init_state != SFXGE_TXQ_INITIALIZED")); 1616 KASSERT(evq->init_state == SFXGE_EVQ_STARTED, 1617 ("evq->init_state != SFXGE_EVQ_STARTED")); 1618 1619 /* Program the buffer table. */ 1620 if ((rc = efx_sram_buf_tbl_set(sc->enp, txq->buf_base_id, esmp, 1621 EFX_TXQ_NBUFS(sc->txq_entries))) != 0) 1622 return (rc); 1623 1624 /* Determine the kind of queue we are creating. */ 1625 tso_fw_assisted = 0; 1626 switch (txq->type) { 1627 case SFXGE_TXQ_NON_CKSUM: 1628 flags = 0; 1629 break; 1630 case SFXGE_TXQ_IP_CKSUM: 1631 flags = EFX_TXQ_CKSUM_IPV4; 1632 break; 1633 case SFXGE_TXQ_IP_TCP_UDP_CKSUM: 1634 flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP; 1635 tso_fw_assisted = sc->tso_fw_assisted; 1636 if (tso_fw_assisted & SFXGE_FATSOV2) 1637 flags |= EFX_TXQ_FATSOV2; 1638 break; 1639 default: 1640 KASSERT(0, ("Impossible TX queue")); 1641 flags = 0; 1642 break; 1643 } 1644 1645 label = (sc->txq_dynamic_cksum_toggle_supported) ? 0 : txq->type; 1646 1647 /* Create the common code transmit queue. 
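 * If the NIC has run out of FATSOv2 contexts, the create is retried below
 * without EFX_TXQ_FATSOV2, falling back to FATSOv1 or software TSO.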
*/ 1648 if ((rc = efx_tx_qcreate(sc->enp, index, label, esmp, 1649 sc->txq_entries, txq->buf_base_id, flags, evq->common, 1650 &txq->common, &desc_index)) != 0) { 1651 /* Retry if no FATSOv2 resources, otherwise fail */ 1652 if ((rc != ENOSPC) || (~flags & EFX_TXQ_FATSOV2)) 1653 goto fail; 1654 1655 /* Looks like all FATSOv2 contexts are used */ 1656 flags &= ~EFX_TXQ_FATSOV2; 1657 tso_fw_assisted &= ~SFXGE_FATSOV2; 1658 if ((rc = efx_tx_qcreate(sc->enp, index, label, esmp, 1659 sc->txq_entries, txq->buf_base_id, flags, evq->common, 1660 &txq->common, &desc_index)) != 0) 1661 goto fail; 1662 } 1663 1664 /* Initialise queue descriptor indexes */ 1665 txq->added = txq->pending = txq->completed = txq->reaped = desc_index; 1666 1667 SFXGE_TXQ_LOCK(txq); 1668 1669 /* Enable the transmit queue. */ 1670 efx_tx_qenable(txq->common); 1671 1672 txq->init_state = SFXGE_TXQ_STARTED; 1673 txq->flush_state = SFXGE_FLUSH_REQUIRED; 1674 txq->tso_fw_assisted = tso_fw_assisted; 1675 1676 txq->max_pkt_desc = sfxge_tx_max_pkt_desc(sc, txq->type, 1677 tso_fw_assisted); 1678 1679 txq->hw_vlan_tci = 0; 1680 1681 txq->hw_cksum_flags = flags & 1682 (EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP); 1683 1684 SFXGE_TXQ_UNLOCK(txq); 1685 1686 return (0); 1687 1688fail: 1689 efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id, 1690 EFX_TXQ_NBUFS(sc->txq_entries)); 1691 return (rc); 1692} 1693 1694void 1695sfxge_tx_stop(struct sfxge_softc *sc) 1696{ 1697 int index; 1698 1699 index = sc->txq_count; 1700 while (--index >= 0) 1701 sfxge_tx_qstop(sc, index); 1702 1703 /* Tear down the transmit module */ 1704 efx_tx_fini(sc->enp); 1705} 1706 1707int 1708sfxge_tx_start(struct sfxge_softc *sc) 1709{ 1710 int index; 1711 int rc; 1712 1713 /* Initialize the common code transmit module. */ 1714 if ((rc = efx_tx_init(sc->enp)) != 0) 1715 return (rc); 1716 1717 for (index = 0; index < sc->txq_count; index++) { 1718 if ((rc = sfxge_tx_qstart(sc, index)) != 0) 1719 goto fail; 1720 } 1721 1722 return (0); 1723 1724fail: 1725 while (--index >= 0) 1726 sfxge_tx_qstop(sc, index); 1727 1728 efx_tx_fini(sc->enp); 1729 1730 return (rc); 1731} 1732 1733static int 1734sfxge_txq_stat_init(struct sfxge_txq *txq, struct sysctl_oid *txq_node) 1735{ 1736 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(txq->sc->dev); 1737 struct sysctl_oid *stat_node; 1738 unsigned int id; 1739 1740 stat_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(txq_node), OID_AUTO, 1741 "stats", CTLFLAG_RD, NULL, 1742 "Tx queue statistics"); 1743 if (stat_node == NULL) 1744 return (ENOMEM); 1745 1746 for (id = 0; id < nitems(sfxge_tx_stats); id++) { 1747 SYSCTL_ADD_ULONG( 1748 ctx, SYSCTL_CHILDREN(stat_node), OID_AUTO, 1749 sfxge_tx_stats[id].name, CTLFLAG_RD | CTLFLAG_STATS, 1750 (unsigned long *)((caddr_t)txq + sfxge_tx_stats[id].offset), 1751 ""); 1752 } 1753 1754 return (0); 1755} 1756 1757/** 1758 * Destroy a transmit queue. 1759 */ 1760static void 1761sfxge_tx_qfini(struct sfxge_softc *sc, unsigned int index) 1762{ 1763 struct sfxge_txq *txq; 1764 unsigned int nmaps; 1765 1766 txq = sc->txq[index]; 1767 1768 KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED, 1769 ("txq->init_state != SFXGE_TXQ_INITIALIZED")); 1770 1771 if (txq->type == SFXGE_TXQ_IP_TCP_UDP_CKSUM) 1772 tso_fini(txq); 1773 1774 /* Free the context arrays. */ 1775 free(txq->pend_desc, M_SFXGE); 1776 nmaps = sc->txq_entries; 1777 while (nmaps-- != 0) 1778 bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map); 1779 free(txq->stmp, M_SFXGE); 1780 1781 /* Release DMA memory mapping. 
*/ 1782 sfxge_dma_free(&txq->mem); 1783 1784 sc->txq[index] = NULL; 1785 1786 SFXGE_TXQ_LOCK_DESTROY(txq); 1787 1788 free(txq, M_SFXGE); 1789} 1790 1791static int 1792sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index, 1793 enum sfxge_txq_type type, unsigned int evq_index) 1794{ 1795 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sc->enp); 1796 char name[16]; 1797 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev); 1798 struct sysctl_oid *txq_node; 1799 struct sfxge_txq *txq; 1800 struct sfxge_evq *evq; 1801 struct sfxge_tx_dpl *stdp; 1802 struct sysctl_oid *dpl_node; 1803 efsys_mem_t *esmp; 1804 unsigned int nmaps; 1805 int rc; 1806 1807 txq = malloc(sizeof(struct sfxge_txq), M_SFXGE, M_ZERO | M_WAITOK); 1808 txq->sc = sc; 1809 txq->entries = sc->txq_entries; 1810 txq->ptr_mask = txq->entries - 1; 1811 1812 sc->txq[txq_index] = txq; 1813 esmp = &txq->mem; 1814 1815 evq = sc->evq[evq_index]; 1816 1817 /* Allocate and zero DMA space for the descriptor ring. */ 1818 if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(sc->txq_entries), esmp)) != 0) 1819 return (rc); 1820 1821 /* Allocate buffer table entries. */ 1822 sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(sc->txq_entries), 1823 &txq->buf_base_id); 1824 1825 /* Create a DMA tag for packet mappings. */ 1826 if (bus_dma_tag_create(sc->parent_dma_tag, 1, 1827 encp->enc_tx_dma_desc_boundary, 1828 MIN(0x3FFFFFFFFFFFUL, BUS_SPACE_MAXADDR), BUS_SPACE_MAXADDR, NULL, 1829 NULL, 0x11000, SFXGE_TX_MAPPING_MAX_SEG, 1830 encp->enc_tx_dma_desc_size_max, 0, NULL, NULL, 1831 &txq->packet_dma_tag) != 0) { 1832 device_printf(sc->dev, "Couldn't allocate txq DMA tag\n"); 1833 rc = ENOMEM; 1834 goto fail; 1835 } 1836 1837 /* Allocate pending descriptor array for batching writes. */ 1838 txq->pend_desc = malloc(sizeof(efx_desc_t) * sc->txq_entries, 1839 M_SFXGE, M_ZERO | M_WAITOK); 1840 1841 /* Allocate and initialise mbuf DMA mapping array. */ 1842 txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * sc->txq_entries, 1843 M_SFXGE, M_ZERO | M_WAITOK); 1844 for (nmaps = 0; nmaps < sc->txq_entries; nmaps++) { 1845 rc = bus_dmamap_create(txq->packet_dma_tag, 0, 1846 &txq->stmp[nmaps].map); 1847 if (rc != 0) 1848 goto fail2; 1849 } 1850 1851 snprintf(name, sizeof(name), "%u", txq_index); 1852 txq_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->txqs_node), 1853 OID_AUTO, name, CTLFLAG_RD, NULL, ""); 1854 if (txq_node == NULL) { 1855 rc = ENOMEM; 1856 goto fail_txq_node; 1857 } 1858 1859 if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM && 1860 (rc = tso_init(txq)) != 0) 1861 goto fail3; 1862 1863 /* Initialize the deferred packet list. 
*/ 1864 stdp = &txq->dpl; 1865 stdp->std_put_max = sfxge_tx_dpl_put_max; 1866 stdp->std_get_max = sfxge_tx_dpl_get_max; 1867 stdp->std_get_non_tcp_max = sfxge_tx_dpl_get_non_tcp_max; 1868 stdp->std_getp = &stdp->std_get; 1869 1870 SFXGE_TXQ_LOCK_INIT(txq, device_get_nameunit(sc->dev), txq_index); 1871 1872 dpl_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(txq_node), OID_AUTO, 1873 "dpl", CTLFLAG_RD, NULL, 1874 "Deferred packet list statistics"); 1875 if (dpl_node == NULL) { 1876 rc = ENOMEM; 1877 goto fail_dpl_node; 1878 } 1879 1880 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO, 1881 "get_count", CTLFLAG_RD | CTLFLAG_STATS, 1882 &stdp->std_get_count, 0, ""); 1883 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO, 1884 "get_non_tcp_count", CTLFLAG_RD | CTLFLAG_STATS, 1885 &stdp->std_get_non_tcp_count, 0, ""); 1886 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO, 1887 "get_hiwat", CTLFLAG_RD | CTLFLAG_STATS, 1888 &stdp->std_get_hiwat, 0, ""); 1889 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO, 1890 "put_hiwat", CTLFLAG_RD | CTLFLAG_STATS, 1891 &stdp->std_put_hiwat, 0, ""); 1892 1893 rc = sfxge_txq_stat_init(txq, txq_node); 1894 if (rc != 0) 1895 goto fail_txq_stat_init; 1896 1897 txq->type = type; 1898 txq->evq_index = evq_index; 1899 txq->init_state = SFXGE_TXQ_INITIALIZED; 1900 1901 return (0); 1902 1903fail_txq_stat_init: 1904fail_dpl_node: 1905fail3: 1906fail_txq_node: 1907 free(txq->pend_desc, M_SFXGE); 1908fail2: 1909 while (nmaps-- != 0) 1910 bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map); 1911 free(txq->stmp, M_SFXGE); 1912 bus_dma_tag_destroy(txq->packet_dma_tag); 1913 1914fail: 1915 sfxge_dma_free(esmp); 1916 1917 return (rc); 1918} 1919 1920static int 1921sfxge_tx_stat_handler(SYSCTL_HANDLER_ARGS) 1922{ 1923 struct sfxge_softc *sc = arg1; 1924 unsigned int id = arg2; 1925 unsigned long sum; 1926 unsigned int index; 1927 1928 /* Sum across all TX queues */ 1929 sum = 0; 1930 for (index = 0; index < sc->txq_count; index++) 1931 sum += *(unsigned long *)((caddr_t)sc->txq[index] + 1932 sfxge_tx_stats[id].offset); 1933 1934 return (SYSCTL_OUT(req, &sum, sizeof(sum))); 1935} 1936 1937static void 1938sfxge_tx_stat_init(struct sfxge_softc *sc) 1939{ 1940 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev); 1941 struct sysctl_oid_list *stat_list; 1942 unsigned int id; 1943 1944 stat_list = SYSCTL_CHILDREN(sc->stats_node); 1945 1946 for (id = 0; id < nitems(sfxge_tx_stats); id++) { 1947 SYSCTL_ADD_PROC( 1948 ctx, stat_list, 1949 OID_AUTO, sfxge_tx_stats[id].name, 1950 CTLTYPE_ULONG|CTLFLAG_RD, 1951 sc, id, sfxge_tx_stat_handler, "LU", 1952 ""); 1953 } 1954} 1955 1956static uint64_t 1957sfxge_tx_get_drops(struct sfxge_softc *sc) 1958{ 1959 unsigned int index; 1960 uint64_t drops = 0; 1961 struct sfxge_txq *txq; 1962 1963 /* Sum across all TX queues */ 1964 for (index = 0; index < sc->txq_count; index++) { 1965 txq = sc->txq[index]; 1966 /* 1967 * In theory, txq->put_overflow and txq->netdown_drops 1968 * should use atomic operation and other should be 1969 * obtained under txq lock, but it is just statistics. 
1970 */ 1971 drops += txq->drops + txq->get_overflow + 1972 txq->get_non_tcp_overflow + 1973 txq->put_overflow + txq->netdown_drops + 1974 txq->tso_pdrop_too_many + txq->tso_pdrop_no_rsrc; 1975 } 1976 return (drops); 1977} 1978 1979void 1980sfxge_tx_update_stats(struct sfxge_softc *sc) 1981{ 1982 sc->ifnet->if_oerrors += sfxge_tx_get_drops(sc); 1983} 1984 1985void 1986sfxge_tx_fini(struct sfxge_softc *sc) 1987{ 1988 int index; 1989 1990 index = sc->txq_count; 1991 while (--index >= 0) 1992 sfxge_tx_qfini(sc, index); 1993 1994 sc->txq_count = 0; 1995} 1996 1997 1998int 1999sfxge_tx_init(struct sfxge_softc *sc) 2000{ 2001 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sc->enp); 2002 struct sfxge_intr *intr; 2003 int index; 2004 int rc; 2005 2006 intr = &sc->intr; 2007 2008 KASSERT(intr->state == SFXGE_INTR_INITIALIZED, 2009 ("intr->state != SFXGE_INTR_INITIALIZED")); 2010 2011 if (sfxge_tx_dpl_get_max <= 0) { 2012 log(LOG_ERR, "%s=%d must be greater than 0", 2013 SFXGE_PARAM_TX_DPL_GET_MAX, sfxge_tx_dpl_get_max); 2014 rc = EINVAL; 2015 goto fail_tx_dpl_get_max; 2016 } 2017 if (sfxge_tx_dpl_get_non_tcp_max <= 0) { 2018 log(LOG_ERR, "%s=%d must be greater than 0", 2019 SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX, 2020 sfxge_tx_dpl_get_non_tcp_max); 2021 rc = EINVAL; 2022 goto fail_tx_dpl_get_non_tcp_max; 2023 } 2024 if (sfxge_tx_dpl_put_max < 0) { 2025 log(LOG_ERR, "%s=%d must be greater or equal to 0", 2026 SFXGE_PARAM_TX_DPL_PUT_MAX, sfxge_tx_dpl_put_max); 2027 rc = EINVAL; 2028 goto fail_tx_dpl_put_max; 2029 } 2030 2031 sc->txq_count = SFXGE_EVQ0_N_TXQ(sc) - 1 + sc->intr.n_alloc; 2032 2033 sc->tso_fw_assisted = sfxge_tso_fw_assisted; 2034 if ((~encp->enc_features & EFX_FEATURE_FW_ASSISTED_TSO) || 2035 (!encp->enc_fw_assisted_tso_enabled)) 2036 sc->tso_fw_assisted &= ~SFXGE_FATSOV1; 2037 if ((~encp->enc_features & EFX_FEATURE_FW_ASSISTED_TSO_V2) || 2038 (!encp->enc_fw_assisted_tso_v2_enabled)) 2039 sc->tso_fw_assisted &= ~SFXGE_FATSOV2; 2040 2041 sc->txqs_node = SYSCTL_ADD_NODE( 2042 device_get_sysctl_ctx(sc->dev), 2043 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), 2044 OID_AUTO, "txq", CTLFLAG_RD, NULL, "Tx queues"); 2045 if (sc->txqs_node == NULL) { 2046 rc = ENOMEM; 2047 goto fail_txq_node; 2048 } 2049 2050 /* Initialize the transmit queues */ 2051 if (sc->txq_dynamic_cksum_toggle_supported == B_FALSE) { 2052 if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NON_CKSUM, 2053 SFXGE_TXQ_NON_CKSUM, 0)) != 0) 2054 goto fail; 2055 2056 if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_CKSUM, 2057 SFXGE_TXQ_IP_CKSUM, 0)) != 0) 2058 goto fail2; 2059 } 2060 2061 for (index = 0; 2062 index < sc->txq_count - SFXGE_EVQ0_N_TXQ(sc) + 1; 2063 index++) { 2064 if ((rc = sfxge_tx_qinit(sc, SFXGE_EVQ0_N_TXQ(sc) - 1 + index, 2065 SFXGE_TXQ_IP_TCP_UDP_CKSUM, index)) != 0) 2066 goto fail3; 2067 } 2068 2069 sfxge_tx_stat_init(sc); 2070 2071 return (0); 2072 2073fail3: 2074 while (--index >= 0) 2075 sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index); 2076 2077 sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM); 2078 2079fail2: 2080 sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM); 2081 2082fail: 2083fail_txq_node: 2084 sc->txq_count = 0; 2085fail_tx_dpl_put_max: 2086fail_tx_dpl_get_non_tcp_max: 2087fail_tx_dpl_get_max: 2088 return (rc); 2089} 2090