nicvf_queues.c revision 299444
/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/vnic/nicvf_queues.c 299444 2016-05-11 13:22:13Z zbb $
 *
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/vnic/nicvf_queues.c 299444 2016-05-11 13:22:13Z zbb $");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/buf_ring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sockio.h>
#include <sys/socket.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/vmparam.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/ifq.h>
#include <net/bpf.h>
#include <net/ethernet.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/sctp.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet/udp.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "thunder_bgx.h"
#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "nicvf_queues.h"

#define DEBUG
#undef DEBUG

#ifdef DEBUG
#define dprintf(dev, fmt, ...)	device_printf(dev, fmt, ##__VA_ARGS__)
#else
#define dprintf(dev, fmt, ...)
#endif

MALLOC_DECLARE(M_NICVF);

static void nicvf_free_snd_queue(struct nicvf *, struct snd_queue *);
static struct mbuf * nicvf_get_rcv_mbuf(struct nicvf *, struct cqe_rx_t *);
static void nicvf_sq_disable(struct nicvf *, int);
static void nicvf_sq_enable(struct nicvf *, struct snd_queue *, int);
static void nicvf_put_sq_desc(struct snd_queue *, int);
static void nicvf_cmp_queue_config(struct nicvf *, struct queue_set *, int,
    boolean_t);
static void nicvf_sq_free_used_descs(struct nicvf *, struct snd_queue *, int);

static int nicvf_tx_mbuf_locked(struct snd_queue *, struct mbuf **);

static void nicvf_rbdr_task(void *, int);
static void nicvf_rbdr_task_nowait(void *, int);

struct rbuf_info {
	bus_dma_tag_t	dmat;
	bus_dmamap_t	dmap;
	struct mbuf *	mbuf;
};

#define GET_RBUF_INFO(x) ((struct rbuf_info *)((x) - NICVF_RCV_BUF_ALIGN_BYTES))

/* Poll a register for a specific value */
static int
nicvf_poll_reg(struct nicvf *nic, int qidx,
    uint64_t reg, int bit_pos, int bits, int val)
{
	uint64_t bit_mask;
	uint64_t reg_val;
	int timeout = 10;

	bit_mask = (1UL << bits) - 1;
	bit_mask = (bit_mask << bit_pos);

	while (timeout) {
		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
		if (((reg_val & bit_mask) >> bit_pos) == val)
			return (0);

		DELAY(1000);
		timeout--;
	}
	device_printf(nic->dev, "Poll on reg 0x%lx failed\n", reg);
	return (ETIMEDOUT);
}

/* Callback for bus_dmamap_load() */
static void
nicvf_dmamap_q_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr;

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	paddr = arg;
	*paddr = segs->ds_addr;
}

/* Allocate memory for a queue's descriptors */
static int
nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
    int q_len, int desc_size, int align_bytes)
{
	int err, err_dmat;

	/* Create DMA tag first */
	err = bus_dma_tag_create(
	    bus_get_dma_tag(nic->dev),		/* parent tag */
	    align_bytes,			/* alignment */
	    0,					/* boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    (q_len * desc_size),		/* maxsize */
	    1,					/* nsegments */
	    (q_len * desc_size),		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &dmem->dmat);			/* dmat */
	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to create busdma tag for descriptors ring\n");
		return (err);
	}

	/* Allocate a segment of contiguous DMA-safe memory */
	err = bus_dmamem_alloc(
	    dmem->dmat,				/* DMA tag */
	    &dmem->base,			/* virtual address */
	    (BUS_DMA_NOWAIT | BUS_DMA_ZERO),	/* flags */
	    &dmem->dmap);			/* DMA map */
	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to allocate DMA safe memory for descriptors ring\n");
		goto dmamem_fail;
	}
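	/*
	 * With BUS_DMA_NOWAIT, bus_dmamap_load() performs the mapping
	 * synchronously, so nicvf_dmamap_q_cb() has already stored the
	 * single segment's bus address in phys_base by the time the call
	 * below returns.
	 */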
	err = bus_dmamap_load(
	    dmem->dmat,
	    dmem->dmap,
	    dmem->base,
	    (q_len * desc_size),	/* allocation size */
	    nicvf_dmamap_q_cb,		/* map to DMA address callback */
	    &dmem->phys_base,		/* physical address */
	    BUS_DMA_NOWAIT);
	if (err != 0) {
		device_printf(nic->dev,
		    "Cannot load DMA map of descriptors ring\n");
		goto dmamap_fail;
	}

	dmem->q_len = q_len;
	dmem->size = (desc_size * q_len);

	return (0);

dmamap_fail:
	bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
	dmem->phys_base = 0;
dmamem_fail:
	err_dmat = bus_dma_tag_destroy(dmem->dmat);
	dmem->base = NULL;
	KASSERT(err_dmat == 0,
	    ("%s: Trying to destroy BUSY DMA tag", __func__));

	return (err);
}

/* Free queue's descriptor memory */
static void
nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
	int err;

	if ((dmem == NULL) || (dmem->base == NULL))
		return;

	/* Unload the map */
	bus_dmamap_sync(dmem->dmat, dmem->dmap, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(dmem->dmat, dmem->dmap);
	/* Free DMA memory */
	bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
	/* Destroy DMA tag */
	err = bus_dma_tag_destroy(dmem->dmat);

	KASSERT(err == 0,
	    ("%s: Trying to destroy BUSY DMA tag", __func__));

	dmem->phys_base = 0;
	dmem->base = NULL;
}

/*
 * Allocate a buffer for packet reception.
 * HW returns the memory address where the packet is DMA'ed, not a pointer
 * into the RBDR ring, so save the buffer address at the start of the
 * fragment and align the start address to a cache-aligned address.
 */
static __inline int
nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
    bus_dmamap_t dmap, int mflags, uint32_t buf_len, bus_addr_t *rbuf)
{
	struct mbuf *mbuf;
	struct rbuf_info *rinfo;
	bus_dma_segment_t segs[1];
	int nsegs;
	int err;

	mbuf = m_getjcl(mflags, MT_DATA, M_PKTHDR, MCLBYTES);
	if (mbuf == NULL)
		return (ENOMEM);

	/*
	 * The length is equal to the actual length + one 128-byte line
	 * used as room for the rbuf_info structure.
	 */
	mbuf->m_len = mbuf->m_pkthdr.len = buf_len;

	err = bus_dmamap_load_mbuf_sg(rbdr->rbdr_buff_dmat, dmap, mbuf, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to map mbuf into DMA visible memory, err: %d\n",
		    err);
		m_freem(mbuf);
		bus_dmamap_destroy(rbdr->rbdr_buff_dmat, dmap);
		return (err);
	}
	if (nsegs != 1)
		panic("Unexpected number of DMA segments for RB: %d", nsegs);
	/*
	 * Now use the room for the rbuf_info structure
	 * and adjust the mbuf data and length.
	 */
	rinfo = (struct rbuf_info *)mbuf->m_data;
	m_adj(mbuf, NICVF_RCV_BUF_ALIGN_BYTES);

	rinfo->dmat = rbdr->rbdr_buff_dmat;
	rinfo->dmap = dmap;
	rinfo->mbuf = mbuf;

	*rbuf = segs[0].ds_addr + NICVF_RCV_BUF_ALIGN_BYTES;

	return (0);
}

/* Retrieve mbuf for received packet */
static struct mbuf *
nicvf_rb_ptr_to_mbuf(struct nicvf *nic, bus_addr_t rb_ptr)
{
	struct mbuf *mbuf;
	struct rbuf_info *rinfo;

	/* Get buffer start address and alignment offset */
	rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(rb_ptr));

	/* Now retrieve mbuf to give to stack */
	mbuf = rinfo->mbuf;
	if (__predict_false(mbuf == NULL)) {
		panic("%s: Received packet fragment with NULL mbuf",
		    device_get_nameunit(nic->dev));
	}
	/*
	 * Clear the mbuf in the descriptor to indicate
	 * that this slot is processed and free to use.
	 */
	rinfo->mbuf = NULL;

	bus_dmamap_sync(rinfo->dmat, rinfo->dmap, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(rinfo->dmat, rinfo->dmap);

	return (mbuf);
}

/* Allocate RBDR ring and populate receive buffers */
static int
nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, int ring_len,
    int buf_size, int qidx)
{
	bus_dmamap_t dmap;
	bus_addr_t rbuf;
	struct rbdr_entry_t *desc;
	int idx;
	int err;

	/* Allocate rbdr descriptors ring */
	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
	    sizeof(struct rbdr_entry_t), NICVF_RCV_BUF_ALIGN_BYTES);
	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to create RBDR descriptors ring\n");
		return (err);
	}

	rbdr->desc = rbdr->dmem.base;
	/*
	 * Buffer size has to be in multiples of 128 bytes.
	 * Make room for metadata of the size of one line (128 bytes).
	 */
	rbdr->dma_size = buf_size - NICVF_RCV_BUF_ALIGN_BYTES;
	rbdr->enable = TRUE;
	rbdr->thresh = RBDR_THRESH;
	rbdr->nic = nic;
	rbdr->idx = qidx;

	/*
	 * Create DMA tag for Rx buffers.
	 * Each map created using this tag is intended to store the Rx
	 * payload for one fragment and one header structure containing
	 * rbuf_info (thus an additional 128-byte line, since an RB must
	 * be a multiple of the 128-byte cache line).
	 */
	if (buf_size > MCLBYTES) {
		device_printf(nic->dev,
		    "Buffer size too large for mbuf cluster\n");
		return (EINVAL);
	}
	err = bus_dma_tag_create(
	    bus_get_dma_tag(nic->dev),		/* parent tag */
	    NICVF_RCV_BUF_ALIGN_BYTES,		/* alignment */
	    0,					/* boundary */
	    DMAP_MAX_PHYSADDR,			/* lowaddr */
	    DMAP_MIN_PHYSADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    roundup2(buf_size, MCLBYTES),	/* maxsize */
	    1,					/* nsegments */
	    roundup2(buf_size, MCLBYTES),	/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &rbdr->rbdr_buff_dmat);		/* dmat */
	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to create busdma tag for RBDR buffers\n");
		return (err);
	}

	rbdr->rbdr_buff_dmaps = malloc(sizeof(*rbdr->rbdr_buff_dmaps) *
	    ring_len, M_NICVF, (M_WAITOK | M_ZERO));

	for (idx = 0; idx < ring_len; idx++) {
		err = bus_dmamap_create(rbdr->rbdr_buff_dmat, 0, &dmap);
		if (err != 0) {
			device_printf(nic->dev,
			    "Failed to create DMA map for RB\n");
			return (err);
		}
		rbdr->rbdr_buff_dmaps[idx] = dmap;

		err = nicvf_alloc_rcv_buffer(nic, rbdr, dmap, M_WAITOK,
		    DMA_BUFFER_LEN, &rbuf);
		if (err != 0)
			return (err);

		desc = GET_RBDR_DESC(rbdr, idx);
		desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
	}

	/* Allocate taskqueue */
	TASK_INIT(&rbdr->rbdr_task, 0, nicvf_rbdr_task, rbdr);
	TASK_INIT(&rbdr->rbdr_task_nowait, 0, nicvf_rbdr_task_nowait, rbdr);
	rbdr->rbdr_taskq = taskqueue_create_fast("nicvf_rbdr_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &rbdr->rbdr_taskq);
	taskqueue_start_threads(&rbdr->rbdr_taskq, 1, PI_NET, "%s: rbdr_taskq",
	    device_get_nameunit(nic->dev));

	return (0);
}

/* Free RBDR ring and its receive buffers */
static void
nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
	struct mbuf *mbuf;
	struct queue_set *qs;
	struct rbdr_entry_t *desc;
	struct rbuf_info *rinfo;
	bus_addr_t buf_addr;
	int head, tail, idx;
	int err;

	qs = nic->qs;

	if ((qs == NULL) || (rbdr == NULL))
		return;
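	/*
	 * Mark the ring disabled first: nicvf_refill_rbdr() checks this
	 * flag and bails out, so no refill task started after this point
	 * will touch the ring while it is being torn down.
	 */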
	rbdr->enable = FALSE;
	if (rbdr->rbdr_taskq != NULL) {
		/* Remove tasks */
		while (taskqueue_cancel(rbdr->rbdr_taskq,
		    &rbdr->rbdr_task_nowait, NULL) != 0) {
			/* Finish the nowait task first */
			taskqueue_drain(rbdr->rbdr_taskq,
			    &rbdr->rbdr_task_nowait);
		}
		taskqueue_free(rbdr->rbdr_taskq);
		rbdr->rbdr_taskq = NULL;

		while (taskqueue_cancel(taskqueue_thread,
		    &rbdr->rbdr_task, NULL) != 0) {
			/* Now finish the sleepable task */
			taskqueue_drain(taskqueue_thread, &rbdr->rbdr_task);
		}
	}

	/*
	 * Free all of the memory under the RB descriptors.
	 * There are assumptions here:
	 * 1. Corresponding RBDR is disabled
	 *    - it is safe to operate using head and tail indexes
	 * 2. All buffers that were received are properly freed by
	 *    the receive handler
	 *    - there is no need to unload the DMA map and free the mbuf
	 *      for descriptors other than the unused ones
	 */
	if (rbdr->rbdr_buff_dmat != NULL) {
		head = rbdr->head;
		tail = rbdr->tail;
		while (head != tail) {
			desc = GET_RBDR_DESC(rbdr, head);
			buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
			rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
			bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
			mbuf = rinfo->mbuf;
			/* This will destroy everything including rinfo! */
			m_freem(mbuf);
			head++;
			head &= (rbdr->dmem.q_len - 1);
		}
		/* Free tail descriptor */
		desc = GET_RBDR_DESC(rbdr, tail);
		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
		rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
		bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
		mbuf = rinfo->mbuf;
		/* This will destroy everything including rinfo! */
		m_freem(mbuf);

		/* Destroy DMA maps */
		for (idx = 0; idx < qs->rbdr_len; idx++) {
			if (rbdr->rbdr_buff_dmaps[idx] == NULL)
				continue;
			err = bus_dmamap_destroy(rbdr->rbdr_buff_dmat,
			    rbdr->rbdr_buff_dmaps[idx]);
			KASSERT(err == 0,
			    ("%s: Could not destroy DMA map for RB, desc: %d",
			    __func__, idx));
			rbdr->rbdr_buff_dmaps[idx] = NULL;
		}

		/* Now destroy the tag */
		err = bus_dma_tag_destroy(rbdr->rbdr_buff_dmat);
		KASSERT(err == 0,
		    ("%s: Trying to destroy BUSY DMA tag", __func__));

		rbdr->head = 0;
		rbdr->tail = 0;
	}

	/* Free RBDR ring */
	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}
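/*
 * Refilling is two-tiered: the interrupt path kicks the nowait task
 * (M_NOWAIT) on the per-RBDR taskqueue first and, only when buffer
 * allocation fails there, falls back to the sleepable task (M_WAITOK)
 * running on taskqueue_thread, which may block to reclaim memory.
 */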
/*
 * Refill receive buffer descriptors with new buffers.
 */
static int
nicvf_refill_rbdr(struct rbdr *rbdr, int mflags)
{
	struct nicvf *nic;
	struct queue_set *qs;
	int rbdr_idx;
	int tail, qcount;
	int refill_rb_cnt;
	struct rbdr_entry_t *desc;
	bus_dmamap_t dmap;
	bus_addr_t rbuf;
	boolean_t rb_alloc_fail;
	int new_rb;

	rb_alloc_fail = TRUE;
	new_rb = 0;
	nic = rbdr->nic;
	qs = nic->qs;
	rbdr_idx = rbdr->idx;

	/* Check if it's enabled */
	if (!rbdr->enable)
		return (0);

	/* Get the number of descriptors to be refilled */
	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
	qcount &= 0x7FFFF;
	/* The doorbell can be rung with at most the ring size minus 1 */
	if (qcount >= (qs->rbdr_len - 1)) {
		rb_alloc_fail = FALSE;
		goto out;
	} else
		refill_rb_cnt = qs->rbdr_len - qcount - 1;

	/* Start filling descs from tail */
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
	while (refill_rb_cnt) {
		tail++;
		tail &= (rbdr->dmem.q_len - 1);

		dmap = rbdr->rbdr_buff_dmaps[tail];
		if (nicvf_alloc_rcv_buffer(nic, rbdr, dmap, mflags,
		    DMA_BUFFER_LEN, &rbuf)) {
			/* Something went wrong; give up */
			break;
		}
		desc = GET_RBDR_DESC(rbdr, tail);
		desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
		refill_rb_cnt--;
		new_rb++;
	}

	/* Make sure all memory stores are done before ringing the doorbell */
	wmb();

	/* Check if buffer allocation failed */
	if (refill_rb_cnt == 0)
		rb_alloc_fail = FALSE;

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
	    rbdr_idx, new_rb);
out:
	if (!rb_alloc_fail) {
		/*
		 * Re-enable RBDR interrupts only
		 * if buffer allocation succeeded.
		 */
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

		return (0);
	}

	return (ENOMEM);
}

/* Refill RBs even if sleep is needed to reclaim memory */
static void
nicvf_rbdr_task(void *arg, int pending)
{
	struct rbdr *rbdr;
	int err;

	rbdr = (struct rbdr *)arg;

	err = nicvf_refill_rbdr(rbdr, M_WAITOK);
	if (__predict_false(err != 0)) {
		panic("%s: Failed to refill RBs even when sleep enabled",
		    __func__);
	}
}

/* Refill RBs as soon as possible without waiting */
static void
nicvf_rbdr_task_nowait(void *arg, int pending)
{
	struct rbdr *rbdr;
	int err;

	rbdr = (struct rbdr *)arg;

	err = nicvf_refill_rbdr(rbdr, M_NOWAIT);
	if (err != 0) {
		/*
		 * Schedule another, sleepable kernel thread
		 * that is guaranteed to refill the buffers.
		 */
		taskqueue_enqueue(taskqueue_thread, &rbdr->rbdr_task);
	}
}

static int
nicvf_rcv_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
    struct cqe_rx_t *cqe_rx, int cqe_type)
{
	struct mbuf *mbuf;
	struct rcv_queue *rq;
	int rq_idx;
	int err = 0;

	rq_idx = cqe_rx->rq_idx;
	rq = &nic->qs->rq[rq_idx];

	/* Check for errors */
	err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
	if (err && !cqe_rx->rb_cnt)
		return (0);

	mbuf = nicvf_get_rcv_mbuf(nic, cqe_rx);
	if (mbuf == NULL) {
		dprintf(nic->dev, "Packet not received\n");
		return (0);
	}

	/* If error packet */
	if (err != 0) {
		m_freem(mbuf);
		return (0);
	}

	if (rq->lro_enabled &&
	    ((cqe_rx->l3_type == L3TYPE_IPV4) &&
	    (cqe_rx->l4_type == L4TYPE_TCP)) &&
	    (mbuf->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
		/*
		 * At this point it is known that there are no errors in the
		 * packet. Attempt to LRO enqueue. Send to stack if no
		 * resources or enqueue error.
		 */
		if ((rq->lro.lro_cnt != 0) &&
		    (tcp_lro_rx(&rq->lro, mbuf, 0) == 0))
			return (0);
	}
	/*
	 * Push this packet to the stack later to avoid
	 * unlocking the completion task in the middle of work.
	 */
	err = buf_ring_enqueue(cq->rx_br, mbuf);
	if (err != 0) {
		/*
		 * Failed to enqueue this mbuf.
		 * We don't drop it, just schedule another task.
		 */
		return (err);
	}

	return (0);
}

static int
nicvf_snd_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
    struct cqe_send_t *cqe_tx, int cqe_type)
{
	bus_dmamap_t dmap;
	struct mbuf *mbuf;
	struct snd_queue *sq;
	struct sq_hdr_subdesc *hdr;

	mbuf = NULL;
	sq = &nic->qs->sq[cqe_tx->sq_idx];
	/* Avoid blocking here since we hold a non-sleepable NICVF_CMP_LOCK */
	if (NICVF_TX_TRYLOCK(sq) == 0)
		return (EAGAIN);

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
		NICVF_TX_UNLOCK(sq);
		return (0);
	}

	dprintf(nic->dev,
	    "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
	    __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
	    cqe_tx->sqe_ptr, hdr->subdesc_cnt);

	dmap = (bus_dmamap_t)sq->snd_buff[cqe_tx->sqe_ptr].dmap;
	bus_dmamap_unload(sq->snd_buff_dmat, dmap);

	mbuf = (struct mbuf *)sq->snd_buff[cqe_tx->sqe_ptr].mbuf;
	if (mbuf != NULL) {
		m_freem(mbuf);
		sq->snd_buff[cqe_tx->sqe_ptr].mbuf = NULL;
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}

	nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);

	NICVF_TX_UNLOCK(sq);
	return (0);
}

static int
nicvf_cq_intr_handler(struct nicvf *nic, uint8_t cq_idx)
{
	struct mbuf *mbuf;
	struct ifnet *ifp;
	int processed_cqe, work_done = 0, tx_done = 0;
	int cqe_count, cqe_head;
	struct queue_set *qs = nic->qs;
	struct cmp_queue *cq = &qs->cq[cq_idx];
	struct snd_queue *sq = &qs->sq[cq_idx];
	struct rcv_queue *rq;
	struct cqe_rx_t *cq_desc;
	struct lro_ctrl *lro;
	int rq_idx;
	int cmp_err;

	NICVF_CMP_LOCK(cq);
	cmp_err = 0;
	processed_cqe = 0;
	/* Get the number of valid CQ entries to process */
	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
	cqe_count &= CQ_CQE_COUNT;
	if (cqe_count == 0)
		goto out;
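	/*
	 * The HEAD register holds a byte offset into the ring; the shift
	 * by 9 converts it into a descriptor index, each CQE being
	 * 512 bytes (cf. CMP_QUEUE_DESC_SIZE used at CQ allocation).
	 */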
	/* Get head of the valid CQ entries */
	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
	cqe_head &= 0xFFFF;

	dprintf(nic->dev, "%s CQ%d cqe_count %d cqe_head %d\n",
	    __func__, cq_idx, cqe_count, cqe_head);
	while (processed_cqe < cqe_count) {
		/* Get the CQ descriptor */
		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
		cqe_head++;
		cqe_head &= (cq->dmem.q_len - 1);
		/* Prefetch next CQ descriptor */
		__builtin_prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));

		dprintf(nic->dev, "CQ%d cq_desc->cqe_type %d\n", cq_idx,
		    cq_desc->cqe_type);
		switch (cq_desc->cqe_type) {
		case CQE_TYPE_RX:
			cmp_err = nicvf_rcv_pkt_handler(nic, cq, cq_desc,
			    CQE_TYPE_RX);
			if (__predict_false(cmp_err != 0)) {
				/*
				 * Oops. Cannot finish now.
				 * Let's try again later.
				 */
				goto done;
			}
			work_done++;
			break;
		case CQE_TYPE_SEND:
			cmp_err = nicvf_snd_pkt_handler(nic, cq,
			    (void *)cq_desc, CQE_TYPE_SEND);
			if (__predict_false(cmp_err != 0)) {
				/*
				 * Oops. Cannot finish now.
				 * Let's try again later.
				 */
				goto done;
			}

			tx_done++;
			break;
		case CQE_TYPE_INVALID:
		case CQE_TYPE_RX_SPLIT:
		case CQE_TYPE_RX_TCP:
		case CQE_TYPE_SEND_PTP:
			/* Ignore for now */
			break;
		}
		processed_cqe++;
	}
done:
	dprintf(nic->dev,
	    "%s CQ%d processed_cqe %d work_done %d\n",
	    __func__, cq_idx, processed_cqe, work_done);

	/* Ring doorbell to inform H/W to reuse processed CQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, cq_idx, processed_cqe);

	if ((tx_done > 0) &&
	    ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0)) {
		/* Reenable TXQ if it was stopped earlier due to SQ full */
		if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
		taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
	}
out:
	/*
	 * Flush any outstanding LRO work
	 */
	rq_idx = cq_idx;
	rq = &nic->qs->rq[rq_idx];
	lro = &rq->lro;
	tcp_lro_flush_all(lro);

	NICVF_CMP_UNLOCK(cq);

	ifp = nic->ifp;
	/* Push received MBUFs to the stack */
	while (!buf_ring_empty(cq->rx_br)) {
		mbuf = buf_ring_dequeue_mc(cq->rx_br);
		if (__predict_true(mbuf != NULL))
			(*ifp->if_input)(ifp, mbuf);
	}

	return (cmp_err);
}

/*
 * Qset error interrupt handler
 *
 * As of now only CQ errors are handled
 */
static void
nicvf_qs_err_task(void *arg, int pending)
{
	struct nicvf *nic;
	struct queue_set *qs;
	int qidx;
	uint64_t status;
	boolean_t enable = TRUE;

	nic = (struct nicvf *)arg;
	qs = nic->qs;

	/* Deactivate network interface */
	if_setdrvflagbits(nic->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);

	/* Check if it is a CQ error */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
		    qidx);
		if ((status & CQ_ERR_MASK) == 0)
			continue;
		/* Process already queued CQEs and reconfig CQ */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_sq_disable(nic, qidx);
		(void)nicvf_cq_intr_handler(nic, qidx);
		nicvf_cmp_queue_config(nic, qs, qidx, enable);
		nicvf_sq_free_used_descs(nic, &qs->sq[qidx], qidx);
		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
	}

	if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
	/* Re-enable Qset error interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
}
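/*
 * A non-zero return from nicvf_cq_intr_handler() means the CQ was not
 * fully drained (e.g. the SQ trylock failed with EAGAIN), so the task
 * below simply re-enqueues itself to retry.
 */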
static void
nicvf_cmp_task(void *arg, int pending)
{
	struct cmp_queue *cq;
	struct nicvf *nic;
	int cmp_err;

	cq = (struct cmp_queue *)arg;
	nic = cq->nic;

	/* Handle CQ descriptors */
	cmp_err = nicvf_cq_intr_handler(nic, cq->idx);
	if (__predict_false(cmp_err != 0)) {
		/*
		 * Schedule another thread here since we did not
		 * process the entire CQ due to Tx or Rx CQ parse error.
		 */
		taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task);
	}

	nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
	/* Reenable interrupt (previously disabled in nicvf_intr_handler()) */
	nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->idx);
}

/* Initialize completion queue */
static int
nicvf_init_cmp_queue(struct nicvf *nic, struct cmp_queue *cq, int q_len,
    int qidx)
{
	int err;

	/* Initialize lock */
	snprintf(cq->mtx_name, sizeof(cq->mtx_name), "%s: CQ(%d) lock",
	    device_get_nameunit(nic->dev), qidx);
	mtx_init(&cq->mtx, cq->mtx_name, NULL, MTX_DEF);

	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
	    NICVF_CQ_BASE_ALIGN_BYTES);
	if (err != 0) {
		device_printf(nic->dev,
		    "Could not allocate DMA memory for CQ\n");
		return (err);
	}

	cq->desc = cq->dmem.base;
	cq->thresh = pass1_silicon(nic->dev) ? 0 : CMP_QUEUE_CQE_THRESH;
	cq->nic = nic;
	cq->idx = qidx;
	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;

	cq->rx_br = buf_ring_alloc(CMP_QUEUE_LEN * 8, M_DEVBUF, M_WAITOK,
	    &cq->mtx);

	/* Allocate taskqueue */
	TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq);
	cq->cmp_taskq = taskqueue_create_fast("nicvf_cmp_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &cq->cmp_taskq);
	taskqueue_start_threads(&cq->cmp_taskq, 1, PI_NET, "%s: cmp_taskq(%d)",
	    device_get_nameunit(nic->dev), qidx);

	return (0);
}

static void
nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
{

	if (cq == NULL)
		return;
	/*
	 * The completion queue itself should be disabled by now
	 * (ref. nicvf_snd_queue_config()).
	 * Ensure that it is safe to disable it or panic.
	 */
	if (cq->enable)
		panic("%s: Trying to free working CQ(%d)", __func__, cq->idx);

	if (cq->cmp_taskq != NULL) {
		/* Remove task */
		while (taskqueue_cancel(cq->cmp_taskq, &cq->cmp_task, NULL) != 0)
			taskqueue_drain(cq->cmp_taskq, &cq->cmp_task);

		taskqueue_free(cq->cmp_taskq);
		cq->cmp_taskq = NULL;
	}
	/*
	 * The completion task may re-enable interrupts, so disable them
	 * now that the task has finished. This is safe since the
	 * corresponding CQ was already disabled.
	 */
	nicvf_disable_intr(nic, NICVF_INTR_CQ, cq->idx);
	nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);

	NICVF_CMP_LOCK(cq);
	nicvf_free_q_desc_mem(nic, &cq->dmem);
	drbr_free(cq->rx_br, M_DEVBUF);
	NICVF_CMP_UNLOCK(cq);
	mtx_destroy(&cq->mtx);
	memset(cq->mtx_name, 0, sizeof(cq->mtx_name));
}

int
nicvf_xmit_locked(struct snd_queue *sq)
{
	struct nicvf *nic;
	struct ifnet *ifp;
	struct mbuf *next;
	int err;

	NICVF_TX_LOCK_ASSERT(sq);

	nic = sq->nic;
	ifp = nic->ifp;
	err = 0;

	while ((next = drbr_peek(ifp, sq->br)) != NULL) {
		err = nicvf_tx_mbuf_locked(sq, &next);
		if (err != 0) {
			if (next == NULL)
				drbr_advance(ifp, sq->br);
			else
				drbr_putback(ifp, sq->br, next);

			break;
		}
		drbr_advance(ifp, sq->br);
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, next);
	}
	return (err);
}

static void
nicvf_snd_task(void *arg, int pending)
{
	struct snd_queue *sq = (struct snd_queue *)arg;
	struct nicvf *nic;
	struct ifnet *ifp;
	int err;

	nic = sq->nic;
	ifp = nic->ifp;

	/*
	 * Skip sending anything if the driver is not running,
	 * the SQ is full or the link is down.
	 */
	if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING) || !nic->link_up)
		return;

	NICVF_TX_LOCK(sq);
	err = nicvf_xmit_locked(sq);
	NICVF_TX_UNLOCK(sq);
	/* Try again */
	if (err != 0)
		taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
}

/* Initialize transmit queue */
static int
nicvf_init_snd_queue(struct nicvf *nic, struct snd_queue *sq, int q_len,
    int qidx)
{
	size_t i;
	int err;

	/* Initialize TX lock for this queue */
	snprintf(sq->mtx_name, sizeof(sq->mtx_name), "%s: SQ(%d) lock",
	    device_get_nameunit(nic->dev), qidx);
	mtx_init(&sq->mtx, sq->mtx_name, NULL, MTX_DEF);

	NICVF_TX_LOCK(sq);
	/* Allocate buffer ring */
	sq->br = buf_ring_alloc(q_len / MIN_SQ_DESC_PER_PKT_XMIT, M_DEVBUF,
	    M_NOWAIT, &sq->mtx);
	if (sq->br == NULL) {
		device_printf(nic->dev,
		    "ERROR: Could not set up buf ring for SQ(%d)\n", qidx);
		err = ENOMEM;
		goto error;
	}

	/* Allocate DMA memory for Tx descriptors */
	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
	    NICVF_SQ_BASE_ALIGN_BYTES);
	if (err != 0) {
		device_printf(nic->dev,
		    "Could not allocate DMA memory for SQ\n");
		goto error;
	}

	sq->desc = sq->dmem.base;
	sq->head = sq->tail = 0;
	sq->free_cnt = q_len - 1;
	sq->thresh = SND_QUEUE_THRESH;
	sq->idx = qidx;
	sq->nic = nic;

	/*
	 * Allocate DMA maps for Tx buffers
	 */

	/* Create DMA tag first */
	err = bus_dma_tag_create(
	    bus_get_dma_tag(nic->dev),		/* parent tag */
	    1,					/* alignment */
	    0,					/* boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    NICVF_TSO_MAXSIZE,			/* maxsize */
	    NICVF_TSO_NSEGS,			/* nsegments */
	    MCLBYTES,				/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sq->snd_buff_dmat);		/* dmat */
	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to create busdma tag for Tx buffers\n");
		goto error;
	}
	/* Allocate send buffers array */
	sq->snd_buff = malloc(sizeof(*sq->snd_buff) * q_len, M_NICVF,
	    (M_NOWAIT | M_ZERO));
	if (sq->snd_buff == NULL) {
		device_printf(nic->dev,
		    "Could not allocate memory for Tx buffers array\n");
		err = ENOMEM;
		goto error;
	}

	/* Now populate maps */
	for (i = 0; i < q_len; i++) {
		err = bus_dmamap_create(sq->snd_buff_dmat, 0,
		    &sq->snd_buff[i].dmap);
		if (err != 0) {
			device_printf(nic->dev,
			    "Failed to create DMA maps for Tx buffers\n");
			goto error;
		}
	}
	NICVF_TX_UNLOCK(sq);

	/* Allocate taskqueue */
	TASK_INIT(&sq->snd_task, 0, nicvf_snd_task, sq);
	sq->snd_taskq = taskqueue_create_fast("nicvf_snd_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sq->snd_taskq);
	taskqueue_start_threads(&sq->snd_taskq, 1, PI_NET, "%s: snd_taskq(%d)",
	    device_get_nameunit(nic->dev), qidx);

	return (0);
error:
	NICVF_TX_UNLOCK(sq);
	return (err);
}

static void
nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
	struct queue_set *qs = nic->qs;
	size_t i;
	int err;

	if (sq == NULL)
		return;

	if (sq->snd_taskq != NULL) {
		/* Remove task */
		while (taskqueue_cancel(sq->snd_taskq, &sq->snd_task, NULL) != 0)
			taskqueue_drain(sq->snd_taskq, &sq->snd_task);

		taskqueue_free(sq->snd_taskq);
		sq->snd_taskq = NULL;
	}

	NICVF_TX_LOCK(sq);
	if (sq->snd_buff_dmat != NULL) {
		if (sq->snd_buff != NULL) {
			for (i = 0; i < qs->sq_len; i++) {
				m_freem(sq->snd_buff[i].mbuf);
				sq->snd_buff[i].mbuf = NULL;

				bus_dmamap_unload(sq->snd_buff_dmat,
				    sq->snd_buff[i].dmap);
				err = bus_dmamap_destroy(sq->snd_buff_dmat,
				    sq->snd_buff[i].dmap);
				/*
				 * If bus_dmamap_destroy fails it can cause
				 * a random panic later if the tag is also
				 * destroyed in the process.
				 */
				KASSERT(err == 0,
				    ("%s: Could not destroy DMA map for SQ",
				    __func__));
			}
		}

		free(sq->snd_buff, M_NICVF);

		err = bus_dma_tag_destroy(sq->snd_buff_dmat);
		KASSERT(err == 0,
		    ("%s: Trying to destroy BUSY DMA tag", __func__));
	}

	/* Free private driver ring for this send queue */
	if (sq->br != NULL)
		drbr_free(sq->br, M_DEVBUF);

	if (sq->dmem.base != NULL)
		nicvf_free_q_desc_mem(nic, &sq->dmem);

	NICVF_TX_UNLOCK(sq);
	/* Destroy Tx lock */
	mtx_destroy(&sq->mtx);
	memset(sq->mtx_name, 0, sizeof(sq->mtx_name));
}

static void
nicvf_reclaim_snd_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
{

	/* Disable send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
	/* Check if SQ is stopped */
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
		return;
	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
}

static void
nicvf_reclaim_rcv_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
{
	union nic_mbx mbx = {};

	/* Make sure all packets in the pipeline are written back into mem */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void
nicvf_reclaim_cmp_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
{

	/* Disable timer threshold (doesn't get reset upon CQ reset) */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	/* Disable completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
}

static void
nicvf_reclaim_rbdr(struct nicvf *nic, struct rbdr *rbdr, int qidx)
{
	uint64_t tmp, fifo_state;
	int timeout = 10;

	/* Save head and tail pointers for freeing up buffers */
	rbdr->head =
	    nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
	rbdr->tail =
	    nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;
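	/*
	 * The indexes saved above are what nicvf_free_rbdr() later walks
	 * to unload and free the buffers that were still posted to the
	 * ring when it was stopped.
	 */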
	/*
	 * If the RBDR FIFO is in 'FAIL' state then do a reset first
	 * before reclaiming.
	 */
	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
	if (((fifo_state >> 62) & 0x03) == 0x3) {
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
		    qidx, NICVF_RBDR_RESET);
	}

	/* Disable RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
	while (1) {
		tmp = nicvf_queue_reg_read(nic,
		    NIC_QSET_RBDR_0_1_PREFETCH_STATUS, qidx);
		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
			break;

		DELAY(1000);
		timeout--;
		if (!timeout) {
			device_printf(nic->dev,
			    "Failed polling on prefetch status\n");
			return;
		}
	}
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
	    NICVF_RBDR_RESET);

	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
		return;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
}

/* Configures receive queue */
static void
nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
    int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct rcv_queue *rq;
	struct rq_cfg rq_cfg;
	struct ifnet *ifp;
	struct lro_ctrl *lro;

	ifp = nic->ifp;

	rq = &qs->rq[qidx];
	rq->enable = enable;

	lro = &rq->lro;

	/* Disable receive queue */
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);

	if (!rq->enable) {
		nicvf_reclaim_rcv_queue(nic, qs, qidx);
		/* Free LRO memory */
		tcp_lro_free(lro);
		rq->lro_enabled = FALSE;
		return;
	}

	/* Configure LRO if enabled */
	rq->lro_enabled = FALSE;
	if ((if_getcapenable(ifp) & IFCAP_LRO) != 0) {
		if (tcp_lro_init(lro) != 0) {
			device_printf(nic->dev,
			    "Failed to initialize LRO for RXQ%d\n", qidx);
		} else {
			rq->lro_enabled = TRUE;
			lro->ifp = nic->ifp;
		}
	}

	rq->cq_qs = qs->vnic_id;
	rq->cq_idx = qidx;
	rq->start_rbdr_qs = qs->vnic_id;
	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
	rq->cont_rbdr_qs = qs->vnic_id;
	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
	/* All writes of RBDR data are to be loaded into the L2 cache as well */
	rq->caching = 1;

	/* Send a mailbox msg to PF to config RQ */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
	mbx.rq.qs_num = qs->vnic_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
	    (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
	    (rq->cont_qs_rbdr_idx << 8) | (rq->start_rbdr_qs << 1) |
	    (rq->start_qs_rbdr_idx);
	nicvf_send_msg_to_pf(nic, &mbx);

	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
	mbx.rq.cfg = (1UL << 63) | (1UL << 62) | (qs->vnic_id << 0);
	nicvf_send_msg_to_pf(nic, &mbx);

	/*
	 * RQ drop config
	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
	 */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
	mbx.rq.cfg = (1UL << 62) | (RQ_CQ_DROP << 8);
	nicvf_send_msg_to_pf(nic, &mbx);

	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);

	/* Enable Receive queue */
	rq_cfg.ena = 1;
	rq_cfg.tcp_ena = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx,
	    *(uint64_t *)&rq_cfg);
}
/* Configures completion queue */
static void
nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
    int qidx, boolean_t enable)
{
	struct cmp_queue *cq;
	struct cq_cfg cq_cfg;

	cq = &qs->cq[qidx];
	cq->enable = enable;

	if (!cq->enable) {
		nicvf_reclaim_cmp_queue(nic, qs, qidx);
		return;
	}

	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

	/* Set completion queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx,
	    (uint64_t)(cq->dmem.phys_base));

	/* Enable Completion queue */
	cq_cfg.ena = 1;
	cq_cfg.reset = 0;
	cq_cfg.caching = 0;
	cq_cfg.qsize = CMP_QSIZE;
	cq_cfg.avg_con = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(uint64_t *)&cq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx,
	    nic->cq_coalesce_usecs);
}

/* Configures transmit queue */
static void
nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx,
    boolean_t enable)
{
	union nic_mbx mbx = {};
	struct snd_queue *sq;
	struct sq_cfg sq_cfg;

	sq = &qs->sq[qidx];
	sq->enable = enable;

	if (!sq->enable) {
		nicvf_reclaim_snd_queue(nic, qs, qidx);
		return;
	}

	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

	sq->cq_qs = qs->vnic_id;
	sq->cq_idx = qidx;

	/* Send a mailbox msg to PF to config SQ */
	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
	mbx.sq.qs_num = qs->vnic_id;
	mbx.sq.sq_num = qidx;
	mbx.sq.sqs_mode = nic->sqs_mode;
	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx,
	    (uint64_t)(sq->dmem.phys_base));

	/* Enable send queue & set queue size */
	sq_cfg.ena = 1;
	sq_cfg.reset = 0;
	sq_cfg.ldwb = 0;
	sq_cfg.qsize = SND_QSIZE;
	sq_cfg.tstmp_bgx_intf = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(uint64_t *)&sq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
}

/* Configures receive buffer descriptor ring */
static void
nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, int qidx,
    boolean_t enable)
{
	struct rbdr *rbdr;
	struct rbdr_cfg rbdr_cfg;

	rbdr = &qs->rbdr[qidx];
	nicvf_reclaim_rbdr(nic, rbdr, qidx);
	if (!enable)
		return;

	/* Set descriptor base address */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx,
	    (uint64_t)(rbdr->dmem.phys_base));

	/* Enable RBDR & set queue size */
	/* Buffer size should be in multiples of 128 bytes */
	rbdr_cfg.ena = 1;
	rbdr_cfg.reset = 0;
	rbdr_cfg.ldwb = 0;
	rbdr_cfg.qsize = RBDR_SIZE;
	rbdr_cfg.avg_con = 0;
	rbdr_cfg.lines = rbdr->dma_size / 128;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
	    *(uint64_t *)&rbdr_cfg);

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, qidx,
	    qs->rbdr_len - 1);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, qidx,
	    rbdr->thresh - 1);
}
/* Requests PF to assign and enable Qset */
void
nicvf_qset_config(struct nicvf *nic, boolean_t enable)
{
	union nic_mbx mbx = {};
	struct queue_set *qs;
	struct qs_cfg *qs_cfg;

	qs = nic->qs;
	if (qs == NULL) {
		device_printf(nic->dev,
		    "Qset is still not allocated, don't init queues\n");
		return;
	}

	qs->enable = enable;
	qs->vnic_id = nic->vf_id;

	/* Send a mailbox msg to PF to config Qset */
	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
	mbx.qs.num = qs->vnic_id;

	mbx.qs.cfg = 0;
	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
	if (qs->enable) {
		qs_cfg->ena = 1;
		qs_cfg->vnic = qs->vnic_id;
	}
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void
nicvf_free_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs;

	qs = nic->qs;
	/*
	 * Remove the QS error task first since it has to be dead
	 * to safely free the completion queue tasks.
	 */
	if (qs->qs_err_taskq != NULL) {
		/* Shut down QS error tasks */
		while (taskqueue_cancel(qs->qs_err_taskq,
		    &qs->qs_err_task, NULL) != 0) {
			taskqueue_drain(qs->qs_err_taskq, &qs->qs_err_task);
		}
		taskqueue_free(qs->qs_err_taskq);
		qs->qs_err_taskq = NULL;
	}
	/* Free receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);

	/* Free completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);

	/* Free send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
}

static int
nicvf_alloc_resources(struct nicvf *nic)
{
	struct queue_set *qs = nic->qs;
	int qidx;

	/* Alloc receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
		    DMA_BUFFER_LEN, qidx))
			goto alloc_fail;
	}

	/* Alloc send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
			goto alloc_fail;
	}

	/* Alloc completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len, qidx))
			goto alloc_fail;
	}

	/* Allocate QS error taskqueue */
	TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic);
	qs->qs_err_taskq = taskqueue_create_fast("nicvf_qs_err_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &qs->qs_err_taskq);
	taskqueue_start_threads(&qs->qs_err_taskq, 1, PI_NET, "%s: qs_taskq",
	    device_get_nameunit(nic->dev));

	return (0);
alloc_fail:
	nicvf_free_resources(nic);
	return (ENOMEM);
}

int
nicvf_set_qset_resources(struct nicvf *nic)
{
	struct queue_set *qs;

	qs = malloc(sizeof(*qs), M_NICVF, (M_ZERO | M_WAITOK));
	nic->qs = qs;

	/* Set count of each queue */
	qs->rbdr_cnt = RBDR_CNT;
	qs->rq_cnt = RCV_QUEUE_CNT;

	qs->sq_cnt = SND_QUEUE_CNT;
	qs->cq_cnt = CMP_QUEUE_CNT;

	/* Set queue lengths */
	qs->rbdr_len = RCV_BUF_COUNT;
	qs->sq_len = SND_QUEUE_LEN;
	qs->cq_len = CMP_QUEUE_LEN;

	nic->rx_queues = qs->rq_cnt;
	nic->tx_queues = qs->sq_cnt;

	return (0);
}
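/*
 * Note the ordering below: on enable, SQs and CQs are configured before
 * the RBDRs and RQs, so completion resources exist before any traffic
 * can arrive; on disable, the RQs and RBDRs are stopped first so that
 * no new packets are posted while the SQs and CQs are torn down.
 */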
int
nicvf_config_data_transfer(struct nicvf *nic, boolean_t enable)
{
	boolean_t disable = FALSE;
	struct queue_set *qs;
	int qidx;

	qs = nic->qs;
	if (qs == NULL)
		return (0);

	if (enable) {
		if (nicvf_alloc_resources(nic) != 0)
			return (ENOMEM);

		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, enable);
	} else {
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, disable);

		nicvf_free_resources(nic);
	}

	return (0);
}

/*
 * Get a free desc from SQ
 * returns descriptor pointer & descriptor number
 */
static __inline int
nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	int qentry;

	qentry = sq->tail;
	sq->free_cnt -= desc_cnt;
	sq->tail += desc_cnt;
	sq->tail &= (sq->dmem.q_len - 1);

	return (qentry);
}

/* Free descriptor back to SQ for future use */
static void
nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{

	sq->free_cnt += desc_cnt;
	sq->head += desc_cnt;
	sq->head &= (sq->dmem.q_len - 1);
}

static __inline int
nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
{
	qentry++;
	qentry &= (sq->dmem.q_len - 1);
	return (qentry);
}

static void
nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
	uint64_t sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg |= NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
	/* Ring doorbell so that H/W restarts processing SQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
}

static void
nicvf_sq_disable(struct nicvf *nic, int qidx)
{
	uint64_t sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg &= ~NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
}

static void
nicvf_sq_free_used_descs(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
	uint64_t head, tail;
	struct snd_buff *snd_buff;
	struct sq_hdr_subdesc *hdr;

	NICVF_TX_LOCK(sq);
	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
	while (sq->head != head) {
		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
			nicvf_put_sq_desc(sq, 1);
			continue;
		}
		snd_buff = &sq->snd_buff[sq->head];
		if (snd_buff->mbuf != NULL) {
			bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
			m_freem(snd_buff->mbuf);
			sq->snd_buff[sq->head].mbuf = NULL;
		}
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}
	NICVF_TX_UNLOCK(sq);
}
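/*
 * Every packet on the SQ occupies one header subdescriptor followed by
 * one gather subdescriptor per DMA segment (MIN_SQ_DESC_PER_PKT_XMIT
 * accounts for the header plus the first gather entry), which is why
 * reclaim and completion both advance by subdesc_cnt + 1 slots at a time.
 */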
/*
 * Add an SQ HEADER subdescriptor.
 * First subdescriptor for every send descriptor.
 */
static __inline int
nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
    int subdesc_cnt, struct mbuf *mbuf, int len)
{
	struct nicvf *nic;
	struct sq_hdr_subdesc *hdr;
	struct ether_vlan_header *eh;
#ifdef INET
	struct ip *ip;
	struct tcphdr *th;
#endif
	uint16_t etype;
	int ehdrlen, iphlen, poff;

	nic = sq->nic;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	sq->snd_buff[qentry].mbuf = mbuf;

	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
	/* Enable notification via CQE after processing SQE */
	hdr->post_cqe = 1;
	/* Number of subdescriptors following this one */
	hdr->subdesc_cnt = subdesc_cnt;
	hdr->tot_len = len;

	eh = mtod(mbuf, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		etype = ntohs(eh->evl_proto);
	} else {
		ehdrlen = ETHER_HDR_LEN;
		etype = ntohs(eh->evl_encap_proto);
	}

	switch (etype) {
#ifdef INET6
	case ETHERTYPE_IPV6:
		/* ARM64TODO: Add support for IPv6 */
		hdr->csum_l3 = 0;
		sq->snd_buff[qentry].mbuf = NULL;
		return (ENXIO);
#endif
#ifdef INET
	case ETHERTYPE_IP:
		if (mbuf->m_len < ehdrlen + sizeof(struct ip)) {
			mbuf = m_pullup(mbuf, ehdrlen + sizeof(struct ip));
			sq->snd_buff[qentry].mbuf = mbuf;
			if (mbuf == NULL)
				return (ENOBUFS);
		}

		ip = (struct ip *)(mbuf->m_data + ehdrlen);
		iphlen = ip->ip_hl << 2;
		poff = ehdrlen + iphlen;

		if (mbuf->m_pkthdr.csum_flags != 0) {
			hdr->csum_l3 = 1; /* Enable IP csum calculation */
			switch (ip->ip_p) {
			case IPPROTO_TCP:
				if ((mbuf->m_pkthdr.csum_flags & CSUM_TCP) == 0)
					break;

				if (mbuf->m_len < (poff + sizeof(struct tcphdr))) {
					mbuf = m_pullup(mbuf,
					    poff + sizeof(struct tcphdr));
					sq->snd_buff[qentry].mbuf = mbuf;
					if (mbuf == NULL)
						return (ENOBUFS);
				}
				hdr->csum_l4 = SEND_L4_CSUM_TCP;
				break;
			case IPPROTO_UDP:
				if ((mbuf->m_pkthdr.csum_flags & CSUM_UDP) == 0)
					break;

				if (mbuf->m_len < (poff + sizeof(struct udphdr))) {
					mbuf = m_pullup(mbuf,
					    poff + sizeof(struct udphdr));
					sq->snd_buff[qentry].mbuf = mbuf;
					if (mbuf == NULL)
						return (ENOBUFS);
				}
				hdr->csum_l4 = SEND_L4_CSUM_UDP;
				break;
			case IPPROTO_SCTP:
				if ((mbuf->m_pkthdr.csum_flags & CSUM_SCTP) == 0)
					break;

				if (mbuf->m_len < (poff + sizeof(struct sctphdr))) {
					mbuf = m_pullup(mbuf,
					    poff + sizeof(struct sctphdr));
					sq->snd_buff[qentry].mbuf = mbuf;
					if (mbuf == NULL)
						return (ENOBUFS);
				}
				hdr->csum_l4 = SEND_L4_CSUM_SCTP;
				break;
			default:
				break;
			}
			hdr->l3_offset = ehdrlen;
			hdr->l4_offset = ehdrlen + iphlen;
		}
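		/*
		 * For hardware TSO the header subdescriptor additionally
		 * carries the start of the L4 payload and the maximum
		 * segment size, letting the NIC slice the frame into
		 * MSS-sized segments on its own.
		 */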
		if ((mbuf->m_pkthdr.tso_segsz != 0) && nic->hw_tso) {
			/*
			 * Extract ip again as m_data could have been modified.
			 */
			ip = (struct ip *)(mbuf->m_data + ehdrlen);
			th = (struct tcphdr *)((caddr_t)ip + iphlen);

			hdr->tso = 1;
			hdr->tso_start = ehdrlen + iphlen + (th->th_off * 4);
			hdr->tso_max_paysize = mbuf->m_pkthdr.tso_segsz;
			hdr->inner_l3_offset = ehdrlen - 2;
			nic->drv_stats.tx_tso++;
		}
		break;
#endif
	default:
		hdr->csum_l3 = 0;
	}

	return (0);
}

/*
 * SQ GATHER subdescriptor
 * Must follow HDR descriptor
 */
static inline void
nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
    int size, uint64_t data)
{
	struct sq_gather_subdesc *gather;

	qentry &= (sq->dmem.q_len - 1);
	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);

	memset(gather, 0, SND_QUEUE_DESC_SIZE);
	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
	gather->size = size;
	gather->addr = data;
}

/* Put an mbuf to an SQ for packet transfer. */
static int
nicvf_tx_mbuf_locked(struct snd_queue *sq, struct mbuf **mbufp)
{
	bus_dma_segment_t segs[256];
	struct snd_buff *snd_buff;
	size_t seg;
	int nsegs, qentry;
	int subdesc_cnt;
	int err;

	NICVF_TX_LOCK_ASSERT(sq);

	if (sq->free_cnt == 0)
		return (ENOBUFS);

	snd_buff = &sq->snd_buff[sq->tail];

	err = bus_dmamap_load_mbuf_sg(sq->snd_buff_dmat, snd_buff->dmap,
	    *mbufp, segs, &nsegs, BUS_DMA_NOWAIT);
	if (__predict_false(err != 0)) {
		/* ARM64TODO: Add mbuf defragmenting if we lack maps */
		m_freem(*mbufp);
		*mbufp = NULL;
		return (err);
	}

	/* Set how many subdescriptors are required */
	subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT + nsegs - 1;
	if (subdesc_cnt > sq->free_cnt) {
		/* ARM64TODO: Add mbuf defragmentation if we lack descriptors */
		bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
		return (ENOBUFS);
	}

	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

	/* Add SQ header subdesc */
	err = nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, *mbufp,
	    (*mbufp)->m_pkthdr.len);
	if (err != 0) {
		nicvf_put_sq_desc(sq, subdesc_cnt);
		bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
		if (err == ENOBUFS) {
			m_freem(*mbufp);
			*mbufp = NULL;
		}
		return (err);
	}

	/* Add SQ gather subdescs */
	for (seg = 0; seg < nsegs; seg++) {
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		nicvf_sq_add_gather_subdesc(sq, qentry, segs[seg].ds_len,
		    segs[seg].ds_addr);
	}

	/* Make sure all memory stores are done before ringing the doorbell */
	bus_dmamap_sync(sq->dmem.dmat, sq->dmem.dmap, BUS_DMASYNC_PREWRITE);

	dprintf(sq->nic->dev, "%s: sq->idx: %d, subdesc_cnt: %d\n",
	    __func__, sq->idx, subdesc_cnt);
	/* Inform HW to xmit new packet */
	nicvf_queue_reg_write(sq->nic, NIC_QSET_SQ_0_7_DOOR,
	    sq->idx, subdesc_cnt);
	return (0);
}

static __inline u_int
frag_num(u_int i)
{
#if BYTE_ORDER == BIG_ENDIAN
	return ((i & ~3) + 3 - (i & 3));
#else
	return (i);
#endif
}

/* Returns MBUF for a received packet */
struct mbuf *
nicvf_get_rcv_mbuf(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	int frag;
	int payload_len = 0;
	struct mbuf *mbuf;
	struct mbuf *mbuf_frag;
	uint16_t *rb_lens = NULL;
	uint64_t *rb_ptrs = NULL;

	mbuf = NULL;
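	/*
	 * Per the CQE_RX layout, the array of receive buffer lengths
	 * starts at the fourth 64-bit word of the CQE and the buffer
	 * pointers at the seventh, hence the fixed byte offsets below.
	 */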
	rb_lens = (uint16_t *)((uint8_t *)cqe_rx + (3 * sizeof(uint64_t)));
	rb_ptrs = (uint64_t *)((uint8_t *)cqe_rx + (6 * sizeof(uint64_t)));

	dprintf(nic->dev, "%s rb_cnt %d rb0_ptr %lx rb0_sz %d\n",
	    __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);

	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
		payload_len = rb_lens[frag_num(frag)];
		if (frag == 0) {
			/* First fragment */
			mbuf = nicvf_rb_ptr_to_mbuf(nic,
			    (*rb_ptrs - cqe_rx->align_pad));
			mbuf->m_len = payload_len;
			mbuf->m_data += cqe_rx->align_pad;
			if_setrcvif(mbuf, nic->ifp);
		} else {
			/* Add fragments */
			mbuf_frag = nicvf_rb_ptr_to_mbuf(nic, *rb_ptrs);
			m_append(mbuf, payload_len, mbuf_frag->m_data);
			m_freem(mbuf_frag);
		}
		/* Next buffer pointer */
		rb_ptrs++;
	}

	if (__predict_true(mbuf != NULL)) {
		m_fixhdr(mbuf);
		mbuf->m_pkthdr.flowid = cqe_rx->rq_idx;
		M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
		if (__predict_true((if_getcapenable(nic->ifp) & IFCAP_RXCSUM) != 0)) {
			/*
			 * HW by default verifies IP & TCP/UDP/SCTP checksums
			 */
			if (__predict_true(cqe_rx->l3_type == L3TYPE_IPV4)) {
				mbuf->m_pkthdr.csum_flags =
				    (CSUM_IP_CHECKED | CSUM_IP_VALID);
			}

			switch (cqe_rx->l4_type) {
			case L4TYPE_UDP:
			case L4TYPE_TCP: /* fall through */
				mbuf->m_pkthdr.csum_flags |=
				    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
				mbuf->m_pkthdr.csum_data = 0xffff;
				break;
			case L4TYPE_SCTP:
				mbuf->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
				break;
			default:
				break;
			}
		}
	}

	return (mbuf);
}

/* Enable interrupt */
void
nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	uint64_t reg_val;

	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		device_printf(nic->dev,
		    "Failed to enable interrupt: unknown type\n");
		break;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
}
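/*
 * The enable register pair follows the usual write-1-to-set (W1S) /
 * write-1-to-clear (W1C) convention: writing the mask to NIC_VF_ENA_W1C
 * below clears only the selected enable bits, so no read-modify-write
 * of the other interrupt sources is needed.
 */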
/* Clear interrupt */
void
nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
	uint64_t reg_val = 0;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val = (1UL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val = (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val = (1UL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val = (1UL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		device_printf(nic->dev,
		    "Failed to clear interrupt: unknown type\n");
		break;
	}

	nicvf_reg_write(nic, NIC_VF_INT, reg_val);
}

/* Check if interrupt is enabled */
int
nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
	uint64_t reg_val;
	uint64_t mask = 0xff;

	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);

	switch (int_type) {
	case NICVF_INTR_CQ:
		mask = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		mask = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		mask = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		mask = NICVF_INTR_PKT_DROP_MASK;
		break;
	case NICVF_INTR_TCP_TIMER:
		mask = NICVF_INTR_TCP_TIMER_MASK;
		break;
	case NICVF_INTR_MBOX:
		mask = NICVF_INTR_MBOX_MASK;
		break;
	case NICVF_INTR_QS_ERR:
		mask = NICVF_INTR_QS_ERR_MASK;
		break;
	default:
		device_printf(nic->dev,
		    "Failed to check interrupt enable: unknown type\n");
		break;
	}

	return (reg_val & mask);
}

void
nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
{
	struct rcv_queue *rq;

#define GET_RQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
	    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	rq = &nic->qs->rq[rq_idx];
	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
}

void
nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
{
	struct snd_queue *sq;

#define GET_SQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
	    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	sq = &nic->qs->sq[sq_idx];
	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
}
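/*
 * Illustrative sketch, not compiled into the driver: the GET_RQ_STATS()
 * and GET_SQ_STATS() macros above compose a register offset from three
 * parts: the queue-set statistics base, the queue number shifted by
 * NIC_Q_NUM_SHIFT, and the statistic index (RQ_SQ_STATS_OCTS or
 * RQ_SQ_STATS_PKTS) scaled by the 8-byte register stride (<< 3). The
 * helper below (example_rq_stat_reg() is not a real driver function)
 * spells out the same arithmetic:
 */
#if 0
static uint64_t
example_rq_stat_reg(int rq_idx, int stat_idx)
{

	/* Base | queue number | statistic index * sizeof(uint64_t). */
	return (NIC_QSET_RQ_0_7_STAT_0_1 |
	    ((uint64_t)rq_idx << NIC_Q_NUM_SHIFT) |
	    ((uint64_t)stat_idx << 3));
}
#endif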
/* Check for errors in the receive completion queue entry */
int
nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cmp_queue *cq,
    struct cqe_rx_t *cqe_rx)
{
	struct nicvf_hw_stats *stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;

	if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
		drv_stats->rx_frames_ok++;
		return (0);
	}

	switch (cqe_rx->err_opcode) {
	case CQ_RX_ERROP_RE_PARTIAL:
		stats->rx_bgx_truncated_pkts++;
		break;
	case CQ_RX_ERROP_RE_JABBER:
		stats->rx_jabber_errs++;
		break;
	case CQ_RX_ERROP_RE_FCS:
		stats->rx_fcs_errs++;
		break;
	case CQ_RX_ERROP_RE_RX_CTL:
		stats->rx_bgx_errs++;
		break;
	case CQ_RX_ERROP_PREL2_ERR:
		stats->rx_prel2_errs++;
		break;
	case CQ_RX_ERROP_L2_MAL:
		stats->rx_l2_hdr_malformed++;
		break;
	case CQ_RX_ERROP_L2_OVERSIZE:
		stats->rx_oversize++;
		break;
	case CQ_RX_ERROP_L2_UNDERSIZE:
		stats->rx_undersize++;
		break;
	case CQ_RX_ERROP_L2_LENMISM:
		stats->rx_l2_len_mismatch++;
		break;
	case CQ_RX_ERROP_L2_PCLP:
		stats->rx_l2_pclp++;
		break;
	case CQ_RX_ERROP_IP_NOT:
		stats->rx_ip_ver_errs++;
		break;
	case CQ_RX_ERROP_IP_CSUM_ERR:
		stats->rx_ip_csum_errs++;
		break;
	case CQ_RX_ERROP_IP_MAL:
		stats->rx_ip_hdr_malformed++;
		break;
	case CQ_RX_ERROP_IP_MALD:
		stats->rx_ip_payload_malformed++;
		break;
	case CQ_RX_ERROP_IP_HOP:
		stats->rx_ip_ttl_errs++;
		break;
	case CQ_RX_ERROP_L3_PCLP:
		stats->rx_l3_pclp++;
		break;
	case CQ_RX_ERROP_L4_MAL:
		stats->rx_l4_malformed++;
		break;
	case CQ_RX_ERROP_L4_CHK:
		stats->rx_l4_csum_errs++;
		break;
	case CQ_RX_ERROP_UDP_LEN:
		stats->rx_udp_len_errs++;
		break;
	case CQ_RX_ERROP_L4_PORT:
		stats->rx_l4_port_errs++;
		break;
	case CQ_RX_ERROP_TCP_FLAG:
		stats->rx_tcp_flag_errs++;
		break;
	case CQ_RX_ERROP_TCP_OFFSET:
		stats->rx_tcp_offset_errs++;
		break;
	case CQ_RX_ERROP_L4_PCLP:
		stats->rx_l4_pclp++;
		break;
	case CQ_RX_ERROP_RBDR_TRUNC:
		stats->rx_truncated_pkts++;
		break;
	}

	return (1);
}

/* Check for errors in the send completion queue entry */
int
nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cmp_queue *cq,
    struct cqe_send_t *cqe_tx)
{
	struct cmp_queue_stats *stats = &cq->stats;

	switch (cqe_tx->send_status) {
	case CQ_TX_ERROP_GOOD:
		stats->tx.good++;
		return (0);
	case CQ_TX_ERROP_DESC_FAULT:
		stats->tx.desc_fault++;
		break;
	case CQ_TX_ERROP_HDR_CONS_ERR:
		stats->tx.hdr_cons_err++;
		break;
	case CQ_TX_ERROP_SUBDC_ERR:
		stats->tx.subdesc_err++;
		break;
	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
		stats->tx.imm_size_oflow++;
		break;
	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
		stats->tx.data_seq_err++;
		break;
	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
		stats->tx.mem_seq_err++;
		break;
	case CQ_TX_ERROP_LOCK_VIOL:
		stats->tx.lock_viol++;
		break;
	case CQ_TX_ERROP_DATA_FAULT:
		stats->tx.data_fault++;
		break;
	case CQ_TX_ERROP_TSTMP_CONFLICT:
		stats->tx.tstmp_conflict++;
		break;
	case CQ_TX_ERROP_TSTMP_TIMEOUT:
		stats->tx.tstmp_timeout++;
		break;
	case CQ_TX_ERROP_MEM_FAULT:
		stats->tx.mem_fault++;
		break;
	case CQ_TX_ERROP_CK_OVERLAP:
		stats->tx.csum_overlap++;
		break;
	case CQ_TX_ERROP_CK_OFLOW:
		stats->tx.csum_overflow++;
		break;
	}

	return (1);
}
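/*
 * Illustrative sketch, not compiled into the driver: both checkers above
 * return 0 for a clean completion and 1 after bumping the matching error
 * counter, so a completion-queue service loop can branch on the return
 * value alone. A hypothetical RX handling step (example_handle_cqe_rx()
 * is not a real driver function) could look like this:
 */
#if 0
static int
example_handle_cqe_rx(struct nicvf *nic, struct cmp_queue *cq,
    struct cqe_rx_t *cqe_rx)
{
	struct mbuf *mbuf;

	/* The error counter was already updated; just drop the frame. */
	if (nicvf_check_cqe_rx_errs(nic, cq, cqe_rx) != 0)
		return (EIO);

	/* Clean completion: reassemble the frame for the stack. */
	mbuf = nicvf_get_rcv_mbuf(nic, cqe_rx);
	if (mbuf == NULL)
		return (ENOMEM);

	/* Hand "mbuf" to the network stack here. */
	return (0);
}
#endif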