nicvf_queues.c revision 296030
/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/vnic/nicvf_queues.c 296030 2016-02-25 14:12:51Z zbb $
 *
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/vnic/nicvf_queues.c 296030 2016-02-25 14:12:51Z zbb $");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/buf_ring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sockio.h>
#include <sys/socket.h>
#include <sys/stdatomic.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/vmparam.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/ifq.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/sctp.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet/udp.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "thunder_bgx.h"
#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "nicvf_queues.h"

#define	DEBUG
#undef DEBUG

#ifdef DEBUG
#define	dprintf(dev, fmt, ...)	device_printf(dev, fmt, ##__VA_ARGS__)
#else
#define	dprintf(dev, fmt, ...)
#endif

MALLOC_DECLARE(M_NICVF);

static void nicvf_free_snd_queue(struct nicvf *, struct snd_queue *);
static int nicvf_tx_mbuf_locked(struct snd_queue *, struct mbuf *);
static struct mbuf * nicvf_get_rcv_mbuf(struct nicvf *, struct cqe_rx_t *);
static void nicvf_sq_disable(struct nicvf *, int);
static void nicvf_sq_enable(struct nicvf *, struct snd_queue *, int);
static void nicvf_put_sq_desc(struct snd_queue *, int);
static void nicvf_cmp_queue_config(struct nicvf *, struct queue_set *, int,
    boolean_t);
static void nicvf_sq_free_used_descs(struct nicvf *, struct snd_queue *, int);

static void nicvf_rbdr_task(void *, int);
static void nicvf_rbdr_task_nowait(void *, int);

struct rbuf_info {
	bus_dma_tag_t	dmat;
	bus_dmamap_t	dmap;
	struct mbuf *	mbuf;
};

#define GET_RBUF_INFO(x) ((struct rbuf_info *)((x) - NICVF_RCV_BUF_ALIGN_BYTES))
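
/*
 * Receive buffer layout (see nicvf_alloc_rcv_buffer() below): every RB is an
 * mbuf cluster whose first NICVF_RCV_BUF_ALIGN_BYTES (one 128-byte cache
 * line) are reserved for the rbuf_info metadata above. The address handed to
 * the hardware points just past that line, so GET_RBUF_INFO() steps back by
 * the same amount to recover the metadata from a DMAP'ed buffer address.
 */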

/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
    uint64_t reg, int bit_pos, int bits, int val)
{
	uint64_t bit_mask;
	uint64_t reg_val;
	int timeout = 10;

	bit_mask = (1UL << bits) - 1;
	bit_mask = (bit_mask << bit_pos);

	while (timeout) {
		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
		if (((reg_val & bit_mask) >> bit_pos) == val)
			return (0);

		DELAY(1000);
		timeout--;
	}
	device_printf(nic->dev, "Poll on reg 0x%lx failed\n", reg);
	return (ETIMEDOUT);
}

/* Callback for bus_dmamap_load() */
static void
nicvf_dmamap_q_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr;

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	paddr = arg;
	*paddr = segs->ds_addr;
}

/* Allocate memory for a queue's descriptors */
static int
nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
    int q_len, int desc_size, int align_bytes)
{
	int err, err_dmat;

	/* Create DMA tag first */
	err = bus_dma_tag_create(
	    bus_get_dma_tag(nic->dev),		/* parent tag */
	    align_bytes,			/* alignment */
	    0,					/* boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    (q_len * desc_size),		/* maxsize */
	    1,					/* nsegments */
	    (q_len * desc_size),		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &dmem->dmat);			/* dmat */

	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to create busdma tag for descriptors ring\n");
		return (err);
	}

	/* Allocate segment of continuous DMA safe memory */
	err = bus_dmamem_alloc(
	    dmem->dmat,				/* DMA tag */
	    &dmem->base,			/* virtual address */
	    (BUS_DMA_NOWAIT | BUS_DMA_ZERO),	/* flags */
	    &dmem->dmap);			/* DMA map */
	if (err != 0) {
		device_printf(nic->dev, "Failed to allocate DMA safe memory for "
		    "descriptors ring\n");
		goto dmamem_fail;
	}

	err = bus_dmamap_load(
	    dmem->dmat,
	    dmem->dmap,
	    dmem->base,
	    (q_len * desc_size),		/* allocation size */
	    nicvf_dmamap_q_cb,			/* map to DMA address cb. */
	    &dmem->phys_base,			/* physical address */
	    BUS_DMA_NOWAIT);
	if (err != 0) {
		device_printf(nic->dev,
		    "Cannot load DMA map of descriptors ring\n");
		goto dmamap_fail;
	}

	dmem->q_len = q_len;
	dmem->size = (desc_size * q_len);

	return (0);

dmamap_fail:
	bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
	dmem->phys_base = 0;
dmamem_fail:
	err_dmat = bus_dma_tag_destroy(dmem->dmat);
	dmem->base = NULL;
	KASSERT(err_dmat == 0,
	    ("%s: Trying to destroy BUSY DMA tag", __func__));

	return (err);
}

/* Free queue's descriptor memory */
static void
nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
	int err;

	if ((dmem == NULL) || (dmem->base == NULL))
		return;

	/* Unload a map */
	bus_dmamap_sync(dmem->dmat, dmem->dmap, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(dmem->dmat, dmem->dmap);
	/* Free DMA memory */
	bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
	/* Destroy DMA tag */
	err = bus_dma_tag_destroy(dmem->dmat);

	KASSERT(err == 0,
	    ("%s: Trying to destroy BUSY DMA tag", __func__));

	dmem->phys_base = 0;
	dmem->base = NULL;
}

/*
 * Allocate buffer for packet reception
 * HW returns memory address where packet is DMA'ed but not a pointer
 * into RBDR ring, so save buffer address at the start of fragment and
 * align the start address to a cache aligned address
 */
static __inline int
nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
    bus_dmamap_t dmap, int mflags, uint32_t buf_len, bus_addr_t *rbuf)
{
	struct mbuf *mbuf;
	struct rbuf_info *rinfo;
	bus_dma_segment_t segs[1];
	int nsegs;
	int err;

	mbuf = m_getjcl(mflags, MT_DATA, M_PKTHDR, MCLBYTES);
	if (mbuf == NULL)
		return (ENOMEM);

	/*
	 * The length is equal to the actual length + one 128b line
	 * used as a room for rbuf_info structure.
	 */
	mbuf->m_len = mbuf->m_pkthdr.len = buf_len;

	err = bus_dmamap_load_mbuf_sg(rbdr->rbdr_buff_dmat, dmap, mbuf, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to map mbuf into DMA visible memory, err: %d\n",
		    err);
		m_freem(mbuf);
		bus_dmamap_destroy(rbdr->rbdr_buff_dmat, dmap);
		return (err);
	}
	if (nsegs != 1)
		panic("Unexpected number of DMA segments for RB: %d", nsegs);
	/*
	 * Now use the room for rbuf_info structure
	 * and adjust mbuf data and length.
	 */
	rinfo = (struct rbuf_info *)mbuf->m_data;
	m_adj(mbuf, NICVF_RCV_BUF_ALIGN_BYTES);

	rinfo->dmat = rbdr->rbdr_buff_dmat;
	rinfo->dmap = dmap;
	rinfo->mbuf = mbuf;

	*rbuf = segs[0].ds_addr + NICVF_RCV_BUF_ALIGN_BYTES;

	return (0);
}

/* Retrieve mbuf for received packet */
static struct mbuf *
nicvf_rb_ptr_to_mbuf(struct nicvf *nic, bus_addr_t rb_ptr)
{
	struct mbuf *mbuf;
	struct rbuf_info *rinfo;

	/* Get buffer start address and alignment offset */
	rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(rb_ptr));

	/* Now retrieve mbuf to give to stack */
	mbuf = rinfo->mbuf;
	if (__predict_false(mbuf == NULL)) {
		panic("%s: Received packet fragment with NULL mbuf",
		    device_get_nameunit(nic->dev));
	}
	/*
	 * Clear the mbuf in the descriptor to indicate
	 * that this slot is processed and free to use.
	 */
	rinfo->mbuf = NULL;

	bus_dmamap_sync(rinfo->dmat, rinfo->dmap, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(rinfo->dmat, rinfo->dmap);

	return (mbuf);
}

/* Allocate RBDR ring and populate receive buffers */
static int
nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, int ring_len,
    int buf_size, int qidx)
{
	bus_dmamap_t dmap;
	bus_addr_t rbuf;
	struct rbdr_entry_t *desc;
	int idx;
	int err;

	/* Allocate rbdr descriptors ring */
	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
	    sizeof(struct rbdr_entry_t), NICVF_RCV_BUF_ALIGN_BYTES);
	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to create RBDR descriptors ring\n");
		return (err);
	}

	rbdr->desc = rbdr->dmem.base;
	/*
	 * Buffer size has to be in multiples of 128 bytes.
	 * Make room for metadata of size of one line (128 bytes).
	 */
	rbdr->dma_size = buf_size - NICVF_RCV_BUF_ALIGN_BYTES;
	rbdr->enable = TRUE;
	rbdr->thresh = RBDR_THRESH;
	rbdr->nic = nic;
	rbdr->idx = qidx;

	/*
	 * Create DMA tag for Rx buffers.
	 * Each map created using this tag is intended to store Rx payload for
	 * one fragment and one header structure containing rbuf_info (thus
	 * additional 128 byte line since RB must be a multiple of 128 byte
	 * cache line).
	 */
	if (buf_size > MCLBYTES) {
		device_printf(nic->dev,
		    "Buffer size too large for mbuf cluster\n");
		return (EINVAL);
	}
	err = bus_dma_tag_create(
	    bus_get_dma_tag(nic->dev),		/* parent tag */
	    NICVF_RCV_BUF_ALIGN_BYTES,		/* alignment */
	    0,					/* boundary */
	    DMAP_MAX_PHYSADDR,			/* lowaddr */
	    DMAP_MIN_PHYSADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    roundup2(buf_size, MCLBYTES),	/* maxsize */
	    1,					/* nsegments */
	    roundup2(buf_size, MCLBYTES),	/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &rbdr->rbdr_buff_dmat);		/* dmat */

	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to create busdma tag for RBDR buffers\n");
		return (err);
	}

	rbdr->rbdr_buff_dmaps = malloc(sizeof(*rbdr->rbdr_buff_dmaps) *
	    ring_len, M_NICVF, (M_WAITOK | M_ZERO));

	for (idx = 0; idx < ring_len; idx++) {
		err = bus_dmamap_create(rbdr->rbdr_buff_dmat, 0, &dmap);
		if (err != 0) {
			device_printf(nic->dev,
			    "Failed to create DMA map for RB\n");
			return (err);
		}
		rbdr->rbdr_buff_dmaps[idx] = dmap;

		err = nicvf_alloc_rcv_buffer(nic, rbdr, dmap, M_WAITOK,
		    DMA_BUFFER_LEN, &rbuf);
		if (err != 0)
			return (err);

		desc = GET_RBDR_DESC(rbdr, idx);
		desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
	}

	/* Allocate taskqueue */
	TASK_INIT(&rbdr->rbdr_task, 0, nicvf_rbdr_task, rbdr);
	TASK_INIT(&rbdr->rbdr_task_nowait, 0, nicvf_rbdr_task_nowait, rbdr);
	rbdr->rbdr_taskq = taskqueue_create_fast("nicvf_rbdr_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &rbdr->rbdr_taskq);
	taskqueue_start_threads(&rbdr->rbdr_taskq, 1, PI_NET, "%s: rbdr_taskq",
	    device_get_nameunit(nic->dev));

	return (0);
}

/* Free RBDR ring and its receive buffers */
static void
nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
	struct mbuf *mbuf;
	struct queue_set *qs;
	struct rbdr_entry_t *desc;
	struct rbuf_info *rinfo;
	bus_addr_t buf_addr;
	int head, tail, idx;
	int err;

	qs = nic->qs;

	if ((qs == NULL) || (rbdr == NULL))
		return;

	rbdr->enable = FALSE;
	if (rbdr->rbdr_taskq != NULL) {
		/* Remove tasks */
		while (taskqueue_cancel(rbdr->rbdr_taskq,
		    &rbdr->rbdr_task_nowait, NULL) != 0) {
			/* Finish the nowait task first */
			taskqueue_drain(rbdr->rbdr_taskq,
			    &rbdr->rbdr_task_nowait);
		}
		taskqueue_free(rbdr->rbdr_taskq);
		rbdr->rbdr_taskq = NULL;

		while (taskqueue_cancel(taskqueue_thread,
		    &rbdr->rbdr_task, NULL) != 0) {
			/* Now finish the sleepable task */
			taskqueue_drain(taskqueue_thread, &rbdr->rbdr_task);
		}
	}

	/*
	 * Free all of the memory under the RB descriptors.
	 * There are assumptions here:
	 * 1. Corresponding RBDR is disabled
	 *    - it is safe to operate using head and tail indexes
	 * 2. All buffers that were received are properly freed by
	 *    the receive handler
	 *    - there is no need to unload DMA map and free MBUF for other
	 *      descriptors than unused ones
	 */
	if (rbdr->rbdr_buff_dmat != NULL) {
		head = rbdr->head;
		tail = rbdr->tail;
		while (head != tail) {
			desc = GET_RBDR_DESC(rbdr, head);
			buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
			rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
			bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
			mbuf = rinfo->mbuf;
			/* This will destroy everything including rinfo! */
			m_freem(mbuf);
			head++;
			head &= (rbdr->dmem.q_len - 1);
		}
		/* Free tail descriptor */
		desc = GET_RBDR_DESC(rbdr, tail);
		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
		rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
		bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
		mbuf = rinfo->mbuf;
		/* This will destroy everything including rinfo! */
		m_freem(mbuf);

		/* Destroy DMA maps */
		for (idx = 0; idx < qs->rbdr_len; idx++) {
			if (rbdr->rbdr_buff_dmaps[idx] == NULL)
				continue;
			err = bus_dmamap_destroy(rbdr->rbdr_buff_dmat,
			    rbdr->rbdr_buff_dmaps[idx]);
			KASSERT(err == 0,
			    ("%s: Could not destroy DMA map for RB, desc: %d",
			    __func__, idx));
			rbdr->rbdr_buff_dmaps[idx] = NULL;
		}

		/* Now destroy the tag */
		err = bus_dma_tag_destroy(rbdr->rbdr_buff_dmat);
		KASSERT(err == 0,
		    ("%s: Trying to destroy BUSY DMA tag", __func__));

		rbdr->head = 0;
		rbdr->tail = 0;
	}

	/* Free RBDR ring */
	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}
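
/*
 * RBDR refill strategy: nicvf_rbdr_task_nowait() refills the ring with
 * M_NOWAIT allocations. If that cannot replenish every missing buffer, the
 * sleepable nicvf_rbdr_task() is queued on taskqueue_thread and retries
 * with M_WAITOK (panicking if even that fails).
 */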

/*
 * Refill receive buffer descriptors with new buffers.
 */
static int
nicvf_refill_rbdr(struct rbdr *rbdr, int mflags)
{
	struct nicvf *nic;
	struct queue_set *qs;
	int rbdr_idx;
	int tail, qcount;
	int refill_rb_cnt;
	struct rbdr_entry_t *desc;
	bus_dmamap_t dmap;
	bus_addr_t rbuf;
	boolean_t rb_alloc_fail;
	int new_rb;

	rb_alloc_fail = TRUE;
	new_rb = 0;
	nic = rbdr->nic;
	qs = nic->qs;
	rbdr_idx = rbdr->idx;

	/* Check if it's enabled */
	if (!rbdr->enable)
		return (0);

	/* Get no of desc's to be refilled */
	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
	qcount &= 0x7FFFF;
	/* Doorbell can be rung with a max of ring size minus 1 */
	if (qcount >= (qs->rbdr_len - 1)) {
		rb_alloc_fail = FALSE;
		goto out;
	} else
		refill_rb_cnt = qs->rbdr_len - qcount - 1;

	/* Start filling descs from tail */
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
	while (refill_rb_cnt) {
		tail++;
		tail &= (rbdr->dmem.q_len - 1);

		dmap = rbdr->rbdr_buff_dmaps[tail];
		if (nicvf_alloc_rcv_buffer(nic, rbdr, dmap, mflags,
		    DMA_BUFFER_LEN, &rbuf)) {
			/* Something went wrong. Resign */
			break;
		}
		desc = GET_RBDR_DESC(rbdr, tail);
		desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
		refill_rb_cnt--;
		new_rb++;
	}

	/* make sure all memory stores are done before ringing doorbell */
	wmb();

	/* Check if buffer allocation failed */
	if (refill_rb_cnt == 0)
		rb_alloc_fail = FALSE;

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
	    rbdr_idx, new_rb);
out:
	if (!rb_alloc_fail) {
		/*
		 * Re-enable RBDR interrupts only
		 * if buffer allocation succeeded.
		 */
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

		return (0);
	}

	return (ENOMEM);
}

/* Refill RBs even if sleep is needed to reclaim memory */
static void
nicvf_rbdr_task(void *arg, int pending)
{
	struct rbdr *rbdr;
	int err;

	rbdr = (struct rbdr *)arg;

	err = nicvf_refill_rbdr(rbdr, M_WAITOK);
	if (__predict_false(err != 0)) {
		panic("%s: Failed to refill RBs even when sleep enabled",
		    __func__);
	}
}

/* Refill RBs as soon as possible without waiting */
static void
nicvf_rbdr_task_nowait(void *arg, int pending)
{
	struct rbdr *rbdr;
	int err;

	rbdr = (struct rbdr *)arg;

	err = nicvf_refill_rbdr(rbdr, M_NOWAIT);
	if (err != 0) {
		/*
		 * Schedule another, sleepable kernel thread
		 * that will for sure refill the buffers.
		 */
		taskqueue_enqueue(taskqueue_thread, &rbdr->rbdr_task);
	}
}

static int
nicvf_rcv_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
    struct cqe_rx_t *cqe_rx, int cqe_type)
{
	struct mbuf *mbuf;
	int rq_idx;
	int err = 0;

	rq_idx = cqe_rx->rq_idx;

	/* Check for errors */
	err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
	if (err && !cqe_rx->rb_cnt)
		return (0);

	mbuf = nicvf_get_rcv_mbuf(nic, cqe_rx);
	if (mbuf == NULL) {
		dprintf(nic->dev, "Packet not received\n");
		return (0);
	}

	/* If error packet */
	if (err != 0) {
		m_freem(mbuf);
		return (0);
	}

	/*
	 * Push this packet to the stack later to avoid
	 * unlocking completion task in the middle of work.
	 */
	err = buf_ring_enqueue(cq->rx_br, mbuf);
	if (err != 0) {
		/*
		 * Failed to enqueue this mbuf.
		 * We don't drop it, just schedule another task.
		 */
		return (err);
	}

	return (0);
}

static int
nicvf_snd_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
    struct cqe_send_t *cqe_tx, int cqe_type)
{
	bus_dmamap_t dmap;
	struct mbuf *mbuf;
	struct snd_queue *sq;
	struct sq_hdr_subdesc *hdr;

	mbuf = NULL;
	sq = &nic->qs->sq[cqe_tx->sq_idx];
	/* Avoid blocking here since we hold a non-sleepable NICVF_CMP_LOCK */
	if (NICVF_TX_TRYLOCK(sq) == 0)
		return (EAGAIN);

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
		NICVF_TX_UNLOCK(sq);
		return (0);
	}

	dprintf(nic->dev,
	    "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
	    __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
	    cqe_tx->sqe_ptr, hdr->subdesc_cnt);

	dmap = (bus_dmamap_t)sq->snd_buff[cqe_tx->sqe_ptr].dmap;
	bus_dmamap_unload(sq->snd_buff_dmat, dmap);

	mbuf = (struct mbuf *)sq->snd_buff[cqe_tx->sqe_ptr].mbuf;
	if (mbuf != NULL) {
		m_freem(mbuf);
		sq->snd_buff[cqe_tx->sqe_ptr].mbuf = NULL;
	}

	nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
	nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);

	NICVF_TX_UNLOCK(sq);
	return (0);
}

static int
nicvf_cq_intr_handler(struct nicvf *nic, uint8_t cq_idx)
{
	struct mbuf *mbuf;
	struct ifnet *ifp;
	int processed_cqe, work_done = 0, tx_done = 0;
	int cqe_count, cqe_head;
	struct queue_set *qs = nic->qs;
	struct cmp_queue *cq = &qs->cq[cq_idx];
	struct cqe_rx_t *cq_desc;
	int cmp_err;

	NICVF_CMP_LOCK(cq);
	cmp_err = 0;
	processed_cqe = 0;
	/* Get no of valid CQ entries to process */
	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
	cqe_count &= CQ_CQE_COUNT;
	if (cqe_count == 0)
		goto out;

	/* Get head of the valid CQ entries */
	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
	cqe_head &= 0xFFFF;

	dprintf(nic->dev, "%s CQ%d cqe_count %d cqe_head %d\n",
	    __func__, cq_idx, cqe_count, cqe_head);
	while (processed_cqe < cqe_count) {
		/* Get the CQ descriptor */
		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
		cqe_head++;
		cqe_head &= (cq->dmem.q_len - 1);

		dprintf(nic->dev, "CQ%d cq_desc->cqe_type %d\n", cq_idx,
		    cq_desc->cqe_type);
		switch (cq_desc->cqe_type) {
		case CQE_TYPE_RX:
			cmp_err = nicvf_rcv_pkt_handler(nic, cq, cq_desc,
			    CQE_TYPE_RX);
			if (__predict_false(cmp_err != 0)) {
				/*
				 * Oops. Cannot finish now.
				 * Let's try again later.
				 */
				goto done;
			}
			work_done++;
			break;
		case CQE_TYPE_SEND:
			cmp_err = nicvf_snd_pkt_handler(nic, cq,
			    (void *)cq_desc, CQE_TYPE_SEND);
			if (__predict_false(cmp_err != 0)) {
				/*
				 * Oops. Cannot finish now.
				 * Let's try again later.
				 */
				goto done;
			}

			tx_done++;
			break;
		case CQE_TYPE_INVALID:
		case CQE_TYPE_RX_SPLIT:
		case CQE_TYPE_RX_TCP:
		case CQE_TYPE_SEND_PTP:
			/* Ignore for now */
			break;
		}
		processed_cqe++;
	}
done:
	dprintf(nic->dev,
	    "%s CQ%d processed_cqe %d work_done %d\n",
	    __func__, cq_idx, processed_cqe, work_done);

	/* Ring doorbell to inform H/W to reuse processed CQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, cq_idx, processed_cqe);

	if ((tx_done > 0) &&
	    ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0)) {
		/* Reenable TXQ if it was stopped earlier due to SQ full */
		if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
	}
out:
	NICVF_CMP_UNLOCK(cq);

	ifp = nic->ifp;
	/* Push received MBUFs to the stack */
	while (!buf_ring_empty(cq->rx_br)) {
		mbuf = buf_ring_dequeue_mc(cq->rx_br);
		if (__predict_true(mbuf != NULL))
			(*ifp->if_input)(ifp, mbuf);
	}

	return (cmp_err);
}

/*
 * Qset error interrupt handler
 *
 * As of now only CQ errors are handled
 */
static void
nicvf_qs_err_task(void *arg, int pending)
{
	struct nicvf *nic;
	struct queue_set *qs;
	int qidx;
	uint64_t status;
	boolean_t enable = TRUE;

	nic = (struct nicvf *)arg;
	qs = nic->qs;

	/* Deactivate network interface */
	if_setdrvflagbits(nic->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);

	/* Check if it is CQ err */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
		    qidx);
		if ((status & CQ_ERR_MASK) == 0)
			continue;
		/* Process already queued CQEs and reconfig CQ */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_sq_disable(nic, qidx);
		(void)nicvf_cq_intr_handler(nic, qidx);
		nicvf_cmp_queue_config(nic, qs, qidx, enable);
		nicvf_sq_free_used_descs(nic, &qs->sq[qidx], qidx);
		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
	}

	if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
	/* Re-enable Qset error interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
}

static void
nicvf_cmp_task(void *arg, int pending)
{
	uint64_t cq_head;
	struct cmp_queue *cq;
	struct nicvf *nic;
	int cmp_err;

	cq = (struct cmp_queue *)arg;
	nic = cq->nic;

	/* Handle CQ descriptors */
	cmp_err = nicvf_cq_intr_handler(nic, cq->idx);
	/* Re-enable interrupts */
	cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq->idx);
	nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD, cq->idx, cq_head);

	if (__predict_false(cmp_err != 0)) {
		/*
		 * Schedule another thread here since we did not
		 * process the entire CQ due to Tx or Rx CQ parse error.
		 */
		taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task);

	}

	/* Reenable interrupt (previously disabled in nicvf_intr_handler()) */
	nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->idx);

}

/* Initialize completion queue */
static int
nicvf_init_cmp_queue(struct nicvf *nic, struct cmp_queue *cq, int q_len,
    int qidx)
{
	int err;

	/* Initialize lock */
	snprintf(cq->mtx_name, sizeof(cq->mtx_name), "%s: CQ(%d) lock",
	    device_get_nameunit(nic->dev), qidx);
	mtx_init(&cq->mtx, cq->mtx_name, NULL, MTX_DEF);

	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
	    NICVF_CQ_BASE_ALIGN_BYTES);

	if (err != 0) {
		device_printf(nic->dev,
		    "Could not allocate DMA memory for CQ\n");
		return (err);
	}

	cq->desc = cq->dmem.base;
	cq->thresh = CMP_QUEUE_CQE_THRESH;
	cq->nic = nic;
	cq->idx = qidx;
	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;

	cq->rx_br = buf_ring_alloc(CMP_QUEUE_LEN * 8, M_DEVBUF, M_WAITOK,
	    &cq->mtx);

	/* Allocate taskqueue */
	TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq);
	cq->cmp_taskq = taskqueue_create_fast("nicvf_cmp_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &cq->cmp_taskq);
	taskqueue_start_threads(&cq->cmp_taskq, 1, PI_NET, "%s: cmp_taskq(%d)",
	    device_get_nameunit(nic->dev), qidx);

	return (0);
}

static void
nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
{

	if (cq == NULL)
		return;
	/*
	 * The completion queue itself should be disabled by now
	 * (ref. nicvf_snd_queue_config()).
	 * Ensure that it is safe to disable it or panic.
	 */
	if (cq->enable)
		panic("%s: Trying to free working CQ(%d)", __func__, cq->idx);

	if (cq->cmp_taskq != NULL) {
		/* Remove task */
		while (taskqueue_cancel(cq->cmp_taskq, &cq->cmp_task, NULL) != 0)
			taskqueue_drain(cq->cmp_taskq, &cq->cmp_task);

		taskqueue_free(cq->cmp_taskq);
		cq->cmp_taskq = NULL;
	}
	/*
	 * Completion interrupt will possibly enable interrupts again
	 * so disable interrupts now after we finished processing
	 * completion task. It is safe to do so since the corresponding CQ
	 * was already disabled.
	 */
	nicvf_disable_intr(nic, NICVF_INTR_CQ, cq->idx);
	nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);

	NICVF_CMP_LOCK(cq);
	nicvf_free_q_desc_mem(nic, &cq->dmem);
	drbr_free(cq->rx_br, M_DEVBUF);
	NICVF_CMP_UNLOCK(cq);
	mtx_destroy(&cq->mtx);
	memset(cq->mtx_name, 0, sizeof(cq->mtx_name));
}

static void
nicvf_snd_task(void *arg, int pending)
{
	struct snd_queue *sq = (struct snd_queue *)arg;
	struct mbuf *mbuf;

	NICVF_TX_LOCK(sq);
	while (1) {
		mbuf = drbr_dequeue(NULL, sq->br);
		if (mbuf == NULL)
			break;

		if (nicvf_tx_mbuf_locked(sq, mbuf) != 0) {
			/* XXX ARM64TODO: Increase Tx drop counter */
			m_freem(mbuf);
			break;
		}
	}
	NICVF_TX_UNLOCK(sq);
}

/* Initialize transmit queue */
static int
nicvf_init_snd_queue(struct nicvf *nic, struct snd_queue *sq, int q_len,
    int qidx)
{
	size_t i;
	int err;

	/* Initialize TX lock for this queue */
	snprintf(sq->mtx_name, sizeof(sq->mtx_name), "%s: SQ(%d) lock",
	    device_get_nameunit(nic->dev), qidx);
	mtx_init(&sq->mtx, sq->mtx_name, NULL, MTX_DEF);

	NICVF_TX_LOCK(sq);
	/* Allocate buffer ring */
	sq->br = buf_ring_alloc(q_len / MIN_SQ_DESC_PER_PKT_XMIT, M_DEVBUF,
	    M_NOWAIT, &sq->mtx);
	if (sq->br == NULL) {
		device_printf(nic->dev,
		    "ERROR: Could not set up buf ring for SQ(%d)\n", qidx);
		err = ENOMEM;
		goto error;
	}

	/* Allocate DMA memory for Tx descriptors */
	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
	    NICVF_SQ_BASE_ALIGN_BYTES);
	if (err != 0) {
		device_printf(nic->dev,
		    "Could not allocate DMA memory for SQ\n");
		goto error;
	}

	sq->desc = sq->dmem.base;
	sq->head = sq->tail = 0;
	atomic_store_rel_int(&sq->free_cnt, q_len - 1);
	sq->thresh = SND_QUEUE_THRESH;
	sq->idx = qidx;
	sq->nic = nic;

	/*
	 * Allocate DMA maps for Tx buffers
	 */

	/* Create DMA tag first */
	err = bus_dma_tag_create(
	    bus_get_dma_tag(nic->dev),		/* parent tag */
	    1,					/* alignment */
	    0,					/* boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    NICVF_TXBUF_MAXSIZE,		/* maxsize */
	    NICVF_TXBUF_NSEGS,			/* nsegments */
	    MCLBYTES,				/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sq->snd_buff_dmat);		/* dmat */

	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to create busdma tag for Tx buffers\n");
		goto error;
	}

	/* Allocate send buffers array */
	sq->snd_buff = malloc(sizeof(*sq->snd_buff) * q_len, M_NICVF,
	    (M_NOWAIT | M_ZERO));
	if (sq->snd_buff == NULL) {
		device_printf(nic->dev,
		    "Could not allocate memory for Tx buffers array\n");
		err = ENOMEM;
		goto error;
	}

	/* Now populate maps */
	for (i = 0; i < q_len; i++) {
		err = bus_dmamap_create(sq->snd_buff_dmat, 0,
		    &sq->snd_buff[i].dmap);
		if (err != 0) {
			device_printf(nic->dev,
			    "Failed to create DMA maps for Tx buffers\n");
			goto error;
		}
	}
	NICVF_TX_UNLOCK(sq);

	/* Allocate taskqueue */
	TASK_INIT(&sq->snd_task, 0, nicvf_snd_task, sq);
	sq->snd_taskq = taskqueue_create_fast("nicvf_snd_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sq->snd_taskq);
	taskqueue_start_threads(&sq->snd_taskq, 1, PI_NET, "%s: snd_taskq(%d)",
	    device_get_nameunit(nic->dev), qidx);

	return (0);
error:
	NICVF_TX_UNLOCK(sq);
	return (err);
}

static void
nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
	struct queue_set *qs = nic->qs;
	size_t i;
	int err;

	if (sq == NULL)
		return;

	if (sq->snd_taskq != NULL) {
		/* Remove task */
		while (taskqueue_cancel(sq->snd_taskq, &sq->snd_task, NULL) != 0)
			taskqueue_drain(sq->snd_taskq, &sq->snd_task);

		taskqueue_free(sq->snd_taskq);
		sq->snd_taskq = NULL;
	}

	NICVF_TX_LOCK(sq);
	if (sq->snd_buff_dmat != NULL) {
		if (sq->snd_buff != NULL) {
			for (i = 0; i < qs->sq_len; i++) {
				m_freem(sq->snd_buff[i].mbuf);
				sq->snd_buff[i].mbuf = NULL;

				bus_dmamap_unload(sq->snd_buff_dmat,
				    sq->snd_buff[i].dmap);
				err = bus_dmamap_destroy(sq->snd_buff_dmat,
				    sq->snd_buff[i].dmap);
				/*
				 * If bus_dmamap_destroy fails it can cause
				 * random panic later if the tag is also
				 * destroyed in the process.
				 */
				KASSERT(err == 0,
				    ("%s: Could not destroy DMA map for SQ",
				    __func__));
			}
		}

		free(sq->snd_buff, M_NICVF);

		err = bus_dma_tag_destroy(sq->snd_buff_dmat);
		KASSERT(err == 0,
		    ("%s: Trying to destroy BUSY DMA tag", __func__));
	}

	/* Free private driver ring for this send queue */
	if (sq->br != NULL)
		drbr_free(sq->br, M_DEVBUF);

	if (sq->dmem.base != NULL)
		nicvf_free_q_desc_mem(nic, &sq->dmem);

	NICVF_TX_UNLOCK(sq);
	/* Destroy Tx lock */
	mtx_destroy(&sq->mtx);
	memset(sq->mtx_name, 0, sizeof(sq->mtx_name));
}

static void
nicvf_reclaim_snd_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
{

	/* Disable send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
	/* Check if SQ is stopped */
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
		return;
	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
}

static void
nicvf_reclaim_rcv_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
{
	union nic_mbx mbx = {};

	/* Make sure all packets in the pipeline are written back into mem */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void
nicvf_reclaim_cmp_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
{

	/* Disable timer threshold (doesn't get reset upon CQ reset) */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	/* Disable completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
}

static void
nicvf_reclaim_rbdr(struct nicvf *nic, struct rbdr *rbdr, int qidx)
{
	uint64_t tmp, fifo_state;
	int timeout = 10;

	/* Save head and tail pointers for freeing up buffers */
	rbdr->head =
	    nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
	rbdr->tail =
	    nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;

	/*
	 * If RBDR FIFO is in 'FAIL' state then do a reset first
	 * before reclaiming.
	 */
	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
	if (((fifo_state >> 62) & 0x03) == 0x3) {
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
		    qidx, NICVF_RBDR_RESET);
	}

	/* Disable RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
	while (1) {
		tmp = nicvf_queue_reg_read(nic,
		    NIC_QSET_RBDR_0_1_PREFETCH_STATUS, qidx);
		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
			break;

		DELAY(1000);
		timeout--;
		if (!timeout) {
			device_printf(nic->dev,
			    "Failed polling on prefetch status\n");
			return;
		}
	}
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
	    NICVF_RBDR_RESET);

	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
		return;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
}

/* Configures receive queue */
static void
nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
    int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct rcv_queue *rq;
	struct rq_cfg rq_cfg;

	rq = &qs->rq[qidx];
	rq->enable = enable;

	/* Disable receive queue */
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);

	if (!rq->enable) {
		nicvf_reclaim_rcv_queue(nic, qs, qidx);
		return;
	}

	rq->cq_qs = qs->vnic_id;
	rq->cq_idx = qidx;
	rq->start_rbdr_qs = qs->vnic_id;
	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
	rq->cont_rbdr_qs = qs->vnic_id;
	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
	/* all writes of RBDR data to be loaded into L2 Cache as well */
	rq->caching = 1;

	/* Send a mailbox msg to PF to config RQ */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
	mbx.rq.qs_num = qs->vnic_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
	    (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
	    (rq->cont_qs_rbdr_idx << 8) | (rq->start_rbdr_qs << 1) |
	    (rq->start_qs_rbdr_idx);
	nicvf_send_msg_to_pf(nic, &mbx);

	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
	mbx.rq.cfg = (1UL << 63) | (1UL << 62) | (qs->vnic_id << 0);
	nicvf_send_msg_to_pf(nic, &mbx);

	/*
	 * RQ drop config
	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
	 */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
	mbx.rq.cfg = (1UL << 62) | (RQ_CQ_DROP << 8);
	nicvf_send_msg_to_pf(nic, &mbx);

	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);

	/* Enable Receive queue */
	rq_cfg.ena = 1;
	rq_cfg.tcp_ena = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx,
	    *(uint64_t *)&rq_cfg);
}

/* Configures completion queue */
static void
nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
    int qidx, boolean_t enable)
{
	struct cmp_queue *cq;
	struct cq_cfg cq_cfg;

	cq = &qs->cq[qidx];
	cq->enable = enable;

	if (!cq->enable) {
		nicvf_reclaim_cmp_queue(nic, qs, qidx);
		return;
	}

	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

	/* Set completion queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx,
	    (uint64_t)(cq->dmem.phys_base));

	/* Enable Completion queue */
	cq_cfg.ena = 1;
	cq_cfg.reset = 0;
	cq_cfg.caching = 0;
	cq_cfg.qsize = CMP_QSIZE;
	cq_cfg.avg_con = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(uint64_t *)&cq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx,
	    nic->cq_coalesce_usecs);
}

/* Configures transmit queue */
static void
nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx,
    boolean_t enable)
{
	union nic_mbx mbx = {};
	struct snd_queue *sq;
	struct sq_cfg sq_cfg;

	sq = &qs->sq[qidx];
	sq->enable = enable;

	if (!sq->enable) {
		nicvf_reclaim_snd_queue(nic, qs, qidx);
		return;
	}

	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

	sq->cq_qs = qs->vnic_id;
	sq->cq_idx = qidx;

	/* Send a mailbox msg to PF to config SQ */
	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
	mbx.sq.qs_num = qs->vnic_id;
	mbx.sq.sq_num = qidx;
	mbx.sq.sqs_mode = nic->sqs_mode;
	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx,
	    (uint64_t)(sq->dmem.phys_base));

	/* Enable send queue & set queue size */
	sq_cfg.ena = 1;
	sq_cfg.reset = 0;
	sq_cfg.ldwb = 0;
	sq_cfg.qsize = SND_QSIZE;
	sq_cfg.tstmp_bgx_intf = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(uint64_t *)&sq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
}

/* Configures receive buffer descriptor ring */
static void
nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, int qidx,
    boolean_t enable)
{
	struct rbdr *rbdr;
	struct rbdr_cfg rbdr_cfg;

	rbdr = &qs->rbdr[qidx];
	nicvf_reclaim_rbdr(nic, rbdr, qidx);
	if (!enable)
		return;

	/* Set descriptor base address */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx,
	    (uint64_t)(rbdr->dmem.phys_base));

	/* Enable RBDR & set queue size */
	/* Buffer size should be in multiples of 128 bytes */
	rbdr_cfg.ena = 1;
	rbdr_cfg.reset = 0;
	rbdr_cfg.ldwb = 0;
	rbdr_cfg.qsize = RBDR_SIZE;
	rbdr_cfg.avg_con = 0;
	rbdr_cfg.lines = rbdr->dma_size / 128;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
	    *(uint64_t *)&rbdr_cfg);

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, qidx,
	    qs->rbdr_len - 1);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, qidx,
	    rbdr->thresh - 1);
}

/* Requests PF to assign and enable Qset */
void
nicvf_qset_config(struct nicvf *nic, boolean_t enable)
{
	union nic_mbx mbx = {};
	struct queue_set *qs;
	struct qs_cfg *qs_cfg;

	qs = nic->qs;
	if (qs == NULL) {
		device_printf(nic->dev,
		    "Qset is still not allocated, don't init queues\n");
		return;
	}

	qs->enable = enable;
	qs->vnic_id = nic->vf_id;

	/* Send a mailbox msg to PF to config Qset */
	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
	mbx.qs.num = qs->vnic_id;

	mbx.qs.cfg = 0;
	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
	if (qs->enable) {
		qs_cfg->ena = 1;
		qs_cfg->vnic = qs->vnic_id;
	}
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void
nicvf_free_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs;

	qs = nic->qs;
	/*
	 * Remove QS error task first since it has to be dead
	 * to safely free completion queue tasks.
	 */
	if (qs->qs_err_taskq != NULL) {
		/* Shut down QS error tasks */
		while (taskqueue_cancel(qs->qs_err_taskq,
		    &qs->qs_err_task, NULL) != 0) {
			taskqueue_drain(qs->qs_err_taskq, &qs->qs_err_task);

		}
		taskqueue_free(qs->qs_err_taskq);
		qs->qs_err_taskq = NULL;
	}
	/* Free receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);

	/* Free completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);

	/* Free send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
}

static int
nicvf_alloc_resources(struct nicvf *nic)
{
	struct queue_set *qs = nic->qs;
	int qidx;

	/* Alloc receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
		    DMA_BUFFER_LEN, qidx))
			goto alloc_fail;
	}

	/* Alloc send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
			goto alloc_fail;
	}

	/* Alloc completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len, qidx))
			goto alloc_fail;
	}

	/* Allocate QS error taskqueue */
	TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic);
	qs->qs_err_taskq = taskqueue_create_fast("nicvf_qs_err_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &qs->qs_err_taskq);
	taskqueue_start_threads(&qs->qs_err_taskq, 1, PI_NET, "%s: qs_taskq",
	    device_get_nameunit(nic->dev));

	return (0);
alloc_fail:
	nicvf_free_resources(nic);
	return (ENOMEM);
}

int
nicvf_set_qset_resources(struct nicvf *nic)
{
	struct queue_set *qs;

	qs = malloc(sizeof(*qs), M_NICVF, (M_ZERO | M_WAITOK));
	nic->qs = qs;

	/* Set count of each queue */
	qs->rbdr_cnt = RBDR_CNT;
	/* With no RSS we stay with single RQ */
	qs->rq_cnt = 1;

	qs->sq_cnt = SND_QUEUE_CNT;
	qs->cq_cnt = CMP_QUEUE_CNT;

	/* Set queue lengths */
	qs->rbdr_len = RCV_BUF_COUNT;
	qs->sq_len = SND_QUEUE_LEN;
	qs->cq_len = CMP_QUEUE_LEN;

	nic->rx_queues = qs->rq_cnt;
	nic->tx_queues = qs->sq_cnt;

	return (0);
}

int
nicvf_config_data_transfer(struct nicvf *nic, boolean_t enable)
{
	boolean_t disable = FALSE;
	struct queue_set *qs;
	int qidx;

	qs = nic->qs;
	if (qs == NULL)
		return (0);

	if (enable) {
		if (nicvf_alloc_resources(nic) != 0)
			return (ENOMEM);

		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, enable);
	} else {
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, disable);

		nicvf_free_resources(nic);
	}

	return (0);
}

/*
 * Get a free desc from SQ
 * returns descriptor pointer & descriptor number
 */
static __inline int
nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	int qentry;

	qentry = sq->tail;
	atomic_subtract_int(&sq->free_cnt, desc_cnt);
	sq->tail += desc_cnt;
	sq->tail &= (sq->dmem.q_len - 1);

	return (qentry);
}

/* Free descriptor back to SQ for future use */
static void
nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{

	atomic_add_int(&sq->free_cnt, desc_cnt);
	sq->head += desc_cnt;
	sq->head &= (sq->dmem.q_len - 1);
}

static __inline int
nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
{
	qentry++;
	qentry &= (sq->dmem.q_len - 1);
	return (qentry);
}

static void
nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
	uint64_t sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg |= NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
	/* Ring doorbell so that H/W restarts processing SQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
}

static void
nicvf_sq_disable(struct nicvf *nic, int qidx)
{
	uint64_t sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg &= ~NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
}

static void
nicvf_sq_free_used_descs(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
	uint64_t head, tail;
	struct snd_buff *snd_buff;
	struct sq_hdr_subdesc *hdr;

	NICVF_TX_LOCK(sq);
	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
	while (sq->head != head) {
		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
			nicvf_put_sq_desc(sq, 1);
			continue;
		}
		snd_buff = &sq->snd_buff[sq->head];
		if (snd_buff->mbuf != NULL) {
			bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
			m_freem(snd_buff->mbuf);
			sq->snd_buff[sq->head].mbuf = NULL;
		}
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}
	NICVF_TX_UNLOCK(sq);
}
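
/*
 * Each packet posted to the SQ occupies one HEADER subdescriptor followed by
 * one GATHER subdescriptor per DMA segment. nicvf_get_sq_desc() above
 * advances the software tail and debits free_cnt when slots are claimed,
 * while nicvf_put_sq_desc() advances head and credits free_cnt as send
 * completions (or reclaimed descriptors) return the slots.
 */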

/*
 * Add SQ HEADER subdescriptor.
 * First subdescriptor for every send descriptor.
 */
static __inline int
nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
    int subdesc_cnt, struct mbuf *mbuf, int len)
{
	struct sq_hdr_subdesc *hdr;
	struct ether_vlan_header *eh;
#ifdef INET
	struct ip *ip;
#endif
	uint16_t etype;
	int ehdrlen, iphlen, poff;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	sq->snd_buff[qentry].mbuf = mbuf;

	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
	/* Enable notification via CQE after processing SQE */
	hdr->post_cqe = 1;
	/* No of subdescriptors following this */
	hdr->subdesc_cnt = subdesc_cnt;
	hdr->tot_len = len;

	if (mbuf->m_pkthdr.csum_flags != 0) {
		hdr->csum_l3 = 1; /* Enable IP csum calculation */

		eh = mtod(mbuf, struct ether_vlan_header *);
		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
			ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
			etype = ntohs(eh->evl_proto);
		} else {
			ehdrlen = ETHER_HDR_LEN;
			etype = ntohs(eh->evl_encap_proto);
		}

		if (mbuf->m_len < ehdrlen + sizeof(struct ip)) {
			mbuf = m_pullup(mbuf, ehdrlen + sizeof(struct ip));
			sq->snd_buff[qentry].mbuf = mbuf;
			if (mbuf == NULL)
				return (ENOBUFS);
		}

		switch (etype) {
#ifdef INET6
		case ETHERTYPE_IPV6:
			/* ARM64TODO: Add support for IPv6 */
			hdr->csum_l3 = 0;
			sq->snd_buff[qentry].mbuf = NULL;
			return (ENXIO);
#endif
#ifdef INET
		case ETHERTYPE_IP:
			ip = (struct ip *)(mbuf->m_data + ehdrlen);
			ip->ip_sum = 0;
			iphlen = ip->ip_hl << 2;
			poff = ehdrlen + iphlen;

			switch (ip->ip_p) {
			case IPPROTO_TCP:
				if ((mbuf->m_pkthdr.csum_flags & CSUM_TCP) == 0)
					break;

				if (mbuf->m_len < (poff + sizeof(struct tcphdr))) {
					mbuf = m_pullup(mbuf, poff + sizeof(struct tcphdr));
					sq->snd_buff[qentry].mbuf = mbuf;
					if (mbuf == NULL)
						return (ENOBUFS);
				}
				hdr->csum_l4 = SEND_L4_CSUM_TCP;
				break;
			case IPPROTO_UDP:
				if ((mbuf->m_pkthdr.csum_flags & CSUM_UDP) == 0)
					break;

				if (mbuf->m_len < (poff + sizeof(struct udphdr))) {
					mbuf = m_pullup(mbuf, poff + sizeof(struct udphdr));
					sq->snd_buff[qentry].mbuf = mbuf;
					if (mbuf == NULL)
						return (ENOBUFS);
				}
				hdr->csum_l4 = SEND_L4_CSUM_UDP;
				break;
			case IPPROTO_SCTP:
				if ((mbuf->m_pkthdr.csum_flags & CSUM_SCTP) == 0)
					break;

				if (mbuf->m_len < (poff + sizeof(struct sctphdr))) {
					mbuf = m_pullup(mbuf, poff + sizeof(struct sctphdr));
					sq->snd_buff[qentry].mbuf = mbuf;
					if (mbuf == NULL)
						return (ENOBUFS);
				}
				hdr->csum_l4 = SEND_L4_CSUM_SCTP;
				break;
			default:
				break;
			}
			break;
#endif
		default:
			hdr->csum_l3 = 0;
			return (0);
		}

		hdr->l3_offset = ehdrlen;
		hdr->l4_offset = ehdrlen + iphlen;
	} else
		hdr->csum_l3 = 0;

	return (0);
}

/*
 * SQ GATHER subdescriptor
 * Must follow HDR descriptor
 */
static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
    int size, uint64_t data)
{
	struct sq_gather_subdesc *gather;

	qentry &= (sq->dmem.q_len - 1);
	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);

	memset(gather, 0, SND_QUEUE_DESC_SIZE);
	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
	gather->size = size;
	gather->addr = data;
}

/* Put an mbuf to a SQ for packet transfer. */
static int
nicvf_tx_mbuf_locked(struct snd_queue *sq, struct mbuf *mbuf)
{
	bus_dma_segment_t segs[256];
	struct snd_buff *snd_buff;
	size_t seg;
	int nsegs, qentry;
	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT - 1;
	int err;

	NICVF_TX_LOCK_ASSERT(sq);

	if (sq->free_cnt == 0)
		return (ENOBUFS);

	snd_buff = &sq->snd_buff[sq->tail];

	err = bus_dmamap_load_mbuf_sg(sq->snd_buff_dmat, snd_buff->dmap,
	    mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err != 0) {
		/* ARM64TODO: Add mbuf defragmenting if we lack maps */
		return (err);
	}

	/* Set how many subdescriptors are required */
	subdesc_cnt += nsegs;

	if (subdesc_cnt > sq->free_cnt) {
		/* ARM64TODO: Add mbuf defragmentation if we lack descriptors */
		bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
		return (ENOBUFS);
	}

	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

	/* Add SQ header subdesc */
	err = nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, mbuf,
	    mbuf->m_pkthdr.len);
	if (err != 0) {
		bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
		return (err);
	}

	/* Add SQ gather subdescs */
	for (seg = 0; seg < nsegs; seg++) {
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		nicvf_sq_add_gather_subdesc(sq, qentry, segs[seg].ds_len,
		    segs[seg].ds_addr);
	}

	/* make sure all memory stores are done before ringing doorbell */
	bus_dmamap_sync(sq->dmem.dmat, sq->dmem.dmap, BUS_DMASYNC_PREWRITE);

	dprintf(sq->nic->dev, "%s: sq->idx: %d, subdesc_cnt: %d\n",
	    __func__, sq->idx, subdesc_cnt);
	/* Inform HW to xmit new packet */
	nicvf_queue_reg_write(sq->nic, NIC_QSET_SQ_0_7_DOOR,
	    sq->idx, subdesc_cnt);
	return (0);
}

static __inline u_int
frag_num(u_int i)
{
#if BYTE_ORDER == BIG_ENDIAN
	return ((i & ~3) + 3 - (i & 3));
#else
	return (i);
#endif
}

/* Returns MBUF for a received packet */
struct mbuf *
nicvf_get_rcv_mbuf(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	int frag;
	int payload_len = 0;
	struct mbuf *mbuf;
	struct mbuf *mbuf_frag;
	uint16_t *rb_lens = NULL;
	uint64_t *rb_ptrs = NULL;

	mbuf = NULL;
	rb_lens = (uint16_t *)((uint8_t *)cqe_rx + (3 * sizeof(uint64_t)));
	rb_ptrs = (uint64_t *)((uint8_t *)cqe_rx + (6 * sizeof(uint64_t)));

	dprintf(nic->dev, "%s rb_cnt %d rb0_ptr %lx rb0_sz %d\n",
	    __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);

	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
		payload_len = rb_lens[frag_num(frag)];
		if (frag == 0) {
			/* First fragment */
			mbuf = nicvf_rb_ptr_to_mbuf(nic,
			    (*rb_ptrs - cqe_rx->align_pad));
			mbuf->m_len = payload_len;
			mbuf->m_data += cqe_rx->align_pad;
			if_setrcvif(mbuf, nic->ifp);
		} else {
			/* Add fragments */
			mbuf_frag = nicvf_rb_ptr_to_mbuf(nic, *rb_ptrs);
			m_append(mbuf, payload_len, mbuf_frag->m_data);
			m_freem(mbuf_frag);
		}
		/* Next buffer pointer */
		rb_ptrs++;
	}

	if (__predict_true(mbuf != NULL)) {
		m_fixhdr(mbuf);
		mbuf->m_pkthdr.flowid = cqe_rx->rq_idx;
		M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
		if (__predict_true((if_getcapenable(nic->ifp) & IFCAP_RXCSUM) != 0)) {
			/*
			 * HW by default verifies IP & TCP/UDP/SCTP checksums
			 */

			/* XXX: Do we need to include IP with options too? */
			if (__predict_true(cqe_rx->l3_type == L3TYPE_IPV4 ||
			    cqe_rx->l3_type == L3TYPE_IPV6)) {
				mbuf->m_pkthdr.csum_flags =
				    (CSUM_IP_CHECKED | CSUM_IP_VALID);
			}
			if (cqe_rx->l4_type == L4TYPE_TCP ||
			    cqe_rx->l4_type == L4TYPE_UDP ||
			    cqe_rx->l4_type == L4TYPE_SCTP) {
				mbuf->m_pkthdr.csum_flags |=
				    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
				mbuf->m_pkthdr.csum_data = htons(0xffff);
			}
		}
	}

	return (mbuf);
}
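
/*
 * Interrupt enable state lives in the NIC_VF_ENA_W1S/NIC_VF_ENA_W1C
 * (write-1-to-set / write-1-to-clear) register pair; pending interrupts are
 * acknowledged through NIC_VF_INT. The helpers below only compute the
 * per-source bit to write.
 */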
/* Check if interrupt is enabled */
int
nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
	uint64_t reg_val;
	uint64_t mask = 0xff;

	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);

	switch (int_type) {
	case NICVF_INTR_CQ:
		mask = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		mask = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		mask = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		mask = NICVF_INTR_PKT_DROP_MASK;
		break;
	case NICVF_INTR_TCP_TIMER:
		mask = NICVF_INTR_TCP_TIMER_MASK;
		break;
	case NICVF_INTR_MBOX:
		mask = NICVF_INTR_MBOX_MASK;
		break;
	case NICVF_INTR_QS_ERR:
		mask = NICVF_INTR_QS_ERR_MASK;
		break;
	default:
		device_printf(nic->dev,
		    "Failed to check interrupt enable: unknown type\n");
		break;
	}

	return (reg_val & mask);
}

void
nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
{
	struct rcv_queue *rq;

#define GET_RQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
	    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	rq = &nic->qs->rq[rq_idx];
	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
}

void
nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
{
	struct snd_queue *sq;

#define GET_SQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
	    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	sq = &nic->qs->sq[sq_idx];
	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
}
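/*
 * GET_RQ_STATS()/GET_SQ_STATS() above build a per-queue register offset:
 * the queue number goes into the NIC_Q_NUM_SHIFT field and the statistic
 * index is multiplied by 8 ("reg << 3") because consecutive STAT registers
 * are 64 bits apart.  Illustrative sketch only (hypothetical helper; the
 * rq_cnt/sq_cnt fields of struct queue_set are assumed from nicvf_queues.h):
 * refreshing the counters for every configured queue.
 */
#if 0
static void
nicvf_update_all_queue_stats(struct nicvf *nic)
{
	struct queue_set *qs = nic->qs;
	int qidx;

	for (qidx = 0; qidx < qs->rq_cnt; qidx++)
		nicvf_update_rq_stats(nic, qidx);
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_update_sq_stats(nic, qidx);
}
#endif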
/* Check for errors in the receive completion queue entry */
int
nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cmp_queue *cq,
    struct cqe_rx_t *cqe_rx)
{
	struct nicvf_hw_stats *stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;

	if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
		drv_stats->rx_frames_ok++;
		return (0);
	}

	switch (cqe_rx->err_opcode) {
	case CQ_RX_ERROP_RE_PARTIAL:
		stats->rx_bgx_truncated_pkts++;
		break;
	case CQ_RX_ERROP_RE_JABBER:
		stats->rx_jabber_errs++;
		break;
	case CQ_RX_ERROP_RE_FCS:
		stats->rx_fcs_errs++;
		break;
	case CQ_RX_ERROP_RE_RX_CTL:
		stats->rx_bgx_errs++;
		break;
	case CQ_RX_ERROP_PREL2_ERR:
		stats->rx_prel2_errs++;
		break;
	case CQ_RX_ERROP_L2_MAL:
		stats->rx_l2_hdr_malformed++;
		break;
	case CQ_RX_ERROP_L2_OVERSIZE:
		stats->rx_oversize++;
		break;
	case CQ_RX_ERROP_L2_UNDERSIZE:
		stats->rx_undersize++;
		break;
	case CQ_RX_ERROP_L2_LENMISM:
		stats->rx_l2_len_mismatch++;
		break;
	case CQ_RX_ERROP_L2_PCLP:
		stats->rx_l2_pclp++;
		break;
	case CQ_RX_ERROP_IP_NOT:
		stats->rx_ip_ver_errs++;
		break;
	case CQ_RX_ERROP_IP_CSUM_ERR:
		stats->rx_ip_csum_errs++;
		break;
	case CQ_RX_ERROP_IP_MAL:
		stats->rx_ip_hdr_malformed++;
		break;
	case CQ_RX_ERROP_IP_MALD:
		stats->rx_ip_payload_malformed++;
		break;
	case CQ_RX_ERROP_IP_HOP:
		stats->rx_ip_ttl_errs++;
		break;
	case CQ_RX_ERROP_L3_PCLP:
		stats->rx_l3_pclp++;
		break;
	case CQ_RX_ERROP_L4_MAL:
		stats->rx_l4_malformed++;
		break;
	case CQ_RX_ERROP_L4_CHK:
		stats->rx_l4_csum_errs++;
		break;
	case CQ_RX_ERROP_UDP_LEN:
		stats->rx_udp_len_errs++;
		break;
	case CQ_RX_ERROP_L4_PORT:
		stats->rx_l4_port_errs++;
		break;
	case CQ_RX_ERROP_TCP_FLAG:
		stats->rx_tcp_flag_errs++;
		break;
	case CQ_RX_ERROP_TCP_OFFSET:
		stats->rx_tcp_offset_errs++;
		break;
	case CQ_RX_ERROP_L4_PCLP:
		stats->rx_l4_pclp++;
		break;
	case CQ_RX_ERROP_RBDR_TRUNC:
		stats->rx_truncated_pkts++;
		break;
	}

	return (1);
}

/* Check for errors in the send completion queue entry */
int
nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cmp_queue *cq,
    struct cqe_send_t *cqe_tx)
{
	struct cmp_queue_stats *stats = &cq->stats;

	switch (cqe_tx->send_status) {
	case CQ_TX_ERROP_GOOD:
		stats->tx.good++;
		return (0);
	case CQ_TX_ERROP_DESC_FAULT:
		stats->tx.desc_fault++;
		break;
	case CQ_TX_ERROP_HDR_CONS_ERR:
		stats->tx.hdr_cons_err++;
		break;
	case CQ_TX_ERROP_SUBDC_ERR:
		stats->tx.subdesc_err++;
		break;
	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
		stats->tx.imm_size_oflow++;
		break;
	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
		stats->tx.data_seq_err++;
		break;
	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
		stats->tx.mem_seq_err++;
		break;
	case CQ_TX_ERROP_LOCK_VIOL:
		stats->tx.lock_viol++;
		break;
	case CQ_TX_ERROP_DATA_FAULT:
		stats->tx.data_fault++;
		break;
	case CQ_TX_ERROP_TSTMP_CONFLICT:
		stats->tx.tstmp_conflict++;
		break;
	case CQ_TX_ERROP_TSTMP_TIMEOUT:
		stats->tx.tstmp_timeout++;
		break;
	case CQ_TX_ERROP_MEM_FAULT:
		stats->tx.mem_fault++;
		break;
	case CQ_TX_ERROP_CK_OVERLAP:
		stats->tx.csum_overlap++;
		break;
	case CQ_TX_ERROP_CK_OFLOW:
		stats->tx.csum_overflow++;
		break;
	}

	return (1);
}
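/*
 * Illustrative sketch only (the driver's real RX path lives earlier in this
 * file): both checkers above return 0 for a good completion and non-zero
 * once the matching error counter has been bumped, so a CQE dispatch loop
 * can simply skip errored RX entries.  The function below is hypothetical.
 */
#if 0
static void
nicvf_rx_cqe_example(struct nicvf *nic, struct cmp_queue *cq,
    struct cqe_rx_t *cqe_rx)
{
	struct mbuf *mbuf;

	if (nicvf_check_cqe_rx_errs(nic, cq, cqe_rx) != 0)
		return;		/* Error already accounted for; drop the CQE. */

	mbuf = nicvf_get_rcv_mbuf(nic, cqe_rx);
	if (mbuf == NULL)
		return;

	/* ... hand the mbuf to the network stack here ... */
}
#endif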