/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/vnic/nicvf_queues.c 296602 2016-03-10 05:45:24Z zbb $
 *
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/vnic/nicvf_queues.c 296602 2016-03-10 05:45:24Z zbb $");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/buf_ring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sockio.h>
#include <sys/socket.h>
#include <sys/stdatomic.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/vmparam.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/ifq.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/sctp.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet/udp.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "thunder_bgx.h"
#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "nicvf_queues.h"

#define	DEBUG
#undef	DEBUG

#ifdef DEBUG
#define	dprintf(dev, fmt, ...)	device_printf(dev, fmt, ##__VA_ARGS__)
#else
#define	dprintf(dev, fmt, ...)
#endif

MALLOC_DECLARE(M_NICVF);

static void nicvf_free_snd_queue(struct nicvf *, struct snd_queue *);
static struct mbuf * nicvf_get_rcv_mbuf(struct nicvf *, struct cqe_rx_t *);
static void nicvf_sq_disable(struct nicvf *, int);
static void nicvf_sq_enable(struct nicvf *, struct snd_queue *, int);
static void nicvf_put_sq_desc(struct snd_queue *, int);
static void nicvf_cmp_queue_config(struct nicvf *, struct queue_set *, int,
    boolean_t);
static void nicvf_sq_free_used_descs(struct nicvf *, struct snd_queue *, int);

static void nicvf_rbdr_task(void *, int);
static void nicvf_rbdr_task_nowait(void *, int);

struct rbuf_info {
	bus_dma_tag_t	dmat;
	bus_dmamap_t	dmap;
	struct mbuf *	mbuf;
};

#define	GET_RBUF_INFO(x) ((struct rbuf_info *)((x) - NICVF_RCV_BUF_ALIGN_BYTES))
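
/*
 * Receive buffer layout (see nicvf_alloc_rcv_buffer() below):
 *
 *   |<- NICVF_RCV_BUF_ALIGN_BYTES ->|<--------- packet data --------->|
 *   | struct rbuf_info (+ padding) | address handed to the hardware  |
 *
 * The address programmed into the RBDR points just past the metadata
 * line, so GET_RBUF_INFO() can recover the rbuf_info by stepping back
 * one cache line from the buffer address returned by the hardware.
 */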

/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
    uint64_t reg, int bit_pos, int bits, int val)
{
	uint64_t bit_mask;
	uint64_t reg_val;
	int timeout = 10;

	bit_mask = (1UL << bits) - 1;
	bit_mask = (bit_mask << bit_pos);

	while (timeout) {
		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
		if (((reg_val & bit_mask) >> bit_pos) == val)
			return (0);

		DELAY(1000);
		timeout--;
	}
	device_printf(nic->dev, "Poll on reg 0x%lx failed\n", reg);
	return (ETIMEDOUT);
}

/* Callback for bus_dmamap_load() */
static void
nicvf_dmamap_q_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr;

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	paddr = arg;
	*paddr = segs->ds_addr;
}

/* Allocate memory for a queue's descriptors */
static int
nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
    int q_len, int desc_size, int align_bytes)
{
	int err, err_dmat;

	/* Create DMA tag first */
	err = bus_dma_tag_create(
	    bus_get_dma_tag(nic->dev),		/* parent tag */
	    align_bytes,			/* alignment */
	    0,					/* boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    (q_len * desc_size),		/* maxsize */
	    1,					/* nsegments */
	    (q_len * desc_size),		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &dmem->dmat);			/* dmat */

	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to create busdma tag for descriptors ring\n");
		return (err);
	}

	/* Allocate segment of contiguous DMA safe memory */
	err = bus_dmamem_alloc(
	    dmem->dmat,				/* DMA tag */
	    &dmem->base,			/* virtual address */
	    (BUS_DMA_NOWAIT | BUS_DMA_ZERO),	/* flags */
	    &dmem->dmap);			/* DMA map */
	if (err != 0) {
		device_printf(nic->dev, "Failed to allocate DMA safe memory "
		    "for descriptors ring\n");
		goto dmamem_fail;
	}

	err = bus_dmamap_load(
	    dmem->dmat,
	    dmem->dmap,
	    dmem->base,
	    (q_len * desc_size),		/* allocation size */
	    nicvf_dmamap_q_cb,			/* map to DMA address cb. */
	    &dmem->phys_base,			/* physical address */
	    BUS_DMA_NOWAIT);
	if (err != 0) {
		device_printf(nic->dev,
		    "Cannot load DMA map of descriptors ring\n");
		goto dmamap_fail;
	}

	dmem->q_len = q_len;
	dmem->size = (desc_size * q_len);

	return (0);

dmamap_fail:
	bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
	dmem->phys_base = 0;
dmamem_fail:
	err_dmat = bus_dma_tag_destroy(dmem->dmat);
	dmem->base = NULL;
	KASSERT(err_dmat == 0,
	    ("%s: Trying to destroy BUSY DMA tag", __func__));

	return (err);
}
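
/*
 * Note: the tag above is created with nsegments = 1, so the descriptor
 * ring is guaranteed to be physically contiguous.  That is what makes it
 * valid to program dmem->phys_base alone into the NIC_QSET_*_BASE
 * registers in the queue configuration routines further down.
 */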

/* Free queue's descriptor memory */
static void
nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
	int err;

	if ((dmem == NULL) || (dmem->base == NULL))
		return;

	/* Unload a map */
	bus_dmamap_sync(dmem->dmat, dmem->dmap, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(dmem->dmat, dmem->dmap);
	/* Free DMA memory */
	bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
	/* Destroy DMA tag */
	err = bus_dma_tag_destroy(dmem->dmat);

	KASSERT(err == 0,
	    ("%s: Trying to destroy BUSY DMA tag", __func__));

	dmem->phys_base = 0;
	dmem->base = NULL;
}

/*
 * Allocate buffer for packet reception.
 * HW returns memory address where packet is DMA'ed but not a pointer
 * into RBDR ring, so save buffer address at the start of fragment and
 * align the start address to a cache aligned address.
 */
static __inline int
nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
    bus_dmamap_t dmap, int mflags, uint32_t buf_len, bus_addr_t *rbuf)
{
	struct mbuf *mbuf;
	struct rbuf_info *rinfo;
	bus_dma_segment_t segs[1];
	int nsegs;
	int err;

	mbuf = m_getjcl(mflags, MT_DATA, M_PKTHDR, MCLBYTES);
	if (mbuf == NULL)
		return (ENOMEM);

	/*
	 * The length is equal to the actual length plus one 128-byte line
	 * used as room for the rbuf_info structure.
	 */
	mbuf->m_len = mbuf->m_pkthdr.len = buf_len;

	err = bus_dmamap_load_mbuf_sg(rbdr->rbdr_buff_dmat, dmap, mbuf, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to map mbuf into DMA visible memory, err: %d\n",
		    err);
		m_freem(mbuf);
		bus_dmamap_destroy(rbdr->rbdr_buff_dmat, dmap);
		return (err);
	}
	if (nsegs != 1)
		panic("Unexpected number of DMA segments for RB: %d", nsegs);
	/*
	 * Now use the room for rbuf_info structure
	 * and adjust mbuf data and length.
	 */
	rinfo = (struct rbuf_info *)mbuf->m_data;
	m_adj(mbuf, NICVF_RCV_BUF_ALIGN_BYTES);

	rinfo->dmat = rbdr->rbdr_buff_dmat;
	rinfo->dmap = dmap;
	rinfo->mbuf = mbuf;

	*rbuf = segs[0].ds_addr + NICVF_RCV_BUF_ALIGN_BYTES;

	return (0);
}

/* Retrieve mbuf for received packet */
static struct mbuf *
nicvf_rb_ptr_to_mbuf(struct nicvf *nic, bus_addr_t rb_ptr)
{
	struct mbuf *mbuf;
	struct rbuf_info *rinfo;

	/* Get buffer start address and alignment offset */
	rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(rb_ptr));

	/* Now retrieve mbuf to give to stack */
	mbuf = rinfo->mbuf;
	if (__predict_false(mbuf == NULL)) {
		panic("%s: Received packet fragment with NULL mbuf",
		    device_get_nameunit(nic->dev));
	}
	/*
	 * Clear the mbuf in the descriptor to indicate
	 * that this slot is processed and free to use.
	 */
	rinfo->mbuf = NULL;

	bus_dmamap_sync(rinfo->dmat, rinfo->dmap, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(rinfo->dmat, rinfo->dmap);

	return (mbuf);
}
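
/*
 * Note: nicvf_rb_ptr_to_mbuf() above converts the physical address
 * reported by the hardware with PHYS_TO_DMAP(), i.e. it walks the kernel
 * direct map instead of tracking a virtual address per buffer.  This
 * only works because the Rx buffer DMA tag (created in nicvf_init_rbdr()
 * below with DMAP_MAX_PHYSADDR/DMAP_MIN_PHYSADDR constraints) restricts
 * buffers to direct-mapped physical memory.
 */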

/* Allocate RBDR ring and populate receive buffers */
static int
nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, int ring_len,
    int buf_size, int qidx)
{
	bus_dmamap_t dmap;
	bus_addr_t rbuf;
	struct rbdr_entry_t *desc;
	int idx;
	int err;

	/* Allocate rbdr descriptors ring */
	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
	    sizeof(struct rbdr_entry_t), NICVF_RCV_BUF_ALIGN_BYTES);
	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to create RBDR descriptors ring\n");
		return (err);
	}

	rbdr->desc = rbdr->dmem.base;
	/*
	 * Buffer size has to be in multiples of 128 bytes.
	 * Make room for metadata of size of one line (128 bytes).
	 */
	rbdr->dma_size = buf_size - NICVF_RCV_BUF_ALIGN_BYTES;
	rbdr->enable = TRUE;
	rbdr->thresh = RBDR_THRESH;
	rbdr->nic = nic;
	rbdr->idx = qidx;

	/*
	 * Create DMA tag for Rx buffers.
	 * Each map created using this tag is intended to store Rx payload for
	 * one fragment and one header structure containing rbuf_info (thus
	 * additional 128 byte line since RB must be a multiple of 128 byte
	 * cache line).
	 */
	if (buf_size > MCLBYTES) {
		device_printf(nic->dev,
		    "Buffer size too large for mbuf cluster\n");
		return (EINVAL);
	}
	err = bus_dma_tag_create(
	    bus_get_dma_tag(nic->dev),		/* parent tag */
	    NICVF_RCV_BUF_ALIGN_BYTES,		/* alignment */
	    0,					/* boundary */
	    DMAP_MAX_PHYSADDR,			/* lowaddr */
	    DMAP_MIN_PHYSADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    roundup2(buf_size, MCLBYTES),	/* maxsize */
	    1,					/* nsegments */
	    roundup2(buf_size, MCLBYTES),	/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &rbdr->rbdr_buff_dmat);		/* dmat */

	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to create busdma tag for RBDR buffers\n");
		return (err);
	}

	rbdr->rbdr_buff_dmaps = malloc(sizeof(*rbdr->rbdr_buff_dmaps) *
	    ring_len, M_NICVF, (M_WAITOK | M_ZERO));

	for (idx = 0; idx < ring_len; idx++) {
		err = bus_dmamap_create(rbdr->rbdr_buff_dmat, 0, &dmap);
		if (err != 0) {
			device_printf(nic->dev,
			    "Failed to create DMA map for RB\n");
			return (err);
		}
		rbdr->rbdr_buff_dmaps[idx] = dmap;

		err = nicvf_alloc_rcv_buffer(nic, rbdr, dmap, M_WAITOK,
		    DMA_BUFFER_LEN, &rbuf);
		if (err != 0)
			return (err);

		desc = GET_RBDR_DESC(rbdr, idx);
		desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
	}

	/* Allocate taskqueue */
	TASK_INIT(&rbdr->rbdr_task, 0, nicvf_rbdr_task, rbdr);
	TASK_INIT(&rbdr->rbdr_task_nowait, 0, nicvf_rbdr_task_nowait, rbdr);
	rbdr->rbdr_taskq = taskqueue_create_fast("nicvf_rbdr_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &rbdr->rbdr_taskq);
	taskqueue_start_threads(&rbdr->rbdr_taskq, 1, PI_NET, "%s: rbdr_taskq",
	    device_get_nameunit(nic->dev));

	return (0);
}
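
/*
 * Note: RBDR entries hold the buffer address shifted right by
 * NICVF_RCV_BUF_ALIGN (i.e. in 128-byte units), which is why
 * nicvf_free_rbdr() below shifts desc->buf_addr left by the same amount
 * before converting it back to an rbuf_info pointer.
 */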

/* Free RBDR ring and its receive buffers */
static void
nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
	struct mbuf *mbuf;
	struct queue_set *qs;
	struct rbdr_entry_t *desc;
	struct rbuf_info *rinfo;
	bus_addr_t buf_addr;
	int head, tail, idx;
	int err;

	qs = nic->qs;

	if ((qs == NULL) || (rbdr == NULL))
		return;

	rbdr->enable = FALSE;
	if (rbdr->rbdr_taskq != NULL) {
		/* Remove tasks */
		while (taskqueue_cancel(rbdr->rbdr_taskq,
		    &rbdr->rbdr_task_nowait, NULL) != 0) {
			/* Finish the nowait task first */
			taskqueue_drain(rbdr->rbdr_taskq,
			    &rbdr->rbdr_task_nowait);
		}
		taskqueue_free(rbdr->rbdr_taskq);
		rbdr->rbdr_taskq = NULL;

		while (taskqueue_cancel(taskqueue_thread,
		    &rbdr->rbdr_task, NULL) != 0) {
			/* Now finish the sleepable task */
			taskqueue_drain(taskqueue_thread, &rbdr->rbdr_task);
		}
	}

	/*
	 * Free all of the memory under the RB descriptors.
	 * There are assumptions here:
	 * 1. Corresponding RBDR is disabled
	 *    - it is safe to operate using head and tail indexes
	 * 2. All buffers that were received are properly freed by
	 *    the receive handler
	 *    - there is no need to unload DMA map and free MBUF for other
	 *      descriptors than unused ones
	 */
	if (rbdr->rbdr_buff_dmat != NULL) {
		head = rbdr->head;
		tail = rbdr->tail;
		while (head != tail) {
			desc = GET_RBDR_DESC(rbdr, head);
			buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
			rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
			bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
			mbuf = rinfo->mbuf;
			/* This will destroy everything including rinfo! */
			m_freem(mbuf);
			head++;
			head &= (rbdr->dmem.q_len - 1);
		}
		/* Free tail descriptor */
		desc = GET_RBDR_DESC(rbdr, tail);
		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
		rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
		bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
		mbuf = rinfo->mbuf;
		/* This will destroy everything including rinfo! */
		m_freem(mbuf);

		/* Destroy DMA maps */
		for (idx = 0; idx < qs->rbdr_len; idx++) {
			if (rbdr->rbdr_buff_dmaps[idx] == NULL)
				continue;
			err = bus_dmamap_destroy(rbdr->rbdr_buff_dmat,
			    rbdr->rbdr_buff_dmaps[idx]);
			KASSERT(err == 0,
			    ("%s: Could not destroy DMA map for RB, desc: %d",
			    __func__, idx));
			rbdr->rbdr_buff_dmaps[idx] = NULL;
		}

		/* Now destroy the tag */
		err = bus_dma_tag_destroy(rbdr->rbdr_buff_dmat);
		KASSERT(err == 0,
		    ("%s: Trying to destroy BUSY DMA tag", __func__));

		rbdr->head = 0;
		rbdr->tail = 0;
	}

	/* Free RBDR ring */
	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}
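
/*
 * Note on the refill path below: nicvf_refill_rbdr() runs both from the
 * "nowait" task (posted from interrupt context, mflags == M_NOWAIT) and
 * from the sleepable thread-taskqueue task (mflags == M_WAITOK), so the
 * mflags argument is propagated all the way down to mbuf allocation.
 */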

/*
 * Refill receive buffer descriptors with new buffers.
 */
static int
nicvf_refill_rbdr(struct rbdr *rbdr, int mflags)
{
	struct nicvf *nic;
	struct queue_set *qs;
	int rbdr_idx;
	int tail, qcount;
	int refill_rb_cnt;
	struct rbdr_entry_t *desc;
	bus_dmamap_t dmap;
	bus_addr_t rbuf;
	boolean_t rb_alloc_fail;
	int new_rb;

	rb_alloc_fail = TRUE;
	new_rb = 0;
	nic = rbdr->nic;
	qs = nic->qs;
	rbdr_idx = rbdr->idx;

	/* Check if it's enabled */
	if (!rbdr->enable)
		return (0);

	/* Get no of desc's to be refilled */
	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
	qcount &= 0x7FFFF;
	/* Doorbell can be ringed with a max of ring size minus 1 */
	if (qcount >= (qs->rbdr_len - 1)) {
		rb_alloc_fail = FALSE;
		goto out;
	} else
		refill_rb_cnt = qs->rbdr_len - qcount - 1;

	/* Start filling descs from tail */
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
	while (refill_rb_cnt) {
		tail++;
		tail &= (rbdr->dmem.q_len - 1);

		dmap = rbdr->rbdr_buff_dmaps[tail];
		if (nicvf_alloc_rcv_buffer(nic, rbdr, dmap, mflags,
		    DMA_BUFFER_LEN, &rbuf)) {
			/* Something went wrong. Give up. */
			break;
		}
		desc = GET_RBDR_DESC(rbdr, tail);
		desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
		refill_rb_cnt--;
		new_rb++;
	}

	/* Make sure all memory stores are done before ringing doorbell */
	wmb();

	/* Check if buffer allocation failed */
	if (refill_rb_cnt == 0)
		rb_alloc_fail = FALSE;

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
	    rbdr_idx, new_rb);
out:
	if (!rb_alloc_fail) {
		/*
		 * Re-enable RBDR interrupts only
		 * if buffer allocation was successful.
		 */
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

		return (0);
	}

	return (ENOMEM);
}
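
/*
 * The two tasks below implement a two-stage refill strategy: the nowait
 * variant runs first and must not sleep; if it cannot replenish every
 * descriptor it escalates to the sleepable variant, which allocates with
 * M_WAITOK and is therefore expected never to fail (hence the panic).
 */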

/* Refill RBs even if sleep is needed to reclaim memory */
static void
nicvf_rbdr_task(void *arg, int pending)
{
	struct rbdr *rbdr;
	int err;

	rbdr = (struct rbdr *)arg;

	err = nicvf_refill_rbdr(rbdr, M_WAITOK);
	if (__predict_false(err != 0)) {
		panic("%s: Failed to refill RBs even when sleep enabled",
		    __func__);
	}
}

/* Refill RBs as soon as possible without waiting */
static void
nicvf_rbdr_task_nowait(void *arg, int pending)
{
	struct rbdr *rbdr;
	int err;

	rbdr = (struct rbdr *)arg;

	err = nicvf_refill_rbdr(rbdr, M_NOWAIT);
	if (err != 0) {
		/*
		 * Schedule another, sleepable kernel thread
		 * that will for sure refill the buffers.
		 */
		taskqueue_enqueue(taskqueue_thread, &rbdr->rbdr_task);
	}
}

static int
nicvf_rcv_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
    struct cqe_rx_t *cqe_rx, int cqe_type)
{
	struct mbuf *mbuf;
	struct rcv_queue *rq;
	int rq_idx;
	int err = 0;

	rq_idx = cqe_rx->rq_idx;
	rq = &nic->qs->rq[rq_idx];

	/* Check for errors */
	err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
	if (err && !cqe_rx->rb_cnt)
		return (0);

	mbuf = nicvf_get_rcv_mbuf(nic, cqe_rx);
	if (mbuf == NULL) {
		dprintf(nic->dev, "Packet not received\n");
		return (0);
	}

	/* If error packet */
	if (err != 0) {
		m_freem(mbuf);
		return (0);
	}

	if (rq->lro_enabled &&
	    ((cqe_rx->l3_type == L3TYPE_IPV4) && (cqe_rx->l4_type == L4TYPE_TCP)) &&
	    (mbuf->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
		/*
		 * At this point it is known that there are no errors in the
		 * packet. Attempt to LRO enqueue. Send to stack if no resources
		 * or enqueue error.
		 */
		if ((rq->lro.lro_cnt != 0) &&
		    (tcp_lro_rx(&rq->lro, mbuf, 0) == 0))
			return (0);
	}
	/*
	 * Push this packet to the stack later to avoid
	 * unlocking completion task in the middle of work.
	 */
	err = buf_ring_enqueue(cq->rx_br, mbuf);
	if (err != 0) {
		/*
		 * Failed to enqueue this mbuf.
		 * We don't drop it, just schedule another task.
		 */
		return (err);
	}

	return (0);
}
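
/*
 * Note: mbufs queued on cq->rx_br above are handed to the network stack
 * at the end of nicvf_cq_intr_handler(), after NICVF_CMP_UNLOCK(), so
 * if_input() is never called with the completion queue lock held.
 */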

static int
nicvf_snd_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
    struct cqe_send_t *cqe_tx, int cqe_type)
{
	bus_dmamap_t dmap;
	struct mbuf *mbuf;
	struct snd_queue *sq;
	struct sq_hdr_subdesc *hdr;

	mbuf = NULL;
	sq = &nic->qs->sq[cqe_tx->sq_idx];
	/* Avoid blocking here since we hold a non-sleepable NICVF_CMP_LOCK */
	if (NICVF_TX_TRYLOCK(sq) == 0)
		return (EAGAIN);

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
		NICVF_TX_UNLOCK(sq);
		return (0);
	}

	dprintf(nic->dev,
	    "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
	    __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
	    cqe_tx->sqe_ptr, hdr->subdesc_cnt);

	dmap = (bus_dmamap_t)sq->snd_buff[cqe_tx->sqe_ptr].dmap;
	bus_dmamap_unload(sq->snd_buff_dmat, dmap);

	mbuf = (struct mbuf *)sq->snd_buff[cqe_tx->sqe_ptr].mbuf;
	if (mbuf != NULL) {
		m_freem(mbuf);
		sq->snd_buff[cqe_tx->sqe_ptr].mbuf = NULL;
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}

	nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);

	NICVF_TX_UNLOCK(sq);
	return (0);
}
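
/*
 * Note: hdr->subdesc_cnt counts only the subdescriptors that follow the
 * header, so the reclaim above passes subdesc_cnt + 1 to also return the
 * header slot itself to the free pool.
 */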

static int
nicvf_cq_intr_handler(struct nicvf *nic, uint8_t cq_idx)
{
	struct mbuf *mbuf;
	struct ifnet *ifp;
	int processed_cqe, work_done = 0, tx_done = 0;
	int cqe_count, cqe_head;
	struct queue_set *qs = nic->qs;
	struct cmp_queue *cq = &qs->cq[cq_idx];
	struct rcv_queue *rq;
	struct cqe_rx_t *cq_desc;
	struct lro_ctrl *lro;
	struct lro_entry *queued;
	int rq_idx;
	int cmp_err;

	NICVF_CMP_LOCK(cq);
	cmp_err = 0;
	processed_cqe = 0;
	/* Get no of valid CQ entries to process */
	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
	cqe_count &= CQ_CQE_COUNT;
	if (cqe_count == 0)
		goto out;

	/* Get head of the valid CQ entries */
	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
	cqe_head &= 0xFFFF;

	dprintf(nic->dev, "%s CQ%d cqe_count %d cqe_head %d\n",
	    __func__, cq_idx, cqe_count, cqe_head);
	while (processed_cqe < cqe_count) {
		/* Get the CQ descriptor */
		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
		cqe_head++;
		cqe_head &= (cq->dmem.q_len - 1);
		/* Prefetch next CQ descriptor */
		__builtin_prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));

		dprintf(nic->dev, "CQ%d cq_desc->cqe_type %d\n", cq_idx,
		    cq_desc->cqe_type);
		switch (cq_desc->cqe_type) {
		case CQE_TYPE_RX:
			cmp_err = nicvf_rcv_pkt_handler(nic, cq, cq_desc,
			    CQE_TYPE_RX);
			if (__predict_false(cmp_err != 0)) {
				/*
				 * Oops. Cannot finish now.
				 * Let's try again later.
				 */
				goto done;
			}
			work_done++;
			break;
		case CQE_TYPE_SEND:
			cmp_err = nicvf_snd_pkt_handler(nic, cq,
			    (void *)cq_desc, CQE_TYPE_SEND);
			if (__predict_false(cmp_err != 0)) {
				/*
				 * Oops. Cannot finish now.
				 * Let's try again later.
				 */
				goto done;
			}

			tx_done++;
			break;
		case CQE_TYPE_INVALID:
		case CQE_TYPE_RX_SPLIT:
		case CQE_TYPE_RX_TCP:
		case CQE_TYPE_SEND_PTP:
			/* Ignore for now */
			break;
		}
		processed_cqe++;
	}
done:
	dprintf(nic->dev,
	    "%s CQ%d processed_cqe %d work_done %d\n",
	    __func__, cq_idx, processed_cqe, work_done);

	/* Ring doorbell to inform H/W to reuse processed CQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, cq_idx, processed_cqe);

	if ((tx_done > 0) &&
	    ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0)) {
		/* Reenable TXQ if it was stopped earlier due to SQ full */
		if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
	}
out:
	/*
	 * Flush any outstanding LRO work
	 */
	rq_idx = cq_idx;
	rq = &nic->qs->rq[rq_idx];
	lro = &rq->lro;
	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}

	NICVF_CMP_UNLOCK(cq);

	ifp = nic->ifp;
	/* Push received MBUFs to the stack */
	while (!buf_ring_empty(cq->rx_br)) {
		mbuf = buf_ring_dequeue_mc(cq->rx_br);
		if (__predict_true(mbuf != NULL))
			(*ifp->if_input)(ifp, mbuf);
	}

	return (cmp_err);
}
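
/*
 * Note: a nonzero return from nicvf_cq_intr_handler() means the CQ was
 * not fully drained (e.g. the Tx lock was contended or rx_br was full);
 * nicvf_cmp_task() uses that to re-enqueue itself instead of dropping
 * the remaining completions.
 */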

/*
 * Qset error interrupt handler
 *
 * As of now only CQ errors are handled
 */
static void
nicvf_qs_err_task(void *arg, int pending)
{
	struct nicvf *nic;
	struct queue_set *qs;
	int qidx;
	uint64_t status;
	boolean_t enable = TRUE;

	nic = (struct nicvf *)arg;
	qs = nic->qs;

	/* Deactivate network interface */
	if_setdrvflagbits(nic->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);

	/* Check if it is CQ err */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
		    qidx);
		if ((status & CQ_ERR_MASK) == 0)
			continue;
		/* Process already queued CQEs and reconfig CQ */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_sq_disable(nic, qidx);
		(void)nicvf_cq_intr_handler(nic, qidx);
		nicvf_cmp_queue_config(nic, qs, qidx, enable);
		nicvf_sq_free_used_descs(nic, &qs->sq[qidx], qidx);
		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
	}

	if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
	/* Re-enable Qset error interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
}

static void
nicvf_cmp_task(void *arg, int pending)
{
	struct cmp_queue *cq;
	struct nicvf *nic;
	int cmp_err;

	cq = (struct cmp_queue *)arg;
	nic = cq->nic;

	/* Handle CQ descriptors */
	cmp_err = nicvf_cq_intr_handler(nic, cq->idx);
	if (__predict_false(cmp_err != 0)) {
		/*
		 * Schedule another thread here since we did not
		 * process the entire CQ due to Tx or Rx CQ parse error.
		 */
		taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task);
	}

	nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
	/* Reenable interrupt (previously disabled in nicvf_intr_handler()) */
	nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->idx);
}
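
/*
 * Note: the CQ interrupt was masked in nicvf_intr_handler() before
 * nicvf_cmp_task() ran; clearing the latched interrupt bit above before
 * unmasking means completions that were just serviced should not
 * immediately retrigger the handler.
 */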

/* Initialize completion queue */
static int
nicvf_init_cmp_queue(struct nicvf *nic, struct cmp_queue *cq, int q_len,
    int qidx)
{
	int err;

	/* Initialize lock */
	snprintf(cq->mtx_name, sizeof(cq->mtx_name), "%s: CQ(%d) lock",
	    device_get_nameunit(nic->dev), qidx);
	mtx_init(&cq->mtx, cq->mtx_name, NULL, MTX_DEF);

	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
	    NICVF_CQ_BASE_ALIGN_BYTES);
	if (err != 0) {
		device_printf(nic->dev,
		    "Could not allocate DMA memory for CQ\n");
		return (err);
	}

	cq->desc = cq->dmem.base;
	cq->thresh = pass1_silicon(nic->dev) ? 0 : CMP_QUEUE_CQE_THRESH;
	cq->nic = nic;
	cq->idx = qidx;
	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;

	cq->rx_br = buf_ring_alloc(CMP_QUEUE_LEN * 8, M_DEVBUF, M_WAITOK,
	    &cq->mtx);

	/* Allocate taskqueue */
	TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq);
	cq->cmp_taskq = taskqueue_create_fast("nicvf_cmp_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &cq->cmp_taskq);
	taskqueue_start_threads(&cq->cmp_taskq, 1, PI_NET, "%s: cmp_taskq(%d)",
	    device_get_nameunit(nic->dev), qidx);

	return (0);
}

static void
nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
{

	if (cq == NULL)
		return;
	/*
	 * The completion queue itself should be disabled by now
	 * (ref. nicvf_snd_queue_config()).
	 * Ensure that it is safe to disable it or panic.
	 */
	if (cq->enable)
		panic("%s: Trying to free working CQ(%d)", __func__, cq->idx);

	if (cq->cmp_taskq != NULL) {
		/* Remove task */
		while (taskqueue_cancel(cq->cmp_taskq, &cq->cmp_task, NULL) != 0)
			taskqueue_drain(cq->cmp_taskq, &cq->cmp_task);

		taskqueue_free(cq->cmp_taskq);
		cq->cmp_taskq = NULL;
	}
	/*
	 * Completion interrupt will possibly enable interrupts again
	 * so disable interrupting now after we finished processing
	 * completion task. It is safe to do so since the corresponding CQ
	 * was already disabled.
	 */
	nicvf_disable_intr(nic, NICVF_INTR_CQ, cq->idx);
	nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);

	NICVF_CMP_LOCK(cq);
	nicvf_free_q_desc_mem(nic, &cq->dmem);
	drbr_free(cq->rx_br, M_DEVBUF);
	NICVF_CMP_UNLOCK(cq);
	mtx_destroy(&cq->mtx);
	memset(cq->mtx_name, 0, sizeof(cq->mtx_name));
}

static void
nicvf_snd_task(void *arg, int pending)
{
	struct snd_queue *sq = (struct snd_queue *)arg;
	struct mbuf *mbuf;

	NICVF_TX_LOCK(sq);
	while (1) {
		mbuf = drbr_dequeue(NULL, sq->br);
		if (mbuf == NULL)
			break;

		if (nicvf_tx_mbuf_locked(sq, mbuf) != 0) {
			/* XXX ARM64TODO: Increase Tx drop counter */
			m_freem(mbuf);
			break;
		}
	}
	NICVF_TX_UNLOCK(sq);
}
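
/*
 * Note on the transmit queue setup below: the buf_ring sq->br is sized
 * q_len / MIN_SQ_DESC_PER_PKT_XMIT because every packet consumes at
 * least MIN_SQ_DESC_PER_PKT_XMIT hardware descriptors, which bounds the
 * number of packets the SQ itself could ever describe.
 */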

/* Initialize transmit queue */
static int
nicvf_init_snd_queue(struct nicvf *nic, struct snd_queue *sq, int q_len,
    int qidx)
{
	size_t i;
	int err;

	/* Initialize TX lock for this queue */
	snprintf(sq->mtx_name, sizeof(sq->mtx_name), "%s: SQ(%d) lock",
	    device_get_nameunit(nic->dev), qidx);
	mtx_init(&sq->mtx, sq->mtx_name, NULL, MTX_DEF);

	NICVF_TX_LOCK(sq);
	/* Allocate buffer ring */
	sq->br = buf_ring_alloc(q_len / MIN_SQ_DESC_PER_PKT_XMIT, M_DEVBUF,
	    M_NOWAIT, &sq->mtx);
	if (sq->br == NULL) {
		device_printf(nic->dev,
		    "ERROR: Could not set up buf ring for SQ(%d)\n", qidx);
		err = ENOMEM;
		goto error;
	}

	/* Allocate DMA memory for Tx descriptors */
	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
	    NICVF_SQ_BASE_ALIGN_BYTES);
	if (err != 0) {
		device_printf(nic->dev,
		    "Could not allocate DMA memory for SQ\n");
		goto error;
	}

	sq->desc = sq->dmem.base;
	sq->head = sq->tail = 0;
	atomic_store_rel_int(&sq->free_cnt, q_len - 1);
	sq->thresh = SND_QUEUE_THRESH;
	sq->idx = qidx;
	sq->nic = nic;

	/*
	 * Allocate DMA maps for Tx buffers
	 */

	/* Create DMA tag first */
	err = bus_dma_tag_create(
	    bus_get_dma_tag(nic->dev),		/* parent tag */
	    1,					/* alignment */
	    0,					/* boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    NICVF_TSO_MAXSIZE,			/* maxsize */
	    NICVF_TSO_NSEGS,			/* nsegments */
	    MCLBYTES,				/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sq->snd_buff_dmat);		/* dmat */

	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to create busdma tag for Tx buffers\n");
		goto error;
	}

	/* Allocate send buffers array */
	sq->snd_buff = malloc(sizeof(*sq->snd_buff) * q_len, M_NICVF,
	    (M_NOWAIT | M_ZERO));
	if (sq->snd_buff == NULL) {
		device_printf(nic->dev,
		    "Could not allocate memory for Tx buffers array\n");
		err = ENOMEM;
		goto error;
	}

	/* Now populate maps */
	for (i = 0; i < q_len; i++) {
		err = bus_dmamap_create(sq->snd_buff_dmat, 0,
		    &sq->snd_buff[i].dmap);
		if (err != 0) {
			device_printf(nic->dev,
			    "Failed to create DMA maps for Tx buffers\n");
			goto error;
		}
	}
	NICVF_TX_UNLOCK(sq);

	/* Allocate taskqueue */
	TASK_INIT(&sq->snd_task, 0, nicvf_snd_task, sq);
	sq->snd_taskq = taskqueue_create_fast("nicvf_snd_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sq->snd_taskq);
	taskqueue_start_threads(&sq->snd_taskq, 1, PI_NET, "%s: snd_taskq(%d)",
	    device_get_nameunit(nic->dev), qidx);

	return (0);
error:
	NICVF_TX_UNLOCK(sq);
	return (err);
}

static void
nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
	struct queue_set *qs = nic->qs;
	size_t i;
	int err;

	if (sq == NULL)
		return;

	if (sq->snd_taskq != NULL) {
		/* Remove task */
		while (taskqueue_cancel(sq->snd_taskq, &sq->snd_task, NULL) != 0)
			taskqueue_drain(sq->snd_taskq, &sq->snd_task);

		taskqueue_free(sq->snd_taskq);
		sq->snd_taskq = NULL;
	}

	NICVF_TX_LOCK(sq);
	if (sq->snd_buff_dmat != NULL) {
		if (sq->snd_buff != NULL) {
			for (i = 0; i < qs->sq_len; i++) {
				m_freem(sq->snd_buff[i].mbuf);
				sq->snd_buff[i].mbuf = NULL;

				bus_dmamap_unload(sq->snd_buff_dmat,
				    sq->snd_buff[i].dmap);
				err = bus_dmamap_destroy(sq->snd_buff_dmat,
				    sq->snd_buff[i].dmap);
				/*
				 * If bus_dmamap_destroy fails it can cause
				 * random panic later if the tag is also
				 * destroyed in the process.
				 */
				KASSERT(err == 0,
				    ("%s: Could not destroy DMA map for SQ",
				    __func__));
			}
		}

		free(sq->snd_buff, M_NICVF);

		err = bus_dma_tag_destroy(sq->snd_buff_dmat);
		KASSERT(err == 0,
		    ("%s: Trying to destroy BUSY DMA tag", __func__));
	}

	/* Free private driver ring for this send queue */
	if (sq->br != NULL)
		drbr_free(sq->br, M_DEVBUF);

	if (sq->dmem.base != NULL)
		nicvf_free_q_desc_mem(nic, &sq->dmem);

	NICVF_TX_UNLOCK(sq);
	/* Destroy Tx lock */
	mtx_destroy(&sq->mtx);
	memset(sq->mtx_name, 0, sizeof(sq->mtx_name));
}

static void
nicvf_reclaim_snd_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
{

	/* Disable send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
	/* Check if SQ is stopped */
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
		return;
	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
}

static void
nicvf_reclaim_rcv_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
{
	union nic_mbx mbx = {};

	/* Make sure all packets in the pipeline are written back into mem */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void
nicvf_reclaim_cmp_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
{

	/* Disable timer threshold (doesn't get reset upon CQ reset) */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	/* Disable completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
}

static void
nicvf_reclaim_rbdr(struct nicvf *nic, struct rbdr *rbdr, int qidx)
{
	uint64_t tmp, fifo_state;
	int timeout = 10;

	/* Save head and tail pointers for freeing up buffers */
	rbdr->head =
	    nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
	rbdr->tail =
	    nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;

	/*
	 * If RBDR FIFO is in 'FAIL' state then do a reset first
	 * before reclaiming.
	 */
	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
	if (((fifo_state >> 62) & 0x03) == 0x3) {
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
		    qidx, NICVF_RBDR_RESET);
	}

	/* Disable RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
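
	/*
	 * Wait for prefetched descriptors to be consumed: the loop below
	 * compares the two 32-bit halves of the PREFETCH_STATUS register,
	 * which match once no prefetched RBDR entries remain outstanding
	 * in the hardware.
	 */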
	while (1) {
		tmp = nicvf_queue_reg_read(nic,
		    NIC_QSET_RBDR_0_1_PREFETCH_STATUS, qidx);
		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
			break;

		DELAY(1000);
		timeout--;
		if (!timeout) {
			device_printf(nic->dev,
			    "Failed polling on prefetch status\n");
			return;
		}
	}
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
	    NICVF_RBDR_RESET);

	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
		return;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
}

/* Configures receive queue */
static void
nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
    int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct rcv_queue *rq;
	struct rq_cfg rq_cfg;
	struct ifnet *ifp;
	struct lro_ctrl *lro;

	ifp = nic->ifp;

	rq = &qs->rq[qidx];
	rq->enable = enable;

	lro = &rq->lro;

	/* Disable receive queue */
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);

	if (!rq->enable) {
		nicvf_reclaim_rcv_queue(nic, qs, qidx);
		/* Free LRO memory */
		tcp_lro_free(lro);
		rq->lro_enabled = FALSE;
		return;
	}

	/* Configure LRO if enabled */
	rq->lro_enabled = FALSE;
	if ((if_getcapenable(ifp) & IFCAP_LRO) != 0) {
		if (tcp_lro_init(lro) != 0) {
			device_printf(nic->dev,
			    "Failed to initialize LRO for RXQ%d\n", qidx);
		} else {
			rq->lro_enabled = TRUE;
			lro->ifp = nic->ifp;
		}
	}

	rq->cq_qs = qs->vnic_id;
	rq->cq_idx = qidx;
	rq->start_rbdr_qs = qs->vnic_id;
	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
	rq->cont_rbdr_qs = qs->vnic_id;
	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
	/* All writes of RBDR data to be loaded into L2 cache as well */
	rq->caching = 1;

	/* Send a mailbox msg to PF to config RQ */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
	mbx.rq.qs_num = qs->vnic_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
	    (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
	    (rq->cont_qs_rbdr_idx << 8) | (rq->start_rbdr_qs << 1) |
	    (rq->start_qs_rbdr_idx);
	nicvf_send_msg_to_pf(nic, &mbx);

	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
	mbx.rq.cfg = (1UL << 63) | (1UL << 62) | (qs->vnic_id << 0);
	nicvf_send_msg_to_pf(nic, &mbx);

	/*
	 * RQ drop config
	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
	 */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
	mbx.rq.cfg = (1UL << 62) | (RQ_CQ_DROP << 8);
	nicvf_send_msg_to_pf(nic, &mbx);

	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);

	/* Enable Receive queue */
	rq_cfg.ena = 1;
	rq_cfg.tcp_ena = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx,
	    *(uint64_t *)&rq_cfg);
}
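
/*
 * Note: nicvf_rcv_queue_config() above and the queue configuration
 * routines below write bitfield structs (rq_cfg, cq_cfg, sq_cfg,
 * rbdr_cfg from q_struct.h) to hardware with a *(uint64_t *)&cfg cast,
 * which relies on those structs being exactly 64 bits wide and laid out
 * to match the corresponding register format.
 */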

/* Configures completion queue */
static void
nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
    int qidx, boolean_t enable)
{
	struct cmp_queue *cq;
	struct cq_cfg cq_cfg;

	cq = &qs->cq[qidx];
	cq->enable = enable;

	if (!cq->enable) {
		nicvf_reclaim_cmp_queue(nic, qs, qidx);
		return;
	}

	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

	/* Set completion queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx,
	    (uint64_t)(cq->dmem.phys_base));

	/* Enable Completion queue */
	cq_cfg.ena = 1;
	cq_cfg.reset = 0;
	cq_cfg.caching = 0;
	cq_cfg.qsize = CMP_QSIZE;
	cq_cfg.avg_con = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(uint64_t *)&cq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx,
	    nic->cq_coalesce_usecs);
}

/* Configures transmit queue */
static void
nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx,
    boolean_t enable)
{
	union nic_mbx mbx = {};
	struct snd_queue *sq;
	struct sq_cfg sq_cfg;

	sq = &qs->sq[qidx];
	sq->enable = enable;

	if (!sq->enable) {
		nicvf_reclaim_snd_queue(nic, qs, qidx);
		return;
	}

	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

	sq->cq_qs = qs->vnic_id;
	sq->cq_idx = qidx;

	/* Send a mailbox msg to PF to config SQ */
	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
	mbx.sq.qs_num = qs->vnic_id;
	mbx.sq.sq_num = qidx;
	mbx.sq.sqs_mode = nic->sqs_mode;
	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx,
	    (uint64_t)(sq->dmem.phys_base));

	/* Enable send queue & set queue size */
	sq_cfg.ena = 1;
	sq_cfg.reset = 0;
	sq_cfg.ldwb = 0;
	sq_cfg.qsize = SND_QSIZE;
	sq_cfg.tstmp_bgx_intf = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(uint64_t *)&sq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
}

/* Configures receive buffer descriptor ring */
static void
nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, int qidx,
    boolean_t enable)
{
	struct rbdr *rbdr;
	struct rbdr_cfg rbdr_cfg;

	rbdr = &qs->rbdr[qidx];
	nicvf_reclaim_rbdr(nic, rbdr, qidx);
	if (!enable)
		return;

	/* Set descriptor base address */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx,
	    (uint64_t)(rbdr->dmem.phys_base));

	/* Enable RBDR & set queue size */
	/* Buffer size should be in multiples of 128 bytes */
	rbdr_cfg.ena = 1;
	rbdr_cfg.reset = 0;
	rbdr_cfg.ldwb = 0;
	rbdr_cfg.qsize = RBDR_SIZE;
	rbdr_cfg.avg_con = 0;
	rbdr_cfg.lines = rbdr->dma_size / 128;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
	    *(uint64_t *)&rbdr_cfg);

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, qidx,
	    qs->rbdr_len - 1);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, qidx,
	    rbdr->thresh - 1);
}
static void
nicvf_free_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs;

	qs = nic->qs;
	/*
	 * Remove QS error task first since it has to be dead
	 * to safely free completion queue tasks.
	 */
	if (qs->qs_err_taskq != NULL) {
		/* Shut down QS error tasks */
		while (taskqueue_cancel(qs->qs_err_taskq,
		    &qs->qs_err_task, NULL) != 0) {
			taskqueue_drain(qs->qs_err_taskq, &qs->qs_err_task);
		}
		taskqueue_free(qs->qs_err_taskq);
		qs->qs_err_taskq = NULL;
	}
	/* Free receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);

	/* Free completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);

	/* Free send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
}

static int
nicvf_alloc_resources(struct nicvf *nic)
{
	struct queue_set *qs = nic->qs;
	int qidx;

	/* Alloc receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
		    DMA_BUFFER_LEN, qidx))
			goto alloc_fail;
	}

	/* Alloc send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
			goto alloc_fail;
	}

	/* Alloc completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len, qidx))
			goto alloc_fail;
	}

	/* Allocate QS error taskqueue */
	TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic);
	qs->qs_err_taskq = taskqueue_create_fast("nicvf_qs_err_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &qs->qs_err_taskq);
	taskqueue_start_threads(&qs->qs_err_taskq, 1, PI_NET, "%s: qs_taskq",
	    device_get_nameunit(nic->dev));

	return (0);
alloc_fail:
	nicvf_free_resources(nic);
	return (ENOMEM);
}

int
nicvf_set_qset_resources(struct nicvf *nic)
{
	struct queue_set *qs;

	qs = malloc(sizeof(*qs), M_NICVF, (M_ZERO | M_WAITOK));
	nic->qs = qs;

	/* Set count of each queue */
	qs->rbdr_cnt = RBDR_CNT;
	/* With no RSS we stay with single RQ */
	qs->rq_cnt = 1;

	qs->sq_cnt = SND_QUEUE_CNT;
	qs->cq_cnt = CMP_QUEUE_CNT;

	/* Set queue lengths */
	qs->rbdr_len = RCV_BUF_COUNT;
	qs->sq_len = SND_QUEUE_LEN;
	qs->cq_len = CMP_QUEUE_LEN;

	nic->rx_queues = qs->rq_cnt;
	nic->tx_queues = qs->sq_cnt;

	return (0);
}
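
/*
 * Enable or disable the Qset data path. Note the ordering: bring-up
 * configures SQs, CQs and RBDRs before the RQs that feed them, while
 * shutdown disables the RQs first so that no new traffic arrives while
 * the remaining queues are torn down and freed.
 */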
int
nicvf_config_data_transfer(struct nicvf *nic, boolean_t enable)
{
	boolean_t disable = FALSE;
	struct queue_set *qs;
	int qidx;

	qs = nic->qs;
	if (qs == NULL)
		return (0);

	if (enable) {
		if (nicvf_alloc_resources(nic) != 0)
			return (ENOMEM);

		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, enable);
	} else {
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, disable);

		nicvf_free_resources(nic);
	}

	return (0);
}
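
/*
 * SQ ring bookkeeping. The descriptor count (q_len) is a power of two,
 * so head and tail advance monotonically and are wrapped with a
 * (q_len - 1) mask, while free_cnt is maintained atomically as
 * descriptors are taken and returned.
 */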
/*
 * Get a free desc from SQ;
 * returns the descriptor number. The descriptor itself is fetched
 * with GET_SQ_DESC() using that number.
 */
static __inline int
nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	int qentry;

	qentry = sq->tail;
	atomic_subtract_int(&sq->free_cnt, desc_cnt);
	sq->tail += desc_cnt;
	sq->tail &= (sq->dmem.q_len - 1);

	return (qentry);
}

/* Free descriptor back to SQ for future use */
static void
nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{

	atomic_add_int(&sq->free_cnt, desc_cnt);
	sq->head += desc_cnt;
	sq->head &= (sq->dmem.q_len - 1);
}

static __inline int
nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
{
	qentry++;
	qentry &= (sq->dmem.q_len - 1);
	return (qentry);
}

static void
nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
	uint64_t sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg |= NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
	/* Ring doorbell so that H/W restarts processing SQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
}

static void
nicvf_sq_disable(struct nicvf *nic, int qidx)
{
	uint64_t sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg &= ~NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
}
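
/*
 * Reclaim SQ descriptors that the HW has already processed. The HW
 * HEAD register stores the descriptor index shifted left by 4, hence
 * the shift back; the SW head is then walked up to the HW head,
 * unloading the DMA map and freeing the mbuf of every HEADER
 * subdescriptor encountered on the way.
 */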
static void
nicvf_sq_free_used_descs(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
	uint64_t head, tail;
	struct snd_buff *snd_buff;
	struct sq_hdr_subdesc *hdr;

	NICVF_TX_LOCK(sq);
	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
	while (sq->head != head) {
		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
			nicvf_put_sq_desc(sq, 1);
			continue;
		}
		snd_buff = &sq->snd_buff[sq->head];
		if (snd_buff->mbuf != NULL) {
			bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
			m_freem(snd_buff->mbuf);
			sq->snd_buff[sq->head].mbuf = NULL;
		}
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}
	NICVF_TX_UNLOCK(sq);
}

/*
 * Add SQ HEADER subdescriptor.
 * First subdescriptor for every send descriptor.
 */
static __inline int
nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
    int subdesc_cnt, struct mbuf *mbuf, int len)
{
	struct nicvf *nic;
	struct sq_hdr_subdesc *hdr;
	struct ether_vlan_header *eh;
#ifdef INET
	struct ip *ip;
	struct tcphdr *th;
#endif
	uint16_t etype;
	int ehdrlen, iphlen, poff;

	nic = sq->nic;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	sq->snd_buff[qentry].mbuf = mbuf;

	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
	/* Enable notification via CQE after processing SQE */
	hdr->post_cqe = 1;
	/* No. of subdescriptors following this one */
	hdr->subdesc_cnt = subdesc_cnt;
	hdr->tot_len = len;

	eh = mtod(mbuf, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		etype = ntohs(eh->evl_proto);
	} else {
		ehdrlen = ETHER_HDR_LEN;
		etype = ntohs(eh->evl_encap_proto);
	}

	switch (etype) {
#ifdef INET6
	case ETHERTYPE_IPV6:
		/* ARM64TODO: Add support for IPv6 */
		hdr->csum_l3 = 0;
		sq->snd_buff[qentry].mbuf = NULL;
		return (ENXIO);
#endif
#ifdef INET
	case ETHERTYPE_IP:
		if (mbuf->m_len < ehdrlen + sizeof(struct ip)) {
			mbuf = m_pullup(mbuf, ehdrlen + sizeof(struct ip));
			sq->snd_buff[qentry].mbuf = mbuf;
			if (mbuf == NULL)
				return (ENOBUFS);
		}

		ip = (struct ip *)(mbuf->m_data + ehdrlen);
		ip->ip_sum = 0;
		iphlen = ip->ip_hl << 2;
		poff = ehdrlen + iphlen;
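
		/*
		 * Checksum offload: csum_l3 asks the HW to compute the IP
		 * header checksum (the field was cleared above) and csum_l4
		 * selects the L4 checksum engine; m_pullup() is used so the
		 * header being inspected is contiguous in the first mbuf.
		 */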
		if (mbuf->m_pkthdr.csum_flags != 0) {
			hdr->csum_l3 = 1; /* Enable IP csum calculation */
			switch (ip->ip_p) {
			case IPPROTO_TCP:
				if ((mbuf->m_pkthdr.csum_flags & CSUM_TCP) == 0)
					break;

				if (mbuf->m_len < (poff + sizeof(struct tcphdr))) {
					mbuf = m_pullup(mbuf, poff + sizeof(struct tcphdr));
					sq->snd_buff[qentry].mbuf = mbuf;
					if (mbuf == NULL)
						return (ENOBUFS);
				}
				hdr->csum_l4 = SEND_L4_CSUM_TCP;
				break;
			case IPPROTO_UDP:
				if ((mbuf->m_pkthdr.csum_flags & CSUM_UDP) == 0)
					break;

				if (mbuf->m_len < (poff + sizeof(struct udphdr))) {
					mbuf = m_pullup(mbuf, poff + sizeof(struct udphdr));
					sq->snd_buff[qentry].mbuf = mbuf;
					if (mbuf == NULL)
						return (ENOBUFS);
				}
				hdr->csum_l4 = SEND_L4_CSUM_UDP;
				break;
			case IPPROTO_SCTP:
				if ((mbuf->m_pkthdr.csum_flags & CSUM_SCTP) == 0)
					break;

				if (mbuf->m_len < (poff + sizeof(struct sctphdr))) {
					mbuf = m_pullup(mbuf, poff + sizeof(struct sctphdr));
					sq->snd_buff[qentry].mbuf = mbuf;
					if (mbuf == NULL)
						return (ENOBUFS);
				}
				hdr->csum_l4 = SEND_L4_CSUM_SCTP;
				break;
			default:
				break;
			}
			hdr->l3_offset = ehdrlen;
			hdr->l4_offset = ehdrlen + iphlen;
		}

		if ((mbuf->m_pkthdr.tso_segsz != 0) && nic->hw_tso) {
			/*
			 * Extract ip again as m_data could have been modified.
			 */
			ip = (struct ip *)(mbuf->m_data + ehdrlen);
			th = (struct tcphdr *)((caddr_t)ip + iphlen);

			hdr->tso = 1;
			hdr->tso_start = ehdrlen + iphlen + (th->th_off * 4);
			hdr->tso_max_paysize = mbuf->m_pkthdr.tso_segsz;
			hdr->inner_l3_offset = ehdrlen - 2;
			nic->drv_stats.tx_tso++;
		}
		break;
#endif
	default:
		hdr->csum_l3 = 0;
	}

	return (0);
}

/*
 * SQ GATHER subdescriptor
 * Must follow HDR descriptor
 */
static inline void
nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
    int size, uint64_t data)
{
	struct sq_gather_subdesc *gather;

	qentry &= (sq->dmem.q_len - 1);
	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);

	memset(gather, 0, SND_QUEUE_DESC_SIZE);
	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
	gather->size = size;
	gather->addr = data;
}

/* Put an mbuf to an SQ for packet transfer. */
int
nicvf_tx_mbuf_locked(struct snd_queue *sq, struct mbuf *mbuf)
{
	bus_dma_segment_t segs[256];
	struct nicvf *nic;
	struct snd_buff *snd_buff;
	size_t seg;
	int nsegs, qentry;
	int subdesc_cnt;
	int err;

	NICVF_TX_LOCK_ASSERT(sq);

	if (sq->free_cnt == 0)
		return (ENOBUFS);

	snd_buff = &sq->snd_buff[sq->tail];

	err = bus_dmamap_load_mbuf_sg(sq->snd_buff_dmat, snd_buff->dmap,
	    mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err != 0) {
		/* ARM64TODO: Add mbuf defragmenting if we lack maps */
		return (err);
	}

	/* Set how many subdescriptors are required */
	nic = sq->nic;
	if (mbuf->m_pkthdr.tso_segsz != 0 && nic->hw_tso)
		subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
	else
		subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT + nsegs - 1;

	if (subdesc_cnt > sq->free_cnt) {
		/* ARM64TODO: Add mbuf defragmentation if we lack descriptors */
		bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
		return (ENOBUFS);
	}

	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

	/* Add SQ header subdesc */
	err = nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, mbuf,
	    mbuf->m_pkthdr.len);
	if (err != 0) {
		bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
		return (err);
	}

	/* Add SQ gather subdescs */
	for (seg = 0; seg < nsegs; seg++) {
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		nicvf_sq_add_gather_subdesc(sq, qentry, segs[seg].ds_len,
		    segs[seg].ds_addr);
	}

	/* Make sure all memory stores are done before ringing doorbell */
	bus_dmamap_sync(sq->dmem.dmat, sq->dmem.dmap, BUS_DMASYNC_PREWRITE);

	dprintf(sq->nic->dev, "%s: sq->idx: %d, subdesc_cnt: %d\n",
	    __func__, sq->idx, subdesc_cnt);
	/* Inform HW to xmit new packet */
	nicvf_queue_reg_write(sq->nic, NIC_QSET_SQ_0_7_DOOR,
	    sq->idx, subdesc_cnt);
	return (0);
}
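
/*
 * On big-endian hosts the four 16-bit rb_lens entries that share one
 * 64-bit CQE word are seen in reverse order; frag_num() remaps the
 * index accordingly and is a no-op on little-endian hosts.
 */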
static __inline u_int
frag_num(u_int i)
{
#if BYTE_ORDER == BIG_ENDIAN
	return ((i & ~3) + 3 - (i & 3));
#else
	return (i);
#endif
}

/* Returns MBUF for a received packet */
struct mbuf *
nicvf_get_rcv_mbuf(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	int frag;
	int payload_len = 0;
	struct mbuf *mbuf;
	struct mbuf *mbuf_frag;
	uint16_t *rb_lens = NULL;
	uint64_t *rb_ptrs = NULL;

	mbuf = NULL;
	rb_lens = (uint16_t *)((uint8_t *)cqe_rx + (3 * sizeof(uint64_t)));
	rb_ptrs = (uint64_t *)((uint8_t *)cqe_rx + (6 * sizeof(uint64_t)));
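
	/*
	 * Receive buffer lengths are packed as 16-bit values starting at
	 * the fourth 64-bit word of the CQE; the buffer pointers follow
	 * from the seventh 64-bit word, one per fragment.
	 */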
	dprintf(nic->dev, "%s rb_cnt %d rb0_ptr %lx rb0_sz %d\n",
	    __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);

	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
		payload_len = rb_lens[frag_num(frag)];
		if (frag == 0) {
			/* First fragment */
			mbuf = nicvf_rb_ptr_to_mbuf(nic,
			    (*rb_ptrs - cqe_rx->align_pad));
			mbuf->m_len = payload_len;
			mbuf->m_data += cqe_rx->align_pad;
			if_setrcvif(mbuf, nic->ifp);
		} else {
			/* Add fragments */
			mbuf_frag = nicvf_rb_ptr_to_mbuf(nic, *rb_ptrs);
			m_append(mbuf, payload_len, mbuf_frag->m_data);
			m_freem(mbuf_frag);
		}
		/* Next buffer pointer */
		rb_ptrs++;
	}

	if (__predict_true(mbuf != NULL)) {
		m_fixhdr(mbuf);
		mbuf->m_pkthdr.flowid = cqe_rx->rq_idx;
		M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
		if (__predict_true((if_getcapenable(nic->ifp) &
		    IFCAP_RXCSUM) != 0)) {
			/*
			 * HW by default verifies IP & TCP/UDP/SCTP checksums
			 */

			/* XXX: Do we need to include IP with options too? */
			if (__predict_true(cqe_rx->l3_type == L3TYPE_IPV4 ||
			    cqe_rx->l3_type == L3TYPE_IPV6)) {
				mbuf->m_pkthdr.csum_flags =
				    (CSUM_IP_CHECKED | CSUM_IP_VALID);
			}
			if (cqe_rx->l4_type == L4TYPE_TCP ||
			    cqe_rx->l4_type == L4TYPE_UDP ||
			    cqe_rx->l4_type == L4TYPE_SCTP) {
				mbuf->m_pkthdr.csum_flags |=
				    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
				mbuf->m_pkthdr.csum_data = htons(0xffff);
			}
		}
	}

	return (mbuf);
}

/* Enable interrupt */
void
nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	uint64_t reg_val;

	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		device_printf(nic->dev,
		    "Failed to enable interrupt: unknown type\n");
		break;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
}
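
/*
 * NIC_VF_ENA_W1S and NIC_VF_ENA_W1C form the usual write-1-to-set /
 * write-1-to-clear pair, so enabling and disabling touch only the bits
 * selected for the given interrupt type and queue.
 */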
/* Disable interrupt */
void
nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	uint64_t reg_val = 0;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		device_printf(nic->dev,
		    "Failed to disable interrupt: unknown type\n");
		break;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
}

/* Clear interrupt */
void
nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
	uint64_t reg_val = 0;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val = (1UL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val = (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val = (1UL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val = (1UL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		device_printf(nic->dev,
		    "Failed to clear interrupt: unknown type\n");
		break;
	}

	nicvf_reg_write(nic, NIC_VF_INT, reg_val);
}

/* Check if interrupt is enabled */
int
nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
	uint64_t reg_val;
	uint64_t mask = 0xff;

	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);

	switch (int_type) {
	case NICVF_INTR_CQ:
		mask = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		mask = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		mask = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		mask = NICVF_INTR_PKT_DROP_MASK;
		break;
	case NICVF_INTR_TCP_TIMER:
		mask = NICVF_INTR_TCP_TIMER_MASK;
		break;
	case NICVF_INTR_MBOX:
		mask = NICVF_INTR_MBOX_MASK;
		break;
	case NICVF_INTR_QS_ERR:
		mask = NICVF_INTR_QS_ERR_MASK;
		break;
	default:
		device_printf(nic->dev,
		    "Failed to check interrupt enable: unknown type\n");
		break;
	}

	return (reg_val & mask);
}
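
/*
 * Per-queue statistics live in register pairs addressed by the queue
 * number; the reg argument (scaled to an 8-byte offset via the << 3)
 * selects the octet or packet counter of the pair.
 */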
void
nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
{
	struct rcv_queue *rq;

#define GET_RQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
	    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	rq = &nic->qs->rq[rq_idx];
	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
}

void
nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
{
	struct snd_queue *sq;

#define GET_SQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
	    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	sq = &nic->qs->sq[sq_idx];
	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
}

/* Check for errors in the receive completion queue entry */
int
nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cmp_queue *cq,
    struct cqe_rx_t *cqe_rx)
{
	struct nicvf_hw_stats *stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;

	if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
		drv_stats->rx_frames_ok++;
		return (0);
	}

	switch (cqe_rx->err_opcode) {
	case CQ_RX_ERROP_RE_PARTIAL:
		stats->rx_bgx_truncated_pkts++;
		break;
	case CQ_RX_ERROP_RE_JABBER:
		stats->rx_jabber_errs++;
		break;
	case CQ_RX_ERROP_RE_FCS:
		stats->rx_fcs_errs++;
		break;
	case CQ_RX_ERROP_RE_RX_CTL:
		stats->rx_bgx_errs++;
		break;
	case CQ_RX_ERROP_PREL2_ERR:
		stats->rx_prel2_errs++;
		break;
	case CQ_RX_ERROP_L2_MAL:
		stats->rx_l2_hdr_malformed++;
		break;
	case CQ_RX_ERROP_L2_OVERSIZE:
		stats->rx_oversize++;
		break;
	case CQ_RX_ERROP_L2_UNDERSIZE:
		stats->rx_undersize++;
		break;
	case CQ_RX_ERROP_L2_LENMISM:
		stats->rx_l2_len_mismatch++;
		break;
	case CQ_RX_ERROP_L2_PCLP:
		stats->rx_l2_pclp++;
		break;
	case CQ_RX_ERROP_IP_NOT:
		stats->rx_ip_ver_errs++;
		break;
	case CQ_RX_ERROP_IP_CSUM_ERR:
		stats->rx_ip_csum_errs++;
		break;
	case CQ_RX_ERROP_IP_MAL:
		stats->rx_ip_hdr_malformed++;
		break;
	case CQ_RX_ERROP_IP_MALD:
		stats->rx_ip_payload_malformed++;
		break;
	case CQ_RX_ERROP_IP_HOP:
		stats->rx_ip_ttl_errs++;
		break;
	case CQ_RX_ERROP_L3_PCLP:
		stats->rx_l3_pclp++;
		break;
	case CQ_RX_ERROP_L4_MAL:
		stats->rx_l4_malformed++;
		break;
	case CQ_RX_ERROP_L4_CHK:
		stats->rx_l4_csum_errs++;
		break;
	case CQ_RX_ERROP_UDP_LEN:
		stats->rx_udp_len_errs++;
		break;
	case CQ_RX_ERROP_L4_PORT:
		stats->rx_l4_port_errs++;
		break;
	case CQ_RX_ERROP_TCP_FLAG:
		stats->rx_tcp_flag_errs++;
		break;
	case CQ_RX_ERROP_TCP_OFFSET:
		stats->rx_tcp_offset_errs++;
		break;
	case CQ_RX_ERROP_L4_PCLP:
		stats->rx_l4_pclp++;
		break;
	case CQ_RX_ERROP_RBDR_TRUNC:
		stats->rx_truncated_pkts++;
		break;
	}

	return (1);
}

/* Check for errors in the send completion queue entry */
int
nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cmp_queue *cq,
    struct cqe_send_t *cqe_tx)
{
	struct cmp_queue_stats *stats = &cq->stats;

	switch (cqe_tx->send_status) {
	case CQ_TX_ERROP_GOOD:
		stats->tx.good++;
		return (0);
	case CQ_TX_ERROP_DESC_FAULT:
		stats->tx.desc_fault++;
		break;
	case CQ_TX_ERROP_HDR_CONS_ERR:
		stats->tx.hdr_cons_err++;
		break;
	case CQ_TX_ERROP_SUBDC_ERR:
		stats->tx.subdesc_err++;
		break;
	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
		stats->tx.imm_size_oflow++;
		break;
	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
		stats->tx.data_seq_err++;
		break;
	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
		stats->tx.mem_seq_err++;
		break;
	case CQ_TX_ERROP_LOCK_VIOL:
		stats->tx.lock_viol++;
		break;
	case CQ_TX_ERROP_DATA_FAULT:
		stats->tx.data_fault++;
		break;
	case CQ_TX_ERROP_TSTMP_CONFLICT:
		stats->tx.tstmp_conflict++;
		break;
	case CQ_TX_ERROP_TSTMP_TIMEOUT:
		stats->tx.tstmp_timeout++;
		break;
	case CQ_TX_ERROP_MEM_FAULT:
		stats->tx.mem_fault++;
		break;
	case CQ_TX_ERROP_CK_OVERLAP:
		stats->tx.csum_overlap++;
		break;
	case CQ_TX_ERROP_CK_OFLOW:
		stats->tx.csum_overflow++;
		break;
	}

	return (1);
}