/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/vnic/nicvf_queues.c 296032 2016-02-25 14:17:13Z zbb $
 *
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/vnic/nicvf_queues.c 296032 2016-02-25 14:17:13Z zbb $");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/buf_ring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sockio.h>
#include <sys/socket.h>
#include <sys/stdatomic.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/vmparam.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/ifq.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/sctp.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet/udp.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "thunder_bgx.h"
#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "nicvf_queues.h"

#define	DEBUG
#undef	DEBUG

#ifdef DEBUG
#define	dprintf(dev, fmt, ...)	device_printf(dev, fmt, ##__VA_ARGS__)
#else
#define	dprintf(dev, fmt, ...)
#endif

MALLOC_DECLARE(M_NICVF);

static void nicvf_free_snd_queue(struct nicvf *, struct snd_queue *);
static int nicvf_tx_mbuf_locked(struct snd_queue *, struct mbuf *);
static struct mbuf * nicvf_get_rcv_mbuf(struct nicvf *, struct cqe_rx_t *);
static void nicvf_sq_disable(struct nicvf *, int);
static void nicvf_sq_enable(struct nicvf *, struct snd_queue *, int);
static void nicvf_put_sq_desc(struct snd_queue *, int);
static void nicvf_cmp_queue_config(struct nicvf *, struct queue_set *, int,
    boolean_t);
static void nicvf_sq_free_used_descs(struct nicvf *, struct snd_queue *, int);

static void nicvf_rbdr_task(void *, int);
static void nicvf_rbdr_task_nowait(void *, int);

struct rbuf_info {
	bus_dma_tag_t	dmat;
	bus_dmamap_t	dmap;
	struct mbuf *	mbuf;
};

#define	GET_RBUF_INFO(x) ((struct rbuf_info *)((x) - NICVF_RCV_BUF_ALIGN_BYTES))

/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
			  uint64_t reg, int bit_pos, int bits, int val)
{
	uint64_t bit_mask;
	uint64_t reg_val;
	int timeout = 10;

	bit_mask = (1UL << bits) - 1;
	bit_mask = (bit_mask << bit_pos);

	while (timeout) {
		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
		if (((reg_val & bit_mask) >> bit_pos) == val)
			return (0);

		DELAY(1000);
		timeout--;
	}
	device_printf(nic->dev, "Poll on reg 0x%lx failed\n", reg);
	return (ETIMEDOUT);
}

/* Callback for bus_dmamap_load() */
static void
nicvf_dmamap_q_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr;

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	paddr = arg;
	*paddr = segs->ds_addr;
}

/* Allocate memory for a queue's descriptors */
static int
nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
    int q_len, int desc_size, int align_bytes)
{
	int err, err_dmat;

	/* Create DMA tag first */
	err = bus_dma_tag_create(
	    bus_get_dma_tag(nic->dev),		/* parent tag */
	    align_bytes,			/* alignment */
	    0,					/* boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    (q_len * desc_size),		/* maxsize */
	    1,					/* nsegments */
	    (q_len * desc_size),		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &dmem->dmat);			/* dmat */

	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to create busdma tag for descriptors ring\n");
		return (err);
	}

	/* Allocate a segment of contiguous DMA-safe memory */
	err = bus_dmamem_alloc(
	    dmem->dmat,				/* DMA tag */
	    &dmem->base,			/* virtual address */
	    (BUS_DMA_NOWAIT | BUS_DMA_ZERO),	/* flags */
	    &dmem->dmap);			/* DMA map */
	if (err != 0) {
		device_printf(nic->dev, "Failed to allocate DMA safe memory for "
		    "descriptors ring\n");
		goto dmamem_fail;
	}

	err = bus_dmamap_load(
	    dmem->dmat,
	    dmem->dmap,
	    dmem->base,
	    (q_len * desc_size),		/* allocation size */
	    nicvf_dmamap_q_cb,			/* map to DMA address cb. */
	    &dmem->phys_base,			/* physical address */
	    BUS_DMA_NOWAIT);
	if (err != 0) {
		device_printf(nic->dev,
		    "Cannot load DMA map of descriptors ring\n");
		goto dmamap_fail;
	}

	dmem->q_len = q_len;
	dmem->size = (desc_size * q_len);

	return (0);

dmamap_fail:
	bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
	dmem->phys_base = 0;
dmamem_fail:
	err_dmat = bus_dma_tag_destroy(dmem->dmat);
	dmem->base = NULL;
	KASSERT(err_dmat == 0,
	    ("%s: Trying to destroy BUSY DMA tag", __func__));

	return (err);
}

/* Free queue's descriptor memory */
static void
nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
	int err;

	if ((dmem == NULL) || (dmem->base == NULL))
		return;

	/* Unload a map */
	bus_dmamap_sync(dmem->dmat, dmem->dmap, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(dmem->dmat, dmem->dmap);
	/* Free DMA memory */
	bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
	/* Destroy DMA tag */
	err = bus_dma_tag_destroy(dmem->dmat);

	KASSERT(err == 0,
	    ("%s: Trying to destroy BUSY DMA tag", __func__));

	dmem->phys_base = 0;
	dmem->base = NULL;
}

/*
 * Allocate buffer for packet reception
 * HW returns memory address where packet is DMA'ed but not a pointer
 * into RBDR ring, so save buffer address at the start of fragment and
 * align the start address to a cache aligned address
 */
static __inline int
nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
    bus_dmamap_t dmap, int mflags, uint32_t buf_len, bus_addr_t *rbuf)
{
	struct mbuf *mbuf;
	struct rbuf_info *rinfo;
	bus_dma_segment_t segs[1];
	int nsegs;
	int err;
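	/*
	 * Editor's sketch of the receive buffer layout that the code below
	 * sets up (NICVF_RCV_BUF_ALIGN_BYTES is one 128-byte cache line):
	 *
	 *   mbuf data start
	 *   [ struct rbuf_info | packet data ... ]
	 *                      ^ this address (rbuf) is handed to the HW
	 *
	 * GET_RBUF_INFO() later recovers rinfo by stepping one cache line
	 * back from the address that the HW reports in the CQE.
	 */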

	mbuf = m_getjcl(mflags, MT_DATA, M_PKTHDR, MCLBYTES);
	if (mbuf == NULL)
		return (ENOMEM);

	/*
	 * The length is equal to the actual length + one 128b line
	 * used as room for the rbuf_info structure.
	 */
	mbuf->m_len = mbuf->m_pkthdr.len = buf_len;

	err = bus_dmamap_load_mbuf_sg(rbdr->rbdr_buff_dmat, dmap, mbuf, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to map mbuf into DMA visible memory, err: %d\n",
		    err);
		m_freem(mbuf);
		bus_dmamap_destroy(rbdr->rbdr_buff_dmat, dmap);
		return (err);
	}
	if (nsegs != 1)
		panic("Unexpected number of DMA segments for RB: %d", nsegs);
	/*
	 * Now use the room for the rbuf_info structure
	 * and adjust the mbuf data and length.
	 */
	rinfo = (struct rbuf_info *)mbuf->m_data;
	m_adj(mbuf, NICVF_RCV_BUF_ALIGN_BYTES);

	rinfo->dmat = rbdr->rbdr_buff_dmat;
	rinfo->dmap = dmap;
	rinfo->mbuf = mbuf;

	*rbuf = segs[0].ds_addr + NICVF_RCV_BUF_ALIGN_BYTES;

	return (0);
}

/* Retrieve mbuf for received packet */
static struct mbuf *
nicvf_rb_ptr_to_mbuf(struct nicvf *nic, bus_addr_t rb_ptr)
{
	struct mbuf *mbuf;
	struct rbuf_info *rinfo;

	/* Get buffer start address and alignment offset */
	rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(rb_ptr));

	/* Now retrieve mbuf to give to stack */
	mbuf = rinfo->mbuf;
	if (__predict_false(mbuf == NULL)) {
		panic("%s: Received packet fragment with NULL mbuf",
		    device_get_nameunit(nic->dev));
	}
	/*
	 * Clear the mbuf in the descriptor to indicate
	 * that this slot is processed and free to use.
	 */
	rinfo->mbuf = NULL;

	bus_dmamap_sync(rinfo->dmat, rinfo->dmap, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(rinfo->dmat, rinfo->dmap);

	return (mbuf);
}

/* Allocate RBDR ring and populate receive buffers */
static int
nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, int ring_len,
    int buf_size, int qidx)
{
	bus_dmamap_t dmap;
	bus_addr_t rbuf;
	struct rbdr_entry_t *desc;
	int idx;
	int err;

	/* Allocate rbdr descriptors ring */
	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
	    sizeof(struct rbdr_entry_t), NICVF_RCV_BUF_ALIGN_BYTES);
	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to create RBDR descriptors ring\n");
		return (err);
	}

	rbdr->desc = rbdr->dmem.base;
	/*
	 * Buffer size has to be in multiples of 128 bytes.
	 * Make room for metadata of the size of one line (128 bytes).
	 */
	rbdr->dma_size = buf_size - NICVF_RCV_BUF_ALIGN_BYTES;
	rbdr->enable = TRUE;
	rbdr->thresh = RBDR_THRESH;
	rbdr->nic = nic;
	rbdr->idx = qidx;

	/*
	 * Create DMA tag for Rx buffers.
	 * Each map created using this tag is intended to store Rx payload for
	 * one fragment and one header structure containing rbuf_info (thus
	 * an additional 128-byte line, since the RB must be a multiple of the
	 * 128-byte cache line).
	 */
	if (buf_size > MCLBYTES) {
		device_printf(nic->dev,
		    "Buffer size too large for mbuf cluster\n");
		return (EINVAL);
	}
	err = bus_dma_tag_create(
	    bus_get_dma_tag(nic->dev),		/* parent tag */
	    NICVF_RCV_BUF_ALIGN_BYTES,		/* alignment */
	    0,					/* boundary */
	    DMAP_MAX_PHYSADDR,			/* lowaddr */
	    DMAP_MIN_PHYSADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    roundup2(buf_size, MCLBYTES),	/* maxsize */
	    1,					/* nsegments */
	    roundup2(buf_size, MCLBYTES),	/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &rbdr->rbdr_buff_dmat);		/* dmat */

	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to create busdma tag for RBDR buffers\n");
		return (err);
	}

	rbdr->rbdr_buff_dmaps = malloc(sizeof(*rbdr->rbdr_buff_dmaps) *
	    ring_len, M_NICVF, (M_WAITOK | M_ZERO));

	for (idx = 0; idx < ring_len; idx++) {
		err = bus_dmamap_create(rbdr->rbdr_buff_dmat, 0, &dmap);
		if (err != 0) {
			device_printf(nic->dev,
			    "Failed to create DMA map for RB\n");
			return (err);
		}
		rbdr->rbdr_buff_dmaps[idx] = dmap;

		err = nicvf_alloc_rcv_buffer(nic, rbdr, dmap, M_WAITOK,
		    DMA_BUFFER_LEN, &rbuf);
		if (err != 0)
			return (err);

		desc = GET_RBDR_DESC(rbdr, idx);
		desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
	}

	/* Allocate taskqueue */
	TASK_INIT(&rbdr->rbdr_task, 0, nicvf_rbdr_task, rbdr);
	TASK_INIT(&rbdr->rbdr_task_nowait, 0, nicvf_rbdr_task_nowait, rbdr);
	rbdr->rbdr_taskq = taskqueue_create_fast("nicvf_rbdr_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &rbdr->rbdr_taskq);
	taskqueue_start_threads(&rbdr->rbdr_taskq, 1, PI_NET, "%s: rbdr_taskq",
	    device_get_nameunit(nic->dev));

	return (0);
}

/* Free RBDR ring and its receive buffers */
static void
nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
	struct mbuf *mbuf;
	struct queue_set *qs;
	struct rbdr_entry_t *desc;
	struct rbuf_info *rinfo;
	bus_addr_t buf_addr;
	int head, tail, idx;
	int err;

	qs = nic->qs;

	if ((qs == NULL) || (rbdr == NULL))
		return;

	rbdr->enable = FALSE;
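	/*
	 * Editor's note: clearing rbdr->enable first makes a concurrently
	 * running refill bail out early (nicvf_refill_rbdr() checks this
	 * flag), so the task cancel/drain below cannot race with new
	 * buffer allocations.
	 */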
	if (rbdr->rbdr_taskq != NULL) {
		/* Remove tasks */
		while (taskqueue_cancel(rbdr->rbdr_taskq,
		    &rbdr->rbdr_task_nowait, NULL) != 0) {
			/* Finish the nowait task first */
			taskqueue_drain(rbdr->rbdr_taskq,
			    &rbdr->rbdr_task_nowait);
		}
		taskqueue_free(rbdr->rbdr_taskq);
		rbdr->rbdr_taskq = NULL;

		while (taskqueue_cancel(taskqueue_thread,
		    &rbdr->rbdr_task, NULL) != 0) {
			/* Now finish the sleepable task */
			taskqueue_drain(taskqueue_thread, &rbdr->rbdr_task);
		}
	}

	/*
	 * Free all of the memory under the RB descriptors.
	 * There are assumptions here:
	 * 1. Corresponding RBDR is disabled
	 *    - it is safe to operate using head and tail indexes
	 * 2. All buffers that were received are properly freed by
	 *    the receive handler
	 *    - there is no need to unload DMA map and free mbuf for other
	 *      descriptors than unused ones
	 */
	if (rbdr->rbdr_buff_dmat != NULL) {
		head = rbdr->head;
		tail = rbdr->tail;
		while (head != tail) {
			desc = GET_RBDR_DESC(rbdr, head);
			buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
			rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
			bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
			mbuf = rinfo->mbuf;
			/* This will destroy everything including rinfo! */
			m_freem(mbuf);
			head++;
			head &= (rbdr->dmem.q_len - 1);
		}
		/* Free tail descriptor */
		desc = GET_RBDR_DESC(rbdr, tail);
		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
		rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
		bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
		mbuf = rinfo->mbuf;
		/* This will destroy everything including rinfo! */
		m_freem(mbuf);

		/* Destroy DMA maps */
		for (idx = 0; idx < qs->rbdr_len; idx++) {
			if (rbdr->rbdr_buff_dmaps[idx] == NULL)
				continue;
			err = bus_dmamap_destroy(rbdr->rbdr_buff_dmat,
			    rbdr->rbdr_buff_dmaps[idx]);
			KASSERT(err == 0,
			    ("%s: Could not destroy DMA map for RB, desc: %d",
			    __func__, idx));
			rbdr->rbdr_buff_dmaps[idx] = NULL;
		}

		/* Now destroy the tag */
		err = bus_dma_tag_destroy(rbdr->rbdr_buff_dmat);
		KASSERT(err == 0,
		    ("%s: Trying to destroy BUSY DMA tag", __func__));

		rbdr->head = 0;
		rbdr->tail = 0;
	}

	/* Free RBDR ring */
	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}

/*
 * Refill receive buffer descriptors with new buffers.
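 * Called with M_NOWAIT from the fast task and with M_WAITOK from the
 * sleepable fallback task; returns ENOMEM if any descriptor could not
 * be refilled, so that the caller can reschedule (editor's summary).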
 */
static int
nicvf_refill_rbdr(struct rbdr *rbdr, int mflags)
{
	struct nicvf *nic;
	struct queue_set *qs;
	int rbdr_idx;
	int tail, qcount;
	int refill_rb_cnt;
	struct rbdr_entry_t *desc;
	bus_dmamap_t dmap;
	bus_addr_t rbuf;
	boolean_t rb_alloc_fail;
	int new_rb;

	rb_alloc_fail = TRUE;
	new_rb = 0;
	nic = rbdr->nic;
	qs = nic->qs;
	rbdr_idx = rbdr->idx;

	/* Check if it's enabled */
	if (!rbdr->enable)
		return (0);

	/* Get the number of descriptors to be refilled */
	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
	qcount &= 0x7FFFF;
	/* Doorbell can be rung with a max of ring size minus 1 */
	if (qcount >= (qs->rbdr_len - 1)) {
		rb_alloc_fail = FALSE;
		goto out;
	} else
		refill_rb_cnt = qs->rbdr_len - qcount - 1;

	/* Start filling descs from tail */
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
	while (refill_rb_cnt) {
		tail++;
		tail &= (rbdr->dmem.q_len - 1);

		dmap = rbdr->rbdr_buff_dmaps[tail];
		if (nicvf_alloc_rcv_buffer(nic, rbdr, dmap, mflags,
		    DMA_BUFFER_LEN, &rbuf)) {
			/* Something went wrong. Give up. */
			break;
		}
		desc = GET_RBDR_DESC(rbdr, tail);
		desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
		refill_rb_cnt--;
		new_rb++;
	}

	/* Make sure all memory stores are done before ringing the doorbell */
	wmb();

	/* Check if buffer allocation failed */
	if (refill_rb_cnt == 0)
		rb_alloc_fail = FALSE;

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
	    rbdr_idx, new_rb);
out:
	if (!rb_alloc_fail) {
		/*
		 * Re-enable RBDR interrupts only
		 * if buffer allocation was successful.
		 */
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

		return (0);
	}

	return (ENOMEM);
}

/* Refill RBs even if sleep is needed to reclaim memory */
static void
nicvf_rbdr_task(void *arg, int pending)
{
	struct rbdr *rbdr;
	int err;

	rbdr = (struct rbdr *)arg;

	err = nicvf_refill_rbdr(rbdr, M_WAITOK);
	if (__predict_false(err != 0)) {
		panic("%s: Failed to refill RBs even when sleep enabled",
		    __func__);
	}
}

/* Refill RBs as soon as possible without waiting */
static void
nicvf_rbdr_task_nowait(void *arg, int pending)
{
	struct rbdr *rbdr;
	int err;

	rbdr = (struct rbdr *)arg;

	err = nicvf_refill_rbdr(rbdr, M_NOWAIT);
	if (err != 0) {
		/*
		 * Schedule another, sleepable kernel thread
		 * that is guaranteed to refill the buffers.
		 */
		taskqueue_enqueue(taskqueue_thread, &rbdr->rbdr_task);
	}
}

static int
nicvf_rcv_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
    struct cqe_rx_t *cqe_rx, int cqe_type)
{
	struct mbuf *mbuf;
	struct rcv_queue *rq;
	int rq_idx;
	int err = 0;

	rq_idx = cqe_rx->rq_idx;
	rq = &nic->qs->rq[rq_idx];

	/* Check for errors */
	err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
	if (err && !cqe_rx->rb_cnt)
		return (0);

	mbuf = nicvf_get_rcv_mbuf(nic, cqe_rx);
	if (mbuf == NULL) {
		dprintf(nic->dev, "Packet not received\n");
		return (0);
	}

	/* If error packet */
	if (err != 0) {
		m_freem(mbuf);
		return (0);
	}

	if (rq->lro_enabled &&
	    ((cqe_rx->l3_type == L3TYPE_IPV4) && (cqe_rx->l4_type == L4TYPE_TCP)) &&
	    (mbuf->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
		/*
		 * At this point it is known that there are no errors in the
		 * packet. Attempt to LRO enqueue. Send to stack if no resources
		 * or enqueue error.
		 */
		if ((rq->lro.lro_cnt != 0) &&
		    (tcp_lro_rx(&rq->lro, mbuf, 0) == 0))
			return (0);
	}
	/*
	 * Push this packet to the stack later to avoid
	 * unlocking the completion task in the middle of work.
	 */
	err = buf_ring_enqueue(cq->rx_br, mbuf);
	if (err != 0) {
		/*
		 * Failed to enqueue this mbuf.
		 * We don't drop it, just schedule another task.
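		 * (Editor's note: the non-zero return makes
		 * nicvf_cq_intr_handler() stop early and nicvf_cmp_task()
		 * re-enqueue itself, so this CQE is revisited later.)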
		 */
		return (err);
	}

	return (0);
}

static int
nicvf_snd_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
    struct cqe_send_t *cqe_tx, int cqe_type)
{
	bus_dmamap_t dmap;
	struct mbuf *mbuf;
	struct snd_queue *sq;
	struct sq_hdr_subdesc *hdr;

	mbuf = NULL;
	sq = &nic->qs->sq[cqe_tx->sq_idx];
	/* Avoid blocking here since we hold a non-sleepable NICVF_CMP_LOCK */
	if (NICVF_TX_TRYLOCK(sq) == 0)
		return (EAGAIN);

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
		NICVF_TX_UNLOCK(sq);
		return (0);
	}

	dprintf(nic->dev,
	    "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
	    __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
	    cqe_tx->sqe_ptr, hdr->subdesc_cnt);

	dmap = (bus_dmamap_t)sq->snd_buff[cqe_tx->sqe_ptr].dmap;
	bus_dmamap_unload(sq->snd_buff_dmat, dmap);

	mbuf = (struct mbuf *)sq->snd_buff[cqe_tx->sqe_ptr].mbuf;
	if (mbuf != NULL) {
		m_freem(mbuf);
		sq->snd_buff[cqe_tx->sqe_ptr].mbuf = NULL;
	}

	nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
	nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);

	NICVF_TX_UNLOCK(sq);
	return (0);
}

static int
nicvf_cq_intr_handler(struct nicvf *nic, uint8_t cq_idx)
{
	struct mbuf *mbuf;
	struct ifnet *ifp;
	int processed_cqe, work_done = 0, tx_done = 0;
	int cqe_count, cqe_head;
	struct queue_set *qs = nic->qs;
	struct cmp_queue *cq = &qs->cq[cq_idx];
	struct rcv_queue *rq;
	struct cqe_rx_t *cq_desc;
	struct lro_ctrl *lro;
	struct lro_entry *queued;
	int rq_idx;
	int cmp_err;

	NICVF_CMP_LOCK(cq);
	cmp_err = 0;
	processed_cqe = 0;
	/* Get the number of valid CQ entries to process */
	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
	cqe_count &= CQ_CQE_COUNT;
	if (cqe_count == 0)
		goto out;

	/* Get head of the valid CQ entries */
	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
	cqe_head &= 0xFFFF;

	dprintf(nic->dev, "%s CQ%d cqe_count %d cqe_head %d\n",
	    __func__, cq_idx, cqe_count, cqe_head);
	while (processed_cqe < cqe_count) {
		/* Get the CQ descriptor */
		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
		cqe_head++;
		cqe_head &= (cq->dmem.q_len - 1);
		/* Prefetch next CQ descriptor */
		__builtin_prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));

		dprintf(nic->dev, "CQ%d cq_desc->cqe_type %d\n", cq_idx,
		    cq_desc->cqe_type);
		switch (cq_desc->cqe_type) {
		case CQE_TYPE_RX:
			cmp_err = nicvf_rcv_pkt_handler(nic, cq, cq_desc,
			    CQE_TYPE_RX);
			if (__predict_false(cmp_err != 0)) {
				/*
				 * Oops. Cannot finish now.
				 * Let's try again later.
				 */
				goto done;
			}
			work_done++;
			break;
		case CQE_TYPE_SEND:
			cmp_err = nicvf_snd_pkt_handler(nic, cq,
			    (void *)cq_desc, CQE_TYPE_SEND);
			if (__predict_false(cmp_err != 0)) {
				/*
				 * Oops. Cannot finish now.
				 * Let's try again later.
				 */
				goto done;
			}

			tx_done++;
			break;
		case CQE_TYPE_INVALID:
		case CQE_TYPE_RX_SPLIT:
		case CQE_TYPE_RX_TCP:
		case CQE_TYPE_SEND_PTP:
			/* Ignore for now */
			break;
		}
		processed_cqe++;
	}
done:
	dprintf(nic->dev,
	    "%s CQ%d processed_cqe %d work_done %d\n",
	    __func__, cq_idx, processed_cqe, work_done);

	/* Ring doorbell to inform H/W to reuse processed CQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, cq_idx, processed_cqe);

	if ((tx_done > 0) &&
	    ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0)) {
		/* Reenable TXQ if it was stopped earlier due to SQ full */
		if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
	}
out:
	/*
	 * Flush any outstanding LRO work
	 */
	rq_idx = cq_idx;
	rq = &nic->qs->rq[rq_idx];
	lro = &rq->lro;
	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}

	NICVF_CMP_UNLOCK(cq);

	ifp = nic->ifp;
	/* Push received MBUFs to the stack */
	while (!buf_ring_empty(cq->rx_br)) {
		mbuf = buf_ring_dequeue_mc(cq->rx_br);
		if (__predict_true(mbuf != NULL))
			(*ifp->if_input)(ifp, mbuf);
	}

	return (cmp_err);
}

/*
 * Qset error interrupt handler
 *
 * As of now only CQ errors are handled
 */
static void
nicvf_qs_err_task(void *arg, int pending)
{
	struct nicvf *nic;
	struct queue_set *qs;
	int qidx;
	uint64_t status;
	boolean_t enable = TRUE;

	nic = (struct nicvf *)arg;
	qs = nic->qs;

	/* Deactivate network interface */
	if_setdrvflagbits(nic->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);

	/* Check if it is a CQ error */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
		    qidx);
		if ((status & CQ_ERR_MASK) == 0)
			continue;
		/* Process already queued CQEs and reconfig CQ */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_sq_disable(nic, qidx);
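		/*
		 * Editor's note: the recovery sequence below drains the
		 * CQEs that are already queued, reconfigures the CQ,
		 * releases SQ descriptors that completed while the queue
		 * was stopped, and only then re-enables the SQ and the
		 * CQ interrupt.
		 */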
		(void)nicvf_cq_intr_handler(nic, qidx);
		nicvf_cmp_queue_config(nic, qs, qidx, enable);
		nicvf_sq_free_used_descs(nic, &qs->sq[qidx], qidx);
		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
	}

	if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
	/* Re-enable Qset error interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
}

static void
nicvf_cmp_task(void *arg, int pending)
{
	uint64_t cq_head;
	struct cmp_queue *cq;
	struct nicvf *nic;
	int cmp_err;

	cq = (struct cmp_queue *)arg;
	nic = cq->nic;

	/* Handle CQ descriptors */
	cmp_err = nicvf_cq_intr_handler(nic, cq->idx);
	/* Re-enable interrupts */
	cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq->idx);
	nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD, cq->idx, cq_head);

	if (__predict_false(cmp_err != 0)) {
		/*
		 * Schedule another thread here since we did not
		 * process the entire CQ due to a Tx or Rx CQ parse error.
		 */
		taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task);
	}

	/* Reenable interrupt (previously disabled in nicvf_intr_handler()) */
	nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->idx);
}

/* Initialize completion queue */
static int
nicvf_init_cmp_queue(struct nicvf *nic, struct cmp_queue *cq, int q_len,
    int qidx)
{
	int err;

	/* Initialize lock */
	snprintf(cq->mtx_name, sizeof(cq->mtx_name), "%s: CQ(%d) lock",
	    device_get_nameunit(nic->dev), qidx);
	mtx_init(&cq->mtx, cq->mtx_name, NULL, MTX_DEF);

	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
	    NICVF_CQ_BASE_ALIGN_BYTES);

	if (err != 0) {
		device_printf(nic->dev,
		    "Could not allocate DMA memory for CQ\n");
		return (err);
	}

	cq->desc = cq->dmem.base;
	cq->thresh = CMP_QUEUE_CQE_THRESH;
	cq->nic = nic;
	cq->idx = qidx;
	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;

	cq->rx_br = buf_ring_alloc(CMP_QUEUE_LEN * 8, M_DEVBUF, M_WAITOK,
	    &cq->mtx);

	/* Allocate taskqueue */
	TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq);
	cq->cmp_taskq = taskqueue_create_fast("nicvf_cmp_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &cq->cmp_taskq);
	taskqueue_start_threads(&cq->cmp_taskq, 1, PI_NET, "%s: cmp_taskq(%d)",
	    device_get_nameunit(nic->dev), qidx);

	return (0);
}

static void
nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
{

	if (cq == NULL)
		return;
	/*
	 * The completion queue itself should be disabled by now
	 * (ref. nicvf_snd_queue_config()).
	 * Ensure that it is safe to disable it or panic.
	 */
	if (cq->enable)
		panic("%s: Trying to free working CQ(%d)", __func__, cq->idx);

	if (cq->cmp_taskq != NULL) {
		/* Remove task */
		while (taskqueue_cancel(cq->cmp_taskq, &cq->cmp_task, NULL) != 0)
			taskqueue_drain(cq->cmp_taskq, &cq->cmp_task);

		taskqueue_free(cq->cmp_taskq);
		cq->cmp_taskq = NULL;
	}
	/*
	 * The completion interrupt will possibly enable interrupts again
	 * so disable interrupting now, after we finished processing the
	 * completion task. It is safe to do so since the corresponding CQ
	 * was already disabled.
	 */
	nicvf_disable_intr(nic, NICVF_INTR_CQ, cq->idx);
	nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);

	NICVF_CMP_LOCK(cq);
	nicvf_free_q_desc_mem(nic, &cq->dmem);
	drbr_free(cq->rx_br, M_DEVBUF);
	NICVF_CMP_UNLOCK(cq);
	mtx_destroy(&cq->mtx);
	memset(cq->mtx_name, 0, sizeof(cq->mtx_name));
}

static void
nicvf_snd_task(void *arg, int pending)
{
	struct snd_queue *sq = (struct snd_queue *)arg;
	struct mbuf *mbuf;

	NICVF_TX_LOCK(sq);
	while (1) {
		mbuf = drbr_dequeue(NULL, sq->br);
		if (mbuf == NULL)
			break;

		if (nicvf_tx_mbuf_locked(sq, mbuf) != 0) {
			/* XXX ARM64TODO: Increase Tx drop counter */
			m_freem(mbuf);
			break;
		}
	}
	NICVF_TX_UNLOCK(sq);
}

/* Initialize transmit queue */
static int
nicvf_init_snd_queue(struct nicvf *nic, struct snd_queue *sq, int q_len,
    int qidx)
{
	size_t i;
	int err;

	/* Initialize TX lock for this queue */
	snprintf(sq->mtx_name, sizeof(sq->mtx_name), "%s: SQ(%d) lock",
	    device_get_nameunit(nic->dev), qidx);
	mtx_init(&sq->mtx, sq->mtx_name, NULL, MTX_DEF);

	NICVF_TX_LOCK(sq);
	/* Allocate buffer ring */
	sq->br = buf_ring_alloc(q_len / MIN_SQ_DESC_PER_PKT_XMIT, M_DEVBUF,
	    M_NOWAIT, &sq->mtx);
	if (sq->br == NULL) {
		device_printf(nic->dev,
		    "ERROR: Could not set up buf ring for SQ(%d)\n", qidx);
		err = ENOMEM;
		goto error;
	}

	/* Allocate DMA memory for Tx descriptors */
	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
	    NICVF_SQ_BASE_ALIGN_BYTES);
	if (err != 0) {
		device_printf(nic->dev,
		    "Could not allocate DMA memory for SQ\n");
		goto error;
	}

	sq->desc = sq->dmem.base;
	sq->head = sq->tail = 0;
	atomic_store_rel_int(&sq->free_cnt, q_len - 1);
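	/*
	 * Editor's note: free_cnt starts at q_len - 1 rather than q_len;
	 * keeping one descriptor permanently unused presumably lets a full
	 * ring be told apart from an empty one, matching the "ring size
	 * minus 1" doorbell limit noted in nicvf_refill_rbdr().
	 */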
	sq->thresh = SND_QUEUE_THRESH;
	sq->idx = qidx;
	sq->nic = nic;

	/*
	 * Allocate DMA maps for Tx buffers
	 */

	/* Create DMA tag first */
	err = bus_dma_tag_create(
	    bus_get_dma_tag(nic->dev),		/* parent tag */
	    1,					/* alignment */
	    0,					/* boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    NICVF_TXBUF_MAXSIZE,		/* maxsize */
	    NICVF_TXBUF_NSEGS,			/* nsegments */
	    MCLBYTES,				/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sq->snd_buff_dmat);		/* dmat */

	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to create busdma tag for Tx buffers\n");
		goto error;
	}

	/* Allocate send buffers array */
	sq->snd_buff = malloc(sizeof(*sq->snd_buff) * q_len, M_NICVF,
	    (M_NOWAIT | M_ZERO));
	if (sq->snd_buff == NULL) {
		device_printf(nic->dev,
		    "Could not allocate memory for Tx buffers array\n");
		err = ENOMEM;
		goto error;
	}

	/* Now populate maps */
	for (i = 0; i < q_len; i++) {
		err = bus_dmamap_create(sq->snd_buff_dmat, 0,
		    &sq->snd_buff[i].dmap);
		if (err != 0) {
			device_printf(nic->dev,
			    "Failed to create DMA maps for Tx buffers\n");
			goto error;
		}
	}
	NICVF_TX_UNLOCK(sq);

	/* Allocate taskqueue */
	TASK_INIT(&sq->snd_task, 0, nicvf_snd_task, sq);
	sq->snd_taskq = taskqueue_create_fast("nicvf_snd_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sq->snd_taskq);
	taskqueue_start_threads(&sq->snd_taskq, 1, PI_NET, "%s: snd_taskq(%d)",
	    device_get_nameunit(nic->dev), qidx);

	return (0);
error:
	NICVF_TX_UNLOCK(sq);
	return (err);
}

static void
nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
	struct queue_set *qs = nic->qs;
	size_t i;
	int err;

	if (sq == NULL)
		return;

	if (sq->snd_taskq != NULL) {
		/* Remove task */
		while (taskqueue_cancel(sq->snd_taskq, &sq->snd_task, NULL) != 0)
			taskqueue_drain(sq->snd_taskq, &sq->snd_task);

		taskqueue_free(sq->snd_taskq);
		sq->snd_taskq = NULL;
	}

	NICVF_TX_LOCK(sq);
	if (sq->snd_buff_dmat != NULL) {
		if (sq->snd_buff != NULL) {
			for (i = 0; i < qs->sq_len; i++) {
				m_freem(sq->snd_buff[i].mbuf);
				sq->snd_buff[i].mbuf = NULL;

				bus_dmamap_unload(sq->snd_buff_dmat,
				    sq->snd_buff[i].dmap);
				err = bus_dmamap_destroy(sq->snd_buff_dmat,
				    sq->snd_buff[i].dmap);
				/*
				 * If bus_dmamap_destroy fails it can cause
				 * a random panic later if the tag is also
				 * destroyed in the process.
				 */
				KASSERT(err == 0,
				    ("%s: Could not destroy DMA map for SQ",
				    __func__));
			}
		}

		free(sq->snd_buff, M_NICVF);

		err = bus_dma_tag_destroy(sq->snd_buff_dmat);
		KASSERT(err == 0,
		    ("%s: Trying to destroy BUSY DMA tag", __func__));
	}

	/* Free private driver ring for this send queue */
	if (sq->br != NULL)
		drbr_free(sq->br, M_DEVBUF);

	if (sq->dmem.base != NULL)
		nicvf_free_q_desc_mem(nic, &sq->dmem);

	NICVF_TX_UNLOCK(sq);
	/* Destroy Tx lock */
	mtx_destroy(&sq->mtx);
	memset(sq->mtx_name, 0, sizeof(sq->mtx_name));
}

static void
nicvf_reclaim_snd_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
{

	/* Disable send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
	/* Check if SQ is stopped */
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
		return;
	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
}

static void
nicvf_reclaim_rcv_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
{
	union nic_mbx mbx = {};

	/* Make sure all packets in the pipeline are written back into mem */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void
nicvf_reclaim_cmp_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
{

	/* Disable timer threshold (doesn't get reset upon CQ reset) */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	/* Disable completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
}

static void
nicvf_reclaim_rbdr(struct nicvf *nic, struct rbdr *rbdr, int qidx)
{
	uint64_t tmp, fifo_state;
	int timeout = 10;

	/* Save head and tail pointers for freeing up buffers */
	rbdr->head =
	    nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
	rbdr->tail =
	    nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;

	/*
	 * If the RBDR FIFO is in the 'FAIL' state then do a reset first
	 * before reclaiming.
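	 * (The FIFO state is the 2-bit field at bits 63:62 of STATUS0;
	 * the checks below read (fifo_state >> 62) & 0x03 and poll the
	 * same field for 0x00 and 0x02. Editor's note.)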
	 */
	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
	if (((fifo_state >> 62) & 0x03) == 0x3) {
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
		    qidx, NICVF_RBDR_RESET);
	}

	/* Disable RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
	while (1) {
		tmp = nicvf_queue_reg_read(nic,
		    NIC_QSET_RBDR_0_1_PREFETCH_STATUS, qidx);
		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
			break;

		DELAY(1000);
		timeout--;
		if (!timeout) {
			device_printf(nic->dev,
			    "Failed polling on prefetch status\n");
			return;
		}
	}
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
	    NICVF_RBDR_RESET);

	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
		return;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
}

/* Configures receive queue */
static void
nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
    int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct rcv_queue *rq;
	struct rq_cfg rq_cfg;
	struct ifnet *ifp;
	struct lro_ctrl *lro;

	ifp = nic->ifp;

	rq = &qs->rq[qidx];
	rq->enable = enable;

	lro = &rq->lro;

	/* Disable receive queue */
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);

	if (!rq->enable) {
		nicvf_reclaim_rcv_queue(nic, qs, qidx);
		/* Free LRO memory */
		tcp_lro_free(lro);
		rq->lro_enabled = FALSE;
		return;
	}

	/* Configure LRO if enabled */
	rq->lro_enabled = FALSE;
	if ((if_getcapenable(ifp) & IFCAP_LRO) != 0) {
		if (tcp_lro_init(lro) != 0) {
			device_printf(nic->dev,
			    "Failed to initialize LRO for RXQ%d\n", qidx);
		} else {
			rq->lro_enabled = TRUE;
			lro->ifp = nic->ifp;
		}
	}

	rq->cq_qs = qs->vnic_id;
	rq->cq_idx = qidx;
	rq->start_rbdr_qs = qs->vnic_id;
	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
	rq->cont_rbdr_qs = qs->vnic_id;
	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
	/* All writes of RBDR data to be loaded into L2 cache as well */
	rq->caching = 1;

	/* Send a mailbox msg to PF to config RQ */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
	mbx.rq.qs_num = qs->vnic_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
	    (rq->cq_idx << 16) |
	    (rq->cont_rbdr_qs << 9) |
	    (rq->cont_qs_rbdr_idx << 8) | (rq->start_rbdr_qs << 1) |
	    (rq->start_qs_rbdr_idx);
	nicvf_send_msg_to_pf(nic, &mbx);

	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
	mbx.rq.cfg = (1UL << 63) | (1UL << 62) | (qs->vnic_id << 0);
	nicvf_send_msg_to_pf(nic, &mbx);

	/*
	 * RQ drop config
	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
	 */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
	mbx.rq.cfg = (1UL << 62) | (RQ_CQ_DROP << 8);
	nicvf_send_msg_to_pf(nic, &mbx);

	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);

	/* Enable Receive queue */
	rq_cfg.ena = 1;
	rq_cfg.tcp_ena = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx,
	    *(uint64_t *)&rq_cfg);
}

/* Configures completion queue */
static void
nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
    int qidx, boolean_t enable)
{
	struct cmp_queue *cq;
	struct cq_cfg cq_cfg;

	cq = &qs->cq[qidx];
	cq->enable = enable;

	if (!cq->enable) {
		nicvf_reclaim_cmp_queue(nic, qs, qidx);
		return;
	}

	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

	/* Set completion queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx,
	    (uint64_t)(cq->dmem.phys_base));

	/* Enable Completion queue */
	cq_cfg.ena = 1;
	cq_cfg.reset = 0;
	cq_cfg.caching = 0;
	cq_cfg.qsize = CMP_QSIZE;
	cq_cfg.avg_con = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(uint64_t *)&cq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx,
	    nic->cq_coalesce_usecs);
}

/* Configures transmit queue */
static void
nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx,
    boolean_t enable)
{
	union nic_mbx mbx = {};
	struct snd_queue *sq;
	struct sq_cfg sq_cfg;

	sq = &qs->sq[qidx];
	sq->enable = enable;

	if (!sq->enable) {
		nicvf_reclaim_snd_queue(nic, qs, qidx);
		return;
	}

	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

	sq->cq_qs = qs->vnic_id;
	sq->cq_idx = qidx;

	/* Send a mailbox msg to PF to config SQ */
	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
	mbx.sq.qs_num = qs->vnic_id;
	mbx.sq.sq_num = qidx;
	mbx.sq.sqs_mode = nic->sqs_mode;
	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx,
	    (uint64_t)(sq->dmem.phys_base));

	/* Enable send queue & set queue size */
	sq_cfg.ena = 1;
	sq_cfg.reset = 0;
	sq_cfg.ldwb = 0;
	sq_cfg.qsize = SND_QSIZE;
	sq_cfg.tstmp_bgx_intf = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(uint64_t *)&sq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
}

/* Configures receive buffer descriptor ring */
static void
nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, int qidx,
    boolean_t enable)
{
	struct rbdr *rbdr;
	struct rbdr_cfg rbdr_cfg;

	rbdr = &qs->rbdr[qidx];
	nicvf_reclaim_rbdr(nic, rbdr, qidx);
	if (!enable)
		return;

	/* Set descriptor base address */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx,
	    (uint64_t)(rbdr->dmem.phys_base));

	/* Enable RBDR & set queue size */
	/* Buffer size should be in multiples of 128 bytes */
	rbdr_cfg.ena = 1;
	rbdr_cfg.reset = 0;
	rbdr_cfg.ldwb = 0;
	rbdr_cfg.qsize = RBDR_SIZE;
	rbdr_cfg.avg_con = 0;
	rbdr_cfg.lines = rbdr->dma_size / 128;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
	    *(uint64_t *)&rbdr_cfg);

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, qidx,
	    qs->rbdr_len - 1);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, qidx,
	    rbdr->thresh - 1);
}

/* Requests PF to assign and enable Qset */
void
nicvf_qset_config(struct nicvf *nic, boolean_t enable)
{
	union nic_mbx mbx = {};
	struct queue_set *qs;
	struct qs_cfg *qs_cfg;

	qs = nic->qs;
	if (qs == NULL) {
		device_printf(nic->dev,
		    "Qset is still not allocated, don't init queues\n");
		return;
	}

	qs->enable = enable;
	qs->vnic_id = nic->vf_id;

	/* Send a mailbox msg to PF to config Qset */
	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
	mbx.qs.num = qs->vnic_id;

	mbx.qs.cfg = 0;
	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
	if (qs->enable) {
		qs_cfg->ena = 1;
		qs_cfg->vnic = qs->vnic_id;
	}
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void
nicvf_free_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs;

static void
nicvf_free_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs;

	qs = nic->qs;
	/*
	 * Remove QS error task first since it has to be dead
	 * to safely free completion queue tasks.
	 */
	if (qs->qs_err_taskq != NULL) {
		/* Shut down QS error tasks */
		while (taskqueue_cancel(qs->qs_err_taskq,
		    &qs->qs_err_task, NULL) != 0) {
			taskqueue_drain(qs->qs_err_taskq, &qs->qs_err_task);
		}
		taskqueue_free(qs->qs_err_taskq);
		qs->qs_err_taskq = NULL;
	}
	/* Free receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);

	/* Free completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);

	/* Free send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
}

static int
nicvf_alloc_resources(struct nicvf *nic)
{
	struct queue_set *qs = nic->qs;
	int qidx;

	/* Alloc receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
		    DMA_BUFFER_LEN, qidx))
			goto alloc_fail;
	}

	/* Alloc send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
			goto alloc_fail;
	}

	/* Alloc completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len, qidx))
			goto alloc_fail;
	}

	/* Allocate QS error taskqueue */
	TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic);
	qs->qs_err_taskq = taskqueue_create_fast("nicvf_qs_err_taskq",
	    M_WAITOK, taskqueue_thread_enqueue, &qs->qs_err_taskq);
	taskqueue_start_threads(&qs->qs_err_taskq, 1, PI_NET, "%s: qs_taskq",
	    device_get_nameunit(nic->dev));

	return (0);
alloc_fail:
	nicvf_free_resources(nic);
	return (ENOMEM);
}
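
/*
 * The QS error task teardown above uses the standard taskqueue(9)
 * pattern: taskqueue_cancel() returns non-zero while the task is still
 * running, so the task is drained until cancellation succeeds, after
 * which the queue can be freed safely.  Isolated sketch of the idiom
 * (names are illustrative only):
 */
#if 0
static void
example_taskq_destroy(struct taskqueue *tq, struct task *task)
{

	/* Loop until the handler is guaranteed not to be running. */
	while (taskqueue_cancel(tq, task, NULL) != 0)
		taskqueue_drain(tq, task);
	taskqueue_free(tq);
}
#endif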

int
nicvf_set_qset_resources(struct nicvf *nic)
{
	struct queue_set *qs;

	qs = malloc(sizeof(*qs), M_NICVF, (M_ZERO | M_WAITOK));
	nic->qs = qs;

	/* Set count of each queue */
	qs->rbdr_cnt = RBDR_CNT;
	/* With no RSS we stay with single RQ */
	qs->rq_cnt = 1;

	qs->sq_cnt = SND_QUEUE_CNT;
	qs->cq_cnt = CMP_QUEUE_CNT;

	/* Set queue lengths */
	qs->rbdr_len = RCV_BUF_COUNT;
	qs->sq_len = SND_QUEUE_LEN;
	qs->cq_len = CMP_QUEUE_LEN;

	nic->rx_queues = qs->rq_cnt;
	nic->tx_queues = qs->sq_cnt;

	return (0);
}

int
nicvf_config_data_transfer(struct nicvf *nic, boolean_t enable)
{
	boolean_t disable = FALSE;
	struct queue_set *qs;
	int qidx;

	qs = nic->qs;
	if (qs == NULL)
		return (0);

	if (enable) {
		if (nicvf_alloc_resources(nic) != 0)
			return (ENOMEM);

		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, enable);
	} else {
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, disable);

		nicvf_free_resources(nic);
	}

	return (0);
}

/*
 * Get a free descriptor from the SQ.
 * Returns the descriptor number.
 */
static __inline int
nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	int qentry;

	qentry = sq->tail;
	atomic_subtract_int(&sq->free_cnt, desc_cnt);
	sq->tail += desc_cnt;
	sq->tail &= (sq->dmem.q_len - 1);

	return (qentry);
}

/* Free descriptor back to SQ for future use */
static void
nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{

	atomic_add_int(&sq->free_cnt, desc_cnt);
	sq->head += desc_cnt;
	sq->head &= (sq->dmem.q_len - 1);
}

static __inline int
nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
{

	qentry++;
	qentry &= (sq->dmem.q_len - 1);
	return (qentry);
}

static void
nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
	uint64_t sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg |= NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
	/* Ring doorbell so that H/W restarts processing SQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
}
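
/*
 * The head/tail updates above use "index & (q_len - 1)" as a cheap
 * modulo, which is only correct when dmem.q_len is a power of two
 * (assumed throughout this driver for the queue length constants).
 * Minimal illustration with made-up values:
 */
#if 0
static __inline int
example_ring_advance(int idx, int incr, int q_len)
{

	KASSERT(powerof2(q_len), ("ring size must be a power of 2"));
	/* e.g. example_ring_advance(1022, 4, 1024) == 2, i.e. it wraps. */
	return ((idx + incr) & (q_len - 1));
}
#endif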

static void
nicvf_sq_disable(struct nicvf *nic, int qidx)
{
	uint64_t sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg &= ~NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
}

static void
nicvf_sq_free_used_descs(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
	uint64_t head, tail;
	struct snd_buff *snd_buff;
	struct sq_hdr_subdesc *hdr;

	NICVF_TX_LOCK(sq);
	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
	while (sq->head != head) {
		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
			nicvf_put_sq_desc(sq, 1);
			continue;
		}
		snd_buff = &sq->snd_buff[sq->head];
		if (snd_buff->mbuf != NULL) {
			bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
			m_freem(snd_buff->mbuf);
			sq->snd_buff[sq->head].mbuf = NULL;
		}
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}
	NICVF_TX_UNLOCK(sq);
}
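
/*
 * nicvf_sq_free_used_descs() above walks from the software head to the
 * hardware head read back from NIC_QSET_SQ_0_7_HEAD.  The number of
 * descriptors pending reclamation can be computed with the same
 * wrap-around arithmetic; a sketch, assuming the power-of-two ring
 * length used throughout this file (the helper is hypothetical):
 */
#if 0
static __inline int
example_sq_pending(int sw_head, int hw_head, int q_len)
{

	/* Distance from software head to hardware head, with wrap. */
	return ((hw_head - sw_head) & (q_len - 1));
}
#endif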

/*
 * Add SQ HEADER subdescriptor.
 * First subdescriptor for every send descriptor.
 */
static __inline int
nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
    int subdesc_cnt, struct mbuf *mbuf, int len)
{
	struct sq_hdr_subdesc *hdr;
	struct ether_vlan_header *eh;
#ifdef INET
	struct ip *ip;
#endif
	uint16_t etype;
	int ehdrlen, iphlen, poff;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	sq->snd_buff[qentry].mbuf = mbuf;

	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
	/* Enable notification via CQE after processing SQE */
	hdr->post_cqe = 1;
	/* Number of subdescriptors following this one */
	hdr->subdesc_cnt = subdesc_cnt;
	hdr->tot_len = len;

	if (mbuf->m_pkthdr.csum_flags != 0) {
		hdr->csum_l3 = 1; /* Enable IP csum calculation */

		eh = mtod(mbuf, struct ether_vlan_header *);
		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
			ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
			etype = ntohs(eh->evl_proto);
		} else {
			ehdrlen = ETHER_HDR_LEN;
			etype = ntohs(eh->evl_encap_proto);
		}

		if (mbuf->m_len < ehdrlen + sizeof(struct ip)) {
			mbuf = m_pullup(mbuf, ehdrlen + sizeof(struct ip));
			sq->snd_buff[qentry].mbuf = mbuf;
			if (mbuf == NULL)
				return (ENOBUFS);
		}

		switch (etype) {
#ifdef INET6
		case ETHERTYPE_IPV6:
			/* ARM64TODO: Add support for IPv6 */
			hdr->csum_l3 = 0;
			sq->snd_buff[qentry].mbuf = NULL;
			return (ENXIO);
#endif
#ifdef INET
		case ETHERTYPE_IP:
			ip = (struct ip *)(mbuf->m_data + ehdrlen);
			ip->ip_sum = 0;
			iphlen = ip->ip_hl << 2;
			poff = ehdrlen + iphlen;

			switch (ip->ip_p) {
			case IPPROTO_TCP:
				if ((mbuf->m_pkthdr.csum_flags & CSUM_TCP) == 0)
					break;

				if (mbuf->m_len < (poff + sizeof(struct tcphdr))) {
					mbuf = m_pullup(mbuf, poff + sizeof(struct tcphdr));
					sq->snd_buff[qentry].mbuf = mbuf;
					if (mbuf == NULL)
						return (ENOBUFS);
				}
				hdr->csum_l4 = SEND_L4_CSUM_TCP;
				break;
			case IPPROTO_UDP:
				if ((mbuf->m_pkthdr.csum_flags & CSUM_UDP) == 0)
					break;

				if (mbuf->m_len < (poff + sizeof(struct udphdr))) {
					mbuf = m_pullup(mbuf, poff + sizeof(struct udphdr));
					sq->snd_buff[qentry].mbuf = mbuf;
					if (mbuf == NULL)
						return (ENOBUFS);
				}
				hdr->csum_l4 = SEND_L4_CSUM_UDP;
				break;
			case IPPROTO_SCTP:
				if ((mbuf->m_pkthdr.csum_flags & CSUM_SCTP) == 0)
					break;

				if (mbuf->m_len < (poff + sizeof(struct sctphdr))) {
					mbuf = m_pullup(mbuf, poff + sizeof(struct sctphdr));
					sq->snd_buff[qentry].mbuf = mbuf;
					if (mbuf == NULL)
						return (ENOBUFS);
				}
				hdr->csum_l4 = SEND_L4_CSUM_SCTP;
				break;
			default:
				break;
			}
			break;
#endif
		default:
			hdr->csum_l3 = 0;
			return (0);
		}

		hdr->l3_offset = ehdrlen;
		hdr->l4_offset = ehdrlen + iphlen;
	} else
		hdr->csum_l3 = 0;

	return (0);
}

/*
 * SQ GATHER subdescriptor
 * Must follow HDR descriptor
 */
static inline void
nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
    int size, uint64_t data)
{
	struct sq_gather_subdesc *gather;

	qentry &= (sq->dmem.q_len - 1);
	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);

	memset(gather, 0, SND_QUEUE_DESC_SIZE);
	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
	gather->size = size;
	gather->addr = data;
}
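
/*
 * Header parsing in nicvf_sq_add_hdr_subdesc() above relies on the
 * m_pullup(9) contract: only bytes contiguous in the first mbuf may be
 * dereferenced, and m_pullup() frees the chain and returns NULL on
 * failure, which is why sq->snd_buff[qentry].mbuf is refreshed before
 * each NULL check.  Generic sketch of that access pattern (the helper
 * name is illustrative only):
 */
#if 0
static __inline void *
example_hdr_ptr(struct mbuf **mp, int off, int hlen)
{

	if ((*mp)->m_len < off + hlen) {
		*mp = m_pullup(*mp, off + hlen);
		if (*mp == NULL)
			return (NULL);	/* chain was freed by m_pullup() */
	}
	return (mtod(*mp, uint8_t *) + off);
}
#endif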

/* Put an mbuf to a SQ for packet transfer. */
static int
nicvf_tx_mbuf_locked(struct snd_queue *sq, struct mbuf *mbuf)
{
	bus_dma_segment_t segs[256];
	struct snd_buff *snd_buff;
	size_t seg;
	int nsegs, qentry;
	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT - 1;
	int err;

	NICVF_TX_LOCK_ASSERT(sq);

	if (sq->free_cnt == 0)
		return (ENOBUFS);

	snd_buff = &sq->snd_buff[sq->tail];

	err = bus_dmamap_load_mbuf_sg(sq->snd_buff_dmat, snd_buff->dmap,
	    mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err != 0) {
		/* ARM64TODO: Add mbuf defragmenting if we lack maps */
		return (err);
	}

	/* Set how many subdescriptors are required */
	subdesc_cnt += nsegs;

	if (subdesc_cnt > sq->free_cnt) {
		/* ARM64TODO: Add mbuf defragmentation if we lack descriptors */
		bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
		return (ENOBUFS);
	}

	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

	/* Add SQ header subdesc */
	err = nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, mbuf,
	    mbuf->m_pkthdr.len);
	if (err != 0) {
		bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
		return (err);
	}

	/* Add SQ gather subdescs */
	for (seg = 0; seg < nsegs; seg++) {
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		nicvf_sq_add_gather_subdesc(sq, qentry, segs[seg].ds_len,
		    segs[seg].ds_addr);
	}

	/* Make sure all memory stores are done before ringing doorbell */
	bus_dmamap_sync(sq->dmem.dmat, sq->dmem.dmap, BUS_DMASYNC_PREWRITE);

	dprintf(sq->nic->dev, "%s: sq->idx: %d, subdesc_cnt: %d\n",
	    __func__, sq->idx, subdesc_cnt);
	/* Inform HW to xmit new packet */
	nicvf_queue_reg_write(sq->nic, NIC_QSET_SQ_0_7_DOOR,
	    sq->idx, subdesc_cnt);
	return (0);
}

static __inline u_int
frag_num(u_int i)
{
#if BYTE_ORDER == BIG_ENDIAN
	return ((i & ~3) + 3 - (i & 3));
#else
	return (i);
#endif
}
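
/*
 * frag_num() compensates for byte order when indexing the rb_lens[]
 * array below: the CQE packs four 16-bit buffer lengths per 64-bit
 * word, so on big-endian hosts index i maps to (i & ~3) + 3 - (i & 3),
 * i.e.
 *	0 1 2 3 4 5 6 7  ->  3 2 1 0 7 6 5 4
 * while on little-endian hosts the index is used unchanged.
 */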

/* Returns MBUF for a received packet */
struct mbuf *
nicvf_get_rcv_mbuf(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	int frag;
	int payload_len = 0;
	struct mbuf *mbuf;
	struct mbuf *mbuf_frag;
	uint16_t *rb_lens = NULL;
	uint64_t *rb_ptrs = NULL;

	mbuf = NULL;
	rb_lens = (uint16_t *)((uint8_t *)cqe_rx + (3 * sizeof(uint64_t)));
	rb_ptrs = (uint64_t *)((uint8_t *)cqe_rx + (6 * sizeof(uint64_t)));

	dprintf(nic->dev, "%s rb_cnt %d rb0_ptr %lx rb0_sz %d\n",
	    __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);

	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
		payload_len = rb_lens[frag_num(frag)];
		if (frag == 0) {
			/* First fragment */
			mbuf = nicvf_rb_ptr_to_mbuf(nic,
			    (*rb_ptrs - cqe_rx->align_pad));
			mbuf->m_len = payload_len;
			mbuf->m_data += cqe_rx->align_pad;
			if_setrcvif(mbuf, nic->ifp);
		} else {
			/* Add fragments */
			mbuf_frag = nicvf_rb_ptr_to_mbuf(nic, *rb_ptrs);
			m_append(mbuf, payload_len, mbuf_frag->m_data);
			m_freem(mbuf_frag);
		}
		/* Next buffer pointer */
		rb_ptrs++;
	}

	if (__predict_true(mbuf != NULL)) {
		m_fixhdr(mbuf);
		mbuf->m_pkthdr.flowid = cqe_rx->rq_idx;
		M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
		if (__predict_true((if_getcapenable(nic->ifp) &
		    IFCAP_RXCSUM) != 0)) {
			/*
			 * HW by default verifies IP & TCP/UDP/SCTP checksums
			 */

			/* XXX: Do we need to include IP with options too? */
			if (__predict_true(cqe_rx->l3_type == L3TYPE_IPV4 ||
			    cqe_rx->l3_type == L3TYPE_IPV6)) {
				mbuf->m_pkthdr.csum_flags =
				    (CSUM_IP_CHECKED | CSUM_IP_VALID);
			}
			if (cqe_rx->l4_type == L4TYPE_TCP ||
			    cqe_rx->l4_type == L4TYPE_UDP ||
			    cqe_rx->l4_type == L4TYPE_SCTP) {
				mbuf->m_pkthdr.csum_flags |=
				    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
				mbuf->m_pkthdr.csum_data = htons(0xffff);
			}
		}
	}

	return (mbuf);
}
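
/*
 * The checksum flags set in nicvf_get_rcv_mbuf() above follow the usual
 * FreeBSD convention for full hardware RX checksum offload:
 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR with csum_data == 0xffff tells the
 * L4 protocol that the checksum, pseudo-header included, has already
 * been verified, so the stack skips the software check.  (0xffff is
 * byte-order invariant, so the htons() above is effectively a no-op.)
 */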

/* Enable interrupt */
void
nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	uint64_t reg_val;

	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		device_printf(nic->dev,
		    "Failed to enable interrupt: unknown type\n");
		break;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
}

/* Disable interrupt */
void
nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	uint64_t reg_val = 0;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		device_printf(nic->dev,
		    "Failed to disable interrupt: unknown type\n");
		break;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
}

/* Clear interrupt */
void
nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
	uint64_t reg_val = 0;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val = (1UL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val = (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val = (1UL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val = (1UL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		device_printf(nic->dev,
		    "Failed to clear interrupt: unknown type\n");
		break;
	}

	nicvf_reg_write(nic, NIC_VF_INT, reg_val);
}
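
/*
 * nicvf_enable_intr(), nicvf_disable_intr() and nicvf_clear_intr()
 * above differ only in the register written: ENA_W1S sets the enable
 * bit (write-1-to-set), ENA_W1C clears it, and NIC_VF_INT acks the
 * pending interrupt.  A hypothetical common mask builder for the
 * per-queue sources, reusing this driver's shift macros:
 */
#if 0
static __inline uint64_t
example_intr_mask(int int_type, int q_idx)
{

	switch (int_type) {
	case NICVF_INTR_CQ:
		return ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
	case NICVF_INTR_SQ:
		return ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
	case NICVF_INTR_RBDR:
		return ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
	default:
		return (0);
	}
}
#endif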

/* Check if interrupt is enabled */
int
nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
	uint64_t reg_val;
	uint64_t mask = 0xff;

	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);

	switch (int_type) {
	case NICVF_INTR_CQ:
		mask = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		mask = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		mask = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		mask = NICVF_INTR_PKT_DROP_MASK;
		break;
	case NICVF_INTR_TCP_TIMER:
		mask = NICVF_INTR_TCP_TIMER_MASK;
		break;
	case NICVF_INTR_MBOX:
		mask = NICVF_INTR_MBOX_MASK;
		break;
	case NICVF_INTR_QS_ERR:
		mask = NICVF_INTR_QS_ERR_MASK;
		break;
	default:
		device_printf(nic->dev,
		    "Failed to check interrupt enable: unknown type\n");
		break;
	}

	return (reg_val & mask);
}

void
nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
{
	struct rcv_queue *rq;

#define	GET_RQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 | \
	    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	rq = &nic->qs->rq[rq_idx];
	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
}

void
nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
{
	struct snd_queue *sq;

#define	GET_SQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 | \
	    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	sq = &nic->qs->sq[sq_idx];
	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
}
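
/*
 * The GET_RQ_STATS/GET_SQ_STATS macros above encode the statistics
 * register address: the queue index is shifted into the queue-select
 * field (NIC_Q_NUM_SHIFT) and the statistic index becomes a
 * 64-bit-word byte offset (reg << 3).  Equivalent function form of
 * the RQ read (the helper itself is hypothetical):
 */
#if 0
static __inline uint64_t
example_rq_stat(struct nicvf *nic, int rq_idx, int reg)
{

	return (nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |
	    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)));
}
#endif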

/* Check for errors in the receive completion queue entry */
int
nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cmp_queue *cq,
    struct cqe_rx_t *cqe_rx)
{
	struct nicvf_hw_stats *stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;

	if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
		drv_stats->rx_frames_ok++;
		return (0);
	}

	switch (cqe_rx->err_opcode) {
	case CQ_RX_ERROP_RE_PARTIAL:
		stats->rx_bgx_truncated_pkts++;
		break;
	case CQ_RX_ERROP_RE_JABBER:
		stats->rx_jabber_errs++;
		break;
	case CQ_RX_ERROP_RE_FCS:
		stats->rx_fcs_errs++;
		break;
	case CQ_RX_ERROP_RE_RX_CTL:
		stats->rx_bgx_errs++;
		break;
	case CQ_RX_ERROP_PREL2_ERR:
		stats->rx_prel2_errs++;
		break;
	case CQ_RX_ERROP_L2_MAL:
		stats->rx_l2_hdr_malformed++;
		break;
	case CQ_RX_ERROP_L2_OVERSIZE:
		stats->rx_oversize++;
		break;
	case CQ_RX_ERROP_L2_UNDERSIZE:
		stats->rx_undersize++;
		break;
	case CQ_RX_ERROP_L2_LENMISM:
		stats->rx_l2_len_mismatch++;
		break;
	case CQ_RX_ERROP_L2_PCLP:
		stats->rx_l2_pclp++;
		break;
	case CQ_RX_ERROP_IP_NOT:
		stats->rx_ip_ver_errs++;
		break;
	case CQ_RX_ERROP_IP_CSUM_ERR:
		stats->rx_ip_csum_errs++;
		break;
	case CQ_RX_ERROP_IP_MAL:
		stats->rx_ip_hdr_malformed++;
		break;
	case CQ_RX_ERROP_IP_MALD:
		stats->rx_ip_payload_malformed++;
		break;
	case CQ_RX_ERROP_IP_HOP:
		stats->rx_ip_ttl_errs++;
		break;
	case CQ_RX_ERROP_L3_PCLP:
		stats->rx_l3_pclp++;
		break;
	case CQ_RX_ERROP_L4_MAL:
		stats->rx_l4_malformed++;
		break;
	case CQ_RX_ERROP_L4_CHK:
		stats->rx_l4_csum_errs++;
		break;
	case CQ_RX_ERROP_UDP_LEN:
		stats->rx_udp_len_errs++;
		break;
	case CQ_RX_ERROP_L4_PORT:
		stats->rx_l4_port_errs++;
		break;
	case CQ_RX_ERROP_TCP_FLAG:
		stats->rx_tcp_flag_errs++;
		break;
	case CQ_RX_ERROP_TCP_OFFSET:
		stats->rx_tcp_offset_errs++;
		break;
	case CQ_RX_ERROP_L4_PCLP:
		stats->rx_l4_pclp++;
		break;
	case CQ_RX_ERROP_RBDR_TRUNC:
		stats->rx_truncated_pkts++;
		break;
	}

	return (1);
}

/* Check for errors in the send completion queue entry */
int
nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cmp_queue *cq,
    struct cqe_send_t *cqe_tx)
{
	struct cmp_queue_stats *stats = &cq->stats;

	switch (cqe_tx->send_status) {
	case CQ_TX_ERROP_GOOD:
		stats->tx.good++;
		return (0);
	case CQ_TX_ERROP_DESC_FAULT:
		stats->tx.desc_fault++;
		break;
	case CQ_TX_ERROP_HDR_CONS_ERR:
		stats->tx.hdr_cons_err++;
		break;
	case CQ_TX_ERROP_SUBDC_ERR:
		stats->tx.subdesc_err++;
		break;
	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
		stats->tx.imm_size_oflow++;
		break;
	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
		stats->tx.data_seq_err++;
		break;
	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
		stats->tx.mem_seq_err++;
		break;
	case CQ_TX_ERROP_LOCK_VIOL:
		stats->tx.lock_viol++;
		break;
	case CQ_TX_ERROP_DATA_FAULT:
		stats->tx.data_fault++;
		break;
	case CQ_TX_ERROP_TSTMP_CONFLICT:
		stats->tx.tstmp_conflict++;
		break;
	case CQ_TX_ERROP_TSTMP_TIMEOUT:
		stats->tx.tstmp_timeout++;
		break;
	case CQ_TX_ERROP_MEM_FAULT:
		stats->tx.mem_fault++;
		break;
	case CQ_TX_ERROP_CK_OVERLAP:
		stats->tx.csum_overlap++;
		break;
	case CQ_TX_ERROP_CK_OFLOW:
		stats->tx.csum_overflow++;
		break;
	}

	return (1);
}