/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/vnic/nicvf_queues.c 325916 2017-11-17 00:38:00Z emaste $
 *
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/vnic/nicvf_queues.c 325916 2017-11-17 00:38:00Z emaste $");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/buf_ring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sockio.h>
#include <sys/socket.h>
#include <sys/stdatomic.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/vmparam.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/ifq.h>
#include <net/bpf.h>
#include <net/ethernet.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/sctp.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet/udp.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "thunder_bgx.h"
#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "nicvf_queues.h"

#define	DEBUG
#undef DEBUG

#ifdef DEBUG
#define	dprintf(dev, fmt, ...)	device_printf(dev, fmt, ##__VA_ARGS__)
#else
#define	dprintf(dev, fmt, ...)
#endif

MALLOC_DECLARE(M_NICVF);

static void nicvf_free_snd_queue(struct nicvf *, struct snd_queue *);
static struct mbuf * nicvf_get_rcv_mbuf(struct nicvf *, struct cqe_rx_t *);
static void nicvf_sq_disable(struct nicvf *, int);
static void nicvf_sq_enable(struct nicvf *, struct snd_queue *, int);
static void nicvf_put_sq_desc(struct snd_queue *, int);
static void nicvf_cmp_queue_config(struct nicvf *, struct queue_set *, int,
    boolean_t);
static void nicvf_sq_free_used_descs(struct nicvf *, struct snd_queue *, int);

static int nicvf_tx_mbuf_locked(struct snd_queue *, struct mbuf **);

static void nicvf_rbdr_task(void *, int);
static void nicvf_rbdr_task_nowait(void *, int);

struct rbuf_info {
	bus_dma_tag_t	dmat;
	bus_dmamap_t	dmap;
	struct mbuf *	mbuf;
};

#define	GET_RBUF_INFO(x) ((struct rbuf_info *)((x) - NICVF_RCV_BUF_ALIGN_BYTES))

/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
    uint64_t reg, int bit_pos, int bits, int val)
{
	uint64_t bit_mask;
	uint64_t reg_val;
	int timeout = 10;

	bit_mask = (1UL << bits) - 1;
	bit_mask = (bit_mask << bit_pos);

	while (timeout) {
		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
		if (((reg_val & bit_mask) >> bit_pos) == val)
			return (0);

		DELAY(1000);
		timeout--;
	}
	device_printf(nic->dev, "Poll on reg 0x%lx failed\n", reg);
	return (ETIMEDOUT);
}

/* Callback for bus_dmamap_load() */
static void
nicvf_dmamap_q_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr;

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	paddr = arg;
	*paddr = segs->ds_addr;
}

/* Allocate memory for a queue's descriptors */
static int
nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
    int q_len, int desc_size, int align_bytes)
{
	int err, err_dmat;

	/* Create DMA tag first */
	err = bus_dma_tag_create(
	    bus_get_dma_tag(nic->dev),		/* parent tag */
	    align_bytes,			/* alignment */
	    0,					/* boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    (q_len * desc_size),		/* maxsize */
	    1,					/* nsegments */
	    (q_len * desc_size),		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &dmem->dmat);			/* dmat */

	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to create busdma tag for descriptors ring\n");
		return (err);
	}

	/* Allocate segment of continuous DMA safe memory */
	err = bus_dmamem_alloc(
	    dmem->dmat,				/* DMA tag */
	    &dmem->base,			/* virtual address */
	    (BUS_DMA_NOWAIT | BUS_DMA_ZERO),	/* flags */
	    &dmem->dmap);			/* DMA map */
	if (err != 0) {
		device_printf(nic->dev, "Failed to allocate DMA safe memory "
		    "for descriptors ring\n");
		goto dmamem_fail;
	}

	err = bus_dmamap_load(
	    dmem->dmat,
	    dmem->dmap,
	    dmem->base,
	    (q_len * desc_size),		/* allocation size */
	    nicvf_dmamap_q_cb,			/* map to DMA address cb. */
	    &dmem->phys_base,			/* physical address */
	    BUS_DMA_NOWAIT);
	if (err != 0) {
		device_printf(nic->dev,
		    "Cannot load DMA map of descriptors ring\n");
		goto dmamap_fail;
	}

	dmem->q_len = q_len;
	dmem->size = (desc_size * q_len);

	return (0);

dmamap_fail:
	bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
	dmem->phys_base = 0;
dmamem_fail:
	err_dmat = bus_dma_tag_destroy(dmem->dmat);
	dmem->base = NULL;
	KASSERT(err_dmat == 0,
	    ("%s: Trying to destroy BUSY DMA tag", __func__));

	return (err);
}

/* Free queue's descriptor memory */
static void
nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
	int err;

	if ((dmem == NULL) || (dmem->base == NULL))
		return;

	/* Unload a map */
	bus_dmamap_sync(dmem->dmat, dmem->dmap, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(dmem->dmat, dmem->dmap);
	/* Free DMA memory */
	bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
	/* Destroy DMA tag */
	err = bus_dma_tag_destroy(dmem->dmat);

	KASSERT(err == 0,
	    ("%s: Trying to destroy BUSY DMA tag", __func__));

	dmem->phys_base = 0;
	dmem->base = NULL;
}
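
/*
 * Added illustration (an assumption drawn from the code below, not a comment
 * from the original sources): nicvf_alloc_rcv_buffer() lays a receive buffer
 * out roughly as follows:
 *
 *   cluster start -> +----------------------------+
 *                    | struct rbuf_info           |  driver metadata
 *                    | (pad up to 128-byte line)  |
 *   HW address    -> +----------------------------+  segs[0].ds_addr + 128
 *                    | packet payload             |  what the NIC DMAs into
 *                    +----------------------------+
 *
 * GET_RBUF_INFO() recovers the metadata by stepping back
 * NICVF_RCV_BUF_ALIGN_BYTES (one 128-byte cache line) from the address that
 * was handed to the hardware.
 */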
/*
 * Allocate buffer for packet reception
 * HW returns memory address where packet is DMA'ed but not a pointer
 * into RBDR ring, so save buffer address at the start of fragment and
 * align the start address to a cache aligned address
 */
static __inline int
nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
    bus_dmamap_t dmap, int mflags, uint32_t buf_len, bus_addr_t *rbuf)
{
	struct mbuf *mbuf;
	struct rbuf_info *rinfo;
	bus_dma_segment_t segs[1];
	int nsegs;
	int err;

	mbuf = m_getjcl(mflags, MT_DATA, M_PKTHDR, MCLBYTES);
	if (mbuf == NULL)
		return (ENOMEM);

	/*
	 * The length is equal to the actual length + one 128b line
	 * used as room for the rbuf_info structure.
	 */
	mbuf->m_len = mbuf->m_pkthdr.len = buf_len;

	err = bus_dmamap_load_mbuf_sg(rbdr->rbdr_buff_dmat, dmap, mbuf, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to map mbuf into DMA visible memory, err: %d\n",
		    err);
		m_freem(mbuf);
		bus_dmamap_destroy(rbdr->rbdr_buff_dmat, dmap);
		return (err);
	}
	if (nsegs != 1)
		panic("Unexpected number of DMA segments for RB: %d", nsegs);
	/*
	 * Now use the room for rbuf_info structure
	 * and adjust mbuf data and length.
	 */
	rinfo = (struct rbuf_info *)mbuf->m_data;
	m_adj(mbuf, NICVF_RCV_BUF_ALIGN_BYTES);

	rinfo->dmat = rbdr->rbdr_buff_dmat;
	rinfo->dmap = dmap;
	rinfo->mbuf = mbuf;

	*rbuf = segs[0].ds_addr + NICVF_RCV_BUF_ALIGN_BYTES;

	return (0);
}

/* Retrieve mbuf for received packet */
static struct mbuf *
nicvf_rb_ptr_to_mbuf(struct nicvf *nic, bus_addr_t rb_ptr)
{
	struct mbuf *mbuf;
	struct rbuf_info *rinfo;

	/* Get buffer start address and alignment offset */
	rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(rb_ptr));

	/* Now retrieve mbuf to give to stack */
	mbuf = rinfo->mbuf;
	if (__predict_false(mbuf == NULL)) {
		panic("%s: Received packet fragment with NULL mbuf",
		    device_get_nameunit(nic->dev));
	}
	/*
	 * Clear the mbuf in the descriptor to indicate
	 * that this slot is processed and free to use.
	 */
	rinfo->mbuf = NULL;

	bus_dmamap_sync(rinfo->dmat, rinfo->dmap, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(rinfo->dmat, rinfo->dmap);

	return (mbuf);
}
/* Allocate RBDR ring and populate receive buffers */
static int
nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, int ring_len,
    int buf_size, int qidx)
{
	bus_dmamap_t dmap;
	bus_addr_t rbuf;
	struct rbdr_entry_t *desc;
	int idx;
	int err;

	/* Allocate rbdr descriptors ring */
	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
	    sizeof(struct rbdr_entry_t), NICVF_RCV_BUF_ALIGN_BYTES);
	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to create RBDR descriptors ring\n");
		return (err);
	}

	rbdr->desc = rbdr->dmem.base;
	/*
	 * Buffer size has to be in multiples of 128 bytes.
	 * Make room for metadata of size of one line (128 bytes).
	 */
	rbdr->dma_size = buf_size - NICVF_RCV_BUF_ALIGN_BYTES;
	rbdr->enable = TRUE;
	rbdr->thresh = RBDR_THRESH;
	rbdr->nic = nic;
	rbdr->idx = qidx;

	/*
	 * Create DMA tag for Rx buffers.
	 * Each map created using this tag is intended to store Rx payload for
	 * one fragment and one header structure containing rbuf_info (thus
	 * additional 128 byte line since RB must be a multiple of 128 byte
	 * cache line).
	 */
	if (buf_size > MCLBYTES) {
		device_printf(nic->dev,
		    "Buffer size too large for mbuf cluster\n");
		return (EINVAL);
	}
	err = bus_dma_tag_create(
	    bus_get_dma_tag(nic->dev),		/* parent tag */
	    NICVF_RCV_BUF_ALIGN_BYTES,		/* alignment */
	    0,					/* boundary */
	    DMAP_MAX_PHYSADDR,			/* lowaddr */
	    DMAP_MIN_PHYSADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    roundup2(buf_size, MCLBYTES),	/* maxsize */
	    1,					/* nsegments */
	    roundup2(buf_size, MCLBYTES),	/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &rbdr->rbdr_buff_dmat);		/* dmat */

	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to create busdma tag for RBDR buffers\n");
		return (err);
	}

	rbdr->rbdr_buff_dmaps = malloc(sizeof(*rbdr->rbdr_buff_dmaps) *
	    ring_len, M_NICVF, (M_WAITOK | M_ZERO));

	for (idx = 0; idx < ring_len; idx++) {
		err = bus_dmamap_create(rbdr->rbdr_buff_dmat, 0, &dmap);
		if (err != 0) {
			device_printf(nic->dev,
			    "Failed to create DMA map for RB\n");
			return (err);
		}
		rbdr->rbdr_buff_dmaps[idx] = dmap;

		err = nicvf_alloc_rcv_buffer(nic, rbdr, dmap, M_WAITOK,
		    DMA_BUFFER_LEN, &rbuf);
		if (err != 0)
			return (err);

		desc = GET_RBDR_DESC(rbdr, idx);
		desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
	}

	/* Allocate taskqueue */
	TASK_INIT(&rbdr->rbdr_task, 0, nicvf_rbdr_task, rbdr);
	TASK_INIT(&rbdr->rbdr_task_nowait, 0, nicvf_rbdr_task_nowait, rbdr);
	rbdr->rbdr_taskq = taskqueue_create_fast("nicvf_rbdr_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &rbdr->rbdr_taskq);
	taskqueue_start_threads(&rbdr->rbdr_taskq, 1, PI_NET, "%s: rbdr_taskq",
	    device_get_nameunit(nic->dev));

	return (0);
}
/* Free RBDR ring and its receive buffers */
static void
nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
	struct mbuf *mbuf;
	struct queue_set *qs;
	struct rbdr_entry_t *desc;
	struct rbuf_info *rinfo;
	bus_addr_t buf_addr;
	int head, tail, idx;
	int err;

	qs = nic->qs;

	if ((qs == NULL) || (rbdr == NULL))
		return;

	rbdr->enable = FALSE;
	if (rbdr->rbdr_taskq != NULL) {
		/* Remove tasks */
		while (taskqueue_cancel(rbdr->rbdr_taskq,
		    &rbdr->rbdr_task_nowait, NULL) != 0) {
			/* Finish the nowait task first */
			taskqueue_drain(rbdr->rbdr_taskq,
			    &rbdr->rbdr_task_nowait);
		}
		taskqueue_free(rbdr->rbdr_taskq);
		rbdr->rbdr_taskq = NULL;

		while (taskqueue_cancel(taskqueue_thread,
		    &rbdr->rbdr_task, NULL) != 0) {
			/* Now finish the sleepable task */
			taskqueue_drain(taskqueue_thread, &rbdr->rbdr_task);
		}
	}

	/*
	 * Free all of the memory under the RB descriptors.
	 * There are assumptions here:
	 * 1. Corresponding RBDR is disabled
	 *    - it is safe to operate using head and tail indexes
	 * 2. All buffers that were received are properly freed by
	 *    the receive handler
	 *    - there is no need to unload DMA map and free MBUF for other
	 *      descriptors than unused ones
	 */
	if (rbdr->rbdr_buff_dmat != NULL) {
		head = rbdr->head;
		tail = rbdr->tail;
		while (head != tail) {
			desc = GET_RBDR_DESC(rbdr, head);
			buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
			rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
			bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
			mbuf = rinfo->mbuf;
			/* This will destroy everything including rinfo! */
			m_freem(mbuf);
			head++;
			head &= (rbdr->dmem.q_len - 1);
		}
		/* Free tail descriptor */
		desc = GET_RBDR_DESC(rbdr, tail);
		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
		rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
		bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
		mbuf = rinfo->mbuf;
		/* This will destroy everything including rinfo! */
		m_freem(mbuf);

		/* Destroy DMA maps */
		for (idx = 0; idx < qs->rbdr_len; idx++) {
			if (rbdr->rbdr_buff_dmaps[idx] == NULL)
				continue;
			err = bus_dmamap_destroy(rbdr->rbdr_buff_dmat,
			    rbdr->rbdr_buff_dmaps[idx]);
			KASSERT(err == 0,
			    ("%s: Could not destroy DMA map for RB, desc: %d",
			    __func__, idx));
			rbdr->rbdr_buff_dmaps[idx] = NULL;
		}

		/* Now destroy the tag */
		err = bus_dma_tag_destroy(rbdr->rbdr_buff_dmat);
		KASSERT(err == 0,
		    ("%s: Trying to destroy BUSY DMA tag", __func__));

		rbdr->head = 0;
		rbdr->tail = 0;
	}

	/* Free RBDR ring */
	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}
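
/*
 * Added note on RBDR accounting (a summary of the code below): STATUS0
 * reports the count of buffers still owned by the hardware (masked with
 * 0x7FFFF) and the doorbell register is written with the number of newly
 * added descriptors. The ring must never be made completely full, hence
 * the "ring size minus 1" cap when computing how many buffers to refill.
 */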
/*
 * Refill receive buffer descriptors with new buffers.
 */
static int
nicvf_refill_rbdr(struct rbdr *rbdr, int mflags)
{
	struct nicvf *nic;
	struct queue_set *qs;
	int rbdr_idx;
	int tail, qcount;
	int refill_rb_cnt;
	struct rbdr_entry_t *desc;
	bus_dmamap_t dmap;
	bus_addr_t rbuf;
	boolean_t rb_alloc_fail;
	int new_rb;

	rb_alloc_fail = TRUE;
	new_rb = 0;
	nic = rbdr->nic;
	qs = nic->qs;
	rbdr_idx = rbdr->idx;

	/* Check if it's enabled */
	if (!rbdr->enable)
		return (0);

	/* Get the number of descriptors to be refilled */
	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
	qcount &= 0x7FFFF;
	/* Doorbell can be rung with a max of ring size minus 1 */
	if (qcount >= (qs->rbdr_len - 1)) {
		rb_alloc_fail = FALSE;
		goto out;
	} else
		refill_rb_cnt = qs->rbdr_len - qcount - 1;

	/* Start filling descs from tail */
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
	while (refill_rb_cnt) {
		tail++;
		tail &= (rbdr->dmem.q_len - 1);

		dmap = rbdr->rbdr_buff_dmaps[tail];
		if (nicvf_alloc_rcv_buffer(nic, rbdr, dmap, mflags,
		    DMA_BUFFER_LEN, &rbuf)) {
			/* Something went wrong; stop refilling */
			break;
		}
		desc = GET_RBDR_DESC(rbdr, tail);
		desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
		refill_rb_cnt--;
		new_rb++;
	}

	/* make sure all memory stores are done before ringing doorbell */
	wmb();

	/* Check if buffer allocation failed */
	if (refill_rb_cnt == 0)
		rb_alloc_fail = FALSE;

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
	    rbdr_idx, new_rb);
out:
	if (!rb_alloc_fail) {
		/*
		 * Re-enable RBDR interrupts only
		 * if buffer allocation succeeded.
		 */
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

		return (0);
	}

	return (ENOMEM);
}
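
/*
 * The two tasks below form a two-stage refill strategy (added summary):
 * nicvf_rbdr_task_nowait() is tried first and refills atomically (M_NOWAIT);
 * if allocation fails under memory pressure it hands off to nicvf_rbdr_task()
 * on taskqueue_thread, which may sleep (M_WAITOK) and is therefore expected
 * to always succeed.
 */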
/* Refill RBs even if sleep is needed to reclaim memory */
static void
nicvf_rbdr_task(void *arg, int pending)
{
	struct rbdr *rbdr;
	int err;

	rbdr = (struct rbdr *)arg;

	err = nicvf_refill_rbdr(rbdr, M_WAITOK);
	if (__predict_false(err != 0)) {
		panic("%s: Failed to refill RBs even when sleep enabled",
		    __func__);
	}
}

/* Refill RBs as soon as possible without waiting */
static void
nicvf_rbdr_task_nowait(void *arg, int pending)
{
	struct rbdr *rbdr;
	int err;

	rbdr = (struct rbdr *)arg;

	err = nicvf_refill_rbdr(rbdr, M_NOWAIT);
	if (err != 0) {
		/*
		 * Schedule another, sleepable kernel thread
		 * that is guaranteed to refill the buffers.
		 */
		taskqueue_enqueue(taskqueue_thread, &rbdr->rbdr_task);
	}
}
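
/*
 * Added outline of the RX completion path: each CQE_TYPE_RX entry carries
 * the receive queue index and the number of receive buffers (rb_cnt) that
 * hold the packet. Even for error CQEs those buffers must be reclaimed when
 * rb_cnt is non-zero, which is why nicvf_get_rcv_mbuf() is still called
 * before an erroneous packet is dropped.
 */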
static int
nicvf_rcv_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
    struct cqe_rx_t *cqe_rx, int cqe_type)
{
	struct mbuf *mbuf;
	struct rcv_queue *rq;
	int rq_idx;
	int err = 0;

	rq_idx = cqe_rx->rq_idx;
	rq = &nic->qs->rq[rq_idx];

	/* Check for errors */
	err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
	if (err && !cqe_rx->rb_cnt)
		return (0);

	mbuf = nicvf_get_rcv_mbuf(nic, cqe_rx);
	if (mbuf == NULL) {
		dprintf(nic->dev, "Packet not received\n");
		return (0);
	}

	/* If this is an error packet, drop it */
	if (err != 0) {
		m_freem(mbuf);
		return (0);
	}

	if (rq->lro_enabled &&
	    ((cqe_rx->l3_type == L3TYPE_IPV4) && (cqe_rx->l4_type == L4TYPE_TCP)) &&
	    (mbuf->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
		/*
		 * At this point it is known that there are no errors in the
		 * packet. Attempt to LRO enqueue. Send to stack if no resources
		 * or enqueue error.
		 */
		if ((rq->lro.lro_cnt != 0) &&
		    (tcp_lro_rx(&rq->lro, mbuf, 0) == 0))
			return (0);
	}
	/*
	 * Defer pushing this packet to the stack until later,
	 * to avoid unlocking the completion task in the middle of its work.
	 */
	err = buf_ring_enqueue(cq->rx_br, mbuf);
	if (err != 0) {
		/*
		 * Failed to enqueue this mbuf.
		 * We don't drop it, just schedule another task.
		 */
		return (err);
	}

	return (0);
}

static void
nicvf_snd_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
    struct cqe_send_t *cqe_tx, int cqe_type)
{
	bus_dmamap_t dmap;
	struct mbuf *mbuf;
	struct snd_queue *sq;
	struct sq_hdr_subdesc *hdr;

	mbuf = NULL;
	sq = &nic->qs->sq[cqe_tx->sq_idx];

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
		return;

	dprintf(nic->dev,
	    "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
	    __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
	    cqe_tx->sqe_ptr, hdr->subdesc_cnt);

	dmap = (bus_dmamap_t)sq->snd_buff[cqe_tx->sqe_ptr].dmap;
	bus_dmamap_unload(sq->snd_buff_dmat, dmap);

	mbuf = (struct mbuf *)sq->snd_buff[cqe_tx->sqe_ptr].mbuf;
	if (mbuf != NULL) {
		m_freem(mbuf);
		sq->snd_buff[cqe_tx->sqe_ptr].mbuf = NULL;
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}

	nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
}
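
/*
 * Added outline of CQ processing: the handler below reads the number of
 * valid CQEs from NIC_QSET_CQ_0_7_STATUS and the ring head from
 * NIC_QSET_CQ_0_7_HEAD (shifted right by 9, presumably because each CQ
 * descriptor occupies 512 bytes). After the loop, writing the processed
 * count to NIC_QSET_CQ_0_7_DOOR returns those entries to the hardware.
 * Received mbufs are staged on cq->rx_br and pushed to the stack only
 * after the CQ lock is dropped.
 */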
static int
nicvf_cq_intr_handler(struct nicvf *nic, uint8_t cq_idx)
{
	struct mbuf *mbuf;
	struct ifnet *ifp;
	int processed_cqe, work_done = 0, tx_done = 0;
	int cqe_count, cqe_head;
	struct queue_set *qs = nic->qs;
	struct cmp_queue *cq = &qs->cq[cq_idx];
	struct snd_queue *sq = &qs->sq[cq_idx];
	struct rcv_queue *rq;
	struct cqe_rx_t *cq_desc;
	struct lro_ctrl *lro;
	int rq_idx;
	int cmp_err;

	NICVF_CMP_LOCK(cq);
	cmp_err = 0;
	processed_cqe = 0;
	/* Get the number of valid CQ entries to process */
	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
	cqe_count &= CQ_CQE_COUNT;
	if (cqe_count == 0)
		goto out;

	/* Get head of the valid CQ entries */
	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
	cqe_head &= 0xFFFF;

	dprintf(nic->dev, "%s CQ%d cqe_count %d cqe_head %d\n",
	    __func__, cq_idx, cqe_count, cqe_head);
	while (processed_cqe < cqe_count) {
		/* Get the CQ descriptor */
		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
		cqe_head++;
		cqe_head &= (cq->dmem.q_len - 1);
		/* Prefetch next CQ descriptor */
		__builtin_prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));

		dprintf(nic->dev, "CQ%d cq_desc->cqe_type %d\n", cq_idx,
		    cq_desc->cqe_type);
		switch (cq_desc->cqe_type) {
		case CQE_TYPE_RX:
			cmp_err = nicvf_rcv_pkt_handler(nic, cq, cq_desc,
			    CQE_TYPE_RX);
			if (__predict_false(cmp_err != 0)) {
				/*
				 * Oops. Cannot finish now.
				 * Let's try again later.
				 */
				goto done;
			}
			work_done++;
			break;
		case CQE_TYPE_SEND:
			nicvf_snd_pkt_handler(nic, cq, (void *)cq_desc,
			    CQE_TYPE_SEND);
			tx_done++;
			break;
		case CQE_TYPE_INVALID:
		case CQE_TYPE_RX_SPLIT:
		case CQE_TYPE_RX_TCP:
		case CQE_TYPE_SEND_PTP:
			/* Ignore for now */
			break;
		}
		processed_cqe++;
	}
done:
	dprintf(nic->dev,
	    "%s CQ%d processed_cqe %d work_done %d\n",
	    __func__, cq_idx, processed_cqe, work_done);

	/* Ring doorbell to inform H/W to reuse processed CQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, cq_idx, processed_cqe);

	if ((tx_done > 0) &&
	    ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0)) {
		/* Re-enable TXQ if it was stopped earlier due to a full SQ */
		if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
		taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
	}
out:
	/*
	 * Flush any outstanding LRO work
	 */
	rq_idx = cq_idx;
	rq = &nic->qs->rq[rq_idx];
	lro = &rq->lro;
	tcp_lro_flush_all(lro);

	NICVF_CMP_UNLOCK(cq);

	ifp = nic->ifp;
	/* Push received MBUFs to the stack */
	while (!buf_ring_empty(cq->rx_br)) {
		mbuf = buf_ring_dequeue_mc(cq->rx_br);
		if (__predict_true(mbuf != NULL))
			(*ifp->if_input)(ifp, mbuf);
	}

	return (cmp_err);
}

/*
 * Qset error interrupt handler
 *
 * As of now only CQ errors are handled
 */
static void
nicvf_qs_err_task(void *arg, int pending)
{
	struct nicvf *nic;
	struct queue_set *qs;
	int qidx;
	uint64_t status;
	boolean_t enable = TRUE;

	nic = (struct nicvf *)arg;
	qs = nic->qs;

	/* Deactivate network interface */
	if_setdrvflagbits(nic->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);

	/* Check if it is CQ err */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
		    qidx);
		if ((status & CQ_ERR_MASK) == 0)
			continue;
		/* Process already queued CQEs and reconfig CQ */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_sq_disable(nic, qidx);
		(void)nicvf_cq_intr_handler(nic, qidx);
		nicvf_cmp_queue_config(nic, qs, qidx, enable);
		nicvf_sq_free_used_descs(nic, &qs->sq[qidx], qidx);
		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
	}

	if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
	/* Re-enable Qset error interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
}
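
/*
 * Added note on the interrupt flow: the MSI-X handler
 * (nicvf_intr_handler(), defined elsewhere) disables the CQ interrupt and
 * enqueues nicvf_cmp_task() below; the task drains the queue, reschedules
 * itself if the drain was cut short, then clears the pending bit and
 * re-enables the interrupt.
 */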
static void
nicvf_cmp_task(void *arg, int pending)
{
	struct cmp_queue *cq;
	struct nicvf *nic;
	int cmp_err;

	cq = (struct cmp_queue *)arg;
	nic = cq->nic;

	/* Handle CQ descriptors */
	cmp_err = nicvf_cq_intr_handler(nic, cq->idx);
	if (__predict_false(cmp_err != 0)) {
		/*
		 * Schedule another thread here since we did not
		 * process the entire CQ due to Tx or Rx CQ parse error.
		 */
		taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task);
	}

	nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
	/* Re-enable interrupt (previously disabled in nicvf_intr_handler()) */
	nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->idx);
}

/* Initialize completion queue */
static int
nicvf_init_cmp_queue(struct nicvf *nic, struct cmp_queue *cq, int q_len,
    int qidx)
{
	int err;

	/* Initialize lock */
	snprintf(cq->mtx_name, sizeof(cq->mtx_name), "%s: CQ(%d) lock",
	    device_get_nameunit(nic->dev), qidx);
	mtx_init(&cq->mtx, cq->mtx_name, NULL, MTX_DEF);

	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
	    NICVF_CQ_BASE_ALIGN_BYTES);

	if (err != 0) {
		device_printf(nic->dev,
		    "Could not allocate DMA memory for CQ\n");
		return (err);
	}

	cq->desc = cq->dmem.base;
	cq->thresh = pass1_silicon(nic->dev) ? 0 : CMP_QUEUE_CQE_THRESH;
	cq->nic = nic;
	cq->idx = qidx;
	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;

	cq->rx_br = buf_ring_alloc(CMP_QUEUE_LEN * 8, M_DEVBUF, M_WAITOK,
	    &cq->mtx);

	/* Allocate taskqueue */
	TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq);
	cq->cmp_taskq = taskqueue_create_fast("nicvf_cmp_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &cq->cmp_taskq);
	taskqueue_start_threads(&cq->cmp_taskq, 1, PI_NET, "%s: cmp_taskq(%d)",
	    device_get_nameunit(nic->dev), qidx);

	return (0);
}
static void
nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
{

	if (cq == NULL)
		return;
	/*
	 * The completion queue itself should be disabled by now
	 * (ref. nicvf_snd_queue_config()).
	 * Ensure that it is safe to disable it or panic.
	 */
	if (cq->enable)
		panic("%s: Trying to free working CQ(%d)", __func__, cq->idx);

	if (cq->cmp_taskq != NULL) {
		/* Remove task */
		while (taskqueue_cancel(cq->cmp_taskq, &cq->cmp_task, NULL) != 0)
			taskqueue_drain(cq->cmp_taskq, &cq->cmp_task);

		taskqueue_free(cq->cmp_taskq);
		cq->cmp_taskq = NULL;
	}
	/*
	 * The completion task could have re-enabled the interrupt,
	 * so disable it again now that the task has finished.
	 * It is safe to do so since the corresponding CQ was already disabled.
	 */
	nicvf_disable_intr(nic, NICVF_INTR_CQ, cq->idx);
	nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);

	NICVF_CMP_LOCK(cq);
	nicvf_free_q_desc_mem(nic, &cq->dmem);
	drbr_free(cq->rx_br, M_DEVBUF);
	NICVF_CMP_UNLOCK(cq);
	mtx_destroy(&cq->mtx);
	memset(cq->mtx_name, 0, sizeof(cq->mtx_name));
}

int
nicvf_xmit_locked(struct snd_queue *sq)
{
	struct nicvf *nic;
	struct ifnet *ifp;
	struct mbuf *next;
	int err;

	NICVF_TX_LOCK_ASSERT(sq);

	nic = sq->nic;
	ifp = nic->ifp;
	err = 0;

	while ((next = drbr_peek(ifp, sq->br)) != NULL) {
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, next);

		err = nicvf_tx_mbuf_locked(sq, &next);
		if (err != 0) {
			if (next == NULL)
				drbr_advance(ifp, sq->br);
			else
				drbr_putback(ifp, sq->br, next);

			break;
		}
		drbr_advance(ifp, sq->br);
	}
	return (err);
}
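
/*
 * Added note on the buf_ring protocol used in nicvf_xmit_locked() above:
 * drbr_peek() returns the next mbuf while leaving it in the ring, and
 * drbr_advance() consumes it once transmission succeeded. When
 * nicvf_tx_mbuf_locked() fails it may have replaced the mbuf chain (it
 * takes a struct mbuf **), so the updated chain is returned with
 * drbr_putback(), or advanced past when the chain was consumed
 * (next == NULL).
 */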
static void
nicvf_snd_task(void *arg, int pending)
{
	struct snd_queue *sq = (struct snd_queue *)arg;
	struct nicvf *nic;
	struct ifnet *ifp;
	int err;

	nic = sq->nic;
	ifp = nic->ifp;

	/*
	 * Skip sending anything if the driver is not running,
	 * the SQ is full, or the link is down.
	 */
	if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING) || !nic->link_up)
		return;

	NICVF_TX_LOCK(sq);
	err = nicvf_xmit_locked(sq);
	NICVF_TX_UNLOCK(sq);
	/* Try again */
	if (err != 0)
		taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
}
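
/*
 * Added note: the buf_ring allocated below is sized
 * q_len / MIN_SQ_DESC_PER_PKT_XMIT because every packet consumes at least
 * MIN_SQ_DESC_PER_PKT_XMIT SQ descriptors, so roughly that many packets at
 * most can be outstanding in the send queue at once.
 */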
/* Initialize transmit queue */
static int
nicvf_init_snd_queue(struct nicvf *nic, struct snd_queue *sq, int q_len,
    int qidx)
{
	size_t i;
	int err;

	/* Initialize TX lock for this queue */
	snprintf(sq->mtx_name, sizeof(sq->mtx_name), "%s: SQ(%d) lock",
	    device_get_nameunit(nic->dev), qidx);
	mtx_init(&sq->mtx, sq->mtx_name, NULL, MTX_DEF);

	NICVF_TX_LOCK(sq);
	/* Allocate buffer ring */
	sq->br = buf_ring_alloc(q_len / MIN_SQ_DESC_PER_PKT_XMIT, M_DEVBUF,
	    M_NOWAIT, &sq->mtx);
	if (sq->br == NULL) {
		device_printf(nic->dev,
		    "ERROR: Could not set up buf ring for SQ(%d)\n", qidx);
		err = ENOMEM;
		goto error;
	}

	/* Allocate DMA memory for Tx descriptors */
	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
	    NICVF_SQ_BASE_ALIGN_BYTES);
	if (err != 0) {
		device_printf(nic->dev,
		    "Could not allocate DMA memory for SQ\n");
		goto error;
	}

	sq->desc = sq->dmem.base;
	sq->head = sq->tail = 0;
	atomic_store_rel_int(&sq->free_cnt, q_len - 1);
	sq->thresh = SND_QUEUE_THRESH;
	sq->idx = qidx;
	sq->nic = nic;

	/*
	 * Allocate DMA maps for Tx buffers
	 */

	/* Create DMA tag first */
	err = bus_dma_tag_create(
	    bus_get_dma_tag(nic->dev),		/* parent tag */
	    1,					/* alignment */
	    0,					/* boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    NICVF_TSO_MAXSIZE,			/* maxsize */
	    NICVF_TSO_NSEGS,			/* nsegments */
	    MCLBYTES,				/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sq->snd_buff_dmat);		/* dmat */

	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to create busdma tag for Tx buffers\n");
		goto error;
	}

	/* Allocate send buffers array */
	sq->snd_buff = malloc(sizeof(*sq->snd_buff) * q_len, M_NICVF,
	    (M_NOWAIT | M_ZERO));
	if (sq->snd_buff == NULL) {
		device_printf(nic->dev,
		    "Could not allocate memory for Tx buffers array\n");
		err = ENOMEM;
		goto error;
	}

	/* Now populate maps */
	for (i = 0; i < q_len; i++) {
		err = bus_dmamap_create(sq->snd_buff_dmat, 0,
		    &sq->snd_buff[i].dmap);
		if (err != 0) {
			device_printf(nic->dev,
			    "Failed to create DMA maps for Tx buffers\n");
			goto error;
		}
	}
	NICVF_TX_UNLOCK(sq);

	/* Allocate taskqueue */
	TASK_INIT(&sq->snd_task, 0, nicvf_snd_task, sq);
	sq->snd_taskq = taskqueue_create_fast("nicvf_snd_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sq->snd_taskq);
	taskqueue_start_threads(&sq->snd_taskq, 1, PI_NET, "%s: snd_taskq(%d)",
	    device_get_nameunit(nic->dev), qidx);

	return (0);
error:
	NICVF_TX_UNLOCK(sq);
	return (err);
}
static void
nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
	struct queue_set *qs = nic->qs;
	size_t i;
	int err;

	if (sq == NULL)
		return;

	if (sq->snd_taskq != NULL) {
		/* Remove task */
		while (taskqueue_cancel(sq->snd_taskq, &sq->snd_task, NULL) != 0)
			taskqueue_drain(sq->snd_taskq, &sq->snd_task);

		taskqueue_free(sq->snd_taskq);
		sq->snd_taskq = NULL;
	}

	NICVF_TX_LOCK(sq);
	if (sq->snd_buff_dmat != NULL) {
		if (sq->snd_buff != NULL) {
			for (i = 0; i < qs->sq_len; i++) {
				m_freem(sq->snd_buff[i].mbuf);
				sq->snd_buff[i].mbuf = NULL;

				bus_dmamap_unload(sq->snd_buff_dmat,
				    sq->snd_buff[i].dmap);
				err = bus_dmamap_destroy(sq->snd_buff_dmat,
				    sq->snd_buff[i].dmap);
				/*
				 * If bus_dmamap_destroy fails it can cause
				 * random panic later if the tag is also
				 * destroyed in the process.
				 */
				KASSERT(err == 0,
				    ("%s: Could not destroy DMA map for SQ",
				    __func__));
			}
		}

		free(sq->snd_buff, M_NICVF);

		err = bus_dma_tag_destroy(sq->snd_buff_dmat);
		KASSERT(err == 0,
		    ("%s: Trying to destroy BUSY DMA tag", __func__));
	}

	/* Free private driver ring for this send queue */
	if (sq->br != NULL)
		drbr_free(sq->br, M_DEVBUF);

	if (sq->dmem.base != NULL)
		nicvf_free_q_desc_mem(nic, &sq->dmem);

	NICVF_TX_UNLOCK(sq);
	/* Destroy Tx lock */
	mtx_destroy(&sq->mtx);
	memset(sq->mtx_name, 0, sizeof(sq->mtx_name));
}

static void
nicvf_reclaim_snd_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
{

	/* Disable send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
	/* Check if SQ is stopped */
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
		return;
	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
}

static void
nicvf_reclaim_rcv_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
{
	union nic_mbx mbx = {};

	/* Make sure all packets in the pipeline are written back into mem */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void
nicvf_reclaim_cmp_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
{

	/* Disable timer threshold (doesn't get reset upon CQ reset) */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	/* Disable completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
}
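
/*
 * Added summary of the RBDR reclaim protocol implemented below: bits 63:62
 * of STATUS0 encode the FIFO state (0x3 meaning FAIL, which requires a reset
 * before the ring can be disabled), and PREFETCH_STATUS appears to hold two
 * 32-bit counters that match once all prefetched descriptors have drained.
 */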
static void
nicvf_reclaim_rbdr(struct nicvf *nic, struct rbdr *rbdr, int qidx)
{
	uint64_t tmp, fifo_state;
	int timeout = 10;

	/* Save head and tail pointers for freeing up buffers */
	rbdr->head =
	    nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
	rbdr->tail =
	    nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;

	/*
	 * If RBDR FIFO is in 'FAIL' state then do a reset first
	 * before reclaiming.
	 */
	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
	if (((fifo_state >> 62) & 0x03) == 0x3) {
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
		    qidx, NICVF_RBDR_RESET);
	}

	/* Disable RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
	while (1) {
		tmp = nicvf_queue_reg_read(nic,
		    NIC_QSET_RBDR_0_1_PREFETCH_STATUS, qidx);
		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
			break;

		DELAY(1000);
		timeout--;
		if (!timeout) {
			device_printf(nic->dev,
			    "Failed polling on prefetch status\n");
			return;
		}
	}
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
	    NICVF_RBDR_RESET);

	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
		return;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
}
/* Configures receive queue */
static void
nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
    int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct rcv_queue *rq;
	struct rq_cfg rq_cfg;
	struct ifnet *ifp;
	struct lro_ctrl *lro;

	ifp = nic->ifp;

	rq = &qs->rq[qidx];
	rq->enable = enable;

	lro = &rq->lro;

	/* Disable receive queue */
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);

	if (!rq->enable) {
		nicvf_reclaim_rcv_queue(nic, qs, qidx);
		/* Free LRO memory */
		tcp_lro_free(lro);
		rq->lro_enabled = FALSE;
		return;
	}

	/* Configure LRO if enabled */
	rq->lro_enabled = FALSE;
	if ((if_getcapenable(ifp) & IFCAP_LRO) != 0) {
		if (tcp_lro_init(lro) != 0) {
			device_printf(nic->dev,
			    "Failed to initialize LRO for RXQ%d\n", qidx);
		} else {
			rq->lro_enabled = TRUE;
			lro->ifp = nic->ifp;
		}
	}

	rq->cq_qs = qs->vnic_id;
	rq->cq_idx = qidx;
	rq->start_rbdr_qs = qs->vnic_id;
	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
	rq->cont_rbdr_qs = qs->vnic_id;
	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
	/* all writes of RBDR data to be loaded into L2 Cache as well */
	rq->caching = 1;

	/* Send a mailbox msg to PF to config RQ */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
	mbx.rq.qs_num = qs->vnic_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
	    (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
	    (rq->cont_qs_rbdr_idx << 8) | (rq->start_rbdr_qs << 1) |
	    (rq->start_qs_rbdr_idx);
	nicvf_send_msg_to_pf(nic, &mbx);

	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
	mbx.rq.cfg = (1UL << 63) | (1UL << 62) | (qs->vnic_id << 0);
	nicvf_send_msg_to_pf(nic, &mbx);

	/*
	 * RQ drop config
	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
	 */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
	mbx.rq.cfg = (1UL << 62) | (RQ_CQ_DROP << 8);
	nicvf_send_msg_to_pf(nic, &mbx);

	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);

	/* Enable Receive queue */
	rq_cfg.ena = 1;
	rq_cfg.tcp_ena = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx,
	    *(uint64_t *)&rq_cfg);
}

/* Configures completion queue */
static void
nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
    int qidx, boolean_t enable)
{
	struct cmp_queue *cq;
	struct cq_cfg cq_cfg;

	cq = &qs->cq[qidx];
	cq->enable = enable;

	if (!cq->enable) {
		nicvf_reclaim_cmp_queue(nic, qs, qidx);
		return;
	}

	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

	/* Set completion queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx,
	    (uint64_t)(cq->dmem.phys_base));

	/* Enable Completion queue */
	cq_cfg.ena = 1;
	cq_cfg.reset = 0;
	cq_cfg.caching = 0;
	cq_cfg.qsize = CMP_QSIZE;
	cq_cfg.avg_con = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(uint64_t *)&cq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx,
	    nic->cq_coalesce_usecs);
}
/* Configures transmit queue */
static void
nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx,
    boolean_t enable)
{
	union nic_mbx mbx = {};
	struct snd_queue *sq;
	struct sq_cfg sq_cfg;

	sq = &qs->sq[qidx];
	sq->enable = enable;

	if (!sq->enable) {
		nicvf_reclaim_snd_queue(nic, qs, qidx);
		return;
	}

	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

	sq->cq_qs = qs->vnic_id;
	sq->cq_idx = qidx;

	/* Send a mailbox msg to PF to config SQ */
	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
	mbx.sq.qs_num = qs->vnic_id;
	mbx.sq.sq_num = qidx;
	mbx.sq.sqs_mode = nic->sqs_mode;
	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx,
	    (uint64_t)(sq->dmem.phys_base));

	/* Enable send queue & set queue size */
	sq_cfg.ena = 1;
	sq_cfg.reset = 0;
	sq_cfg.ldwb = 0;
	sq_cfg.qsize = SND_QSIZE;
	sq_cfg.tstmp_bgx_intf = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(uint64_t *)&sq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
}

/* Configures receive buffer descriptor ring */
static void
nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, int qidx,
    boolean_t enable)
{
	struct rbdr *rbdr;
	struct rbdr_cfg rbdr_cfg;

	rbdr = &qs->rbdr[qidx];
	nicvf_reclaim_rbdr(nic, rbdr, qidx);
	if (!enable)
		return;

	/* Set descriptor base address */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx,
	    (uint64_t)(rbdr->dmem.phys_base));

	/* Enable RBDR & set queue size */
	/* Buffer size should be in multiples of 128 bytes */
	rbdr_cfg.ena = 1;
	rbdr_cfg.reset = 0;
	rbdr_cfg.ldwb = 0;
	rbdr_cfg.qsize = RBDR_SIZE;
	rbdr_cfg.avg_con = 0;
	rbdr_cfg.lines = rbdr->dma_size / 128;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
	    *(uint64_t *)&rbdr_cfg);

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, qidx,
	    qs->rbdr_len - 1);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, qidx,
	    rbdr->thresh - 1);
}

/* Requests PF to assign and enable Qset */
void
nicvf_qset_config(struct nicvf *nic, boolean_t enable)
{
	union nic_mbx mbx = {};
	struct queue_set *qs;
	struct qs_cfg *qs_cfg;

	qs = nic->qs;
	if (qs == NULL) {
		device_printf(nic->dev,
		    "Qset is still not allocated, don't init queues\n");
		return;
	}

	qs->enable = enable;
	qs->vnic_id = nic->vf_id;

	/* Send a mailbox msg to PF to config Qset */
	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
	mbx.qs.num = qs->vnic_id;

	mbx.qs.cfg = 0;
	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
	if (qs->enable) {
		qs_cfg->ena = 1;
		qs_cfg->vnic = qs->vnic_id;
	}
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void
nicvf_free_resources(struct nicvf *nic)
{
    int qidx;
    struct queue_set *qs;

    qs = nic->qs;
    /*
     * Remove QS error task first since it has to be dead
     * to safely free completion queue tasks.
     */
    if (qs->qs_err_taskq != NULL) {
        /* Shut down QS error tasks */
        while (taskqueue_cancel(qs->qs_err_taskq,
            &qs->qs_err_task, NULL) != 0) {
            taskqueue_drain(qs->qs_err_taskq, &qs->qs_err_task);
        }
        taskqueue_free(qs->qs_err_taskq);
        qs->qs_err_taskq = NULL;
    }
    /* Free receive buffer descriptor ring */
    for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
        nicvf_free_rbdr(nic, &qs->rbdr[qidx]);

    /* Free completion queue */
    for (qidx = 0; qidx < qs->cq_cnt; qidx++)
        nicvf_free_cmp_queue(nic, &qs->cq[qidx]);

    /* Free send queue */
    for (qidx = 0; qidx < qs->sq_cnt; qidx++)
        nicvf_free_snd_queue(nic, &qs->sq[qidx]);
}

static int
nicvf_alloc_resources(struct nicvf *nic)
{
    struct queue_set *qs = nic->qs;
    int qidx;

    /* Alloc receive buffer descriptor ring */
    for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
        if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
            DMA_BUFFER_LEN, qidx))
            goto alloc_fail;
    }

    /* Alloc send queue */
    for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
        if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
            goto alloc_fail;
    }

    /* Alloc completion queue */
    for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
        if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len, qidx))
            goto alloc_fail;
    }

    /* Allocate QS error taskqueue */
    TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic);
    qs->qs_err_taskq = taskqueue_create_fast("nicvf_qs_err_taskq", M_WAITOK,
        taskqueue_thread_enqueue, &qs->qs_err_taskq);
    taskqueue_start_threads(&qs->qs_err_taskq, 1, PI_NET, "%s: qs_taskq",
        device_get_nameunit(nic->dev));

    return (0);
alloc_fail:
    nicvf_free_resources(nic);
    return (ENOMEM);
}

int
nicvf_set_qset_resources(struct nicvf *nic)
{
    struct queue_set *qs;

    qs = malloc(sizeof(*qs), M_NICVF, (M_ZERO | M_WAITOK));
    nic->qs = qs;

    /* Set count of each queue */
    qs->rbdr_cnt = RBDR_CNT;
    qs->rq_cnt = RCV_QUEUE_CNT;
    qs->sq_cnt = SND_QUEUE_CNT;
    qs->cq_cnt = CMP_QUEUE_CNT;

    /* Set queue lengths */
    qs->rbdr_len = RCV_BUF_COUNT;
    qs->sq_len = SND_QUEUE_LEN;
    qs->cq_len = CMP_QUEUE_LEN;

    nic->rx_queues = qs->rq_cnt;
    nic->tx_queues = qs->sq_cnt;

    return (0);
}
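
/*
 * nicvf_config_data_transfer() below brings the data path up in
 * SQ -> CQ -> RBDR -> RQ order and tears it down starting from the
 * receive side (RQ first, then RBDR), which stops new buffers from
 * being posted to rings that are about to be reclaimed.
 */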

int
nicvf_config_data_transfer(struct nicvf *nic, boolean_t enable)
{
    boolean_t disable = FALSE;
    struct queue_set *qs;
    int qidx;

    qs = nic->qs;
    if (qs == NULL)
        return (0);

    if (enable) {
        if (nicvf_alloc_resources(nic) != 0)
            return (ENOMEM);

        for (qidx = 0; qidx < qs->sq_cnt; qidx++)
            nicvf_snd_queue_config(nic, qs, qidx, enable);
        for (qidx = 0; qidx < qs->cq_cnt; qidx++)
            nicvf_cmp_queue_config(nic, qs, qidx, enable);
        for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
            nicvf_rbdr_config(nic, qs, qidx, enable);
        for (qidx = 0; qidx < qs->rq_cnt; qidx++)
            nicvf_rcv_queue_config(nic, qs, qidx, enable);
    } else {
        for (qidx = 0; qidx < qs->rq_cnt; qidx++)
            nicvf_rcv_queue_config(nic, qs, qidx, disable);
        for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
            nicvf_rbdr_config(nic, qs, qidx, disable);
        for (qidx = 0; qidx < qs->sq_cnt; qidx++)
            nicvf_snd_queue_config(nic, qs, qidx, disable);
        for (qidx = 0; qidx < qs->cq_cnt; qidx++)
            nicvf_cmp_queue_config(nic, qs, qidx, disable);

        nicvf_free_resources(nic);
    }

    return (0);
}

/*
 * Get a free desc from SQ
 * returns descriptor pointer & descriptor number
 */
static __inline int
nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
    int qentry;

    qentry = sq->tail;
    atomic_subtract_int(&sq->free_cnt, desc_cnt);
    sq->tail += desc_cnt;
    sq->tail &= (sq->dmem.q_len - 1);

    return (qentry);
}

/* Free descriptor back to SQ for future use */
static void
nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{

    atomic_add_int(&sq->free_cnt, desc_cnt);
    sq->head += desc_cnt;
    sq->head &= (sq->dmem.q_len - 1);
}

static __inline int
nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
{
    qentry++;
    qentry &= (sq->dmem.q_len - 1);
    return (qentry);
}
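
/*
 * The index helpers above assume the ring length is a power of two, so
 * "idx & (q_len - 1)" is a cheap "idx % q_len" and the head/tail
 * counters wrap naturally. A standalone sketch of the technique
 * (hypothetical ring, not part of the driver):
 */
#if 0
#define EXAMPLE_RING_LEN 256            /* must be a power of two */

static unsigned int
example_ring_advance(unsigned int idx, unsigned int cnt)
{
    return ((idx + cnt) & (EXAMPLE_RING_LEN - 1));
}
#endif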

static void
nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
    uint64_t sq_cfg;

    sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
    sq_cfg |= NICVF_SQ_EN;
    nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
    /* Ring doorbell so that H/W restarts processing SQEs */
    nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
}

static void
nicvf_sq_disable(struct nicvf *nic, int qidx)
{
    uint64_t sq_cfg;

    sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
    sq_cfg &= ~NICVF_SQ_EN;
    nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
}

static void
nicvf_sq_free_used_descs(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
    uint64_t head, tail;
    struct snd_buff *snd_buff;
    struct sq_hdr_subdesc *hdr;

    NICVF_TX_LOCK(sq);
    head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
    tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
    while (sq->head != head) {
        hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
        if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
            nicvf_put_sq_desc(sq, 1);
            continue;
        }
        snd_buff = &sq->snd_buff[sq->head];
        if (snd_buff->mbuf != NULL) {
            bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
            m_freem(snd_buff->mbuf);
            sq->snd_buff[sq->head].mbuf = NULL;
        }
        nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
    }
    NICVF_TX_UNLOCK(sq);
}
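
/*
 * A transmitted packet occupies a contiguous run of SQ entries: one
 * HEADER subdescriptor followed by one GATHER subdescriptor per DMA
 * segment, which is why the reclaim loop above releases
 * "hdr->subdesc_cnt + 1" entries per packet. Roughly:
 *
 *   [ HDR | GATHER 0 | GATHER 1 | ... | GATHER n-1 ]
 *          \________ subdesc_cnt entries ________/
 */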

/*
 * Add SQ HEADER subdescriptor.
 * First subdescriptor for every send descriptor.
 */
static __inline int
nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
    int subdesc_cnt, struct mbuf *mbuf, int len)
{
    struct nicvf *nic;
    struct sq_hdr_subdesc *hdr;
    struct ether_vlan_header *eh;
#ifdef INET
    struct ip *ip;
    struct tcphdr *th;
#endif
    uint16_t etype;
    int ehdrlen, iphlen, poff;

    nic = sq->nic;

    hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
    sq->snd_buff[qentry].mbuf = mbuf;

    memset(hdr, 0, SND_QUEUE_DESC_SIZE);
    hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
    /* Enable notification via CQE after processing SQE */
    hdr->post_cqe = 1;
    /* No of subdescriptors following this */
    hdr->subdesc_cnt = subdesc_cnt;
    hdr->tot_len = len;

    eh = mtod(mbuf, struct ether_vlan_header *);
    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
        ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
        etype = ntohs(eh->evl_proto);
    } else {
        ehdrlen = ETHER_HDR_LEN;
        etype = ntohs(eh->evl_encap_proto);
    }

    switch (etype) {
#ifdef INET6
    case ETHERTYPE_IPV6:
        /* ARM64TODO: Add support for IPv6 */
        hdr->csum_l3 = 0;
        sq->snd_buff[qentry].mbuf = NULL;
        return (ENXIO);
#endif
#ifdef INET
    case ETHERTYPE_IP:
        if (mbuf->m_len < ehdrlen + sizeof(struct ip)) {
            mbuf = m_pullup(mbuf, ehdrlen + sizeof(struct ip));
            sq->snd_buff[qentry].mbuf = mbuf;
            if (mbuf == NULL)
                return (ENOBUFS);
        }

        ip = (struct ip *)(mbuf->m_data + ehdrlen);
        iphlen = ip->ip_hl << 2;
        poff = ehdrlen + iphlen;

        if (mbuf->m_pkthdr.csum_flags != 0) {
            hdr->csum_l3 = 1; /* Enable IP csum calculation */
            switch (ip->ip_p) {
            case IPPROTO_TCP:
                if ((mbuf->m_pkthdr.csum_flags & CSUM_TCP) == 0)
                    break;

                if (mbuf->m_len < (poff + sizeof(struct tcphdr))) {
                    mbuf = m_pullup(mbuf, poff + sizeof(struct tcphdr));
                    sq->snd_buff[qentry].mbuf = mbuf;
                    if (mbuf == NULL)
                        return (ENOBUFS);
                }
                hdr->csum_l4 = SEND_L4_CSUM_TCP;
                break;
            case IPPROTO_UDP:
                if ((mbuf->m_pkthdr.csum_flags & CSUM_UDP) == 0)
                    break;

                if (mbuf->m_len < (poff + sizeof(struct udphdr))) {
                    mbuf = m_pullup(mbuf, poff + sizeof(struct udphdr));
                    sq->snd_buff[qentry].mbuf = mbuf;
                    if (mbuf == NULL)
                        return (ENOBUFS);
                }
                hdr->csum_l4 = SEND_L4_CSUM_UDP;
                break;
            case IPPROTO_SCTP:
                if ((mbuf->m_pkthdr.csum_flags & CSUM_SCTP) == 0)
                    break;

                if (mbuf->m_len < (poff + sizeof(struct sctphdr))) {
                    mbuf = m_pullup(mbuf, poff + sizeof(struct sctphdr));
                    sq->snd_buff[qentry].mbuf = mbuf;
                    if (mbuf == NULL)
                        return (ENOBUFS);
                }
                hdr->csum_l4 = SEND_L4_CSUM_SCTP;
                break;
            default:
                break;
            }
            hdr->l3_offset = ehdrlen;
            hdr->l4_offset = ehdrlen + iphlen;
        }

        if ((mbuf->m_pkthdr.tso_segsz != 0) && nic->hw_tso) {
            /*
             * Extract ip again as m_data could have been modified.
             */
            ip = (struct ip *)(mbuf->m_data + ehdrlen);
            th = (struct tcphdr *)((caddr_t)ip + iphlen);

            hdr->tso = 1;
            hdr->tso_start = ehdrlen + iphlen + (th->th_off * 4);
            hdr->tso_max_paysize = mbuf->m_pkthdr.tso_segsz;
            hdr->inner_l3_offset = ehdrlen - 2;
            nic->drv_stats.tx_tso++;
        }
        break;
#endif
    default:
        hdr->csum_l3 = 0;
    }

    return (0);
}

/*
 * SQ GATHER subdescriptor
 * Must follow HDR descriptor
 */
static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
    int size, uint64_t data)
{
    struct sq_gather_subdesc *gather;

    qentry &= (sq->dmem.q_len - 1);
    gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);

    memset(gather, 0, SND_QUEUE_DESC_SIZE);
    gather->subdesc_type = SQ_DESC_TYPE_GATHER;
    gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
    gather->size = size;
    gather->addr = data;
}
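
/*
 * nicvf_tx_mbuf_locked() below ties the TX pieces together: DMA-map the
 * mbuf, reserve the descriptor run, emit the HDR and GATHER
 * subdescriptors, then ring the doorbell. Note the mbuf ownership rules
 * visible in the code: on a mapping or header-build failure the mbuf is
 * freed and *mbufp set to NULL, while ENOBUFS on a full ring leaves the
 * mbuf with the caller. A hypothetical caller (sq, mbuf, ifp and br are
 * illustrative names, not driver state) might look like:
 */
#if 0
NICVF_TX_LOCK(sq);
err = nicvf_tx_mbuf_locked(sq, &mbuf);
NICVF_TX_UNLOCK(sq);
if (err != 0 && mbuf != NULL)
    drbr_putback(ifp, br, mbuf);    /* ring full; retry later */
#endif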

/* Put an mbuf to a SQ for packet transfer. */
static int
nicvf_tx_mbuf_locked(struct snd_queue *sq, struct mbuf **mbufp)
{
    bus_dma_segment_t segs[256];
    struct snd_buff *snd_buff;
    size_t seg;
    int nsegs, qentry;
    int subdesc_cnt;
    int err;

    NICVF_TX_LOCK_ASSERT(sq);

    if (sq->free_cnt == 0)
        return (ENOBUFS);

    snd_buff = &sq->snd_buff[sq->tail];

    err = bus_dmamap_load_mbuf_sg(sq->snd_buff_dmat, snd_buff->dmap,
        *mbufp, segs, &nsegs, BUS_DMA_NOWAIT);
    if (__predict_false(err != 0)) {
        /* ARM64TODO: Add mbuf defragmenting if we lack maps */
        m_freem(*mbufp);
        *mbufp = NULL;
        return (err);
    }

    /* Set how many subdescriptors are required */
    subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT + nsegs - 1;
    if (subdesc_cnt > sq->free_cnt) {
        /* ARM64TODO: Add mbuf defragmentation if we lack descriptors */
        bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
        return (ENOBUFS);
    }

    qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

    /* Add SQ header subdesc */
    err = nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, *mbufp,
        (*mbufp)->m_pkthdr.len);
    if (err != 0) {
        nicvf_put_sq_desc(sq, subdesc_cnt);
        bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
        if (err == ENOBUFS) {
            m_freem(*mbufp);
            *mbufp = NULL;
        }
        return (err);
    }

    /* Add SQ gather subdescs */
    for (seg = 0; seg < nsegs; seg++) {
        qentry = nicvf_get_nxt_sqentry(sq, qentry);
        nicvf_sq_add_gather_subdesc(sq, qentry, segs[seg].ds_len,
            segs[seg].ds_addr);
    }

    /* make sure all memory stores are done before ringing doorbell */
    bus_dmamap_sync(sq->dmem.dmat, sq->dmem.dmap, BUS_DMASYNC_PREWRITE);

    dprintf(sq->nic->dev, "%s: sq->idx: %d, subdesc_cnt: %d\n",
        __func__, sq->idx, subdesc_cnt);
    /* Inform HW to xmit new packet */
    nicvf_queue_reg_write(sq->nic, NIC_QSET_SQ_0_7_DOOR,
        sq->idx, subdesc_cnt);
    return (0);
}

static __inline u_int
frag_num(u_int i)
{
#if BYTE_ORDER == BIG_ENDIAN
    return ((i & ~3) + 3 - (i & 3));
#else
    return (i);
#endif
}
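
/*
 * frag_num() above appears to compensate for byte order: the
 * per-fragment lengths are 16-bit fields packed four to a 64-bit word
 * in the CQE, so on a big-endian host the fields within each word are
 * seen in reverse order and the index is mirrored within its group of
 * four (0,1,2,3 -> 3,2,1,0).
 */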

/* Returns MBUF for a received packet */
struct mbuf *
nicvf_get_rcv_mbuf(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
    int frag;
    int payload_len = 0;
    struct mbuf *mbuf;
    struct mbuf *mbuf_frag;
    uint16_t *rb_lens = NULL;
    uint64_t *rb_ptrs = NULL;

    mbuf = NULL;
    rb_lens = (uint16_t *)((uint8_t *)cqe_rx + (3 * sizeof(uint64_t)));
    rb_ptrs = (uint64_t *)((uint8_t *)cqe_rx + (6 * sizeof(uint64_t)));

    dprintf(nic->dev, "%s rb_cnt %d rb0_ptr %lx rb0_sz %d\n",
        __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);

    for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
        payload_len = rb_lens[frag_num(frag)];
        if (frag == 0) {
            /* First fragment */
            mbuf = nicvf_rb_ptr_to_mbuf(nic,
                (*rb_ptrs - cqe_rx->align_pad));
            mbuf->m_len = payload_len;
            mbuf->m_data += cqe_rx->align_pad;
            if_setrcvif(mbuf, nic->ifp);
        } else {
            /* Add fragments */
            mbuf_frag = nicvf_rb_ptr_to_mbuf(nic, *rb_ptrs);
            m_append(mbuf, payload_len, mbuf_frag->m_data);
            m_freem(mbuf_frag);
        }
        /* Next buffer pointer */
        rb_ptrs++;
    }

    if (__predict_true(mbuf != NULL)) {
        m_fixhdr(mbuf);
        mbuf->m_pkthdr.flowid = cqe_rx->rq_idx;
        M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
        if (__predict_true((if_getcapenable(nic->ifp) & IFCAP_RXCSUM) != 0)) {
            /*
             * HW by default verifies IP & TCP/UDP/SCTP checksums
             */
            if (__predict_true(cqe_rx->l3_type == L3TYPE_IPV4)) {
                mbuf->m_pkthdr.csum_flags =
                    (CSUM_IP_CHECKED | CSUM_IP_VALID);
            }

            switch (cqe_rx->l4_type) {
            case L4TYPE_UDP:
            case L4TYPE_TCP: /* fall through */
                mbuf->m_pkthdr.csum_flags |=
                    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
                mbuf->m_pkthdr.csum_data = 0xffff;
                break;
            case L4TYPE_SCTP:
                mbuf->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
                break;
            default:
                break;
            }
        }
    }

    return (mbuf);
}

/* Enable interrupt */
void
nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
{
    uint64_t reg_val;

    reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);

    switch (int_type) {
    case NICVF_INTR_CQ:
        reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
        break;
    case NICVF_INTR_SQ:
        reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
        break;
    case NICVF_INTR_RBDR:
        reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
        break;
    case NICVF_INTR_PKT_DROP:
        reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
        break;
    case NICVF_INTR_TCP_TIMER:
        reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
        break;
    case NICVF_INTR_MBOX:
        reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
        break;
    case NICVF_INTR_QS_ERR:
        reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
        break;
    default:
        device_printf(nic->dev,
            "Failed to enable interrupt: unknown type\n");
        break;
    }

    nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
}
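
/*
 * The interrupt enables use the usual W1S/W1C ("write one to set /
 * write one to clear") register pair: bits written as 1 to
 * NIC_VF_ENA_W1S become set and bits written as 1 to NIC_VF_ENA_W1C
 * become cleared, while 0 bits leave the other enables untouched. That
 * is why nicvf_disable_intr() below builds a fresh mask and writes it
 * to the W1C side instead of read-modify-writing the enable register.
 */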

/* Disable interrupt */
void
nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
{
    uint64_t reg_val = 0;

    switch (int_type) {
    case NICVF_INTR_CQ:
        reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
        break;
    case NICVF_INTR_SQ:
        reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
        break;
    case NICVF_INTR_RBDR:
        reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
        break;
    case NICVF_INTR_PKT_DROP:
        reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
        break;
    case NICVF_INTR_TCP_TIMER:
        reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
        break;
    case NICVF_INTR_MBOX:
        reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
        break;
    case NICVF_INTR_QS_ERR:
        reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
        break;
    default:
        device_printf(nic->dev,
            "Failed to disable interrupt: unknown type\n");
        break;
    }

    nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
}

/* Clear interrupt */
void
nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
    uint64_t reg_val = 0;

    switch (int_type) {
    case NICVF_INTR_CQ:
        reg_val = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
        break;
    case NICVF_INTR_SQ:
        reg_val = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
        break;
    case NICVF_INTR_RBDR:
        reg_val = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
        break;
    case NICVF_INTR_PKT_DROP:
        reg_val = (1UL << NICVF_INTR_PKT_DROP_SHIFT);
        break;
    case NICVF_INTR_TCP_TIMER:
        reg_val = (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
        break;
    case NICVF_INTR_MBOX:
        reg_val = (1UL << NICVF_INTR_MBOX_SHIFT);
        break;
    case NICVF_INTR_QS_ERR:
        reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
        break;
    default:
        device_printf(nic->dev,
            "Failed to clear interrupt: unknown type\n");
        break;
    }

    nicvf_reg_write(nic, NIC_VF_INT, reg_val);
}
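
/*
 * Note the register split: the enable mask is manipulated through the
 * NIC_VF_ENA_W1S/W1C pair, while pending interrupt state is held in
 * NIC_VF_INT, where nicvf_clear_intr() above writes a 1 to acknowledge
 * the selected source.
 */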

/* Check if interrupt is enabled */
int
nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
    uint64_t reg_val;
    uint64_t mask = 0xff;

    reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);

    switch (int_type) {
    case NICVF_INTR_CQ:
        mask = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
        break;
    case NICVF_INTR_SQ:
        mask = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
        break;
    case NICVF_INTR_RBDR:
        mask = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
        break;
    case NICVF_INTR_PKT_DROP:
        mask = NICVF_INTR_PKT_DROP_MASK;
        break;
    case NICVF_INTR_TCP_TIMER:
        mask = NICVF_INTR_TCP_TIMER_MASK;
        break;
    case NICVF_INTR_MBOX:
        mask = NICVF_INTR_MBOX_MASK;
        break;
    case NICVF_INTR_QS_ERR:
        mask = NICVF_INTR_QS_ERR_MASK;
        break;
    default:
        device_printf(nic->dev,
            "Failed to check interrupt enable: unknown type\n");
        break;
    }

    return (reg_val & mask);
}

void
nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
{
    struct rcv_queue *rq;

#define GET_RQ_STATS(reg) \
    nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 | \
        (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

    rq = &nic->qs->rq[rq_idx];
    rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
    rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
}

void
nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
{
    struct snd_queue *sq;

#define GET_SQ_STATS(reg) \
    nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 | \
        (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

    sq = &nic->qs->sq[sq_idx];
    sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
    sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
}
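
/*
 * The GET_RQ_STATS/GET_SQ_STATS macros above compose the CSR offset by
 * OR-ing the queue number (shifted by NIC_Q_NUM_SHIFT) and the stat
 * index (shifted by 3, i.e. scaled by the 8-byte register stride) into
 * the base statistics offset.
 */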

/* Check for errors in the receive completion queue entry */
int
nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cmp_queue *cq,
    struct cqe_rx_t *cqe_rx)
{
    struct nicvf_hw_stats *stats = &nic->hw_stats;
    struct nicvf_drv_stats *drv_stats = &nic->drv_stats;

    if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
        drv_stats->rx_frames_ok++;
        return (0);
    }

    switch (cqe_rx->err_opcode) {
    case CQ_RX_ERROP_RE_PARTIAL:
        stats->rx_bgx_truncated_pkts++;
        break;
    case CQ_RX_ERROP_RE_JABBER:
        stats->rx_jabber_errs++;
        break;
    case CQ_RX_ERROP_RE_FCS:
        stats->rx_fcs_errs++;
        break;
    case CQ_RX_ERROP_RE_RX_CTL:
        stats->rx_bgx_errs++;
        break;
    case CQ_RX_ERROP_PREL2_ERR:
        stats->rx_prel2_errs++;
        break;
    case CQ_RX_ERROP_L2_MAL:
        stats->rx_l2_hdr_malformed++;
        break;
    case CQ_RX_ERROP_L2_OVERSIZE:
        stats->rx_oversize++;
        break;
    case CQ_RX_ERROP_L2_UNDERSIZE:
        stats->rx_undersize++;
        break;
    case CQ_RX_ERROP_L2_LENMISM:
        stats->rx_l2_len_mismatch++;
        break;
    case CQ_RX_ERROP_L2_PCLP:
        stats->rx_l2_pclp++;
        break;
    case CQ_RX_ERROP_IP_NOT:
        stats->rx_ip_ver_errs++;
        break;
    case CQ_RX_ERROP_IP_CSUM_ERR:
        stats->rx_ip_csum_errs++;
        break;
    case CQ_RX_ERROP_IP_MAL:
        stats->rx_ip_hdr_malformed++;
        break;
    case CQ_RX_ERROP_IP_MALD:
        stats->rx_ip_payload_malformed++;
        break;
    case CQ_RX_ERROP_IP_HOP:
        stats->rx_ip_ttl_errs++;
        break;
    case CQ_RX_ERROP_L3_PCLP:
        stats->rx_l3_pclp++;
        break;
    case CQ_RX_ERROP_L4_MAL:
        stats->rx_l4_malformed++;
        break;
    case CQ_RX_ERROP_L4_CHK:
        stats->rx_l4_csum_errs++;
        break;
    case CQ_RX_ERROP_UDP_LEN:
        stats->rx_udp_len_errs++;
        break;
    case CQ_RX_ERROP_L4_PORT:
        stats->rx_l4_port_errs++;
        break;
    case CQ_RX_ERROP_TCP_FLAG:
        stats->rx_tcp_flag_errs++;
        break;
    case CQ_RX_ERROP_TCP_OFFSET:
        stats->rx_tcp_offset_errs++;
        break;
    case CQ_RX_ERROP_L4_PCLP:
        stats->rx_l4_pclp++;
        break;
    case CQ_RX_ERROP_RBDR_TRUNC:
        stats->rx_truncated_pkts++;
        break;
    }

    return (1);
}

/* Check for errors in the send completion queue entry */
int
nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cmp_queue *cq,
    struct cqe_send_t *cqe_tx)
{
    struct cmp_queue_stats *stats = &cq->stats;

    switch (cqe_tx->send_status) {
    case CQ_TX_ERROP_GOOD:
        stats->tx.good++;
        return (0);
    case CQ_TX_ERROP_DESC_FAULT:
        stats->tx.desc_fault++;
        break;
    case CQ_TX_ERROP_HDR_CONS_ERR:
        stats->tx.hdr_cons_err++;
        break;
    case CQ_TX_ERROP_SUBDC_ERR:
        stats->tx.subdesc_err++;
        break;
    case CQ_TX_ERROP_IMM_SIZE_OFLOW:
        stats->tx.imm_size_oflow++;
        break;
    case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
        stats->tx.data_seq_err++;
        break;
    case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
        stats->tx.mem_seq_err++;
        break;
    case CQ_TX_ERROP_LOCK_VIOL:
        stats->tx.lock_viol++;
        break;
    case CQ_TX_ERROP_DATA_FAULT:
        stats->tx.data_fault++;
        break;
    case CQ_TX_ERROP_TSTMP_CONFLICT:
        stats->tx.tstmp_conflict++;
        break;
    case CQ_TX_ERROP_TSTMP_TIMEOUT:
        stats->tx.tstmp_timeout++;
        break;
    case CQ_TX_ERROP_MEM_FAULT:
        stats->tx.mem_fault++;
        break;
    case CQ_TX_ERROP_CK_OVERLAP:
        stats->tx.csum_overlap++;
        break;
    case CQ_TX_ERROP_CK_OFLOW:
        stats->tx.csum_overflow++;
        break;
    }

    return (1);
}