nicvf_queues.c (289550) | nicvf_queues.c (289551) |
---|---|
1/* 2 * Copyright (C) 2015 Cavium Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright --- 9 unchanged lines hidden --- 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * | 1/* 2 * Copyright (C) 2015 Cavium Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright --- 9 unchanged lines hidden --- 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * |
26 * $FreeBSD: head/sys/dev/vnic/nicvf_queues.c 289550 2015-10-18 21:39:15Z zbb $ | 26 * $FreeBSD: head/sys/dev/vnic/nicvf_queues.c 289551 2015-10-18 22:02:58Z zbb $ |
27 * 28 */ | 27 * 28 */ |
29#include <sys/cdefs.h> 30__FBSDID("$FreeBSD: head/sys/dev/vnic/nicvf_queues.c 289551 2015-10-18 22:02:58Z zbb $"); |
|
29 | 31 |
30#include <linux/pci.h> 31#include <linux/netdevice.h> 32#include <linux/ip.h> 33#include <linux/etherdevice.h> 34#include <net/ip.h> 35#include <net/tso.h> | 32#include <sys/param.h> 33#include <sys/systm.h> 34#include <sys/bitset.h> 35#include <sys/bitstring.h> 36#include <sys/buf_ring.h> 37#include <sys/bus.h> 38#include <sys/endian.h> 39#include <sys/kernel.h> 40#include <sys/malloc.h> 41#include <sys/module.h> 42#include <sys/rman.h> 43#include <sys/pciio.h> 44#include <sys/pcpu.h> 45#include <sys/proc.h> 46#include <sys/sockio.h> 47#include <sys/socket.h> 48#include <sys/stdatomic.h> 49#include <sys/cpuset.h> 50#include <sys/lock.h> 51#include <sys/mutex.h> 52#include <sys/smp.h> 53#include <sys/taskqueue.h> |
36 | 54 |
55#include <vm/vm.h> 56#include <vm/pmap.h> 57 58#include <machine/bus.h> 59#include <machine/vmparam.h> 60 61#include <net/ethernet.h> 62#include <net/if.h> 63#include <net/if_var.h> 64#include <net/if_media.h> 65#include <net/ifq.h> 66 67#include <dev/pci/pcireg.h> 68#include <dev/pci/pcivar.h> 69 70#include "thunder_bgx.h" |
|
37#include "nic_reg.h" 38#include "nic.h" 39#include "q_struct.h" 40#include "nicvf_queues.h" 41 | 71#include "nic_reg.h" 72#include "nic.h" 73#include "q_struct.h" 74#include "nicvf_queues.h" 75 |
76#define DEBUG 77#undef DEBUG 78 79#ifdef DEBUG 80#define dprintf(dev, fmt, ...) device_printf(dev, fmt, ##__VA_ARGS__) 81#else 82#define dprintf(dev, fmt, ...) 83#endif 84 85MALLOC_DECLARE(M_NICVF); 86 87static void nicvf_free_snd_queue(struct nicvf *, struct snd_queue *); 88static int nicvf_tx_mbuf_locked(struct snd_queue *, struct mbuf *); 89static struct mbuf * nicvf_get_rcv_mbuf(struct nicvf *, struct cqe_rx_t *); 90static void nicvf_sq_disable(struct nicvf *, int); 91static void nicvf_sq_enable(struct nicvf *, struct snd_queue *, int); 92static void nicvf_put_sq_desc(struct snd_queue *, int); 93static void nicvf_cmp_queue_config(struct nicvf *, struct queue_set *, int, 94 boolean_t); 95static void nicvf_sq_free_used_descs(struct nicvf *, struct snd_queue *, int); 96 97static void nicvf_rbdr_task(void *, int); 98static void nicvf_rbdr_task_nowait(void *, int); 99 |
|
42struct rbuf_info { | 100struct rbuf_info { |
43 struct page *page; 44 void *data; 45 u64 offset; | 101 bus_dma_tag_t dmat; 102 bus_dmamap_t dmap; 103 struct mbuf * mbuf; |
46}; 47 | 104}; 105 |
48#define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES)) | 106#define GET_RBUF_INFO(x) ((struct rbuf_info *)((x) - NICVF_RCV_BUF_ALIGN_BYTES)) |
49 50/* Poll a register for a specific value */ 51static int nicvf_poll_reg(struct nicvf *nic, int qidx, | 107 108/* Poll a register for a specific value */ 109static int nicvf_poll_reg(struct nicvf *nic, int qidx, |
52 u64 reg, int bit_pos, int bits, int val) | 110 uint64_t reg, int bit_pos, int bits, int val) |
53{ | 111{ |
54 u64 bit_mask; 55 u64 reg_val; | 112 uint64_t bit_mask; 113 uint64_t reg_val; |
56 int timeout = 10; 57 | 114 int timeout = 10; 115 |
58 bit_mask = (1ULL << bits) - 1; | 116 bit_mask = (1UL << bits) - 1; |
59 bit_mask = (bit_mask << bit_pos); 60 61 while (timeout) { 62 reg_val = nicvf_queue_reg_read(nic, reg, qidx); 63 if (((reg_val & bit_mask) >> bit_pos) == val) | 117 bit_mask = (bit_mask << bit_pos); 118 119 while (timeout) { 120 reg_val = nicvf_queue_reg_read(nic, reg, qidx); 121 if (((reg_val & bit_mask) >> bit_pos) == val) |
64 return 0; 65 usleep_range(1000, 2000); | 122 return (0); 123 124 DELAY(1000); |
66 timeout--; 67 } | 125 timeout--; 126 } |
68 netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg); 69 return 1; | 127 device_printf(nic->dev, "Poll on reg 0x%lx failed\n", reg); 128 return (ETIMEDOUT); |
70} 71 | 129} 130 |
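The poll loop above isolates a bit field of a queue register before comparing it against the expected value. A minimal userland sketch of that mask arithmetic (register value hypothetical; the driver polls fields such as the 2-bit FIFO state at bit 62 of NIC_QSET_RBDR_0_1_STATUS0):

```c
#include <assert.h>
#include <stdint.h>

/* Extract `bits` bits starting at `bit_pos`, as nicvf_poll_reg() does. */
static uint64_t
field_get(uint64_t reg_val, int bit_pos, int bits)
{
	uint64_t bit_mask;

	bit_mask = (1ULL << bits) - 1;	/* bits=2 -> 0x3 */
	bit_mask = (bit_mask << bit_pos);

	return ((reg_val & bit_mask) >> bit_pos);
}

int
main(void)
{
	/* Hypothetical register snapshot: FIFO state 0x3, bit 21 clear. */
	uint64_t reg_val = (0x3ULL << 62) | 0xdeadULL;

	assert(field_get(reg_val, 62, 2) == 0x3);
	assert(field_get(reg_val, 21, 1) == 0x0);
	return (0);
}
```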
131/* Callback for bus_dmamap_load() */ 132static void 133nicvf_dmamap_q_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 134{ 135 bus_addr_t *paddr; 136 137 KASSERT(nseg == 1, ("wrong number of segments, should be 1")); 138 paddr = arg; 139 *paddr = segs->ds_addr; 140} 141 |
|
72/* Allocate memory for a queue's descriptors */ | 142/* Allocate memory for a queue's descriptors */ |
73static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem, 74 int q_len, int desc_size, int align_bytes) | 143static int 144nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem, 145 int q_len, int desc_size, int align_bytes) |
75{ | 146{ |
147 int err, err_dmat; 148 149 /* Create DMA tag first */ 150 err = bus_dma_tag_create( 151 bus_get_dma_tag(nic->dev), /* parent tag */ 152 align_bytes, /* alignment */ 153 0, /* boundary */ 154 BUS_SPACE_MAXADDR, /* lowaddr */ 155 BUS_SPACE_MAXADDR, /* highaddr */ 156 NULL, NULL, /* filtfunc, filtfuncarg */ 157 (q_len * desc_size), /* maxsize */ 158 1, /* nsegments */ 159 (q_len * desc_size), /* maxsegsize */ 160 0, /* flags */ 161 NULL, NULL, /* lockfunc, lockfuncarg */ 162 &dmem->dmat); /* dmat */ 163 164 if (err != 0) { 165 device_printf(nic->dev, 166 "Failed to create busdma tag for descriptors ring\n"); 167 return (err); 168 } 169 170 /* Allocate segment of contiguous DMA safe memory */ 171 err = bus_dmamem_alloc( 172 dmem->dmat, /* DMA tag */ 173 &dmem->base, /* virtual address */ 174 (BUS_DMA_NOWAIT | BUS_DMA_ZERO), /* flags */ 175 &dmem->dmap); /* DMA map */ 176 if (err != 0) { 177 device_printf(nic->dev, "Failed to allocate DMA safe memory for " 178 "descriptors ring\n"); 179 goto dmamem_fail; 180 } 181 182 err = bus_dmamap_load( 183 dmem->dmat, 184 dmem->dmap, 185 dmem->base, 186 (q_len * desc_size), /* allocation size */ 187 nicvf_dmamap_q_cb, /* map to DMA address cb. */ 188 &dmem->phys_base, /* physical address */ 189 BUS_DMA_NOWAIT); 190 if (err != 0) { 191 device_printf(nic->dev, 192 "Cannot load DMA map of descriptors ring\n"); 193 goto dmamap_fail; 194 } 195 |
|
76 dmem->q_len = q_len; | 196 dmem->q_len = q_len; |
77 dmem->size = (desc_size * q_len) + align_bytes; 78 /* Save address, need it while freeing */ 79 dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size, 80 &dmem->dma, GFP_KERNEL); 81 if (!dmem->unalign_base) 82 return -ENOMEM; | 197 dmem->size = (desc_size * q_len); |
83 | 198 |
84 /* Align memory address for 'align_bytes' */ 85 dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes); 86 dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma); 87 return 0; | 199 return (0); 200 201dmamap_fail: 202 bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap); 203 dmem->phys_base = 0; 204dmamem_fail: 205 err_dmat = bus_dma_tag_destroy(dmem->dmat); 206 dmem->base = NULL; 207 KASSERT(err_dmat == 0, 208 ("%s: Trying to destroy BUSY DMA tag", __func__)); 209 210 return (err); |
88} 89 90/* Free queue's descriptor memory */ | 211} 212 213/* Free queue's descriptor memory */ |
91static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem) | 214static void 215nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem) |
92{ | 216{ |
93 if (!dmem) | 217 int err; 218 219 if ((dmem == NULL) || (dmem->base == NULL)) |
94 return; 95 | 220 return; 221 |
96 dma_free_coherent(&nic->pdev->dev, dmem->size, 97 dmem->unalign_base, dmem->dma); 98 dmem->unalign_base = NULL; | 222 /* Unload a map */ 223 bus_dmamap_sync(dmem->dmat, dmem->dmap, BUS_DMASYNC_POSTREAD); 224 bus_dmamap_unload(dmem->dmat, dmem->dmap); 225 /* Free DMA memory */ 226 bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap); 227 /* Destroy DMA tag */ 228 err = bus_dma_tag_destroy(dmem->dmat); 229 230 KASSERT(err == 0, 231 ("%s: Trying to destroy BUSY DMA tag", __func__)); 232 233 dmem->phys_base = 0; |
99 dmem->base = NULL; 100} 101 | 234 dmem->base = NULL; 235} 236 |
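Read together, the two helpers above follow the standard busdma discipline: create a tag, allocate DMA-safe memory, then load the map; teardown runs in exactly the reverse order. A condensed sketch of that pairing, reusing only the calls already shown above (error handling elided, so this is illustrative rather than production code):

```c
/* Sketch: setup/teardown ordering of nicvf_{alloc,free}_q_desc_mem(). */
static void
q_desc_mem_lifecycle(struct nicvf *nic, struct q_desc_mem *dmem, int size)
{
	/* Setup: tag -> memory -> map load (DMA address via callback). */
	bus_dma_tag_create(bus_get_dma_tag(nic->dev),
	    NICVF_RCV_BUF_ALIGN_BYTES, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size, 0, NULL, NULL,
	    &dmem->dmat);
	bus_dmamem_alloc(dmem->dmat, &dmem->base,
	    (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &dmem->dmap);
	bus_dmamap_load(dmem->dmat, dmem->dmap, dmem->base, size,
	    nicvf_dmamap_q_cb, &dmem->phys_base, BUS_DMA_NOWAIT);

	/* Teardown: sync -> unload -> free -> destroy tag. */
	bus_dmamap_sync(dmem->dmat, dmem->dmap, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(dmem->dmat, dmem->dmap);
	bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
	bus_dma_tag_destroy(dmem->dmat);
}
```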
102/* Allocate buffer for packet reception | 237/* 238 * Allocate buffer for packet reception |
103 * HW returns memory address where packet is DMA'ed but not a pointer 104 * into RBDR ring, so save buffer address at the start of fragment and 105 * align the start address to a cache aligned address 106 */ | 239 * HW returns the memory address where the packet is DMA'ed, not a 240 * pointer into the RBDR ring, so save the buffer address at the start 241 * of the fragment and align that address to a cache line boundary. 242 */ |
107static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, 108 u32 buf_len, u64 **rbuf) | 243static __inline int 244nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr, 245 bus_dmamap_t dmap, int mflags, uint32_t buf_len, bus_addr_t *rbuf) |
109{ | 246{ |
110 u64 data; | 247 struct mbuf *mbuf; |
111 struct rbuf_info *rinfo; | 248 struct rbuf_info *rinfo; |
112 int order = get_order(buf_len); | 249 bus_dma_segment_t segs[1]; 250 int nsegs; 251 int err; |
113 | 252 |
114 /* Check if request can be accomodated in previous allocated page */ 115 if (nic->rb_page) { 116 if ((nic->rb_page_offset + buf_len + buf_len) > 117 (PAGE_SIZE << order)) { 118 nic->rb_page = NULL; 119 } else { 120 nic->rb_page_offset += buf_len; 121 get_page(nic->rb_page); 122 } 123 } | 253 mbuf = m_getjcl(mflags, MT_DATA, M_PKTHDR, MCLBYTES); 254 if (mbuf == NULL) 255 return (ENOMEM); |
124 | 256 |
125 /* Allocate a new page */ 126 if (!nic->rb_page) { 127 nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, 128 order); 129 if (!nic->rb_page) { 130 netdev_err(nic->netdev, 131 "Failed to allocate new rcv buffer\n"); 132 return -ENOMEM; 133 } 134 nic->rb_page_offset = 0; | 257 /* 258 * The length is equal to the actual length + one 128b line 259 * used as room for the rbuf_info structure. 260 */ 261 mbuf->m_len = mbuf->m_pkthdr.len = buf_len; 262 263 err = bus_dmamap_load_mbuf_sg(rbdr->rbdr_buff_dmat, dmap, mbuf, segs, 264 &nsegs, BUS_DMA_NOWAIT); 265 if (err != 0) { 266 device_printf(nic->dev, 267 "Failed to map mbuf into DMA visible memory, err: %d\n", 268 err); 269 m_freem(mbuf); 270 bus_dmamap_destroy(rbdr->rbdr_buff_dmat, dmap); 271 return (err); |
135 } | 272 } |
273 if (nsegs != 1) 274 panic("Unexpected number of DMA segments for RB: %d", nsegs); 275 /* 276 * Now use the room for rbuf_info structure 277 * and adjust mbuf data and length. 278 */ 279 rinfo = (struct rbuf_info *)mbuf->m_data; 280 m_adj(mbuf, NICVF_RCV_BUF_ALIGN_BYTES); |
|
136 | 281 |
137 data = (u64)page_address(nic->rb_page) + nic->rb_page_offset; | 282 rinfo->dmat = rbdr->rbdr_buff_dmat; 283 rinfo->dmap = dmap; 284 rinfo->mbuf = mbuf; |
138 | 285 |
139 /* Align buffer addr to cache line i.e 128 bytes */ 140 rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data)); 141 /* Save page address for reference updation */ 142 rinfo->page = nic->rb_page; 143 /* Store start address for later retrieval */ 144 rinfo->data = (void *)data; 145 /* Store alignment offset */ 146 rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data); | 286 *rbuf = segs[0].ds_addr + NICVF_RCV_BUF_ALIGN_BYTES; |
147 | 287 |
148 data += rinfo->offset; 149 150 /* Give next aligned address to hw for DMA */ 151 *rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES); 152 return 0; | 288 return (0); |
153} 154 | 289} 290 |
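The resulting receive-buffer layout places rbuf_info in the first 128-byte line of the mbuf data area and hands the hardware an address one line past it, which is what makes the GET_RBUF_INFO() step-back above work. A standalone sketch of that pointer arithmetic (the 128-byte value mirrors NICVF_RCV_BUF_ALIGN_BYTES; the struct contents are simplified):

```c
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define	RCV_BUF_ALIGN_BYTES	128	/* mirrors NICVF_RCV_BUF_ALIGN_BYTES */

struct rbuf_info {
	void	*mbuf;	/* simplified stand-in for the driver's fields */
};

int
main(void)
{
	uint8_t *data, *hw_addr;
	struct rbuf_info *rinfo;

	/* Aligned region standing in for the mbuf data area. */
	data = aligned_alloc(RCV_BUF_ALIGN_BYTES, 4 * RCV_BUF_ALIGN_BYTES);
	assert(data != NULL);

	/* Metadata occupies the first 128-byte line... */
	rinfo = (struct rbuf_info *)data;
	rinfo->mbuf = data;

	/* ...and the address given to the HW starts one line later. */
	hw_addr = data + RCV_BUF_ALIGN_BYTES;

	/* GET_RBUF_INFO(): step back one line from the HW address. */
	assert((void *)(hw_addr - RCV_BUF_ALIGN_BYTES) == (void *)rinfo);

	free(data);
	return (0);
}
```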
155/* Retrieve actual buffer start address and build skb for received packet */ 156static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic, 157 u64 rb_ptr, int len) | 291/* Retrieve mbuf for received packet */ 292static struct mbuf * 293nicvf_rb_ptr_to_mbuf(struct nicvf *nic, bus_addr_t rb_ptr) |
158{ | 294{ |
159 struct sk_buff *skb; | 295 struct mbuf *mbuf; |
160 struct rbuf_info *rinfo; 161 | 296 struct rbuf_info *rinfo; 297 |
162 rb_ptr = (u64)phys_to_virt(rb_ptr); | |
163 /* Get buffer start address and alignment offset */ | 298 /* Get buffer start address and alignment offset */ |
164 rinfo = GET_RBUF_INFO(rb_ptr); | 299 rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(rb_ptr)); |
165 | 300 |
166 /* Now build an skb to give to stack */ 167 skb = build_skb(rinfo->data, RCV_FRAG_LEN); 168 if (!skb) { 169 put_page(rinfo->page); 170 return NULL; | 301 /* Now retrieve mbuf to give to stack */ 302 mbuf = rinfo->mbuf; 303 if (__predict_false(mbuf == NULL)) { 304 panic("%s: Received packet fragment with NULL mbuf", 305 device_get_nameunit(nic->dev)); |
171 } | 306 } |
307 /* 308 * Clear the mbuf in the descriptor to indicate 309 * that this slot is processed and free to use. 310 */ 311 rinfo->mbuf = NULL; |
|
172 | 312 |
173 /* Set correct skb->data */ 174 skb_reserve(skb, rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES); | 313 bus_dmamap_sync(rinfo->dmat, rinfo->dmap, BUS_DMASYNC_POSTREAD); 314 bus_dmamap_unload(rinfo->dmat, rinfo->dmap); |
175 | 315 |
176 prefetch((void *)rb_ptr); 177 return skb; | 316 return (mbuf); |
178} 179 180/* Allocate RBDR ring and populate receive buffers */ | 317} 318 319/* Allocate RBDR ring and populate receive buffers */ |
181static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, 182 int ring_len, int buf_size) | 320static int 321nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, int ring_len, 322 int buf_size, int qidx) |
183{ | 323{ |
184 int idx; 185 u64 *rbuf; | 324 bus_dmamap_t dmap; 325 bus_addr_t rbuf; |
186 struct rbdr_entry_t *desc; | 326 struct rbdr_entry_t *desc; |
327 int idx; |
|
187 int err; 188 | 328 int err; 329 |
330 /* Allocate rbdr descriptors ring */ |
|
189 err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len, | 331 err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len, |
190 sizeof(struct rbdr_entry_t), 191 NICVF_RCV_BUF_ALIGN_BYTES); 192 if (err) 193 return err; | 332 sizeof(struct rbdr_entry_t), NICVF_RCV_BUF_ALIGN_BYTES); 333 if (err != 0) { 334 device_printf(nic->dev, 335 "Failed to create RBDR descriptors ring\n"); 336 return (err); 337 } |
194 195 rbdr->desc = rbdr->dmem.base; | 338 339 rbdr->desc = rbdr->dmem.base; |
196 /* Buffer size has to be in multiples of 128 bytes */ 197 rbdr->dma_size = buf_size; 198 rbdr->enable = true; | 340 /* 341 * Buffer size has to be in multiples of 128 bytes. 342 * Make room for metadata of size of one line (128 bytes). 343 */ 344 rbdr->dma_size = buf_size - NICVF_RCV_BUF_ALIGN_BYTES; 345 rbdr->enable = TRUE; |
199 rbdr->thresh = RBDR_THRESH; | 346 rbdr->thresh = RBDR_THRESH; |
347 rbdr->nic = nic; 348 rbdr->idx = qidx; |
|
200 | 349 |
201 nic->rb_page = NULL; | 350 /* 351 * Create DMA tag for Rx buffers. 352 * Each map created using this tag is intended to store Rx payload for 353 * one fragment and one header structure containing rbuf_info (thus 354 * additional 128 byte line since RB must be a multiple of 128 byte 355 * cache line). 356 */ 357 if (buf_size > MCLBYTES) { 358 device_printf(nic->dev, 359 "Buffer size too large for mbuf cluster\n"); 360 return (EINVAL); 361 } 362 err = bus_dma_tag_create( 363 bus_get_dma_tag(nic->dev), /* parent tag */ 364 NICVF_RCV_BUF_ALIGN_BYTES, /* alignment */ 365 0, /* boundary */ 366 DMAP_MAX_PHYSADDR, /* lowaddr */ 367 DMAP_MIN_PHYSADDR, /* highaddr */ 368 NULL, NULL, /* filtfunc, filtfuncarg */ 369 roundup2(buf_size, MCLBYTES), /* maxsize */ 370 1, /* nsegments */ 371 roundup2(buf_size, MCLBYTES), /* maxsegsize */ 372 0, /* flags */ 373 NULL, NULL, /* lockfunc, lockfuncarg */ 374 &rbdr->rbdr_buff_dmat); /* dmat */ 375 376 if (err != 0) { 377 device_printf(nic->dev, 378 "Failed to create busdma tag for RBDR buffers\n"); 379 return (err); 380 } 381 382 rbdr->rbdr_buff_dmaps = malloc(sizeof(*rbdr->rbdr_buff_dmaps) * 383 ring_len, M_NICVF, (M_WAITOK | M_ZERO)); 384 |
202 for (idx = 0; idx < ring_len; idx++) { | 385 for (idx = 0; idx < ring_len; idx++) { |
203 err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN, 204 &rbuf); 205 if (err) 206 return err; | 386 err = bus_dmamap_create(rbdr->rbdr_buff_dmat, 0, &dmap); 387 if (err != 0) { 388 device_printf(nic->dev, 389 "Failed to create DMA map for RB\n"); 390 return (err); 391 } 392 rbdr->rbdr_buff_dmaps[idx] = dmap; |
207 | 393 |
394 err = nicvf_alloc_rcv_buffer(nic, rbdr, dmap, M_WAITOK, 395 DMA_BUFFER_LEN, &rbuf); 396 if (err != 0) 397 return (err); 398 |
|
208 desc = GET_RBDR_DESC(rbdr, idx); | 399 desc = GET_RBDR_DESC(rbdr, idx); |
209 desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN; | 400 desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN); |
210 } | 401 } |
211 return 0; | 402 403 /* Allocate taskqueue */ 404 TASK_INIT(&rbdr->rbdr_task, 0, nicvf_rbdr_task, rbdr); 405 TASK_INIT(&rbdr->rbdr_task_nowait, 0, nicvf_rbdr_task_nowait, rbdr); 406 rbdr->rbdr_taskq = taskqueue_create_fast("nicvf_rbdr_taskq", M_WAITOK, 407 taskqueue_thread_enqueue, &rbdr->rbdr_taskq); 408 taskqueue_start_threads(&rbdr->rbdr_taskq, 1, PI_NET, "%s: rbdr_taskq", 409 device_get_nameunit(nic->dev)); 410 411 return (0); |
212} 213 214/* Free RBDR ring and its receive buffers */ | 412} 413 414/* Free RBDR ring and its receive buffers */ |
215static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) | 415static void 416nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) |
216{ | 417{ |
217 int head, tail; 218 u64 buf_addr; | 418 struct mbuf *mbuf; 419 struct queue_set *qs; |
219 struct rbdr_entry_t *desc; 220 struct rbuf_info *rinfo; | 420 struct rbdr_entry_t *desc; 421 struct rbuf_info *rinfo; |
422 bus_addr_t buf_addr; 423 int head, tail, idx; 424 int err; |
|
221 | 425 |
222 if (!rbdr) 223 return; | 426 qs = nic->qs; |
224 | 427 |
225 rbdr->enable = false; 226 if (!rbdr->dmem.base) | 428 if ((qs == NULL) || (rbdr == NULL)) |
227 return; 228 | 429 return; 430 |
229 head = rbdr->head; 230 tail = rbdr->tail; | 431 rbdr->enable = FALSE; 432 if (rbdr->rbdr_taskq != NULL) { 433 /* Remove tasks */ 434 while (taskqueue_cancel(rbdr->rbdr_taskq, 435 &rbdr->rbdr_task_nowait, NULL) != 0) { 436 /* Finish the nowait task first */ 437 taskqueue_drain(rbdr->rbdr_taskq, 438 &rbdr->rbdr_task_nowait); 439 } 440 taskqueue_free(rbdr->rbdr_taskq); 441 rbdr->rbdr_taskq = NULL; |
231 | 442 |
232 /* Free SKBs */ 233 while (head != tail) { 234 desc = GET_RBDR_DESC(rbdr, head); | 443 while (taskqueue_cancel(taskqueue_thread, 444 &rbdr->rbdr_task, NULL) != 0) { 445 /* Now finish the sleepable task */ 446 taskqueue_drain(taskqueue_thread, &rbdr->rbdr_task); 447 } 448 } 449 450 /* 451 * Free all of the memory under the RB descriptors. 452 * There are assumptions here: 453 * 1. Corresponding RBDR is disabled 454 * - it is safe to operate using head and tail indexes 455 * 2. All buffers that were received are properly freed by 456 * the receive handler 457 * - DMA maps and mbufs need to be unloaded/freed only for 458 * the unused descriptors 459 */ 460 if (rbdr->rbdr_buff_dmat != NULL) { 461 head = rbdr->head; 462 tail = rbdr->tail; 463 while (head != tail) { 464 desc = GET_RBDR_DESC(rbdr, head); 465 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; 466 rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr)); 467 bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap); 468 mbuf = rinfo->mbuf; 469 /* This will destroy everything including rinfo! */ 470 m_freem(mbuf); 471 head++; 472 head &= (rbdr->dmem.q_len - 1); 473 } 474 /* Free tail descriptor */ 475 desc = GET_RBDR_DESC(rbdr, tail); |
235 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; | 476 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; |
236 rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr)); 237 put_page(rinfo->page); 238 head++; 239 head &= (rbdr->dmem.q_len - 1); | 477 rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr)); 478 bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap); 479 mbuf = rinfo->mbuf; 480 /* This will destroy everything including rinfo! */ 481 m_freem(mbuf); 482 483 /* Destroy DMA maps */ 484 for (idx = 0; idx < qs->rbdr_len; idx++) { 485 if (rbdr->rbdr_buff_dmaps[idx] == NULL) 486 continue; 487 err = bus_dmamap_destroy(rbdr->rbdr_buff_dmat, 488 rbdr->rbdr_buff_dmaps[idx]); 489 KASSERT(err == 0, 490 ("%s: Could not destroy DMA map for RB, desc: %d", 491 __func__, idx)); 492 rbdr->rbdr_buff_dmaps[idx] = NULL; 493 } 494 495 /* Now destroy the tag */ 496 err = bus_dma_tag_destroy(rbdr->rbdr_buff_dmat); 497 KASSERT(err == 0, 498 ("%s: Trying to destroy BUSY DMA tag", __func__)); 499 500 rbdr->head = 0; 501 rbdr->tail = 0; |
240 } | 502 } |
241 /* Free SKB of tail desc */ 242 desc = GET_RBDR_DESC(rbdr, tail); 243 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; 244 rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr)); 245 put_page(rinfo->page); | |
246 247 /* Free RBDR ring */ 248 nicvf_free_q_desc_mem(nic, &rbdr->dmem); 249} 250 | 503 504 /* Free RBDR ring */ 505 nicvf_free_q_desc_mem(nic, &rbdr->dmem); 506} 507 |
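The head-to-tail walk above leans on the ring length being a power of two, so masking with `q_len - 1` is a cheap modulo that wraps the index. A tiny demonstration (ring length and indexes hypothetical):

```c
#include <assert.h>

int
main(void)
{
	int q_len = 8;			/* ring length; must be a power of two */
	int head = 6, tail = 2;		/* hypothetical snapshot */
	int visited = 0;

	/* Walk from head to tail exactly as the loop above does. */
	while (head != tail) {
		/* process slot `head` here (6, 7, 0, 1) */
		visited++;
		head++;
		head &= (q_len - 1);	/* wraps 7 -> 0 without division */
	}
	assert(visited == 4);		/* tail slot is handled separately */
	return (0);
}
```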
251/* Refill receive buffer descriptors with new buffers. | 508/* 509 * Refill receive buffer descriptors with new buffers. |
252 */ | 510 */ |
253static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp) | 511static int 512nicvf_refill_rbdr(struct rbdr *rbdr, int mflags) |
254{ | 513{ |
255 struct queue_set *qs = nic->qs; 256 int rbdr_idx = qs->rbdr_cnt; | 514 struct nicvf *nic; 515 struct queue_set *qs; 516 int rbdr_idx; |
257 int tail, qcount; 258 int refill_rb_cnt; | 517 int tail, qcount; 518 int refill_rb_cnt; |
259 struct rbdr *rbdr; | |
260 struct rbdr_entry_t *desc; | 519 struct rbdr_entry_t *desc; |
261 u64 *rbuf; 262 int new_rb = 0; | 520 bus_dmamap_t dmap; 521 bus_addr_t rbuf; 522 boolean_t rb_alloc_fail; 523 int new_rb; |
263 | 524 |
264refill: 265 if (!rbdr_idx) 266 return; 267 rbdr_idx--; 268 rbdr = &qs->rbdr[rbdr_idx]; | 525 rb_alloc_fail = TRUE; 526 new_rb = 0; 527 nic = rbdr->nic; 528 qs = nic->qs; 529 rbdr_idx = rbdr->idx; 530 |
269 /* Check if it's enabled */ 270 if (!rbdr->enable) | 531 /* Check if it's enabled */ 532 if (!rbdr->enable) |
271 goto next_rbdr; | 533 return (0); |
272 273 /* Get no of desc's to be refilled */ 274 qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx); 275 qcount &= 0x7FFFF; 276 /* Doorbell can be ringed with a max of ring size minus 1 */ | 534 535 /* Get no of desc's to be refilled */ 536 qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx); 537 qcount &= 0x7FFFF; 538 /* Doorbell can be ringed with a max of ring size minus 1 */ |
277 if (qcount >= (qs->rbdr_len - 1)) 278 goto next_rbdr; 279 else | 539 if (qcount >= (qs->rbdr_len - 1)) { 540 rb_alloc_fail = FALSE; 541 goto out; 542 } else |
280 refill_rb_cnt = qs->rbdr_len - qcount - 1; 281 282 /* Start filling descs from tail */ 283 tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3; 284 while (refill_rb_cnt) { 285 tail++; 286 tail &= (rbdr->dmem.q_len - 1); 287 | 543 refill_rb_cnt = qs->rbdr_len - qcount - 1; 544 545 /* Start filling descs from tail */ 546 tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3; 547 while (refill_rb_cnt) { 548 tail++; 549 tail &= (rbdr->dmem.q_len - 1); 550 |
288 if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf)) | 551 dmap = rbdr->rbdr_buff_dmaps[tail]; 552 if (nicvf_alloc_rcv_buffer(nic, rbdr, dmap, mflags, 553 DMA_BUFFER_LEN, &rbuf)) { 554 /* Something went wrong. Give up. */ |
289 break; | 555 break; |
290 | 556 } |
291 desc = GET_RBDR_DESC(rbdr, tail); | 557 desc = GET_RBDR_DESC(rbdr, tail); |
292 desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN; | 558 desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN); |
293 refill_rb_cnt--; 294 new_rb++; 295 } 296 297 /* make sure all memory stores are done before ringing doorbell */ | 559 refill_rb_cnt--; 560 new_rb++; 561 } 562 563 /* make sure all memory stores are done before ringing doorbell */ |
298 smp_wmb(); | 564 wmb(); |
299 300 /* Check if buffer allocation failed */ | 565 566 /* Check if buffer allocation failed */ |
301 if (refill_rb_cnt) 302 nic->rb_alloc_fail = true; 303 else 304 nic->rb_alloc_fail = false; | 567 if (refill_rb_cnt == 0) 568 rb_alloc_fail = FALSE; |
305 306 /* Notify HW */ 307 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, 308 rbdr_idx, new_rb); | 569 570 /* Notify HW */ 571 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, 572 rbdr_idx, new_rb); |
309next_rbdr: 310 /* Re-enable RBDR interrupts only if buffer allocation is success */ 311 if (!nic->rb_alloc_fail && rbdr->enable) | 573out: 574 if (!rb_alloc_fail) { 575 /* 576 * Re-enable RBDR interrupts only 577 * if buffer allocation succeeded. 578 */ |
312 nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx); 313 | 579 nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx); 580 |
314 if (rbdr_idx) 315 goto refill; | 581 return (0); 582 } 583 584 return (ENOMEM); |
316} 317 | 585} 586 |
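The refill count falls straight out of the ring occupancy: with qcount descriptors still owned by the hardware, at most `rbdr_len - qcount - 1` can be replenished, since the doorbell may never be rung with more than ring size minus one. A sketch of that arithmetic (the 8192-entry length is a hypothetical stand-in):

```c
#include <assert.h>

/* Mirror of the refill computation in nicvf_refill_rbdr(). */
static int
refill_count(int rbdr_len, int qcount)
{
	if (qcount >= (rbdr_len - 1))
		return (0);	/* ring effectively full, nothing to do */
	return (rbdr_len - qcount - 1);
}

int
main(void)
{
	assert(refill_count(8192, 8191) == 0);
	assert(refill_count(8192, 100) == 8091);
	assert(refill_count(8192, 0) == 8191);	/* never the full ring size */
	return (0);
}
```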
318/* Alloc rcv buffers in non-atomic mode for better success */ 319void nicvf_rbdr_work(struct work_struct *work) | 587/* Refill RBs even if sleep is needed to reclaim memory */ 588static void 589nicvf_rbdr_task(void *arg, int pending) |
320{ | 590{ |
321 struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work); | 591 struct rbdr *rbdr; 592 int err; |
322 | 593 |
323 nicvf_refill_rbdr(nic, GFP_KERNEL); 324 if (nic->rb_alloc_fail) 325 schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10)); 326 else 327 nic->rb_work_scheduled = false; | 594 rbdr = (struct rbdr *)arg; 595 596 err = nicvf_refill_rbdr(rbdr, M_WAITOK); 597 if (__predict_false(err != 0)) { 598 panic("%s: Failed to refill RBs even when sleep enabled", 599 __func__); 600 } |
328} 329 | 601} 602 |
330/* In Softirq context, alloc rcv buffers in atomic mode */ 331void nicvf_rbdr_task(unsigned long data) | 603/* Refill RBs as soon as possible without waiting */ 604static void 605nicvf_rbdr_task_nowait(void *arg, int pending) |
332{ | 606{ |
333 struct nicvf *nic = (struct nicvf *)data; | 607 struct rbdr *rbdr; 608 int err; |
334 | 609 |
335 nicvf_refill_rbdr(nic, GFP_ATOMIC); 336 if (nic->rb_alloc_fail) { 337 nic->rb_work_scheduled = true; 338 schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10)); | 610 rbdr = (struct rbdr *)arg; 611 612 err = nicvf_refill_rbdr(rbdr, M_NOWAIT); 613 if (err != 0) { 614 /* 615 * Schedule another, sleepable kernel thread 616 * that is guaranteed to refill the buffers. 617 */ 618 taskqueue_enqueue(taskqueue_thread, &rbdr->rbdr_task); |
339 } 340} 341 | 619 } 620} 621 |
622static int 623nicvf_rcv_pkt_handler(struct nicvf *nic, struct cmp_queue *cq, 624 struct cqe_rx_t *cqe_rx, int cqe_type) 625{ 626 struct mbuf *mbuf; 627 int rq_idx; 628 int err = 0; 629 630 rq_idx = cqe_rx->rq_idx; 631 632 /* Check for errors */ 633 err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx); 634 if (err && !cqe_rx->rb_cnt) 635 return (0); 636 637 mbuf = nicvf_get_rcv_mbuf(nic, cqe_rx); 638 if (mbuf == NULL) { 639 dprintf(nic->dev, "Packet not received\n"); 640 return (0); 641 } 642 643 /* If error packet */ 644 if (err != 0) { 645 m_freem(mbuf); 646 return (0); 647 } 648 649 /* 650 * Push this packet to the stack later to avoid 651 * unlocking completion task in the middle of work. 652 */ 653 err = buf_ring_enqueue(cq->rx_br, mbuf); 654 if (err != 0) { 655 /* 656 * Failed to enqueue this mbuf. 657 * We don't drop it, just schedule another task. 658 */ 659 return (err); 660 } 661 662 return (0); 663} 664 |
665static int 666nicvf_snd_pkt_handler(struct nicvf *nic, struct cmp_queue *cq, 667 struct cqe_send_t *cqe_tx, int cqe_type) 668{ 669 bus_dmamap_t dmap; 670 struct mbuf *mbuf; 671 struct snd_queue *sq; 672 struct sq_hdr_subdesc *hdr; 673 674 mbuf = NULL; 675 sq = &nic->qs->sq[cqe_tx->sq_idx]; 676 /* Avoid blocking here since we hold a non-sleepable NICVF_CMP_LOCK */ 677 if (NICVF_TX_TRYLOCK(sq) == 0) 678 return (EAGAIN); 679 680 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr); 681 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) { 682 NICVF_TX_UNLOCK(sq); 683 return (0); 684 } 685 686 dprintf(nic->dev, 687 "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n", 688 __func__, cqe_tx->sq_qs, cqe_tx->sq_idx, 689 cqe_tx->sqe_ptr, hdr->subdesc_cnt); 690 691 dmap = (bus_dmamap_t)sq->snd_buff[cqe_tx->sqe_ptr].dmap; 692 bus_dmamap_unload(sq->snd_buff_dmat, dmap); 693 694 mbuf = (struct mbuf *)sq->snd_buff[cqe_tx->sqe_ptr].mbuf; 695 if (mbuf != NULL) { 696 m_freem(mbuf); 697 sq->snd_buff[cqe_tx->sqe_ptr].mbuf = NULL; 698 } 699 700 nicvf_check_cqe_tx_errs(nic, cq, cqe_tx); 701 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); 702 703 NICVF_TX_UNLOCK(sq); 704 return (0); 705} 706 |
707static int 708nicvf_cq_intr_handler(struct nicvf *nic, uint8_t cq_idx) 709{ 710 struct mbuf *mbuf; 711 struct ifnet *ifp; 712 int processed_cqe, work_done = 0, tx_done = 0; 713 int cqe_count, cqe_head; 714 struct queue_set *qs = nic->qs; 715 struct cmp_queue *cq = &qs->cq[cq_idx]; 716 struct cqe_rx_t *cq_desc; 717 int cmp_err; 718 719 NICVF_CMP_LOCK(cq); 720 cmp_err = 0; 721 processed_cqe = 0; 722 /* Get no of valid CQ entries to process */ 723 cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx); 724 cqe_count &= CQ_CQE_COUNT; 725 if (cqe_count == 0) 726 goto out; 727 728 /* Get head of the valid CQ entries */ 729 cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9; 730 cqe_head &= 0xFFFF; 731 732 dprintf(nic->dev, "%s CQ%d cqe_count %d cqe_head %d\n", 733 __func__, cq_idx, cqe_count, cqe_head); 734 while (processed_cqe < cqe_count) { 735 /* Get the CQ descriptor */ 736 cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head); 737 cqe_head++; 738 cqe_head &= (cq->dmem.q_len - 1); 739 740 dprintf(nic->dev, "CQ%d cq_desc->cqe_type %d\n", cq_idx, 741 cq_desc->cqe_type); 742 switch (cq_desc->cqe_type) { 743 case CQE_TYPE_RX: 744 cmp_err = nicvf_rcv_pkt_handler(nic, cq, cq_desc, 745 CQE_TYPE_RX); 746 if (__predict_false(cmp_err != 0)) { 747 /* 748 * Oops. Cannot finish now. 749 * Let's try again later. 750 */ 751 goto done; 752 } 753 work_done++; 754 break; 755 case CQE_TYPE_SEND: 756 cmp_err = nicvf_snd_pkt_handler(nic, cq, 757 (void *)cq_desc, CQE_TYPE_SEND); 758 if (__predict_false(cmp_err != 0)) { 759 /* 760 * Oops. Cannot finish now. 761 * Let's try again later. 762 */ 763 goto done; 764 } 765 766 tx_done++; 767 break; 768 case CQE_TYPE_INVALID: 769 case CQE_TYPE_RX_SPLIT: 770 case CQE_TYPE_RX_TCP: 771 case CQE_TYPE_SEND_PTP: 772 /* Ignore for now */ 773 break; 774 } 775 processed_cqe++; 776 } 777done: 778 dprintf(nic->dev, 779 "%s CQ%d processed_cqe %d work_done %d\n", 780 __func__, cq_idx, processed_cqe, work_done); 781 782 /* Ring doorbell to inform H/W to reuse processed CQEs */ 783 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, cq_idx, processed_cqe); 784 785 if ((tx_done > 0) && 786 ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0)) { 787 /* Reenable TXQ if it was stopped earlier due to SQ full */ 788 if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); 789 } 790out: 791 NICVF_CMP_UNLOCK(cq); 792 793 ifp = nic->ifp; 794 /* Push received MBUFs to the stack */ 795 while (!buf_ring_empty(cq->rx_br)) { 796 mbuf = buf_ring_dequeue_mc(cq->rx_br); 797 if (__predict_true(mbuf != NULL)) 798 (*ifp->if_input)(ifp, mbuf); 799 } 800 801 return (cmp_err); 802} 803 |
804/* 805 * Qset error interrupt handler 806 * 807 * As of now only CQ errors are handled 808 */ 809static void 810nicvf_qs_err_task(void *arg, int pending) 811{ 812 struct nicvf *nic; 813 struct queue_set *qs; 814 int qidx; 815 uint64_t status; 816 boolean_t enable = TRUE; 817 818 nic = (struct nicvf *)arg; 819 qs = nic->qs; 820 821 /* Deactivate network interface */ 822 if_setdrvflagbits(nic->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 823 824 /* Check if it is CQ err */ 825 for (qidx = 0; qidx < qs->cq_cnt; qidx++) { 826 status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, 827 qidx); 828 if ((status & CQ_ERR_MASK) == 0) 829 continue; 830 /* Process already queued CQEs and reconfig CQ */ 831 nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx); 832 nicvf_sq_disable(nic, qidx); 833 (void)nicvf_cq_intr_handler(nic, qidx); 834 nicvf_cmp_queue_config(nic, qs, qidx, enable); 835 nicvf_sq_free_used_descs(nic, &qs->sq[qidx], qidx); 836 nicvf_sq_enable(nic, &qs->sq[qidx], qidx); 837 nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx); 838 } 839 840 if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); 841 /* Re-enable Qset error interrupt */ 842 nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0); 843} 844 |
845static void 846nicvf_cmp_task(void *arg, int pending) 847{ 848 uint64_t cq_head; 849 struct cmp_queue *cq; 850 struct nicvf *nic; 851 int cmp_err; 852 853 cq = (struct cmp_queue *)arg; 854 nic = cq->nic; 855 856 /* Handle CQ descriptors */ 857 cmp_err = nicvf_cq_intr_handler(nic, cq->idx); 858 /* Re-enable interrupts */ 859 cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq->idx); 860 nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx); 861 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD, cq->idx, cq_head); 862 863 if (__predict_false(cmp_err != 0)) { 864 /* 865 * Schedule another thread here since we did not 866 * process the entire CQ due to Tx or Rx CQ parse error. 867 */ 868 taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task); 869 870 } 871 872 /* Reenable interrupt (previously disabled in nicvf_intr_handler()) */ 873 nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->idx); 874 875} 876 |
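nicvf_cq_intr_handler() above decodes both the pending-entry count and the head index from the CQ status registers, then consumes entries with the same power-of-two wrap as the other rings. A small sketch of that decode (the 9-bit head shift comes from the code above; the 0xFFFF masks stand in for CQ_CQE_COUNT and the head field, and all register values are hypothetical):

```c
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint64_t status_reg, head_reg;
	int cqe_count, cqe_head, i, q_len = 4096;

	/* Hypothetical register snapshots. */
	status_reg = 3;				/* three CQEs pending */
	head_reg = (uint64_t)4095 << 9;		/* head at the last slot */

	cqe_count = status_reg & 0xFFFF;	/* stand-in for CQ_CQE_COUNT */
	cqe_head = (head_reg >> 9) & 0xFFFF;

	/* Consuming the CQEs wraps the head from 4095 through 0 and 1. */
	for (i = 0; i < cqe_count; i++) {
		cqe_head++;
		cqe_head &= (q_len - 1);
	}
	assert(cqe_head == 2);
	return (0);
}
```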
|
342/* Initialize completion queue */ | 877/* Initialize completion queue */ |
343static int nicvf_init_cmp_queue(struct nicvf *nic, 344 struct cmp_queue *cq, int q_len) | 878static int 879nicvf_init_cmp_queue(struct nicvf *nic, struct cmp_queue *cq, int q_len, 880 int qidx) |
345{ 346 int err; 347 | 881{ 882 int err; 883 |
884 /* Initialize lock */ 885 snprintf(cq->mtx_name, sizeof(cq->mtx_name), "%s: CQ(%d) lock", 886 device_get_nameunit(nic->dev), qidx); 887 mtx_init(&cq->mtx, cq->mtx_name, NULL, MTX_DEF); 888 |
|
348 err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE, 349 NICVF_CQ_BASE_ALIGN_BYTES); | 889 err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE, 890 NICVF_CQ_BASE_ALIGN_BYTES); |
350 if (err) 351 return err; | |
352 | 891 |
892 if (err != 0) { 893 device_printf(nic->dev, 894 "Could not allocate DMA memory for CQ\n"); 895 return (err); 896 } 897 |
|
353 cq->desc = cq->dmem.base; 354 cq->thresh = CMP_QUEUE_CQE_THRESH; | 898 cq->desc = cq->dmem.base; 899 cq->thresh = CMP_QUEUE_CQE_THRESH; |
900 cq->nic = nic; 901 cq->idx = qidx; |
|
355 nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1; 356 | 902 nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1; 903 |
357 return 0; | 904 cq->rx_br = buf_ring_alloc(CMP_QUEUE_LEN * 8, M_DEVBUF, M_WAITOK, 905 &cq->mtx); 906 907 /* Allocate taskqueue */ 908 TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq); 909 cq->cmp_taskq = taskqueue_create_fast("nicvf_cmp_taskq", M_WAITOK, 910 taskqueue_thread_enqueue, &cq->cmp_taskq); 911 taskqueue_start_threads(&cq->cmp_taskq, 1, PI_NET, "%s: cmp_taskq(%d)", 912 device_get_nameunit(nic->dev), qidx); 913 914 return (0); |
358} 359 | 915} 916 |
360static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq) | 917static void 918nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq) |
361{ | 919{ |
362 if (!cq) | 920 921 if (cq == NULL) |
363 return; | 922 return; |
364 if (!cq->dmem.base) 365 return; | 923 /* 924 * The completion queue itself should be disabled by now 925 * (ref. nicvf_snd_queue_config()). 926 * Ensure that it is safe to free it, or panic otherwise. 927 */ 928 if (cq->enable) 929 panic("%s: Trying to free working CQ(%d)", __func__, cq->idx); |
366 | 930 |
931 if (cq->cmp_taskq != NULL) { 932 /* Remove task */ 933 while (taskqueue_cancel(cq->cmp_taskq, &cq->cmp_task, NULL) != 0) 934 taskqueue_drain(cq->cmp_taskq, &cq->cmp_task); 935 936 taskqueue_free(cq->cmp_taskq); 937 cq->cmp_taskq = NULL; 938 } 939 /* 940 * The completion task could possibly have enabled interrupts again, 941 * so disable them now that we have finished processing the 942 * completion task. It is safe to do so since the corresponding CQ 943 * was already disabled. 944 */ 945 nicvf_disable_intr(nic, NICVF_INTR_CQ, cq->idx); 946 nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx); 947 948 NICVF_CMP_LOCK(cq); |
|
367 nicvf_free_q_desc_mem(nic, &cq->dmem); | 949 nicvf_free_q_desc_mem(nic, &cq->dmem); |
950 drbr_free(cq->rx_br, M_DEVBUF); 951 NICVF_CMP_UNLOCK(cq); 952 mtx_destroy(&cq->mtx); 953 memset(cq->mtx_name, 0, sizeof(cq->mtx_name)); |
|
368} 369 | 954} 955 |
956static void 957nicvf_snd_task(void *arg, int pending) 958{ 959 struct snd_queue *sq = (struct snd_queue *)arg; 960 struct mbuf *mbuf; 961 962 NICVF_TX_LOCK(sq); 963 while (1) { 964 mbuf = drbr_dequeue(NULL, sq->br); 965 if (mbuf == NULL) 966 break; 967 968 if (nicvf_tx_mbuf_locked(sq, mbuf) != 0) { 969 /* XXX ARM64TODO: Increase Tx drop counter */ 970 m_freem(mbuf); 971 break; 972 } 973 } 974 NICVF_TX_UNLOCK(sq); 975} 976 |
|
370/* Initialize transmit queue */ | 977/* Initialize transmit queue */ |
371static int nicvf_init_snd_queue(struct nicvf *nic, 372 struct snd_queue *sq, int q_len) | 978static int 979nicvf_init_snd_queue(struct nicvf *nic, struct snd_queue *sq, int q_len, 980 int qidx) |
373{ | 981{ |
982 size_t i; |
|
374 int err; 375 | 983 int err; 984 |
985 /* Initialize TX lock for this queue */ 986 snprintf(sq->mtx_name, sizeof(sq->mtx_name), "%s: SQ(%d) lock", 987 device_get_nameunit(nic->dev), qidx); 988 mtx_init(&sq->mtx, sq->mtx_name, NULL, MTX_DEF); 989 990 NICVF_TX_LOCK(sq); 991 /* Allocate buffer ring */ 992 sq->br = buf_ring_alloc(q_len / MIN_SQ_DESC_PER_PKT_XMIT, M_DEVBUF, 993 M_NOWAIT, &sq->mtx); 994 if (sq->br == NULL) { 995 device_printf(nic->dev, 996 "ERROR: Could not set up buf ring for SQ(%d)\n", qidx); 997 err = ENOMEM; 998 goto error; 999 } 1000 1001 /* Allocate DMA memory for Tx descriptors */ |
|
376 err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE, 377 NICVF_SQ_BASE_ALIGN_BYTES); | 1002 err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE, 1003 NICVF_SQ_BASE_ALIGN_BYTES); |
378 if (err) 379 return err; | 1004 if (err != 0) { 1005 device_printf(nic->dev, 1006 "Could not allocate DMA memory for SQ\n"); 1007 goto error; 1008 } |
380 381 sq->desc = sq->dmem.base; | 1009 1010 sq->desc = sq->dmem.base; |
382 sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL); 383 if (!sq->skbuff) 384 return -ENOMEM; 385 sq->head = 0; 386 sq->tail = 0; 387 atomic_set(&sq->free_cnt, q_len - 1); | 1011 sq->head = sq->tail = 0; 1012 atomic_store_rel_int(&sq->free_cnt, q_len - 1); |
388 sq->thresh = SND_QUEUE_THRESH; | 1013 sq->thresh = SND_QUEUE_THRESH; |
1014 sq->idx = qidx; 1015 sq->nic = nic; |
|
389 | 1016 |
390 /* Preallocate memory for TSO segment's header */ 391 sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev, 392 q_len * TSO_HEADER_SIZE, 393 &sq->tso_hdrs_phys, GFP_KERNEL); 394 if (!sq->tso_hdrs) 395 return -ENOMEM; | 1017 /* 1018 * Allocate DMA maps for Tx buffers 1019 */ |
396 | 1020 |
397 return 0; | 1021 /* Create DMA tag first */ 1022 err = bus_dma_tag_create( 1023 bus_get_dma_tag(nic->dev), /* parent tag */ 1024 1, /* alignment */ 1025 0, /* boundary */ 1026 BUS_SPACE_MAXADDR, /* lowaddr */ 1027 BUS_SPACE_MAXADDR, /* highaddr */ 1028 NULL, NULL, /* filtfunc, filtfuncarg */ 1029 NICVF_TXBUF_MAXSIZE, /* maxsize */ 1030 NICVF_TXBUF_NSEGS, /* nsegments */ 1031 MCLBYTES, /* maxsegsize */ 1032 0, /* flags */ 1033 NULL, NULL, /* lockfunc, lockfuncarg */ 1034 &sq->snd_buff_dmat); /* dmat */ 1035 1036 if (err != 0) { 1037 device_printf(nic->dev, 1038 "Failed to create busdma tag for Tx buffers\n"); 1039 goto error; 1040 } 1041 1042 /* Allocate send buffers array */ 1043 sq->snd_buff = malloc(sizeof(*sq->snd_buff) * q_len, M_NICVF, 1044 (M_NOWAIT | M_ZERO)); 1045 if (sq->snd_buff == NULL) { 1046 device_printf(nic->dev, 1047 "Could not allocate memory for Tx buffers array\n"); 1048 err = ENOMEM; 1049 goto error; 1050 } 1051 1052 /* Now populate maps */ 1053 for (i = 0; i < q_len; i++) { 1054 err = bus_dmamap_create(sq->snd_buff_dmat, 0, 1055 &sq->snd_buff[i].dmap); 1056 if (err != 0) { 1057 device_printf(nic->dev, 1058 "Failed to create DMA maps for Tx buffers\n"); 1059 goto error; 1060 } 1061 } 1062 NICVF_TX_UNLOCK(sq); 1063 1064 /* Allocate taskqueue */ 1065 TASK_INIT(&sq->snd_task, 0, nicvf_snd_task, sq); 1066 sq->snd_taskq = taskqueue_create_fast("nicvf_snd_taskq", M_WAITOK, 1067 taskqueue_thread_enqueue, &sq->snd_taskq); 1068 taskqueue_start_threads(&sq->snd_taskq, 1, PI_NET, "%s: snd_taskq(%d)", 1069 device_get_nameunit(nic->dev), qidx); 1070 1071 return (0); 1072error: 1073 NICVF_TX_UNLOCK(sq); 1074 return (err); |
398} 399 | 1075} 1076 |
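The free-descriptor accounting initialized above works in tandem with the completion path: a transmitted packet consumes one header subdescriptor plus its gather subdescriptors, and nicvf_snd_pkt_handler() returns `hdr->subdesc_cnt + 1` of them via nicvf_put_sq_desc(). A toy model of that bookkeeping (queue length and segment count hypothetical):

```c
#include <assert.h>

int
main(void)
{
	int q_len = 1024;		/* hypothetical SQ length */
	int free_cnt = q_len - 1;	/* initial value, as set above */
	int subdesc_cnt = 3;		/* gather entries for one packet */

	/* Transmit: one header subdescriptor plus the gather entries. */
	free_cnt -= (subdesc_cnt + 1);
	assert(free_cnt == q_len - 5);

	/* Completion: nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1). */
	free_cnt += (subdesc_cnt + 1);
	assert(free_cnt == q_len - 1);
	return (0);
}
```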
400static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) | 1077static void 1078nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) |
401{ | 1079{ |
402 if (!sq) | 1080 struct queue_set *qs = nic->qs; 1081 size_t i; 1082 int err; 1083 1084 if (sq == NULL) |
403 return; | 1085 return; |
404 if (!sq->dmem.base) 405 return; | |
406 | 1086 |
407 if (sq->tso_hdrs) 408 dma_free_coherent(&nic->pdev->dev, 409 sq->dmem.q_len * TSO_HEADER_SIZE, 410 sq->tso_hdrs, sq->tso_hdrs_phys); | 1087 if (sq->snd_taskq != NULL) { 1088 /* Remove task */ 1089 while (taskqueue_cancel(sq->snd_taskq, &sq->snd_task, NULL) != 0) 1090 taskqueue_drain(sq->snd_taskq, &sq->snd_task); |
411 | 1091 |
412 kfree(sq->skbuff); 413 nicvf_free_q_desc_mem(nic, &sq->dmem); | 1092 taskqueue_free(sq->snd_taskq); 1093 sq->snd_taskq = NULL; 1094 } 1095 1096 NICVF_TX_LOCK(sq); 1097 if (sq->snd_buff_dmat != NULL) { 1098 if (sq->snd_buff != NULL) { 1099 for (i = 0; i < qs->sq_len; i++) { 1100 m_freem(sq->snd_buff[i].mbuf); 1101 sq->snd_buff[i].mbuf = NULL; 1102 1103 bus_dmamap_unload(sq->snd_buff_dmat, 1104 sq->snd_buff[i].dmap); 1105 err = bus_dmamap_destroy(sq->snd_buff_dmat, 1106 sq->snd_buff[i].dmap); 1107 /* 1108 * If bus_dmamap_destroy fails it can cause 1109 * random panic later if the tag is also 1110 * destroyed in the process. 1111 */ 1112 KASSERT(err == 0, 1113 ("%s: Could not destroy DMA map for SQ", 1114 __func__)); 1115 } 1116 } 1117 1118 free(sq->snd_buff, M_NICVF); 1119 1120 err = bus_dma_tag_destroy(sq->snd_buff_dmat); 1121 KASSERT(err == 0, 1122 ("%s: Trying to destroy BUSY DMA tag", __func__)); 1123 } 1124 1125 /* Free private driver ring for this send queue */ 1126 if (sq->br != NULL) 1127 drbr_free(sq->br, M_DEVBUF); 1128 1129 if (sq->dmem.base != NULL) 1130 nicvf_free_q_desc_mem(nic, &sq->dmem); 1131 1132 NICVF_TX_UNLOCK(sq); 1133 /* Destroy Tx lock */ 1134 mtx_destroy(&sq->mtx); 1135 memset(sq->mtx_name, 0, sizeof(sq->mtx_name)); |
414} 415 | 1136} 1137 |
416static void nicvf_reclaim_snd_queue(struct nicvf *nic, 417 struct queue_set *qs, int qidx) | 1138static void 1139nicvf_reclaim_snd_queue(struct nicvf *nic, struct queue_set *qs, int qidx) |
418{ | 1140{ |
1141 |
|
419 /* Disable send queue */ 420 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0); 421 /* Check if SQ is stopped */ 422 if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01)) 423 return; 424 /* Reset send queue */ 425 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET); 426} 427 | 1142 /* Disable send queue */ 1143 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0); 1144 /* Check if SQ is stopped */ 1145 if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01)) 1146 return; 1147 /* Reset send queue */ 1148 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET); 1149} 1150 |
428static void nicvf_reclaim_rcv_queue(struct nicvf *nic, 429 struct queue_set *qs, int qidx) | 1151static void 1152nicvf_reclaim_rcv_queue(struct nicvf *nic, struct queue_set *qs, int qidx) |
430{ 431 union nic_mbx mbx = {}; 432 433 /* Make sure all packets in the pipeline are written back into mem */ 434 mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC; 435 nicvf_send_msg_to_pf(nic, &mbx); 436} 437 | 1153{ 1154 union nic_mbx mbx = {}; 1155 1156 /* Make sure all packets in the pipeline are written back into mem */ 1157 mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC; 1158 nicvf_send_msg_to_pf(nic, &mbx); 1159} 1160 |
438static void nicvf_reclaim_cmp_queue(struct nicvf *nic, 439 struct queue_set *qs, int qidx) | 1161static void 1162nicvf_reclaim_cmp_queue(struct nicvf *nic, struct queue_set *qs, int qidx) |
440{ | 1163{ |
1164 |
|
441 /* Disable timer threshold (doesn't get reset upon CQ reset */ 442 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0); 443 /* Disable completion queue */ 444 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0); 445 /* Reset completion queue */ 446 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET); 447} 448 | 1165 /* Disable timer threshold (doesn't get reset upon CQ reset) */ 1166 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0); 1167 /* Disable completion queue */ 1168 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0); 1169 /* Reset completion queue */ 1170 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET); 1171} 1172 |
449static void nicvf_reclaim_rbdr(struct nicvf *nic, 450 struct rbdr *rbdr, int qidx) | 1173static void 1174nicvf_reclaim_rbdr(struct nicvf *nic, struct rbdr *rbdr, int qidx) |
451{ | 1175{ |
452 u64 tmp, fifo_state; | 1176 uint64_t tmp, fifo_state; |
453 int timeout = 10; 454 455 /* Save head and tail pointers for feeing up buffers */ | 1177 int timeout = 10; 1178 1179 /* Save head and tail pointers for freeing up buffers */ |
456 rbdr->head = nicvf_queue_reg_read(nic, 457 NIC_QSET_RBDR_0_1_HEAD, 458 qidx) >> 3; 459 rbdr->tail = nicvf_queue_reg_read(nic, 460 NIC_QSET_RBDR_0_1_TAIL, 461 qidx) >> 3; | 1180 rbdr->head = 1181 nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3; 1182 rbdr->tail = 1183 nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3; |
462 | 1184 |
463 /* If RBDR FIFO is in 'FAIL' state then do a reset first | 1185 /* 1186 * If RBDR FIFO is in 'FAIL' state then do a reset first |
464 * before relaiming. 465 */ 466 fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx); | 1187 * before reclaiming. 1188 */ 1189 fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx); |
467 if (((fifo_state >> 62) & 0x03) == 0x3) | 1190 if (((fifo_state >> 62) & 0x03) == 0x3) { |
468 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, | 1191 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, |
469 qidx, NICVF_RBDR_RESET); | 1192 qidx, NICVF_RBDR_RESET); 1193 } |
470 471 /* Disable RBDR */ 472 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0); 473 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00)) 474 return; 475 while (1) { 476 tmp = nicvf_queue_reg_read(nic, | 1194 1195 /* Disable RBDR */ 1196 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0); 1197 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00)) 1198 return; 1199 while (1) { 1200 tmp = nicvf_queue_reg_read(nic, |
477 NIC_QSET_RBDR_0_1_PREFETCH_STATUS, 478 qidx); | 1201 NIC_QSET_RBDR_0_1_PREFETCH_STATUS, qidx); |
479 if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF)) 480 break; | 1202 if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF)) 1203 break; |
481 usleep_range(1000, 2000); | 1204 1205 DELAY(1000); |
482 timeout--; 483 if (!timeout) { | 1206 timeout--; 1207 if (!timeout) { |
484 netdev_err(nic->netdev, 485 "Failed polling on prefetch status\n"); | 1208 device_printf(nic->dev, 1209 "Failed polling on prefetch status\n"); |
486 return; 487 } 488 } | 1210 return; 1211 } 1212 } |
489 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, 490 qidx, NICVF_RBDR_RESET); | 1213 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 1214 NICVF_RBDR_RESET); |
491 492 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02)) 493 return; 494 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00); 495 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00)) 496 return; 497} 498 | 1215 1216 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02)) 1217 return; 1218 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00); 1219 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00)) 1220 return; 1221} 1222 |
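The prefetch-status poll above considers the FIFO drained once the two 32-bit halves of NIC_QSET_RBDR_0_1_PREFETCH_STATUS carry the same value. A minimal sketch of that comparison (register values hypothetical):

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* True when both 32-bit halves of the register carry the same value. */
static bool
prefetch_idle(uint64_t tmp)
{
	return ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF));
}

int
main(void)
{
	assert(!prefetch_idle((0x10ULL << 32) | 0x0fULL));	/* still busy */
	assert(prefetch_idle((0x10ULL << 32) | 0x10ULL));	/* settled */
	return (0);
}
```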
499void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features) 500{ 501 u64 rq_cfg; 502#ifdef VNIC_MULTI_QSET_SUPPORT 503 int sqs = 0; 504#endif 505 506 rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0); 507 508 /* Enable first VLAN stripping */ 509 if (features & NETIF_F_HW_VLAN_CTAG_RX) 510 rq_cfg |= (1ULL << 25); 511 else 512 rq_cfg &= ~(1ULL << 25); 513 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg); 514 515#ifdef VNIC_MULTI_QSET_SUPPORT 516 /* Configure Secondary Qsets, if any */ 517 for (sqs = 0; sqs < nic->sqs_count; sqs++) 518 if (nic->snicvf[sqs]) 519 nicvf_queue_reg_write(nic->snicvf[sqs], 520 NIC_QSET_RQ_GEN_CFG, 0, rq_cfg); 521#endif 522} 523 | |
524/* Configures receive queue */ | 1223/* Configures receive queue */ |
525static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, 526 int qidx, bool enable) | 1224static void 1225nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, 1226 int qidx, bool enable) |
527{ 528 union nic_mbx mbx = {}; 529 struct rcv_queue *rq; | 1227{ 1228 union nic_mbx mbx = {}; 1229 struct rcv_queue *rq; |
530 struct cmp_queue *cq; | |
531 struct rq_cfg rq_cfg; 532 533 rq = &qs->rq[qidx]; 534 rq->enable = enable; 535 536 /* Disable receive queue */ 537 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0); 538 --- 11 unchanged lines hidden --- 550 /* all writes of RBDR data to be loaded into L2 Cache as well*/ 551 rq->caching = 1; 552 553 /* Send a mailbox msg to PF to config RQ */ 554 mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG; 555 mbx.rq.qs_num = qs->vnic_id; 556 mbx.rq.rq_num = qidx; 557 mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) | | 1230 struct rq_cfg rq_cfg; 1231 1232 rq = &qs->rq[qidx]; 1233 rq->enable = enable; 1234 1235 /* Disable receive queue */ 1236 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0); 1237 --- 11 unchanged lines hidden --- 1249 /* all writes of RBDR data to be loaded into L2 Cache as well */ 1250 rq->caching = 1; 1251 1252 /* Send a mailbox msg to PF to config RQ */ 1253 mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG; 1254 mbx.rq.qs_num = qs->vnic_id; 1255 mbx.rq.rq_num = qidx; 1256 mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
558 (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) | 559 (rq->cont_qs_rbdr_idx << 8) | 560 (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx); | 1257 (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) | 1258 (rq->cont_qs_rbdr_idx << 8) | (rq->start_rbdr_qs << 1) | 1259 (rq->start_qs_rbdr_idx); |
561 nicvf_send_msg_to_pf(nic, &mbx); 562 563 mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG; | 1260 nicvf_send_msg_to_pf(nic, &mbx); 1261 1262 mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG; |
564 mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0); | 1263 mbx.rq.cfg = (1UL << 63) | (1UL << 62) | (qs->vnic_id << 0); |
565 nicvf_send_msg_to_pf(nic, &mbx); 566 | 1264 nicvf_send_msg_to_pf(nic, &mbx); 1265 |
567 /* RQ drop config | 1266 /* 1267 * RQ drop config |
568 * Enable CQ drop to reserve sufficient CQEs for all tx packets 569 */ 570 mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG; | 1268 * Enable CQ drop to reserve sufficient CQEs for all tx packets 1269 */ 1270 mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG; |
571 mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8); | 1271 mbx.rq.cfg = (1UL << 62) | (RQ_CQ_DROP << 8); |
572 nicvf_send_msg_to_pf(nic, &mbx); 573 574 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00); | 1272 nicvf_send_msg_to_pf(nic, &mbx); 1273 1274 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00); |
575 if (!nic->sqs_mode) 576 nicvf_config_vlan_stripping(nic, nic->netdev->features); | |
577 578 /* Enable Receive queue */ 579 rq_cfg.ena = 1; 580 rq_cfg.tcp_ena = 0; | 1275 1276 /* Enable Receive queue */ 1277 rq_cfg.ena = 1; 1278 rq_cfg.tcp_ena = 0; |
581 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg); | 1279 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 1280 *(uint64_t *)&rq_cfg); |
582} 583 584/* Configures completion queue */ | 1281} 1282 1283/* Configures completion queue */ |
585void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, 586 int qidx, bool enable) | 1284static void 1285nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, 1286 int qidx, boolean_t enable) |
587{ 588 struct cmp_queue *cq; 589 struct cq_cfg cq_cfg; 590 591 cq = &qs->cq[qidx]; 592 cq->enable = enable; 593 594 if (!cq->enable) { 595 nicvf_reclaim_cmp_queue(nic, qs, qidx); 596 return; 597 } 598 599 /* Reset completion queue */ 600 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET); 601 | 1287{ 1288 struct cmp_queue *cq; 1289 struct cq_cfg cq_cfg; 1290 1291 cq = &qs->cq[qidx]; 1292 cq->enable = enable; 1293 1294 if (!cq->enable) { 1295 nicvf_reclaim_cmp_queue(nic, qs, qidx); 1296 return; 1297 } 1298 1299 /* Reset completion queue */ 1300 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET); 1301 |
602 if (!cq->enable) 603 return; 604 605 spin_lock_init(&cq->lock); | |
606 /* Set completion queue base address */ | 1302 /* Set completion queue base address */ |
607 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, 608 qidx, (u64)(cq->dmem.phys_base)); | 1303 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx, 1304 (uint64_t)(cq->dmem.phys_base)); |
609 610 /* Enable Completion queue */ 611 cq_cfg.ena = 1; 612 cq_cfg.reset = 0; 613 cq_cfg.caching = 0; 614 cq_cfg.qsize = CMP_QSIZE; 615 cq_cfg.avg_con = 0; | 1305 1306 /* Enable Completion queue */ 1307 cq_cfg.ena = 1; 1308 cq_cfg.reset = 0; 1309 cq_cfg.caching = 0; 1310 cq_cfg.qsize = CMP_QSIZE; 1311 cq_cfg.avg_con = 0; |
616 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg); | 1312 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(uint64_t *)&cq_cfg); |
617 618 /* Set threshold value for interrupt generation */ 619 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh); | 1313 1314 /* Set threshold value for interrupt generation */ 1315 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh); |
620 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, 621 qidx, nic->cq_coalesce_usecs); | 1316 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 1317 nic->cq_coalesce_usecs); |
622} 623 624/* Configures transmit queue */ | 1318} 1319 1320/* Configures transmit queue */ |
625static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, 626 int qidx, bool enable) | 1321static void 1322nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx, 1323 boolean_t enable) |
627{ 628 union nic_mbx mbx = {}; 629 struct snd_queue *sq; 630 struct sq_cfg sq_cfg; 631 632 sq = &qs->sq[qidx]; 633 sq->enable = enable; 634 --- 12 unchanged lines hidden --- 647 mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG; 648 mbx.sq.qs_num = qs->vnic_id; 649 mbx.sq.sq_num = qidx; 650 mbx.sq.sqs_mode = nic->sqs_mode; 651 mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx; 652 nicvf_send_msg_to_pf(nic, &mbx); 653 654 /* Set queue base address */ | 1324{ 1325 union nic_mbx mbx = {}; 1326 struct snd_queue *sq; 1327 struct sq_cfg sq_cfg; 1328 1329 sq = &qs->sq[qidx]; 1330 sq->enable = enable; 1331 --- 12 unchanged lines hidden --- 1344 mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG; 1345 mbx.sq.qs_num = qs->vnic_id; 1346 mbx.sq.sq_num = qidx; 1347 mbx.sq.sqs_mode = nic->sqs_mode; 1348 mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx; 1349 nicvf_send_msg_to_pf(nic, &mbx); 1350 1351 /* Set queue base address */ |
655 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, 656 qidx, (u64)(sq->dmem.phys_base)); | 1352 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx, 1353 (uint64_t)(sq->dmem.phys_base)); |
657 658 /* Enable send queue & set queue size */ 659 sq_cfg.ena = 1; 660 sq_cfg.reset = 0; 661 sq_cfg.ldwb = 0; 662 sq_cfg.qsize = SND_QSIZE; 663 sq_cfg.tstmp_bgx_intf = 0; | 1354 1355 /* Enable send queue & set queue size */ 1356 sq_cfg.ena = 1; 1357 sq_cfg.reset = 0; 1358 sq_cfg.ldwb = 0; 1359 sq_cfg.qsize = SND_QSIZE; 1360 sq_cfg.tstmp_bgx_intf = 0; |
664 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg); | 1361 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(uint64_t *)&sq_cfg); |
665 666 /* Set threshold value for interrupt generation */ 667 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh); 668} 669 670/* Configures receive buffer descriptor ring */ | 1362 1363 /* Set threshold value for interrupt generation */ 1364 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh); 1365} 1366 1367/* Configures receive buffer descriptor ring */ |
671static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, 672 int qidx, bool enable) | 1368static void 1369nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, int qidx, 1370 boolean_t enable) |
673{ 674 struct rbdr *rbdr; 675 struct rbdr_cfg rbdr_cfg; 676 677 rbdr = &qs->rbdr[qidx]; 678 nicvf_reclaim_rbdr(nic, rbdr, qidx); 679 if (!enable) 680 return; 681 682 /* Set descriptor base address */ | 1371{ 1372 struct rbdr *rbdr; 1373 struct rbdr_cfg rbdr_cfg; 1374 1375 rbdr = &qs->rbdr[qidx]; 1376 nicvf_reclaim_rbdr(nic, rbdr, qidx); 1377 if (!enable) 1378 return; 1379 1380 /* Set descriptor base address */ |
683 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, 684 qidx, (u64)(rbdr->dmem.phys_base)); | 1381 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx, 1382 (uint64_t)(rbdr->dmem.phys_base)); |
685 686 /* Enable RBDR & set queue size */ 687 /* Buffer size should be in multiples of 128 bytes */ 688 rbdr_cfg.ena = 1; 689 rbdr_cfg.reset = 0; 690 rbdr_cfg.ldwb = 0; 691 rbdr_cfg.qsize = RBDR_SIZE; 692 rbdr_cfg.avg_con = 0; 693 rbdr_cfg.lines = rbdr->dma_size / 128; | 1383 1384 /* Enable RBDR & set queue size */ 1385 /* Buffer size should be in multiples of 128 bytes */ 1386 rbdr_cfg.ena = 1; 1387 rbdr_cfg.reset = 0; 1388 rbdr_cfg.ldwb = 0; 1389 rbdr_cfg.qsize = RBDR_SIZE; 1390 rbdr_cfg.avg_con = 0; 1391 rbdr_cfg.lines = rbdr->dma_size / 128; |
694 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, 695 qidx, *(u64 *)&rbdr_cfg); | 1392 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 1393 *(uint64_t *)&rbdr_cfg); |
696 697 /* Notify HW */ | 1394 1395 /* Notify HW */ |
698 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, 699 qidx, qs->rbdr_len - 1); | 1396 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, qidx, 1397 qs->rbdr_len - 1); |
700 701 /* Set threshold value for interrupt generation */ | 1398 1399 /* Set threshold value for interrupt generation */ |
702 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, 703 qidx, rbdr->thresh - 1); | 1400 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, qidx, 1401 rbdr->thresh - 1); |
704} 705 706/* Requests PF to assign and enable Qset */ | 1402} 1403 1404/* Requests PF to assign and enable Qset */ |
707void nicvf_qset_config(struct nicvf *nic, bool enable) | 1405void 1406nicvf_qset_config(struct nicvf *nic, boolean_t enable) |
708{ 709 union nic_mbx mbx = {}; | 1407{ 1408 union nic_mbx mbx = {}; |
710 struct queue_set *qs = nic->qs; | 1409 struct queue_set *qs; |
711 struct qs_cfg *qs_cfg; 712 | 1410 struct qs_cfg *qs_cfg; 1411 |
713 if (!qs) { 714 netdev_warn(nic->netdev, 715 "Qset is still not allocated, don't init queues\n"); | 1412 qs = nic->qs; 1413 if (qs == NULL) { 1414 device_printf(nic->dev, 1415 "Qset is still not allocated, don't init queues\n"); |
716 return; 717 } 718 719 qs->enable = enable; 720 qs->vnic_id = nic->vf_id; 721 722 /* Send a mailbox msg to PF to config Qset */ 723 mbx.qs.msg = NIC_MBOX_MSG_QS_CFG; 724 mbx.qs.num = qs->vnic_id; | 1416 return; 1417 } 1418 1419 qs->enable = enable; 1420 qs->vnic_id = nic->vf_id; 1421 1422 /* Send a mailbox msg to PF to config Qset */ 1423 mbx.qs.msg = NIC_MBOX_MSG_QS_CFG; 1424 mbx.qs.num = qs->vnic_id; |
725#ifdef VNIC_MULTI_QSET_SUPPORT 726 mbx.qs.sqs_count = nic->sqs_count; 727#endif | |
728 729 mbx.qs.cfg = 0; 730 qs_cfg = (struct qs_cfg *)&mbx.qs.cfg; 731 if (qs->enable) { 732 qs_cfg->ena = 1; | 1425 1426 mbx.qs.cfg = 0; 1427 qs_cfg = (struct qs_cfg *)&mbx.qs.cfg; 1428 if (qs->enable) { 1429 qs_cfg->ena = 1; |
733#ifdef __BIG_ENDIAN 734 qs_cfg->be = 1; 735#endif | |
736 qs_cfg->vnic = qs->vnic_id; 737 } 738 nicvf_send_msg_to_pf(nic, &mbx); 739} 740 | 1430 qs_cfg->vnic = qs->vnic_id; 1431 } 1432 nicvf_send_msg_to_pf(nic, &mbx); 1433} 1434 |
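The Qset configuration shows the driver's usual trick for mailbox and CSR words: a bit-field struct from q_struct.h is overlaid on a plain uint64_t and filled field by field. A minimal, runnable sketch of that pack-via-cast idiom, using a hypothetical field layout (the real struct qs_cfg is defined in q_struct.h):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct qs_cfg from q_struct.h. */
struct demo_qs_cfg {
	uint64_t vnic	: 7;	/* Qset number to bind */
	uint64_t rsvd	: 56;
	uint64_t ena	: 1;	/* Qset enable */
};

int
main(void)
{
	uint64_t cfg = 0;
	struct demo_qs_cfg *qs_cfg;

	/* Same type pun the driver applies to mbx.qs.cfg. */
	qs_cfg = (struct demo_qs_cfg *)&cfg;
	qs_cfg->ena = 1;
	qs_cfg->vnic = 3;

	printf("mailbox cfg word: 0x%016jx\n", (uintmax_t)cfg);
	return (0);
}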
741static void nicvf_free_resources(struct nicvf *nic) | 1435static void 1436nicvf_free_resources(struct nicvf *nic) |
742{ 743 int qidx; | 1437{ 1438 int qidx; |
744 struct queue_set *qs = nic->qs; | 1439 struct queue_set *qs; |
745 | 1440 |
1441 qs = nic->qs; 1442 /* 1443 * Remove QS error task first since it has to be dead 1444 * to safely free completion queue tasks. 1445 */ 1446 if (qs->qs_err_taskq != NULL) { 1447 /* Shut down QS error tasks */ 1448 while (taskqueue_cancel(qs->qs_err_taskq, 1449 &qs->qs_err_task, NULL) != 0) { 1450 taskqueue_drain(qs->qs_err_taskq, &qs->qs_err_task); 1451 1452 } 1453 taskqueue_free(qs->qs_err_taskq); 1454 qs->qs_err_taskq = NULL; 1455 } |
|
746 /* Free receive buffer descriptor ring */ 747 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) 748 nicvf_free_rbdr(nic, &qs->rbdr[qidx]); 749 750 /* Free completion queue */ 751 for (qidx = 0; qidx < qs->cq_cnt; qidx++) 752 nicvf_free_cmp_queue(nic, &qs->cq[qidx]); 753 754 /* Free send queue */ 755 for (qidx = 0; qidx < qs->sq_cnt; qidx++) 756 nicvf_free_snd_queue(nic, &qs->sq[qidx]); 757} 758 | 1456 /* Free receive buffer descriptor ring */ 1457 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) 1458 nicvf_free_rbdr(nic, &qs->rbdr[qidx]); 1459 1460 /* Free completion queue */ 1461 for (qidx = 0; qidx < qs->cq_cnt; qidx++) 1462 nicvf_free_cmp_queue(nic, &qs->cq[qidx]); 1463 1464 /* Free send queue */ 1465 for (qidx = 0; qidx < qs->sq_cnt; qidx++) 1466 nicvf_free_snd_queue(nic, &qs->sq[qidx]); 1467} 1468 |
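The new teardown above has to kill the QS error task before the queues it touches go away. The cancel/drain loop is the standard taskqueue(9) idiom: taskqueue_cancel() returns non-zero while the task is mid-run, in which case the caller drains and retries until the cancel sticks. A sketch of that idiom in isolation, with hypothetical names:

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/taskqueue.h>

/*
 * taskqueue_cancel() fails (non-zero) while the task is currently
 * executing; drain and retry until it succeeds, then free the queue.
 */
static void
demo_taskq_destroy(struct taskqueue **tqp, struct task *tp)
{
	if (*tqp == NULL)
		return;
	while (taskqueue_cancel(*tqp, tp, NULL) != 0)
		taskqueue_drain(*tqp, tp);
	taskqueue_free(*tqp);
	*tqp = NULL;
}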
759static int nicvf_alloc_resources(struct nicvf *nic) | 1469static int 1470nicvf_alloc_resources(struct nicvf *nic) |
760{ | 1471{ |
761 int qidx; | |
762 struct queue_set *qs = nic->qs; | 1472 struct queue_set *qs = nic->qs; |
1473 int qidx; |
|
763 764 /* Alloc receive buffer descriptor ring */ 765 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { 766 if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len, | 1474 1475 /* Alloc receive buffer descriptor ring */ 1476 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { 1477 if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len, |
767 DMA_BUFFER_LEN)) | 1478 DMA_BUFFER_LEN, qidx)) |
768 goto alloc_fail; 769 } 770 771 /* Alloc send queue */ 772 for (qidx = 0; qidx < qs->sq_cnt; qidx++) { | 1479 goto alloc_fail; 1480 } 1481 1482 /* Alloc send queue */ 1483 for (qidx = 0; qidx < qs->sq_cnt; qidx++) { |
773 if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len)) | 1484 if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx)) |
774 goto alloc_fail; 775 } 776 777 /* Alloc completion queue */ 778 for (qidx = 0; qidx < qs->cq_cnt; qidx++) { | 1485 goto alloc_fail; 1486 } 1487 1488 /* Alloc completion queue */ 1489 for (qidx = 0; qidx < qs->cq_cnt; qidx++) { |
779 if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len)) | 1490 if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len, qidx)) |
780 goto alloc_fail; 781 } 782 | 1491 goto alloc_fail; 1492 } 1493 |
783 return 0; | 1494 /* Allocate QS error taskqueue */ 1495 TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic); 1496 qs->qs_err_taskq = taskqueue_create_fast("nicvf_qs_err_taskq", M_WAITOK, 1497 taskqueue_thread_enqueue, &qs->qs_err_taskq); 1498 taskqueue_start_threads(&qs->qs_err_taskq, 1, PI_NET, "%s: qs_taskq", 1499 device_get_nameunit(nic->dev)); 1500 1501 return (0); |
784alloc_fail: 785 nicvf_free_resources(nic); | 1502alloc_fail: 1503 nicvf_free_resources(nic); |
786 return -ENOMEM; | 1504 return (ENOMEM); |
787} 788 | 1505} 1506 |
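The setup side, added at the end of nicvf_alloc_resources() above, is the mirror image: TASK_INIT() binds the handler, taskqueue_create_fast() with taskqueue_thread_enqueue builds a thread-backed queue, and taskqueue_start_threads() spawns the worker at network priority. A sketch assuming a typical softc layout (names hypothetical):

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/priority.h>
#include <sys/taskqueue.h>

struct demo_softc {
	device_t		 dev;
	struct task		 err_task;
	struct taskqueue	*err_taskq;
};

static void demo_err_task(void *arg, int pending);	/* handler elsewhere */

static void
demo_taskq_setup(struct demo_softc *sc)
{
	TASK_INIT(&sc->err_task, 0, demo_err_task, sc);
	sc->err_taskq = taskqueue_create_fast("demo_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->err_taskq);
	taskqueue_start_threads(&sc->err_taskq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->dev));
}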
789int nicvf_set_qset_resources(struct nicvf *nic) | 1507int 1508nicvf_set_qset_resources(struct nicvf *nic) |
790{ 791 struct queue_set *qs; 792 | 1509{ 1510 struct queue_set *qs; 1511 |
793 qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL); 794 if (!qs) 795 return -ENOMEM; | 1512 qs = malloc(sizeof(*qs), M_NICVF, (M_ZERO | M_WAITOK)); |
796 nic->qs = qs; 797 798 /* Set count of each queue */ 799 qs->rbdr_cnt = RBDR_CNT; | 1513 nic->qs = qs; 1514 1515 /* Set count of each queue */ 1516 qs->rbdr_cnt = RBDR_CNT; |
800#ifdef VNIC_RSS_SUPPORT 801 qs->rq_cnt = RCV_QUEUE_CNT; 802#else | 1517 /* With no RSS we stay with single RQ */ |
803 qs->rq_cnt = 1; | 1518 qs->rq_cnt = 1; |
804#endif | 1519 |
805 qs->sq_cnt = SND_QUEUE_CNT; 806 qs->cq_cnt = CMP_QUEUE_CNT; 807 808 /* Set queue lengths */ 809 qs->rbdr_len = RCV_BUF_COUNT; 810 qs->sq_len = SND_QUEUE_LEN; 811 qs->cq_len = CMP_QUEUE_LEN; 812 813 nic->rx_queues = qs->rq_cnt; 814 nic->tx_queues = qs->sq_cnt; 815 | 1520 qs->sq_cnt = SND_QUEUE_CNT; 1521 qs->cq_cnt = CMP_QUEUE_CNT; 1522 1523 /* Set queue lengths */ 1524 qs->rbdr_len = RCV_BUF_COUNT; 1525 qs->sq_len = SND_QUEUE_LEN; 1526 qs->cq_len = CMP_QUEUE_LEN; 1527 1528 nic->rx_queues = qs->rq_cnt; 1529 nic->tx_queues = qs->sq_cnt; 1530 |
816 return 0; | 1531 return (0); |
817} 818 | 1532} 1533 |
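Where the Linux original used devm_kzalloc(..., GFP_KERNEL) and checked for NULL, the port allocates from its own malloc(9) type; M_ZERO | M_WAITOK yields zeroed memory and may sleep but cannot return NULL, which is why the NULL check and ENOMEM path disappear in nicvf_set_qset_resources(). A self-contained sketch of the pattern (the demo type and struct are placeholders for M_NICVF and struct queue_set):

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

MALLOC_DEFINE(M_DEMO, "demo", "demo queue-set allocations");

struct demo_qs {
	int	rq_cnt;		/* placeholder fields */
};

static struct demo_qs *
demo_alloc_qs(void)
{
	/* M_WAITOK may sleep but never returns NULL; M_ZERO zeroes. */
	return (malloc(sizeof(struct demo_qs), M_DEMO, M_ZERO | M_WAITOK));
}

static void
demo_free_qs(struct demo_qs *qs)
{
	free(qs, M_DEMO);
}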
819int nicvf_config_data_transfer(struct nicvf *nic, bool enable) | 1534int 1535nicvf_config_data_transfer(struct nicvf *nic, boolean_t enable) |
820{ | 1536{ |
821 bool disable = false; 822 struct queue_set *qs = nic->qs; | 1537 boolean_t disable = FALSE; 1538 struct queue_set *qs; |
823 int qidx; 824 | 1539 int qidx; 1540 |
825 if (!qs) 826 return 0; | 1541 qs = nic->qs; 1542 if (qs == NULL) 1543 return (0); |
827 828 if (enable) { | 1544 1545 if (enable) { |
829 if (nicvf_alloc_resources(nic)) 830 return -ENOMEM; | 1546 if (nicvf_alloc_resources(nic) != 0) 1547 return (ENOMEM); |
831 832 for (qidx = 0; qidx < qs->sq_cnt; qidx++) 833 nicvf_snd_queue_config(nic, qs, qidx, enable); 834 for (qidx = 0; qidx < qs->cq_cnt; qidx++) 835 nicvf_cmp_queue_config(nic, qs, qidx, enable); 836 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) 837 nicvf_rbdr_config(nic, qs, qidx, enable); 838 for (qidx = 0; qidx < qs->rq_cnt; qidx++) --- 6 unchanged lines hidden (view full) --- 845 for (qidx = 0; qidx < qs->sq_cnt; qidx++) 846 nicvf_snd_queue_config(nic, qs, qidx, disable); 847 for (qidx = 0; qidx < qs->cq_cnt; qidx++) 848 nicvf_cmp_queue_config(nic, qs, qidx, disable); 849 850 nicvf_free_resources(nic); 851 } 852 | 1548 1549 for (qidx = 0; qidx < qs->sq_cnt; qidx++) 1550 nicvf_snd_queue_config(nic, qs, qidx, enable); 1551 for (qidx = 0; qidx < qs->cq_cnt; qidx++) 1552 nicvf_cmp_queue_config(nic, qs, qidx, enable); 1553 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) 1554 nicvf_rbdr_config(nic, qs, qidx, enable); 1555 for (qidx = 0; qidx < qs->rq_cnt; qidx++) --- 6 unchanged lines hidden (view full) --- 1562 for (qidx = 0; qidx < qs->sq_cnt; qidx++) 1563 nicvf_snd_queue_config(nic, qs, qidx, disable); 1564 for (qidx = 0; qidx < qs->cq_cnt; qidx++) 1565 nicvf_cmp_queue_config(nic, qs, qidx, disable); 1566 1567 nicvf_free_resources(nic); 1568 } 1569 |
853 return 0; | 1570 return (0); |
854} 855 | 1571} 1572 |
856/* Get a free desc from SQ | 1573/* 1574 * Get a free desc from SQ |
857 * returns descriptor pointer & descriptor number 858 */ | 1575 * returns descriptor pointer & descriptor number 1576 */
859static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt) | 1577static __inline int 1578nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt) |
860{ 861 int qentry; 862 863 qentry = sq->tail; | 1579{ 1580 int qentry; 1581 1582 qentry = sq->tail; |
864 atomic_sub(desc_cnt, &sq->free_cnt); | 1583 atomic_subtract_int(&sq->free_cnt, desc_cnt); |
865 sq->tail += desc_cnt; 866 sq->tail &= (sq->dmem.q_len - 1); 867 | 1584 sq->tail += desc_cnt; 1585 sq->tail &= (sq->dmem.q_len - 1); 1586 |
868 return qentry; | 1587 return (qentry); |
869} 870 871/* Free descriptor back to SQ for future use */ | 1588} 1589 1590/* Free descriptor back to SQ for future use */ |
872void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt) | 1591static void 1592nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt) |
873{ | 1593{ |
874 atomic_add(desc_cnt, &sq->free_cnt); | 1594 1595 atomic_add_int(&sq->free_cnt, desc_cnt); |
875 sq->head += desc_cnt; 876 sq->head &= (sq->dmem.q_len - 1); 877} 878 | 1596 sq->head += desc_cnt; 1597 sq->head &= (sq->dmem.q_len - 1); 1598} 1599 |
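Both descriptor helpers rely on q_len being a power of two, so "& (q_len - 1)" is a cheap modulo, while the free count stays consistent between the transmit producer and the completion path via atomic_subtract_int()/atomic_add_int(). A runnable userland model of the same arithmetic:

#include <stdio.h>

#define Q_LEN	1024		/* must be a power of two */

static int tail, head, free_cnt = Q_LEN;

/* Reserve desc_cnt consecutive descriptors; returns the first index. */
static int
get_desc(int desc_cnt)
{
	int qentry = tail;

	free_cnt -= desc_cnt;	/* atomic_subtract_int() in the driver */
	tail = (tail + desc_cnt) & (Q_LEN - 1);
	return (qentry);
}

/* Return desc_cnt descriptors after completion. */
static void
put_desc(int desc_cnt)
{
	free_cnt += desc_cnt;	/* atomic_add_int() in the driver */
	head = (head + desc_cnt) & (Q_LEN - 1);
}

int
main(void)
{
	int q = get_desc(3);

	printf("first=%d tail=%d free=%d\n", q, tail, free_cnt);
	put_desc(3);
	printf("head=%d free=%d\n", head, free_cnt);
	return (0);
}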
879static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry) | 1600static __inline int 1601nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry) |
880{ 881 qentry++; 882 qentry &= (sq->dmem.q_len - 1); | 1602{ 1603 qentry++; 1604 qentry &= (sq->dmem.q_len - 1); |
883 return qentry; | 1605 return (qentry); |
884} 885 | 1606} 1607 |
886void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx) | 1608static void 1609nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx) |
887{ | 1610{ |
888 u64 sq_cfg; | 1611 uint64_t sq_cfg; |
889 890 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx); 891 sq_cfg |= NICVF_SQ_EN; 892 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg); 893 /* Ring doorbell so that H/W restarts processing SQEs */ 894 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0); 895} 896 | 1612 1613 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx); 1614 sq_cfg |= NICVF_SQ_EN; 1615 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg); 1616 /* Ring doorbell so that H/W restarts processing SQEs */ 1617 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0); 1618} 1619 |
897void nicvf_sq_disable(struct nicvf *nic, int qidx) | 1620static void 1621nicvf_sq_disable(struct nicvf *nic, int qidx) |
898{ | 1622{ |
899 u64 sq_cfg; | 1623 uint64_t sq_cfg; |
900 901 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx); 902 sq_cfg &= ~NICVF_SQ_EN; 903 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg); 904} 905 | 1624 1625 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx); 1626 sq_cfg &= ~NICVF_SQ_EN; 1627 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg); 1628} 1629 |
906void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq, 907 int qidx) | 1630static void 1631nicvf_sq_free_used_descs(struct nicvf *nic, struct snd_queue *sq, int qidx) |
908{ | 1632{ |
909 u64 head, tail; 910 struct sk_buff *skb; 911 struct nicvf *nic = netdev_priv(netdev); | 1633 uint64_t head, tail; 1634 struct snd_buff *snd_buff; |
912 struct sq_hdr_subdesc *hdr; 913 | 1635 struct sq_hdr_subdesc *hdr; 1636 |
1637 NICVF_TX_LOCK(sq); |
|
914 head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4; 915 tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4; 916 while (sq->head != head) { 917 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head); 918 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) { 919 nicvf_put_sq_desc(sq, 1); 920 continue; 921 } | 1638 head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4; 1639 tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4; 1640 while (sq->head != head) { 1641 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head); 1642 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) { 1643 nicvf_put_sq_desc(sq, 1); 1644 continue; 1645 } |
922 skb = (struct sk_buff *)sq->skbuff[sq->head]; 923 if (skb) 924 dev_kfree_skb_any(skb); 925 atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets); 926 atomic64_add(hdr->tot_len, 927 (atomic64_t *)&netdev->stats.tx_bytes); | 1646 snd_buff = &sq->snd_buff[sq->head]; 1647 if (snd_buff->mbuf != NULL) { 1648 bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap); 1649 m_freem(snd_buff->mbuf); 1650 sq->snd_buff[sq->head].mbuf = NULL; 1651 } |
928 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); 929 } | 1652 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); 1653 } |
1654 NICVF_TX_UNLOCK(sq); |
|
930} 931 | 1655} 1656 |
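nicvf_sq_free_used_descs() now walks the software head up to the hardware head, unloading and freeing any mbuf attached to a HEADER subdescriptor, all under the TX lock. The NICVF_TX_* macros are not shown in this hunk; a plausible definition, assuming a per-send-queue mtx(9) member named mtx:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/*
 * Plausible shape of the NICVF_TX_* macros (the real ones live in
 * nicvf_queues.h); assumes struct snd_queue carries a struct mtx "mtx".
 */
#define	DEMO_TX_LOCK(sq)	mtx_lock(&(sq)->mtx)
#define	DEMO_TX_UNLOCK(sq)	mtx_unlock(&(sq)->mtx)
#define	DEMO_TX_LOCK_ASSERT(sq)	mtx_assert(&(sq)->mtx, MA_OWNED)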
932/* Get the number of SQ descriptors needed to xmit this skb */ 933static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb) 934{ 935 int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT; 936 937 if (skb_shinfo(skb)->gso_size) { 938 subdesc_cnt = nicvf_tso_count_subdescs(skb); 939 return subdesc_cnt; 940 } 941 942 if (skb_shinfo(skb)->nr_frags) 943 subdesc_cnt += skb_shinfo(skb)->nr_frags; 944 945 return subdesc_cnt; 946} 947 948/* Add SQ HEADER subdescriptor. | 1657/* 1658 * Add SQ HEADER subdescriptor. |
949 * First subdescriptor for every send descriptor. 950 */ | 1659 * First subdescriptor for every send descriptor. 1660 */ |
951static inline void | 1661static __inline void |
952nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry, | 1662nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry, |
953 int subdesc_cnt, struct sk_buff *skb, int len) | 1663 int subdesc_cnt, struct mbuf *mbuf, int len) |
954{ | 1664{ |
955 int proto; | |
956 struct sq_hdr_subdesc *hdr; 957 958 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); | 1665 struct sq_hdr_subdesc *hdr; 1666 1667 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); |
959 sq->skbuff[qentry] = (u64)skb; | 1668 sq->snd_buff[qentry].mbuf = mbuf; |
960 961 memset(hdr, 0, SND_QUEUE_DESC_SIZE); 962 hdr->subdesc_type = SQ_DESC_TYPE_HEADER; 963 /* Enable notification via CQE after processing SQE */ 964 hdr->post_cqe = 1; 965 /* No of subdescriptors following this */ 966 hdr->subdesc_cnt = subdesc_cnt; 967 hdr->tot_len = len; 968 | 1669 1670 memset(hdr, 0, SND_QUEUE_DESC_SIZE); 1671 hdr->subdesc_type = SQ_DESC_TYPE_HEADER; 1672 /* Enable notification via CQE after processing SQE */ 1673 hdr->post_cqe = 1; 1674 /* No of subdescriptors following this */ 1675 hdr->subdesc_cnt = subdesc_cnt; 1676 hdr->tot_len = len; 1677 |
969 /* Offload checksum calculation to HW */ 970 if (skb->ip_summed == CHECKSUM_PARTIAL) { 971 hdr->csum_l3 = 1; /* Enable IP csum calculation */ 972 hdr->l3_offset = skb_network_offset(skb); 973 hdr->l4_offset = skb_transport_offset(skb); 974 975 proto = ip_hdr(skb)->protocol; 976 switch (proto) { 977 case IPPROTO_TCP: 978 hdr->csum_l4 = SEND_L4_CSUM_TCP; 979 break; 980 case IPPROTO_UDP: 981 hdr->csum_l4 = SEND_L4_CSUM_UDP; 982 break; 983 case IPPROTO_SCTP: 984 hdr->csum_l4 = SEND_L4_CSUM_SCTP; 985 break; 986 } 987 } | 1678 /* ARM64TODO: Implement HW checksums calculation */ |
988} 989 | 1679} 1680 |
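The FreeBSD header subdescriptor drops the checksum-offload block the Linux code filled from the skb, leaving an ARM64TODO. A hedged sketch of what completing that TODO might look like, mapping mbuf csum_flags onto the same hdr fields the removed code used (assumes an untagged Ethernet + IPv4 frame whose headers sit in the first mbuf; struct sq_hdr_subdesc and SEND_L4_CSUM_* come from q_struct.h):

#include <sys/param.h>
#include <sys/mbuf.h>
#include <net/ethernet.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include "q_struct.h"

static void
demo_fill_csum(struct sq_hdr_subdesc *hdr, struct mbuf *mbuf)
{
	struct ip *ip;

	if ((mbuf->m_pkthdr.csum_flags &
	    (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP)) == 0)
		return;

	hdr->csum_l3 = 1;		/* enable IP header checksum */
	hdr->l3_offset = ETHER_HDR_LEN;
	ip = (struct ip *)(mtod(mbuf, caddr_t) + ETHER_HDR_LEN);
	hdr->l4_offset = ETHER_HDR_LEN + (ip->ip_hl << 2);

	if (mbuf->m_pkthdr.csum_flags & CSUM_TCP)
		hdr->csum_l4 = SEND_L4_CSUM_TCP;
	else if (mbuf->m_pkthdr.csum_flags & CSUM_UDP)
		hdr->csum_l4 = SEND_L4_CSUM_UDP;
	else if (mbuf->m_pkthdr.csum_flags & CSUM_SCTP)
		hdr->csum_l4 = SEND_L4_CSUM_SCTP;
}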
990/* SQ GATHER subdescriptor | 1681/* 1682 * SQ GATHER subdescriptor |
991 * Must follow HDR descriptor 992 */ 993static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, | 1683 * Must follow HDR descriptor 1684 */ 1685static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, |
994 int size, u64 data) | 1686 int size, uint64_t data) |
995{ 996 struct sq_gather_subdesc *gather; 997 998 qentry &= (sq->dmem.q_len - 1); 999 gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry); 1000 1001 memset(gather, 0, SND_QUEUE_DESC_SIZE); 1002 gather->subdesc_type = SQ_DESC_TYPE_GATHER; 1003 gather->ld_type = NIC_SEND_LD_TYPE_E_LDD; 1004 gather->size = size; 1005 gather->addr = data; 1006} 1007 | 1687{ 1688 struct sq_gather_subdesc *gather; 1689 1690 qentry &= (sq->dmem.q_len - 1); 1691 gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry); 1692 1693 memset(gather, 0, SND_QUEUE_DESC_SIZE); 1694 gather->subdesc_type = SQ_DESC_TYPE_GATHER; 1695 gather->ld_type = NIC_SEND_LD_TYPE_E_LDD; 1696 gather->size = size; 1697 gather->addr = data; 1698} 1699 |
1008/* Append an skb to a SQ for packet transfer. */ 1009int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb) | 1700/* Put an mbuf to a SQ for packet transfer. */ 1701static int 1702nicvf_tx_mbuf_locked(struct snd_queue *sq, struct mbuf *mbuf) |
1010{ | 1703{ |
1011 int i, size; 1012 int subdesc_cnt; 1013 int sq_num, qentry; 1014 struct queue_set *qs; 1015 struct snd_queue *sq; | 1704 bus_dma_segment_t segs[256]; 1705 struct snd_buff *snd_buff; 1706 size_t seg; 1707 int nsegs, qentry; 1708 int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT - 1; 1709 int err; |
1016 | 1710 |
1017 sq_num = skb_get_queue_mapping(skb); 1018#ifdef VNIC_MULTI_QSET_SUPPORT 1019 if (sq_num >= MAX_SND_QUEUES_PER_QS) { 1020 /* Get secondary Qset's SQ structure */ 1021 i = sq_num / MAX_SND_QUEUES_PER_QS; 1022 if (!nic->snicvf[i - 1]) { 1023 netdev_warn(nic->netdev, 1024 "Secondary Qset#%d's ptr not initialized\n", 1025 i - 1); 1026 return 1; 1027 } 1028 nic = (struct nicvf *)nic->snicvf[i - 1]; 1029 sq_num = sq_num % MAX_SND_QUEUES_PER_QS; | 1711 NICVF_TX_LOCK_ASSERT(sq); 1712 1713 if (sq->free_cnt == 0) 1714 return (ENOBUFS); 1715 1716 snd_buff = &sq->snd_buff[sq->tail]; 1717 1718 err = bus_dmamap_load_mbuf_sg(sq->snd_buff_dmat, snd_buff->dmap, 1719 mbuf, segs, &nsegs, BUS_DMA_NOWAIT); 1720 if (err != 0) { 1721 /* ARM64TODO: Add mbuf defragmenting if we lack maps */ 1722 return (err); |
1030 } | 1723 } |
1031#endif | |
1032 | 1724 |
1033 qs = nic->qs; 1034 sq = &qs->sq[sq_num]; | 1725 /* Set how many subdescriptors is required */ 1726 subdesc_cnt += nsegs; |
1035 | 1727 |
1036 subdesc_cnt = nicvf_sq_subdesc_required(nic, skb); 1037 if (subdesc_cnt > atomic_read(&sq->free_cnt)) 1038 goto append_fail; | 1728 if (subdesc_cnt > sq->free_cnt) { 1729 /* ARM64TODO: Add mbuf defragmentation if we lack descriptors */ 1730 bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap); 1731 return (ENOBUFS); 1732 } |
1039 1040 qentry = nicvf_get_sq_desc(sq, subdesc_cnt); 1041 1042 /* Add SQ header subdesc */ | 1733 1734 qentry = nicvf_get_sq_desc(sq, subdesc_cnt); 1735 1736 /* Add SQ header subdesc */ |
1043 nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len); | 1737 nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, mbuf, 1738 mbuf->m_pkthdr.len); |
1044 1045 /* Add SQ gather subdescs */ | 1739 1740 /* Add SQ gather subdescs */ |
1046 qentry = nicvf_get_nxt_sqentry(sq, qentry); 1047 size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len; 1048 nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data)); 1049 1050 /* Check for scattered buffer */ 1051 if (!skb_is_nonlinear(skb)) 1052 goto doorbell; 1053 1054 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1055 const struct skb_frag_struct *frag; 1056 1057 frag = &skb_shinfo(skb)->frags[i]; 1058 | 1741 for (seg = 0; seg < nsegs; seg++) { |
1059 qentry = nicvf_get_nxt_sqentry(sq, qentry); | 1742 qentry = nicvf_get_nxt_sqentry(sq, qentry); |
1060 size = skb_frag_size(frag); 1061 nicvf_sq_add_gather_subdesc(sq, qentry, size, 1062 virt_to_phys( 1063 skb_frag_address(frag))); | 1743 nicvf_sq_add_gather_subdesc(sq, qentry, segs[seg].ds_len, 1744 segs[seg].ds_addr); |
1064 } 1065 | 1745 } 1746 |
1066doorbell: | |
1067 /* make sure all memory stores are done before ringing doorbell */ | 1747 /* make sure all memory stores are done before ringing doorbell */ |
1068 smp_wmb(); | 1748 bus_dmamap_sync(sq->dmem.dmat, sq->dmem.dmap, BUS_DMASYNC_PREWRITE); |
1069 | 1749 |
1750 dprintf(sq->nic->dev, "%s: sq->idx: %d, subdesc_cnt: %d\n", 1751 __func__, sq->idx, subdesc_cnt); |
|
1070 /* Inform HW to xmit new packet */ | 1752 /* Inform HW to xmit new packet */ |
1071 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, 1072 sq_num, subdesc_cnt); 1073 return 1; 1074 1075append_fail: 1076 /* Use original PCI dev for debug log */ 1077 nic = nic->pnicvf; 1078 netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n"); 1079 return 0; | 1753 nicvf_queue_reg_write(sq->nic, NIC_QSET_SQ_0_7_DOOR, 1754 sq->idx, subdesc_cnt); 1755 return (0); |
1080} 1081 | 1756} 1757 |
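nicvf_tx_mbuf_locked() leans on bus_dma(9): bus_dmamap_load_mbuf_sg() flattens the mbuf chain into up to 256 physical segments, each becoming one GATHER subdescriptor, and bus_dmamap_sync(..., BUS_DMASYNC_PREWRITE) orders the stores before the doorbell write. The matching DMA tag is created elsewhere in the queue-init code; a sketch of a tag that would support this load (limits are hypothetical):

#include <sys/param.h>
#include <sys/bus.h>
#include <machine/bus.h>

static int
demo_create_snd_tag(device_t dev, bus_dma_tag_t *tag)
{
	return (bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * 256,		/* maxsize */
	    256,			/* nsegments, as in segs[256] */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    tag));
}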
1082static inline unsigned frag_num(unsigned i) | 1758static __inline u_int 1759frag_num(u_int i) |
1083{ | 1760{ |
1084#ifdef __BIG_ENDIAN 1085 return (i & ~3) + 3 - (i & 3); | 1761#if BYTE_ORDER == BIG_ENDIAN 1762 return ((i & ~3) + 3 - (i & 3)); |
1086#else | 1763#else |
1087 return i; | 1764 return (i); |
1088#endif 1089} 1090 | 1765#endif 1766} 1767 |
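frag_num() compensates for byte order when indexing the uint16_t rb_lens array read out of 64-bit CQE words: on big-endian hosts the four lengths within each word land in reverse order, so the index is mirrored within its group of four. A runnable demonstration of the big-endian mapping:

#include <stdio.h>

/* Big-endian variant of frag_num(): mirrors the index within each
 * group of four 16-bit entries packed into a 64-bit word. */
static unsigned
frag_num_be(unsigned i)
{
	return ((i & ~3) + 3 - (i & 3));
}

int
main(void)
{
	unsigned i;

	for (i = 0; i < 8; i++)
		printf("%u -> %u\n", i, frag_num_be(i));
	/* prints: 0->3 1->2 2->1 3->0 4->7 5->6 6->5 7->4 */
	return (0);
}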
1091/* Returns SKB for a received packet */ 1092struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx) | 1768/* Returns MBUF for a received packet */ 1769struct mbuf * 1770nicvf_get_rcv_mbuf(struct nicvf *nic, struct cqe_rx_t *cqe_rx) |
1093{ 1094 int frag; 1095 int payload_len = 0; | 1771{ 1772 int frag; 1773 int payload_len = 0; |
1096 struct sk_buff *skb = NULL; 1097 struct sk_buff *skb_frag = NULL; 1098 struct sk_buff *prev_frag = NULL; 1099 u16 *rb_lens = NULL; 1100 u64 *rb_ptrs = NULL; | 1774 struct mbuf *mbuf; 1775 struct mbuf *mbuf_frag; 1776 uint16_t *rb_lens = NULL; 1777 uint64_t *rb_ptrs = NULL; |
1101 | 1778 |
1102 rb_lens = (void *)cqe_rx + (3 * sizeof(u64)); 1103 rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64)); | 1779 mbuf = NULL; 1780 rb_lens = (uint16_t *)((uint8_t *)cqe_rx + (3 * sizeof(uint64_t))); 1781 rb_ptrs = (uint64_t *)((uint8_t *)cqe_rx + (6 * sizeof(uint64_t))); |
1104 | 1782 |
1105 netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n", 1106 __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz); | 1783 dprintf(nic->dev, "%s rb_cnt %d rb0_ptr %lx rb0_sz %d\n", 1784 __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz); |
1107 1108 for (frag = 0; frag < cqe_rx->rb_cnt; frag++) { 1109 payload_len = rb_lens[frag_num(frag)]; | 1785 1786 for (frag = 0; frag < cqe_rx->rb_cnt; frag++) { 1787 payload_len = rb_lens[frag_num(frag)]; |
1110 if (!frag) { | 1788 if (frag == 0) { |
1111 /* First fragment */ | 1789 /* First fragment */ |
1112 skb = nicvf_rb_ptr_to_skb(nic, 1113 *rb_ptrs - cqe_rx->align_pad, 1114 payload_len); 1115 if (!skb) 1116 return NULL; 1117 skb_reserve(skb, cqe_rx->align_pad); 1118 skb_put(skb, payload_len); | 1790 mbuf = nicvf_rb_ptr_to_mbuf(nic, 1791 (*rb_ptrs - cqe_rx->align_pad)); 1792 mbuf->m_len = payload_len; 1793 mbuf->m_data += cqe_rx->align_pad; 1794 if_setrcvif(mbuf, nic->ifp); |
1119 } else { 1120 /* Add fragments */ | 1795 } else { 1796 /* Add fragments */ |
1121 skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs, 1122 payload_len); 1123 if (!skb_frag) { 1124 dev_kfree_skb(skb); 1125 return NULL; 1126 } 1127 1128 if (!skb_shinfo(skb)->frag_list) 1129 skb_shinfo(skb)->frag_list = skb_frag; 1130 else 1131 prev_frag->next = skb_frag; 1132 1133 prev_frag = skb_frag; 1134 skb->len += payload_len; 1135 skb->data_len += payload_len; 1136 skb_frag->len = payload_len; | 1797 mbuf_frag = nicvf_rb_ptr_to_mbuf(nic, *rb_ptrs); 1798 m_append(mbuf, payload_len, mbuf_frag->m_data); 1799 m_freem(mbuf_frag); |
1137 } 1138 /* Next buffer pointer */ 1139 rb_ptrs++; 1140 } | 1800 } 1801 /* Next buffer pointer */ 1802 rb_ptrs++; 1803 } |
1141 return skb; | 1804 1805 if (__predict_true(mbuf != NULL)) { 1806 m_fixhdr(mbuf); 1807 mbuf->m_pkthdr.flowid = cqe_rx->rq_idx; 1808 M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE); 1809 } 1810 1811 return (mbuf); |
1142} 1143 1144/* Enable interrupt */ | 1812} 1813 1814/* Enable interrupt */ |
1145void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx) | 1815void 1816nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx) |
1146{ | 1817{ |
1147 u64 reg_val; | 1818 uint64_t reg_val; |
1148 1149 reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S); 1150 1151 switch (int_type) { 1152 case NICVF_INTR_CQ: | 1819 1820 reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S); 1821 1822 switch (int_type) { 1823 case NICVF_INTR_CQ: |
1153 reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); | 1824 reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT); |
1154 break; 1155 case NICVF_INTR_SQ: | 1825 break; 1826 case NICVF_INTR_SQ: |
1156 reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); | 1827 reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT); |
1157 break; 1158 case NICVF_INTR_RBDR: | 1828 break; 1829 case NICVF_INTR_RBDR: |
1159 reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); | 1830 reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT); |
1160 break; 1161 case NICVF_INTR_PKT_DROP: | 1831 break; 1832 case NICVF_INTR_PKT_DROP: |
1162 reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT); | 1833 reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT); |
1163 break; 1164 case NICVF_INTR_TCP_TIMER: | 1834 break; 1835 case NICVF_INTR_TCP_TIMER: |
1165 reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); | 1836 reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT); |
1166 break; 1167 case NICVF_INTR_MBOX: | 1837 break; 1838 case NICVF_INTR_MBOX: |
1168 reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT); | 1839 reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT); |
1169 break; 1170 case NICVF_INTR_QS_ERR: | 1840 break; 1841 case NICVF_INTR_QS_ERR: |
1171 reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); | 1842 reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT); |
1172 break; 1173 default: | 1843 break; 1844 default: |
1174 netdev_err(nic->netdev, | 1845 device_printf(nic->dev, |
1175 "Failed to enable interrupt: unknown type\n"); 1176 break; 1177 } 1178 1179 nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val); 1180} 1181 1182/* Disable interrupt */ | 1846 "Failed to enable interrupt: unknown type\n"); 1847 break; 1848 } 1849 1850 nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val); 1851} 1852 1853/* Disable interrupt */ |
1183void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx) | 1854void 1855nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx) |
1184{ | 1856{ |
1185 u64 reg_val = 0; | 1857 uint64_t reg_val = 0; |
1186 1187 switch (int_type) { 1188 case NICVF_INTR_CQ: | 1858 1859 switch (int_type) { 1860 case NICVF_INTR_CQ: |
1189 reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); | 1861 reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT); |
1190 break; 1191 case NICVF_INTR_SQ: | 1862 break; 1863 case NICVF_INTR_SQ: |
1192 reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); | 1864 reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT); |
1193 break; 1194 case NICVF_INTR_RBDR: | 1865 break; 1866 case NICVF_INTR_RBDR: |
1195 reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); | 1867 reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT); |
1196 break; 1197 case NICVF_INTR_PKT_DROP: | 1868 break; 1869 case NICVF_INTR_PKT_DROP: |
1198 reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT); | 1870 reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT); |
1199 break; 1200 case NICVF_INTR_TCP_TIMER: | 1871 break; 1872 case NICVF_INTR_TCP_TIMER: |
1201 reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); | 1873 reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT); |
1202 break; 1203 case NICVF_INTR_MBOX: | 1874 break; 1875 case NICVF_INTR_MBOX: |
1204 reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT); | 1876 reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT); |
1205 break; 1206 case NICVF_INTR_QS_ERR: | 1877 break; 1878 case NICVF_INTR_QS_ERR: |
1207 reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); | 1879 reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT); |
1208 break; 1209 default: | 1880 break; 1881 default: |
1210 netdev_err(nic->netdev, | 1882 device_printf(nic->dev, |
1211 "Failed to disable interrupt: unknown type\n"); 1212 break; 1213 } 1214 1215 nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val); 1216} 1217 1218/* Clear interrupt */ | 1883 "Failed to disable interrupt: unknown type\n"); 1884 break; 1885 } 1886 1887 nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val); 1888} 1889 1890/* Clear interrupt */ |
1219void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx) | 1891void 1892nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx) |
1220{ | 1893{ |
1221 u64 reg_val = 0; | 1894 uint64_t reg_val = 0; |
1222 1223 switch (int_type) { 1224 case NICVF_INTR_CQ: | 1895 1896 switch (int_type) { 1897 case NICVF_INTR_CQ: |
1225 reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); | 1898 reg_val = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT); |
1226 break; 1227 case NICVF_INTR_SQ: | 1899 break; 1900 case NICVF_INTR_SQ: |
1228 reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); | 1901 reg_val = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT); |
1229 break; 1230 case NICVF_INTR_RBDR: | 1902 break; 1903 case NICVF_INTR_RBDR: |
1231 reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); | 1904 reg_val = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT); |
1232 break; 1233 case NICVF_INTR_PKT_DROP: | 1905 break; 1906 case NICVF_INTR_PKT_DROP: |
1234 reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT); | 1907 reg_val = (1UL << NICVF_INTR_PKT_DROP_SHIFT); |
1235 break; 1236 case NICVF_INTR_TCP_TIMER: | 1908 break; 1909 case NICVF_INTR_TCP_TIMER: |
1237 reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); | 1910 reg_val = (1UL << NICVF_INTR_TCP_TIMER_SHIFT); |
1238 break; 1239 case NICVF_INTR_MBOX: | 1911 break; 1912 case NICVF_INTR_MBOX: |
1240 reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT); | 1913 reg_val = (1UL << NICVF_INTR_MBOX_SHIFT); |
1241 break; 1242 case NICVF_INTR_QS_ERR: | 1914 break; 1915 case NICVF_INTR_QS_ERR: |
1243 reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); | 1916 reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT); |
1244 break; 1245 default: | 1917 break; 1918 default: |
1246 netdev_err(nic->netdev, | 1919 device_printf(nic->dev, |
1247 "Failed to clear interrupt: unknown type\n"); 1248 break; 1249 } 1250 1251 nicvf_reg_write(nic, NIC_VF_INT, reg_val); 1252} 1253 1254/* Check if interrupt is enabled */ | 1920 "Failed to clear interrupt: unknown type\n"); 1921 break; 1922 } 1923 1924 nicvf_reg_write(nic, NIC_VF_INT, reg_val); 1925} 1926 1927/* Check if interrupt is enabled */ |
1255int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx) | 1928int 1929nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx) |
1256{ | 1930{ |
1257 u64 reg_val; 1258 u64 mask = 0xff; | 1931 uint64_t reg_val; 1932 uint64_t mask = 0xff; |
1259 1260 reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S); 1261 1262 switch (int_type) { 1263 case NICVF_INTR_CQ: | 1933 1934 reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S); 1935 1936 switch (int_type) { 1937 case NICVF_INTR_CQ: |
1264 mask = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); | 1938 mask = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT); |
1265 break; 1266 case NICVF_INTR_SQ: | 1939 break; 1940 case NICVF_INTR_SQ: |
1267 mask = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); | 1941 mask = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT); |
1268 break; 1269 case NICVF_INTR_RBDR: | 1942 break; 1943 case NICVF_INTR_RBDR: |
1270 mask = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); | 1944 mask = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT); |
1271 break; 1272 case NICVF_INTR_PKT_DROP: 1273 mask = NICVF_INTR_PKT_DROP_MASK; 1274 break; 1275 case NICVF_INTR_TCP_TIMER: 1276 mask = NICVF_INTR_TCP_TIMER_MASK; 1277 break; 1278 case NICVF_INTR_MBOX: 1279 mask = NICVF_INTR_MBOX_MASK; 1280 break; 1281 case NICVF_INTR_QS_ERR: 1282 mask = NICVF_INTR_QS_ERR_MASK; 1283 break; 1284 default: | 1945 break; 1946 case NICVF_INTR_PKT_DROP: 1947 mask = NICVF_INTR_PKT_DROP_MASK; 1948 break; 1949 case NICVF_INTR_TCP_TIMER: 1950 mask = NICVF_INTR_TCP_TIMER_MASK; 1951 break; 1952 case NICVF_INTR_MBOX: 1953 mask = NICVF_INTR_MBOX_MASK; 1954 break; 1955 case NICVF_INTR_QS_ERR: 1956 mask = NICVF_INTR_QS_ERR_MASK; 1957 break; 1958 default: |
1285 netdev_err(nic->netdev, | 1959 device_printf(nic->dev, |
1286 "Failed to check interrupt enable: unknown type\n"); 1287 break; 1288 } 1289 1290 return (reg_val & mask); 1291} 1292 | 1960 "Failed to check interrupt enable: unknown type\n"); 1961 break; 1962 } 1963 1964 return (reg_val & mask); 1965} 1966 |
1293void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx) | 1967void 1968nicvf_update_rq_stats(struct nicvf *nic, int rq_idx) |
1294{ 1295 struct rcv_queue *rq; 1296 1297#define GET_RQ_STATS(reg) \ 1298 nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\ 1299 (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)) 1300 1301 rq = &nic->qs->rq[rq_idx]; 1302 rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS); 1303 rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS); 1304} 1305 | 1969{ 1970 struct rcv_queue *rq; 1971 1972#define GET_RQ_STATS(reg) \ 1973 nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\ 1974 (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)) 1975 1976 rq = &nic->qs->rq[rq_idx]; 1977 rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS); 1978 rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS); 1979} 1980 |
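GET_RQ_STATS() composes a CSR address rather than an array index: base OR (queue number << NIC_Q_NUM_SHIFT) OR (stat register << 3), the final shift reflecting 8-byte register spacing. A runnable model of the address math with made-up constants (the real values live in nic_reg.h):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical values for illustration only. */
#define	DEMO_RQ_STAT_BASE	0x0500UL
#define	DEMO_Q_NUM_SHIFT	18
#define	DEMO_STATS_OCTS		0
#define	DEMO_STATS_PKTS		1

static uint64_t
demo_rq_stat_addr(int rq_idx, int reg)
{
	return (DEMO_RQ_STAT_BASE |
	    ((uint64_t)rq_idx << DEMO_Q_NUM_SHIFT) |
	    ((uint64_t)reg << 3));	/* 8-byte register stride */
}

int
main(void)
{
	printf("rq0 octets  @ 0x%jx\n",
	    (uintmax_t)demo_rq_stat_addr(0, DEMO_STATS_OCTS));
	printf("rq1 packets @ 0x%jx\n",
	    (uintmax_t)demo_rq_stat_addr(1, DEMO_STATS_PKTS));
	return (0);
}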
1306void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx) | 1981void 1982nicvf_update_sq_stats(struct nicvf *nic, int sq_idx) |
1307{ 1308 struct snd_queue *sq; 1309 1310#define GET_SQ_STATS(reg) \ 1311 nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\ 1312 (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)) 1313 1314 sq = &nic->qs->sq[sq_idx]; 1315 sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS); 1316 sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS); 1317} 1318 1319/* Check for errors in the receive cmp.queue entry */ | 1983{ 1984 struct snd_queue *sq; 1985 1986#define GET_SQ_STATS(reg) \ 1987 nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\ 1988 (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)) 1989 1990 sq = &nic->qs->sq[sq_idx]; 1991 sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS); 1992 sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS); 1993} 1994 1995/* Check for errors in the receive cmp.queue entry */ |
1320int nicvf_check_cqe_rx_errs(struct nicvf *nic, 1321 struct cmp_queue *cq, struct cqe_rx_t *cqe_rx) | 1996int 1997nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cmp_queue *cq, 1998 struct cqe_rx_t *cqe_rx) |
1322{ 1323 struct nicvf_hw_stats *stats = &nic->hw_stats; 1324 struct nicvf_drv_stats *drv_stats = &nic->drv_stats; 1325 1326 if (!cqe_rx->err_level && !cqe_rx->err_opcode) { 1327 drv_stats->rx_frames_ok++; | 1999{ 2000 struct nicvf_hw_stats *stats = &nic->hw_stats; 2001 struct nicvf_drv_stats *drv_stats = &nic->drv_stats; 2002 2003 if (!cqe_rx->err_level && !cqe_rx->err_opcode) { 2004 drv_stats->rx_frames_ok++; |
1328 return 0; | 2005 return (0); |
1329 } 1330 | 2006 } 2007 |
1331 if (netif_msg_rx_err(nic)) 1332 netdev_err(nic->netdev, 1333 "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n", 1334 nic->netdev->name, 1335 cqe_rx->err_level, cqe_rx->err_opcode); 1336 | |
1337 switch (cqe_rx->err_opcode) { 1338 case CQ_RX_ERROP_RE_PARTIAL: 1339 stats->rx_bgx_truncated_pkts++; 1340 break; 1341 case CQ_RX_ERROP_RE_JABBER: 1342 stats->rx_jabber_errs++; 1343 break; 1344 case CQ_RX_ERROP_RE_FCS: --- 59 unchanged lines hidden (view full) --- 1404 case CQ_RX_ERROP_L4_PCLP: 1405 stats->rx_l4_pclp++; 1406 break; 1407 case CQ_RX_ERROP_RBDR_TRUNC: 1408 stats->rx_truncated_pkts++; 1409 break; 1410 } 1411 | 2008 switch (cqe_rx->err_opcode) { 2009 case CQ_RX_ERROP_RE_PARTIAL: 2010 stats->rx_bgx_truncated_pkts++; 2011 break; 2012 case CQ_RX_ERROP_RE_JABBER: 2013 stats->rx_jabber_errs++; 2014 break; 2015 case CQ_RX_ERROP_RE_FCS: --- 59 unchanged lines hidden (view full) --- 2075 case CQ_RX_ERROP_L4_PCLP: 2076 stats->rx_l4_pclp++; 2077 break; 2078 case CQ_RX_ERROP_RBDR_TRUNC: 2079 stats->rx_truncated_pkts++; 2080 break; 2081 } 2082 |
1412 return 1; | 2083 return (1); |
1413} 1414 1415/* Check for errors in the send cmp.queue entry */ | 2084} 2085 2086/* Check for errors in the send cmp.queue entry */ |
1416int nicvf_check_cqe_tx_errs(struct nicvf *nic, 1417 struct cmp_queue *cq, struct cqe_send_t *cqe_tx) | 2087int 2088nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cmp_queue *cq, 2089 struct cqe_send_t *cqe_tx) |
1418{ 1419 struct cmp_queue_stats *stats = &cq->stats; 1420 1421 switch (cqe_tx->send_status) { 1422 case CQ_TX_ERROP_GOOD: 1423 stats->tx.good++; | 2090{ 2091 struct cmp_queue_stats *stats = &cq->stats; 2092 2093 switch (cqe_tx->send_status) { 2094 case CQ_TX_ERROP_GOOD: 2095 stats->tx.good++; |
1424 return 0; | 2096 return (0); |
1425 case CQ_TX_ERROP_DESC_FAULT: 1426 stats->tx.desc_fault++; 1427 break; 1428 case CQ_TX_ERROP_HDR_CONS_ERR: 1429 stats->tx.hdr_cons_err++; 1430 break; 1431 case CQ_TX_ERROP_SUBDC_ERR: 1432 stats->tx.subdesc_err++; --- 25 unchanged lines hidden (view full) --- 1458 case CQ_TX_ERROP_CK_OVERLAP: 1459 stats->tx.csum_overlap++; 1460 break; 1461 case CQ_TX_ERROP_CK_OFLOW: 1462 stats->tx.csum_overflow++; 1463 break; 1464 } 1465 | 2097 case CQ_TX_ERROP_DESC_FAULT: 2098 stats->tx.desc_fault++; 2099 break; 2100 case CQ_TX_ERROP_HDR_CONS_ERR: 2101 stats->tx.hdr_cons_err++; 2102 break; 2103 case CQ_TX_ERROP_SUBDC_ERR: 2104 stats->tx.subdesc_err++; --- 25 unchanged lines hidden (view full) --- 2130 case CQ_TX_ERROP_CK_OVERLAP: 2131 stats->tx.csum_overlap++; 2132 break; 2133 case CQ_TX_ERROP_CK_OFLOW: 2134 stats->tx.csum_overflow++; 2135 break; 2136 } 2137 |
1466 return 1; | 2138 return (1); |
1467} | 2139} |
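Both checkers follow one convention: return 0 for a clean entry, otherwise bump the matching error counter and return 1, leaving the caller to drop the work. A hypothetical RX completion fragment consuming that convention (the actual CQ handler is outside this hunk, and the if_input() call is an assumption about how the mbuf reaches the stack):

/* Hypothetical CQ processing fragment showing the return convention. */
static void
demo_handle_cqe_rx(struct nicvf *nic, struct cmp_queue *cq,
    struct cqe_rx_t *cqe_rx)
{
	struct mbuf *mbuf;

	/* Non-zero means the entry was bad and a counter was bumped. */
	if (nicvf_check_cqe_rx_errs(nic, cq, cqe_rx) != 0)
		return;

	mbuf = nicvf_get_rcv_mbuf(nic, cqe_rx);
	if (mbuf != NULL)
		if_input(nic->ifp, mbuf);
}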