nicvf_queues.c revision 289550
1/*
2 * Copyright (C) 2015 Cavium Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: head/sys/dev/vnic/nicvf_queues.c 289550 2015-10-18 21:39:15Z zbb $
27 *
28 */
29
30#include <linux/pci.h>
31#include <linux/netdevice.h>
32#include <linux/ip.h>
33#include <linux/etherdevice.h>
34#include <net/ip.h>
35#include <net/tso.h>
36
37#include "nic_reg.h"
38#include "nic.h"
39#include "q_struct.h"
40#include "nicvf_queues.h"
41
42struct rbuf_info {
43	struct page *page;
44	void	*data;
45	u64	offset;
46};
47
48#define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES))
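/*
 * Editor's sketch (not from the original sources): each receive fragment is
 * carved out of a page roughly as below, with NICVF_RCV_BUF_ALIGN_BYTES being
 * the 128-byte cache line mentioned in nicvf_alloc_rcv_buffer():
 *
 *   data                                  -> start of fragment (rinfo->data)
 *   data + rinfo->offset                  -> cache-aligned struct rbuf_info
 *   data + rinfo->offset
 *        + NICVF_RCV_BUF_ALIGN_BYTES      -> address handed to HW for DMA
 *
 * GET_RBUF_INFO() steps back NICVF_RCV_BUF_ALIGN_BYTES from the (virtual) DMA
 * address to recover the rbuf_info header.
 */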
49
50/* Poll a register for a specific value */
51static int nicvf_poll_reg(struct nicvf *nic, int qidx,
52			  u64 reg, int bit_pos, int bits, int val)
53{
54	u64 bit_mask;
55	u64 reg_val;
56	int timeout = 10;
57
58	bit_mask = (1ULL << bits) - 1;
59	bit_mask = (bit_mask << bit_pos);
60
61	while (timeout) {
62		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
63		if (((reg_val & bit_mask) >> bit_pos) == val)
64			return 0;
65		usleep_range(1000, 2000);
66		timeout--;
67	}
68	netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
69	return 1;
70}
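/*
 * Editor's usage note: callers pass the bit position and width of the field
 * to watch, e.g. nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2,
 * 0x00) in nicvf_reclaim_rbdr() waits roughly 10-20 ms (10 iterations of
 * usleep_range(1000, 2000)) for the two-bit FIFO state field to read as zero.
 */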
71
72/* Allocate memory for a queue's descriptors */
73static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
74				  int q_len, int desc_size, int align_bytes)
75{
76	dmem->q_len = q_len;
77	dmem->size = (desc_size * q_len) + align_bytes;
78	/* Save address, need it while freeing */
79	dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
80						&dmem->dma, GFP_KERNEL);
81	if (!dmem->unalign_base)
82		return -ENOMEM;
83
84	/* Align memory address for 'align_bytes' */
85	dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
86	dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
87	return 0;
88}
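/*
 * Editor's worked example (hypothetical numbers): with desc_size = 16,
 * q_len = 8192 and align_bytes = 512, dmem->size is 16 * 8192 + 512 = 131584
 * bytes.  Assuming NICVF_ALIGNED_ADDR() rounds up to the next align_bytes
 * boundary, a DMA address of 0x1000100 yields phys_base = 0x1000200 and
 * base = unalign_base + 0x100.  Freeing always uses the saved
 * unalign_base/dma pair, never the aligned copies.
 */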
89
90/* Free queue's descriptor memory */
91static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
92{
93	if (!dmem)
94		return;
95
96	dma_free_coherent(&nic->pdev->dev, dmem->size,
97			  dmem->unalign_base, dmem->dma);
98	dmem->unalign_base = NULL;
99	dmem->base = NULL;
100}
101
102/* Allocate buffer for packet reception.
103 * HW returns the memory address where the packet was DMA'ed, not a pointer
104 * into the RBDR ring, so save the buffer address at the start of the
105 * fragment and align the start address to a cache-aligned address.
106 */
107static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
108					 u32 buf_len, u64 **rbuf)
109{
110	u64 data;
111	struct rbuf_info *rinfo;
112	int order = get_order(buf_len);
113
114	/* Check if request can be accommodated in the previously allocated page */
115	if (nic->rb_page) {
116		if ((nic->rb_page_offset + buf_len + buf_len) >
117		    (PAGE_SIZE << order)) {
118			nic->rb_page = NULL;
119		} else {
120			nic->rb_page_offset += buf_len;
121			get_page(nic->rb_page);
122		}
123	}
124
125	/* Allocate a new page */
126	if (!nic->rb_page) {
127		nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
128					   order);
129		if (!nic->rb_page) {
130			netdev_err(nic->netdev,
131				   "Failed to allocate new rcv buffer\n");
132			return -ENOMEM;
133		}
134		nic->rb_page_offset = 0;
135	}
136
137	data = (u64)page_address(nic->rb_page) + nic->rb_page_offset;
138
139	/* Align buffer addr to a cache line, i.e. 128 bytes */
140	rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data));
141	/* Save page address for refcount updates */
142	rinfo->page = nic->rb_page;
143	/* Store start address for later retrieval */
144	rinfo->data = (void *)data;
145	/* Store alignment offset */
146	rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data);
147
148	data += rinfo->offset;
149
150	/* Give next aligned address to hw for DMA */
151	*rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES);
152	return 0;
153}
154
155/* Retrieve actual buffer start address and build skb for received packet */
156static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
157					   u64 rb_ptr, int len)
158{
159	struct sk_buff *skb;
160	struct rbuf_info *rinfo;
161
162	rb_ptr = (u64)phys_to_virt(rb_ptr);
163	/* Get buffer start address and alignment offset */
164	rinfo = GET_RBUF_INFO(rb_ptr);
165
166	/* Now build an skb to give to stack */
167	skb = build_skb(rinfo->data, RCV_FRAG_LEN);
168	if (!skb) {
169		put_page(rinfo->page);
170		return NULL;
171	}
172
173	/* Set correct skb->data */
174	skb_reserve(skb, rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES);
175
176	prefetch((void *)rb_ptr);
177	return skb;
178}
179
180/* Allocate RBDR ring and populate receive buffers */
181static int  nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
182			    int ring_len, int buf_size)
183{
184	int idx;
185	u64 *rbuf;
186	struct rbdr_entry_t *desc;
187	int err;
188
189	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
190				     sizeof(struct rbdr_entry_t),
191				     NICVF_RCV_BUF_ALIGN_BYTES);
192	if (err)
193		return err;
194
195	rbdr->desc = rbdr->dmem.base;
196	/* Buffer size has to be in multiples of 128 bytes */
197	rbdr->dma_size = buf_size;
198	rbdr->enable = true;
199	rbdr->thresh = RBDR_THRESH;
200
201	nic->rb_page = NULL;
202	for (idx = 0; idx < ring_len; idx++) {
203		err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
204					     &rbuf);
205		if (err)
206			return err;
207
208		desc = GET_RBDR_DESC(rbdr, idx);
209		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
210	}
211	return 0;
212}
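/*
 * Editor's note: RBDR descriptors store the buffer's physical address
 * right-shifted by NICVF_RCV_BUF_ALIGN, i.e. in units of the receive buffer
 * alignment (128 bytes, assuming NICVF_RCV_BUF_ALIGN == 7); the reverse
 * shift is applied when the buffers are released in nicvf_free_rbdr().
 */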
213
214/* Free RBDR ring and its receive buffers */
215static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
216{
217	int head, tail;
218	u64 buf_addr;
219	struct rbdr_entry_t *desc;
220	struct rbuf_info *rinfo;
221
222	if (!rbdr)
223		return;
224
225	rbdr->enable = false;
226	if (!rbdr->dmem.base)
227		return;
228
229	head = rbdr->head;
230	tail = rbdr->tail;
231
232	/* Release the receive buffer pages */
233	while (head != tail) {
234		desc = GET_RBDR_DESC(rbdr, head);
235		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
236		rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
237		put_page(rinfo->page);
238		head++;
239		head &= (rbdr->dmem.q_len - 1);
240	}
241	/* Release the buffer of the tail desc */
242	desc = GET_RBDR_DESC(rbdr, tail);
243	buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
244	rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
245	put_page(rinfo->page);
246
247	/* Free RBDR ring */
248	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
249}
250
251/* Refill receive buffer descriptors with new buffers */
253static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
254{
255	struct queue_set *qs = nic->qs;
256	int rbdr_idx = qs->rbdr_cnt;
257	int tail, qcount;
258	int refill_rb_cnt;
259	struct rbdr *rbdr;
260	struct rbdr_entry_t *desc;
261	u64 *rbuf;
262	int new_rb = 0;
263
264refill:
265	if (!rbdr_idx)
266		return;
267	rbdr_idx--;
268	rbdr = &qs->rbdr[rbdr_idx];
269	/* Check if it's enabled */
270	if (!rbdr->enable)
271		goto next_rbdr;
272
273	/* Get the number of descriptors to be refilled */
274	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
275	qcount &= 0x7FFFF;
276	/* Doorbell can be rung with at most ring size minus 1 */
277	if (qcount >= (qs->rbdr_len - 1))
278		goto next_rbdr;
279	else
280		refill_rb_cnt = qs->rbdr_len - qcount - 1;
281
282	/* Start filling descs from tail */
283	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
284	while (refill_rb_cnt) {
285		tail++;
286		tail &= (rbdr->dmem.q_len - 1);
287
288		if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
289			break;
290
291		desc = GET_RBDR_DESC(rbdr, tail);
292		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
293		refill_rb_cnt--;
294		new_rb++;
295	}
296
297	/* make sure all memory stores are done before ringing doorbell */
298	smp_wmb();
299
300	/* Check if buffer allocation failed */
301	if (refill_rb_cnt)
302		nic->rb_alloc_fail = true;
303	else
304		nic->rb_alloc_fail = false;
305
306	/* Notify HW */
307	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
308			      rbdr_idx, new_rb);
309next_rbdr:
310	/* Re-enable RBDR interrupts only if buffer allocation succeeded */
311	if (!nic->rb_alloc_fail && rbdr->enable)
312		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
313
314	if (rbdr_idx)
315		goto refill;
316}
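/*
 * Editor's note (illustrative, hypothetical sizes): STATUS0 reports how many
 * descriptors the RBDR currently holds.  With rbdr_len = 8192 and
 * qcount = 100, refill_rb_cnt = 8192 - 100 - 1 = 8091, i.e. the ring is never
 * filled completely because the doorbell may only advertise up to ring size
 * minus one buffers.  The final doorbell write tells HW how many descriptors
 * (new_rb) were actually added starting at the tail.
 */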
317
318/* Allocate rcv buffers in non-atomic context for a better chance of success */
319void nicvf_rbdr_work(struct work_struct *work)
320{
321	struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);
322
323	nicvf_refill_rbdr(nic, GFP_KERNEL);
324	if (nic->rb_alloc_fail)
325		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
326	else
327		nic->rb_work_scheduled = false;
328}
329
330/* In Softirq context, alloc rcv buffers in atomic mode */
331void nicvf_rbdr_task(unsigned long data)
332{
333	struct nicvf *nic = (struct nicvf *)data;
334
335	nicvf_refill_rbdr(nic, GFP_ATOMIC);
336	if (nic->rb_alloc_fail) {
337		nic->rb_work_scheduled = true;
338		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
339	}
340}
341
342/* Initialize completion queue */
343static int nicvf_init_cmp_queue(struct nicvf *nic,
344				struct cmp_queue *cq, int q_len)
345{
346	int err;
347
348	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
349				     NICVF_CQ_BASE_ALIGN_BYTES);
350	if (err)
351		return err;
352
353	cq->desc = cq->dmem.base;
354	cq->thresh = CMP_QUEUE_CQE_THRESH;
355	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
356
357	return 0;
358}
359
360static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
361{
362	if (!cq)
363		return;
364	if (!cq->dmem.base)
365		return;
366
367	nicvf_free_q_desc_mem(nic, &cq->dmem);
368}
369
370/* Initialize transmit queue */
371static int nicvf_init_snd_queue(struct nicvf *nic,
372				struct snd_queue *sq, int q_len)
373{
374	int err;
375
376	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
377				     NICVF_SQ_BASE_ALIGN_BYTES);
378	if (err)
379		return err;
380
381	sq->desc = sq->dmem.base;
382	sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
383	if (!sq->skbuff)
384		return -ENOMEM;
385	sq->head = 0;
386	sq->tail = 0;
387	atomic_set(&sq->free_cnt, q_len - 1);
388	sq->thresh = SND_QUEUE_THRESH;
389
390	/* Preallocate memory for TSO segment's header */
391	sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
392					  q_len * TSO_HEADER_SIZE,
393					  &sq->tso_hdrs_phys, GFP_KERNEL);
394	if (!sq->tso_hdrs)
395		return -ENOMEM;
396
397	return 0;
398}
399
400static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
401{
402	if (!sq)
403		return;
404	if (!sq->dmem.base)
405		return;
406
407	if (sq->tso_hdrs)
408		dma_free_coherent(&nic->pdev->dev,
409				  sq->dmem.q_len * TSO_HEADER_SIZE,
410				  sq->tso_hdrs, sq->tso_hdrs_phys);
411
412	kfree(sq->skbuff);
413	nicvf_free_q_desc_mem(nic, &sq->dmem);
414}
415
416static void nicvf_reclaim_snd_queue(struct nicvf *nic,
417				    struct queue_set *qs, int qidx)
418{
419	/* Disable send queue */
420	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
421	/* Check if SQ is stopped */
422	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
423		return;
424	/* Reset send queue */
425	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
426}
427
428static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
429				    struct queue_set *qs, int qidx)
430{
431	union nic_mbx mbx = {};
432
433	/* Make sure all packets in the pipeline are written back into mem */
434	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
435	nicvf_send_msg_to_pf(nic, &mbx);
436}
437
438static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
439				    struct queue_set *qs, int qidx)
440{
441	/* Disable timer threshold (doesn't get reset upon CQ reset) */
442	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
443	/* Disable completion queue */
444	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
445	/* Reset completion queue */
446	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
447}
448
449static void nicvf_reclaim_rbdr(struct nicvf *nic,
450			       struct rbdr *rbdr, int qidx)
451{
452	u64 tmp, fifo_state;
453	int timeout = 10;
454
455	/* Save head and tail pointers for freeing up buffers */
456	rbdr->head = nicvf_queue_reg_read(nic,
457					  NIC_QSET_RBDR_0_1_HEAD,
458					  qidx) >> 3;
459	rbdr->tail = nicvf_queue_reg_read(nic,
460					  NIC_QSET_RBDR_0_1_TAIL,
461					  qidx) >> 3;
462
463	/* If RBDR FIFO is in 'FAIL' state then do a reset first
464	 * before reclaiming.
465	 */
466	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
467	if (((fifo_state >> 62) & 0x03) == 0x3)
468		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
469				      qidx, NICVF_RBDR_RESET);
470
471	/* Disable RBDR */
472	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
473	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
474		return;
475	while (1) {
476		tmp = nicvf_queue_reg_read(nic,
477					   NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
478					   qidx);
479		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
480			break;
481		usleep_range(1000, 2000);
482		timeout--;
483		if (!timeout) {
484			netdev_err(nic->netdev,
485				   "Failed polling on prefetch status\n");
486			return;
487		}
488	}
489	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
490			      qidx, NICVF_RBDR_RESET);
491
492	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
493		return;
494	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
495	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
496		return;
497}
498
499void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
500{
501	u64 rq_cfg;
502#ifdef VNIC_MULTI_QSET_SUPPORT
503	int sqs = 0;
504#endif
505
506	rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);
507
508	/* Enable or disable stripping of the first (outer) VLAN tag */
509	if (features & NETIF_F_HW_VLAN_CTAG_RX)
510		rq_cfg |= (1ULL << 25);
511	else
512		rq_cfg &= ~(1ULL << 25);
513	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
514
515#ifdef VNIC_MULTI_QSET_SUPPORT
516	/* Configure Secondary Qsets, if any */
517	for (sqs = 0; sqs < nic->sqs_count; sqs++)
518		if (nic->snicvf[sqs])
519			nicvf_queue_reg_write(nic->snicvf[sqs],
520					      NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
521#endif
522}
523
524/* Configures receive queue */
525static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
526				   int qidx, bool enable)
527{
528	union nic_mbx mbx = {};
529	struct rcv_queue *rq;
531	struct rq_cfg rq_cfg;
532
533	rq = &qs->rq[qidx];
534	rq->enable = enable;
535
536	/* Disable receive queue */
537	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
538
539	if (!rq->enable) {
540		nicvf_reclaim_rcv_queue(nic, qs, qidx);
541		return;
542	}
543
544	rq->cq_qs = qs->vnic_id;
545	rq->cq_idx = qidx;
546	rq->start_rbdr_qs = qs->vnic_id;
547	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
548	rq->cont_rbdr_qs = qs->vnic_id;
549	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
550	/* All writes of RBDR data to be loaded into the L2 cache as well */
551	rq->caching = 1;
552
553	/* Send a mailbox msg to PF to config RQ */
554	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
555	mbx.rq.qs_num = qs->vnic_id;
556	mbx.rq.rq_num = qidx;
557	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
558			  (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
559			  (rq->cont_qs_rbdr_idx << 8) |
560			  (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
561	nicvf_send_msg_to_pf(nic, &mbx);
562
563	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
564	mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
565	nicvf_send_msg_to_pf(nic, &mbx);
566
567	/* RQ drop config
568	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
569	 */
570	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
571	mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
572	nicvf_send_msg_to_pf(nic, &mbx);
573
574	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
575	if (!nic->sqs_mode)
576		nicvf_config_vlan_stripping(nic, nic->netdev->features);
577
578	/* Enable Receive queue */
579	rq_cfg.ena = 1;
580	rq_cfg.tcp_ena = 0;
581	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
582}
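/*
 * Editor's sketch of the mbx.rq.cfg encoding used above, derived from the
 * shifts in this function rather than from a datasheet:
 *
 *   bit  26    : caching
 *   bits 25:19 : cq_qs
 *   bits 18:16 : cq_idx
 *   bits 15:9  : cont_rbdr_qs
 *   bit  8     : cont_qs_rbdr_idx
 *   bits 7:1   : start_rbdr_qs
 *   bit  0     : start_qs_rbdr_idx
 *
 * Field widths are inferred from the gaps between shifts and may be narrower
 * in hardware.
 */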
583
584/* Configures completion queue */
585void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
586			    int qidx, bool enable)
587{
588	struct cmp_queue *cq;
589	struct cq_cfg cq_cfg;
590
591	cq = &qs->cq[qidx];
592	cq->enable = enable;
593
594	if (!cq->enable) {
595		nicvf_reclaim_cmp_queue(nic, qs, qidx);
596		return;
597	}
598
599	/* Reset completion queue */
600	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
604
605	spin_lock_init(&cq->lock);
606	/* Set completion queue base address */
607	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
608			      qidx, (u64)(cq->dmem.phys_base));
609
610	/* Enable Completion queue */
611	cq_cfg.ena = 1;
612	cq_cfg.reset = 0;
613	cq_cfg.caching = 0;
614	cq_cfg.qsize = CMP_QSIZE;
615	cq_cfg.avg_con = 0;
616	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);
617
618	/* Set threshold value for interrupt generation */
619	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
620	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
621			      qidx, nic->cq_coalesce_usecs);
622}
623
624/* Configures transmit queue */
625static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
626				   int qidx, bool enable)
627{
628	union nic_mbx mbx = {};
629	struct snd_queue *sq;
630	struct sq_cfg sq_cfg;
631
632	sq = &qs->sq[qidx];
633	sq->enable = enable;
634
635	if (!sq->enable) {
636		nicvf_reclaim_snd_queue(nic, qs, qidx);
637		return;
638	}
639
640	/* Reset send queue */
641	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
642
643	sq->cq_qs = qs->vnic_id;
644	sq->cq_idx = qidx;
645
646	/* Send a mailbox msg to PF to config SQ */
647	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
648	mbx.sq.qs_num = qs->vnic_id;
649	mbx.sq.sq_num = qidx;
650	mbx.sq.sqs_mode = nic->sqs_mode;
651	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
652	nicvf_send_msg_to_pf(nic, &mbx);
653
654	/* Set queue base address */
655	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
656			      qidx, (u64)(sq->dmem.phys_base));
657
658	/* Enable send queue & set queue size */
659	sq_cfg.ena = 1;
660	sq_cfg.reset = 0;
661	sq_cfg.ldwb = 0;
662	sq_cfg.qsize = SND_QSIZE;
663	sq_cfg.tstmp_bgx_intf = 0;
664	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
665
666	/* Set threshold value for interrupt generation */
667	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
668}
669
670/* Configures receive buffer descriptor ring */
671static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
672			      int qidx, bool enable)
673{
674	struct rbdr *rbdr;
675	struct rbdr_cfg rbdr_cfg;
676
677	rbdr = &qs->rbdr[qidx];
678	nicvf_reclaim_rbdr(nic, rbdr, qidx);
679	if (!enable)
680		return;
681
682	/* Set descriptor base address */
683	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
684			      qidx, (u64)(rbdr->dmem.phys_base));
685
686	/* Enable RBDR & set queue size */
687	/* Buffer size should be in multiples of 128 bytes */
688	rbdr_cfg.ena = 1;
689	rbdr_cfg.reset = 0;
690	rbdr_cfg.ldwb = 0;
691	rbdr_cfg.qsize = RBDR_SIZE;
692	rbdr_cfg.avg_con = 0;
693	rbdr_cfg.lines = rbdr->dma_size / 128;
694	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
695			      qidx, *(u64 *)&rbdr_cfg);
696
697	/* Notify HW */
698	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
699			      qidx, qs->rbdr_len - 1);
700
701	/* Set threshold value for interrupt generation */
702	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
703			      qidx, rbdr->thresh - 1);
704}
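/*
 * Editor's note: rbdr_cfg.lines expresses the receive buffer size in 128-byte
 * units (dma_size must be a multiple of 128, see nicvf_init_rbdr()), so a
 * hypothetical 2048-byte buffer would be programmed as 16 lines.
 */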
705
706/* Requests PF to assign and enable Qset */
707void nicvf_qset_config(struct nicvf *nic, bool enable)
708{
709	union nic_mbx mbx = {};
710	struct queue_set *qs = nic->qs;
711	struct qs_cfg *qs_cfg;
712
713	if (!qs) {
714		netdev_warn(nic->netdev,
715			    "Qset is still not allocated, don't init queues\n");
716		return;
717	}
718
719	qs->enable = enable;
720	qs->vnic_id = nic->vf_id;
721
722	/* Send a mailbox msg to PF to config Qset */
723	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
724	mbx.qs.num = qs->vnic_id;
725#ifdef VNIC_MULTI_QSET_SUPPORT
726	mbx.qs.sqs_count = nic->sqs_count;
727#endif
728
729	mbx.qs.cfg = 0;
730	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
731	if (qs->enable) {
732		qs_cfg->ena = 1;
733#ifdef __BIG_ENDIAN
734		qs_cfg->be = 1;
735#endif
736		qs_cfg->vnic = qs->vnic_id;
737	}
738	nicvf_send_msg_to_pf(nic, &mbx);
739}
740
741static void nicvf_free_resources(struct nicvf *nic)
742{
743	int qidx;
744	struct queue_set *qs = nic->qs;
745
746	/* Free receive buffer descriptor ring */
747	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
748		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
749
750	/* Free completion queue */
751	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
752		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
753
754	/* Free send queue */
755	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
756		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
757}
758
759static int nicvf_alloc_resources(struct nicvf *nic)
760{
761	int qidx;
762	struct queue_set *qs = nic->qs;
763
764	/* Alloc receive buffer descriptor ring */
765	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
766		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
767				    DMA_BUFFER_LEN))
768			goto alloc_fail;
769	}
770
771	/* Alloc send queue */
772	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
773		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
774			goto alloc_fail;
775	}
776
777	/* Alloc completion queue */
778	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
779		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
780			goto alloc_fail;
781	}
782
783	return 0;
784alloc_fail:
785	nicvf_free_resources(nic);
786	return -ENOMEM;
787}
788
789int nicvf_set_qset_resources(struct nicvf *nic)
790{
791	struct queue_set *qs;
792
793	qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
794	if (!qs)
795		return -ENOMEM;
796	nic->qs = qs;
797
798	/* Set count of each queue */
799	qs->rbdr_cnt = RBDR_CNT;
800#ifdef VNIC_RSS_SUPPORT
801	qs->rq_cnt = RCV_QUEUE_CNT;
802#else
803	qs->rq_cnt = 1;
804#endif
805	qs->sq_cnt = SND_QUEUE_CNT;
806	qs->cq_cnt = CMP_QUEUE_CNT;
807
808	/* Set queue lengths */
809	qs->rbdr_len = RCV_BUF_COUNT;
810	qs->sq_len = SND_QUEUE_LEN;
811	qs->cq_len = CMP_QUEUE_LEN;
812
813	nic->rx_queues = qs->rq_cnt;
814	nic->tx_queues = qs->sq_cnt;
815
816	return 0;
817}
818
819int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
820{
821	bool disable = false;
822	struct queue_set *qs = nic->qs;
823	int qidx;
824
825	if (!qs)
826		return 0;
827
828	if (enable) {
829		if (nicvf_alloc_resources(nic))
830			return -ENOMEM;
831
832		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
833			nicvf_snd_queue_config(nic, qs, qidx, enable);
834		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
835			nicvf_cmp_queue_config(nic, qs, qidx, enable);
836		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
837			nicvf_rbdr_config(nic, qs, qidx, enable);
838		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
839			nicvf_rcv_queue_config(nic, qs, qidx, enable);
840	} else {
841		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
842			nicvf_rcv_queue_config(nic, qs, qidx, disable);
843		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
844			nicvf_rbdr_config(nic, qs, qidx, disable);
845		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
846			nicvf_snd_queue_config(nic, qs, qidx, disable);
847		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
848			nicvf_cmp_queue_config(nic, qs, qidx, disable);
849
850		nicvf_free_resources(nic);
851	}
852
853	return 0;
854}
855
856/* Get a free descriptor from the SQ
857 * and return its index (descriptor number)
858 */
859static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
860{
861	int qentry;
862
863	qentry = sq->tail;
864	atomic_sub(desc_cnt, &sq->free_cnt);
865	sq->tail += desc_cnt;
866	sq->tail &= (sq->dmem.q_len - 1);
867
868	return qentry;
869}
870
871/* Free descriptor back to SQ for future use */
872void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
873{
874	atomic_add(desc_cnt, &sq->free_cnt);
875	sq->head += desc_cnt;
876	sq->head &= (sq->dmem.q_len - 1);
877}
878
879static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
880{
881	qentry++;
882	qentry &= (sq->dmem.q_len - 1);
883	return qentry;
884}
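/*
 * Editor's note (illustrative): q_len is assumed to be a power of two, so
 * wrap-around is done with a mask.  With q_len = 1024, reserving three
 * descriptors at tail = 1022 returns qentry 1022 and leaves
 * tail = (1022 + 3) & 1023 = 1, while free_cnt drops by three and is credited
 * back via nicvf_put_sq_desc() when the descriptors are retired (see
 * nicvf_sq_free_used_descs()).
 */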
885
886void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
887{
888	u64 sq_cfg;
889
890	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
891	sq_cfg |= NICVF_SQ_EN;
892	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
893	/* Ring doorbell so that H/W restarts processing SQEs */
894	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
895}
896
897void nicvf_sq_disable(struct nicvf *nic, int qidx)
898{
899	u64 sq_cfg;
900
901	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
902	sq_cfg &= ~NICVF_SQ_EN;
903	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
904}
905
906void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
907			      int qidx)
908{
909	u64 head, tail;
910	struct sk_buff *skb;
911	struct nicvf *nic = netdev_priv(netdev);
912	struct sq_hdr_subdesc *hdr;
913
914	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
915	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
916	while (sq->head != head) {
917		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
918		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
919			nicvf_put_sq_desc(sq, 1);
920			continue;
921		}
922		skb = (struct sk_buff *)sq->skbuff[sq->head];
923		if (skb)
924			dev_kfree_skb_any(skb);
925		atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
926		atomic64_add(hdr->tot_len,
927			     (atomic64_t *)&netdev->stats.tx_bytes);
928		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
929	}
930}
931
932/* Get the number of SQ descriptors needed to xmit this skb */
933static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
934{
935	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
936
937	if (skb_shinfo(skb)->gso_size) {
938		subdesc_cnt = nicvf_tso_count_subdescs(skb);
939		return subdesc_cnt;
940	}
941
942	if (skb_shinfo(skb)->nr_frags)
943		subdesc_cnt += skb_shinfo(skb)->nr_frags;
944
945	return subdesc_cnt;
946}
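/*
 * Editor's example (assuming MIN_SQ_DESC_PER_PKT_XMIT covers the HEADER plus
 * one GATHER subdescriptor): a non-TSO skb with a linear area and two page
 * fragments needs MIN_SQ_DESC_PER_PKT_XMIT + 2 subdescriptors, while a TSO
 * skb is sized separately via nicvf_tso_count_subdescs().
 */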
947
948/* Add SQ HEADER subdescriptor.
949 * First subdescriptor for every send descriptor.
950 */
951static inline void
952nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
953			 int subdesc_cnt, struct sk_buff *skb, int len)
954{
955	int proto;
956	struct sq_hdr_subdesc *hdr;
957
958	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
959	sq->skbuff[qentry] = (u64)skb;
960
961	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
962	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
963	/* Enable notification via CQE after processing SQE */
964	hdr->post_cqe = 1;
965	/* Number of subdescriptors following this one */
966	hdr->subdesc_cnt = subdesc_cnt;
967	hdr->tot_len = len;
968
969	/* Offload checksum calculation to HW */
970	if (skb->ip_summed == CHECKSUM_PARTIAL) {
971		hdr->csum_l3 = 1; /* Enable IP csum calculation */
972		hdr->l3_offset = skb_network_offset(skb);
973		hdr->l4_offset = skb_transport_offset(skb);
974
975		proto = ip_hdr(skb)->protocol;
976		switch (proto) {
977		case IPPROTO_TCP:
978			hdr->csum_l4 = SEND_L4_CSUM_TCP;
979			break;
980		case IPPROTO_UDP:
981			hdr->csum_l4 = SEND_L4_CSUM_UDP;
982			break;
983		case IPPROTO_SCTP:
984			hdr->csum_l4 = SEND_L4_CSUM_SCTP;
985			break;
986		}
987	}
988}
989
990/* SQ GATHER subdescriptor
991 * Must follow HDR descriptor
992 */
993static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
994					       int size, u64 data)
995{
996	struct sq_gather_subdesc *gather;
997
998	qentry &= (sq->dmem.q_len - 1);
999	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
1000
1001	memset(gather, 0, SND_QUEUE_DESC_SIZE);
1002	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
1003	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
1004	gather->size = size;
1005	gather->addr = data;
1006}
1007
1008/* Append an skb to a SQ for packet transfer. */
1009int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
1010{
1011	int i, size;
1012	int subdesc_cnt;
1013	int sq_num, qentry;
1014	struct queue_set *qs;
1015	struct snd_queue *sq;
1016
1017	sq_num = skb_get_queue_mapping(skb);
1018#ifdef VNIC_MULTI_QSET_SUPPORT
1019	if (sq_num >= MAX_SND_QUEUES_PER_QS) {
1020		/* Get secondary Qset's SQ structure */
1021		i = sq_num / MAX_SND_QUEUES_PER_QS;
1022		if (!nic->snicvf[i - 1]) {
1023			netdev_warn(nic->netdev,
1024				    "Secondary Qset#%d's ptr not initialized\n",
1025				    i - 1);
1026			return 1;
1027		}
1028		nic = (struct nicvf *)nic->snicvf[i - 1];
1029		sq_num = sq_num % MAX_SND_QUEUES_PER_QS;
1030	}
1031#endif
1032
1033	qs = nic->qs;
1034	sq = &qs->sq[sq_num];
1035
1036	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
1037	if (subdesc_cnt > atomic_read(&sq->free_cnt))
1038		goto append_fail;
1039
1040	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
1041
1042	/* Add SQ header subdesc */
1043	nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len);
1044
1045	/* Add SQ gather subdescs */
1046	qentry = nicvf_get_nxt_sqentry(sq, qentry);
1047	size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
1048	nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));
1049
1050	/* Check for scattered buffer */
1051	if (!skb_is_nonlinear(skb))
1052		goto doorbell;
1053
1054	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1055		const struct skb_frag_struct *frag;
1056
1057		frag = &skb_shinfo(skb)->frags[i];
1058
1059		qentry = nicvf_get_nxt_sqentry(sq, qentry);
1060		size = skb_frag_size(frag);
1061		nicvf_sq_add_gather_subdesc(sq, qentry, size,
1062					    virt_to_phys(
1063					    skb_frag_address(frag)));
1064	}
1065
1066doorbell:
1067	/* make sure all memory stores are done before ringing doorbell */
1068	smp_wmb();
1069
1070	/* Inform HW to xmit new packet */
1071	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
1072			      sq_num, subdesc_cnt);
1073	return 1;
1074
1075append_fail:
1076	/* Use original PCI dev for debug log */
1077	nic = nic->pnicvf;
1078	netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
1079	return 0;
1080}
1081
1082static inline unsigned frag_num(unsigned i)
1083{
1084#ifdef __BIG_ENDIAN
1085	return (i & ~3) + 3 - (i & 3);
1086#else
1087	return i;
1088#endif
1089}
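/*
 * Editor's note: on big-endian hosts frag_num() compensates for 16-bit length
 * entries packed into 64-bit CQE words, mapping indices 0,1,2,3,4,5,... to
 * 3,2,1,0,7,6,... within each group of four; on little-endian hosts it is the
 * identity.
 */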
1090
1091/* Returns SKB for a received packet */
1092struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1093{
1094	int frag;
1095	int payload_len = 0;
1096	struct sk_buff *skb = NULL;
1097	struct sk_buff *skb_frag = NULL;
1098	struct sk_buff *prev_frag = NULL;
1099	u16 *rb_lens = NULL;
1100	u64 *rb_ptrs = NULL;
1101
1102	rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
1103	rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
1104
1105	netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
1106		   __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
1107
1108	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
1109		payload_len = rb_lens[frag_num(frag)];
1110		if (!frag) {
1111			/* First fragment */
1112			skb = nicvf_rb_ptr_to_skb(nic,
1113						  *rb_ptrs - cqe_rx->align_pad,
1114						  payload_len);
1115			if (!skb)
1116				return NULL;
1117			skb_reserve(skb, cqe_rx->align_pad);
1118			skb_put(skb, payload_len);
1119		} else {
1120			/* Add fragments */
1121			skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs,
1122						       payload_len);
1123			if (!skb_frag) {
1124				dev_kfree_skb(skb);
1125				return NULL;
1126			}
1127
1128			if (!skb_shinfo(skb)->frag_list)
1129				skb_shinfo(skb)->frag_list = skb_frag;
1130			else
1131				prev_frag->next = skb_frag;
1132
1133			prev_frag = skb_frag;
1134			skb->len += payload_len;
1135			skb->data_len += payload_len;
1136			skb_frag->len = payload_len;
1137		}
1138		/* Next buffer pointer */
1139		rb_ptrs++;
1140	}
1141	return skb;
1142}
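/*
 * Editor's sketch, inferred from the offsets above rather than a datasheet:
 * the receive CQE is addressed as 64-bit words, with the per-fragment length
 * array (u16 entries, hence frag_num()) starting at word 3 and the
 * per-fragment buffer pointer array starting at word 6.  rb_cnt, rb0_ptr,
 * rb0_sz and align_pad come from the parsed cqe_rx_t fields.
 */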
1143
1144/* Enable interrupt */
1145void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
1146{
1147	u64 reg_val;
1148
1149	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
1150
1151	switch (int_type) {
1152	case NICVF_INTR_CQ:
1153		reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
1154		break;
1155	case NICVF_INTR_SQ:
1156		reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
1157		break;
1158	case NICVF_INTR_RBDR:
1159		reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
1160		break;
1161	case NICVF_INTR_PKT_DROP:
1162		reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
1163		break;
1164	case NICVF_INTR_TCP_TIMER:
1165		reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
1166		break;
1167	case NICVF_INTR_MBOX:
1168		reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
1169		break;
1170	case NICVF_INTR_QS_ERR:
1171		reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
1172		break;
1173	default:
1174		netdev_err(nic->netdev,
1175			   "Failed to enable interrupt: unknown type\n");
1176		break;
1177	}
1178
1179	nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
1180}
1181
1182/* Disable interrupt */
1183void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
1184{
1185	u64 reg_val = 0;
1186
1187	switch (int_type) {
1188	case NICVF_INTR_CQ:
1189		reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
1190		break;
1191	case NICVF_INTR_SQ:
1192		reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
1193		break;
1194	case NICVF_INTR_RBDR:
1195		reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
1196		break;
1197	case NICVF_INTR_PKT_DROP:
1198		reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
1199		break;
1200	case NICVF_INTR_TCP_TIMER:
1201		reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
1202		break;
1203	case NICVF_INTR_MBOX:
1204		reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
1205		break;
1206	case NICVF_INTR_QS_ERR:
1207		reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
1208		break;
1209	default:
1210		netdev_err(nic->netdev,
1211			   "Failed to disable interrupt: unknown type\n");
1212		break;
1213	}
1214
1215	nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
1216}
1217
1218/* Clear interrupt */
1219void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
1220{
1221	u64 reg_val = 0;
1222
1223	switch (int_type) {
1224	case NICVF_INTR_CQ:
1225		reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
1226		break;
1227	case NICVF_INTR_SQ:
1228		reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
1229		break;
1230	case NICVF_INTR_RBDR:
1231		reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
1232		break;
1233	case NICVF_INTR_PKT_DROP:
1234		reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
1235		break;
1236	case NICVF_INTR_TCP_TIMER:
1237		reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
1238		break;
1239	case NICVF_INTR_MBOX:
1240		reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
1241		break;
1242	case NICVF_INTR_QS_ERR:
1243		reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
1244		break;
1245	default:
1246		netdev_err(nic->netdev,
1247			   "Failed to clear interrupt: unknown type\n");
1248		break;
1249	}
1250
1251	nicvf_reg_write(nic, NIC_VF_INT, reg_val);
1252}
1253
1254/* Check if interrupt is enabled */
1255int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
1256{
1257	u64 reg_val;
1258	u64 mask = 0xff;
1259
1260	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
1261
1262	switch (int_type) {
1263	case NICVF_INTR_CQ:
1264		mask = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
1265		break;
1266	case NICVF_INTR_SQ:
1267		mask = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
1268		break;
1269	case NICVF_INTR_RBDR:
1270		mask = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
1271		break;
1272	case NICVF_INTR_PKT_DROP:
1273		mask = NICVF_INTR_PKT_DROP_MASK;
1274		break;
1275	case NICVF_INTR_TCP_TIMER:
1276		mask = NICVF_INTR_TCP_TIMER_MASK;
1277		break;
1278	case NICVF_INTR_MBOX:
1279		mask = NICVF_INTR_MBOX_MASK;
1280		break;
1281	case NICVF_INTR_QS_ERR:
1282		mask = NICVF_INTR_QS_ERR_MASK;
1283		break;
1284	default:
1285		netdev_err(nic->netdev,
1286			   "Failed to check interrupt enable: unknown type\n");
1287		break;
1288	}
1289
1290	return (reg_val & mask);
1291}
1292
1293void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
1294{
1295	struct rcv_queue *rq;
1296
1297#define GET_RQ_STATS(reg) \
1298	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
1299			    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
1300
1301	rq = &nic->qs->rq[rq_idx];
1302	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
1303	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
1304}
1305
1306void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
1307{
1308	struct snd_queue *sq;
1309
1310#define GET_SQ_STATS(reg) \
1311	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
1312			    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
1313
1314	sq = &nic->qs->sq[sq_idx];
1315	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
1316	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
1317}
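/*
 * Editor's note (illustrative): GET_RQ_STATS()/GET_SQ_STATS() build the
 * register offset as base | (queue index << NIC_Q_NUM_SHIFT) | (stat << 3),
 * i.e. each statistic of a queue occupies its own 8-byte register selected
 * by the statistic index.
 */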
1318
1319/* Check for errors in the receive completion queue entry */
1320int nicvf_check_cqe_rx_errs(struct nicvf *nic,
1321			    struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
1322{
1323	struct nicvf_hw_stats *stats = &nic->hw_stats;
1324	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
1325
1326	if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
1327		drv_stats->rx_frames_ok++;
1328		return 0;
1329	}
1330
1331	if (netif_msg_rx_err(nic))
1332		netdev_err(nic->netdev,
1333			   "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
1334			   nic->netdev->name,
1335			   cqe_rx->err_level, cqe_rx->err_opcode);
1336
1337	switch (cqe_rx->err_opcode) {
1338	case CQ_RX_ERROP_RE_PARTIAL:
1339		stats->rx_bgx_truncated_pkts++;
1340		break;
1341	case CQ_RX_ERROP_RE_JABBER:
1342		stats->rx_jabber_errs++;
1343		break;
1344	case CQ_RX_ERROP_RE_FCS:
1345		stats->rx_fcs_errs++;
1346		break;
1347	case CQ_RX_ERROP_RE_RX_CTL:
1348		stats->rx_bgx_errs++;
1349		break;
1350	case CQ_RX_ERROP_PREL2_ERR:
1351		stats->rx_prel2_errs++;
1352		break;
1353	case CQ_RX_ERROP_L2_MAL:
1354		stats->rx_l2_hdr_malformed++;
1355		break;
1356	case CQ_RX_ERROP_L2_OVERSIZE:
1357		stats->rx_oversize++;
1358		break;
1359	case CQ_RX_ERROP_L2_UNDERSIZE:
1360		stats->rx_undersize++;
1361		break;
1362	case CQ_RX_ERROP_L2_LENMISM:
1363		stats->rx_l2_len_mismatch++;
1364		break;
1365	case CQ_RX_ERROP_L2_PCLP:
1366		stats->rx_l2_pclp++;
1367		break;
1368	case CQ_RX_ERROP_IP_NOT:
1369		stats->rx_ip_ver_errs++;
1370		break;
1371	case CQ_RX_ERROP_IP_CSUM_ERR:
1372		stats->rx_ip_csum_errs++;
1373		break;
1374	case CQ_RX_ERROP_IP_MAL:
1375		stats->rx_ip_hdr_malformed++;
1376		break;
1377	case CQ_RX_ERROP_IP_MALD:
1378		stats->rx_ip_payload_malformed++;
1379		break;
1380	case CQ_RX_ERROP_IP_HOP:
1381		stats->rx_ip_ttl_errs++;
1382		break;
1383	case CQ_RX_ERROP_L3_PCLP:
1384		stats->rx_l3_pclp++;
1385		break;
1386	case CQ_RX_ERROP_L4_MAL:
1387		stats->rx_l4_malformed++;
1388		break;
1389	case CQ_RX_ERROP_L4_CHK:
1390		stats->rx_l4_csum_errs++;
1391		break;
1392	case CQ_RX_ERROP_UDP_LEN:
1393		stats->rx_udp_len_errs++;
1394		break;
1395	case CQ_RX_ERROP_L4_PORT:
1396		stats->rx_l4_port_errs++;
1397		break;
1398	case CQ_RX_ERROP_TCP_FLAG:
1399		stats->rx_tcp_flag_errs++;
1400		break;
1401	case CQ_RX_ERROP_TCP_OFFSET:
1402		stats->rx_tcp_offset_errs++;
1403		break;
1404	case CQ_RX_ERROP_L4_PCLP:
1405		stats->rx_l4_pclp++;
1406		break;
1407	case CQ_RX_ERROP_RBDR_TRUNC:
1408		stats->rx_truncated_pkts++;
1409		break;
1410	}
1411
1412	return 1;
1413}
1414
1415/* Check for errors in the send completion queue entry */
1416int nicvf_check_cqe_tx_errs(struct nicvf *nic,
1417			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
1418{
1419	struct cmp_queue_stats *stats = &cq->stats;
1420
1421	switch (cqe_tx->send_status) {
1422	case CQ_TX_ERROP_GOOD:
1423		stats->tx.good++;
1424		return 0;
1425	case CQ_TX_ERROP_DESC_FAULT:
1426		stats->tx.desc_fault++;
1427		break;
1428	case CQ_TX_ERROP_HDR_CONS_ERR:
1429		stats->tx.hdr_cons_err++;
1430		break;
1431	case CQ_TX_ERROP_SUBDC_ERR:
1432		stats->tx.subdesc_err++;
1433		break;
1434	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
1435		stats->tx.imm_size_oflow++;
1436		break;
1437	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
1438		stats->tx.data_seq_err++;
1439		break;
1440	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
1441		stats->tx.mem_seq_err++;
1442		break;
1443	case CQ_TX_ERROP_LOCK_VIOL:
1444		stats->tx.lock_viol++;
1445		break;
1446	case CQ_TX_ERROP_DATA_FAULT:
1447		stats->tx.data_fault++;
1448		break;
1449	case CQ_TX_ERROP_TSTMP_CONFLICT:
1450		stats->tx.tstmp_conflict++;
1451		break;
1452	case CQ_TX_ERROP_TSTMP_TIMEOUT:
1453		stats->tx.tstmp_timeout++;
1454		break;
1455	case CQ_TX_ERROP_MEM_FAULT:
1456		stats->tx.mem_fault++;
1457		break;
1458	case CQ_TX_ERROP_CK_OVERLAP:
1459		stats->tx.csum_overlap++;
1460		break;
1461	case CQ_TX_ERROP_CK_OFLOW:
1462		stats->tx.csum_overflow++;
1463		break;
1464	}
1465
1466	return 1;
1467}
1468