/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#ifndef _VNIC_RQ_H_
#define _VNIC_RQ_H_

#include "vnic_dev.h"
#include "vnic_cq.h"

/* Receive queue control */
struct vnic_rq_ctrl {
	u64 ring_base;			/* 0x00 */
#define RX_RING_BASE			   0x00
	u32 ring_size;			/* 0x08 */
#define RX_RING_SIZE			   0x08
	u32 pad0;
	u32 posted_index;		/* 0x10 */
#define RX_POSTED_INDEX			   0x10
	u32 pad1;
	u32 cq_index;			/* 0x18 */
#define RX_CQ_INDEX			   0x18
	u32 pad2;
	u32 enable;			/* 0x20 */
#define RX_ENABLE			   0x20
	u32 pad3;
	u32 running;			/* 0x28 */
#define RX_RUNNING			   0x28
	u32 pad4;
	u32 fetch_index;		/* 0x30 */
#define RX_FETCH_INDEX			   0x30
	u32 pad5;
	u32 error_interrupt_enable;	/* 0x38 */
#define RX_ERROR_INTR_ENABLE		   0x38
	u32 pad6;
	u32 error_interrupt_offset;	/* 0x40 */
#define RX_ERROR_INTR_OFFSET		   0x40
	u32 pad7;
	u32 error_status;		/* 0x48 */
#define RX_ERROR_STATUS			   0x48
	u32 pad8;
	u32 tcp_sn;			/* 0x50 */
#define RX_TCP_SN			   0x50
	u32 pad9;
	u32 unused;			/* 0x58 */
	u32 pad10;
	u32 dca_select;			/* 0x60 */
#define RX_DCA_SELECT			   0x60
	u32 pad11;
	u32 dca_value;			/* 0x68 */
#define RX_DCA_VALUE			   0x68
	u32 pad12;
	u32 data_ring;			/* 0x70 */
};
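
/*
 * Note: the RX_* macros above mirror the byte offsets of the fields they
 * follow, e.g. RX_POSTED_INDEX == offsetof(struct vnic_rq_ctrl, posted_index),
 * so the RQ control registers can presumably be addressed either through this
 * struct or by raw offset into the register block.
 */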

struct vnic_rq {
	unsigned int index;
	unsigned int posted_index;
	struct vnic_dev *vdev;
	struct vnic_res *ctrl;
	struct vnic_dev_ring ring;
	int num_free_mbufs;
	struct rte_mbuf **mbuf_ring;		/* array of allocated mbufs */
	unsigned int mbuf_next_idx;		/* next mb to consume */
	void *os_buf_head;
	unsigned int pkts_outstanding;
	uint16_t rx_nb_hold;
	uint16_t rx_free_thresh;
	unsigned int socket_id;
	struct rte_mempool *mp;
	uint16_t rxst_idx;
	uint32_t tot_pkts;
	uint8_t in_use;
	unsigned int max_mbufs_per_pkt;
	uint16_t tot_nb_desc;
	bool need_initial_post;
	struct iflib_dma_info data;
};

static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
{
	/* how many does SW own? */
	return rq->ring.desc_avail;
}

static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
{
	/* how many does HW own? */
	return rq->ring.desc_count - rq->ring.desc_avail - 1;
}
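
/*
 * Note: vnic_rq_desc_avail() + vnic_rq_desc_used() == ring.desc_count - 1.
 * One descriptor is held back (hence the "- 1" above), presumably so a
 * completely full ring is never mistaken for an empty one.
 */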

enum desc_return_options {
	VNIC_RQ_RETURN_DESC,
	VNIC_RQ_DEFER_RETURN_DESC,
};

static inline int vnic_rq_fill(struct vnic_rq *rq,
	int (*buf_fill)(struct vnic_rq *rq))
{
	int err;

	while (vnic_rq_desc_avail(rq) > 0) {

		err = (*buf_fill)(rq);
		if (err)
			return err;
	}

	return 0;
}

static inline int vnic_rq_fill_count(struct vnic_rq *rq,
	int (*buf_fill)(struct vnic_rq *rq), unsigned int count)
{
	int err;

	while ((vnic_rq_desc_avail(rq) > 0) && (count--)) {

		err = (*buf_fill)(rq);
		if (err)
			return err;
	}

	return 0;
}
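
/*
 * Illustrative sketch (not part of the original header): the buf_fill
 * callback passed to vnic_rq_fill()/vnic_rq_fill_count() is expected to
 * post one receive buffer and consume one free descriptor (i.e. decrement
 * rq->ring.desc_avail), returning 0 on success or a nonzero error to stop
 * the loop.  A hypothetical DPDK-style callback might look like:
 *
 *	static int enic_alloc_rx_buf(struct vnic_rq *rq)
 *	{
 *		struct rte_mbuf *mb = rte_mbuf_raw_alloc(rq->mp);
 *
 *		if (mb == NULL)
 *			return -ENOMEM;
 *
 *		rq->mbuf_ring[rq->posted_index] = mb;
 *		rq->posted_index = (rq->posted_index + 1) % rq->ring.desc_count;
 *		rq->ring.desc_avail--;
 *		return 0;
 *	}
 *
 *	vnic_rq_fill_count(rq, enic_alloc_rx_buf, 32);
 *
 * A real callback would also program the RQ descriptor with the buffer's
 * DMA address and eventually update the posted_index register.
 */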

void vnic_rq_free(struct vnic_rq *rq);
void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
    unsigned int fetch_index, unsigned int posted_index,
    unsigned int error_interrupt_enable,
    unsigned int error_interrupt_offset);
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
    unsigned int error_interrupt_enable,
    unsigned int error_interrupt_offset);
void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error);
unsigned int vnic_rq_error_status(struct vnic_rq *rq);
void vnic_rq_enable(struct vnic_rq *rq);
int vnic_rq_disable(struct vnic_rq *rq);
void vnic_rq_clean(struct vnic_rq *rq);
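
/*
 * Typical RQ life cycle (sketch inferred from the API above; the actual
 * driver code lives outside this header):
 *
 *	vnic_rq_init(rq, cq_index, err_intr_enable, err_intr_offset);
 *	vnic_rq_fill(rq, buf_fill);	// post initial receive buffers
 *	vnic_rq_enable(rq);
 *	...
 *	if (vnic_rq_disable(rq) == 0) {
 *		vnic_rq_clean(rq);
 *		vnic_rq_free(rq);
 *	}
 */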

#endif /* _VNIC_RQ_H_ */