/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#ifndef ENA_ETH_COM_H_
#define ENA_ETH_COM_H_

#include "ena_com.h"

/* we allow 2 DMA descriptors per LLQ entry */
#define ENA_LLQ_ENTRY_DESC_CHUNK_SIZE	(2 * sizeof(struct ena_eth_io_tx_desc))
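/* Push-header sizes: what remains of a 128B (regular) or 256B (large)
 * LLQ entry after the descriptor chunk above.
 */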
#define ENA_LLQ_HEADER		(128UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
#define ENA_LLQ_LARGE_HEADER	(256UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)

struct ena_com_tx_ctx {
	struct ena_com_tx_meta ena_meta;
	struct ena_com_buf *ena_bufs;
	/* For LLQ, header buffer - pushed to the device mem space */
	void *push_header;

	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	u16 num_bufs;
	u16 req_id;
	/* For a regular queue, indicates the size of the header.
	 * For LLQ, indicates the size of the pushed buffer.
	 */
	u16 header_len;

	u8 meta_valid;
	u8 tso_enable;
	u8 l3_csum_enable;
	u8 l4_csum_enable;
	u8 l4_csum_partial;
	u8 df; /* Don't fragment */
};

struct ena_com_rx_ctx {
	struct ena_com_rx_buf_info *ena_bufs;
	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	bool l3_csum_err;
	bool l4_csum_err;
	u8 l4_csum_checked;
	/* fragmented packet */
	bool frag;
	u32 hash;
	u16 descs;
	int max_bufs;
	u8 pkt_offset;
};

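/* ena_com_prepare_tx - Build and queue the TX descriptors for one packet
 * @io_sq: the submission queue the packet is posted to
 * @ena_tx_ctx: packet context (buffers, offloads, meta and header info)
 * @nb_hw_desc: out parameter, number of HW descriptors consumed
 *
 * Return: 0 on success, a negative errno otherwise.
 */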
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc);

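/* ena_com_rx_pkt - Retrieve the next received packet from the CQ
 * @io_cq: completion queue to poll
 * @io_sq: the matching RX submission queue
 * @ena_rx_ctx: out parameter, describes the received packet
 *
 * On return, ena_rx_ctx->descs is 0 when no packet is ready.
 *
 * Return: 0 on success, a negative errno otherwise.
 */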
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx);

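/* ena_com_add_single_rx_desc - Post a single RX buffer to the RX SQ
 * @io_sq: RX submission queue
 * @ena_buf: DMA address and length of the buffer
 * @req_id: request id used to match the buffer on completion
 */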
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id);

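/* ena_com_cq_empty - True when the CQ has no pending completions */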
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);

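/* ena_com_unmask_intr - Unmask the CQ interrupt vector
 * @io_cq: completion queue owning the interrupt
 * @intr_reg: interrupt control value to write (moderation and unmask bits)
 */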
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
				       struct ena_eth_io_intr_reg *intr_reg)
{
	writel(intr_reg->intr_control, io_cq->unmask_reg);
}

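/* ena_com_free_q_entries - Number of free entries in the submission queue
 *
 * One entry is kept reserved so that tail == next_to_comp always means
 * empty, never full.
 */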
static inline int ena_com_free_q_entries(struct ena_com_io_sq *io_sq)
{
	u16 tail, next_to_comp, cnt;

	next_to_comp = io_sq->next_to_comp;
	tail = io_sq->tail;
	cnt = tail - next_to_comp;

	return io_sq->q_depth - 1 - cnt;
}

/* Check if the submission queue has enough space to hold required_buffers */
static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
						u16 required_buffers)
{
	int temp;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return ena_com_free_q_entries(io_sq) >= required_buffers;

	/* This calculation doesn't need to be 100% accurate. To reduce the
	 * overhead, just subtract 2 entries from the free descriptors
	 * (one for the header line and one to compensate for the
	 * division rounding down).
	 */
	temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;

	return ena_com_free_q_entries(io_sq) > temp;
}
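
/* Illustrative usage (hypothetical caller, not part of this header): a TX
 * path would typically verify worst-case descriptor space before mapping a
 * packet, e.g.:
 *
 *	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_frags + 2)))
 *		return NETDEV_TX_BUSY;
 *
 * where "num_frags + 2" is an assumed bound covering the data fragments
 * plus possible meta and header descriptors.
 */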
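/* ena_com_meta_desc_changed - Check if a new TX meta descriptor is needed
 *
 * Compares the SQ's cached TX meta against the meta in @ena_tx_ctx; returns
 * true when they differ, i.e. a meta descriptor must be sent.
 */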
static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	if (!ena_tx_ctx->meta_valid)
		return false;

	return !!memcmp(&io_sq->cached_tx_meta,
			&ena_tx_ctx->ena_meta,
			sizeof(struct ena_com_tx_meta));
}

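/* is_llq_max_tx_burst_exists - True if this LLQ SQ has a TX burst limit */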
static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)
{
	return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) &&
	       io_sq->llq_info.max_entries_in_tx_burst > 0;
}

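/* ena_com_is_doorbell_needed - Check if the doorbell must ring before queueing
 * @io_sq: submission queue (only meaningful for LLQ with a burst limit)
 * @ena_tx_ctx: context of the packet about to be queued
 *
 * Returns true when the packet needs more LLQ entries than are left in the
 * current TX burst, so the caller must write the doorbell first.
 */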
static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
					      struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_com_llq_info *llq_info;
	int descs_after_first_entry;
	int num_entries_needed = 1;
	u16 num_descs;

	if (!is_llq_max_tx_burst_exists(io_sq))
		return false;

	llq_info = &io_sq->llq_info;
	num_descs = ena_tx_ctx->num_bufs;

	if (llq_info->disable_meta_caching ||
	    unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
		++num_descs;

	if (num_descs > llq_info->descs_num_before_header) {
		descs_after_first_entry = num_descs - llq_info->descs_num_before_header;
		num_entries_needed += DIV_ROUND_UP(descs_after_first_entry,
						   llq_info->descs_per_entry);
	}

	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
		   "Queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid, num_descs,
		   num_entries_needed);

	return num_entries_needed > io_sq->entries_in_tx_burst_left;
}

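/* Illustrative usage (hypothetical caller, not part of this header): an
 * LLQ-aware TX path rings the doorbell early when the next packet would not
 * fit in the remaining burst budget:
 *
 *	if (ena_com_is_doorbell_needed(io_sq, &ena_tx_ctx))
 *		ena_com_write_sq_doorbell(io_sq);
 */

/* ena_com_write_sq_doorbell - Notify the device of new SQ entries
 *
 * Writes the SQ tail to the doorbell register and, for LLQ with a burst
 * limit, resets the remaining TX burst budget.
 */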
static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
	u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
	u16 tail = io_sq->tail;

	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
		   "Write submission queue doorbell for queue: %d tail: %d\n", io_sq->qid, tail);

	writel(tail, io_sq->db_addr);

	if (is_llq_max_tx_burst_exists(io_sq)) {
		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Reset available entries in tx burst for queue %d to %d\n", io_sq->qid,
			   max_entries_in_tx_burst);
		io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
	}

	return 0;
}

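/* ena_com_update_numa_node - Hint the device about the CQ's NUMA node
 * @io_cq: completion queue
 * @numa_node: NUMA node the queue's memory and handler run on
 *
 * No-op when the device didn't expose a NUMA configuration register.
 */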
static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
					    u8 numa_node)
{
	struct ena_eth_io_numa_node_cfg_reg numa_cfg;

	if (!io_cq->numa_node_cfg_reg)
		return;

	numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
		| ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;

	writel(numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}

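/* ena_com_comp_ack - Acknowledge @elem completed SQ entries, freeing them
 * for reuse by advancing next_to_comp.
 */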
static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
{
	io_sq->next_to_comp += elem;
}

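/* ena_com_cq_inc_head - Advance the CQ head, flipping the phase bit on wrap */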
static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
	io_cq->head++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
		io_cq->phase ^= 1;
}

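/* ena_com_tx_comp_req_id_get - Poll the next TX completion descriptor
 * @io_cq: completion queue to poll
 * @req_id: out parameter, the request id of the completed packet
 *
 * Return: 0 (advancing the CQ head) on success, -EAGAIN when no completion
 * is ready, -EINVAL on an out-of-range request id.
 *
 * Illustrative cleanup loop (hypothetical caller, not part of this header):
 *
 *	while (ena_com_tx_comp_req_id_get(io_cq, &req_id) == 0) {
 *		... reclaim the buffers recorded under req_id ...
 *		ena_com_comp_ack(io_sq, descs_used_by_req_id);
 *	}
 *
 * where "descs_used_by_req_id" stands for the caller's own bookkeeping of
 * how many SQ descriptors that packet consumed.
 */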
static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
					     u16 *req_id)
{
	u8 expected_phase, cdesc_phase;
	struct ena_eth_io_tx_cdesc *cdesc;
	u16 masked_head;

	masked_head = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_tx_cdesc *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		(masked_head * io_cq->cdesc_entry_size_in_bytes));

	/* When the current completion descriptor's phase isn't the same as
	 * the expected one, the device hasn't written this completion yet.
	 */
	cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
	if (cdesc_phase != expected_phase)
		return -EAGAIN;

	/* Read the rest of the descriptor only after the phase bit has
	 * been validated.
	 */
	dma_rmb();

	*req_id = READ_ONCE(cdesc->req_id);
	if (unlikely(*req_id >= io_cq->q_depth)) {
		netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device, "Invalid req id %d\n",
			   cdesc->req_id);
		return -EINVAL;
	}

	ena_com_cq_inc_head(io_cq);

	return 0;
}

#endif /* ENA_ETH_COM_H_ */