/* ena_com.h — revision 320731 */
/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

34206917Smarius#ifndef ENA_COM
35206917Smarius#define ENA_COM
36206917Smarius
37206917Smarius#ifndef ENA_INTERNAL
38206917Smarius#include "ena_plat.h"
39206917Smarius#else
40206917Smarius#include "ena_plat.h"
41206917Smarius#include "ena_includes.h"
42206917Smarius#endif
43206917Smarius
#define ENA_MAX_NUM_IO_QUEUES		128U
/* We need two queues for each IO (one for Tx and one for Rx) */
#define ENA_TOTAL_NUM_QUEUES		(2 * (ENA_MAX_NUM_IO_QUEUES))

#define ENA_MAX_HANDLERS 256

#define ENA_MAX_PHYS_ADDR_SIZE_BITS 48

/* Unit in usec */
#define ENA_REG_READ_TIMEOUT 200000

/* Byte sizes of the admin SQ/CQ/AENQ rings for a given entry count */
#define ADMIN_SQ_SIZE(depth)	((depth) * sizeof(struct ena_admin_aq_entry))
#define ADMIN_CQ_SIZE(depth)	((depth) * sizeof(struct ena_admin_acq_entry))
#define ADMIN_AENQ_SIZE(depth)	((depth) * sizeof(struct ena_admin_aenq_entry))

/*****************************************************************************/
/*****************************************************************************/
/* ENA adaptive interrupt moderation settings */

#define ENA_INTR_LOWEST_USECS           (0)
#define ENA_INTR_LOWEST_PKTS            (3)
#define ENA_INTR_LOWEST_BYTES           (2 * 1524)

#define ENA_INTR_LOW_USECS              (32)
#define ENA_INTR_LOW_PKTS               (12)
#define ENA_INTR_LOW_BYTES              (16 * 1024)

#define ENA_INTR_MID_USECS              (80)
#define ENA_INTR_MID_PKTS               (48)
#define ENA_INTR_MID_BYTES              (64 * 1024)

#define ENA_INTR_HIGH_USECS             (128)
#define ENA_INTR_HIGH_PKTS              (96)
#define ENA_INTR_HIGH_BYTES             (128 * 1024)

#define ENA_INTR_HIGHEST_USECS          (192)
#define ENA_INTR_HIGHEST_PKTS           (128)
#define ENA_INTR_HIGHEST_BYTES          (192 * 1024)

#define ENA_INTR_INITIAL_TX_INTERVAL_USECS		196
#define ENA_INTR_INITIAL_RX_INTERVAL_USECS		4
/* Weights for the exponential moving average of the moderation interval */
#define ENA_INTR_DELAY_OLD_VALUE_WEIGHT			6
#define ENA_INTR_DELAY_NEW_VALUE_WEIGHT			4
#define ENA_INTR_MODER_LEVEL_STRIDE			1
#define ENA_INTR_BYTE_COUNT_NOT_SUPPORTED		0xFFFFFF

#define ENA_HW_HINTS_NO_TIMEOUT				0xFFFF

/* Adaptive moderation table levels, from least to most aggressive
 * coalescing. ENA_INTR_MAX_NUM_OF_LEVELS is the table size.
 */
enum ena_intr_moder_level {
	ENA_INTR_MODER_LOWEST = 0,
	ENA_INTR_MODER_LOW,
	ENA_INTR_MODER_MID,
	ENA_INTR_MODER_HIGH,
	ENA_INTR_MODER_HIGHEST,
	ENA_INTR_MAX_NUM_OF_LEVELS,
};

/* One row of the adaptive interrupt moderation table. */
struct ena_intr_moder_entry {
	unsigned int intr_moder_interval; /* coalescing interval */
	unsigned int pkts_per_interval;   /* packet threshold for this level */
	unsigned int bytes_per_interval;  /* byte threshold for this level */
};

/* IO queue direction: transmit or receive. */
enum queue_direction {
	ENA_COM_IO_QUEUE_DIRECTION_TX,
	ENA_COM_IO_QUEUE_DIRECTION_RX
};

112206917Smariusstruct ena_com_buf {
113206917Smarius	dma_addr_t paddr; /**< Buffer physical address */
114206917Smarius	u16 len; /**< Buffer length in bytes */
115206917Smarius};
116206917Smarius
117206917Smariusstruct ena_com_rx_buf_info {
118206917Smarius	u16 len;
119206917Smarius	u16 req_id;
120206917Smarius};
121206917Smarius
122206917Smariusstruct ena_com_io_desc_addr {
123206917Smarius	u8 __iomem *pbuf_dev_addr; /* LLQ address */
124206917Smarius	u8 *virt_addr;
125206917Smarius	dma_addr_t phys_addr;
126206917Smarius	ena_mem_handle_t mem_handle;
127206917Smarius};
128206917Smarius
129206917Smariusstruct ena_com_tx_meta {
130206917Smarius	u16 mss;
131206917Smarius	u16 l3_hdr_len;
132206917Smarius	u16 l3_hdr_offset;
133206917Smarius	u16 l4_hdr_len; /* In words */
134206917Smarius};
135206917Smarius
136struct ena_com_io_cq {
137	struct ena_com_io_desc_addr cdesc_addr;
138	void *bus;
139
140	/* Interrupt unmask register */
141	u32 __iomem *unmask_reg;
142
143	/* The completion queue head doorbell register */
144	u32 __iomem *cq_head_db_reg;
145
146	/* numa configuration register (for TPH) */
147	u32 __iomem *numa_node_cfg_reg;
148
149	/* The value to write to the above register to unmask
150	 * the interrupt of this queue
151	 */
152	u32 msix_vector;
153
154	enum queue_direction direction;
155
156	/* holds the number of cdesc of the current packet */
157	u16 cur_rx_pkt_cdesc_count;
158	/* save the firt cdesc idx of the current packet */
159	u16 cur_rx_pkt_cdesc_start_idx;
160
161	u16 q_depth;
162	/* Caller qid */
163	u16 qid;
164
165	/* Device queue index */
166	u16 idx;
167	u16 head;
168	u16 last_head_update;
169	u8 phase;
170	u8 cdesc_entry_size_in_bytes;
171
172} ____cacheline_aligned;
173
174struct ena_com_io_sq {
175	struct ena_com_io_desc_addr desc_addr;
176	void *bus;
177
178	u32 __iomem *db_addr;
179	u8 __iomem *header_addr;
180
181	enum queue_direction direction;
182	enum ena_admin_placement_policy_type mem_queue_type;
183
184	u32 msix_vector;
185	struct ena_com_tx_meta cached_tx_meta;
186
187	u16 q_depth;
188	u16 qid;
189
190	u16 idx;
191	u16 tail;
192	u16 next_to_comp;
193	u32 tx_max_header_size;
194	u8 phase;
195	u8 desc_entry_size;
196	u8 dma_addr_bits;
197} ____cacheline_aligned;
198
199struct ena_com_admin_cq {
200	struct ena_admin_acq_entry *entries;
201	ena_mem_handle_t mem_handle;
202	dma_addr_t dma_addr;
203
204	u16 head;
205	u8 phase;
206};
207
208struct ena_com_admin_sq {
209	struct ena_admin_aq_entry *entries;
210	ena_mem_handle_t mem_handle;
211	dma_addr_t dma_addr;
212
213	u32 __iomem *db_addr;
214
215	u16 head;
216	u16 tail;
217	u8 phase;
218
219};
220
221struct ena_com_stats_admin {
222	u32 aborted_cmd;
223	u32 submitted_cmd;
224	u32 completed_cmd;
225	u32 out_of_space;
226	u32 no_completion;
227};
228
229struct ena_com_admin_queue {
230	void *q_dmadev;
231	void *bus;
232	ena_spinlock_t q_lock; /* spinlock for the admin queue */
233
234	struct ena_comp_ctx *comp_ctx;
235	u32 completion_timeout;
236	u16 q_depth;
237	struct ena_com_admin_cq cq;
238	struct ena_com_admin_sq sq;
239
240	/* Indicate if the admin queue should poll for completion */
241	bool polling;
242
243	u16 curr_cmd_id;
244
245	/* Indicate that the ena was initialized and can
246	 * process new admin commands
247	 */
248	bool running_state;
249
250	/* Count the number of outstanding admin commands */
251	ena_atomic32_t outstanding_cmds;
252
253	struct ena_com_stats_admin stats;
254};
255
256struct ena_aenq_handlers;
257
258struct ena_com_aenq {
259	u16 head;
260	u8 phase;
261	struct ena_admin_aenq_entry *entries;
262	dma_addr_t dma_addr;
263	ena_mem_handle_t mem_handle;
264	u16 q_depth;
265	struct ena_aenq_handlers *aenq_handlers;
266};
267
268struct ena_com_mmio_read {
269	struct ena_admin_ena_mmio_req_read_less_resp *read_resp;
270	dma_addr_t read_resp_dma_addr;
271	ena_mem_handle_t read_resp_mem_handle;
272	u32 reg_read_to; /* in us */
273	u16 seq_num;
274	bool readless_supported;
275	/* spin lock to ensure a single outstanding read */
276	ena_spinlock_t lock;
277};
278
279struct ena_rss {
280	/* Indirect table */
281	u16 *host_rss_ind_tbl;
282	struct ena_admin_rss_ind_table_entry *rss_ind_tbl;
283	dma_addr_t rss_ind_tbl_dma_addr;
284	ena_mem_handle_t rss_ind_tbl_mem_handle;
285	u16 tbl_log_size;
286
287	/* Hash key */
288	enum ena_admin_hash_functions hash_func;
289	struct ena_admin_feature_rss_flow_hash_control *hash_key;
290	dma_addr_t hash_key_dma_addr;
291	ena_mem_handle_t hash_key_mem_handle;
292	u32 hash_init_val;
293
294	/* Flow Control */
295	struct ena_admin_feature_rss_hash_control *hash_ctrl;
296	dma_addr_t hash_ctrl_dma_addr;
297	ena_mem_handle_t hash_ctrl_mem_handle;
298
299};
300
301struct ena_host_attribute {
302	/* Debug area */
303	u8 *debug_area_virt_addr;
304	dma_addr_t debug_area_dma_addr;
305	ena_mem_handle_t debug_area_dma_handle;
306	u32 debug_area_size;
307
308	/* Host information */
309	struct ena_admin_host_info *host_info;
310	dma_addr_t host_info_dma_addr;
311	ena_mem_handle_t host_info_dma_handle;
312};
313
314/* Each ena_dev is a PCI function. */
315struct ena_com_dev {
316	struct ena_com_admin_queue admin_queue;
317	struct ena_com_aenq aenq;
318	struct ena_com_io_cq io_cq_queues[ENA_TOTAL_NUM_QUEUES];
319	struct ena_com_io_sq io_sq_queues[ENA_TOTAL_NUM_QUEUES];
320	u8 __iomem *reg_bar;
321	void __iomem *mem_bar;
322	void *dmadev;
323	void *bus;
324	enum ena_admin_placement_policy_type tx_mem_queue_type;
325	u32 tx_max_header_size;
326	u16 stats_func; /* Selected function for extended statistic dump */
327	u16 stats_queue; /* Selected queue for extended statistic dump */
328
329	struct ena_com_mmio_read mmio_read;
330
331	struct ena_rss rss;
332	u32 supported_features;
333	u32 dma_addr_bits;
334
335	struct ena_host_attribute host_attr;
336	bool adaptive_coalescing;
337	u16 intr_delay_resolution;
338	u32 intr_moder_tx_interval;
339	struct ena_intr_moder_entry *intr_moder_tbl;
340};
341
342struct ena_com_dev_get_features_ctx {
343	struct ena_admin_queue_feature_desc max_queues;
344	struct ena_admin_device_attr_feature_desc dev_attr;
345	struct ena_admin_feature_aenq_desc aenq;
346	struct ena_admin_feature_offload_desc offload;
347	struct ena_admin_ena_hw_hints hw_hints;
348};
349
350struct ena_com_create_io_ctx {
351	enum ena_admin_placement_policy_type mem_queue_type;
352	enum queue_direction direction;
353	int numa_node;
354	u32 msix_vector;
355	u16 queue_size;
356	u16 qid;
357};
358
359typedef void (*ena_aenq_handler)(void *data,
360	struct ena_admin_aenq_entry *aenq_e);
361
362/* Holds aenq handlers. Indexed by AENQ event group */
363struct ena_aenq_handlers {
364	ena_aenq_handler handlers[ENA_MAX_HANDLERS];
365	ena_aenq_handler unimplemented_handler;
366};
367
368/*****************************************************************************/
369/*****************************************************************************/
370#if defined(__cplusplus)
371extern "C" {
372#endif
373
374/* ena_com_mmio_reg_read_request_init - Init the mmio reg read mechanism
375 * @ena_dev: ENA communication layer struct
376 *
377 * Initialize the register read mechanism.
378 *
379 * @note: This method must be the first stage in the initialization sequence.
380 *
381 * @return - 0 on success, negative value on failure.
382 */
383int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);
384
385/* ena_com_set_mmio_read_mode - Enable/disable the mmio reg read mechanism
386 * @ena_dev: ENA communication layer struct
387 * @readless_supported: readless mode (enable/disable)
388 */
389void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev,
390				bool readless_supported);
391
392/* ena_com_mmio_reg_read_request_write_dev_addr - Write the mmio reg read return
393 * value physical address.
394 * @ena_dev: ENA communication layer struct
395 */
396void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev);
397
398/* ena_com_mmio_reg_read_request_destroy - Destroy the mmio reg read mechanism
399 * @ena_dev: ENA communication layer struct
400 */
401void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);
402
403/* ena_com_admin_init - Init the admin and the async queues
404 * @ena_dev: ENA communication layer struct
405 * @aenq_handlers: Those handlers to be called upon event.
406 * @init_spinlock: Indicate if this method should init the admin spinlock or
407 * the spinlock was init before (for example, in a case of FLR).
408 *
409 * Initialize the admin submission and completion queues.
410 * Initialize the asynchronous events notification queues.
411 *
412 * @return - 0 on success, negative value on failure.
413 */
414int ena_com_admin_init(struct ena_com_dev *ena_dev,
415		       struct ena_aenq_handlers *aenq_handlers,
416		       bool init_spinlock);
417
418/* ena_com_admin_destroy - Destroy the admin and the async events queues.
419 * @ena_dev: ENA communication layer struct
420 *
421 * @note: Before calling this method, the caller must validate that the device
422 * won't send any additional admin completions/aenq.
423 * To achieve that, a FLR is recommended.
424 */
425void ena_com_admin_destroy(struct ena_com_dev *ena_dev);
426
427/* ena_com_dev_reset - Perform device FLR to the device.
428 * @ena_dev: ENA communication layer struct
429 *
430 * @return - 0 on success, negative value on failure.
431 */
432int ena_com_dev_reset(struct ena_com_dev *ena_dev);
433
434/* ena_com_create_io_queue - Create io queue.
435 * @ena_dev: ENA communication layer struct
436 * @ctx - create context structure
437 *
438 * Create the submission and the completion queues.
439 *
440 * @return - 0 on success, negative value on failure.
441 */
442int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
443			    struct ena_com_create_io_ctx *ctx);
444
445/* ena_com_destroy_io_queue - Destroy IO queue with the queue id - qid.
446 * @ena_dev: ENA communication layer struct
447 * @qid - the caller virtual queue id.
448 */
449void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid);
450
451/* ena_com_get_io_handlers - Return the io queue handlers
452 * @ena_dev: ENA communication layer struct
453 * @qid - the caller virtual queue id.
454 * @io_sq - IO submission queue handler
455 * @io_cq - IO completion queue handler.
456 *
457 * @return - 0 on success, negative value on failure.
458 */
459int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
460			    struct ena_com_io_sq **io_sq,
461			    struct ena_com_io_cq **io_cq);
462
463/* ena_com_admin_aenq_enable - ENAble asynchronous event notifications
464 * @ena_dev: ENA communication layer struct
465 *
466 * After this method, aenq event can be received via AENQ.
467 */
468void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev);
469
470/* ena_com_set_admin_running_state - Set the state of the admin queue
471 * @ena_dev: ENA communication layer struct
472 *
473 * Change the state of the admin queue (enable/disable)
474 */
475void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state);
476
477/* ena_com_get_admin_running_state - Get the admin queue state
478 * @ena_dev: ENA communication layer struct
479 *
480 * Retrieve the state of the admin queue (enable/disable)
481 *
482 * @return - current polling mode (enable/disable)
483 */
484bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev);
485
486/* ena_com_set_admin_polling_mode - Set the admin completion queue polling mode
487 * @ena_dev: ENA communication layer struct
488 * @polling: ENAble/Disable polling mode
489 *
490 * Set the admin completion mode.
491 */
492void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);
493
494/* ena_com_set_admin_polling_mode - Get the admin completion queue polling mode
495 * @ena_dev: ENA communication layer struct
496 *
497 * Get the admin completion mode.
498 * If polling mode is on, ena_com_execute_admin_command will perform a
499 * polling on the admin completion queue for the commands completion,
500 * otherwise it will wait on wait event.
501 *
502 * @return state
503 */
504bool ena_com_get_ena_admin_polling_mode(struct ena_com_dev *ena_dev);
505
/* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
 * @ena_dev: ENA communication layer struct
 *
 * This method goes over the admin completion queue and wakes up all the
 * pending threads that wait on the commands wait event.
 *
 * @note: Should be called after MSI-X interrupt.
 */
void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev);

/* ena_com_aenq_intr_handler - AENQ interrupt handler
 * @ena_dev: ENA communication layer struct
 *
 * This method goes over the async event notification queue and calls the
 * proper aenq handler.
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data);

/* ena_com_abort_admin_commands - Abort all the outstanding admin commands.
 * @ena_dev: ENA communication layer struct
 *
 * This method aborts all the outstanding admin commands.
 * The caller should then call ena_com_wait_for_abort_completion to make sure
 * all the commands were completed.
 */
void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev);

/* ena_com_wait_for_abort_completion - Wait for admin commands abort.
 * @ena_dev: ENA communication layer struct
 *
 * This method waits until all the outstanding admin commands are completed.
 */
void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev);

/* ena_com_validate_version - Validate the device parameters
 * @ena_dev: ENA communication layer struct
 *
 * This method validates that the device parameters are the same as the saved
 * parameters in ena_dev.
 * This method is useful after device reset, to validate the device mac address
 * and the device offloads are the same as before the reset.
 *
 * @return - 0 on success negative value otherwise.
 */
int ena_com_validate_version(struct ena_com_dev *ena_dev);

/* ena_com_get_link_params - Retrieve physical link parameters.
 * @ena_dev: ENA communication layer struct
 * @resp: Link parameters
 *
 * Retrieve the physical link parameters,
 * like speed, auto-negotiation and full duplex support.
 *
 * @return - 0 on Success negative value otherwise.
 */
int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp);

/* ena_com_get_dma_width - Retrieve physical dma address width the device
 * supports.
 * @ena_dev: ENA communication layer struct
 *
 * Retrieve the maximum physical address bits the device can handle.
 *
 * @return: > 0 on Success and negative value otherwise.
 */
int ena_com_get_dma_width(struct ena_com_dev *ena_dev);

574/* ena_com_set_aenq_config - Set aenq groups configurations
575 * @ena_dev: ENA communication layer struct
576 * @groups flag: bit fields flags of enum ena_admin_aenq_group.
577 *
578 * Configure which aenq event group the driver would like to receive.
579 *
580 * @return: 0 on Success and negative value otherwise.
581 */
582int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag);
583
584/* ena_com_get_dev_attr_feat - Get device features
585 * @ena_dev: ENA communication layer struct
586 * @get_feat_ctx: returned context that contain the get features.
587 *
588 * @return: 0 on Success and negative value otherwise.
589 */
590int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
591			      struct ena_com_dev_get_features_ctx *get_feat_ctx);
592
593/* ena_com_get_dev_basic_stats - Get device basic statistics
594 * @ena_dev: ENA communication layer struct
595 * @stats: stats return value
596 *
597 * @return: 0 on Success and negative value otherwise.
598 */
599int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
600				struct ena_admin_basic_stats *stats);
601
602/* ena_com_set_dev_mtu - Configure the device mtu.
603 * @ena_dev: ENA communication layer struct
604 * @mtu: mtu value
605 *
606 * @return: 0 on Success and negative value otherwise.
607 */
608int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu);
609
610/* ena_com_get_offload_settings - Retrieve the device offloads capabilities
611 * @ena_dev: ENA communication layer struct
612 * @offlad: offload return value
613 *
614 * @return: 0 on Success and negative value otherwise.
615 */
616int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
617				 struct ena_admin_feature_offload_desc *offload);
618
619/* ena_com_rss_init - Init RSS
620 * @ena_dev: ENA communication layer struct
621 * @log_size: indirection log size
622 *
623 * Allocate RSS/RFS resources.
624 * The caller then can configure rss using ena_com_set_hash_function,
625 * ena_com_set_hash_ctrl and ena_com_indirect_table_set.
626 *
627 * @return: 0 on Success and negative value otherwise.
628 */
629int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);
630
631/* ena_com_rss_destroy - Destroy rss
632 * @ena_dev: ENA communication layer struct
633 *
634 * Free all the RSS/RFS resources.
635 */
636void ena_com_rss_destroy(struct ena_com_dev *ena_dev);
637
638/* ena_com_fill_hash_function - Fill RSS hash function
639 * @ena_dev: ENA communication layer struct
640 * @func: The hash function (Toeplitz or crc)
641 * @key: Hash key (for toeplitz hash)
642 * @key_len: key length (max length 10 DW)
643 * @init_val: initial value for the hash function
644 *
645 * Fill the ena_dev resources with the desire hash function, hash key, key_len
646 * and key initial value (if needed by the hash function).
647 * To flush the key into the device the caller should call
648 * ena_com_set_hash_function.
649 *
650 * @return: 0 on Success and negative value otherwise.
651 */
652int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
653			       enum ena_admin_hash_functions func,
654			       const u8 *key, u16 key_len, u32 init_val);
655
656/* ena_com_set_hash_function - Flush the hash function and it dependencies to
657 * the device.
658 * @ena_dev: ENA communication layer struct
659 *
660 * Flush the hash function and it dependencies (key, key length and
661 * initial value) if needed.
662 *
663 * @note: Prior to this method the caller should call ena_com_fill_hash_function
664 *
665 * @return: 0 on Success and negative value otherwise.
666 */
667int ena_com_set_hash_function(struct ena_com_dev *ena_dev);
668
669/* ena_com_get_hash_function - Retrieve the hash function and the hash key
670 * from the device.
671 * @ena_dev: ENA communication layer struct
672 * @func: hash function
673 * @key: hash key
674 *
675 * Retrieve the hash function and the hash key from the device.
676 *
677 * @note: If the caller called ena_com_fill_hash_function but didn't flash
678 * it to the device, the new configuration will be lost.
679 *
680 * @return: 0 on Success and negative value otherwise.
681 */
682int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
683			      enum ena_admin_hash_functions *func,
684			      u8 *key);
685
686/* ena_com_fill_hash_ctrl - Fill RSS hash control
687 * @ena_dev: ENA communication layer struct.
688 * @proto: The protocol to configure.
689 * @hash_fields: bit mask of ena_admin_flow_hash_fields
690 *
691 * Fill the ena_dev resources with the desire hash control (the ethernet
692 * fields that take part of the hash) for a specific protocol.
693 * To flush the hash control to the device, the caller should call
694 * ena_com_set_hash_ctrl.
695 *
696 * @return: 0 on Success and negative value otherwise.
697 */
698int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
699			   enum ena_admin_flow_hash_proto proto,
700			   u16 hash_fields);
701
702/* ena_com_set_hash_ctrl - Flush the hash control resources to the device.
703 * @ena_dev: ENA communication layer struct
704 *
705 * Flush the hash control (the ethernet fields that take part of the hash)
706 *
707 * @note: Prior to this method the caller should call ena_com_fill_hash_ctrl.
708 *
709 * @return: 0 on Success and negative value otherwise.
710 */
711int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev);
712
713/* ena_com_get_hash_ctrl - Retrieve the hash control from the device.
714 * @ena_dev: ENA communication layer struct
715 * @proto: The protocol to retrieve.
716 * @fields: bit mask of ena_admin_flow_hash_fields.
717 *
718 * Retrieve the hash control from the device.
719 *
720 * @note, If the caller called ena_com_fill_hash_ctrl but didn't flash
721 * it to the device, the new configuration will be lost.
722 *
723 * @return: 0 on Success and negative value otherwise.
724 */
725int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
726			  enum ena_admin_flow_hash_proto proto,
727			  u16 *fields);
728
729/* ena_com_set_default_hash_ctrl - Set the hash control to a default
730 * configuration.
731 * @ena_dev: ENA communication layer struct
732 *
733 * Fill the ena_dev resources with the default hash control configuration.
734 * To flush the hash control to the device, the caller should call
735 * ena_com_set_hash_ctrl.
736 *
737 * @return: 0 on Success and negative value otherwise.
738 */
739int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev);
740
741/* ena_com_indirect_table_fill_entry - Fill a single entry in the RSS
742 * indirection table
743 * @ena_dev: ENA communication layer struct.
744 * @entry_idx - indirection table entry.
745 * @entry_value - redirection value
746 *
747 * Fill a single entry of the RSS indirection table in the ena_dev resources.
748 * To flush the indirection table to the device, the called should call
749 * ena_com_indirect_table_set.
750 *
751 * @return: 0 on Success and negative value otherwise.
752 */
753int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
754				      u16 entry_idx, u16 entry_value);
755
756/* ena_com_indirect_table_set - Flush the indirection table to the device.
757 * @ena_dev: ENA communication layer struct
758 *
759 * Flush the indirection hash control to the device.
760 * Prior to this method the caller should call ena_com_indirect_table_fill_entry
761 *
762 * @return: 0 on Success and negative value otherwise.
763 */
764int ena_com_indirect_table_set(struct ena_com_dev *ena_dev);
765
766/* ena_com_indirect_table_get - Retrieve the indirection table from the device.
767 * @ena_dev: ENA communication layer struct
768 * @ind_tbl: indirection table
769 *
770 * Retrieve the RSS indirection table from the device.
771 *
772 * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flash
773 * it to the device, the new configuration will be lost.
774 *
775 * @return: 0 on Success and negative value otherwise.
776 */
777int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl);
778
779/* ena_com_allocate_host_info - Allocate host info resources.
780 * @ena_dev: ENA communication layer struct
781 *
782 * @return: 0 on Success and negative value otherwise.
783 */
784int ena_com_allocate_host_info(struct ena_com_dev *ena_dev);
785
786/* ena_com_allocate_debug_area - Allocate debug area.
787 * @ena_dev: ENA communication layer struct
788 * @debug_area_size - debug area size.
789 *
790 * @return: 0 on Success and negative value otherwise.
791 */
792int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
793				u32 debug_area_size);
794
795/* ena_com_delete_debug_area - Free the debug area resources.
796 * @ena_dev: ENA communication layer struct
797 *
798 * Free the allocate debug area.
799 */
800void ena_com_delete_debug_area(struct ena_com_dev *ena_dev);
801
802/* ena_com_delete_host_info - Free the host info resources.
803 * @ena_dev: ENA communication layer struct
804 *
805 * Free the allocate host info.
806 */
807void ena_com_delete_host_info(struct ena_com_dev *ena_dev);
808
809/* ena_com_set_host_attributes - Update the device with the host
810 * attributes (debug area and host info) base address.
811 * @ena_dev: ENA communication layer struct
812 *
813 * @return: 0 on Success and negative value otherwise.
814 */
815int ena_com_set_host_attributes(struct ena_com_dev *ena_dev);
816
817/* ena_com_create_io_cq - Create io completion queue.
818 * @ena_dev: ENA communication layer struct
819 * @io_cq - io completion queue handler
820
821 * Create IO completion queue.
822 *
823 * @return - 0 on success, negative value on failure.
824 */
825int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
826			 struct ena_com_io_cq *io_cq);
827
828/* ena_com_destroy_io_cq - Destroy io completion queue.
829 * @ena_dev: ENA communication layer struct
830 * @io_cq - io completion queue handler
831
832 * Destroy IO completion queue.
833 *
834 * @return - 0 on success, negative value on failure.
835 */
836int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
837			  struct ena_com_io_cq *io_cq);
838
839/* ena_com_execute_admin_command - Execute admin command
840 * @admin_queue: admin queue.
841 * @cmd: the admin command to execute.
842 * @cmd_size: the command size.
843 * @cmd_completion: command completion return value.
844 * @cmd_comp_size: command completion size.
845
846 * Submit an admin command and then wait until the device will return a
847 * completion.
848 * The completion will be copyed into cmd_comp.
849 *
850 * @return - 0 on success, negative value on failure.
851 */
852int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
853				  struct ena_admin_aq_entry *cmd,
854				  size_t cmd_size,
855				  struct ena_admin_acq_entry *cmd_comp,
856				  size_t cmd_comp_size);
857
858/* ena_com_init_interrupt_moderation - Init interrupt moderation
859 * @ena_dev: ENA communication layer struct
860 *
861 * @return - 0 on success, negative value on failure.
862 */
863int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev);
864
865/* ena_com_destroy_interrupt_moderation - Destroy interrupt moderation resources
866 * @ena_dev: ENA communication layer struct
867 */
868void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev);
869
870/* ena_com_interrupt_moderation_supported - Return if interrupt moderation
871 * capability is supported by the device.
872 *
873 * @return - supported or not.
874 */
875bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev);
876
877/* ena_com_config_default_interrupt_moderation_table - Restore the interrupt
878 * moderation table back to the default parameters.
879 * @ena_dev: ENA communication layer struct
880 */
881void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev);
882
883/* ena_com_update_nonadaptive_moderation_interval_tx - Update the
884 * non-adaptive interval in Tx direction.
885 * @ena_dev: ENA communication layer struct
886 * @tx_coalesce_usecs: Interval in usec.
887 *
888 * @return - 0 on success, negative value on failure.
889 */
890int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
891						      u32 tx_coalesce_usecs);
892
893/* ena_com_update_nonadaptive_moderation_interval_rx - Update the
894 * non-adaptive interval in Rx direction.
895 * @ena_dev: ENA communication layer struct
896 * @rx_coalesce_usecs: Interval in usec.
897 *
898 * @return - 0 on success, negative value on failure.
899 */
900int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
901						      u32 rx_coalesce_usecs);
902
903/* ena_com_get_nonadaptive_moderation_interval_tx - Retrieve the
904 * non-adaptive interval in Tx direction.
905 * @ena_dev: ENA communication layer struct
906 *
907 * @return - interval in usec
908 */
909unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev);
910
911/* ena_com_get_nonadaptive_moderation_interval_rx - Retrieve the
912 * non-adaptive interval in Rx direction.
913 * @ena_dev: ENA communication layer struct
914 *
915 * @return - interval in usec
916 */
917unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev);
918
919/* ena_com_init_intr_moderation_entry - Update a single entry in the interrupt
920 * moderation table.
921 * @ena_dev: ENA communication layer struct
922 * @level: Interrupt moderation table level
923 * @entry: Entry value
924 *
925 * Update a single entry in the interrupt moderation table.
926 */
927void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
928					enum ena_intr_moder_level level,
929					struct ena_intr_moder_entry *entry);
930
931/* ena_com_get_intr_moderation_entry - Init ena_intr_moder_entry.
932 * @ena_dev: ENA communication layer struct
933 * @level: Interrupt moderation table level
934 * @entry: Entry to fill.
935 *
936 * Initialize the entry according to the adaptive interrupt moderation table.
937 */
938void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
939				       enum ena_intr_moder_level level,
940				       struct ena_intr_moder_entry *entry);
941
942static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
943{
944	return ena_dev->adaptive_coalescing;
945}
946
947static inline void ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev)
948{
949	ena_dev->adaptive_coalescing = true;
950}
951
952static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev)
953{
954	ena_dev->adaptive_coalescing = false;
955}
956
957/* ena_com_calculate_interrupt_delay - Calculate new interrupt delay
958 * @ena_dev: ENA communication layer struct
959 * @pkts: Number of packets since the last update
960 * @bytes: Number of bytes received since the last update.
961 * @smoothed_interval: Returned interval
 * @moder_tbl_idx: Current table level on input; updated to the new
 * level on return.
964 */
965static inline void ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev,
966						     unsigned int pkts,
967						     unsigned int bytes,
968						     unsigned int *smoothed_interval,
969						     unsigned int *moder_tbl_idx)
970{
971	enum ena_intr_moder_level curr_moder_idx, new_moder_idx;
972	struct ena_intr_moder_entry *curr_moder_entry;
973	struct ena_intr_moder_entry *pred_moder_entry;
974	struct ena_intr_moder_entry *new_moder_entry;
975	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
976	unsigned int interval;
977
978	/* We apply adaptive moderation on Rx path only.
979	 * Tx uses static interrupt moderation.
980	 */
981	if (!pkts || !bytes)
982		/* Tx interrupt, or spurious interrupt,
983		 * in both cases we just use same delay values
984		 */
985		return;
986
987	curr_moder_idx = (enum ena_intr_moder_level)(*moder_tbl_idx);
988	if (unlikely(curr_moder_idx >=  ENA_INTR_MAX_NUM_OF_LEVELS)) {
989		ena_trc_err("Wrong moderation index %u\n", curr_moder_idx);
990		return;
991	}
992
993	curr_moder_entry = &intr_moder_tbl[curr_moder_idx];
994	new_moder_idx = curr_moder_idx;
995
996	if (curr_moder_idx == ENA_INTR_MODER_LOWEST) {
997		if ((pkts > curr_moder_entry->pkts_per_interval) ||
998		    (bytes > curr_moder_entry->bytes_per_interval))
999			new_moder_idx =
1000				(enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE);
1001	} else {
1002		pred_moder_entry = &intr_moder_tbl[curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE];
1003
1004		if ((pkts <= pred_moder_entry->pkts_per_interval) ||
1005		    (bytes <= pred_moder_entry->bytes_per_interval))
1006			new_moder_idx =
1007				(enum ena_intr_moder_level)(curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE);
1008		else if ((pkts > curr_moder_entry->pkts_per_interval) ||
1009			 (bytes > curr_moder_entry->bytes_per_interval)) {
1010			if (curr_moder_idx != ENA_INTR_MODER_HIGHEST)
1011				new_moder_idx =
1012					(enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE);
1013		}
1014	}
1015	new_moder_entry = &intr_moder_tbl[new_moder_idx];
1016
1017	interval = new_moder_entry->intr_moder_interval;
1018	*smoothed_interval = (
1019		(interval * ENA_INTR_DELAY_NEW_VALUE_WEIGHT +
1020		ENA_INTR_DELAY_OLD_VALUE_WEIGHT * (*smoothed_interval)) + 5) /
1021		10;
1022
1023	*moder_tbl_idx = new_moder_idx;
1024}
1025
1026/* ena_com_update_intr_reg - Prepare interrupt register
1027 * @intr_reg: interrupt register to update.
1028 * @rx_delay_interval: Rx interval in usecs
1029 * @tx_delay_interval: Tx interval in usecs
 * @unmask: unmask enable/disable
1031 *
1032 * Prepare interrupt update register with the supplied parameters.
1033 */
1034static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
1035					   u32 rx_delay_interval,
1036					   u32 tx_delay_interval,
1037					   bool unmask)
1038{
1039	intr_reg->intr_control = 0;
1040	intr_reg->intr_control |= rx_delay_interval &
1041		ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
1042
1043	intr_reg->intr_control |=
1044		(tx_delay_interval << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)
1045		& ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
1046
1047	if (unmask)
1048		intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
1049}
1050
1051#if defined(__cplusplus)
1052}
1053#endif /* __cplusplus */
1054#endif /* !(ENA_COM) */
1055