ena_com.h revision 368013
1/*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * * Neither the name of copyright holder nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#ifndef ENA_COM
35#define ENA_COM
36
37#include "ena_plat.h"
38
39#define ENA_MAX_NUM_IO_QUEUES 128U
40/* We need two queues for each IO (one for Tx and one for Rx) */
41#define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES))
42
43#define ENA_MAX_HANDLERS 256
44
45#define ENA_MAX_PHYS_ADDR_SIZE_BITS 48
46
47/* Unit in usec */
48#define ENA_REG_READ_TIMEOUT 200000
49
50#define ADMIN_SQ_SIZE(depth)	((depth) * sizeof(struct ena_admin_aq_entry))
51#define ADMIN_CQ_SIZE(depth)	((depth) * sizeof(struct ena_admin_acq_entry))
52#define ADMIN_AENQ_SIZE(depth)	((depth) * sizeof(struct ena_admin_aenq_entry))
53
54#define ENA_CDESC_RING_SIZE_ALIGNMENT	(1 << 12) /* 4K */
55
56/*****************************************************************************/
57/*****************************************************************************/
58/* ENA adaptive interrupt moderation settings */
59
60#define ENA_INTR_INITIAL_TX_INTERVAL_USECS ENA_INTR_INITIAL_TX_INTERVAL_USECS_PLAT
61#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0
62#define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1
63
64#define ENA_HASH_KEY_SIZE 40
65
66#define ENA_HW_HINTS_NO_TIMEOUT	0xFFFF
67
68#define ENA_FEATURE_MAX_QUEUE_EXT_VER 1
69
70struct ena_llq_configurations {
71	enum ena_admin_llq_header_location llq_header_location;
72	enum ena_admin_llq_ring_entry_size llq_ring_entry_size;
73	enum ena_admin_llq_stride_ctrl  llq_stride_ctrl;
74	enum ena_admin_llq_num_descs_before_header llq_num_decs_before_header;
75	u16 llq_ring_entry_size_value;
76};
77
78enum queue_direction {
79	ENA_COM_IO_QUEUE_DIRECTION_TX,
80	ENA_COM_IO_QUEUE_DIRECTION_RX
81};
82
83struct ena_com_buf {
84	dma_addr_t paddr; /**< Buffer physical address */
85	u16 len; /**< Buffer length in bytes */
86};
87
88struct ena_com_rx_buf_info {
89	u16 len;
90	u16 req_id;
91};
92
93struct ena_com_io_desc_addr {
94	u8 __iomem *pbuf_dev_addr; /* LLQ address */
95	u8 *virt_addr;
96	dma_addr_t phys_addr;
97	ena_mem_handle_t mem_handle;
98};
99
100struct ena_com_tx_meta {
101	u16 mss;
102	u16 l3_hdr_len;
103	u16 l3_hdr_offset;
104	u16 l4_hdr_len; /* In words */
105};
106
107struct ena_com_llq_info {
108	u16 header_location_ctrl;
109	u16 desc_stride_ctrl;
110	u16 desc_list_entry_size_ctrl;
111	u16 desc_list_entry_size;
112	u16 descs_num_before_header;
113	u16 descs_per_entry;
114	u16 max_entries_in_tx_burst;
115	bool disable_meta_caching;
116};
117
118struct ena_com_io_cq {
119	struct ena_com_io_desc_addr cdesc_addr;
120	void *bus;
121
122	/* Interrupt unmask register */
123	u32 __iomem *unmask_reg;
124
125	/* The completion queue head doorbell register */
126	u32 __iomem *cq_head_db_reg;
127
128	/* numa configuration register (for TPH) */
129	u32 __iomem *numa_node_cfg_reg;
130
131	/* The value to write to the above register to unmask
132	 * the interrupt of this queue
133	 */
134	u32 msix_vector;
135
136	enum queue_direction direction;
137
138	/* holds the number of cdesc of the current packet */
139	u16 cur_rx_pkt_cdesc_count;
140	/* save the first cdesc idx of the current packet */
141	u16 cur_rx_pkt_cdesc_start_idx;
142
143	u16 q_depth;
144	/* Caller qid */
145	u16 qid;
146
147	/* Device queue index */
148	u16 idx;
149	u16 head;
150	u16 last_head_update;
151	u8 phase;
152	u8 cdesc_entry_size_in_bytes;
153
154} ____cacheline_aligned;
155
156struct ena_com_io_bounce_buffer_control {
157	u8 *base_buffer;
158	u16 next_to_use;
159	u16 buffer_size;
160	u16 buffers_num;  /* Must be a power of 2 */
161};
162
163/* This struct is used to track the current location of the next llq entry */
164struct ena_com_llq_pkt_ctrl {
165	u8 *curr_bounce_buf;
166	u16 idx;
167	u16 descs_left_in_line;
168};
169
170struct ena_com_io_sq {
171	struct ena_com_io_desc_addr desc_addr;
172	void *bus;
173
174	u32 __iomem *db_addr;
175	u8 __iomem *header_addr;
176
177	enum queue_direction direction;
178	enum ena_admin_placement_policy_type mem_queue_type;
179
180	bool disable_meta_caching;
181
182	u32 msix_vector;
183	struct ena_com_tx_meta cached_tx_meta;
184	struct ena_com_llq_info llq_info;
185	struct ena_com_llq_pkt_ctrl llq_buf_ctrl;
186	struct ena_com_io_bounce_buffer_control bounce_buf_ctrl;
187
188	u16 q_depth;
189	u16 qid;
190
191	u16 idx;
192	u16 tail;
193	u16 next_to_comp;
194	u16 llq_last_copy_tail;
195	u32 tx_max_header_size;
196	u8 phase;
197	u8 desc_entry_size;
198	u8 dma_addr_bits;
199	u16 entries_in_tx_burst_left;
200} ____cacheline_aligned;
201
202struct ena_com_admin_cq {
203	struct ena_admin_acq_entry *entries;
204	ena_mem_handle_t mem_handle;
205	dma_addr_t dma_addr;
206
207	u16 head;
208	u8 phase;
209};
210
211struct ena_com_admin_sq {
212	struct ena_admin_aq_entry *entries;
213	ena_mem_handle_t mem_handle;
214	dma_addr_t dma_addr;
215
216	u32 __iomem *db_addr;
217
218	u16 head;
219	u16 tail;
220	u8 phase;
221
222};
223
224struct ena_com_stats_admin {
225	u64 aborted_cmd;
226	u64 submitted_cmd;
227	u64 completed_cmd;
228	u64 out_of_space;
229	u64 no_completion;
230};
231
232struct ena_com_admin_queue {
233	void *q_dmadev;
234	void *bus;
235	struct ena_com_dev *ena_dev;
236	ena_spinlock_t q_lock; /* spinlock for the admin queue */
237
238	struct ena_comp_ctx *comp_ctx;
239	u32 completion_timeout;
240	u16 q_depth;
241	struct ena_com_admin_cq cq;
242	struct ena_com_admin_sq sq;
243
244	/* Indicate if the admin queue should poll for completion */
245	bool polling;
246
247	/* Define if fallback to polling mode should occur */
248	bool auto_polling;
249
250	u16 curr_cmd_id;
251
252	/* Indicate that the ena was initialized and can
253	 * process new admin commands
254	 */
255	bool running_state;
256
257	/* Count the number of outstanding admin commands */
258	ena_atomic32_t outstanding_cmds;
259
260	struct ena_com_stats_admin stats;
261};
262
263struct ena_aenq_handlers;
264
265struct ena_com_aenq {
266	u16 head;
267	u8 phase;
268	struct ena_admin_aenq_entry *entries;
269	dma_addr_t dma_addr;
270	ena_mem_handle_t mem_handle;
271	u16 q_depth;
272	struct ena_aenq_handlers *aenq_handlers;
273};
274
275struct ena_com_mmio_read {
276	struct ena_admin_ena_mmio_req_read_less_resp *read_resp;
277	dma_addr_t read_resp_dma_addr;
278	ena_mem_handle_t read_resp_mem_handle;
279	u32 reg_read_to; /* in us */
280	u16 seq_num;
281	bool readless_supported;
282	/* spin lock to ensure a single outstanding read */
283	ena_spinlock_t lock;
284};
285
286struct ena_rss {
287	/* Indirect table */
288	u16 *host_rss_ind_tbl;
289	struct ena_admin_rss_ind_table_entry *rss_ind_tbl;
290	dma_addr_t rss_ind_tbl_dma_addr;
291	ena_mem_handle_t rss_ind_tbl_mem_handle;
292	u16 tbl_log_size;
293
294	/* Hash key */
295	enum ena_admin_hash_functions hash_func;
296	struct ena_admin_feature_rss_flow_hash_control *hash_key;
297	dma_addr_t hash_key_dma_addr;
298	ena_mem_handle_t hash_key_mem_handle;
299	u32 hash_init_val;
300
301	/* Flow Control */
302	struct ena_admin_feature_rss_hash_control *hash_ctrl;
303	dma_addr_t hash_ctrl_dma_addr;
304	ena_mem_handle_t hash_ctrl_mem_handle;
305
306};
307
308struct ena_host_attribute {
309	/* Debug area */
310	u8 *debug_area_virt_addr;
311	dma_addr_t debug_area_dma_addr;
312	ena_mem_handle_t debug_area_dma_handle;
313	u32 debug_area_size;
314
315	/* Host information */
316	struct ena_admin_host_info *host_info;
317	dma_addr_t host_info_dma_addr;
318	ena_mem_handle_t host_info_dma_handle;
319};
320
321/* Each ena_dev is a PCI function. */
322struct ena_com_dev {
323	struct ena_com_admin_queue admin_queue;
324	struct ena_com_aenq aenq;
325	struct ena_com_io_cq io_cq_queues[ENA_TOTAL_NUM_QUEUES];
326	struct ena_com_io_sq io_sq_queues[ENA_TOTAL_NUM_QUEUES];
327	u8 __iomem *reg_bar;
328	void __iomem *mem_bar;
329	void *dmadev;
330	void *bus;
331	ena_netdev *net_device;
332
333	enum ena_admin_placement_policy_type tx_mem_queue_type;
334	u32 tx_max_header_size;
335	u16 stats_func; /* Selected function for extended statistic dump */
336	u16 stats_queue; /* Selected queue for extended statistic dump */
337
338	struct ena_com_mmio_read mmio_read;
339
340	struct ena_rss rss;
341	u32 supported_features;
342	u32 dma_addr_bits;
343
344	struct ena_host_attribute host_attr;
345	bool adaptive_coalescing;
346	u16 intr_delay_resolution;
347
348	/* interrupt moderation intervals are in usec divided by
349	 * intr_delay_resolution, which is supplied by the device.
350	 */
351	u32 intr_moder_tx_interval;
352	u32 intr_moder_rx_interval;
353
354	struct ena_intr_moder_entry *intr_moder_tbl;
355
356	struct ena_com_llq_info llq_info;
357
358	u32 ena_min_poll_delay_us;
359};
360
361struct ena_com_dev_get_features_ctx {
362	struct ena_admin_queue_feature_desc max_queues;
363	struct ena_admin_queue_ext_feature_desc max_queue_ext;
364	struct ena_admin_device_attr_feature_desc dev_attr;
365	struct ena_admin_feature_aenq_desc aenq;
366	struct ena_admin_feature_offload_desc offload;
367	struct ena_admin_ena_hw_hints hw_hints;
368	struct ena_admin_feature_llq_desc llq;
369};
370
371struct ena_com_create_io_ctx {
372	enum ena_admin_placement_policy_type mem_queue_type;
373	enum queue_direction direction;
374	int numa_node;
375	u32 msix_vector;
376	u16 queue_size;
377	u16 qid;
378};
379
380typedef void (*ena_aenq_handler)(void *data,
381	struct ena_admin_aenq_entry *aenq_e);
382
383/* Holds aenq handlers. Indexed by AENQ event group */
384struct ena_aenq_handlers {
385	ena_aenq_handler handlers[ENA_MAX_HANDLERS];
386	ena_aenq_handler unimplemented_handler;
387};
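
/* Example (illustrative sketch): a driver typically provides one handler per
 * AENQ group it subscribes to, plus a catch-all, and passes the table to
 * ena_com_admin_init(). The handler names below are hypothetical;
 * ENA_ADMIN_LINK_CHANGE comes from ena_admin_defs.h.
 *
 *	static void example_link_change_handler(void *data,
 *						struct ena_admin_aenq_entry *aenq_e)
 *	{
 *		// inspect the link-change descriptor and update the link state
 *	}
 *
 *	static void example_unimplemented_handler(void *data,
 *						  struct ena_admin_aenq_entry *aenq_e)
 *	{
 *		// events from groups without a registered handler land here
 *	}
 *
 *	static struct ena_aenq_handlers example_aenq_handlers = {
 *		.handlers = {
 *			[ENA_ADMIN_LINK_CHANGE] = example_link_change_handler,
 *		},
 *		.unimplemented_handler = example_unimplemented_handler,
 *	};
 */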
388
389/*****************************************************************************/
390/*****************************************************************************/
391#if defined(__cplusplus)
392extern "C" {
393#endif
394
395/* ena_com_mmio_reg_read_request_init - Init the mmio reg read mechanism
396 * @ena_dev: ENA communication layer struct
397 *
398 * Initialize the register read mechanism.
399 *
400 * @note: This method must be the first stage in the initialization sequence.
401 *
402 * @return - 0 on success, negative value on failure.
403 */
404int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);
405
406/* ena_com_set_mmio_read_mode - Enable/disable the indirect mmio reg read mechanism
407 * @ena_dev: ENA communication layer struct
408 * @readless_supported: readless mode (enable/disable)
409 */
410void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev,
411				bool readless_supported);
412
413/* ena_com_mmio_reg_read_request_write_dev_addr - Write the mmio reg read return
414 * value physical address.
415 * @ena_dev: ENA communication layer struct
416 */
417void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev);
418
419/* ena_com_mmio_reg_read_request_destroy - Destroy the mmio reg read mechanism
420 * @ena_dev: ENA communication layer struct
421 */
422void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);
423
424/* ena_com_admin_init - Init the admin and the async queues
425 * @ena_dev: ENA communication layer struct
426 * @aenq_handlers: The handlers to be called upon AENQ events.
427 *
428 * Initialize the admin submission and completion queues.
429 * Initialize the asynchronous events notification queues.
430 *
431 * @return - 0 on success, negative value on failure.
432 */
433int ena_com_admin_init(struct ena_com_dev *ena_dev,
434		       struct ena_aenq_handlers *aenq_handlers);
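
/* Example (illustrative sketch): one plausible early bring-up order, with
 * error handling abbreviated. example_aenq_handlers refers to the sketch
 * above; ENA_REGS_RESET_NORMAL comes from ena_regs_defs.h. The exact
 * sequence used by a given driver may differ.
 *
 *	struct ena_com_dev_get_features_ctx get_feat_ctx;
 *	int rc;
 *
 *	rc = ena_com_mmio_reg_read_request_init(ena_dev);
 *	if (rc)
 *		return rc;
 *	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
 *	if (rc)
 *		goto err_mmio;
 *	rc = ena_com_validate_version(ena_dev);
 *	if (rc)
 *		goto err_mmio;
 *	rc = ena_com_admin_init(ena_dev, &example_aenq_handlers);
 *	if (rc)
 *		goto err_mmio;
 *	rc = ena_com_get_dev_attr_feat(ena_dev, &get_feat_ctx);
 *	if (rc)
 *		goto err_admin;
 */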
435
436/* ena_com_admin_destroy - Destroy the admin and the async events queues.
437 * @ena_dev: ENA communication layer struct
438 *
439 * @note: Before calling this method, the caller must validate that the device
440 * won't send any additional admin completions/aenq.
441 * To achieve that, a FLR is recommended.
442 */
443void ena_com_admin_destroy(struct ena_com_dev *ena_dev);
444
445/* ena_com_dev_reset - Perform an FLR (Function Level Reset) on the device.
446 * @ena_dev: ENA communication layer struct
447 * @reset_reason: The trigger that caused the reset, recorded in case of an error.
448 *
449 * @return - 0 on success, negative value on failure.
450 */
451int ena_com_dev_reset(struct ena_com_dev *ena_dev,
452		      enum ena_regs_reset_reason_types reset_reason);
453
454/* ena_com_create_io_queue - Create io queue.
455 * @ena_dev: ENA communication layer struct
456 * @ctx - create context structure
457 *
458 * Create the submission and the completion queues.
459 *
460 * @return - 0 on success, negative value on failure.
461 */
462int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
463			    struct ena_com_create_io_ctx *ctx);
464
465/* ena_com_destroy_io_queue - Destroy IO queue with the queue id - qid.
466 * @ena_dev: ENA communication layer struct
467 * @qid - the caller virtual queue id.
468 */
469void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid);
470
471/* ena_com_get_io_handlers - Return the io queue handlers
472 * @ena_dev: ENA communication layer struct
473 * @qid - the caller virtual queue id.
474 * @io_sq - IO submission queue handler
475 * @io_cq - IO completion queue handler.
476 *
477 * @return - 0 on success, negative value on failure.
478 */
479int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
480			    struct ena_com_io_sq **io_sq,
481			    struct ena_com_io_cq **io_cq);
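
/* Example (illustrative sketch): creating one Tx queue and fetching its
 * SQ/CQ handlers. The queue size, MSI-X vector and NUMA node values are
 * placeholders chosen by the caller.
 *
 *	struct ena_com_create_io_ctx ctx = {
 *		.direction = ENA_COM_IO_QUEUE_DIRECTION_TX,
 *		.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST,
 *		.qid = qid,
 *		.queue_size = 1024,
 *		.msix_vector = msix_vector,
 *		.numa_node = numa_node,
 *	};
 *	struct ena_com_io_sq *io_sq;
 *	struct ena_com_io_cq *io_cq;
 *
 *	rc = ena_com_create_io_queue(ena_dev, &ctx);
 *	if (rc)
 *		return rc;
 *	rc = ena_com_get_io_handlers(ena_dev, qid, &io_sq, &io_cq);
 *	if (rc)
 *		ena_com_destroy_io_queue(ena_dev, qid);
 */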
482
483/* ena_com_admin_aenq_enable - Enable asynchronous event notifications
484 * @ena_dev: ENA communication layer struct
485 *
486 * After this method is called, AENQ events can be received.
487 */
488void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev);
489
490/* ena_com_set_admin_running_state - Set the state of the admin queue
491 * @ena_dev: ENA communication layer struct
492 *
493 * Change the state of the admin queue (enable/disable)
494 */
495void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state);
496
497/* ena_com_get_admin_running_state - Get the admin queue state
498 * @ena_dev: ENA communication layer struct
499 *
500 * Retrieve the state of the admin queue (enabled/disabled)
501 *
502 * @return - current admin queue running state (enabled/disabled)
503 */
504bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev);
505
506/* ena_com_set_admin_polling_mode - Set the admin completion queue polling mode
507 * @ena_dev: ENA communication layer struct
508 * @polling: Enable/Disable polling mode
509 *
510 * Set the admin completion mode.
511 */
512void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);
513
514/* ena_com_get_admin_polling_mode - Get the admin completion queue polling mode
515 * @ena_dev: ENA communication layer struct
516 *
517 * Get the admin completion mode.
518 * If polling mode is on, ena_com_execute_admin_command will poll the
519 * admin completion queue for command completion; otherwise it will wait
520 * on a wait event.
521 *
522 * @return state
523 */
524bool ena_com_get_admin_polling_mode(struct ena_com_dev *ena_dev);
525
526/* ena_com_set_admin_auto_polling_mode - Enable autoswitch to polling mode
527 * @ena_dev: ENA communication layer struct
528 * @polling: Enable/Disable polling mode
529 *
530 * Set the autopolling mode.
531 * If autopolling is on, the driver will switch to polling mode when an
532 * interrupt is missed while a completion is available.
533 */
534void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
535					 bool polling);
536
537/* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
538 * @ena_dev: ENA communication layer struct
539 *
540 * This method goes over the admin completion queue and wakes up all the pending
541 * threads that wait on the commands wait event.
542 *
543 * @note: Should be called after MSI-X interrupt.
544 */
545void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev);
546
547/* ena_com_aenq_intr_handler - AENQ interrupt handler
548 * @ena_dev: ENA communication layer struct
549 *
550 * This method goes over the async event notification queue and calls the proper
551 * aenq handler.
552 */
553void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data);
554
555/* ena_com_abort_admin_commands - Abort all the outstanding admin commands.
556 * @ena_dev: ENA communication layer struct
557 *
558 * This method aborts all the outstanding admin commands.
559 * The caller should then call ena_com_wait_for_abort_completion to make sure
560 * all the commands were completed.
561 */
562void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev);
563
564/* ena_com_wait_for_abort_completion - Wait for admin commands abort.
565 * @ena_dev: ENA communication layer struct
566 *
567 * This method waits until all the outstanding admin commands are completed.
568 */
569void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev);
570
571/* ena_com_validate_version - Validate the device parameters
572 * @ena_dev: ENA communication layer struct
573 *
574 * This method verifies the device parameters are the same as the saved
575 * parameters in ena_dev.
576 * This method is useful after device reset, to validate the device mac address
577 * and the device offloads are the same as before the reset.
578 *
579 * @return - 0 on success, negative value otherwise.
580 */
581int ena_com_validate_version(struct ena_com_dev *ena_dev);
582
583/* ena_com_get_link_params - Retrieve physical link parameters.
584 * @ena_dev: ENA communication layer struct
585 * @resp: Link parameters
586 *
587 * Retrieve the physical link parameters,
588 * like speed, auto-negotiation and full duplex support.
589 *
590 * @return - 0 on Success negative value otherwise.
591 */
592int ena_com_get_link_params(struct ena_com_dev *ena_dev,
593			    struct ena_admin_get_feat_resp *resp);
594
595/* ena_com_get_dma_width - Retrieve physical dma address width the device
596 * supports.
597 * @ena_dev: ENA communication layer struct
598 *
599 * Retrieve the maximum physical address bits the device can handle.
600 *
601 * @return: > 0 on Success and negative value otherwise.
602 */
603int ena_com_get_dma_width(struct ena_com_dev *ena_dev);
604
605/* ena_com_set_aenq_config - Set aenq groups configurations
606 * @ena_dev: ENA communication layer struct
607 * @groups_flag: bit field of enum ena_admin_aenq_group flags.
608 *
609 * Configure which AENQ event groups the driver would like to receive.
610 *
611 * @return: 0 on Success and negative value otherwise.
612 */
613int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag);
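
/* Example (illustrative sketch): enabling only the AENQ groups supported by
 * both driver and device. BIT() is assumed to be provided by ena_plat.h;
 * get_feat_ctx is the context filled by ena_com_get_dev_attr_feat().
 *
 *	u32 wanted_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
 *			    BIT(ENA_ADMIN_KEEP_ALIVE) |
 *			    BIT(ENA_ADMIN_NOTIFICATION);
 *	u32 groups = wanted_groups & get_feat_ctx.aenq.supported_groups;
 *
 *	rc = ena_com_set_aenq_config(ena_dev, groups);
 */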
614
615/* ena_com_get_dev_attr_feat - Get device features
616 * @ena_dev: ENA communication layer struct
617 * @get_feat_ctx: returned context that contains the device features.
618 *
619 * @return: 0 on Success and negative value otherwise.
620 */
621int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
622			      struct ena_com_dev_get_features_ctx *get_feat_ctx);
623
624/* ena_com_get_dev_basic_stats - Get device basic statistics
625 * @ena_dev: ENA communication layer struct
626 * @stats: stats return value
627 *
628 * @return: 0 on Success and negative value otherwise.
629 */
630int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
631				struct ena_admin_basic_stats *stats);
632
633/* ena_com_get_eni_stats - Get extended network interface statistics
634 * @ena_dev: ENA communication layer struct
635 * @stats: stats return value
636 *
637 * @return: 0 on Success and negative value otherwise.
638 */
639int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
640			  struct ena_admin_eni_stats *stats);
641
642/* ena_com_set_dev_mtu - Configure the device mtu.
643 * @ena_dev: ENA communication layer struct
644 * @mtu: mtu value
645 *
646 * @return: 0 on Success and negative value otherwise.
647 */
648int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu);
649
650/* ena_com_get_offload_settings - Retrieve the device offloads capabilities
651 * @ena_dev: ENA communication layer struct
652 * @offload: offload return value
653 *
654 * @return: 0 on Success and negative value otherwise.
655 */
656int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
657				 struct ena_admin_feature_offload_desc *offload);
658
659/* ena_com_rss_init - Init RSS
660 * @ena_dev: ENA communication layer struct
661 * @log_size: indirection log size
662 *
663 * Allocate RSS/RFS resources.
664 * The caller can then configure RSS using ena_com_set_hash_function,
665 * ena_com_set_hash_ctrl and ena_com_indirect_table_set.
666 *
667 * @return: 0 on Success and negative value otherwise.
668 */
669int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);
670
671/* ena_com_rss_destroy - Destroy rss
672 * @ena_dev: ENA communication layer struct
673 *
674 * Free all the RSS/RFS resources.
675 */
676void ena_com_rss_destroy(struct ena_com_dev *ena_dev);
677
678/* ena_com_get_current_hash_function - Get RSS hash function
679 * @ena_dev: ENA communication layer struct
680 *
681 * Return the current hash function.
682 * @return: 0 or one of the ena_admin_hash_functions values.
683 */
684int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev);
685
686/* ena_com_fill_hash_function - Fill RSS hash function
687 * @ena_dev: ENA communication layer struct
688 * @func: The hash function (Toeplitz or crc)
689 * @key: Hash key (for toeplitz hash)
690 * @key_len: key length (max length 10 DW)
691 * @init_val: initial value for the hash function
692 *
693 * Fill the ena_dev resources with the desired hash function, hash key, key_len
694 * and key initial value (if needed by the hash function).
695 * To flush the key into the device the caller should call
696 * ena_com_set_hash_function.
697 *
698 * @return: 0 on Success and negative value otherwise.
699 */
700int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
701			       enum ena_admin_hash_functions func,
702			       const u8 *key, u16 key_len, u32 init_val);
703
704/* ena_com_set_hash_function - Flush the hash function and its dependencies to
705 * the device.
706 * @ena_dev: ENA communication layer struct
707 *
708 * Flush the hash function and its dependencies (key, key length and
709 * initial value) if needed.
710 *
711 * @note: Prior to this method the caller should call ena_com_fill_hash_function
712 *
713 * @return: 0 on Success and negative value otherwise.
714 */
715int ena_com_set_hash_function(struct ena_com_dev *ena_dev);
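
/* Example (illustrative sketch): programming a Toeplitz key. The table log
 * size, the random-key helper and the init value are caller choices, not
 * defined in this file.
 *
 *	u8 key[ENA_HASH_KEY_SIZE];
 *	int rc;
 *
 *	fill_random_bytes(key, sizeof(key));	// hypothetical helper
 *	rc = ena_com_rss_init(ena_dev, rss_table_log_size);
 *	if (rc)
 *		return rc;
 *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
 *					key, sizeof(key), 0xFFFFFFFF);
 *	if (rc)
 *		goto err_rss;
 *	rc = ena_com_set_hash_function(ena_dev);
 */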
716
717/* ena_com_get_hash_function - Retrieve the hash function from the device.
718 * @ena_dev: ENA communication layer struct
719 * @func: hash function
720 *
721 * Retrieve the hash function from the device.
722 *
723 * @note: If the caller called ena_com_fill_hash_function but didn't flush
724 * it to the device, the new configuration will be lost.
725 *
726 * @return: 0 on Success and negative value otherwise.
727 */
728int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
729			      enum ena_admin_hash_functions *func);
730
731/* ena_com_get_hash_key - Retrieve the hash key
732 * @ena_dev: ENA communication layer struct
733 * @key: hash key
734 *
735 * Retrieve the hash key.
736 *
737 * @note: If the caller called ena_com_fill_hash_key but didn't flush
738 * it to the device, the new configuration will be lost.
739 *
740 * @return: 0 on Success and negative value otherwise.
741 */
742int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key);
743/* ena_com_fill_hash_ctrl - Fill RSS hash control
744 * @ena_dev: ENA communication layer struct.
745 * @proto: The protocol to configure.
746 * @hash_fields: bit mask of ena_admin_flow_hash_fields
747 *
748 * Fill the ena_dev resources with the desired hash control (the ethernet
749 * fields that take part in the hash) for a specific protocol.
750 * To flush the hash control to the device, the caller should call
751 * ena_com_set_hash_ctrl.
752 *
753 * @return: 0 on Success and negative value otherwise.
754 */
755int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
756			   enum ena_admin_flow_hash_proto proto,
757			   u16 hash_fields);
758
759/* ena_com_set_hash_ctrl - Flush the hash control resources to the device.
760 * @ena_dev: ENA communication layer struct
761 *
762 * Flush the hash control (the ethernet fields that take part in the hash)
763 *
764 * @note: Prior to this method the caller should call ena_com_fill_hash_ctrl.
765 *
766 * @return: 0 on Success and negative value otherwise.
767 */
768int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev);
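
/* Example (illustrative sketch): hashing TCP/IPv4 flows on the 4-tuple and
 * flushing the configuration. The protocol and field constants come from
 * ena_admin_defs.h.
 *
 *	rc = ena_com_fill_hash_ctrl(ena_dev, ENA_ADMIN_RSS_TCP4,
 *				    ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
 *				    ENA_ADMIN_RSS_L4_SP | ENA_ADMIN_RSS_L4_DP);
 *	if (rc)
 *		return rc;
 *	rc = ena_com_set_hash_ctrl(ena_dev);
 */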
769
770/* ena_com_get_hash_ctrl - Retrieve the hash control from the device.
771 * @ena_dev: ENA communication layer struct
772 * @proto: The protocol to retrieve.
773 * @fields: bit mask of ena_admin_flow_hash_fields.
774 *
775 * Retrieve the hash control from the device.
776 *
777 * @note: If the caller called ena_com_fill_hash_ctrl but didn't flush
778 * it to the device, the new configuration will be lost.
779 *
780 * @return: 0 on Success and negative value otherwise.
781 */
782int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
783			  enum ena_admin_flow_hash_proto proto,
784			  u16 *fields);
785
786/* ena_com_set_default_hash_ctrl - Set the hash control to a default
787 * configuration.
788 * @ena_dev: ENA communication layer struct
789 *
790 * Fill the ena_dev resources with the default hash control configuration.
791 * To flush the hash control to the device, the caller should call
792 * ena_com_set_hash_ctrl.
793 *
794 * @return: 0 on Success and negative value otherwise.
795 */
796int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev);
797
798/* ena_com_indirect_table_fill_entry - Fill a single entry in the RSS
799 * indirection table
800 * @ena_dev: ENA communication layer struct.
801 * @entry_idx - indirection table entry.
802 * @entry_value - redirection value
803 *
804 * Fill a single entry of the RSS indirection table in the ena_dev resources.
805 * To flush the indirection table to the device, the caller should call
806 * ena_com_indirect_table_set.
807 *
808 * @return: 0 on Success and negative value otherwise.
809 */
810int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
811				      u16 entry_idx, u16 entry_value);
812
813/* ena_com_indirect_table_set - Flush the indirection table to the device.
814 * @ena_dev: ENA communication layer struct
815 *
816 * Flush the indirection hash control to the device.
817 * Prior to this method the caller should call ena_com_indirect_table_fill_entry
818 *
819 * @return: 0 on Success and negative value otherwise.
820 */
821int ena_com_indirect_table_set(struct ena_com_dev *ena_dev);
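
/* Example (illustrative sketch): spreading the indirection table over
 * num_queues Rx queues and flushing it. The table has (1 << log_size)
 * entries, matching the size passed to ena_com_rss_init();
 * rx_qid_to_dev_idx() stands for a driver-side mapping from an Rx queue
 * number to a device queue index and is hypothetical.
 *
 *	for (i = 0; i < (1 << log_size); i++) {
 *		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
 *						       rx_qid_to_dev_idx(i % num_queues));
 *		if (rc)
 *			return rc;
 *	}
 *	rc = ena_com_indirect_table_set(ena_dev);
 */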
822
823/* ena_com_indirect_table_get - Retrieve the indirection table from the device.
824 * @ena_dev: ENA communication layer struct
825 * @ind_tbl: indirection table
826 *
827 * Retrieve the RSS indirection table from the device.
828 *
829 * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flush
830 * it to the device, the new configuration will be lost.
831 *
832 * @return: 0 on Success and negative value otherwise.
833 */
834int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl);
835
836/* ena_com_allocate_host_info - Allocate host info resources.
837 * @ena_dev: ENA communication layer struct
838 *
839 * @return: 0 on Success and negative value otherwise.
840 */
841int ena_com_allocate_host_info(struct ena_com_dev *ena_dev);
842
843/* ena_com_allocate_debug_area - Allocate debug area.
844 * @ena_dev: ENA communication layer struct
845 * @debug_area_size - debug area size.
846 *
847 * @return: 0 on Success and negative value otherwise.
848 */
849int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
850				u32 debug_area_size);
851
852/* ena_com_delete_debug_area - Free the debug area resources.
853 * @ena_dev: ENA communication layer struct
854 *
855 * Free the allocated debug area.
856 */
857void ena_com_delete_debug_area(struct ena_com_dev *ena_dev);
858
859/* ena_com_delete_host_info - Free the host info resources.
860 * @ena_dev: ENA communication layer struct
861 *
862 * Free the allocated host info.
863 */
864void ena_com_delete_host_info(struct ena_com_dev *ena_dev);
865
866/* ena_com_set_host_attributes - Update the device with the host
867 * attributes (debug area and host info) base address.
868 * @ena_dev: ENA communication layer struct
869 *
870 * @return: 0 on Success and negative value otherwise.
871 */
872int ena_com_set_host_attributes(struct ena_com_dev *ena_dev);
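
/* Example (illustrative sketch): allocating the host attributes, filling the
 * host info (OS type, driver version, ...) and pushing the result to the
 * device. The host_info fields come from struct ena_admin_host_info in
 * ena_admin_defs.h; debug_area_size is a caller choice.
 *
 *	rc = ena_com_allocate_host_info(ena_dev);
 *	if (rc)
 *		return rc;
 *	rc = ena_com_allocate_debug_area(ena_dev, debug_area_size);
 *	if (rc)
 *		goto err_host_info;
 *	// fill ena_dev->host_attr.host_info fields here
 *	rc = ena_com_set_host_attributes(ena_dev);
 *	if (rc)
 *		goto err_debug_area;
 */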
873
874/* ena_com_create_io_cq - Create io completion queue.
875 * @ena_dev: ENA communication layer struct
876 * @io_cq - io completion queue handler
877 *
878 * Create IO completion queue.
879 *
880 * @return - 0 on success, negative value on failure.
881 */
882int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
883			 struct ena_com_io_cq *io_cq);
884
885/* ena_com_destroy_io_cq - Destroy io completion queue.
886 * @ena_dev: ENA communication layer struct
887 * @io_cq - io completion queue handler
888 *
889 * Destroy IO completion queue.
890 *
891 * @return - 0 on success, negative value on failure.
892 */
893int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
894			  struct ena_com_io_cq *io_cq);
895
896/* ena_com_execute_admin_command - Execute admin command
897 * @admin_queue: admin queue.
898 * @cmd: the admin command to execute.
899 * @cmd_size: the command size.
900 * @cmd_completion: command completion return value.
901 * @cmd_comp_size: command completion size.
902 *
903 * Submit an admin command and then wait until the device returns a
904 * completion.
905 * The completion will be copied into cmd_comp.
906 *
907 * @return - 0 on success, negative value on failure.
908 */
909int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
910				  struct ena_admin_aq_entry *cmd,
911				  size_t cmd_size,
912				  struct ena_admin_acq_entry *cmd_comp,
913				  size_t cmd_comp_size);
914
915/* ena_com_init_interrupt_moderation - Init interrupt moderation
916 * @ena_dev: ENA communication layer struct
917 *
918 * @return - 0 on success, negative value on failure.
919 */
920int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev);
921
922/* ena_com_interrupt_moderation_supported - Return whether interrupt moderation
923 * is supported by the device.
924 *
925 * @return - supported or not.
926 */
927bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev);
928
929/* ena_com_update_nonadaptive_moderation_interval_tx - Update the
930 * non-adaptive interval in Tx direction.
931 * @ena_dev: ENA communication layer struct
932 * @tx_coalesce_usecs: Interval in usec.
933 *
934 * @return - 0 on success, negative value on failure.
935 */
936int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
937						      u32 tx_coalesce_usecs);
938
939/* ena_com_update_nonadaptive_moderation_interval_rx - Update the
940 * non-adaptive interval in Rx direction.
941 * @ena_dev: ENA communication layer struct
942 * @rx_coalesce_usecs: Interval in usec.
943 *
944 * @return - 0 on success, negative value on failure.
945 */
946int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
947						      u32 rx_coalesce_usecs);
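
/* Example (illustrative sketch): applying static coalescing values, e.g. from
 * an ethtool-style request. Both intervals are in microseconds; the values
 * below are placeholders.
 *
 *	rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev, 64);
 *	if (rc)
 *		return rc;
 *	rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, 0);
 */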
948
949/* ena_com_get_nonadaptive_moderation_interval_tx - Retrieve the
950 * non-adaptive interval in Tx direction.
951 * @ena_dev: ENA communication layer struct
952 *
953 * @return - interval in usec
954 */
955unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev);
956
957/* ena_com_get_nonadaptive_moderation_interval_rx - Retrieve the
958 * non-adaptive interval in Rx direction.
959 * @ena_dev: ENA communication layer struct
960 *
961 * @return - interval in usec
962 */
963unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev);
964
965/* ena_com_config_dev_mode - Configure the placement policy of the device.
966 * @ena_dev: ENA communication layer struct
967 * @llq_features: LLQ feature descriptor, retrieved via
968 *		   ena_com_get_dev_attr_feat.
969 * @llq_default_config: The default driver LLQ parameters configuration
970 */
971int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
972			    struct ena_admin_feature_llq_desc *llq_features,
973			    struct ena_llq_configurations *llq_default_config);
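
/* Example (illustrative sketch): a common default LLQ configuration, passed
 * to ena_com_config_dev_mode() together with the llq descriptor returned by
 * ena_com_get_dev_attr_feat(). The enum values come from ena_admin_defs.h;
 * the actual defaults are a driver choice.
 *
 *	struct ena_llq_configurations llq_config = {
 *		.llq_header_location = ENA_ADMIN_INLINE_HEADER,
 *		.llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY,
 *		.llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B,
 *		.llq_ring_entry_size_value = 128,
 *		.llq_num_decs_before_header =
 *		    ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2,
 *	};
 *
 *	rc = ena_com_config_dev_mode(ena_dev, &get_feat_ctx.llq, &llq_config);
 */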
974
975/* ena_com_io_sq_to_ena_dev - Extract ena_com_dev using contained field io_sq.
976 * @io_sq: IO submit queue struct
977 *
978 * @return - ena_com_dev struct extracted from io_sq
979 */
980static inline struct ena_com_dev *ena_com_io_sq_to_ena_dev(struct ena_com_io_sq *io_sq)
981{
982	return container_of(io_sq, struct ena_com_dev, io_sq_queues[io_sq->qid]);
983}
984
985/* ena_com_io_cq_to_ena_dev - Extract ena_com_dev using contained field io_cq.
986 * @io_cq: IO completion queue struct
987 *
988 * @return - ena_com_dev struct extracted from io_cq
989 */
990static inline struct ena_com_dev *ena_com_io_cq_to_ena_dev(struct ena_com_io_cq *io_cq)
991{
992	return container_of(io_cq, struct ena_com_dev, io_cq_queues[io_cq->qid]);
993}
994
995static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
996{
997	return ena_dev->adaptive_coalescing;
998}
999
1000static inline void ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev)
1001{
1002	ena_dev->adaptive_coalescing = true;
1003}
1004
1005static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev)
1006{
1007	ena_dev->adaptive_coalescing = false;
1008}
1009
1010/* ena_com_update_intr_reg - Prepare interrupt register
1011 * @intr_reg: interrupt register to update.
1012 * @rx_delay_interval: Rx interval in usecs
1013 * @tx_delay_interval: Tx interval in usecs
1014 * @unmask: unmask enable/disable
1015 *
1016 * Prepare interrupt update register with the supplied parameters.
1017 */
1018static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
1019					   u32 rx_delay_interval,
1020					   u32 tx_delay_interval,
1021					   bool unmask)
1022{
1023	intr_reg->intr_control = 0;
1024	intr_reg->intr_control |= rx_delay_interval &
1025		ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
1026
1027	intr_reg->intr_control |=
1028		(tx_delay_interval << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)
1029		& ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
1030
1031	if (unmask)
1032		intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
1033}
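
/* Example (illustrative sketch): unmasking a queue's interrupt from the
 * datapath after processing completions. ena_com_unmask_intr() is declared
 * in ena_eth_com.h; the interval values are placeholders.
 *
 *	struct ena_eth_io_intr_reg intr_reg;
 *
 *	ena_com_update_intr_reg(&intr_reg, rx_interval_usecs,
 *				tx_interval_usecs, true);
 *	ena_com_unmask_intr(io_cq, &intr_reg);
 */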
1034
1035static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer_control *bounce_buf_ctrl)
1036{
1037	u16 size, buffers_num;
1038	u8 *buf;
1039
1040	size = bounce_buf_ctrl->buffer_size;
1041	buffers_num = bounce_buf_ctrl->buffers_num;
1042
1043	buf = bounce_buf_ctrl->base_buffer +
1044		(bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size;
1045
1046	prefetchw(bounce_buf_ctrl->base_buffer +
1047		(bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size);
1048
1049	return buf;
1050}
1051
1052#ifdef ENA_EXTENDED_STATS
1053int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
1054				   u32 len);
1055
1056int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
1057					  u32 funct_queue);
1058#endif
1059#if defined(__cplusplus)
1060}
1061#endif /* __cplusplus */
1062#endif /* !(ENA_COM) */
1063