/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/qlnx/qlnxe/ecore_dev_api.h 316485 2017-04-04 06:16:59Z davidcs $
 *
 */


#ifndef __ECORE_DEV_API_H__
#define __ECORE_DEV_API_H__

#include "ecore_status.h"
#include "ecore_chain.h"
#include "ecore_int_api.h"

struct ecore_wake_info {
	u32 wk_info;
	u32 wk_details;
	u32 wk_pkt_len;
	u8  wk_buffer[256];
};

/**
 * @brief ecore_init_dp - initialize the debug level
 *
 * @param p_dev
 * @param dp_module
 * @param dp_level
 * @param dp_ctx
 */
void ecore_init_dp(struct ecore_dev *p_dev,
		   u32 dp_module,
		   u8 dp_level,
		   void *dp_ctx);

/**
 * @brief ecore_init_struct - initialize the device structure to
 *        its defaults
 *
 * @param p_dev
 */
void ecore_init_struct(struct ecore_dev *p_dev);

/**
 * @brief ecore_resc_free -
 *
 * @param p_dev
 */
void ecore_resc_free(struct ecore_dev *p_dev);

/**
 * @brief ecore_resc_alloc -
 *
 * @param p_dev
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);

/**
 * @brief ecore_resc_setup -
 *
 * @param p_dev
 */
void ecore_resc_setup(struct ecore_dev *p_dev);

enum ecore_override_force_load {
	ECORE_OVERRIDE_FORCE_LOAD_NONE,
	ECORE_OVERRIDE_FORCE_LOAD_ALWAYS,
	ECORE_OVERRIDE_FORCE_LOAD_NEVER,
};

struct ecore_drv_load_params {
	/* Indicates whether the driver is running over a crash kernel.
	 * As part of the load request, this will be used for providing the
	 * driver role to the MFW.
	 * In case of a crash kernel over PDA - this should be set to false.
	 */
	bool is_crash_kernel;

	/* The timeout value that the MFW should use when locking the engine for
	 * the driver load process.
	 * A value of '0' means the default value, and '255' means no timeout.
	 */
	u8 mfw_timeout_val;
#define ECORE_LOAD_REQ_LOCK_TO_DEFAULT	0
#define ECORE_LOAD_REQ_LOCK_TO_NONE	255

	/* Avoid engine reset when first PF loads on it */
	bool avoid_eng_reset;

	/* Allow overriding the default force load behavior */
	enum ecore_override_force_load override_force_load;
};

struct ecore_hw_init_params {
	/* Tunneling parameters */
	struct ecore_tunnel_info *p_tunn;

	bool b_hw_start;

	/* Interrupt mode [msix, inta, etc.] to use */
	enum ecore_int_mode int_mode;

	/* NPAR tx switching to be used for vports configured for tx-switching */
	bool allow_npar_tx_switch;

	/* Binary fw data pointer in binary fw file */
	const u8 *bin_fw_data;

	/* Driver load parameters */
	struct ecore_drv_load_params *p_drv_load_params;
};

/**
 * @brief ecore_hw_init -
 *
 * @param p_dev
 * @param p_params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
				   struct ecore_hw_init_params *p_params);
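
/*
 * Example - a minimal sketch of a typical load flow, assuming the device has
 * already been probed via ecore_hw_prepare(). ECORE_INT_MODE_MSIX comes from
 * ecore_int_api.h; fw_data and all chosen values here are illustrative.
 *
 *	struct ecore_drv_load_params load_params;
 *	struct ecore_hw_init_params init_params;
 *
 *	OSAL_MEMSET(&load_params, 0, sizeof(load_params));
 *	load_params.is_crash_kernel = false;
 *	load_params.mfw_timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
 *	load_params.avoid_eng_reset = false;
 *	load_params.override_force_load = ECORE_OVERRIDE_FORCE_LOAD_NONE;
 *
 *	OSAL_MEMSET(&init_params, 0, sizeof(init_params));
 *	init_params.b_hw_start = true;
 *	init_params.int_mode = ECORE_INT_MODE_MSIX;
 *	init_params.allow_npar_tx_switch = true;
 *	init_params.bin_fw_data = fw_data;
 *	init_params.p_drv_load_params = &load_params;
 *
 *	rc = ecore_hw_init(p_dev, &init_params);
 */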

/**
 * @brief ecore_hw_timers_stop_all -
 *
 * @param p_dev
 *
 * @return void
 */
void ecore_hw_timers_stop_all(struct ecore_dev *p_dev);

/**
 * @brief ecore_hw_stop -
 *
 * @param p_dev
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev);

/**
 * @brief ecore_hw_stop_fastpath - should be called in case slowpath is
 *        still required for the device, but fastpath is not.
 *
 * @param p_dev
 *
 */
void ecore_hw_stop_fastpath(struct ecore_dev *p_dev);

/**
 * @brief ecore_hw_hibernate_prepare - should be called when the system
 *        is going into the hibernate state
 *
 * @param p_dev
 *
 */
void ecore_hw_hibernate_prepare(struct ecore_dev *p_dev);

/**
 * @brief ecore_hw_hibernate_resume - should be called when the system is
 *        resuming from the D3 power state and before calling ecore_hw_init.
 *
 * @param p_dev
 *
 */
void ecore_hw_hibernate_resume(struct ecore_dev *p_dev);

/**
 * @brief ecore_hw_start_fastpath - restart fastpath traffic, only if
 *        hw_stop_fastpath was called
 *
 * @param p_hwfn
 *
 */
void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);

enum ecore_hw_prepare_result {
	ECORE_HW_PREPARE_SUCCESS,

	/* FAILED results indicate probe has failed & cleaned up */
	ECORE_HW_PREPARE_FAILED_ENG2,
	ECORE_HW_PREPARE_FAILED_ME,
	ECORE_HW_PREPARE_FAILED_MEM,
	ECORE_HW_PREPARE_FAILED_DEV,
	ECORE_HW_PREPARE_FAILED_NVM,

	/* BAD results indicate probe passed even though something went wrong;
	 * trying to actually use the device [i.e., hw_init()] might have
	 * dire repercussions.
	 */
	ECORE_HW_PREPARE_BAD_IOV,
	ECORE_HW_PREPARE_BAD_MCP,
	ECORE_HW_PREPARE_BAD_IGU,
};

struct ecore_hw_prepare_params {
	/* Personality to initialize */
	int personality;

	/* Force the driver's default resource allocation */
	bool drv_resc_alloc;

	/* Check the reg_fifo after any register access */
	bool chk_reg_fifo;

	/* Request the MFW to initiate PF FLR */
	bool initiate_pf_flr;

	/* The OS Epoch time in seconds */
	u32 epoch;

	/* Allow the MFW to collect a crash dump */
	bool allow_mdump;

	/* Allow prepare to pass even if some initializations are failing.
	 * If set, the `p_relaxed_res' field would be set with the result,
	 * and might allow probe to pass even if there are certain issues.
	 */
	bool b_relaxed_probe;
	enum ecore_hw_prepare_result p_relaxed_res;
};

/**
 * @brief ecore_hw_prepare -
 *
 * @param p_dev
 * @param p_params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
				      struct ecore_hw_prepare_params *p_params);
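
/*
 * Example - a sketch of a relaxed probe; only the field names declared above
 * are authoritative, the chosen values are illustrative. With b_relaxed_probe
 * set, a successful return should still be qualified by p_relaxed_res.
 *
 *	struct ecore_hw_prepare_params params;
 *
 *	OSAL_MEMSET(&params, 0, sizeof(params));
 *	params.initiate_pf_flr = true;
 *	params.allow_mdump = true;
 *	params.b_relaxed_probe = true;
 *
 *	rc = ecore_hw_prepare(p_dev, &params);
 *	if (rc == ECORE_SUCCESS &&
 *	    params.p_relaxed_res != ECORE_HW_PREPARE_SUCCESS)
 *		... probe passed despite a BAD_* result; proceed with care ...
 */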

/**
 * @brief ecore_hw_remove -
 *
 * @param p_dev
 */
void ecore_hw_remove(struct ecore_dev *p_dev);

/**
 * @brief ecore_set_nwuf_reg - configure a wake-up pattern register
 *
 * @param p_dev
 * @param b_enable - enable/disable the pattern register
 * @param reg_idx - Index of the pattern register
 * @param pattern_size - size of the pattern
 * @param crc - CRC value of the pattern & mask
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_set_nwuf_reg(struct ecore_dev *p_dev,
	const bool b_enable,
	u32 reg_idx,
	u32 pattern_size,
	u32 crc);

/**
 * @brief ecore_get_wake_info - get magic packet buffer
 *
 * @param p_dev
 * @param wake_info - pointer to ecore_wake_info buffer
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_get_wake_info(struct ecore_dev *p_dev,
	struct ecore_wake_info *wake_info);

/**
 * @brief ecore_wol_buffer_clear - Clear magic packet buffer
 *
 * @param p_dev
 *
 * @return void
 */
void ecore_wol_buffer_clear(struct ecore_dev *p_dev);
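
/*
 * Example - a sketch of reading and then clearing the magic packet buffer
 * after a wake-up event; p_dev is assumed to be an initialized device.
 *
 *	struct ecore_wake_info wake_info;
 *
 *	if (ecore_get_wake_info(p_dev, &wake_info) == ECORE_SUCCESS) {
 *		... inspect wake_info.wk_info / wake_info.wk_details and up
 *		    to wake_info.wk_pkt_len bytes of wake_info.wk_buffer ...
 *	}
 *	ecore_wol_buffer_clear(p_dev);
 */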

/**
 * @brief ecore_ptt_acquire - Allocate a PTT window
 *
 * Should be called at the entry point to the driver (at the beginning of an
 * exported function)
 *
 * @param p_hwfn
 *
 * @return struct ecore_ptt *
 */
struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn);

/**
 * @brief ecore_ptt_release - Release PTT Window
 *
 * Should be called at the end of a flow - at the end of the function that
 * acquired the PTT.
 *
 * @param p_hwfn
 * @param p_ptt
 */
void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt);
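
/*
 * Example - the acquire/use/release pattern. ecore_ptt_acquire() can fail
 * if no PTT window is currently free, so the result must be checked.
 *
 *	struct ecore_ptt *p_ptt;
 *
 *	p_ptt = ecore_ptt_acquire(p_hwfn);
 *	if (p_ptt == OSAL_NULL)
 *		return ECORE_AGAIN;
 *	... access registers through p_ptt ...
 *	ecore_ptt_release(p_hwfn, p_ptt);
 */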

struct ecore_eth_stats_common {
	u64 no_buff_discards;
	u64 packet_too_big_discard;
	u64 ttl0_discard;
	u64 rx_ucast_bytes;
	u64 rx_mcast_bytes;
	u64 rx_bcast_bytes;
	u64 rx_ucast_pkts;
	u64 rx_mcast_pkts;
	u64 rx_bcast_pkts;
	u64 mftag_filter_discards;
	u64 mac_filter_discards;
	u64 tx_ucast_bytes;
	u64 tx_mcast_bytes;
	u64 tx_bcast_bytes;
	u64 tx_ucast_pkts;
	u64 tx_mcast_pkts;
	u64 tx_bcast_pkts;
	u64 tx_err_drop_pkts;
	u64 tpa_coalesced_pkts;
	u64 tpa_coalesced_events;
	u64 tpa_aborts_num;
	u64 tpa_not_coalesced_pkts;
	u64 tpa_coalesced_bytes;

	/* port */
	u64 rx_64_byte_packets;
	u64 rx_65_to_127_byte_packets;
	u64 rx_128_to_255_byte_packets;
	u64 rx_256_to_511_byte_packets;
	u64 rx_512_to_1023_byte_packets;
	u64 rx_1024_to_1518_byte_packets;
	u64 rx_crc_errors;
	u64 rx_mac_crtl_frames;
	u64 rx_pause_frames;
	u64 rx_pfc_frames;
	u64 rx_align_errors;
	u64 rx_carrier_errors;
	u64 rx_oversize_packets;
	u64 rx_jabbers;
	u64 rx_undersize_packets;
	u64 rx_fragments;
	u64 tx_64_byte_packets;
	u64 tx_65_to_127_byte_packets;
	u64 tx_128_to_255_byte_packets;
	u64 tx_256_to_511_byte_packets;
	u64 tx_512_to_1023_byte_packets;
	u64 tx_1024_to_1518_byte_packets;
	u64 tx_pause_frames;
	u64 tx_pfc_frames;
	u64 brb_truncates;
	u64 brb_discards;
	u64 rx_mac_bytes;
	u64 rx_mac_uc_packets;
	u64 rx_mac_mc_packets;
	u64 rx_mac_bc_packets;
	u64 rx_mac_frames_ok;
	u64 tx_mac_bytes;
	u64 tx_mac_uc_packets;
	u64 tx_mac_mc_packets;
	u64 tx_mac_bc_packets;
	u64 tx_mac_ctrl_frames;
};

struct ecore_eth_stats_bb {
	u64 rx_1519_to_1522_byte_packets;
	u64 rx_1519_to_2047_byte_packets;
	u64 rx_2048_to_4095_byte_packets;
	u64 rx_4096_to_9216_byte_packets;
	u64 rx_9217_to_16383_byte_packets;
	u64 tx_1519_to_2047_byte_packets;
	u64 tx_2048_to_4095_byte_packets;
	u64 tx_4096_to_9216_byte_packets;
	u64 tx_9217_to_16383_byte_packets;
	u64 tx_lpi_entry_count;
	u64 tx_total_collisions;
};

struct ecore_eth_stats_ah {
	u64 rx_1519_to_max_byte_packets;
	u64 tx_1519_to_max_byte_packets;
};

struct ecore_eth_stats {
	struct ecore_eth_stats_common common;
	union {
		struct ecore_eth_stats_bb bb;
		struct ecore_eth_stats_ah ah;
	};
};
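
/*
 * Example - a sketch of consuming the stats union above; bb and ah overlay
 * each other, so the chip family selects the valid member. The
 * ECORE_IS_BB()/ECORE_IS_AH() checks are assumed to come from ecore.h.
 *
 *	if (ECORE_IS_BB(p_dev))
 *		jumbo = stats.bb.rx_1519_to_2047_byte_packets;
 *	else
 *		jumbo = stats.ah.rx_1519_to_max_byte_packets;
 */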

enum ecore_dmae_address_type_t {
	ECORE_DMAE_ADDRESS_HOST_VIRT,
	ECORE_DMAE_ADDRESS_HOST_PHYS,
	ECORE_DMAE_ADDRESS_GRC
};

/* Values for the `flags' field of struct ecore_dmae_params. If the
 * ECORE_DMAE_FLAG_RW_REPL_SRC flag is set, the source is a block of length
 * DMAE_MAX_RW_SIZE and the destination is larger, the source block will be
 * duplicated as many times as required to fill the destination block. This
 * is used mostly to write a zeroed buffer to the destination address
 * using DMA.
 */
#define ECORE_DMAE_FLAG_RW_REPL_SRC	0x00000001
#define ECORE_DMAE_FLAG_VF_SRC		0x00000002
#define ECORE_DMAE_FLAG_VF_DST		0x00000004
#define ECORE_DMAE_FLAG_COMPLETION_DST	0x00000008

struct ecore_dmae_params {
	u32 flags; /* consists of ECORE_DMAE_FLAG_* values */
	u8 src_vfid;
	u8 dst_vfid;
};

/**
 * @brief ecore_dmae_host2grc - copy data from a source address to a GRC
 * address using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_addr
 * @param grc_addr (dmae_data_offset)
 * @param size_in_dwords
 * @param flags (one of the flags defined above)
 */
enum _ecore_status_t
ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt,
		    u64 source_addr,
		    u32 grc_addr,
		    u32 size_in_dwords,
		    u32 flags);

/**
 * @brief ecore_dmae_grc2host - Read data from a GRC address
 * (dmae_data_offset) into a destination address using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param grc_addr (dmae_data_offset)
 * @param dest_addr
 * @param size_in_dwords
 * @param flags - one of the flags defined above
 */
enum _ecore_status_t
ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt,
		    u32 grc_addr,
		    dma_addr_t dest_addr,
		    u32 size_in_dwords,
		    u32 flags);

/**
 * @brief ecore_dmae_host2host - copy data from a source address
 * to a destination address (for SRIOV) using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_addr
 * @param dest_addr
 * @param size_in_dwords
 * @param p_params
 */
enum _ecore_status_t
ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
		     struct ecore_ptt *p_ptt,
		     dma_addr_t source_addr,
		     dma_addr_t dest_addr,
		     u32 size_in_dwords,
		     struct ecore_dmae_params *p_params);
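
/*
 * Example - a sketch of zeroing a large device-visible buffer using
 * ECORE_DMAE_FLAG_RW_REPL_SRC, per the flag description above.
 * zero_block_phys is assumed to point at a zeroed block of
 * DMAE_MAX_RW_SIZE dwords; dest_phys and dest_size_in_dwords are
 * illustrative.
 *
 *	struct ecore_dmae_params params;
 *
 *	OSAL_MEMSET(&params, 0, sizeof(params));
 *	params.flags = ECORE_DMAE_FLAG_RW_REPL_SRC;
 *	rc = ecore_dmae_host2host(p_hwfn, p_ptt, zero_block_phys,
 *				  dest_phys, dest_size_in_dwords, &params);
 */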

/**
 * @brief ecore_chain_alloc - Allocate and initialize a chain
 *
 * @param p_dev
 * @param intended_use
 * @param mode
 * @param cnt_type
 * @param num_elems
 * @param elem_size
 * @param p_chain
 * @param ext_pbl
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_chain_alloc(struct ecore_dev *p_dev,
		  enum ecore_chain_use_mode intended_use,
		  enum ecore_chain_mode mode,
		  enum ecore_chain_cnt_type cnt_type,
		  u32 num_elems,
		  osal_size_t elem_size,
		  struct ecore_chain *p_chain,
		  struct ecore_chain_ext_pbl *ext_pbl);
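
/*
 * Example - a sketch of allocating a PBL-mode chain of 256 8-byte elements
 * with 16-bit indices, and freeing it; the enum values are assumed from
 * ecore_chain.h, and the sizes are illustrative. A NULL ext_pbl requests an
 * internally allocated PBL.
 *
 *	struct ecore_chain chain;
 *
 *	rc = ecore_chain_alloc(p_dev,
 *			       ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
 *			       ECORE_CHAIN_MODE_PBL,
 *			       ECORE_CHAIN_CNT_TYPE_U16,
 *			       256, 8, &chain, OSAL_NULL);
 *	if (rc == ECORE_SUCCESS) {
 *		... produce/consume elements ...
 *		ecore_chain_free(p_dev, &chain);
 *	}
 */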

/**
 * @brief ecore_chain_free - Free chain DMA memory
 *
 * @param p_dev
 * @param p_chain
 */
void ecore_chain_free(struct ecore_dev *p_dev,
		      struct ecore_chain *p_chain);

/**
 * @brief ecore_fw_l2_queue - Get absolute L2 queue ID
 *
 *  @param p_hwfn
 *  @param src_id - relative to p_hwfn
 *  @param dst_id - absolute per engine
 *
 *  @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
				       u16 src_id,
				       u16 *dst_id);

/**
 * @brief ecore_fw_vport - Get absolute vport ID
 *
 *  @param p_hwfn
 *  @param src_id - relative to p_hwfn
 *  @param dst_id - absolute per engine
 *
 *  @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
				    u8 src_id,
				    u8 *dst_id);

/**
 * @brief ecore_fw_rss_eng - Get absolute RSS engine ID
 *
 *  @param p_hwfn
 *  @param src_id - relative to p_hwfn
 *  @param dst_id - absolute per engine
 *
 *  @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
				      u8 src_id,
				      u8 *dst_id);
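
/*
 * Example - a sketch of translating a function-relative vport ID to its
 * engine-absolute value; rel_vport_id is illustrative.
 *
 *	u8 abs_vport_id;
 *
 *	if (ecore_fw_vport(p_hwfn, rel_vport_id, &abs_vport_id) !=
 *	    ECORE_SUCCESS)
 *		... rel_vport_id is out of range for this function ...
 */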

/**
 * @brief ecore_llh_add_mac_filter - configures a MAC filter in llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_filter - MAC to add
 */
enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u8 *p_filter);

/**
 * @brief ecore_llh_remove_mac_filter - removes a MAC filter from llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_filter - MAC to remove
 */
void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     u8 *p_filter);

enum ecore_llh_port_filter_type_t {
	ECORE_LLH_FILTER_ETHERTYPE,
	ECORE_LLH_FILTER_TCP_SRC_PORT,
	ECORE_LLH_FILTER_TCP_DEST_PORT,
	ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT,
	ECORE_LLH_FILTER_UDP_SRC_PORT,
	ECORE_LLH_FILTER_UDP_DEST_PORT,
	ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT
};

/**
 * @brief ecore_llh_add_protocol_filter - configures a protocol filter in llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_port_or_eth_type - source port or ethertype to add
 * @param dest_port - destination port to add
 * @param type - type of filter and comparison
 */
enum _ecore_status_t
ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      u16 source_port_or_eth_type,
			      u16 dest_port,
			      enum ecore_llh_port_filter_type_t type);
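
/*
 * Example - a sketch of steering TCP traffic destined to port 3260 (iSCSI)
 * to this function; for a single-port filter type, the other port argument
 * is assumed to be ignored.
 *
 *	rc = ecore_llh_add_protocol_filter(p_hwfn, p_ptt, 0, 3260,
 *					   ECORE_LLH_FILTER_TCP_DEST_PORT);
 */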

/**
 * @brief ecore_llh_remove_protocol_filter - removes a protocol filter from llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_port_or_eth_type - source port or ethertype to remove
 * @param dest_port - destination port to remove
 * @param type - type of filter and comparison
 */
void
ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 u16 source_port_or_eth_type,
				 u16 dest_port,
				 enum ecore_llh_port_filter_type_t type);

/**
 * @brief ecore_llh_clear_all_filters - removes all MAC filters from llh
 *
 * @param p_hwfn
 * @param p_ptt
 */
void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt);

/**
 * @brief ecore_llh_set_function_as_default - set function as default per port
 *
 * @param p_hwfn
 * @param p_ptt
 */
enum _ecore_status_t
ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt);

/**
 * @brief Cleanup of previous driver remains prior to load
 *
 * @param p_hwfn
 * @param p_ptt
 * @param id - For PF, engine-relative. For VF, PF-relative.
 * @param is_vf - true iff cleanup is made for a VF.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn	*p_hwfn,
					 struct ecore_ptt	*p_ptt,
					 u16			id,
					 bool			is_vf);

/**
 * @brief ecore_set_queue_coalesce - Configure coalesce parameters for Rx and
 *    Tx queues. Coalescing can be configured up to 511 usec, but with varying
 *    accuracy [the bigger the value, the less accurate], up to an error of
 *    3 usec for the highest values.
 *    While the API allows setting coalescing per-qid, all queues sharing a SB
 *    should be in the same range [i.e., either 0-0x7f, 0x80-0xff or
 *    0x100-0x1ff], otherwise the configuration would break.
 *
 * @param p_hwfn
 * @param rx_coal - Rx Coalesce value in microseconds.
 * @param tx_coal - Tx Coalesce value in microseconds.
 * @param p_handle
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal,
			 u16 tx_coal, void *p_handle);
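
/*
 * Example - a sketch of setting 48 usec Rx / 96 usec Tx coalescing; p_handle
 * is assumed to be the opaque handle of the queue being configured. Both
 * values fall in the same 0-0x7f range, per the constraint above.
 *
 *	rc = ecore_set_queue_coalesce(p_hwfn, 48, 96, p_handle);
 */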

/**
 * @brief - Recalculate feature distributions based on HW resources and
 * user inputs. Currently this affects RDMA_CNQ, PF_L2_QUE and VF_L2_QUE.
 * As a result, this must not be called while RDMA is active or while VFs
 * are enabled.
 *
 * @param p_hwfn
 */
void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn);

/**
 * @brief ecore_change_pci_hwfn - Enable or disable PCI BUS MASTER
 *
 * @param p_hwfn
 * @param p_ptt
 * @param enable - true/false
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_change_pci_hwfn(struct ecore_hwfn *p_hwfn,
		      struct ecore_ptt *p_ptt,
		      u8 enable);

#endif