1/*
2 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses.  You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 *     Redistribution and use in source and binary forms, with or
11 *     without modification, are permitted provided that the following
12 *     conditions are met:
13 *
14 *	- Redistributions of source code must retain the above
15 *	  copyright notice, this list of conditions and the following
16 *	  disclaimer.
17 *
18 *	- Redistributions in binary form must reproduce the above
19 *	  copyright notice, this list of conditions and the following
20 *	  disclaimer in the documentation and/or other materials
21 *	  provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX4_DEVICE_H
34#define MLX4_DEVICE_H
35
36#include <linux/pci.h>
37#include <linux/completion.h>
38#include <linux/radix-tree.h>
39//#include <linux/cpu_rmap.h> /* XXX SK Probably not needed in freeBSD XXX */
40
41#include <asm/atomic.h>
42
43#include <linux/clocksource.h> /* XXX SK ported to freeBSD */
44
45#define MAX_MSIX_P_PORT		17
46#define MAX_MSIX		64
47#define MSIX_LEGACY_SZ		4
48#define MIN_MSIX_P_PORT		5
49
50#define MLX4_ROCE_MAX_GIDS	128
51#define MLX4_ROCE_PF_GIDS	16
52
53#define MLX4_NUM_UP			8
54#define MLX4_NUM_TC			8
#define MLX4_MAX_100M_UNITS_VAL		255	/*
						 * work around: can't set values
						 * greater than this value when
						 * using 100 Mbps units.
						 */
60#define MLX4_RATELIMIT_100M_UNITS	3	/* 100 Mbps */
61#define MLX4_RATELIMIT_1G_UNITS		4	/* 1 Gbps */
62#define MLX4_RATELIMIT_DEFAULT		0x00ff
63
64
65
66#define MLX4_LEAST_ATTACHED_VECTOR      0xffffffff
67
68enum {
69	MLX4_FLAG_MSI_X		= 1 << 0,
70	MLX4_FLAG_OLD_PORT_CMDS	= 1 << 1,
71	MLX4_FLAG_MASTER	= 1 << 2,
72	MLX4_FLAG_SLAVE		= 1 << 3,
73	MLX4_FLAG_SRIOV		= 1 << 4,
74};
75
76enum {
77	MLX4_PORT_CAP_IS_SM	= 1 << 1,
78	MLX4_PORT_CAP_DEV_MGMT_SUP = 1 << 19,
79};
80
81enum {
82	MLX4_MAX_PORTS		= 2,
83	MLX4_MAX_PORT_PKEYS	= 128
84};
85
86/* base qkey for use in sriov tunnel-qp/proxy-qp communication.
87 * These qkeys must not be allowed for general use. This is a 64k range,
88 * and to test for violation, we use the mask (protect against future chg).
89 */
90#define MLX4_RESERVED_QKEY_BASE  (0xFFFF0000)
91#define MLX4_RESERVED_QKEY_MASK  (0xFFFF0000)
92
93enum {
94	MLX4_BOARD_ID_LEN = 64
95};
96
97enum {
98	MLX4_MAX_NUM_PF		= 16,
99	MLX4_MAX_NUM_VF		= 64,
100	MLX4_MFUNC_MAX		= 80,
101	MLX4_MAX_EQ_NUM		= 1024,
102	MLX4_MFUNC_EQ_NUM	= 4,
103	MLX4_MFUNC_MAX_EQES     = 8,
104	MLX4_MFUNC_EQE_MASK     = (MLX4_MFUNC_MAX_EQES - 1)
105};
106
/* Driver supports 3 different device methods to manage traffic steering:
 *	- Device managed - High level API for ib and eth flow steering. FW is
 *			  managing flow steering tables.
 *	- B0 steering mode - Common low level API for ib and (if supported) eth.
 *	- A0 steering mode - Limited low level API for eth. In case of IB,
 *			     B0 mode is in use.
 */
/* Traffic-steering modes (see the comment block above for details). */
enum {
	MLX4_STEERING_MODE_A0,
	MLX4_STEERING_MODE_B0,
	MLX4_STEERING_MODE_DEVICE_MANAGED
};

/*
 * mlx4_steering_mode_str - return a human-readable name for a
 * MLX4_STEERING_MODE_* value, for use in log/diagnostic messages.
 * Unknown values yield a fixed "Unrecognized" string, never NULL.
 */
static inline const char *mlx4_steering_mode_str(int steering_mode)
{
	switch (steering_mode) {
	case MLX4_STEERING_MODE_A0:
		return "A0 steering";

	case MLX4_STEERING_MODE_B0:
		return "B0 steering";

	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return "Device managed flow steering";

	default:
		/* Typo fix: was "Unrecognize steering mode". */
		return "Unrecognized steering mode";
	}
}
136
137enum {
138	MLX4_DEV_CAP_FLAG_RC		= 1LL <<  0,
139	MLX4_DEV_CAP_FLAG_UC		= 1LL <<  1,
140	MLX4_DEV_CAP_FLAG_UD		= 1LL <<  2,
141	MLX4_DEV_CAP_FLAG_XRC		= 1LL <<  3,
142	MLX4_DEV_CAP_FLAG_SRQ		= 1LL <<  6,
143	MLX4_DEV_CAP_FLAG_IPOIB_CSUM	= 1LL <<  7,
144	MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1LL <<  8,
145	MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1LL <<  9,
146	MLX4_DEV_CAP_FLAG_DPDP		= 1LL << 12,
147	MLX4_DEV_CAP_FLAG_BLH		= 1LL << 15,
148	MLX4_DEV_CAP_FLAG_MEM_WINDOW	= 1LL << 16,
149	MLX4_DEV_CAP_FLAG_APM		= 1LL << 17,
150	MLX4_DEV_CAP_FLAG_ATOMIC	= 1LL << 18,
151	MLX4_DEV_CAP_FLAG_RAW_MCAST	= 1LL << 19,
152	MLX4_DEV_CAP_FLAG_UD_AV_PORT	= 1LL << 20,
153	MLX4_DEV_CAP_FLAG_UD_MCAST	= 1LL << 21,
154	MLX4_DEV_CAP_FLAG_IBOE		= 1LL << 30,
155	MLX4_DEV_CAP_FLAG_UC_LOOPBACK	= 1LL << 32,
156	MLX4_DEV_CAP_FLAG_FCS_KEEP	= 1LL << 34,
157	MLX4_DEV_CAP_FLAG_WOL_PORT1	= 1LL << 37,
158	MLX4_DEV_CAP_FLAG_WOL_PORT2	= 1LL << 38,
159	MLX4_DEV_CAP_FLAG_UDP_RSS	= 1LL << 40,
160	MLX4_DEV_CAP_FLAG_VEP_UC_STEER	= 1LL << 41,
161	MLX4_DEV_CAP_FLAG_VEP_MC_STEER	= 1LL << 42,
162	MLX4_DEV_CAP_FLAG_COUNTERS	= 1LL << 48,
163	MLX4_DEV_CAP_FLAG_COUNTERS_EXT	= 1LL << 49,
164	MLX4_DEV_CAP_FLAG_SET_PORT_ETH_SCHED = 1LL << 53,
165	MLX4_DEV_CAP_FLAG_SENSE_SUPPORT	= 1LL << 55,
166	MLX4_DEV_CAP_FLAG_FAST_DROP	= 1LL << 57,
167	MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59,
168	MLX4_DEV_CAP_FLAG_ESWITCH_SUPPORT = 1LL << 60,
169	MLX4_DEV_CAP_FLAG_64B_EQE	= 1LL << 61,
170	MLX4_DEV_CAP_FLAG_64B_CQE	= 1LL << 62
171};
172
173enum {
174	MLX4_DEV_CAP_FLAG2_RSS			= 1LL <<  0,
175	MLX4_DEV_CAP_FLAG2_RSS_TOP		= 1LL <<  1,
176	MLX4_DEV_CAP_FLAG2_RSS_XOR		= 1LL <<  2,
177	MLX4_DEV_CAP_FLAG2_FS_EN		= 1LL <<  3
178};
179
180enum {
181	MLX4_DEV_CAP_64B_EQE_ENABLED	= 1LL << 0,
182	MLX4_DEV_CAP_64B_CQE_ENABLED	= 1LL << 1
183};
184
185enum {
186	MLX4_USER_DEV_CAP_64B_CQE	= 1L << 0
187};
188
189enum {
190	MLX4_FUNC_CAP_64B_EQE_CQE	= 1L << 0
191};
192
193/* bit enums for an 8-bit flags field indicating special use
194 * QPs which require special handling in qp_reserve_range.
195 * Currently, this only includes QPs used by the ETH interface,
196 * where we expect to use blueflame.  These QPs must not have
197 * bits 6 and 7 set in their qp number.
198 *
199 * This enum may use only bits 0..7.
200 */
201enum {
202        MLX4_RESERVE_BF_QP      = 1 << 7,
203};
204
205
206#define MLX4_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)
207
208enum {
209	MLX4_BMME_FLAG_LOCAL_INV	= 1 <<  6,
210	MLX4_BMME_FLAG_REMOTE_INV	= 1 <<  7,
211	MLX4_BMME_FLAG_TYPE_2_WIN	= 1 <<  9,
212	MLX4_BMME_FLAG_RESERVED_LKEY	= 1 << 10,
213	MLX4_BMME_FLAG_FAST_REG_WR	= 1 << 11,
214};
215
216enum mlx4_event {
217	MLX4_EVENT_TYPE_COMP		   = 0x00,
218	MLX4_EVENT_TYPE_PATH_MIG	   = 0x01,
219	MLX4_EVENT_TYPE_COMM_EST	   = 0x02,
220	MLX4_EVENT_TYPE_SQ_DRAINED	   = 0x03,
221	MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE	   = 0x13,
222	MLX4_EVENT_TYPE_SRQ_LIMIT	   = 0x14,
223	MLX4_EVENT_TYPE_CQ_ERROR	   = 0x04,
224	MLX4_EVENT_TYPE_WQ_CATAS_ERROR	   = 0x05,
225	MLX4_EVENT_TYPE_EEC_CATAS_ERROR	   = 0x06,
226	MLX4_EVENT_TYPE_PATH_MIG_FAILED	   = 0x07,
227	MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
228	MLX4_EVENT_TYPE_WQ_ACCESS_ERROR	   = 0x11,
229	MLX4_EVENT_TYPE_SRQ_CATAS_ERROR	   = 0x12,
230	MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR  = 0x08,
231	MLX4_EVENT_TYPE_PORT_CHANGE	   = 0x09,
232	MLX4_EVENT_TYPE_EQ_OVERFLOW	   = 0x0f,
233	MLX4_EVENT_TYPE_ECC_DETECT	   = 0x0e,
234	MLX4_EVENT_TYPE_CMD		   = 0x0a,
235	MLX4_EVENT_TYPE_VEP_UPDATE	   = 0x19,
236	MLX4_EVENT_TYPE_COMM_CHANNEL	   = 0x18,
237	MLX4_EVENT_TYPE_OP_REQUIRED	   = 0x1a,
238	MLX4_EVENT_TYPE_FATAL_WARNING	   = 0x1b,
239	MLX4_EVENT_TYPE_FLR_EVENT	   = 0x1c,
240	MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d,
241	MLX4_EVENT_TYPE_NONE		   = 0xff,
242};
243
244enum {
245	MLX4_PORT_CHANGE_SUBTYPE_DOWN	= 1,
246	MLX4_PORT_CHANGE_SUBTYPE_ACTIVE	= 4
247};
248
249enum {
250	MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0,
251};
252
253enum slave_port_state {
254	SLAVE_PORT_DOWN = 0,
255	SLAVE_PENDING_UP,
256	SLAVE_PORT_UP,
257};
258
259enum slave_port_gen_event {
260	SLAVE_PORT_GEN_EVENT_DOWN = 0,
261	SLAVE_PORT_GEN_EVENT_UP,
262	SLAVE_PORT_GEN_EVENT_NONE,
263};
264
265enum slave_port_state_event {
266	MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
267	MLX4_PORT_STATE_DEV_EVENT_PORT_UP,
268	MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID,
269	MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
270};
271
272enum {
273	MLX4_PERM_LOCAL_READ	= 1 << 10,
274	MLX4_PERM_LOCAL_WRITE	= 1 << 11,
275	MLX4_PERM_REMOTE_READ	= 1 << 12,
276	MLX4_PERM_REMOTE_WRITE	= 1 << 13,
277	MLX4_PERM_ATOMIC	= 1 << 14
278};
279
280enum {
281	MLX4_OPCODE_NOP			= 0x00,
282	MLX4_OPCODE_SEND_INVAL		= 0x01,
283	MLX4_OPCODE_RDMA_WRITE		= 0x08,
284	MLX4_OPCODE_RDMA_WRITE_IMM	= 0x09,
285	MLX4_OPCODE_SEND		= 0x0a,
286	MLX4_OPCODE_SEND_IMM		= 0x0b,
287	MLX4_OPCODE_LSO			= 0x0e,
288	MLX4_OPCODE_RDMA_READ		= 0x10,
289	MLX4_OPCODE_ATOMIC_CS		= 0x11,
290	MLX4_OPCODE_ATOMIC_FA		= 0x12,
291	MLX4_OPCODE_MASKED_ATOMIC_CS	= 0x14,
292	MLX4_OPCODE_MASKED_ATOMIC_FA	= 0x15,
293	MLX4_OPCODE_BIND_MW		= 0x18,
294	MLX4_OPCODE_FMR			= 0x19,
295	MLX4_OPCODE_LOCAL_INVAL		= 0x1b,
296	MLX4_OPCODE_CONFIG_CMD		= 0x1f,
297
298	MLX4_RECV_OPCODE_RDMA_WRITE_IMM	= 0x00,
299	MLX4_RECV_OPCODE_SEND		= 0x01,
300	MLX4_RECV_OPCODE_SEND_IMM	= 0x02,
301	MLX4_RECV_OPCODE_SEND_INVAL	= 0x03,
302
303	MLX4_CQE_OPCODE_ERROR		= 0x1e,
304	MLX4_CQE_OPCODE_RESIZE		= 0x16,
305};
306
307enum {
308	MLX4_STAT_RATE_OFFSET	= 5
309};
310
311enum mlx4_protocol {
312	MLX4_PROT_IB_IPV6 = 0,
313	MLX4_PROT_ETH,
314	MLX4_PROT_IB_IPV4,
315	MLX4_PROT_FCOE
316};
317
318enum {
319	MLX4_MTT_FLAG_PRESENT		= 1
320};
321
322enum {
323	MLX4_MAX_MTT_SHIFT		= 31
324};
325
326enum mlx4_qp_region {
327	MLX4_QP_REGION_FW = 0,
328	MLX4_QP_REGION_ETH_ADDR,
329	MLX4_QP_REGION_FC_ADDR,
330	MLX4_QP_REGION_FC_EXCH,
331	MLX4_NUM_QP_REGION
332};
333
334enum mlx4_port_type {
335	MLX4_PORT_TYPE_NONE	= 0,
336	MLX4_PORT_TYPE_IB	= 1,
337	MLX4_PORT_TYPE_ETH	= 2,
338	MLX4_PORT_TYPE_AUTO	= 3
339};
340
341enum mlx4_special_vlan_idx {
342	MLX4_NO_VLAN_IDX        = 0,
343	MLX4_VLAN_MISS_IDX,
344	MLX4_VLAN_REGULAR
345};
346
347enum mlx4_steer_type {
348	MLX4_MC_STEER = 0,
349	MLX4_UC_STEER,
350	MLX4_NUM_STEERS
351};
352
353enum {
354	MLX4_NUM_FEXCH          = 64 * 1024,
355};
356
357enum {
358	MLX4_MAX_FAST_REG_PAGES = 511,
359};
360
361enum {
362	MLX4_DEV_PMC_SUBTYPE_GUID_INFO	 = 0x14,
363	MLX4_DEV_PMC_SUBTYPE_PORT_INFO	 = 0x15,
364	MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE	 = 0x16,
365};
366
367/* Port mgmt change event handling */
368enum {
369	MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK	= 1 << 0,
370	MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK		= 1 << 1,
371	MLX4_EQ_PORT_INFO_LID_CHANGE_MASK		= 1 << 2,
372	MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK		= 1 << 3,
373	MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK	= 1 << 4,
374};
375
376#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
377			     MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)
378
379static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
380{
381	return (major << 32) | (minor << 16) | subminor;
382}
383
/*
 * Physical (underlying hardware) capabilities, as opposed to the
 * possibly virtualized per-function view kept in struct mlx4_caps.
 * Per-port arrays are indexed 1..MLX4_MAX_PORTS; entry 0 is unused
 * (ports are 1-based throughout this header, cf. mlx4_foreach_port).
 */
struct mlx4_phys_caps {
	u32			gid_phys_table_len[MLX4_MAX_PORTS + 1];
	u32			pkey_phys_table_len[MLX4_MAX_PORTS + 1];
	u32			num_phys_eqs;
	u32			base_sqpn;		/* first special QP number (see mlx4_is_qp_reserved) */
	u32			base_proxy_sqpn;	/* first proxy SQP; 8 per slave (see mlx4_is_guest_proxy) */
	u32			base_tunnel_sqpn;
};
392
393struct mlx4_caps {
394	u64			fw_ver;
395	u32			function;
396	int			num_ports;
397	int			vl_cap[MLX4_MAX_PORTS + 1];
398	int			ib_mtu_cap[MLX4_MAX_PORTS + 1];
399	__be32			ib_port_def_cap[MLX4_MAX_PORTS + 1];
400	u64			def_mac[MLX4_MAX_PORTS + 1];
401	int			eth_mtu_cap[MLX4_MAX_PORTS + 1];
402	int			gid_table_len[MLX4_MAX_PORTS + 1];
403	int			pkey_table_len[MLX4_MAX_PORTS + 1];
404	int			trans_type[MLX4_MAX_PORTS + 1];
405	int			vendor_oui[MLX4_MAX_PORTS + 1];
406	int			wavelength[MLX4_MAX_PORTS + 1];
407	u64			trans_code[MLX4_MAX_PORTS + 1];
408	int			local_ca_ack_delay;
409	int			num_uars;
410	u32			uar_page_size;
411	int			bf_reg_size;
412	int			bf_regs_per_page;
413	int			max_sq_sg;
414	int			max_rq_sg;
415	int			num_qps;
416	int			max_wqes;
417	int			max_sq_desc_sz;
418	int			max_rq_desc_sz;
419	int			max_qp_init_rdma;
420	int			max_qp_dest_rdma;
421	u32			*qp0_proxy;
422	u32			*qp1_proxy;
423	u32			*qp0_tunnel;
424	u32			*qp1_tunnel;
425	int			num_srqs;
426	int			max_srq_wqes;
427	int			max_srq_sge;
428	int			reserved_srqs;
429	int			num_cqs;
430	int			max_cqes;
431	int			reserved_cqs;
432	int			num_eqs;
433	int			reserved_eqs;
434	int			num_comp_vectors;
435	int			comp_pool;
436	int			num_mpts;
437	int			max_fmr_maps;
438	int			num_mtts;
439	int			fmr_reserved_mtts;
440	int			reserved_mtts;
441	int			reserved_mrws;
442	int			reserved_uars;
443	int			num_mgms;
444	int			num_amgms;
445	int			reserved_mcgs;
446	int			num_qp_per_mgm;
447	int			steering_mode;
448	int			num_pds;
449	int			reserved_pds;
450	int			max_xrcds;
451	int			reserved_xrcds;
452	int			mtt_entry_sz;
453	u32			max_msg_sz;
454	u32			page_size_cap;
455	u64			flags;
456	u64			flags2;
457	u32			bmme_flags;
458	u32			reserved_lkey;
459	u16			stat_rate_support;
460	u8			cq_timestamp;
461	u8			port_width_cap[MLX4_MAX_PORTS + 1];
462	int			max_gso_sz;
463	int			max_rss_tbl_sz;
464	int                     reserved_qps_cnt[MLX4_NUM_QP_REGION];
465	int			reserved_qps;
466	int                     reserved_qps_base[MLX4_NUM_QP_REGION];
467	int                     log_num_macs;
468	int                     log_num_vlans;
469	enum mlx4_port_type	port_type[MLX4_MAX_PORTS + 1];
470	u8			supported_type[MLX4_MAX_PORTS + 1];
471	u8                      suggested_type[MLX4_MAX_PORTS + 1];
472	u8                      default_sense[MLX4_MAX_PORTS + 1];
473	u32			port_mask[MLX4_MAX_PORTS + 1];
474	enum mlx4_port_type	possible_type[MLX4_MAX_PORTS + 1];
475	u32			max_counters;
476	u8			port_ib_mtu[MLX4_MAX_PORTS + 1];
477	u16			sqp_demux;
478	u32			sync_qp;
479	u32			eqe_size;
480	u32			cqe_size;
481	u8			eqe_factor;
482	u32			userspace_caps; /* userspace must be aware to */
483	u32			function_caps;  /* functions must be aware to */
484	u8			fast_drop;
485	u16			hca_core_clock;
486	u32			max_basic_counters;
487	u32			max_extended_counters;
488};
489
/* One DMA-mapped chunk: kernel virtual address plus its bus address. */
struct mlx4_buf_list {
	void		       *buf;
	dma_addr_t		map;
};
494
/*
 * A buffer shared with the device: either one contiguous chunk
 * (direct, nbufs == 1) or an array of page-sized chunks (page_list).
 * mlx4_buf_offset() resolves a byte offset to a CPU address for
 * either layout.
 */
struct mlx4_buf {
	struct mlx4_buf_list	direct;
	struct mlx4_buf_list   *page_list;
	int			nbufs;
	int			npages;
	int			page_shift;
};
502
503struct mlx4_mtt {
504	u32			offset;
505	int			order;
506	int			page_shift;
507};
508
509enum {
510	MLX4_DB_PER_PAGE = PAGE_SIZE / 4
511};
512
513struct mlx4_db_pgdir {
514	struct list_head	list;
515	DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE);
516	DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2);
517	unsigned long	       *bits[2];
518	__be32		       *db_page;
519	dma_addr_t		db_dma;
520};
521
522struct mlx4_ib_user_db_page;
523
/*
 * Doorbell record.  Kernel consumers allocate theirs from a
 * mlx4_db_pgdir page (u.pgdir); userspace-backed doorbells reference
 * a mlx4_ib_user_db_page (u.user_page) instead.
 */
struct mlx4_db {
	__be32			*db;	/* CPU address of the 32-bit record */
	union {
		struct mlx4_db_pgdir		*pgdir;
		struct mlx4_ib_user_db_page	*user_page;
	}			u;
	dma_addr_t		dma;	/* bus address of the record */
	int			index;
	int			order;
};
534
535struct mlx4_hwq_resources {
536	struct mlx4_db		db;
537	struct mlx4_mtt		mtt;
538	struct mlx4_buf		buf;
539};
540
541struct mlx4_mr {
542	struct mlx4_mtt		mtt;
543	u64			iova;
544	u64			size;
545	u32			key;
546	u32			pd;
547	u32			access;
548	int			enabled;
549};
550
551struct mlx4_fmr {
552	struct mlx4_mr		mr;
553	struct mlx4_mpt_entry  *mpt;
554	__be64		       *mtts;
555	dma_addr_t		dma_handle;
556	int			max_pages;
557	int			max_maps;
558	int			maps;
559	u8			page_shift;
560};
561
562struct mlx4_uar {
563	unsigned long		pfn;
564	int			index;
565	struct list_head	bf_list;
566	unsigned		free_bf_bmap;
567	void __iomem	       *map;
568	void __iomem	       *bf_map;
569};
570
571struct mlx4_bf {
572	unsigned long		offset;
573	int			buf_size;
574	struct mlx4_uar	       *uar;
575	void __iomem	       *reg;
576};
577
578struct mlx4_cq {
579	void (*comp)		(struct mlx4_cq *);
580	void (*event)		(struct mlx4_cq *, enum mlx4_event);
581
582	struct mlx4_uar	       *uar;
583
584	u32			cons_index;
585
586	__be32		       *set_ci_db;
587	__be32		       *arm_db;
588	int			arm_sn;
589
590	int			cqn;
591	unsigned		vector;
592
593	atomic_t		refcount;
594	struct completion	free;
595	int			eqn;
596	u16			irq;
597};
598
599struct mlx4_qp {
600	void (*event)		(struct mlx4_qp *, enum mlx4_event);
601
602	int			qpn;
603
604	atomic_t		refcount;
605	struct completion	free;
606};
607
608struct mlx4_srq {
609	void (*event)		(struct mlx4_srq *, enum mlx4_event);
610
611	int			srqn;
612	int			max;
613	int			max_gs;
614	int			wqe_shift;
615
616	atomic_t		refcount;
617	struct completion	free;
618};
619
620struct mlx4_av {
621	__be32			port_pd;
622	u8			reserved1;
623	u8			g_slid;
624	__be16			dlid;
625	u8			reserved2;
626	u8			gid_index;
627	u8			stat_rate;
628	u8			hop_limit;
629	__be32			sl_tclass_flowlabel;
630	u8			dgid[16];
631};
632
633struct mlx4_eth_av {
634	__be32		port_pd;
635	u8		reserved1;
636	u8		smac_idx;
637	u16		reserved2;
638	u8		reserved3;
639	u8		gid_index;
640	u8		stat_rate;
641	u8		hop_limit;
642	__be32		sl_tclass_flowlabel;
643	u8		dgid[16];
644	u32		reserved4[2];
645	__be16		vlan;
646	u8		mac[6];
647};
648
649union mlx4_ext_av {
650	struct mlx4_av		ib;
651	struct mlx4_eth_av	eth;
652};
653
654struct mlx4_if_stat_control {
655	u8 reserved1[3];
656	/* Extended counters enabled */
657	u8 cnt_mode;
658	/* Number of interfaces */
659	__be32 num_of_if;
660	__be32 reserved[2];
661};
662
663struct mlx4_if_stat_basic {
664	struct mlx4_if_stat_control control;
665	struct {
666		__be64 IfRxFrames;
667		__be64 IfRxOctets;
668		__be64 IfTxFrames;
669		__be64 IfTxOctets;
670	} counters[];
671};
672#define MLX4_IF_STAT_BSC_SZ(ports)(sizeof(struct mlx4_if_stat_extended) +\
673				   sizeof(((struct mlx4_if_stat_extended *)0)->\
674				   counters[0]) * ports)
675
676struct mlx4_if_stat_extended {
677	struct mlx4_if_stat_control control;
678	struct {
679		__be64 IfRxUnicastFrames;
680		__be64 IfRxUnicastOctets;
681		__be64 IfRxMulticastFrames;
682		__be64 IfRxMulticastOctets;
683		__be64 IfRxBroadcastFrames;
684		__be64 IfRxBroadcastOctets;
685		__be64 IfRxNoBufferFrames;
686		__be64 IfRxNoBufferOctets;
687		__be64 IfRxErrorFrames;
688		__be64 IfRxErrorOctets;
689		__be32 reserved[39];
690		__be64 IfTxUnicastFrames;
691		__be64 IfTxUnicastOctets;
692		__be64 IfTxMulticastFrames;
693		__be64 IfTxMulticastOctets;
694		__be64 IfTxBroadcastFrames;
695		__be64 IfTxBroadcastOctets;
696		__be64 IfTxDroppedFrames;
697		__be64 IfTxDroppedOctets;
698		__be64 IfTxRequestedFramesSent;
699		__be64 IfTxGeneratedFramesSent;
700		__be64 IfTxTsoOctets;
701	} __packed counters[];
702};
703#define MLX4_IF_STAT_EXT_SZ(ports)   (sizeof(struct mlx4_if_stat_extended) +\
704				      sizeof(((struct mlx4_if_stat_extended *)\
705				      0)->counters[0]) * ports)
706
707union mlx4_counter {
708	struct mlx4_if_stat_control	control;
709	struct mlx4_if_stat_basic	basic;
710	struct mlx4_if_stat_extended	ext;
711};
712#define MLX4_IF_STAT_SZ(ports)		MLX4_IF_STAT_EXT_SZ(ports)
713
714struct mlx4_quotas {
715	int qp;
716	int cq;
717	int srq;
718	int mpt;
719	int mtt;
720	int counter;
721	int xrcd;
722};
723
724struct mlx4_dev {
725	struct pci_dev	       *pdev;
726	unsigned long		flags;
727	unsigned long		num_slaves;
728	struct mlx4_caps	caps;
729	struct mlx4_phys_caps	phys_caps;
730	struct mlx4_quotas	quotas;
731	struct radix_tree_root	qp_table_tree;
732	u8			rev_id;
733	char			board_id[MLX4_BOARD_ID_LEN];
734	int			num_vfs;
735	int			numa_node;
736	int			oper_log_mgm_entry_size;
737	u64			regid_promisc_array[MLX4_MAX_PORTS + 1];
738	u64			regid_allmulti_array[MLX4_MAX_PORTS + 1];
739};
740
741struct mlx4_eqe {
742	u8			reserved1;
743	u8			type;
744	u8			reserved2;
745	u8			subtype;
746	union {
747		u32		raw[6];
748		struct {
749			__be32	cqn;
750		} __packed comp;
751		struct {
752			u16	reserved1;
753			__be16	token;
754			u32	reserved2;
755			u8	reserved3[3];
756			u8	status;
757			__be64	out_param;
758		} __packed cmd;
759		struct {
760			__be32	qpn;
761		} __packed qp;
762		struct {
763			__be32	srqn;
764		} __packed srq;
765		struct {
766			__be32	cqn;
767			u32	reserved1;
768			u8	reserved2[3];
769			u8	syndrome;
770		} __packed cq_err;
771		struct {
772			u32	reserved1[2];
773			__be32	port;
774		} __packed port_change;
775		struct {
776			#define COMM_CHANNEL_BIT_ARRAY_SIZE	4
777			u32 reserved;
778			u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
779		} __packed comm_channel_arm;
780		struct {
781			u8	port;
782			u8	reserved[3];
783			__be64	mac;
784		} __packed mac_update;
785		struct {
786			__be32	slave_id;
787		} __packed flr_event;
788		struct {
789			__be16  current_temperature;
790			__be16  warning_threshold;
791		} __packed warming;
792		struct {
793			u8 reserved[3];
794			u8 port;
795			union {
796				struct {
797					__be16 mstr_sm_lid;
798					__be16 port_lid;
799					__be32 changed_attr;
800					u8 reserved[3];
801					u8 mstr_sm_sl;
802					__be64 gid_prefix;
803				} __packed port_info;
804				struct {
805					__be32 block_ptr;
806					__be32 tbl_entries_mask;
807				} __packed tbl_change_info;
808			} params;
809		} __packed port_mgmt_change;
810	}			event;
811	u8			slave_id;
812	u8			reserved3[2];
813	u8			owner;
814} __packed;
815
816struct mlx4_init_port_param {
817	int			set_guid0;
818	int			set_node_guid;
819	int			set_si_guid;
820	u16			mtu;
821	int			port_width_cap;
822	u16			vl_cap;
823	u16			max_gid;
824	u16			max_pkey;
825	u64			guid0;
826	u64			node_guid;
827	u64			si_guid;
828};
829
830#define mlx4_foreach_port(port, dev, type)				\
831	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	\
832		if ((type) == (dev)->caps.port_mask[(port)])
833
834#define mlx4_foreach_non_ib_transport_port(port, dev)                     \
835	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	  \
836		if (((dev)->caps.port_mask[port] != MLX4_PORT_TYPE_IB))
837
838#define mlx4_foreach_ib_transport_port(port, dev)                         \
839	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	  \
840		if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
841			((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
842
843#define MLX4_INVALID_SLAVE_ID	0xFF
844
845void handle_port_mgmt_change_event(struct work_struct *work);
846
847static inline int mlx4_master_func_num(struct mlx4_dev *dev)
848{
849	return dev->caps.function;
850}
851
852static inline int mlx4_is_master(struct mlx4_dev *dev)
853{
854	return dev->flags & MLX4_FLAG_MASTER;
855}
856
857static inline int mlx4_num_reserved_sqps(struct mlx4_dev *dev)
858{
859	return dev->phys_caps.base_sqpn + 8 +
860		16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev);
861}
862
863static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
864{
865	return (qpn < dev->phys_caps.base_sqpn + 8 +
866		16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev));
867}
868
869static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn)
870{
871	int guest_proxy_base = dev->phys_caps.base_proxy_sqpn + slave * 8;
872
873	if (qpn >= guest_proxy_base && qpn < guest_proxy_base + 8)
874		return 1;
875
876	return 0;
877}
878
879static inline int mlx4_is_mfunc(struct mlx4_dev *dev)
880{
881	return dev->flags & (MLX4_FLAG_SLAVE | MLX4_FLAG_MASTER);
882}
883
884static inline int mlx4_is_slave(struct mlx4_dev *dev)
885{
886	return dev->flags & MLX4_FLAG_SLAVE;
887}
888
889int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
890		   struct mlx4_buf *buf);
891void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
/*
 * mlx4_buf_offset - return the CPU address @offset bytes into @buf.
 * A contiguous buffer (64-bit kernels, or nbufs == 1) is addressed
 * through the direct mapping; otherwise the page_list chunk holding
 * the offset is selected via PAGE_SHIFT/PAGE_SIZE arithmetic.
 * NOTE(review): relies on GNU-C arithmetic on void * (byte units).
 */
static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
{
	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
		return buf->direct.buf + offset;
	else
		return buf->page_list[offset >> PAGE_SHIFT].buf +
			(offset & (PAGE_SIZE - 1));
}
900
901int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn);
902void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn);
903int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
904void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);
905
906int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar);
907void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar);
908int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node);
909void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf);
910
911int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
912		  struct mlx4_mtt *mtt);
913void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
914u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
915
916int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
917		  int npages, int page_shift, struct mlx4_mr *mr);
918void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr);
919int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr);
920int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
921		   int start_index, int npages, u64 *page_list);
922int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
923		       struct mlx4_buf *buf);
924
925int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
926void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
927
928int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
929		       int size, int max_direct);
930void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
931		       int size);
932
933int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
934		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
935		  unsigned vector, int collapsed, int timestamp_en);
936void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
937
938int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
939			  int *base, u8 bf_qp);
940void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
941
942int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
943void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);
944
945int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcdn,
946		   struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq);
947void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq);
948int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark);
949int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark);
950
951int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
952int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);
953
954int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
955			int block_mcast_loopback, enum mlx4_protocol prot);
956int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
957			enum mlx4_protocol prot);
958int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
959			  u8 port, int block_mcast_loopback,
960			  enum mlx4_protocol protocol, u64 *reg_id);
961int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
962			  enum mlx4_protocol protocol, u64 reg_id);
963
964enum {
965	MLX4_DOMAIN_UVERBS	= 0x1000,
966	MLX4_DOMAIN_ETHTOOL     = 0x2000,
967	MLX4_DOMAIN_RFS         = 0x3000,
968	MLX4_DOMAIN_NIC    = 0x5000,
969};
970
971enum mlx4_net_trans_rule_id {
972	MLX4_NET_TRANS_RULE_ID_ETH = 0,
973	MLX4_NET_TRANS_RULE_ID_IB,
974	MLX4_NET_TRANS_RULE_ID_IPV6,
975	MLX4_NET_TRANS_RULE_ID_IPV4,
976	MLX4_NET_TRANS_RULE_ID_TCP,
977	MLX4_NET_TRANS_RULE_ID_UDP,
978	MLX4_NET_TRANS_RULE_NUM, /* should be last */
979};
980
981extern const u16 __sw_id_hw[];
982
983static inline int map_hw_to_sw_id(u16 header_id)
984{
985
986	int i;
987	for (i = 0; i < MLX4_NET_TRANS_RULE_NUM; i++) {
988		if (header_id == __sw_id_hw[i])
989			return i;
990	}
991	return -EINVAL;
992}
993enum mlx4_net_trans_promisc_mode {
994	MLX4_FS_REGULAR		= 0,
995	MLX4_FS_ALL_DEFAULT	= 1,
996	MLX4_FS_MC_DEFAULT	= 3,
997	MLX4_FS_UC_SNIFFER	= 4,
998	MLX4_FS_MC_SNIFFER	= 5,
999};
1000
1001struct mlx4_spec_eth {
1002	u8	dst_mac[6];
1003	u8	dst_mac_msk[6];
1004	u8	src_mac[6];
1005	u8	src_mac_msk[6];
1006	u8	ether_type_enable;
1007	__be16	ether_type;
1008	__be16	vlan_id_msk;
1009	__be16	vlan_id;
1010};
1011
1012struct mlx4_spec_tcp_udp {
1013	__be16 dst_port;
1014	__be16 dst_port_msk;
1015	__be16 src_port;
1016	__be16 src_port_msk;
1017};
1018
1019struct mlx4_spec_ipv4 {
1020	__be32 dst_ip;
1021	__be32 dst_ip_msk;
1022	__be32 src_ip;
1023	__be32 src_ip_msk;
1024};
1025
1026struct mlx4_spec_ib {
1027	__be32 r_u_qpn;
1028	__be32 qpn_msk;
1029	u8 dst_gid[16];
1030	u8 dst_gid_msk[16];
1031};
1032
1033struct mlx4_spec_list {
1034	struct	list_head list;
1035	enum	mlx4_net_trans_rule_id id;
1036	union {
1037		struct mlx4_spec_eth eth;
1038		struct mlx4_spec_ib ib;
1039		struct mlx4_spec_ipv4 ipv4;
1040		struct mlx4_spec_tcp_udp tcp_udp;
1041	};
1042};
1043
1044enum mlx4_net_trans_hw_rule_queue {
1045	MLX4_NET_TRANS_Q_FIFO,
1046	MLX4_NET_TRANS_Q_LIFO,
1047};
1048
1049struct mlx4_net_trans_rule {
1050	struct	list_head list;
1051	enum	mlx4_net_trans_hw_rule_queue queue_mode;
1052	bool	exclusive;
1053	bool	allow_loopback;
1054	enum	mlx4_net_trans_promisc_mode promisc_mode;
1055	u8	port;
1056	u16	priority;
1057	u32	qpn;
1058};
1059
1060int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn,
1061				enum mlx4_net_trans_promisc_mode mode);
1062int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
1063				   enum mlx4_net_trans_promisc_mode mode);
1064int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
1065int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
1066int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
1067int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
1068int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
1069
1070int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
1071void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
1072int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port);
1073int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
1074void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap);
1075int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
1076			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
1077int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
1078			   u8 promisc);
1079int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc);
1080int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
1081		u8 *pg, u16 *ratelimit);
1082int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
1083int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
1084void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
1085
1086int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
1087		      int npages, u64 iova, u32 *lkey, u32 *rkey);
1088int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
1089		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr);
1090int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
1091void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
1092		    u32 *lkey, u32 *rkey);
1093int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
1094int mlx4_SYNC_TPT(struct mlx4_dev *dev);
1095int mlx4_query_diag_counters(struct mlx4_dev *mlx4_dev, int array_length,
1096			     u8 op_modifier, u32 in_offset[],
1097			     u32 counter_out[]);
1098
1099int mlx4_test_interrupts(struct mlx4_dev *dev);
1100int mlx4_assign_eq(struct mlx4_dev *dev, char *name, int *vector);
1101void mlx4_release_eq(struct mlx4_dev *dev, int vec);
1102
1103int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
1104int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
1105
1106int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
1107void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
1108
1109int mlx4_flow_attach(struct mlx4_dev *dev,
1110		     struct mlx4_net_trans_rule *rule, u64 *reg_id);
1111int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id);
1112
1113void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
1114			  int i, int val);
1115
1116int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey);
1117
1118int mlx4_is_slave_active(struct mlx4_dev *dev, int slave);
1119int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port);
1120int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port);
1121int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr);
1122int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port, u8 port_subtype_change);
1123enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port);
1124int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, u8 port, int event, enum slave_port_gen_event *gen_event);
1125
1126void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid);
1127__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave);
1128int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid, int *slave_id);
1129int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id, u8 *gid);
1130
1131int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, u32 max_range_qpn);
1132
1133cycle_t mlx4_read_clock(struct mlx4_dev *dev);
1134
1135#endif /* MLX4_DEVICE_H */
1136