/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_ib/mlx5_ib.h 323223 2017-09-06 15:33:23Z hselasky $
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_addr.h>
#include <dev/mlx5/device.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/cq.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/srq.h>
#include <linux/types.h>
#include <dev/mlx5/mlx5_core/transobj.h>

#define mlx5_ib_dbg(dev, format, arg...)				\
pr_debug("mlx5_dbg:%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	 __LINE__, curthread->td_proc->p_pid, ##arg)

#define mlx5_ib_err(dev, format, arg...)				\
printf("mlx5_ib: ERR: ""mlx5_err:%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
	__LINE__, curthread->td_proc->p_pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...)				\
printf("mlx5_ib: WARN: ""mlx5_warn:%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
	__LINE__, curthread->td_proc->p_pid, ##arg)
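
/*
 * Example (illustrative only): each logging macro above takes the mlx5_ib
 * device, a printf-style format string and its arguments, e.g.
 *
 *	mlx5_ib_warn(dev, "create QP failed, err %d\n", err);
 *
 * The device name, function, line number and caller PID are prepended
 * automatically.
 */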
#define BF_ENABLE 0

extern struct workqueue_struct *mlx5_ib_wq;

enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};
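
/*
 * A sketch of the assumed encoding (not taken verbatim from the mmap
 * handler): the page offset passed to mmap() carries one of the commands
 * below in the bits above MLX5_IB_MMAP_CMD_SHIFT, while the low bits select
 * an index such as a UAR page:
 *
 *	cmd = (pgoff >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
 *	idx = pgoff & MLX5_IB_MMAP_CMD_MASK;
 */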

enum mlx5_ib_mmap_cmd {
	MLX5_IB_MMAP_REGULAR_PAGE		= 0,
	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES	= 1,
	MLX5_IB_MMAP_WC_PAGE			= 2,
	MLX5_IB_MMAP_NC_PAGE			= 3,
	MLX5_IB_MMAP_MAP_DC_INFO_PAGE		= 4,

	/* Use EXP mmap commands until it is pushed to upstream */
	MLX5_IB_EXP_MMAP_CORE_CLOCK			= 0xFB,
	MLX5_IB_EXP_MMAP_GET_CONTIGUOUS_PAGES_CPU_NUMA	= 0xFC,
	MLX5_IB_EXP_MMAP_GET_CONTIGUOUS_PAGES_DEV_NUMA	= 0xFD,
	MLX5_IB_EXP_ALLOC_N_MMAP_WC			= 0xFE,
};

enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};

enum {
	MLX5_DCT_CS_RES_64		= 2,
	MLX5_CNAK_RX_POLL_CQ_QUOTA	= 256,
};

enum mlx5_ib_latency_class {
	MLX5_IB_LATENCY_CLASS_LOW,
	MLX5_IB_LATENCY_CLASS_MEDIUM,
	MLX5_IB_LATENCY_CLASS_HIGH,
	MLX5_IB_LATENCY_CLASS_FAST_PATH
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};

enum {
	MLX5_CROSS_CHANNEL_UUAR		= 0,
};

enum {
	MLX5_IB_MAX_CTX_DYNAMIC_UARS = 256,
	MLX5_IB_INVALID_UAR_INDEX = -1U
};

enum {
	MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES	= 13,
	MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES	= 6,
	MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES	= 16,
	MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES	= 9,
};

struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_uuar_info	uuari;
	u32			dynamic_wc_uar_index[MLX5_IB_MAX_CTX_DYNAMIC_UARS];
	/* Transport Domain number */
	u32			tdn;
};

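/*
 * The to_*() helpers below recover the driver-private wrapper structure from
 * the embedded verbs object via container_of(), e.g. (illustrative only):
 *
 *	struct mlx5_ib_ucontext *ctx = to_mucontext(ibucontext);
 */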
static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
	u32			pa_lkey;
};

struct wr_list {
	u16	opcode;
	u16	next;
};

struct mlx5_swr_ctx {
	u64		wrid;
	u32		wr_data;
	struct wr_list	w_list;
	u32		wqe_head;
	u8		sig_piped;
	u8		rsvd[11];
};

struct mlx5_rwr_ctx {
	u64		       wrid;
};

struct mlx5_ib_wq {
	union {
		struct mlx5_swr_ctx *swr_ctx;
		struct mlx5_rwr_ctx *rwr_ctx;
	};
	u16		        unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
	u16			cur_post;
	u16			last_poll;
	void		       *qend;
};

enum {
	MLX5_QP_USER,
	MLX5_QP_KERNEL,
	MLX5_QP_EMPTY
};

enum {
	MLX5_WQ_USER,
	MLX5_WQ_KERNEL
};

struct mlx5_ib_sqd {
	struct mlx5_ib_qp	*qp;
	struct work_struct	work;
};

struct mlx5_ib_mc_flows_list {
	struct list_head		flows_list;
	/* Protect the flows_list */
	struct mutex		lock;
};

struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_core_qp	mrq;
	struct mlx5_core_qp	msq;
	u32			tisn;
	u32			tirn;
	struct mlx5_buf		buf;

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;

	u32			doorbell_qpn;
	u8			sq_signal_bits;
	u8			fm_cache;
	int			sq_max_wqes_per_wr;
	int			sq_spare_wqes;
	struct mlx5_ib_wq	sq;

	struct ib_umem	       *umem;
	int			buf_size;
	/* Raw Ethernet QP's SQ is allocated separately
	 * from the RQ's buffer in user-space.
	 */
	struct ib_umem	       *sq_umem;
	int			sq_buf_size;
	u64			sq_buf_addr;
	int			allow_mp_wqe;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	u16			xrcdn;
	u32			flags;
	u8			port;
	u8			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
	u8			state;
	/* Raw Ethernet QP's SQ and RQ states */
	u8			rq_state;
	u8			sq_state;
	int			mlx_type;
	int			wq_sig;
	int			scat_cqe;
	int			max_inline_data;
	struct mlx5_bf	       *bf;
	int			has_rq;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			uuarn;

	int			create_type;
	u32			pa_lkey;

	/* Store signature errors */
	bool			signature_en;

	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;

	struct mlx5_ib_mc_flows_list mc_flows_list;
};

struct mlx5_ib_cq_buf {
	struct mlx5_buf		buf;
	struct ib_umem		*umem;
	int			cqe_size;
	int			nent;
};

enum mlx5_ib_qp_flags {
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK     = 1 << 0,
	MLX5_IB_QP_SIGNATURE_HANDLING           = 1 << 1,
	MLX5_IB_QP_CAP_RX_END_PADDING		= 1 << 5,
};

struct mlx5_umr_wr {
	union {
		u64			virt_addr;
		u64			offset;
	} target;
	struct ib_pd		       *pd;
	unsigned int			page_shift;
	unsigned int			npages;
	u64				length;
	int				access_flags;
	u32				mkey;
};

struct mlx5_shared_mr_info {
	int mr_id;
	struct ib_umem		*umem;
};

struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	int			cqe_size;
	struct list_head		list_send_qp;
	struct list_head		list_recv_qp;
};

struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_buf		buf;
	struct mlx5_db		db;
	u64		       *wrid;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
	int			wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
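
/*
 * Descriptive note: these are per-MTT access bits OR'ed into page addresses
 * when the page list is populated (see mlx5_ib_populate_pas() below);
 * MLX5_IB_MTT_PRESENT marks a page mapped with both read and write access.
 */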

struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	struct mlx5_core_mr	mmr;
	struct ib_umem	       *umem;
	struct mlx5_shared_mr_info	*smr_info;
	struct list_head	list;
	int			order;
	int			umred;
	dma_addr_t		dma;
	int			npages;
	struct mlx5_ib_dev     *dev;
	struct mlx5_create_mkey_mbox_out out;
	struct mlx5_core_sig_ctx    *sig;
	u32			max_reg_descriptors;
	u64			size;
	u64			page_count;
	struct mlx5_ib_mr     **children;
	int			nchild;
};

struct mlx5_ib_fast_reg_page_list {
	struct ib_fast_reg_page_list	ibfrpl;
	__be64			       *mapped_page_list;
	dma_addr_t			map;
};

struct mlx5_ib_umr_context {
	enum ib_wc_status	status;
	struct completion	done;
};

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->status = -1;
	init_completion(&context->done);
}
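
/*
 * A sketch of the intended use (illustrative only): callers initialize the
 * context, post a UMR work request that references it, and then wait on
 * &context->done; the UMR completion path is expected to fill in ->status
 * and complete the context (see mlx5_umr_cq_handler() below).
 */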

struct umr_common {
	struct ib_pd	*pd;
	struct ib_mr	*mr;
};

enum {
	MLX5_FMR_INVALID,
	MLX5_FMR_VALID,
	MLX5_FMR_BUSY,
};

struct mlx5_ib_fmr {
	struct ib_fmr			ibfmr;
	struct mlx5_core_mr		mr;
	int				access_flags;
	int				state;
	/* protect fmr state
	 */
	spinlock_t			lock;
	u64				wrid;
	struct ib_send_wr		wr[2];
	u8				page_shift;
	struct ib_fast_reg_page_list	page_list;
};

struct cache_order {
	struct kobject		kobj;
	int			order;
	int			index;
	struct mlx5_ib_dev     *dev;
};

struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;


	u32                     order;
	u32			size;
	u32                     cur;
	u32                     miss;
	u32			limit;

	struct mlx5_ib_dev     *dev;
	struct work_struct	work;
	struct delayed_work	dwork;
	int			pending;
	struct cache_order	co;
};

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	int			stopped;
	struct dentry		*root;
	int			last_add;
	int			rel_timeout;
	int			rel_imm;
};

struct mlx5_ib_resources {
	struct ib_cq	*c0;
	struct ib_xrcd	*x0;
	struct ib_xrcd	*x1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
	struct ib_srq	*s1;
};

struct mlx5_dc_tracer {
	struct page	*pg;
	dma_addr_t	dma;
	int		size;
	int		order;
};

struct mlx5_dc_desc {
	dma_addr_t	dma;
	void		*buf;
};

enum mlx5_op {
	MLX5_WR_OP_MLX	= 1,
};

struct mlx5_mlx_wr {
	u8	sl;
	u16	dlid;
	int	icrc;
};

struct mlx5_send_wr {
	struct ib_send_wr	wr;
	union {
		struct mlx5_mlx_wr	mlx;
	} sel;
};

struct mlx5_dc_data {
	struct ib_mr		*mr;
	struct ib_qp		*dcqp;
	struct ib_cq		*rcq;
	struct ib_cq		*scq;
	unsigned int		rx_npages;
	unsigned int		tx_npages;
	struct mlx5_dc_desc	*rxdesc;
	struct mlx5_dc_desc	*txdesc;
	unsigned int		max_wqes;
	unsigned int		cur_send;
	unsigned int		last_send_completed;
	int			tx_pending;
	struct mlx5_ib_dev	*dev;
	int			port;
	int			initialized;
	struct kobject		kobj;
	unsigned long		connects;
	unsigned long		cnaks;
	unsigned long		discards;
	struct ib_wc		wc_tbl[MLX5_CNAK_RX_POLL_CQ_QUOTA];
};

struct mlx5_ib_port_sysfs_group {
	struct kobject		kobj;
	bool			enabled;
	struct attribute_group	counters;
};

#define	MLX5_IB_GID_MAX	16

struct mlx5_ib_port {
	struct mlx5_ib_dev	*dev;
	u8  port_num;	/* 0 based */
	u8  port_gone;	/* set when gone */
	u16 q_cnt_id;
	struct mlx5_ib_port_sysfs_group group;
	union ib_gid gid_table[MLX5_IB_GID_MAX];
};

struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev		*mdev;
	MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
	int				num_ports;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	bool				ib_active;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;
	struct mutex		slow_path_mutex;
	int				enable_atomic_resp;
	enum ib_atomic_cap		atomic_cap;
	struct mlx5_mr_cache		cache;
	struct kobject			mr_cache;
	/* protect resources needed as part of reset flow */
	spinlock_t		reset_flow_resource_lock;
	struct list_head		qp_list;
	struct timer_list		delay_timer;
	int				fill_delay;
	struct mlx5_dc_tracer	dctr;
	struct mlx5_dc_data	dcd[MLX5_MAX_PORTS];
	struct kobject		*dc_kobj;
	/* Array with num_ports elements */
	struct mlx5_ib_port	*port;
	struct kobject		*ports_parent;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct mlx5_ib_fmr, ibfmr);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp, mqp);
}

static inline struct mlx5_ib_qp *sq_to_mibqp(struct mlx5_core_qp *msq)
{
	return container_of(msq, struct mlx5_ib_qp, msq);
}

static inline struct mlx5_ib_qp *rq_to_mibqp(struct mlx5_core_qp *mrq)
{
	return container_of(mrq, struct mlx5_ib_qp, mrq);
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mr *mmr)
{
	return container_of(mmr, struct mlx5_ib_mr, mmr);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
{
	return container_of(ibfrpl, struct mlx5_ib_fast_reg_page_list, ibfrpl);
}

struct mlx5_ib_ah {
	struct ib_ah		ibah;
	struct mlx5_av		av;
};

static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx5_ib_ah, ibah);
}

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, uintptr_t virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh,
		 void *in_mad, void *response_mad);
int mlx5_ib_resolve_grh(const struct ib_ah_attr *ah_attr, u8 *mac, int *is_mcast);
struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev, struct ib_ah_attr *ah_attr,
			   struct mlx5_ib_ah *ah, enum rdma_link_layer ll);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				int entries, int vector,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata, int mr_id);
struct ib_mr *mlx5_ib_reg_phys_mr(struct ib_pd *pd,
				  struct ib_phys_buf *buffer_list,
				  int num_phys_buf,
				  int access_flags,
				  u64 *virt_addr);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
int mlx5_ib_destroy_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
					int max_page_list_len);
struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
							       int page_list_len);
void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);

struct ib_fmr *mlx5_ib_fmr_alloc(struct ib_pd *pd, int acc,
				 struct ib_fmr_attr *fmr_attr);
int mlx5_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		      int npages, u64 iova);
int mlx5_ib_unmap_fmr(struct list_head *fmr_list);
int mlx5_ib_fmr_dealloc(struct ib_fmr *ibfmr);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			struct ib_wc *in_wc, struct ib_grh *in_grh,
			struct ib_mad *in_mad, struct ib_mad *out_mad);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
					  struct ib_ucontext *context,
					  struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_smp_attr_node_info_mad_ifc(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_system_image_guid_mad_ifc(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_max_pkeys_mad_ifc(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_vendor_id_mad_ifc(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_pkey_mad_ifc(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_node_desc_mad_ifc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_node_guid_mad_ifc(struct mlx5_ib_dev *dev, u64 *node_guid);
int mlx5_query_gids_mad_ifc(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_port_mad_ifc(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
			int *ncont, int *order);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int umr);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context);
int mlx5_query_port_roce(struct ib_device *ibdev, u8 port,
			 struct ib_port_attr *props);
__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port, int index,
			       __be16 ah_udp_s_port);
int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port,
			   int index, int *gid_type);
struct net_device *mlx5_ib_get_netdev(struct ib_device *ib_dev, u8 port);
int modify_gid_roce(struct ib_device *ib_dev, u8 port, unsigned int index,
		    const union ib_gid *gid, struct net_device *ndev);
int query_gid_roce(struct ib_device *ib_dev, u8 port, int index,
		   union ib_gid *gid);
int mlx5_process_mad_mad_ifc(struct ib_device *ibdev, int mad_flags,
			     u8 port_num, struct ib_wc *in_wc,
			     struct ib_grh *in_grh, struct ib_mad *in_mad,
			     struct ib_mad *out_mad);

static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}
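
/*
 * Example (illustrative only): a subnet management query through the MAD
 * interface would typically initialize the header and then set the attribute
 * of interest before calling mlx5_MAD_IFC():
 *
 *	init_query_mad(in_mad);
 *	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
 */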

static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
	       MLX5_PERM_LOCAL_READ;
}
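
/*
 * convert_access() maps ib_verbs access flags (IB_ACCESS_*) to the hardware
 * MKey permission bits (MLX5_PERM_*); local read access is always granted.
 * Example (illustrative only):
 *
 *	u8 perm = convert_access(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ);
 */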

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
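
/*
 * With MLX5_MAX_UMR_SHIFT == 16, a single UMR-registered region spans at
 * most MLX5_MAX_UMR_PAGES == 1 << 16 == 65536 pages.
 */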

#endif /* MLX5_IB_H */