/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/ofed/include/rdma/ib_verbs_compat.h 331769 2018-03-30 18:06:29Z hselasky $
 */

#if !defined(IB_VERBS_COMPAT_H)
#define	IB_VERBS_COMPAT_H

#include <rdma/ib_verbs.h>

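/*
 * This header provides source-level compatibility for consumers
 * written against the older OFED verbs KPI.  It re-creates removed
 * structures and entry points and redirects several verbs, such as
 * ib_post_send(), ib_query_device() and ib_create_cq(), to inline
 * wrappers through function-like macros.
 */
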
enum ib_device_attr_comp_mask {
	IB_DEVICE_ATTR_WITH_TIMESTAMP_MASK = 1ULL << 1,
	IB_DEVICE_ATTR_WITH_HCA_CORE_CLOCK = 1ULL << 2
};

struct ib_protocol_stats {
	/* TBD... */
};

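/*
 * iWARP per-protocol counters.  The field names follow the standard
 * IP and TCP MIB objects (RFC 4293 and RFC 4022), from which the
 * legacy KPI borrowed them.
 */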
struct iw_protocol_stats {
	u64	ipInReceives;
	u64	ipInHdrErrors;
	u64	ipInTooBigErrors;
	u64	ipInNoRoutes;
	u64	ipInAddrErrors;
	u64	ipInUnknownProtos;
	u64	ipInTruncatedPkts;
	u64	ipInDiscards;
	u64	ipInDelivers;
	u64	ipOutForwDatagrams;
	u64	ipOutRequests;
	u64	ipOutDiscards;
	u64	ipOutNoRoutes;
	u64	ipReasmTimeout;
	u64	ipReasmReqds;
	u64	ipReasmOKs;
	u64	ipReasmFails;
	u64	ipFragOKs;
	u64	ipFragFails;
	u64	ipFragCreates;
	u64	ipInMcastPkts;
	u64	ipOutMcastPkts;
	u64	ipInBcastPkts;
	u64	ipOutBcastPkts;

	u64	tcpRtoAlgorithm;
	u64	tcpRtoMin;
	u64	tcpRtoMax;
	u64	tcpMaxConn;
	u64	tcpActiveOpens;
	u64	tcpPassiveOpens;
	u64	tcpAttemptFails;
	u64	tcpEstabResets;
	u64	tcpCurrEstab;
	u64	tcpInSegs;
	u64	tcpOutSegs;
	u64	tcpRetransSegs;
	u64	tcpInErrs;
	u64	tcpOutRsts;
};

union rdma_protocol_stats {
	struct ib_protocol_stats ib;
	struct iw_protocol_stats iw;
};

enum ib_mr_create_flags {
	IB_MR_SIGNATURE_EN = 1,
};

struct ib_mr_init_attr {
	int	max_reg_descriptors;
	u32	flags;
};

enum ib_qpg_type {
	IB_QPG_NONE = 0,
	IB_QPG_PARENT = (1 << 0),
	IB_QPG_CHILD_RX = (1 << 1),
	IB_QPG_CHILD_TX = (1 << 2)
};

struct ib_qpg_init_attrib {
	u32	tss_child_count;
	u32	rss_child_count;
};

enum {
	IB_DCT_CREATE_FLAG_RCV_INLINE = 1 << 0,
	IB_DCT_CREATE_FLAGS_MASK = IB_DCT_CREATE_FLAG_RCV_INLINE,
};

struct ib_dct_init_attr {
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_srq *srq;
	u64	dc_key;
	u8	port;
	u32	access_flags;
	u8	min_rnr_timer;
	u8	tclass;
	u32	flow_label;
	enum ib_mtu mtu;
	u8	pkey_index;
	u8	gid_index;
	u8	hop_limit;
	u32	create_flags;
};

struct ib_dct_attr {
	u64	dc_key;
	u8	port;
	u32	access_flags;
	u8	min_rnr_timer;
	u8	tclass;
	u32	flow_label;
	enum ib_mtu mtu;
	u8	pkey_index;
	u8	gid_index;
	u8	hop_limit;
	u32	key_violations;
	u8	state;
};

struct ib_fast_reg_page_list {
	struct ib_device *device;
	u64    *page_list;
	unsigned int max_page_list_len;
};

struct ib_mw_bind_info {
	struct ib_mr *mr;
	u64	addr;
	u64	length;
	int	mw_access_flags;
};

struct ib_mr_attr {
	struct ib_pd *pd;
	u64	device_virt_addr;
	u64	size;
	int	mr_access_flags;
	u32	lkey;
	u32	rkey;
};

struct ib_mw_bind {
	u64	wr_id;
	int	send_flags;
	struct ib_mw_bind_info bind_info;
};

enum ib_cq_attr_mask {
	IB_CQ_MODERATION = (1 << 0),
	IB_CQ_CAP_FLAGS = (1 << 1)
};

enum ib_cq_cap_flags {
	IB_CQ_IGNORE_OVERRUN = (1 << 0)
};

struct ib_cq_attr {
	struct {
		u16	cq_count;
		u16	cq_period;
	}	moderation;
	u32	cq_cap_flags;
};

struct ib_dct {
	struct ib_device *device;
	struct ib_uobject *uobject;
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_srq *srq;
	u32	dct_num;
};

enum verbs_values_mask {
	IBV_VALUES_HW_CLOCK = 1 << 0
};

struct ib_device_values {
	int	values_mask;
	uint64_t hwclock;
};

#define	IB_WR_FAST_REG_MR -2		/* not implemented */

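/*
 * Legacy send work request.  The anonymous leading structure overlaps
 * the "struct ib_send_wr" embedded at the head of each per-opcode
 * payload in the "wr" union, which is what allows these requests to be
 * handed to the new KPI with a plain cast (see ib_post_send_compat()).
 */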
struct ib_send_wr_compat {
	union {
		/*
		 * NOTE: The following structure must be kept in sync
		 * with "struct ib_send_wr":
		 */
		struct {
			struct ib_send_wr_compat *next;
			union {
				u64	wr_id;
				struct ib_cqe *wr_cqe;
			};
			struct ib_sge *sg_list;
			int	num_sge;
			enum ib_wr_opcode opcode;
			int	send_flags;
			union {
				__be32	imm_data;
				u32	invalidate_rkey;
			}	ex;
		};
		union {
			struct ib_rdma_wr rdma;
			struct ib_atomic_wr atomic;
			struct ib_ud_wr ud;
			struct ib_sig_handover_wr sig_handover;
			struct {
				struct ib_send_wr wr;
				u64	iova_start;
				struct ib_fast_reg_page_list *page_list;
				unsigned int page_shift;
				unsigned int page_list_len;
				u32	length;
				int	access_flags;
				u32	rkey;
			}	fast_reg;
			struct {
				struct ib_send_wr wr;
				int	npages;
				int	access_flags;
				u32	mkey;
				struct ib_pd *pd;
				u64	virt_addr;
				u64	length;
				int	page_shift;
			}	umr;
			struct {
				struct ib_send_wr wr;
				struct ib_mw *mw;
				/* The new rkey for the memory window. */
				u32	rkey;
				struct ib_mw_bind_info bind_info;
			}	bind_mw;
		}	wr;
	};
	u32	xrc_remote_srq_num;	/* XRC TGT QPs only */
};

static inline int
ib_post_send_compat(struct ib_qp *qp,
    struct ib_send_wr_compat *send_wr,
    struct ib_send_wr_compat **bad_send_wr)
{
	return (ib_post_send(qp, (struct ib_send_wr *)send_wr,
	    (struct ib_send_wr **)bad_send_wr));
}

#undef ib_post_send
#define	ib_post_send(...) \
	ib_post_send_compat(__VA_ARGS__)

#define	ib_send_wr \
	ib_send_wr_compat
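
/*
 * Example (hypothetical caller, for illustration only): legacy code
 * posting an RDMA WRITE compiles unchanged, because "ib_send_wr" now
 * names the compat structure and "wr.rdma" lands on top of the new
 * "struct ib_rdma_wr" layout:
 *
 *	struct ib_send_wr wr = {}, *bad;
 *
 *	wr.opcode = IB_WR_RDMA_WRITE;
 *	wr.sg_list = &sge;
 *	wr.num_sge = 1;
 *	wr.send_flags = IB_SEND_SIGNALED;
 *	wr.wr.rdma.remote_addr = remote_addr;
 *	wr.wr.rdma.rkey = rkey;
 *	(void)ib_post_send(qp, &wr, &bad);
 */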

static inline int
ib_query_device_compat(struct ib_device *device,
    struct ib_device_attr *device_attr)
{
	*device_attr = device->attrs;
	return (0);
}

#undef ib_query_device
#define	ib_query_device(...) \
	ib_query_device_compat(__VA_ARGS__)

static inline int
ib_query_gid_compat(struct ib_device *device,
    u8 port_num, int index, union ib_gid *gid)
{
	return (ib_query_gid(device, port_num, index, gid, NULL));
}

#undef ib_query_gid
#define	ib_query_gid(...) \
	ib_query_gid_compat(__VA_ARGS__)

static inline int
ib_find_gid_compat(struct ib_device *device, union ib_gid *gid,
    u8 *port_num, u16 *index)
{
	return (ib_find_gid(device, gid, IB_GID_TYPE_IB, NULL, port_num, index));
}

#undef ib_find_gid
#define	ib_find_gid(...) \
	ib_find_gid_compat(__VA_ARGS__)
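
/*
 * The updated ib_alloc_pd() takes an additional "flags" argument;
 * the compat wrapper passes zero.
 */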
static inline struct ib_pd *
ib_alloc_pd_compat(struct ib_device *device)
{
	return (ib_alloc_pd(device, 0));
}

#undef ib_alloc_pd
#define	ib_alloc_pd(...) \
	ib_alloc_pd_compat(__VA_ARGS__)

static inline struct ib_cq *
ib_create_cq_compat(struct ib_device *device,
    ib_comp_handler comp_handler,
    void (*event_handler) (struct ib_event *, void *),
    void *cq_context, int cqe, int comp_vector)
{
	const struct ib_cq_init_attr cq_attr = { .cqe = cqe, .comp_vector = comp_vector };

	return (ib_create_cq(device, comp_handler, event_handler, cq_context, &cq_attr));
}

#undef ib_create_cq
#define	ib_create_cq(...) \
	ib_create_cq_compat(__VA_ARGS__)
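
/*
 * Example (hypothetical caller): a legacy six-argument call such as
 *
 *	cq = ib_create_cq(device, handler, NULL, ctx, 256, 0);
 *
 * expands to ib_create_cq_compat(), which packs the CQ size and the
 * completion vector into a "struct ib_cq_init_attr" for the new KPI.
 */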

static inline int
ib_modify_cq_compat(struct ib_cq *cq,
    struct ib_cq_attr *cq_attr,
    int cq_attr_mask)
{
	if (cq_attr_mask & IB_CQ_MODERATION) {
		return (ib_modify_cq(cq, cq_attr->moderation.cq_count,
		    cq_attr->moderation.cq_period));
	} else {
		return (0);
	}
}

#undef ib_modify_cq
#define	ib_modify_cq(...) \
	ib_modify_cq_compat(__VA_ARGS__)
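
/*
 * ib_get_dma_mr() is no longer provided by the core; it is re-created
 * here by calling the driver's "get_dma_mr" method directly, the same
 * way the old core verbs code did.
 */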
static inline struct ib_mr *
ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;
	int err;

	err = ib_check_mr_access(mr_access_flags);
	if (err)
		return ERR_PTR(err);

	if (!pd->device->get_dma_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->get_dma_mr(pd, mr_access_flags);
	if (IS_ERR(mr))
		return ERR_CAST(mr);

	mr->device = pd->device;
	mr->pd = pd;
	mr->uobject = NULL;
	mr->need_inval = false;
	atomic_inc(&pd->usecnt);

	return (mr);
}

static inline struct ib_mr *
ib_reg_phys_mr(struct ib_pd *pd,
    struct ib_phys_buf *phys_buf_array,
    int num_phys_buf,
    int mr_access_flags,
    u64 *iova_start)
{
	struct ib_mr *mr;
	int err;

	err = ib_check_mr_access(mr_access_flags);
	if (err)
		return ERR_PTR(err);

	if (!pd->device->reg_phys_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
				     mr_access_flags, iova_start);
	if (IS_ERR(mr))
		return ERR_CAST(mr);

	mr->device = pd->device;
	mr->pd = pd;
	mr->uobject = NULL;
	atomic_inc(&pd->usecnt);

	return (mr);
}
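
/*
 * The legacy MR verbs below have no equivalent in the updated KPI;
 * they are stubbed to return an error or to do nothing.
 */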
static inline int
ib_rereg_phys_mr(struct ib_mr *mr,
    int mr_rereg_mask,
    struct ib_pd *pd,
    struct ib_phys_buf *phys_buf_array,
    int num_phys_buf,
    int mr_access_flags,
    u64 *iova_start)
{
	return (-EOPNOTSUPP);
}

static inline int
ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	return (-EOPNOTSUPP);
}

static inline struct ib_mr *
ib_create_mr(struct ib_pd *pd,
    struct ib_mr_init_attr *mr_init_attr)
{
	return (ERR_PTR(-ENOSYS));
}

static inline int
ib_destroy_mr(struct ib_mr *mr)
{
	return (-EOPNOTSUPP);
}

static inline struct ib_mr *
ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
	return (ERR_PTR(-ENOSYS));
}

static inline struct ib_fast_reg_page_list *
ib_alloc_fast_reg_page_list(struct ib_device *device, int page_list_len)
{
	return (ERR_PTR(-ENOSYS));
}

static inline void
ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{

}

static inline struct ib_mw *
ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd, type, NULL);
	if (!IS_ERR(mw)) {
		mw->device = pd->device;
		mw->pd = pd;
		mw->uobject = NULL;
		mw->type = type;
		atomic_inc(&pd->usecnt);
	}
	return (mw);
}

static inline int
ib_bind_mw(struct ib_qp *qp,
    struct ib_mw *mw,
    struct ib_mw_bind *mw_bind)
{
	return (-EOPNOTSUPP);
}

static inline int
ib_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd;
	int ret;

	pd = mw->pd;
	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);
	return (ret);
}
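
/*
 * Dynamically Connected transport (DCT) and hardware-clock queries are
 * not supported through this compatibility layer; the stubs below
 * always fail.
 */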
static inline struct ib_dct *
ib_create_dct(struct ib_pd *pd, struct ib_dct_init_attr *attr,
    struct ib_udata *udata)
{
	return (ERR_PTR(-ENOSYS));
}

static inline int
ib_destroy_dct(struct ib_dct *dct)
{
	return (-EOPNOTSUPP);
}

static inline int
ib_query_dct(struct ib_dct *dct, struct ib_dct_attr *attr)
{
	return (-EOPNOTSUPP);
}

static inline int
ib_query_values(struct ib_device *device,
    int q_values, struct ib_device_values *values)
{
	return (-EOPNOTSUPP);
}

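/*
 * Convert an IB speed enum into a rate in units of 0.1 Gb/s per lane
 * plus a printable suffix; callers typically multiply the rate by the
 * port width.  Note that "*speed" is left untouched in the SDR and
 * default cases, so callers should pre-initialize it.
 */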
static inline void
ib_active_speed_enum_to_rate(u8 active_speed,
    int *rate,
    char **speed)
{
	switch (active_speed) {
	case IB_SPEED_DDR:
		*speed = " DDR";
		*rate = 50;
		break;
	case IB_SPEED_QDR:
		*speed = " QDR";
		*rate = 100;
		break;
	case IB_SPEED_FDR10:
		*speed = " FDR10";
		*rate = 100;
		break;
	case IB_SPEED_FDR:
		*speed = " FDR";
		*rate = 140;
		break;
	case IB_SPEED_EDR:
		*speed = " EDR";
		*rate = 250;
		break;
	case IB_SPEED_SDR:
	default:			/* default to SDR for unknown speeds */
		*rate = 25;
		break;
	}
}

#include <rdma/rdma_cm.h>

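/*
 * The updated rdma_create_id() takes the network namespace as its
 * first argument; legacy callers are bound to the default "init_net".
 */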
static inline struct rdma_cm_id *
rdma_create_id_compat(rdma_cm_event_handler event_handler,
    void *context, enum rdma_port_space ps,
    enum ib_qp_type qp_type)
{
	return (rdma_create_id(&init_net, event_handler, context, ps, qp_type));
}

#undef rdma_create_id
#define	rdma_create_id(...) \
	rdma_create_id_compat(__VA_ARGS__)

#endif					/* IB_VERBS_COMPAT_H */