1/*-
2 * Copyright (c) 2018-2020, Mellanox Technologies. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 */
25
26#include "opt_rss.h"
27#include "opt_ratelimit.h"
28
29#include <rdma/ib_user_verbs.h>
30#include <rdma/ib_verbs.h>
31#include <rdma/uverbs_types.h>
32#include <rdma/uverbs_ioctl.h>
33#include <rdma/mlx5_user_ioctl_cmds.h>
34#include <rdma/mlx5_user_ioctl_verbs.h>
35#include <rdma/ib_umem.h>
36#include <rdma/uverbs_std_types.h>
37#include <dev/mlx5/driver.h>
38#include <dev/mlx5/fs.h>
39#include <dev/mlx5/mlx5_ib/mlx5_ib.h>
40
41#include <sys/priv.h>
42
43#include <linux/xarray.h>
44#include <linux/rculist.h>
45#include <linux/srcu.h>
46#include <linux/file.h>
47#include <linux/poll.h>
48#include <linux/wait.h>
49
50#define UVERBS_MODULE_NAME mlx5_ib
51#include <rdma/uverbs_named_ioctl.h>
52
53static void dispatch_event_fd(struct list_head *fd_list, const void *data);
54
55enum devx_obj_flags {
56	DEVX_OBJ_FLAGS_DCT = 1 << 1,
57	DEVX_OBJ_FLAGS_CQ = 1 << 2,
58};
59
60struct devx_async_data {
61	struct mlx5_ib_dev *mdev;
62	struct list_head list;
63	struct devx_async_cmd_event_file *ev_file;
64	struct mlx5_async_work cb_work;
65	u16 cmd_out_len;
66	/* must be last field in this structure */
67	struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
68};
69
70struct devx_async_event_data {
71	struct list_head list; /* headed in ev_file->event_list */
72	struct mlx5_ib_uapi_devx_async_event_hdr hdr;
73};
74
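/*
 * Event subscriptions are tracked in a two-level table: the device-wide
 * devx_event_table.event_xa is keyed by the event number (level 1), and each
 * devx_event holds a second xarray keyed by the object id (level 2).
 * Unaffiliated subscriptions hang directly off devx_event->unaffiliated_list,
 * affiliated ones off devx_obj_event->obj_sub_list.
 */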
75/* first level XA value data structure */
76struct devx_event {
77	struct xarray object_ids; /* second XA level, Key = object id */
78	struct list_head unaffiliated_list;
79};
80
81/* second level XA value data structure */
82struct devx_obj_event {
83	struct rcu_head rcu;
84	struct list_head obj_sub_list;
85};
86
87struct devx_event_subscription {
88	struct list_head file_list; /* headed in ev_file->
89				     * subscribed_events_list
90				     */
91	struct list_head xa_list; /* headed in devx_event->unaffiliated_list or
92				   * devx_obj_event->obj_sub_list
93				   */
94	struct list_head obj_list; /* headed in devx_object */
95	struct list_head event_list; /* headed in ev_file->event_list or in
96				      * temp list via subscription
97				      */
98
99	u8 is_cleaned:1;
100	u32 xa_key_level1;
101	u32 xa_key_level2;
102	struct rcu_head	rcu;
103	u64 cookie;
104	struct devx_async_event_file *ev_file;
105	struct fd eventfd;
106};
107
108struct devx_async_event_file {
109	struct ib_uobject uobj;
	/* Head of the event subscriptions attached to this FD */
111	struct list_head subscribed_events_list;
112	spinlock_t lock;
113	wait_queue_head_t poll_wait;
114	struct list_head event_list;
115	struct mlx5_ib_dev *dev;
116	u8 omit_data:1;
117	u8 is_overflow_err:1;
118	u8 is_destroyed:1;
119};
120
121#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
122struct devx_obj {
123	struct mlx5_ib_dev	*ib_dev;
124	u64			obj_id;
125	u32			dinlen; /* destroy inbox length */
126	u32			dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
127	u32			flags;
128	union {
129		struct mlx5_ib_devx_mr	devx_mr;
130		struct mlx5_core_dct	core_dct;
131		struct mlx5_core_cq	core_cq;
132		u32			flow_counter_bulk_size;
133	};
134	struct list_head event_sub; /* holds devx_event_subscription entries */
135};
136
137struct devx_umem {
138	struct mlx5_core_dev		*mdev;
139	struct ib_umem			*umem;
140	u32				page_offset;
141	int				page_shift;
142	int				ncont;
143	u32				dinlen;
144	u32				dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
145};
146
147struct devx_umem_reg_cmd {
148	void				*in;
149	u32				inlen;
150	u32				out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
151};
152
153static struct mlx5_ib_ucontext *
154devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
155{
156	return to_mucontext(ib_uverbs_get_ucontext(attrs));
157}
158
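/*
 * Create a firmware user context (UCTX) and return its uid; DEVX command
 * mailboxes issued on behalf of this verbs context are tagged with that uid
 * so firmware can enforce ownership.  The RAW_TX and INTERNAL_DEV_RES
 * capabilities are granted only when the caller holds the corresponding
 * privilege.
 */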
159int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
160{
161	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
162	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
163	void *uctx;
164	int err;
165	u16 uid;
166	u32 cap = 0;
167
	/* log_max_uctx == 0 means UCTX objects, and thus DEVX, are not supported */
169	if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx))
170		return -EINVAL;
171
172	uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
173	if (is_user && priv_check(curthread, PRIV_NET_RAW) == 0 &&
174	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
175		cap |= MLX5_UCTX_CAP_RAW_TX;
176	if (is_user && priv_check(curthread, PRIV_DRIVER) == 0 &&
177	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
178	     MLX5_UCTX_CAP_INTERNAL_DEV_RES))
179		cap |= MLX5_UCTX_CAP_INTERNAL_DEV_RES;
180
181	MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
182	MLX5_SET(uctx, uctx, cap, cap);
183
184	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
185	if (err)
186		return err;
187
188	uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
189	return uid;
190}
191
192void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
193{
194	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {0};
195	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
196
197	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
198	MLX5_SET(destroy_uctx_in, in, uid, uid);
199
200	mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
201}
202
203bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
204{
205	struct devx_obj *devx_obj = obj;
206	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
207
208	switch (opcode) {
209	case MLX5_CMD_OP_DESTROY_TIR:
210		*dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
211		*dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
212				    obj_id);
213		return true;
214
215	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
216		*dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
217		*dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
218				    table_id);
219		return true;
220	default:
221		return false;
222	}
223}
224
225bool mlx5_ib_devx_is_flow_counter(void *obj, u32 offset, u32 *counter_id)
226{
227	struct devx_obj *devx_obj = obj;
228	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
229
230	if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) {
231
232		if (offset && offset >= devx_obj->flow_counter_bulk_size)
233			return false;
234
235		*counter_id = MLX5_GET(dealloc_flow_counter_in,
236				       devx_obj->dinbox,
237				       flow_counter_id);
238		*counter_id += offset;
239		return true;
240	}
241
242	return false;
243}
244
245static bool is_legacy_unaffiliated_event_num(u16 event_num)
246{
247	switch (event_num) {
248	case MLX5_EVENT_TYPE_PORT_CHANGE:
249		return true;
250	default:
251		return false;
252	}
253}
254
255static bool is_legacy_obj_event_num(u16 event_num)
256{
257	switch (event_num) {
258	case MLX5_EVENT_TYPE_PATH_MIG:
259	case MLX5_EVENT_TYPE_COMM_EST:
260	case MLX5_EVENT_TYPE_SQ_DRAINED:
261	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
262	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
263	case MLX5_EVENT_TYPE_CQ_ERROR:
264	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
265	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
266	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
267	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
268	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
269	case MLX5_EVENT_TYPE_DCT_DRAINED:
270	case MLX5_EVENT_TYPE_COMP:
271	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
272	case MLX5_EVENT_TYPE_XRQ_ERROR:
273		return true;
274	default:
275		return false;
276	}
277}
278
279static u16 get_legacy_obj_type(u16 opcode)
280{
281	switch (opcode) {
282	case MLX5_CMD_OP_CREATE_RQ:
283		return MLX5_EVENT_QUEUE_TYPE_RQ;
284	case MLX5_CMD_OP_CREATE_QP:
285		return MLX5_EVENT_QUEUE_TYPE_QP;
286	case MLX5_CMD_OP_CREATE_SQ:
287		return MLX5_EVENT_QUEUE_TYPE_SQ;
288	case MLX5_CMD_OP_CREATE_DCT:
289		return MLX5_EVENT_QUEUE_TYPE_DCT;
290	default:
291		return 0;
292	}
293}
294
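/*
 * Derive the object type used when building event subscription keys: the
 * creator opcode is recovered from bits 32..47 of the encoded obj_id, and
 * for general objects the object type itself sits in bits 48..63.
 */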
295static u16 get_dec_obj_type(struct devx_obj *obj, u16 event_num)
296{
297	u16 opcode;
298
299	opcode = (obj->obj_id >> 32) & 0xffff;
300
301	if (is_legacy_obj_event_num(event_num))
302		return get_legacy_obj_type(opcode);
303
304	switch (opcode) {
305	case MLX5_CMD_OP_CREATE_GENERAL_OBJ:
306		return (obj->obj_id >> 48);
307	case MLX5_CMD_OP_CREATE_RQ:
308		return MLX5_OBJ_TYPE_RQ;
309	case MLX5_CMD_OP_CREATE_QP:
310		return MLX5_OBJ_TYPE_QP;
311	case MLX5_CMD_OP_CREATE_SQ:
312		return MLX5_OBJ_TYPE_SQ;
313	case MLX5_CMD_OP_CREATE_DCT:
314		return MLX5_OBJ_TYPE_DCT;
315	case MLX5_CMD_OP_CREATE_TIR:
316		return MLX5_OBJ_TYPE_TIR;
317	case MLX5_CMD_OP_CREATE_TIS:
318		return MLX5_OBJ_TYPE_TIS;
319	case MLX5_CMD_OP_CREATE_PSV:
320		return MLX5_OBJ_TYPE_PSV;
	case MLX5_CMD_OP_CREATE_MKEY:
322		return MLX5_OBJ_TYPE_MKEY;
323	case MLX5_CMD_OP_CREATE_RMP:
324		return MLX5_OBJ_TYPE_RMP;
325	case MLX5_CMD_OP_CREATE_XRC_SRQ:
326		return MLX5_OBJ_TYPE_XRC_SRQ;
327	case MLX5_CMD_OP_CREATE_XRQ:
328		return MLX5_OBJ_TYPE_XRQ;
329	case MLX5_CMD_OP_CREATE_RQT:
330		return MLX5_OBJ_TYPE_RQT;
331	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
332		return MLX5_OBJ_TYPE_FLOW_COUNTER;
333	case MLX5_CMD_OP_CREATE_CQ:
334		return MLX5_OBJ_TYPE_CQ;
335	default:
336		return 0;
337	}
338}
339
340static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe)
341{
342	switch (event_type) {
343	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
344	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
345	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
346	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
347	case MLX5_EVENT_TYPE_PATH_MIG:
348	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
349	case MLX5_EVENT_TYPE_COMM_EST:
350	case MLX5_EVENT_TYPE_SQ_DRAINED:
351	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
352	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
353		return eqe->data.qp_srq.type;
354	case MLX5_EVENT_TYPE_CQ_ERROR:
355	case MLX5_EVENT_TYPE_XRQ_ERROR:
356		return 0;
357	case MLX5_EVENT_TYPE_DCT_DRAINED:
358	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
359		return MLX5_EVENT_QUEUE_TYPE_DCT;
360	default:
361		return MLX5_GET(affiliated_event_header, &eqe->data, obj_type);
362	}
363}
364
365static u32 get_dec_obj_id(u64 obj_id)
366{
367	return (obj_id & 0xffffffff);
368}
369
/*
 * As the obj_id in the firmware is not globally unique, the object type
 * must be taken into account when checking for a valid object id.
 * To allow that, the opcode of the creator command is encoded as part of
 * the obj_id.
 */
375static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
376{
377	return ((u64)opcode << 32) | obj_id;
378}
379
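/*
 * Map a modify/query/arm command mailbox to the encoded id of the object it
 * targets, i.e. the same (creator opcode, object id) pair produced by
 * get_enc_obj_id() at creation time.  For example, MODIFY_TIR on tirn 0x12
 * resolves to ((u64)MLX5_CMD_OP_CREATE_TIR << 32) | 0x12.
 */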
380static u64 devx_get_obj_id(const void *in)
381{
382	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
383	u64 obj_id;
384
385	switch (opcode) {
386	case MLX5_CMD_OP_MODIFY_GENERAL_OBJ:
387	case MLX5_CMD_OP_QUERY_GENERAL_OBJ:
388		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJ |
389					MLX5_GET(general_obj_in_cmd_hdr, in,
390						 obj_type) << 16,
391					MLX5_GET(general_obj_in_cmd_hdr, in,
392						 obj_id));
393		break;
394	case MLX5_CMD_OP_QUERY_MKEY:
395		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
396					MLX5_GET(query_mkey_in, in,
397						 mkey_index));
398		break;
399	case MLX5_CMD_OP_QUERY_CQ:
400		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
401					MLX5_GET(query_cq_in, in, cqn));
402		break;
403	case MLX5_CMD_OP_MODIFY_CQ:
404		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
405					MLX5_GET(modify_cq_in, in, cqn));
406		break;
407	case MLX5_CMD_OP_QUERY_SQ:
408		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
409					MLX5_GET(query_sq_in, in, sqn));
410		break;
411	case MLX5_CMD_OP_MODIFY_SQ:
412		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
413					MLX5_GET(modify_sq_in, in, sqn));
414		break;
415	case MLX5_CMD_OP_QUERY_RQ:
416		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
417					MLX5_GET(query_rq_in, in, rqn));
418		break;
419	case MLX5_CMD_OP_MODIFY_RQ:
420		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
421					MLX5_GET(modify_rq_in, in, rqn));
422		break;
423	case MLX5_CMD_OP_QUERY_RMP:
424		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
425					MLX5_GET(query_rmp_in, in, rmpn));
426		break;
427	case MLX5_CMD_OP_MODIFY_RMP:
428		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
429					MLX5_GET(modify_rmp_in, in, rmpn));
430		break;
431	case MLX5_CMD_OP_QUERY_RQT:
432		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
433					MLX5_GET(query_rqt_in, in, rqtn));
434		break;
435	case MLX5_CMD_OP_MODIFY_RQT:
436		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
437					MLX5_GET(modify_rqt_in, in, rqtn));
438		break;
439	case MLX5_CMD_OP_QUERY_TIR:
440		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
441					MLX5_GET(query_tir_in, in, tirn));
442		break;
443	case MLX5_CMD_OP_MODIFY_TIR:
444		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
445					MLX5_GET(modify_tir_in, in, tirn));
446		break;
447	case MLX5_CMD_OP_QUERY_TIS:
448		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
449					MLX5_GET(query_tis_in, in, tisn));
450		break;
451	case MLX5_CMD_OP_MODIFY_TIS:
452		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
453					MLX5_GET(modify_tis_in, in, tisn));
454		break;
455	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
456		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
457					MLX5_GET(query_flow_table_in, in,
458						 table_id));
459		break;
460	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
461		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
462					MLX5_GET(modify_flow_table_in, in,
463						 table_id));
464		break;
465	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
466		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
467					MLX5_GET(query_flow_group_in, in,
468						 group_id));
469		break;
470	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
471		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
472					MLX5_GET(query_fte_in, in,
473						 flow_index));
474		break;
475	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
476		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
477					MLX5_GET(set_fte_in, in, flow_index));
478		break;
479	case MLX5_CMD_OP_QUERY_Q_COUNTER:
480		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
481					MLX5_GET(query_q_counter_in, in,
482						 counter_set_id));
483		break;
484	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
485		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
486					MLX5_GET(query_flow_counter_in, in,
487						 flow_counter_id));
488		break;
489	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
490		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
491					MLX5_GET(general_obj_in_cmd_hdr, in,
492						 obj_id));
493		break;
494	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
495		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
496					MLX5_GET(query_scheduling_element_in,
497						 in, scheduling_element_id));
498		break;
499	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
500		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
501					MLX5_GET(modify_scheduling_element_in,
502						 in, scheduling_element_id));
503		break;
504	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
505		obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
506					MLX5_GET(add_vxlan_udp_dport_in, in,
507						 vxlan_udp_port));
508		break;
509	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
510		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
511					MLX5_GET(query_l2_table_entry_in, in,
512						 table_index));
513		break;
514	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
515		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
516					MLX5_GET(set_l2_table_entry_in, in,
517						 table_index));
518		break;
519	case MLX5_CMD_OP_QUERY_QP:
520		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
521					MLX5_GET(query_qp_in, in, qpn));
522		break;
523	case MLX5_CMD_OP_RST2INIT_QP:
524		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
525					MLX5_GET(rst2init_qp_in, in, qpn));
526		break;
527	case MLX5_CMD_OP_INIT2RTR_QP:
528		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
529					MLX5_GET(init2rtr_qp_in, in, qpn));
530		break;
531	case MLX5_CMD_OP_RTR2RTS_QP:
532		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
533					MLX5_GET(rtr2rts_qp_in, in, qpn));
534		break;
535	case MLX5_CMD_OP_RTS2RTS_QP:
536		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
537					MLX5_GET(rts2rts_qp_in, in, qpn));
538		break;
539	case MLX5_CMD_OP_SQERR2RTS_QP:
540		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
541					MLX5_GET(sqerr2rts_qp_in, in, qpn));
542		break;
543	case MLX5_CMD_OP_2ERR_QP:
544		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
545					MLX5_GET(qp_2err_in, in, qpn));
546		break;
547	case MLX5_CMD_OP_2RST_QP:
548		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
549					MLX5_GET(qp_2rst_in, in, qpn));
550		break;
551	case MLX5_CMD_OP_QUERY_DCT:
552		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
553					MLX5_GET(query_dct_in, in, dctn));
554		break;
555	case MLX5_CMD_OP_QUERY_XRQ:
556	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
557	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
558		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
559					MLX5_GET(query_xrq_in, in, xrqn));
560		break;
561	case MLX5_CMD_OP_QUERY_XRC_SRQ:
562		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
563					MLX5_GET(query_xrc_srq_in, in,
564						 xrc_srqn));
565		break;
566	case MLX5_CMD_OP_ARM_XRC_SRQ:
567		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
568					MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
569		break;
570	case MLX5_CMD_OP_QUERY_SRQ:
571		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
572					MLX5_GET(query_srq_in, in, srqn));
573		break;
574	case MLX5_CMD_OP_ARM_RQ:
575		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
576					MLX5_GET(arm_rq_in, in, srq_number));
577		break;
578	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
579		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
580					MLX5_GET(drain_dct_in, in, dctn));
581		break;
582	case MLX5_CMD_OP_ARM_XRQ:
583	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
584	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
585	case MLX5_CMD_OP_MODIFY_XRQ:
586		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
587					MLX5_GET(arm_xrq_in, in, xrqn));
588		break;
589	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
590		obj_id = get_enc_obj_id
591				(MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
592				 MLX5_GET(query_packet_reformat_context_in,
593					  in, packet_reformat_id));
594		break;
595	default:
596		obj_id = 0;
597	}
598
599	return obj_id;
600}
601
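/*
 * Check that the object id carried in the user's command mailbox matches the
 * object bound to the uobject the command was issued on, so a command cannot
 * be redirected to an unrelated firmware object.
 */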
602static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
603				 struct ib_uobject *uobj, const void *in)
604{
605	struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
606	u64 obj_id = devx_get_obj_id(in);
607
608	if (!obj_id)
609		return false;
610
611	switch (uobj_get_object_id(uobj)) {
612	case UVERBS_OBJECT_CQ:
613		return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
614				      to_mcq(uobj->object)->mcq.cqn) ==
615				      obj_id;
616
617	case UVERBS_OBJECT_SRQ:
618	{
619		struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
620		u16 opcode;
621
622		switch (srq->common.res) {
623		case MLX5_RES_XSRQ:
624			opcode = MLX5_CMD_OP_CREATE_XRC_SRQ;
625			break;
626		case MLX5_RES_XRQ:
627			opcode = MLX5_CMD_OP_CREATE_XRQ;
628			break;
629		default:
630			if (!dev->mdev->issi)
631				opcode = MLX5_CMD_OP_CREATE_SRQ;
632			else
633				opcode = MLX5_CMD_OP_CREATE_RMP;
634		}
635
636		return get_enc_obj_id(opcode,
637				      to_msrq(uobj->object)->msrq.srqn) ==
638				      obj_id;
639	}
640
641	case UVERBS_OBJECT_QP:
642	{
643		struct mlx5_ib_qp *qp = to_mqp(uobj->object);
644		enum ib_qp_type	qp_type = qp->ibqp.qp_type;
645
646		if (qp_type == IB_QPT_RAW_PACKET ||
647		    (qp->flags & MLX5_IB_QP_UNDERLAY)) {
648			struct mlx5_ib_raw_packet_qp *raw_packet_qp =
649							 &qp->raw_packet_qp;
650			struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
651			struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
652
653			return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
654					       rq->base.mqp.qpn) == obj_id ||
655				get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
656					       sq->base.mqp.qpn) == obj_id ||
657				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
658					       rq->tirn) == obj_id ||
659				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
660					       sq->tisn) == obj_id);
661		}
662
663		if (qp_type == MLX5_IB_QPT_DCT)
664			return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
665					      qp->dct.mdct.dctn) == obj_id;
666
667		return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
668				      qp->ibqp.qp_num) == obj_id;
669	}
670
671	case UVERBS_OBJECT_WQ:
672		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
673				      to_mrwq(uobj->object)->core_qp.qpn) ==
674				      obj_id;
675
676	case UVERBS_OBJECT_RWQ_IND_TBL:
677		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
678				      to_mrwq_ind_table(uobj->object)->rqtn) ==
679				      obj_id;
680
681	case MLX5_IB_OBJECT_DEVX_OBJ:
682		return ((struct devx_obj *)uobj->object)->obj_id == obj_id;
683
684	default:
685		return false;
686	}
687}
688
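/*
 * Mark the queue and doorbell record buffers of a create mailbox as
 * umem-based (the *_umem_valid bits) so firmware uses the umem ids supplied
 * by the DEVX user for these resources.
 */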
689static void devx_set_umem_valid(const void *in)
690{
691	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
692
693	switch (opcode) {
694	case MLX5_CMD_OP_CREATE_MKEY:
695		MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
696		break;
697	case MLX5_CMD_OP_CREATE_CQ:
698	{
699		void *cqc;
700
701		MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
702		cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
703		MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
704		break;
705	}
706	case MLX5_CMD_OP_CREATE_QP:
707	{
708		void *qpc;
709
710		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
711		MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
712		MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
713		break;
714	}
715
716	case MLX5_CMD_OP_CREATE_RQ:
717	{
718		void *rqc, *wq;
719
720		rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
721		wq  = MLX5_ADDR_OF(rqc, rqc, wq);
722		MLX5_SET(wq, wq, dbr_umem_valid, 1);
723		MLX5_SET(wq, wq, wq_umem_valid, 1);
724		break;
725	}
726
727	case MLX5_CMD_OP_CREATE_SQ:
728	{
729		void *sqc, *wq;
730
731		sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
732		wq = MLX5_ADDR_OF(sqc, sqc, wq);
733		MLX5_SET(wq, wq, dbr_umem_valid, 1);
734		MLX5_SET(wq, wq, wq_umem_valid, 1);
735		break;
736	}
737
738	case MLX5_CMD_OP_MODIFY_CQ:
739		MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
740		break;
741
742	case MLX5_CMD_OP_CREATE_RMP:
743	{
744		void *rmpc, *wq;
745
746		rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
747		wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
748		MLX5_SET(wq, wq, dbr_umem_valid, 1);
749		MLX5_SET(wq, wq, wq_umem_valid, 1);
750		break;
751	}
752
753	case MLX5_CMD_OP_CREATE_XRQ:
754	{
755		void *xrqc, *wq;
756
757		xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
758		wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
759		MLX5_SET(wq, wq, dbr_umem_valid, 1);
760		MLX5_SET(wq, wq, wq_umem_valid, 1);
761		break;
762	}
763
764	case MLX5_CMD_OP_CREATE_XRC_SRQ:
765	{
766		void *xrc_srqc;
767
768		MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
769		xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
770					xrc_srq_context_entry);
771		MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
772		break;
773	}
774
775	default:
776		return;
777	}
778}
779
780static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
781{
782	*opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
783
784	switch (*opcode) {
785	case MLX5_CMD_OP_CREATE_GENERAL_OBJ:
786	case MLX5_CMD_OP_CREATE_MKEY:
787	case MLX5_CMD_OP_CREATE_CQ:
788	case MLX5_CMD_OP_ALLOC_PD:
789	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
790	case MLX5_CMD_OP_CREATE_RMP:
791	case MLX5_CMD_OP_CREATE_SQ:
792	case MLX5_CMD_OP_CREATE_RQ:
793	case MLX5_CMD_OP_CREATE_RQT:
794	case MLX5_CMD_OP_CREATE_TIR:
795	case MLX5_CMD_OP_CREATE_TIS:
796	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
797	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
798	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
799	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
800	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
801	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
802	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
803	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
804	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
805	case MLX5_CMD_OP_CREATE_QP:
806	case MLX5_CMD_OP_CREATE_SRQ:
807	case MLX5_CMD_OP_CREATE_XRC_SRQ:
808	case MLX5_CMD_OP_CREATE_DCT:
809	case MLX5_CMD_OP_CREATE_XRQ:
810	case MLX5_CMD_OP_ATTACH_TO_MCG:
811	case MLX5_CMD_OP_ALLOC_XRCD:
812		return true;
813	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
814	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 0)
817			return true;
818		return false;
819	}
820	case MLX5_CMD_OP_CREATE_PSV:
821	{
822		u8 num_psv = MLX5_GET(create_psv_in, in, num_psv);
823
824		if (num_psv == 1)
825			return true;
826		return false;
827	}
828	default:
829		return false;
830	}
831}
832
833static bool devx_is_obj_modify_cmd(const void *in)
834{
835	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
836
837	switch (opcode) {
838	case MLX5_CMD_OP_MODIFY_GENERAL_OBJ:
839	case MLX5_CMD_OP_MODIFY_CQ:
840	case MLX5_CMD_OP_MODIFY_RMP:
841	case MLX5_CMD_OP_MODIFY_SQ:
842	case MLX5_CMD_OP_MODIFY_RQ:
843	case MLX5_CMD_OP_MODIFY_RQT:
844	case MLX5_CMD_OP_MODIFY_TIR:
845	case MLX5_CMD_OP_MODIFY_TIS:
846	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
847	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
848	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
849	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
850	case MLX5_CMD_OP_RST2INIT_QP:
851	case MLX5_CMD_OP_INIT2RTR_QP:
852	case MLX5_CMD_OP_RTR2RTS_QP:
853	case MLX5_CMD_OP_RTS2RTS_QP:
854	case MLX5_CMD_OP_SQERR2RTS_QP:
855	case MLX5_CMD_OP_2ERR_QP:
856	case MLX5_CMD_OP_2RST_QP:
857	case MLX5_CMD_OP_ARM_XRC_SRQ:
858	case MLX5_CMD_OP_ARM_RQ:
859	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
860	case MLX5_CMD_OP_ARM_XRQ:
861	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
862	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
863	case MLX5_CMD_OP_MODIFY_XRQ:
864		return true;
865	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
866	{
867		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
868
869		if (op_mod == 1)
870			return true;
871		return false;
872	}
873	default:
874		return false;
875	}
876}
877
878static bool devx_is_obj_query_cmd(const void *in)
879{
880	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
881
882	switch (opcode) {
883	case MLX5_CMD_OP_QUERY_GENERAL_OBJ:
884	case MLX5_CMD_OP_QUERY_MKEY:
885	case MLX5_CMD_OP_QUERY_CQ:
886	case MLX5_CMD_OP_QUERY_RMP:
887	case MLX5_CMD_OP_QUERY_SQ:
888	case MLX5_CMD_OP_QUERY_RQ:
889	case MLX5_CMD_OP_QUERY_RQT:
890	case MLX5_CMD_OP_QUERY_TIR:
891	case MLX5_CMD_OP_QUERY_TIS:
892	case MLX5_CMD_OP_QUERY_Q_COUNTER:
893	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
894	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
895	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
896	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
897	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
898	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
899	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
900	case MLX5_CMD_OP_QUERY_QP:
901	case MLX5_CMD_OP_QUERY_SRQ:
902	case MLX5_CMD_OP_QUERY_XRC_SRQ:
903	case MLX5_CMD_OP_QUERY_DCT:
904	case MLX5_CMD_OP_QUERY_XRQ:
905	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
906	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
907	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
908		return true;
909	default:
910		return false;
911	}
912}
913
914static bool devx_is_whitelist_cmd(void *in)
915{
916	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
917
918	switch (opcode) {
919	case MLX5_CMD_OP_QUERY_HCA_CAP:
920	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
921	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
922		return true;
923	default:
924		return false;
925	}
926}
927
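/*
 * Resolve the uid used to tag the command mailbox.  Whitelisted query
 * commands report -EOPNOTSUPP when the context has no DEVX uid; all other
 * commands are rejected with -EINVAL in that case.
 */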
928static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
929{
930	if (devx_is_whitelist_cmd(cmd_in)) {
931		if (c->devx_uid)
932			return c->devx_uid;
933
934		return -EOPNOTSUPP;
935	}
936
937	if (!c->devx_uid)
938		return -EINVAL;
939
940	return c->devx_uid;
941}
942
943static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
944{
945	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
946
	/* Pass all commands carrying a vhca_tunnel_id as general; tracking is done in FW */
948	if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) &&
949	     MLX5_GET(general_obj_in_cmd_hdr, in, vhca_tunnel_id)) ||
950	    (opcode >= MLX5_CMD_OP_GENERAL_START &&
951	     opcode < MLX5_CMD_OP_GENERAL_END))
952		return true;
953
954	switch (opcode) {
955	case MLX5_CMD_OP_QUERY_HCA_CAP:
956	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
957	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
958	case MLX5_CMD_OP_QUERY_VPORT_STATE:
959	case MLX5_CMD_OP_QUERY_ADAPTER:
960	case MLX5_CMD_OP_QUERY_ISSI:
961	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
962	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
963	case MLX5_CMD_OP_QUERY_VNIC_ENV:
964	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
965	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
966	case MLX5_CMD_OP_NOP:
967	case MLX5_CMD_OP_QUERY_CONG_STATUS:
968	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
969	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
970	case MLX5_CMD_OP_QUERY_LAG:
971		return true;
972	default:
973		return false;
974	}
975}
976
977static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
978	struct uverbs_attr_bundle *attrs)
979{
980	struct mlx5_ib_ucontext *c;
981	struct mlx5_ib_dev *dev;
982	int user_vector;
983	int dev_eqn;
984	unsigned int irqn;
985	int err;
986
987	if (uverbs_copy_from(&user_vector, attrs,
988			     MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
989		return -EFAULT;
990
991	c = devx_ufile2uctx(attrs);
992	if (IS_ERR(c))
993		return PTR_ERR(c);
994	dev = to_mdev(c->ibucontext.device);
995
996	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
997	if (err < 0)
998		return err;
999
1000	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
1001			   &dev_eqn, sizeof(dev_eqn)))
1002		return -EFAULT;
1003
1004	return 0;
1005}
1006
/*
 * Security note:
 * The hardware protection mechanism works like this: Each device object that
 * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
 * the device specification manual) upon its creation. Then upon doorbell,
 * hardware fetches the object context for which the doorbell was rung, and
 * validates that the UAR through which the DB was rung matches the UAR ID
 * of the object.
 * If there is no match, the doorbell is silently ignored by the hardware. Of
 * course, the user cannot ring a doorbell on a UAR that was not mapped to it.
 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
 * mailboxes (except tagging them with UID), we expose to the user its UAR
 * ID, so it can embed it in these objects in the expected specification
 * format. So the only thing the user can do is hurt itself by creating a
 * QP/SQ/CQ with a UAR ID other than its own, and then in this case other
 * users may ring a doorbell on its objects.
 * The consequence of that is that another user can schedule a QP/SQ of the
 * buggy user for execution (just insert it into the hardware schedule queue
 * or arm its CQ for event generation); no further harm is expected.
 */
1027static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
1028	struct uverbs_attr_bundle *attrs)
1029{
1030	struct mlx5_ib_ucontext *c;
1031	struct mlx5_ib_dev *dev;
1032	u32 user_idx;
1033	s32 dev_idx;
1034
1035	c = devx_ufile2uctx(attrs);
1036	if (IS_ERR(c))
1037		return PTR_ERR(c);
1038	dev = to_mdev(c->ibucontext.device);
1039
1040	if (uverbs_copy_from(&user_idx, attrs,
1041			     MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
1042		return -EFAULT;
1043
1044	dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
1045	if (dev_idx < 0)
1046		return dev_idx;
1047
1048	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
1049			   &dev_idx, sizeof(dev_idx)))
1050		return -EFAULT;
1051
1052	return 0;
1053}
1054
1055static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
1056	struct uverbs_attr_bundle *attrs)
1057{
1058	struct mlx5_ib_ucontext *c;
1059	struct mlx5_ib_dev *dev;
1060	void *cmd_in = uverbs_attr_get_alloced_ptr(
1061		attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
1062	int cmd_out_len = uverbs_attr_get_len(attrs,
1063					MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
1064	void *cmd_out;
1065	int err;
1066	int uid;
1067
1068	c = devx_ufile2uctx(attrs);
1069	if (IS_ERR(c))
1070		return PTR_ERR(c);
1071	dev = to_mdev(c->ibucontext.device);
1072
1073	uid = devx_get_uid(c, cmd_in);
1074	if (uid < 0)
1075		return uid;
1076
	/* Only a whitelist of general HCA commands is allowed for this method. */
1078	if (!devx_is_general_cmd(cmd_in, dev))
1079		return -EINVAL;
1080
1081	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1082	if (IS_ERR(cmd_out))
1083		return PTR_ERR(cmd_out);
1084
1085	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1086	err = mlx5_cmd_exec(dev->mdev, cmd_in,
1087			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
1088			    cmd_out, cmd_out_len);
1089	if (err)
1090		return err;
1091
1092	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
1093			      cmd_out_len);
1094}
1095
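/*
 * Pre-build the destroy/dealloc mailbox (din) for a freshly created object
 * from its create in/out mailboxes, so devx_obj_cleanup() can tear the
 * object down later without any further parsing.
 */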
1096static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
1097				       u32 *dinlen,
1098				       u32 *obj_id)
1099{
1100	u16 obj_type = MLX5_GET(general_obj_in_cmd_hdr, in, obj_type);
1101	u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);
1102
1103	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
1104	*dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);
1105
1106	MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
1107	MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);
1108
1109	switch (MLX5_GET(general_obj_in_cmd_hdr, in, opcode)) {
1110	case MLX5_CMD_OP_CREATE_GENERAL_OBJ:
1111		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJ);
1112		MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
1113		break;
1114
1115	case MLX5_CMD_OP_CREATE_UMEM:
1116		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1117			 MLX5_CMD_OP_DESTROY_UMEM);
1118		break;
1119	case MLX5_CMD_OP_CREATE_MKEY:
1120		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_MKEY);
1121		break;
1122	case MLX5_CMD_OP_CREATE_CQ:
1123		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
1124		break;
1125	case MLX5_CMD_OP_ALLOC_PD:
1126		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
1127		break;
1128	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
1129		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1130			 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
1131		break;
1132	case MLX5_CMD_OP_CREATE_RMP:
1133		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
1134		break;
1135	case MLX5_CMD_OP_CREATE_SQ:
1136		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
1137		break;
1138	case MLX5_CMD_OP_CREATE_RQ:
1139		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
1140		break;
1141	case MLX5_CMD_OP_CREATE_RQT:
1142		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
1143		break;
1144	case MLX5_CMD_OP_CREATE_TIR:
1145		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
1146		break;
1147	case MLX5_CMD_OP_CREATE_TIS:
1148		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
1149		break;
1150	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
1151		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1152			 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
1153		break;
1154	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
1155		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
1156		*obj_id = MLX5_GET(create_flow_table_out, out, table_id);
1157		MLX5_SET(destroy_flow_table_in, din, other_vport,
1158			 MLX5_GET(create_flow_table_in,  in, other_vport));
1159		MLX5_SET(destroy_flow_table_in, din, vport_number,
1160			 MLX5_GET(create_flow_table_in,  in, vport_number));
1161		MLX5_SET(destroy_flow_table_in, din, table_type,
1162			 MLX5_GET(create_flow_table_in,  in, table_type));
1163		MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
1164		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1165			 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
1166		break;
1167	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
1168		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
1169		*obj_id = MLX5_GET(create_flow_group_out, out, group_id);
1170		MLX5_SET(destroy_flow_group_in, din, other_vport,
1171			 MLX5_GET(create_flow_group_in, in, other_vport));
1172		MLX5_SET(destroy_flow_group_in, din, vport_number,
1173			 MLX5_GET(create_flow_group_in, in, vport_number));
1174		MLX5_SET(destroy_flow_group_in, din, table_type,
1175			 MLX5_GET(create_flow_group_in, in, table_type));
1176		MLX5_SET(destroy_flow_group_in, din, table_id,
1177			 MLX5_GET(create_flow_group_in, in, table_id));
1178		MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
1179		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1180			 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
1181		break;
1182	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
1183		*dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
1184		*obj_id = MLX5_GET(set_fte_in, in, flow_index);
1185		MLX5_SET(delete_fte_in, din, other_vport,
1186			 MLX5_GET(set_fte_in,  in, other_vport));
1187		MLX5_SET(delete_fte_in, din, vport_number,
1188			 MLX5_GET(set_fte_in, in, vport_number));
1189		MLX5_SET(delete_fte_in, din, table_type,
1190			 MLX5_GET(set_fte_in, in, table_type));
1191		MLX5_SET(delete_fte_in, din, table_id,
1192			 MLX5_GET(set_fte_in, in, table_id));
1193		MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
1194		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1195			 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
1196		break;
1197	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
1198		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1199			 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
1200		break;
1201	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
1202		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1203			 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
1204		break;
1205	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
1206		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1207			 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
1208		break;
1209	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
1210		*dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
1211		*obj_id = MLX5_GET(create_scheduling_element_out, out,
1212				   scheduling_element_id);
1213		MLX5_SET(destroy_scheduling_element_in, din,
1214			 scheduling_hierarchy,
1215			 MLX5_GET(create_scheduling_element_in, in,
1216				  scheduling_hierarchy));
1217		MLX5_SET(destroy_scheduling_element_in, din,
1218			 scheduling_element_id, *obj_id);
1219		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1220			 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
1221		break;
1222	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
1223		*dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
1224		*obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
1225		MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
1226		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1227			 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
1228		break;
1229	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
1230		*dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
1231		*obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
1232		MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
1233		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1234			 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
1235		break;
1236	case MLX5_CMD_OP_CREATE_QP:
1237		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_QP);
1238		break;
1239	case MLX5_CMD_OP_CREATE_SRQ:
1240		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
1241		break;
1242	case MLX5_CMD_OP_CREATE_XRC_SRQ:
1243		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1244			 MLX5_CMD_OP_DESTROY_XRC_SRQ);
1245		break;
1246	case MLX5_CMD_OP_CREATE_DCT:
1247		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
1248		break;
1249	case MLX5_CMD_OP_CREATE_XRQ:
1250		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
1251		break;
1252	case MLX5_CMD_OP_ATTACH_TO_MCG:
1253		*dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
1254		MLX5_SET(detach_from_mcg_in, din, qpn,
1255			 MLX5_GET(attach_to_mcg_in, in, qpn));
1256		memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
1257		       MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
1258		       MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
1259		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
1260		break;
1261	case MLX5_CMD_OP_ALLOC_XRCD:
1262		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
1263		break;
1264	case MLX5_CMD_OP_CREATE_PSV:
1265		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1266			 MLX5_CMD_OP_DESTROY_PSV);
1267		MLX5_SET(destroy_psv_in, din, psvn,
1268			 MLX5_GET(create_psv_out, out, psv0_index));
1269		break;
1270	default:
		/* Must match one of the opcodes in devx_is_obj_create_cmd() */
1272		WARN_ON(true);
1273		break;
1274	}
1275}
1276
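/*
 * Validate an MKEY creation mailbox: it must carry a complete mkey context,
 * and only direct (non KLM/KSM) mkeys are flagged as umem-backed.
 */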
1277static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
1278				   struct devx_obj *obj,
1279				   void *in, int in_len)
1280{
1281	int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) +
1282			MLX5_FLD_SZ_BYTES(create_mkey_in,
1283			memory_key_mkey_entry);
1284	void *mkc;
1285	u8 access_mode;
1286
1287	if (in_len < min_len)
1288		return -EINVAL;
1289
1290	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1291
1292	access_mode = MLX5_GET(mkc, mkc, access_mode);
1293	access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2;
1294
1295	if (access_mode == MLX5_ACCESS_MODE_KLM ||
1296		access_mode == MLX5_ACCESS_MODE_KSM) {
1297		return 0;
1298	}
1299
1300	MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
1301	return 0;
1302}
1303
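/*
 * Detach a subscription from the event table; called with
 * devx_event_table->event_xa_lock held.  The level-2 object entry is erased
 * once its last subscription is gone.
 */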
1304static void devx_cleanup_subscription(struct mlx5_ib_dev *dev,
1305				      struct devx_event_subscription *sub)
1306{
1307	struct devx_event *event;
1308	struct devx_obj_event *xa_val_level2;
1309
1310	if (sub->is_cleaned)
1311		return;
1312
1313	sub->is_cleaned = 1;
1314	list_del_rcu(&sub->xa_list);
1315
1316	if (list_empty(&sub->obj_list))
1317		return;
1318
1319	list_del_rcu(&sub->obj_list);
	/* drop the level-2 object entry if this was its last subscription */
1321	event = xa_load(&dev->devx_event_table.event_xa,
1322			sub->xa_key_level1);
1323	WARN_ON(!event);
1324
1325	xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2);
1326	if (list_empty(&xa_val_level2->obj_sub_list)) {
1327		xa_erase(&event->object_ids,
1328			 sub->xa_key_level2);
1329		kfree_rcu(xa_val_level2, rcu);
1330	}
1331}
1332
1333static int devx_obj_cleanup(struct ib_uobject *uobject,
1334			    enum rdma_remove_reason why,
1335			    struct uverbs_attr_bundle *attrs)
1336{
1337	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
1338	struct mlx5_devx_event_table *devx_event_table;
1339	struct devx_obj *obj = uobject->object;
1340	struct devx_event_subscription *sub_entry, *tmp;
1341	struct mlx5_ib_dev *dev;
1342	int ret;
1343
1344	dev = mlx5_udata_to_mdev(&attrs->driver_udata);
1345	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
1346		ret = mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
1347	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
1348		ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
1349	else
1350		ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox,
1351				    obj->dinlen, out, sizeof(out));
1352	if (ib_is_destroy_retryable(ret, why, uobject))
1353		return ret;
1354
1355	devx_event_table = &dev->devx_event_table;
1356
1357	mutex_lock(&devx_event_table->event_xa_lock);
1358	list_for_each_entry_safe(sub_entry, tmp, &obj->event_sub, obj_list)
1359		devx_cleanup_subscription(dev, sub_entry);
1360	mutex_unlock(&devx_event_table->event_xa_lock);
1361
1362	kfree(obj);
1363	return ret;
1364}
1365
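/*
 * Completion handler for CQs created through DEVX: look up the subscribers
 * of MLX5_EVENT_TYPE_COMP for this cqn and forward the CQE event to their
 * event files.
 */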
1366static void devx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
1367{
1368	struct devx_obj *obj = container_of(mcq, struct devx_obj, core_cq);
1369	struct mlx5_devx_event_table *table;
1370	struct devx_event *event;
1371	struct devx_obj_event *obj_event;
1372	u32 obj_id = mcq->cqn;
1373
1374	table = &obj->ib_dev->devx_event_table;
1375	rcu_read_lock();
1376	event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP);
1377	if (!event)
1378		goto out;
1379
1380	obj_event = xa_load(&event->object_ids, obj_id);
1381	if (!obj_event)
1382		goto out;
1383
1384	dispatch_event_fd(&obj_event->obj_sub_list, eqe);
1385out:
1386	rcu_read_unlock();
1387}
1388
1389static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
1390	struct uverbs_attr_bundle *attrs)
1391{
1392	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
1393	int cmd_out_len =  uverbs_attr_get_len(attrs,
1394					MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
1395	int cmd_in_len = uverbs_attr_get_len(attrs,
1396					MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
1397	void *cmd_out;
1398	struct ib_uobject *uobj = uverbs_attr_get_uobject(
1399		attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
1400	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1401		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1402	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
1403	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
1404	struct devx_obj *obj;
1405	u16 obj_type = 0;
1406	int err;
1407	int uid;
1408	u32 obj_id;
1409	u16 opcode;
1410
1411	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1412		return -EINVAL;
1413
1414	uid = devx_get_uid(c, cmd_in);
1415	if (uid < 0)
1416		return uid;
1417
1418	if (!devx_is_obj_create_cmd(cmd_in, &opcode))
1419		return -EINVAL;
1420
1421	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1422	if (IS_ERR(cmd_out))
1423		return PTR_ERR(cmd_out);
1424
1425	obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
1426	if (!obj)
1427		return -ENOMEM;
1428
1429	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1430	if (opcode == MLX5_CMD_OP_CREATE_MKEY) {
1431		err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len);
1432		if (err)
1433			goto obj_free;
1434	} else {
1435		devx_set_umem_valid(cmd_in);
1436	}
1437
1438	if (opcode == MLX5_CMD_OP_CREATE_DCT) {
1439		obj->flags |= DEVX_OBJ_FLAGS_DCT;
1440		err = mlx5_core_create_dct(dev->mdev, &obj->core_dct,
1441					   cmd_in, cmd_in_len,
1442					   cmd_out, cmd_out_len);
1443	} else if (opcode == MLX5_CMD_OP_CREATE_CQ) {
1444		obj->flags |= DEVX_OBJ_FLAGS_CQ;
1445		obj->core_cq.comp = devx_cq_comp;
1446		err = mlx5_core_create_cq(dev->mdev, &obj->core_cq,
1447					  cmd_in, cmd_in_len, cmd_out,
1448					  cmd_out_len);
1449	} else {
1450		err = mlx5_cmd_exec(dev->mdev, cmd_in,
1451				    cmd_in_len,
1452				    cmd_out, cmd_out_len);
1453	}
1454
1455	if (err)
1456		goto obj_free;
1457
1458	if (opcode == MLX5_CMD_OP_ALLOC_FLOW_COUNTER) {
1459		u8 bulk = MLX5_GET(alloc_flow_counter_in,
1460				   cmd_in,
1461				   flow_counter_bulk);
1462		obj->flow_counter_bulk_size = 128UL * bulk;
1463	}
1464
1465	uobj->object = obj;
1466	INIT_LIST_HEAD(&obj->event_sub);
1467	obj->ib_dev = dev;
1468	devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
1469				   &obj_id);
1470	WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));
1471
1472	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
1473	if (err)
1474		goto obj_destroy;
1475
1476	if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJ)
1477		obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);
1478	obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);
1479
1480	return 0;
1481
1482obj_destroy:
1483	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
1484		mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
1485	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
1486		mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
1487	else
1488		mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out,
1489			      sizeof(out));
1490obj_free:
1491	kfree(obj);
1492	return err;
1493}
1494
1495static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
1496	struct uverbs_attr_bundle *attrs)
1497{
1498	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
1499	int cmd_out_len = uverbs_attr_get_len(attrs,
1500					MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
1501	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
1502							  MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
1503	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1504		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1505	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
1506	void *cmd_out;
1507	int err;
1508	int uid;
1509
1510	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1511		return -EINVAL;
1512
1513	uid = devx_get_uid(c, cmd_in);
1514	if (uid < 0)
1515		return uid;
1516
1517	if (!devx_is_obj_modify_cmd(cmd_in))
1518		return -EINVAL;
1519
1520	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
1521		return -EINVAL;
1522
1523	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1524	if (IS_ERR(cmd_out))
1525		return PTR_ERR(cmd_out);
1526
1527	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1528	devx_set_umem_valid(cmd_in);
1529
1530	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
1531			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
1532			    cmd_out, cmd_out_len);
1533	if (err)
1534		return err;
1535
1536	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
1537			      cmd_out, cmd_out_len);
1538}
1539
1540static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
1541	struct uverbs_attr_bundle *attrs)
1542{
1543	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
1544	int cmd_out_len = uverbs_attr_get_len(attrs,
1545					      MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
1546	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
1547							  MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
1548	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1549		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1550	void *cmd_out;
1551	int err;
1552	int uid;
1553	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
1554
1555	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1556		return -EINVAL;
1557
1558	uid = devx_get_uid(c, cmd_in);
1559	if (uid < 0)
1560		return uid;
1561
1562	if (!devx_is_obj_query_cmd(cmd_in))
1563		return -EINVAL;
1564
1565	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
1566		return -EINVAL;
1567
1568	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1569	if (IS_ERR(cmd_out))
1570		return PTR_ERR(cmd_out);
1571
1572	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1573	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
1574			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
1575			    cmd_out, cmd_out_len);
1576	if (err)
1577		return err;
1578
1579	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
1580			      cmd_out, cmd_out_len);
1581}
1582
1583struct devx_async_event_queue {
1584	spinlock_t		lock;
1585	wait_queue_head_t	poll_wait;
1586	struct list_head	event_list;
1587	atomic_t		bytes_in_use;
1588	u8			is_destroyed:1;
1589};
1590
1591struct devx_async_cmd_event_file {
1592	struct ib_uobject		uobj;
1593	struct devx_async_event_queue	ev_queue;
1594	struct mlx5_async_ctx		async_ctx;
1595};
1596
1597static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
1598{
1599	spin_lock_init(&ev_queue->lock);
1600	INIT_LIST_HEAD(&ev_queue->event_list);
1601	init_waitqueue_head(&ev_queue->poll_wait);
1602	atomic_set(&ev_queue->bytes_in_use, 0);
1603	ev_queue->is_destroyed = 0;
1604}
1605
1606static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
1607	struct uverbs_attr_bundle *attrs)
1608{
1609	struct devx_async_cmd_event_file *ev_file;
1610
1611	struct ib_uobject *uobj = uverbs_attr_get_uobject(
1612		attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
1613	struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
1614
1615	ev_file = container_of(uobj, struct devx_async_cmd_event_file,
1616			       uobj);
1617	devx_init_event_queue(&ev_file->ev_queue);
1618	mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
1619	return 0;
1620}
1621
1622static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC)(
1623	struct uverbs_attr_bundle *attrs)
1624{
1625	struct ib_uobject *uobj = uverbs_attr_get_uobject(
1626		attrs, MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE);
1627	struct devx_async_event_file *ev_file;
1628	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1629		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1630	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
1631	u32 flags;
1632	int err;
1633
1634	err = uverbs_get_flags32(&flags, attrs,
1635		MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
1636		MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA);
1637
1638	if (err)
1639		return err;
1640
1641	ev_file = container_of(uobj, struct devx_async_event_file,
1642			       uobj);
1643	spin_lock_init(&ev_file->lock);
1644	INIT_LIST_HEAD(&ev_file->event_list);
1645	init_waitqueue_head(&ev_file->poll_wait);
1646	if (flags & MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA)
1647		ev_file->omit_data = 1;
1648	INIT_LIST_HEAD(&ev_file->subscribed_events_list);
1649	ev_file->dev = dev;
1650	get_device(&dev->ib_dev.dev);
1651	return 0;
1652}
1653
1654static void devx_query_callback(int status, struct mlx5_async_work *context)
1655{
1656	struct devx_async_data *async_data =
1657		container_of(context, struct devx_async_data, cb_work);
1658	struct devx_async_cmd_event_file *ev_file = async_data->ev_file;
1659	struct devx_async_event_queue *ev_queue = &ev_file->ev_queue;
1660	unsigned long flags;
1661
1662	/*
1663	 * Note that if the struct devx_async_cmd_event_file uobj begins to be
1664	 * destroyed it will block at mlx5_cmd_cleanup_async_ctx() until this
1665	 * routine returns, ensuring that it always remains valid here.
1666	 */
1667	spin_lock_irqsave(&ev_queue->lock, flags);
1668	list_add_tail(&async_data->list, &ev_queue->event_list);
1669	spin_unlock_irqrestore(&ev_queue->lock, flags);
1670
1671	wake_up_interruptible(&ev_queue->poll_wait);
1672}
1673
1674#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */
1675
1676static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
1677	struct uverbs_attr_bundle *attrs)
1678{
1679	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
1680				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
1681	struct ib_uobject *uobj = uverbs_attr_get_uobject(
1682				attrs,
1683				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
1684	u16 cmd_out_len;
1685	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1686		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1687	struct ib_uobject *fd_uobj;
1688	int err;
1689	int uid;
1690	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
1691	struct devx_async_cmd_event_file *ev_file;
1692	struct devx_async_data *async_data;
1693
1694	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1695		return -EINVAL;
1696
1697	uid = devx_get_uid(c, cmd_in);
1698	if (uid < 0)
1699		return uid;
1700
1701	if (!devx_is_obj_query_cmd(cmd_in))
1702		return -EINVAL;
1703
1704	err = uverbs_get_const(&cmd_out_len, attrs,
1705			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
1706	if (err)
1707		return err;
1708
1709	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
1710		return -EINVAL;
1711
1712	fd_uobj = uverbs_attr_get_uobject(attrs,
1713				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
1714	if (IS_ERR(fd_uobj))
1715		return PTR_ERR(fd_uobj);
1716
1717	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
1718			       uobj);
1719
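	/*
	 * Throttle the amount of outstanding async output per event file;
	 * the reservation is dropped again on the error paths below and once
	 * the completion has been consumed from the event file.
	 */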
1720	if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
1721			MAX_ASYNC_BYTES_IN_USE) {
1722		atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
1723		return -EAGAIN;
1724	}
1725
1726	async_data = kvzalloc(struct_size(async_data, hdr.out_data,
1727					  cmd_out_len), GFP_KERNEL);
1728	if (!async_data) {
1729		err = -ENOMEM;
1730		goto sub_bytes;
1731	}
1732
1733	err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
1734			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
1735	if (err)
1736		goto free_async;
1737
1738	async_data->cmd_out_len = cmd_out_len;
1739	async_data->mdev = mdev;
1740	async_data->ev_file = ev_file;
1741
1742	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1743	err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
1744		    uverbs_attr_get_len(attrs,
1745				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
1746		    async_data->hdr.out_data,
1747		    async_data->cmd_out_len,
1748		    devx_query_callback, &async_data->cb_work);
1749
1750	if (err)
1751		goto free_async;
1752
1753	return 0;
1754
1755free_async:
1756	kvfree(async_data);
1757sub_bytes:
1758	atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
1759	return err;
1760}
1761
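/*
 * Undo subscribe_event_xa_alloc() for a failed subscription. Only the level-2
 * (object id) entry is removed, and only if no other subscription still uses
 * it; the level-1 entry is kept for future use.
 */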
1762static void
1763subscribe_event_xa_dealloc(struct mlx5_devx_event_table *devx_event_table,
1764			   u32 key_level1,
1765			   bool is_level2,
1766			   u32 key_level2)
1767{
1768	struct devx_event *event;
1769	struct devx_obj_event *xa_val_level2;
1770
1771	/* Level 1 is valid for future use, no need to free */
1772	if (!is_level2)
1773		return;
1774
1775	event = xa_load(&devx_event_table->event_xa, key_level1);
1776	WARN_ON(!event);
1777
1778	xa_val_level2 = xa_load(&event->object_ids,
1779				key_level2);
1780	if (list_empty(&xa_val_level2->obj_sub_list)) {
1781		xa_erase(&event->object_ids,
1782			 key_level2);
1783		kfree_rcu(xa_val_level2, rcu);
1784	}
1785}
1786
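/*
 * Make sure the XArray entries needed for a subscription exist: a level-1
 * entry keyed by event type (and object type) and, for affiliated events, a
 * level-2 entry keyed by the object id.
 */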
1787static int
1788subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
1789			 u32 key_level1,
1790			 bool is_level2,
1791			 u32 key_level2)
1792{
1793	struct devx_obj_event *obj_event;
1794	struct devx_event *event;
1795	int err;
1796
1797	event = xa_load(&devx_event_table->event_xa, key_level1);
1798	if (!event) {
1799		event = kzalloc(sizeof(*event), GFP_KERNEL);
1800		if (!event)
1801			return -ENOMEM;
1802
1803		INIT_LIST_HEAD(&event->unaffiliated_list);
1804		xa_init_flags(&event->object_ids, 0);
1805
1806		err = xa_insert(&devx_event_table->event_xa,
1807				key_level1,
1808				event,
1809				GFP_KERNEL);
1810		if (err) {
1811			kfree(event);
1812			return err;
1813		}
1814	}
1815
1816	if (!is_level2)
1817		return 0;
1818
1819	obj_event = xa_load(&event->object_ids, key_level2);
1820	if (!obj_event) {
1821		obj_event = kzalloc(sizeof(*obj_event), GFP_KERNEL);
1822		if (!obj_event)
1823			/* Level1 is valid for future use, no need to free */
1824			return -ENOMEM;
1825
1826		err = xa_insert(&event->object_ids,
1827				key_level2,
1828				obj_event,
1829				GFP_KERNEL);
1830		if (err) {
1831			kfree(obj_event);
			return err;
		}
1832		INIT_LIST_HEAD(&obj_event->obj_sub_list);
1833	}
1834
1835	return 0;
1836}
1837
1838static bool is_valid_events_legacy(int num_events, u16 *event_type_num_list,
1839				   struct devx_obj *obj)
1840{
1841	int i;
1842
1843	for (i = 0; i < num_events; i++) {
1844		if (obj) {
1845			if (!is_legacy_obj_event_num(event_type_num_list[i]))
1846				return false;
1847		} else if (!is_legacy_unaffiliated_event_num(
1848				event_type_num_list[i])) {
1849			return false;
1850		}
1851	}
1852
1853	return true;
1854}
1855
1856#define MAX_SUPP_EVENT_NUM 255
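/*
 * Validate the requested event numbers against the device's advertised
 * affiliated/unaffiliated event masks, falling back to the legacy checks when
 * the event_cap capability is not exposed.
 */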
1857static bool is_valid_events(struct mlx5_core_dev *dev,
1858			    int num_events, u16 *event_type_num_list,
1859			    struct devx_obj *obj)
1860{
1861	__be64 *aff_events;
1862	__be64 *unaff_events;
1863	int mask_entry;
1864	int mask_bit;
1865	int i;
1866
1867	if (MLX5_CAP_GEN(dev, event_cap)) {
1868		aff_events = (__be64 *)MLX5_CAP_DEV_EVENT(dev,
1869						user_affiliated_events);
1870		unaff_events = (__be64 *)MLX5_CAP_DEV_EVENT(dev,
1871						  user_unaffiliated_events);
1872	} else {
1873		return is_valid_events_legacy(num_events, event_type_num_list,
1874					      obj);
1875	}
1876
1877	for (i = 0; i < num_events; i++) {
1878		if (event_type_num_list[i] > MAX_SUPP_EVENT_NUM)
1879			return false;
1880
1881		mask_entry = event_type_num_list[i] / 64;
1882		mask_bit = event_type_num_list[i] % 64;
1883
1884		if (obj) {
1885			/* CQ completion */
1886			if (event_type_num_list[i] == 0)
1887				continue;
1888
1889			if (!(be64_to_cpu(aff_events[mask_entry]) &
1890					(1ull << mask_bit)))
1891				return false;
1892
1893			continue;
1894		}
1895
1896		if (!(be64_to_cpu(unaff_events[mask_entry]) &
1897				(1ull << mask_bit)))
1898			return false;
1899	}
1900
1901	return true;
1902}
1903
1904#define MAX_NUM_EVENTS 16
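/*
 * Handler for MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT. It works in two phases
 * under event_xa_lock: first allocate the XArray entries and the subscription
 * objects on a temporary list, then, once nothing can fail, publish each
 * subscription on the file's and the XArray's RCU-protected lists.
 */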
1905static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
1906	struct uverbs_attr_bundle *attrs)
1907{
1908	struct ib_uobject *devx_uobj = uverbs_attr_get_uobject(
1909				attrs,
1910				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE);
1911	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1912		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1913	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
1914	struct ib_uobject *fd_uobj;
1915	struct devx_obj *obj = NULL;
1916	struct devx_async_event_file *ev_file;
1917	struct mlx5_devx_event_table *devx_event_table = &dev->devx_event_table;
1918	u16 *event_type_num_list;
1919	struct devx_event_subscription *event_sub, *tmp_sub;
1920	struct list_head sub_list;
1921	int redirect_fd;
1922	bool use_eventfd = false;
1923	int num_events;
1924	u16 obj_type = 0;
1925	u64 cookie = 0;
1926	u32 obj_id = 0;
1927	int err;
1928	int i;
1929
1930	if (!c->devx_uid)
1931		return -EINVAL;
1932
1933	if (!IS_ERR(devx_uobj)) {
1934		obj = (struct devx_obj *)devx_uobj->object;
1935		if (obj)
1936			obj_id = get_dec_obj_id(obj->obj_id);
1937	}
1938
1939	fd_uobj = uverbs_attr_get_uobject(attrs,
1940				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE);
1941	if (IS_ERR(fd_uobj))
1942		return PTR_ERR(fd_uobj);
1943
1944	ev_file = container_of(fd_uobj, struct devx_async_event_file,
1945			       uobj);
1946
1947	if (uverbs_attr_is_valid(attrs,
1948				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM)) {
1949		err = uverbs_copy_from(&redirect_fd, attrs,
1950			       MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM);
1951		if (err)
1952			return err;
1953
1954		use_eventfd = true;
1955	}
1956
1957	if (uverbs_attr_is_valid(attrs,
1958				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE)) {
1959		if (use_eventfd)
1960			return -EINVAL;
1961
1962		err = uverbs_copy_from(&cookie, attrs,
1963				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE);
1964		if (err)
1965			return err;
1966	}
1967
1968	num_events = uverbs_attr_ptr_get_array_size(
1969		attrs, MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
1970		sizeof(u16));
1971
1972	if (num_events < 0)
1973		return num_events;
1974
1975	if (num_events > MAX_NUM_EVENTS)
1976		return -EINVAL;
1977
1978	event_type_num_list = uverbs_attr_get_alloced_ptr(attrs,
1979			MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST);
1980
1981	if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj))
1982		return -EINVAL;
1983
1984	INIT_LIST_HEAD(&sub_list);
1985
1986	/* Protect against concurrent subscriptions to the same XA entries so
1987	 * that all of them can succeed.
1988	 */
1989	mutex_lock(&devx_event_table->event_xa_lock);
1990	for (i = 0; i < num_events; i++) {
1991		u32 key_level1;
1992
1993		if (obj)
1994			obj_type = get_dec_obj_type(obj,
1995						    event_type_num_list[i]);
1996		key_level1 = event_type_num_list[i] | obj_type << 16;
1997
1998		err = subscribe_event_xa_alloc(devx_event_table,
1999					       key_level1,
2000					       obj,
2001					       obj_id);
2002		if (err)
2003			goto err;
2004
2005		event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
2006		if (!event_sub) {
2007			err = -ENOMEM;
			goto err;
		}
2008
2009		list_add_tail(&event_sub->event_list, &sub_list);
2010		uverbs_uobject_get(&ev_file->uobj);
2011		if (use_eventfd) {
2012			event_sub->eventfd =
2013				fdget(redirect_fd);
2014
2015			if (event_sub->eventfd.file == NULL) {
2016				err = -EBADF;
2017				goto err;
2018			}
2019		}
2020
2021		event_sub->cookie = cookie;
2022		event_sub->ev_file = ev_file;
2023		/* May be needed when cleaning up the devx object/subscription */
2024		event_sub->xa_key_level1 = key_level1;
2025		event_sub->xa_key_level2 = obj_id;
2026		INIT_LIST_HEAD(&event_sub->obj_list);
2027	}
2028
2029	/* Once all the allocations and the XA data insertions are done we can
2030	 * go ahead and add all the subscriptions to the relevant lists without
2031	 * any concern of failure.
2032	 */
2033	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
2034		struct devx_event *event;
2035		struct devx_obj_event *obj_event;
2036
2037		list_del_init(&event_sub->event_list);
2038
2039		spin_lock_irq(&ev_file->lock);
2040		list_add_tail_rcu(&event_sub->file_list,
2041				  &ev_file->subscribed_events_list);
2042		spin_unlock_irq(&ev_file->lock);
2043
2044		event = xa_load(&devx_event_table->event_xa,
2045				event_sub->xa_key_level1);
2046		WARN_ON(!event);
2047
2048		if (!obj) {
2049			list_add_tail_rcu(&event_sub->xa_list,
2050					  &event->unaffiliated_list);
2051			continue;
2052		}
2053
2054		obj_event = xa_load(&event->object_ids, obj_id);
2055		WARN_ON(!obj_event);
2056		list_add_tail_rcu(&event_sub->xa_list,
2057				  &obj_event->obj_sub_list);
2058		list_add_tail_rcu(&event_sub->obj_list,
2059				  &obj->event_sub);
2060	}
2061
2062	mutex_unlock(&devx_event_table->event_xa_lock);
2063	return 0;
2064
2065err:
2066	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
2067		list_del(&event_sub->event_list);
2068
2069		subscribe_event_xa_dealloc(devx_event_table,
2070					   event_sub->xa_key_level1,
2071					   obj,
2072					   obj_id);
2073
2074		if (event_sub->eventfd.file)
2075			fdput(event_sub->eventfd);
2076		uverbs_uobject_put(&event_sub->ev_file->uobj);
2077		kfree(event_sub);
2078	}
2079
2080	mutex_unlock(&devx_event_table->event_xa_lock);
2081	return err;
2082}
2083
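/*
 * Pin the user memory described by the UMEM_REG attributes: copy the address,
 * length and access flags, pin the pages with ib_umem_get(), and derive the
 * page shift, page count and page offset used to build the CREATE_UMEM
 * command.
 */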
2084static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
2085			 struct uverbs_attr_bundle *attrs,
2086			 struct devx_umem *obj)
2087{
2088	u64 addr;
2089	size_t size;
2090	u32 access;
2091	int npages;
2092	int err;
2093	u32 page_mask;
2094
2095	if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
2096	    uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
2097		return -EFAULT;
2098
2099	err = uverbs_get_flags32(&access, attrs,
2100				 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
2101				 IB_ACCESS_LOCAL_WRITE |
2102				 IB_ACCESS_REMOTE_WRITE |
2103				 IB_ACCESS_REMOTE_READ);
2104	if (err)
2105		return err;
2106
2107	err = ib_check_mr_access(access);
2108	if (err)
2109		return err;
2110
2111	obj->umem = ib_umem_get(ucontext, addr, size, access, 0);
2112	if (IS_ERR(obj->umem))
2113		return PTR_ERR(obj->umem);
2114
2115	mlx5_ib_cont_pages(obj->umem, obj->umem->address,
2116			   MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
2117			   &obj->page_shift, &obj->ncont, NULL);
2118
2119	if (!npages) {
2120		ib_umem_release(obj->umem);
2121		return -EINVAL;
2122	}
2123
2124	page_mask = (1 << obj->page_shift) - 1;
2125	obj->page_offset = obj->umem->address & page_mask;
2126
2127	return 0;
2128}
2129
2130static int devx_umem_reg_cmd_alloc(struct uverbs_attr_bundle *attrs,
2131				   struct devx_umem *obj,
2132				   struct devx_umem_reg_cmd *cmd)
2133{
2134	cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
2135		    (MLX5_ST_SZ_BYTES(mtt) * obj->ncont);
2136	cmd->in = uverbs_zalloc(attrs, cmd->inlen);
2137	return PTR_ERR_OR_ZERO(cmd->in);
2138}
2139
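/*
 * Fill the CREATE_UMEM command: number of MTT entries, log page size, page
 * offset, and the MTT translation entries themselves with the proper
 * read/write permissions.
 */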
2140static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
2141				    struct devx_umem *obj,
2142				    struct devx_umem_reg_cmd *cmd)
2143{
2144	void *umem;
2145	__be64 *mtt;
2146
2147	umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
2148	mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);
2149
2150	MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
2151	MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
2152	MLX5_SET(umem, umem, log_page_size, obj->page_shift -
2153					    MLX5_ADAPTER_PAGE_SHIFT);
2154	MLX5_SET(umem, umem, page_offset, obj->page_offset);
2155	mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
2156			     (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
2157			     MLX5_IB_MTT_READ);
2158}
2159
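/*
 * Handler for MLX5_IB_METHOD_DEVX_UMEM_REG: pins the user buffer, builds and
 * executes the CREATE_UMEM command on behalf of the DEVX uid, and returns the
 * resulting umem object id to the caller.
 */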
2160static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
2161	struct uverbs_attr_bundle *attrs)
2162{
2163	struct devx_umem_reg_cmd cmd;
2164	struct devx_umem *obj;
2165	struct ib_uobject *uobj = uverbs_attr_get_uobject(
2166		attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
2167	u32 obj_id;
2168	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
2169		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
2170	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
2171	int err;
2172
2173	if (!c->devx_uid)
2174		return -EINVAL;
2175
2176	obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
2177	if (!obj)
2178		return -ENOMEM;
2179
2180	err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
2181	if (err)
2182		goto err_obj_free;
2183
2184	err = devx_umem_reg_cmd_alloc(attrs, obj, &cmd);
2185	if (err)
2186		goto err_umem_release;
2187
2188	devx_umem_reg_cmd_build(dev, obj, &cmd);
2189
2190	MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
2191	err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
2192			    sizeof(cmd.out));
2193	if (err)
2194		goto err_umem_release;
2195
2196	obj->mdev = dev->mdev;
2197	uobj->object = obj;
2198	devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
2199	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, sizeof(obj_id));
2200	if (err)
2201		goto err_umem_destroy;
2202
2203	return 0;
2204
2205err_umem_destroy:
2206	mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out));
2207err_umem_release:
2208	ib_umem_release(obj->umem);
2209err_obj_free:
2210	kfree(obj);
2211	return err;
2212}
2213
2214static int devx_umem_cleanup(struct ib_uobject *uobject,
2215			     enum rdma_remove_reason why,
2216			     struct uverbs_attr_bundle *attrs)
2217{
2218	struct devx_umem *obj = uobject->object;
2219	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
2220	int err;
2221
2222	err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
2223	if (ib_is_destroy_retryable(err, why, uobject))
2224		return err;
2225
2226	ib_umem_release(obj->umem);
2227	kfree(obj);
2228	return 0;
2229}
2230
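/*
 * Report whether an event type is unaffiliated (not tied to a specific
 * object), either from the device's user_unaffiliated_events mask or from the
 * legacy list when event_cap is not supported.
 */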
2231static bool is_unaffiliated_event(struct mlx5_core_dev *dev,
2232				  unsigned long event_type)
2233{
2234	__be64 *unaff_events;
2235	int mask_entry;
2236	int mask_bit;
2237
2238	if (!MLX5_CAP_GEN(dev, event_cap))
2239		return is_legacy_unaffiliated_event_num(event_type);
2240
2241	unaff_events = (__be64 *)MLX5_CAP_DEV_EVENT(dev,
2242					  user_unaffiliated_events);
2243	WARN_ON(event_type > MAX_SUPP_EVENT_NUM);
2244
2245	mask_entry = event_type / 64;
2246	mask_bit = event_type % 64;
2247
2248	if (!(be64_to_cpu(unaff_events[mask_entry]) & (1ull << mask_bit)))
2249		return false;
2250
2251	return true;
2252}
2253
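/*
 * Extract the object id (QPN/SRQN/XRQN/DCTN/CQN or the generic obj_id field)
 * from the EQE according to the event type.
 */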
2254static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data)
2255{
2256	struct mlx5_eqe *eqe = data;
2257	u32 obj_id = 0;
2258
2259	switch (event_type) {
2260	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
2261	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
2262	case MLX5_EVENT_TYPE_PATH_MIG:
2263	case MLX5_EVENT_TYPE_COMM_EST:
2264	case MLX5_EVENT_TYPE_SQ_DRAINED:
2265	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
2266	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
2267	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
2268	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
2269	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
2270		obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
2271		break;
2272	case MLX5_EVENT_TYPE_XRQ_ERROR:
2273		obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff;
2274		break;
2275	case MLX5_EVENT_TYPE_DCT_DRAINED:
2276	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
2277		obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
2278		break;
2279	case MLX5_EVENT_TYPE_CQ_ERROR:
2280		obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
2281		break;
2282	default:
2283		obj_id = MLX5_GET(affiliated_event_header, &eqe->data, obj_id);
2284		break;
2285	}
2286
2287	return obj_id;
2288}
2289
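/*
 * Queue an event for one subscription. In OMIT_DATA mode only the
 * subscription (and hence its cookie) is queued, and a subscription that is
 * already pending is not queued twice; otherwise a full copy of the EQE is
 * allocated with GFP_ATOMIC, and an allocation failure is reported to the
 * reader as an overflow.
 */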
2290static int deliver_event(struct devx_event_subscription *event_sub,
2291			 const void *data)
2292{
2293	struct devx_async_event_file *ev_file;
2294	struct devx_async_event_data *event_data;
2295	unsigned long flags;
2296
2297	ev_file = event_sub->ev_file;
2298
2299	if (ev_file->omit_data) {
2300		spin_lock_irqsave(&ev_file->lock, flags);
2301		if (!list_empty(&event_sub->event_list) ||
2302		    ev_file->is_destroyed) {
2303			spin_unlock_irqrestore(&ev_file->lock, flags);
2304			return 0;
2305		}
2306
2307		list_add_tail(&event_sub->event_list, &ev_file->event_list);
2308		spin_unlock_irqrestore(&ev_file->lock, flags);
2309		wake_up_interruptible(&ev_file->poll_wait);
2310		return 0;
2311	}
2312
2313	event_data = kzalloc(sizeof(*event_data) + sizeof(struct mlx5_eqe),
2314			     GFP_ATOMIC);
2315	if (!event_data) {
2316		spin_lock_irqsave(&ev_file->lock, flags);
2317		ev_file->is_overflow_err = 1;
2318		spin_unlock_irqrestore(&ev_file->lock, flags);
2319		return -ENOMEM;
2320	}
2321
2322	event_data->hdr.cookie = event_sub->cookie;
2323	memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe));
2324
2325	spin_lock_irqsave(&ev_file->lock, flags);
2326	if (!ev_file->is_destroyed)
2327		list_add_tail(&event_data->list, &ev_file->event_list);
2328	else
2329		kfree(event_data);
2330	spin_unlock_irqrestore(&ev_file->lock, flags);
2331	wake_up_interruptible(&ev_file->poll_wait);
2332
2333	return 0;
2334}
2335
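/*
 * Dispatch an event to every subscription on the given RCU-protected list:
 * subscriptions redirected to an eventfd have that file woken up, the others
 * get the event queued on their event FD via deliver_event().
 */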
2336static void dispatch_event_fd(struct list_head *fd_list,
2337			      const void *data)
2338{
2339	struct devx_event_subscription *item;
2340
2341	list_for_each_entry_rcu(item, fd_list, xa_list) {
2342		if (item->eventfd.file != NULL)
2343			linux_poll_wakeup(item->eventfd.file);
2344		else
2345			deliver_event(item, data);
2346	}
2347}
2348
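/*
 * EQ notifier callback. Frequent kernel-only events are filtered out up
 * front; otherwise the level-1 (and, for affiliated events, level-2) XArray
 * entries are looked up under rcu_read_lock() and the event is dispatched to
 * all matching subscriptions. Returns false only when no matching
 * subscription exists.
 */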
2349static bool mlx5_devx_event_notifier(struct mlx5_core_dev *mdev,
2350				     uint8_t event_type, void *data)
2351{
2352	struct mlx5_ib_dev *dev;
2353	struct mlx5_devx_event_table *table;
2354	struct devx_event *event;
2355	struct devx_obj_event *obj_event;
2356	u16 obj_type = 0;
2357	bool is_unaffiliated;
2358	u32 obj_id;
2359
2360	/* Explicitly filter out kernel events which may occur frequently */
2361	if (event_type == MLX5_EVENT_TYPE_CMD ||
2362	    event_type == MLX5_EVENT_TYPE_PAGE_REQUEST)
2363		return true;
2364
2365	dev = mdev->priv.eq_table.dev;
2366	table = &dev->devx_event_table;
2367	is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type);
2368
2369	if (!is_unaffiliated)
2370		obj_type = get_event_obj_type(event_type, data);
2371
2372	rcu_read_lock();
2373	event = xa_load(&table->event_xa, event_type | (obj_type << 16));
2374	if (!event) {
2375		rcu_read_unlock();
2376		return false;
2377	}
2378
2379	if (is_unaffiliated) {
2380		dispatch_event_fd(&event->unaffiliated_list, data);
2381		rcu_read_unlock();
2382		return true;
2383	}
2384
2385	obj_id = devx_get_obj_id_from_event(event_type, data);
2386	obj_event = xa_load(&event->object_ids, obj_id);
2387	if (!obj_event) {
2388		rcu_read_unlock();
2389		return false;
2390	}
2391
2392	dispatch_event_fd(&obj_event->obj_sub_list, data);
2393
2394	rcu_read_unlock();
2395	return true;
2396}
2397
2398void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev)
2399{
2400	struct mlx5_devx_event_table *table = &dev->devx_event_table;
2401
2402	xa_init_flags(&table->event_xa, 0);
2403	mutex_init(&table->event_xa_lock);
2404	dev->mdev->priv.eq_table.dev = dev;
2405	dev->mdev->priv.eq_table.cb = mlx5_devx_event_notifier;
2406}
2407
2408void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev)
2409{
2410	struct mlx5_devx_event_table *table = &dev->devx_event_table;
2411	struct devx_event_subscription *sub, *tmp;
2412	struct devx_event *event;
2413	void *entry;
2414	unsigned long id;
2415
2416	dev->mdev->priv.eq_table.cb = NULL;
2417	dev->mdev->priv.eq_table.dev = NULL;
2418	mutex_lock(&dev->devx_event_table.event_xa_lock);
2419	xa_for_each(&table->event_xa, id, entry) {
2420		event = entry;
2421		list_for_each_entry_safe(sub, tmp, &event->unaffiliated_list,
2422					 xa_list)
2423			devx_cleanup_subscription(dev, sub);
2424		kfree(entry);
2425	}
2426	mutex_unlock(&dev->devx_event_table.event_xa_lock);
2427	xa_destroy(&table->event_xa);
2428}
2429
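/*
 * read(2) on an async command FD. Blocks (unless O_NONBLOCK) until a
 * completed command is available or the FD is destroyed, requires the user
 * buffer to fit the whole completion (-ENOSPC otherwise), and releases the
 * completion's share of the bytes_in_use budget once it has been consumed.
 */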
2430static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
2431					 size_t count, loff_t *pos)
2432{
2433	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
2434	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2435	struct devx_async_data *event;
2436	int ret = 0;
2437	size_t eventsz;
2438
2439	spin_lock_irq(&ev_queue->lock);
2440
2441	while (list_empty(&ev_queue->event_list)) {
2442		spin_unlock_irq(&ev_queue->lock);
2443
2444		if (filp->f_flags & O_NONBLOCK)
2445			return -EAGAIN;
2446
2447		if (wait_event_interruptible(
2448			    ev_queue->poll_wait,
2449			    (!list_empty(&ev_queue->event_list) ||
2450			     ev_queue->is_destroyed))) {
2451			return -ERESTARTSYS;
2452		}
2453
2454		spin_lock_irq(&ev_queue->lock);
2455		if (ev_queue->is_destroyed) {
2456			spin_unlock_irq(&ev_queue->lock);
2457			return -EIO;
2458		}
2459	}
2460
2461	event = list_entry(ev_queue->event_list.next,
2462			   struct devx_async_data, list);
2463	eventsz = event->cmd_out_len +
2464			sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);
2465
2466	if (eventsz > count) {
2467		spin_unlock_irq(&ev_queue->lock);
2468		return -ENOSPC;
2469	}
2470
2471	list_del(ev_queue->event_list.next);
2472	spin_unlock_irq(&ev_queue->lock);
2473
2474	if (copy_to_user(buf, &event->hdr, eventsz))
2475		ret = -EFAULT;
2476	else
2477		ret = eventsz;
2478
2479	atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
2480	kvfree(event);
2481	return ret;
2482}
2483
2484static __poll_t devx_async_cmd_event_poll(struct file *filp,
2485					  struct poll_table_struct *wait)
2486{
2487	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
2488	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2489	__poll_t pollflags = 0;
2490
2491	poll_wait(filp, &ev_queue->poll_wait, wait);
2492
2493	spin_lock_irq(&ev_queue->lock);
2494	if (ev_queue->is_destroyed)
2495		pollflags = POLLIN | POLLRDNORM | POLLHUP;
2496	else if (!list_empty(&ev_queue->event_list))
2497		pollflags = POLLIN | POLLRDNORM;
2498	spin_unlock_irq(&ev_queue->lock);
2499
2500	return pollflags;
2501}
2502
2503static const struct file_operations devx_async_cmd_event_fops = {
2504	.owner	 = THIS_MODULE,
2505	.read	 = devx_async_cmd_event_read,
2506	.poll    = devx_async_cmd_event_poll,
2507	.release = uverbs_uobject_fd_release,
2508	.llseek	 = no_llseek,
2509};
2510
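/*
 * read(2) on an async event FD. Reports a previous allocation failure as
 * -EOVERFLOW, then blocks (unless O_NONBLOCK) until an event arrives or the
 * FD is destroyed. In OMIT_DATA mode only the 8-byte cookie is returned;
 * otherwise the header plus the raw EQE is copied to the user buffer.
 */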
2511static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
2512				     size_t count, loff_t *pos)
2513{
2514	struct devx_async_event_file *ev_file = filp->private_data;
2515	struct devx_event_subscription *event_sub;
2516	struct devx_async_event_data *uninitialized_var(event);
2517	int ret = 0;
2518	size_t eventsz;
2519	bool omit_data;
2520	void *event_data;
2521
2522	omit_data = ev_file->omit_data;
2523
2524	spin_lock_irq(&ev_file->lock);
2525
2526	if (ev_file->is_overflow_err) {
2527		ev_file->is_overflow_err = 0;
2528		spin_unlock_irq(&ev_file->lock);
2529		return -EOVERFLOW;
2530	}
2531
2533	while (list_empty(&ev_file->event_list)) {
2534		spin_unlock_irq(&ev_file->lock);
2535
2536		if (filp->f_flags & O_NONBLOCK)
2537			return -EAGAIN;
2538
2539		if (wait_event_interruptible(ev_file->poll_wait,
2540			    (!list_empty(&ev_file->event_list) ||
2541			     ev_file->is_destroyed))) {
2542			return -ERESTARTSYS;
2543		}
2544
2545		spin_lock_irq(&ev_file->lock);
2546		if (ev_file->is_destroyed) {
2547			spin_unlock_irq(&ev_file->lock);
2548			return -EIO;
2549		}
2550	}
2551
2552	if (omit_data) {
2553		event_sub = list_first_entry(&ev_file->event_list,
2554					struct devx_event_subscription,
2555					event_list);
2556		eventsz = sizeof(event_sub->cookie);
2557		event_data = &event_sub->cookie;
2558	} else {
2559		event = list_first_entry(&ev_file->event_list,
2560				      struct devx_async_event_data, list);
2561		eventsz = sizeof(struct mlx5_eqe) +
2562			sizeof(struct mlx5_ib_uapi_devx_async_event_hdr);
2563		event_data = &event->hdr;
2564	}
2565
2566	if (eventsz > count) {
2567		spin_unlock_irq(&ev_file->lock);
2568		return -EINVAL;
2569	}
2570
2571	if (omit_data)
2572		list_del_init(&event_sub->event_list);
2573	else
2574		list_del(&event->list);
2575
2576	spin_unlock_irq(&ev_file->lock);
2577
2578	if (copy_to_user(buf, event_data, eventsz))
2579		/* This points to an application issue, not a kernel concern */
2580		ret = -EFAULT;
2581	else
2582		ret = eventsz;
2583
2584	if (!omit_data)
2585		kfree(event);
2586	return ret;
2587}
2588
2589static __poll_t devx_async_event_poll(struct file *filp,
2590				      struct poll_table_struct *wait)
2591{
2592	struct devx_async_event_file *ev_file = filp->private_data;
2593	__poll_t pollflags = 0;
2594
2595	poll_wait(filp, &ev_file->poll_wait, wait);
2596
2597	spin_lock_irq(&ev_file->lock);
2598	if (ev_file->is_destroyed)
2599		pollflags = POLLIN | POLLRDNORM | POLLHUP;
2600	else if (!list_empty(&ev_file->event_list))
2601		pollflags = POLLIN | POLLRDNORM;
2602	spin_unlock_irq(&ev_file->lock);
2603
2604	return pollflags;
2605}
2606
2607static void devx_free_subscription(struct rcu_head *rcu)
2608{
2609	struct devx_event_subscription *event_sub =
2610		container_of(rcu, struct devx_event_subscription, rcu);
2611
2612	if (event_sub->eventfd.file)
2613		fdput(event_sub->eventfd);
2614	uverbs_uobject_put(&event_sub->ev_file->uobj);
2615	kfree(event_sub);
2616}
2617
2618static const struct file_operations devx_async_event_fops = {
2619	.owner	 = THIS_MODULE,
2620	.read	 = devx_async_event_read,
2621	.poll    = devx_async_event_poll,
2622	.release = uverbs_uobject_fd_release,
2623	.llseek	 = no_llseek,
2624};
2625
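/*
 * Destroy an async command FD uobject: mark the queue destroyed so readers
 * and pollers bail out, wait for all outstanding async commands via
 * mlx5_cmd_cleanup_async_ctx(), then free any completions still queued.
 */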
2626static int devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
2627					     enum rdma_remove_reason why)
2628{
2629	struct devx_async_cmd_event_file *comp_ev_file =
2630		container_of(uobj, struct devx_async_cmd_event_file,
2631			     uobj);
2632	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2633	struct devx_async_data *entry, *tmp;
2634
2635	spin_lock_irq(&ev_queue->lock);
2636	ev_queue->is_destroyed = 1;
2637	spin_unlock_irq(&ev_queue->lock);
2638	wake_up_interruptible(&ev_queue->poll_wait);
2639
2640	mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
2641
2642	spin_lock_irq(&comp_ev_file->ev_queue.lock);
2643	list_for_each_entry_safe(entry, tmp,
2644				 &comp_ev_file->ev_queue.event_list, list) {
2645		list_del(&entry->list);
2646		kvfree(entry);
2647	}
2648	spin_unlock_irq(&comp_ev_file->ev_queue.lock);
2649	return 0;
2650}
2651
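/*
 * Destroy an async event FD uobject: mark the file destroyed, drop any events
 * still queued, then detach and free every subscription owned by this FD
 * under event_xa_lock, deferring the final free to an RCU grace period.
 */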
2652static int devx_async_event_destroy_uobj(struct ib_uobject *uobj,
2653					 enum rdma_remove_reason why)
2654{
2655	struct devx_async_event_file *ev_file =
2656		container_of(uobj, struct devx_async_event_file,
2657			     uobj);
2658	struct devx_event_subscription *event_sub, *event_sub_tmp;
2659	struct mlx5_ib_dev *dev = ev_file->dev;
2660
2661	spin_lock_irq(&ev_file->lock);
2662	ev_file->is_destroyed = 1;
2663
2664	/* Free any pending event allocations */
2665	if (ev_file->omit_data) {
2666		struct devx_event_subscription *event_sub, *tmp;
2667
2668		list_for_each_entry_safe(event_sub, tmp, &ev_file->event_list,
2669					 event_list)
2670			list_del_init(&event_sub->event_list);
2671
2672	} else {
2673		struct devx_async_event_data *entry, *tmp;
2674
2675		list_for_each_entry_safe(entry, tmp, &ev_file->event_list,
2676					 list) {
2677			list_del(&entry->list);
2678			kfree(entry);
2679		}
2680	}
2681
2682	spin_unlock_irq(&ev_file->lock);
2683	wake_up_interruptible(&ev_file->poll_wait);
2684
2685	mutex_lock(&dev->devx_event_table.event_xa_lock);
2686	/* Delete the subscriptions related to this FD */
2687	list_for_each_entry_safe(event_sub, event_sub_tmp,
2688				 &ev_file->subscribed_events_list, file_list) {
2689		devx_cleanup_subscription(dev, event_sub);
2690		list_del_rcu(&event_sub->file_list);
2691		/* subscription may not be used by the read API any more */
2692		call_rcu(&event_sub->rcu, devx_free_subscription);
2693	}
2694	mutex_unlock(&dev->devx_event_table.event_xa_lock);
2695
2696	put_device(&dev->ib_dev.dev);
2697	return 0;
2698}
2699
2700DECLARE_UVERBS_NAMED_METHOD(
2701	MLX5_IB_METHOD_DEVX_UMEM_REG,
2702	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
2703			MLX5_IB_OBJECT_DEVX_UMEM,
2704			UVERBS_ACCESS_NEW,
2705			UA_MANDATORY),
2706	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
2707			   UVERBS_ATTR_TYPE(u64),
2708			   UA_MANDATORY),
2709	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
2710			   UVERBS_ATTR_TYPE(u64),
2711			   UA_MANDATORY),
2712	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
2713			     enum ib_access_flags),
2714	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
2715			    UVERBS_ATTR_TYPE(u32),
2716			    UA_MANDATORY));
2717
2718DECLARE_UVERBS_NAMED_METHOD_DESTROY(
2719	MLX5_IB_METHOD_DEVX_UMEM_DEREG,
2720	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
2721			MLX5_IB_OBJECT_DEVX_UMEM,
2722			UVERBS_ACCESS_DESTROY,
2723			UA_MANDATORY));
2724
2725DECLARE_UVERBS_NAMED_METHOD(
2726	MLX5_IB_METHOD_DEVX_QUERY_EQN,
2727	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
2728			   UVERBS_ATTR_TYPE(u32),
2729			   UA_MANDATORY),
2730	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
2731			    UVERBS_ATTR_TYPE(u32),
2732			    UA_MANDATORY));
2733
2734DECLARE_UVERBS_NAMED_METHOD(
2735	MLX5_IB_METHOD_DEVX_QUERY_UAR,
2736	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
2737			   UVERBS_ATTR_TYPE(u32),
2738			   UA_MANDATORY),
2739	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
2740			    UVERBS_ATTR_TYPE(u32),
2741			    UA_MANDATORY));
2742
2743DECLARE_UVERBS_NAMED_METHOD(
2744	MLX5_IB_METHOD_DEVX_OTHER,
2745	UVERBS_ATTR_PTR_IN(
2746		MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
2747		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2748		UA_MANDATORY,
2749		UA_ALLOC_AND_COPY),
2750	UVERBS_ATTR_PTR_OUT(
2751		MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
2752		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2753		UA_MANDATORY));
2754
2755DECLARE_UVERBS_NAMED_METHOD(
2756	MLX5_IB_METHOD_DEVX_OBJ_CREATE,
2757	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
2758			MLX5_IB_OBJECT_DEVX_OBJ,
2759			UVERBS_ACCESS_NEW,
2760			UA_MANDATORY),
2761	UVERBS_ATTR_PTR_IN(
2762		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
2763		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2764		UA_MANDATORY,
2765		UA_ALLOC_AND_COPY),
2766	UVERBS_ATTR_PTR_OUT(
2767		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
2768		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2769		UA_MANDATORY));
2770
2771DECLARE_UVERBS_NAMED_METHOD_DESTROY(
2772	MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
2773	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
2774			MLX5_IB_OBJECT_DEVX_OBJ,
2775			UVERBS_ACCESS_DESTROY,
2776			UA_MANDATORY));
2777
2778DECLARE_UVERBS_NAMED_METHOD(
2779	MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
2780	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
2781			UVERBS_IDR_ANY_OBJECT,
2782			UVERBS_ACCESS_WRITE,
2783			UA_MANDATORY),
2784	UVERBS_ATTR_PTR_IN(
2785		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
2786		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2787		UA_MANDATORY,
2788		UA_ALLOC_AND_COPY),
2789	UVERBS_ATTR_PTR_OUT(
2790		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
2791		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2792		UA_MANDATORY));
2793
2794DECLARE_UVERBS_NAMED_METHOD(
2795	MLX5_IB_METHOD_DEVX_OBJ_QUERY,
2796	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
2797			UVERBS_IDR_ANY_OBJECT,
2798			UVERBS_ACCESS_READ,
2799			UA_MANDATORY),
2800	UVERBS_ATTR_PTR_IN(
2801		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
2802		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2803		UA_MANDATORY,
2804		UA_ALLOC_AND_COPY),
2805	UVERBS_ATTR_PTR_OUT(
2806		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
2807		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2808		UA_MANDATORY));
2809
2810DECLARE_UVERBS_NAMED_METHOD(
2811	MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
2812	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
2813			UVERBS_IDR_ANY_OBJECT,
2814			UVERBS_ACCESS_READ,
2815			UA_MANDATORY),
2816	UVERBS_ATTR_PTR_IN(
2817		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
2818		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2819		UA_MANDATORY,
2820		UA_ALLOC_AND_COPY),
2821	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
2822		u16, UA_MANDATORY),
2823	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
2824		MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
2825		UVERBS_ACCESS_READ,
2826		UA_MANDATORY),
2827	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
2828		UVERBS_ATTR_TYPE(u64),
2829		UA_MANDATORY));
2830
2831DECLARE_UVERBS_NAMED_METHOD(
2832	MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT,
2833	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE,
2834		MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
2835		UVERBS_ACCESS_READ,
2836		UA_MANDATORY),
2837	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE,
2838		MLX5_IB_OBJECT_DEVX_OBJ,
2839		UVERBS_ACCESS_READ,
2840		UA_OPTIONAL),
2841	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
2842		UVERBS_ATTR_MIN_SIZE(sizeof(u16)),
2843		UA_MANDATORY,
2844		UA_ALLOC_AND_COPY),
2845	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE,
2846		UVERBS_ATTR_TYPE(u64),
2847		UA_OPTIONAL),
2848	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM,
2849		UVERBS_ATTR_TYPE(u32),
2850		UA_OPTIONAL));
2851
2852DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
2853			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
2854			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
2855			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN),
2856			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT));
2857
2858DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
2859			    UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
2860			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
2861			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
2862			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
2863			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
2864			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));
2865
2866DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
2867			    UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
2868			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
2869			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));
2870
2872DECLARE_UVERBS_NAMED_METHOD(
2873	MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
2874	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
2875			MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
2876			UVERBS_ACCESS_NEW,
2877			UA_MANDATORY));
2878
2879DECLARE_UVERBS_NAMED_OBJECT(
2880	MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
2881	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
2882			     devx_async_cmd_event_destroy_uobj,
2883			     &devx_async_cmd_event_fops, "[devx_async_cmd]",
2884			     FMODE_READ),
2885	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));
2886
2887DECLARE_UVERBS_NAMED_METHOD(
2888	MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC,
2889	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE,
2890			MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
2891			UVERBS_ACCESS_NEW,
2892			UA_MANDATORY),
2893	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
2894			enum mlx5_ib_uapi_devx_create_event_channel_flags,
2895			UA_MANDATORY));
2896
2897DECLARE_UVERBS_NAMED_OBJECT(
2898	MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
2899	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_event_file),
2900			     devx_async_event_destroy_uobj,
2901			     &devx_async_event_fops, "[devx_async_event]",
2902			     FMODE_READ),
2903	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC));
2904
2905static bool devx_is_supported(struct ib_device *device)
2906{
2907	struct mlx5_ib_dev *dev = to_mdev(device);
2908
2909	return MLX5_CAP_GEN(dev->mdev, log_max_uctx);
2910}
2911
2912const struct uapi_definition mlx5_ib_devx_defs[] = {
2913	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
2914		MLX5_IB_OBJECT_DEVX,
2915		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
2916	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
2917		MLX5_IB_OBJECT_DEVX_OBJ,
2918		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
2919	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
2920		MLX5_IB_OBJECT_DEVX_UMEM,
2921		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
2922	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
2923		MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
2924		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
2925	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
2926		MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
2927		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
2928	{},
2929};
2930