blob: 664e0f374ac006083e5c7d86c163556e42722af3 [file] [log] [blame]
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2/*
3 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
4 */
5
6#include <rdma/ib_user_verbs.h>
7#include <rdma/ib_verbs.h>
8#include <rdma/uverbs_types.h>
9#include <rdma/uverbs_ioctl.h>
10#include <rdma/mlx5_user_ioctl_cmds.h>
David Brazdil0f672f62019-12-10 10:32:29 +000011#include <rdma/mlx5_user_ioctl_verbs.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000012#include <rdma/ib_umem.h>
David Brazdil0f672f62019-12-10 10:32:29 +000013#include <rdma/uverbs_std_types.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000014#include <linux/mlx5/driver.h>
15#include <linux/mlx5/fs.h>
16#include "mlx5_ib.h"
David Brazdil0f672f62019-12-10 10:32:29 +000017#include <linux/xarray.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000018
19#define UVERBS_MODULE_NAME mlx5_ib
20#include <rdma/uverbs_named_ioctl.h>
21
David Brazdil0f672f62019-12-10 10:32:29 +000022static void dispatch_event_fd(struct list_head *fd_list, const void *data);
23
24enum devx_obj_flags {
25 DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
26 DEVX_OBJ_FLAGS_DCT = 1 << 1,
27 DEVX_OBJ_FLAGS_CQ = 1 << 2,
28};
29
30struct devx_async_data {
31 struct mlx5_ib_dev *mdev;
32 struct list_head list;
33 struct ib_uobject *fd_uobj;
34 struct mlx5_async_work cb_work;
35 u16 cmd_out_len;
36 /* must be last field in this structure */
37 struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
38};
39
40struct devx_async_event_data {
41 struct list_head list; /* headed in ev_file->event_list */
42 struct mlx5_ib_uapi_devx_async_event_hdr hdr;
43};
44
45/* first level XA value data structure */
46struct devx_event {
47 struct xarray object_ids; /* second XA level, Key = object id */
48 struct list_head unaffiliated_list;
49};
50
51/* second level XA value data structure */
52struct devx_obj_event {
53 struct rcu_head rcu;
54 struct list_head obj_sub_list;
55};
56
57struct devx_event_subscription {
58 struct list_head file_list; /* headed in ev_file->
59 * subscribed_events_list
60 */
61 struct list_head xa_list; /* headed in devx_event->unaffiliated_list or
62 * devx_obj_event->obj_sub_list
63 */
64 struct list_head obj_list; /* headed in devx_object */
65 struct list_head event_list; /* headed in ev_file->event_list or in
66 * temp list via subscription
67 */
68
69 u8 is_cleaned:1;
70 u32 xa_key_level1;
71 u32 xa_key_level2;
72 struct rcu_head rcu;
73 u64 cookie;
74 struct devx_async_event_file *ev_file;
75 struct file *filp; /* Upon hot unplug we need a direct access to */
76 struct eventfd_ctx *eventfd;
77};
78
79struct devx_async_event_file {
80 struct ib_uobject uobj;
81 /* Head of events that are subscribed to this FD */
82 struct list_head subscribed_events_list;
83 spinlock_t lock;
84 wait_queue_head_t poll_wait;
85 struct list_head event_list;
86 struct mlx5_ib_dev *dev;
87 u8 omit_data:1;
88 u8 is_overflow_err:1;
89 u8 is_destroyed:1;
90};
91
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000092#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
93struct devx_obj {
David Brazdil0f672f62019-12-10 10:32:29 +000094 struct mlx5_ib_dev *ib_dev;
95 u64 obj_id;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000096 u32 dinlen; /* destroy inbox length */
97 u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
David Brazdil0f672f62019-12-10 10:32:29 +000098 u32 flags;
99 union {
100 struct mlx5_ib_devx_mr devx_mr;
101 struct mlx5_core_dct core_dct;
102 struct mlx5_core_cq core_cq;
103 };
104 struct list_head event_sub; /* holds devx_event_subscription entries */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000105};
106
107struct devx_umem {
108 struct mlx5_core_dev *mdev;
109 struct ib_umem *umem;
110 u32 page_offset;
111 int page_shift;
112 int ncont;
113 u32 dinlen;
114 u32 dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
115};
116
117struct devx_umem_reg_cmd {
118 void *in;
119 u32 inlen;
120 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
121};
122
David Brazdil0f672f62019-12-10 10:32:29 +0000123static struct mlx5_ib_ucontext *
124devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000125{
David Brazdil0f672f62019-12-10 10:32:29 +0000126 return to_mucontext(ib_uverbs_get_ucontext(attrs));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000127}
128
David Brazdil0f672f62019-12-10 10:32:29 +0000129int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000130{
131 u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
132 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
David Brazdil0f672f62019-12-10 10:32:29 +0000133 void *uctx;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000134 int err;
David Brazdil0f672f62019-12-10 10:32:29 +0000135 u16 uid;
136 u32 cap = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000137
David Brazdil0f672f62019-12-10 10:32:29 +0000138 /* 0 means not supported */
139 if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000140 return -EINVAL;
141
David Brazdil0f672f62019-12-10 10:32:29 +0000142 uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
143 if (is_user && capable(CAP_NET_RAW) &&
144 (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
145 cap |= MLX5_UCTX_CAP_RAW_TX;
146 if (is_user && capable(CAP_SYS_RAWIO) &&
147 (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
148 MLX5_UCTX_CAP_INTERNAL_DEV_RES))
149 cap |= MLX5_UCTX_CAP_INTERNAL_DEV_RES;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000150
David Brazdil0f672f62019-12-10 10:32:29 +0000151 MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
152 MLX5_SET(uctx, uctx, cap, cap);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000153
154 err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
155 if (err)
156 return err;
157
David Brazdil0f672f62019-12-10 10:32:29 +0000158 uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
159 return uid;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000160}
161
David Brazdil0f672f62019-12-10 10:32:29 +0000162void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000163{
David Brazdil0f672f62019-12-10 10:32:29 +0000164 u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {0};
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000165 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
166
David Brazdil0f672f62019-12-10 10:32:29 +0000167 MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
168 MLX5_SET(destroy_uctx_in, in, uid, uid);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000169
170 mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
171}
172
173bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
174{
175 struct devx_obj *devx_obj = obj;
176 u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
177
178 switch (opcode) {
179 case MLX5_CMD_OP_DESTROY_TIR:
180 *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
181 *dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
182 obj_id);
183 return true;
184
185 case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
186 *dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
187 *dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
188 table_id);
189 return true;
190 default:
191 return false;
192 }
193}
194
David Brazdil0f672f62019-12-10 10:32:29 +0000195bool mlx5_ib_devx_is_flow_counter(void *obj, u32 *counter_id)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000196{
David Brazdil0f672f62019-12-10 10:32:29 +0000197 struct devx_obj *devx_obj = obj;
198 u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000199
David Brazdil0f672f62019-12-10 10:32:29 +0000200 if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) {
201 *counter_id = MLX5_GET(dealloc_flow_counter_in,
202 devx_obj->dinbox,
203 flow_counter_id);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000204 return true;
David Brazdil0f672f62019-12-10 10:32:29 +0000205 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000206
207 return false;
208}
209
David Brazdil0f672f62019-12-10 10:32:29 +0000210static bool is_legacy_unaffiliated_event_num(u16 event_num)
211{
212 switch (event_num) {
213 case MLX5_EVENT_TYPE_PORT_CHANGE:
214 return true;
215 default:
216 return false;
217 }
218}
219
220static bool is_legacy_obj_event_num(u16 event_num)
221{
222 switch (event_num) {
223 case MLX5_EVENT_TYPE_PATH_MIG:
224 case MLX5_EVENT_TYPE_COMM_EST:
225 case MLX5_EVENT_TYPE_SQ_DRAINED:
226 case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
227 case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
228 case MLX5_EVENT_TYPE_CQ_ERROR:
229 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
230 case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
231 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
232 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
233 case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
234 case MLX5_EVENT_TYPE_DCT_DRAINED:
235 case MLX5_EVENT_TYPE_COMP:
236 case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
237 case MLX5_EVENT_TYPE_XRQ_ERROR:
238 return true;
239 default:
240 return false;
241 }
242}
243
244static u16 get_legacy_obj_type(u16 opcode)
245{
246 switch (opcode) {
247 case MLX5_CMD_OP_CREATE_RQ:
248 return MLX5_EVENT_QUEUE_TYPE_RQ;
249 case MLX5_CMD_OP_CREATE_QP:
250 return MLX5_EVENT_QUEUE_TYPE_QP;
251 case MLX5_CMD_OP_CREATE_SQ:
252 return MLX5_EVENT_QUEUE_TYPE_SQ;
253 case MLX5_CMD_OP_CREATE_DCT:
254 return MLX5_EVENT_QUEUE_TYPE_DCT;
255 default:
256 return 0;
257 }
258}
259
260static u16 get_dec_obj_type(struct devx_obj *obj, u16 event_num)
261{
262 u16 opcode;
263
264 opcode = (obj->obj_id >> 32) & 0xffff;
265
266 if (is_legacy_obj_event_num(event_num))
267 return get_legacy_obj_type(opcode);
268
269 switch (opcode) {
270 case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
271 return (obj->obj_id >> 48);
272 case MLX5_CMD_OP_CREATE_RQ:
273 return MLX5_OBJ_TYPE_RQ;
274 case MLX5_CMD_OP_CREATE_QP:
275 return MLX5_OBJ_TYPE_QP;
276 case MLX5_CMD_OP_CREATE_SQ:
277 return MLX5_OBJ_TYPE_SQ;
278 case MLX5_CMD_OP_CREATE_DCT:
279 return MLX5_OBJ_TYPE_DCT;
280 case MLX5_CMD_OP_CREATE_TIR:
281 return MLX5_OBJ_TYPE_TIR;
282 case MLX5_CMD_OP_CREATE_TIS:
283 return MLX5_OBJ_TYPE_TIS;
284 case MLX5_CMD_OP_CREATE_PSV:
285 return MLX5_OBJ_TYPE_PSV;
286 case MLX5_OBJ_TYPE_MKEY:
287 return MLX5_OBJ_TYPE_MKEY;
288 case MLX5_CMD_OP_CREATE_RMP:
289 return MLX5_OBJ_TYPE_RMP;
290 case MLX5_CMD_OP_CREATE_XRC_SRQ:
291 return MLX5_OBJ_TYPE_XRC_SRQ;
292 case MLX5_CMD_OP_CREATE_XRQ:
293 return MLX5_OBJ_TYPE_XRQ;
294 case MLX5_CMD_OP_CREATE_RQT:
295 return MLX5_OBJ_TYPE_RQT;
296 case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
297 return MLX5_OBJ_TYPE_FLOW_COUNTER;
298 case MLX5_CMD_OP_CREATE_CQ:
299 return MLX5_OBJ_TYPE_CQ;
300 default:
301 return 0;
302 }
303}
304
305static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe)
306{
307 switch (event_type) {
308 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
309 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
310 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
311 case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
312 case MLX5_EVENT_TYPE_PATH_MIG:
313 case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
314 case MLX5_EVENT_TYPE_COMM_EST:
315 case MLX5_EVENT_TYPE_SQ_DRAINED:
316 case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
317 case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
318 return eqe->data.qp_srq.type;
319 case MLX5_EVENT_TYPE_CQ_ERROR:
320 case MLX5_EVENT_TYPE_XRQ_ERROR:
321 return 0;
322 case MLX5_EVENT_TYPE_DCT_DRAINED:
323 case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
324 return MLX5_EVENT_QUEUE_TYPE_DCT;
325 default:
326 return MLX5_GET(affiliated_event_header, &eqe->data, obj_type);
327 }
328}
329
330static u32 get_dec_obj_id(u64 obj_id)
331{
332 return (obj_id & 0xffffffff);
333}
334
335/*
336 * As the obj_id in the firmware is not globally unique the object type
337 * must be considered upon checking for a valid object id.
338 * For that the opcode of the creator command is encoded as part of the obj_id.
339 */
340static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
341{
342 return ((u64)opcode << 32) | obj_id;
343}
344
345static u64 devx_get_obj_id(const void *in)
346{
347 u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
348 u64 obj_id;
349
350 switch (opcode) {
351 case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
352 case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
353 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT |
354 MLX5_GET(general_obj_in_cmd_hdr, in,
355 obj_type) << 16,
356 MLX5_GET(general_obj_in_cmd_hdr, in,
357 obj_id));
358 break;
359 case MLX5_CMD_OP_QUERY_MKEY:
360 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
361 MLX5_GET(query_mkey_in, in,
362 mkey_index));
363 break;
364 case MLX5_CMD_OP_QUERY_CQ:
365 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
366 MLX5_GET(query_cq_in, in, cqn));
367 break;
368 case MLX5_CMD_OP_MODIFY_CQ:
369 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
370 MLX5_GET(modify_cq_in, in, cqn));
371 break;
372 case MLX5_CMD_OP_QUERY_SQ:
373 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
374 MLX5_GET(query_sq_in, in, sqn));
375 break;
376 case MLX5_CMD_OP_MODIFY_SQ:
377 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
378 MLX5_GET(modify_sq_in, in, sqn));
379 break;
380 case MLX5_CMD_OP_QUERY_RQ:
381 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
382 MLX5_GET(query_rq_in, in, rqn));
383 break;
384 case MLX5_CMD_OP_MODIFY_RQ:
385 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
386 MLX5_GET(modify_rq_in, in, rqn));
387 break;
388 case MLX5_CMD_OP_QUERY_RMP:
389 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
390 MLX5_GET(query_rmp_in, in, rmpn));
391 break;
392 case MLX5_CMD_OP_MODIFY_RMP:
393 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
394 MLX5_GET(modify_rmp_in, in, rmpn));
395 break;
396 case MLX5_CMD_OP_QUERY_RQT:
397 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
398 MLX5_GET(query_rqt_in, in, rqtn));
399 break;
400 case MLX5_CMD_OP_MODIFY_RQT:
401 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
402 MLX5_GET(modify_rqt_in, in, rqtn));
403 break;
404 case MLX5_CMD_OP_QUERY_TIR:
405 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
406 MLX5_GET(query_tir_in, in, tirn));
407 break;
408 case MLX5_CMD_OP_MODIFY_TIR:
409 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
410 MLX5_GET(modify_tir_in, in, tirn));
411 break;
412 case MLX5_CMD_OP_QUERY_TIS:
413 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
414 MLX5_GET(query_tis_in, in, tisn));
415 break;
416 case MLX5_CMD_OP_MODIFY_TIS:
417 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
418 MLX5_GET(modify_tis_in, in, tisn));
419 break;
420 case MLX5_CMD_OP_QUERY_FLOW_TABLE:
421 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
422 MLX5_GET(query_flow_table_in, in,
423 table_id));
424 break;
425 case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
426 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
427 MLX5_GET(modify_flow_table_in, in,
428 table_id));
429 break;
430 case MLX5_CMD_OP_QUERY_FLOW_GROUP:
431 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
432 MLX5_GET(query_flow_group_in, in,
433 group_id));
434 break;
435 case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
436 obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
437 MLX5_GET(query_fte_in, in,
438 flow_index));
439 break;
440 case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
441 obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
442 MLX5_GET(set_fte_in, in, flow_index));
443 break;
444 case MLX5_CMD_OP_QUERY_Q_COUNTER:
445 obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
446 MLX5_GET(query_q_counter_in, in,
447 counter_set_id));
448 break;
449 case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
450 obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
451 MLX5_GET(query_flow_counter_in, in,
452 flow_counter_id));
453 break;
454 case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
455 obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
456 MLX5_GET(general_obj_in_cmd_hdr, in,
457 obj_id));
458 break;
459 case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
460 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
461 MLX5_GET(query_scheduling_element_in,
462 in, scheduling_element_id));
463 break;
464 case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
465 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
466 MLX5_GET(modify_scheduling_element_in,
467 in, scheduling_element_id));
468 break;
469 case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
470 obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
471 MLX5_GET(add_vxlan_udp_dport_in, in,
472 vxlan_udp_port));
473 break;
474 case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
475 obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
476 MLX5_GET(query_l2_table_entry_in, in,
477 table_index));
478 break;
479 case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
480 obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
481 MLX5_GET(set_l2_table_entry_in, in,
482 table_index));
483 break;
484 case MLX5_CMD_OP_QUERY_QP:
485 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
486 MLX5_GET(query_qp_in, in, qpn));
487 break;
488 case MLX5_CMD_OP_RST2INIT_QP:
489 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
490 MLX5_GET(rst2init_qp_in, in, qpn));
491 break;
Olivier Deprez0e641232021-09-23 10:07:05 +0200492 case MLX5_CMD_OP_INIT2INIT_QP:
493 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
494 MLX5_GET(init2init_qp_in, in, qpn));
495 break;
David Brazdil0f672f62019-12-10 10:32:29 +0000496 case MLX5_CMD_OP_INIT2RTR_QP:
497 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
498 MLX5_GET(init2rtr_qp_in, in, qpn));
499 break;
500 case MLX5_CMD_OP_RTR2RTS_QP:
501 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
502 MLX5_GET(rtr2rts_qp_in, in, qpn));
503 break;
504 case MLX5_CMD_OP_RTS2RTS_QP:
505 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
506 MLX5_GET(rts2rts_qp_in, in, qpn));
507 break;
508 case MLX5_CMD_OP_SQERR2RTS_QP:
509 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
510 MLX5_GET(sqerr2rts_qp_in, in, qpn));
511 break;
512 case MLX5_CMD_OP_2ERR_QP:
513 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
514 MLX5_GET(qp_2err_in, in, qpn));
515 break;
516 case MLX5_CMD_OP_2RST_QP:
517 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
518 MLX5_GET(qp_2rst_in, in, qpn));
519 break;
520 case MLX5_CMD_OP_QUERY_DCT:
521 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
522 MLX5_GET(query_dct_in, in, dctn));
523 break;
524 case MLX5_CMD_OP_QUERY_XRQ:
525 case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
526 case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
527 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
528 MLX5_GET(query_xrq_in, in, xrqn));
529 break;
530 case MLX5_CMD_OP_QUERY_XRC_SRQ:
531 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
532 MLX5_GET(query_xrc_srq_in, in,
533 xrc_srqn));
534 break;
535 case MLX5_CMD_OP_ARM_XRC_SRQ:
536 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
537 MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
538 break;
539 case MLX5_CMD_OP_QUERY_SRQ:
540 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
541 MLX5_GET(query_srq_in, in, srqn));
542 break;
543 case MLX5_CMD_OP_ARM_RQ:
544 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
545 MLX5_GET(arm_rq_in, in, srq_number));
546 break;
547 case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
548 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
549 MLX5_GET(drain_dct_in, in, dctn));
550 break;
551 case MLX5_CMD_OP_ARM_XRQ:
552 case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
553 case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
554 case MLX5_CMD_OP_MODIFY_XRQ:
555 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
556 MLX5_GET(arm_xrq_in, in, xrqn));
557 break;
558 case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
559 obj_id = get_enc_obj_id
560 (MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
561 MLX5_GET(query_packet_reformat_context_in,
562 in, packet_reformat_id));
563 break;
564 default:
565 obj_id = 0;
566 }
567
568 return obj_id;
569}
570
571static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
572 struct ib_uobject *uobj, const void *in)
573{
574 struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
575 u64 obj_id = devx_get_obj_id(in);
576
577 if (!obj_id)
578 return false;
579
580 switch (uobj_get_object_id(uobj)) {
581 case UVERBS_OBJECT_CQ:
582 return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
583 to_mcq(uobj->object)->mcq.cqn) ==
584 obj_id;
585
586 case UVERBS_OBJECT_SRQ:
587 {
588 struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
589 u16 opcode;
590
591 switch (srq->common.res) {
592 case MLX5_RES_XSRQ:
593 opcode = MLX5_CMD_OP_CREATE_XRC_SRQ;
594 break;
595 case MLX5_RES_XRQ:
596 opcode = MLX5_CMD_OP_CREATE_XRQ;
597 break;
598 default:
599 if (!dev->mdev->issi)
600 opcode = MLX5_CMD_OP_CREATE_SRQ;
601 else
602 opcode = MLX5_CMD_OP_CREATE_RMP;
603 }
604
605 return get_enc_obj_id(opcode,
606 to_msrq(uobj->object)->msrq.srqn) ==
607 obj_id;
608 }
609
610 case UVERBS_OBJECT_QP:
611 {
612 struct mlx5_ib_qp *qp = to_mqp(uobj->object);
613 enum ib_qp_type qp_type = qp->ibqp.qp_type;
614
615 if (qp_type == IB_QPT_RAW_PACKET ||
616 (qp->flags & MLX5_IB_QP_UNDERLAY)) {
617 struct mlx5_ib_raw_packet_qp *raw_packet_qp =
618 &qp->raw_packet_qp;
619 struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
620 struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
621
622 return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
623 rq->base.mqp.qpn) == obj_id ||
624 get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
625 sq->base.mqp.qpn) == obj_id ||
626 get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
627 rq->tirn) == obj_id ||
628 get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
629 sq->tisn) == obj_id);
630 }
631
632 if (qp_type == MLX5_IB_QPT_DCT)
633 return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
634 qp->dct.mdct.mqp.qpn) == obj_id;
635
636 return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
637 qp->ibqp.qp_num) == obj_id;
638 }
639
640 case UVERBS_OBJECT_WQ:
641 return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
642 to_mrwq(uobj->object)->core_qp.qpn) ==
643 obj_id;
644
645 case UVERBS_OBJECT_RWQ_IND_TBL:
646 return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
647 to_mrwq_ind_table(uobj->object)->rqtn) ==
648 obj_id;
649
650 case MLX5_IB_OBJECT_DEVX_OBJ:
651 return ((struct devx_obj *)uobj->object)->obj_id == obj_id;
652
653 default:
654 return false;
655 }
656}
657
658static void devx_set_umem_valid(const void *in)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000659{
660 u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
661
662 switch (opcode) {
David Brazdil0f672f62019-12-10 10:32:29 +0000663 case MLX5_CMD_OP_CREATE_MKEY:
664 MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
665 break;
666 case MLX5_CMD_OP_CREATE_CQ:
667 {
668 void *cqc;
669
670 MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
671 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
672 MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
673 break;
674 }
675 case MLX5_CMD_OP_CREATE_QP:
676 {
677 void *qpc;
678
679 qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
680 MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
681 MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
682 break;
683 }
684
685 case MLX5_CMD_OP_CREATE_RQ:
686 {
687 void *rqc, *wq;
688
689 rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
690 wq = MLX5_ADDR_OF(rqc, rqc, wq);
691 MLX5_SET(wq, wq, dbr_umem_valid, 1);
692 MLX5_SET(wq, wq, wq_umem_valid, 1);
693 break;
694 }
695
696 case MLX5_CMD_OP_CREATE_SQ:
697 {
698 void *sqc, *wq;
699
700 sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
701 wq = MLX5_ADDR_OF(sqc, sqc, wq);
702 MLX5_SET(wq, wq, dbr_umem_valid, 1);
703 MLX5_SET(wq, wq, wq_umem_valid, 1);
704 break;
705 }
706
707 case MLX5_CMD_OP_MODIFY_CQ:
708 MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
709 break;
710
711 case MLX5_CMD_OP_CREATE_RMP:
712 {
713 void *rmpc, *wq;
714
715 rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
716 wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
717 MLX5_SET(wq, wq, dbr_umem_valid, 1);
718 MLX5_SET(wq, wq, wq_umem_valid, 1);
719 break;
720 }
721
722 case MLX5_CMD_OP_CREATE_XRQ:
723 {
724 void *xrqc, *wq;
725
726 xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
727 wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
728 MLX5_SET(wq, wq, dbr_umem_valid, 1);
729 MLX5_SET(wq, wq, wq_umem_valid, 1);
730 break;
731 }
732
733 case MLX5_CMD_OP_CREATE_XRC_SRQ:
734 {
735 void *xrc_srqc;
736
737 MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
738 xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
739 xrc_srq_context_entry);
740 MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
741 break;
742 }
743
744 default:
745 return;
746 }
747}
748
749static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
750{
751 *opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
752
753 switch (*opcode) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000754 case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
755 case MLX5_CMD_OP_CREATE_MKEY:
756 case MLX5_CMD_OP_CREATE_CQ:
757 case MLX5_CMD_OP_ALLOC_PD:
758 case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
759 case MLX5_CMD_OP_CREATE_RMP:
760 case MLX5_CMD_OP_CREATE_SQ:
761 case MLX5_CMD_OP_CREATE_RQ:
762 case MLX5_CMD_OP_CREATE_RQT:
763 case MLX5_CMD_OP_CREATE_TIR:
764 case MLX5_CMD_OP_CREATE_TIS:
765 case MLX5_CMD_OP_ALLOC_Q_COUNTER:
766 case MLX5_CMD_OP_CREATE_FLOW_TABLE:
767 case MLX5_CMD_OP_CREATE_FLOW_GROUP:
768 case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
David Brazdil0f672f62019-12-10 10:32:29 +0000769 case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000770 case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
771 case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
772 case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
773 case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
774 case MLX5_CMD_OP_CREATE_QP:
775 case MLX5_CMD_OP_CREATE_SRQ:
776 case MLX5_CMD_OP_CREATE_XRC_SRQ:
777 case MLX5_CMD_OP_CREATE_DCT:
778 case MLX5_CMD_OP_CREATE_XRQ:
779 case MLX5_CMD_OP_ATTACH_TO_MCG:
780 case MLX5_CMD_OP_ALLOC_XRCD:
781 return true;
782 case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
783 {
784 u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
785 if (op_mod == 0)
786 return true;
787 return false;
788 }
David Brazdil0f672f62019-12-10 10:32:29 +0000789 case MLX5_CMD_OP_CREATE_PSV:
790 {
791 u8 num_psv = MLX5_GET(create_psv_in, in, num_psv);
792
793 if (num_psv == 1)
794 return true;
795 return false;
796 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000797 default:
798 return false;
799 }
800}
801
802static bool devx_is_obj_modify_cmd(const void *in)
803{
804 u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
805
806 switch (opcode) {
807 case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
808 case MLX5_CMD_OP_MODIFY_CQ:
809 case MLX5_CMD_OP_MODIFY_RMP:
810 case MLX5_CMD_OP_MODIFY_SQ:
811 case MLX5_CMD_OP_MODIFY_RQ:
812 case MLX5_CMD_OP_MODIFY_RQT:
813 case MLX5_CMD_OP_MODIFY_TIR:
814 case MLX5_CMD_OP_MODIFY_TIS:
815 case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
816 case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
817 case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
818 case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
819 case MLX5_CMD_OP_RST2INIT_QP:
820 case MLX5_CMD_OP_INIT2RTR_QP:
Olivier Deprez0e641232021-09-23 10:07:05 +0200821 case MLX5_CMD_OP_INIT2INIT_QP:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000822 case MLX5_CMD_OP_RTR2RTS_QP:
823 case MLX5_CMD_OP_RTS2RTS_QP:
824 case MLX5_CMD_OP_SQERR2RTS_QP:
825 case MLX5_CMD_OP_2ERR_QP:
826 case MLX5_CMD_OP_2RST_QP:
827 case MLX5_CMD_OP_ARM_XRC_SRQ:
828 case MLX5_CMD_OP_ARM_RQ:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000829 case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
830 case MLX5_CMD_OP_ARM_XRQ:
David Brazdil0f672f62019-12-10 10:32:29 +0000831 case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
832 case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
833 case MLX5_CMD_OP_MODIFY_XRQ:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000834 return true;
835 case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
836 {
837 u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
838
839 if (op_mod == 1)
840 return true;
841 return false;
842 }
843 default:
844 return false;
845 }
846}
847
848static bool devx_is_obj_query_cmd(const void *in)
849{
850 u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
851
852 switch (opcode) {
853 case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
854 case MLX5_CMD_OP_QUERY_MKEY:
855 case MLX5_CMD_OP_QUERY_CQ:
856 case MLX5_CMD_OP_QUERY_RMP:
857 case MLX5_CMD_OP_QUERY_SQ:
858 case MLX5_CMD_OP_QUERY_RQ:
859 case MLX5_CMD_OP_QUERY_RQT:
860 case MLX5_CMD_OP_QUERY_TIR:
861 case MLX5_CMD_OP_QUERY_TIS:
862 case MLX5_CMD_OP_QUERY_Q_COUNTER:
863 case MLX5_CMD_OP_QUERY_FLOW_TABLE:
864 case MLX5_CMD_OP_QUERY_FLOW_GROUP:
865 case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
866 case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
867 case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
868 case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
869 case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
870 case MLX5_CMD_OP_QUERY_QP:
871 case MLX5_CMD_OP_QUERY_SRQ:
872 case MLX5_CMD_OP_QUERY_XRC_SRQ:
873 case MLX5_CMD_OP_QUERY_DCT:
874 case MLX5_CMD_OP_QUERY_XRQ:
David Brazdil0f672f62019-12-10 10:32:29 +0000875 case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
876 case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
877 case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000878 return true;
879 default:
880 return false;
881 }
882}
883
David Brazdil0f672f62019-12-10 10:32:29 +0000884static bool devx_is_whitelist_cmd(void *in)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000885{
886 u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
887
888 switch (opcode) {
889 case MLX5_CMD_OP_QUERY_HCA_CAP:
David Brazdil0f672f62019-12-10 10:32:29 +0000890 case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
891 case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
892 return true;
893 default:
894 return false;
895 }
896}
897
898static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
899{
900 if (devx_is_whitelist_cmd(cmd_in)) {
901 struct mlx5_ib_dev *dev;
902
903 if (c->devx_uid)
904 return c->devx_uid;
905
906 dev = to_mdev(c->ibucontext.device);
907 if (dev->devx_whitelist_uid)
908 return dev->devx_whitelist_uid;
909
910 return -EOPNOTSUPP;
911 }
912
913 if (!c->devx_uid)
914 return -EINVAL;
915
916 return c->devx_uid;
917}
918
919static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
920{
921 u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
922
923 /* Pass all cmds for vhca_tunnel as general, tracking is done in FW */
924 if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) &&
925 MLX5_GET(general_obj_in_cmd_hdr, in, vhca_tunnel_id)) ||
926 (opcode >= MLX5_CMD_OP_GENERAL_START &&
927 opcode < MLX5_CMD_OP_GENERAL_END))
928 return true;
929
930 switch (opcode) {
931 case MLX5_CMD_OP_QUERY_HCA_CAP:
932 case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
933 case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000934 case MLX5_CMD_OP_QUERY_VPORT_STATE:
935 case MLX5_CMD_OP_QUERY_ADAPTER:
936 case MLX5_CMD_OP_QUERY_ISSI:
937 case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
938 case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
939 case MLX5_CMD_OP_QUERY_VNIC_ENV:
940 case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
941 case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
942 case MLX5_CMD_OP_NOP:
943 case MLX5_CMD_OP_QUERY_CONG_STATUS:
944 case MLX5_CMD_OP_QUERY_CONG_PARAMS:
945 case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
David Brazdil0f672f62019-12-10 10:32:29 +0000946 case MLX5_CMD_OP_QUERY_LAG:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000947 return true;
948 default:
949 return false;
950 }
951}
952
953static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
David Brazdil0f672f62019-12-10 10:32:29 +0000954 struct uverbs_attr_bundle *attrs)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000955{
956 struct mlx5_ib_ucontext *c;
957 struct mlx5_ib_dev *dev;
958 int user_vector;
959 int dev_eqn;
960 unsigned int irqn;
961 int err;
962
963 if (uverbs_copy_from(&user_vector, attrs,
964 MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
965 return -EFAULT;
966
David Brazdil0f672f62019-12-10 10:32:29 +0000967 c = devx_ufile2uctx(attrs);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000968 if (IS_ERR(c))
969 return PTR_ERR(c);
970 dev = to_mdev(c->ibucontext.device);
971
972 err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
973 if (err < 0)
974 return err;
975
976 if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
977 &dev_eqn, sizeof(dev_eqn)))
978 return -EFAULT;
979
980 return 0;
981}
982
983/*
984 *Security note:
985 * The hardware protection mechanism works like this: Each device object that
986 * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
987 * the device specification manual) upon its creation. Then upon doorbell,
988 * hardware fetches the object context for which the doorbell was rang, and
989 * validates that the UAR through which the DB was rang matches the UAR ID
990 * of the object.
991 * If no match the doorbell is silently ignored by the hardware. Of course,
992 * the user cannot ring a doorbell on a UAR that was not mapped to it.
993 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
994 * mailboxes (except tagging them with UID), we expose to the user its UAR
995 * ID, so it can embed it in these objects in the expected specification
996 * format. So the only thing the user can do is hurt itself by creating a
997 * QP/SQ/CQ with a UAR ID other than his, and then in this case other users
998 * may ring a doorbell on its objects.
999 * The consequence of that will be that another user can schedule a QP/SQ
1000 * of the buggy user for execution (just insert it to the hardware schedule
1001 * queue or arm its CQ for event generation), no further harm is expected.
1002 */
1003static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
David Brazdil0f672f62019-12-10 10:32:29 +00001004 struct uverbs_attr_bundle *attrs)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001005{
1006 struct mlx5_ib_ucontext *c;
1007 struct mlx5_ib_dev *dev;
1008 u32 user_idx;
1009 s32 dev_idx;
1010
David Brazdil0f672f62019-12-10 10:32:29 +00001011 c = devx_ufile2uctx(attrs);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001012 if (IS_ERR(c))
1013 return PTR_ERR(c);
1014 dev = to_mdev(c->ibucontext.device);
1015
1016 if (uverbs_copy_from(&user_idx, attrs,
1017 MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
1018 return -EFAULT;
1019
1020 dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
1021 if (dev_idx < 0)
1022 return dev_idx;
1023
1024 if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
1025 &dev_idx, sizeof(dev_idx)))
1026 return -EFAULT;
1027
1028 return 0;
1029}
1030
1031static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
David Brazdil0f672f62019-12-10 10:32:29 +00001032 struct uverbs_attr_bundle *attrs)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001033{
1034 struct mlx5_ib_ucontext *c;
1035 struct mlx5_ib_dev *dev;
1036 void *cmd_in = uverbs_attr_get_alloced_ptr(
1037 attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
1038 int cmd_out_len = uverbs_attr_get_len(attrs,
1039 MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
1040 void *cmd_out;
1041 int err;
David Brazdil0f672f62019-12-10 10:32:29 +00001042 int uid;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001043
David Brazdil0f672f62019-12-10 10:32:29 +00001044 c = devx_ufile2uctx(attrs);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001045 if (IS_ERR(c))
1046 return PTR_ERR(c);
1047 dev = to_mdev(c->ibucontext.device);
1048
David Brazdil0f672f62019-12-10 10:32:29 +00001049 uid = devx_get_uid(c, cmd_in);
1050 if (uid < 0)
1051 return uid;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001052
1053 /* Only white list of some general HCA commands are allowed for this method. */
David Brazdil0f672f62019-12-10 10:32:29 +00001054 if (!devx_is_general_cmd(cmd_in, dev))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001055 return -EINVAL;
1056
1057 cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1058 if (IS_ERR(cmd_out))
1059 return PTR_ERR(cmd_out);
1060
David Brazdil0f672f62019-12-10 10:32:29 +00001061 MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001062 err = mlx5_cmd_exec(dev->mdev, cmd_in,
1063 uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
1064 cmd_out, cmd_out_len);
1065 if (err)
1066 return err;
1067
1068 return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
1069 cmd_out_len);
1070}
1071
1072static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
1073 u32 *dinlen,
1074 u32 *obj_id)
1075{
1076 u16 obj_type = MLX5_GET(general_obj_in_cmd_hdr, in, obj_type);
1077 u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);
1078
1079 *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
1080 *dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);
1081
1082 MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
1083 MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);
1084
1085 switch (MLX5_GET(general_obj_in_cmd_hdr, in, opcode)) {
1086 case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
1087 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
1088 MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
1089 break;
1090
David Brazdil0f672f62019-12-10 10:32:29 +00001091 case MLX5_CMD_OP_CREATE_UMEM:
1092 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1093 MLX5_CMD_OP_DESTROY_UMEM);
1094 break;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001095 case MLX5_CMD_OP_CREATE_MKEY:
1096 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_MKEY);
1097 break;
1098 case MLX5_CMD_OP_CREATE_CQ:
1099 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
1100 break;
1101 case MLX5_CMD_OP_ALLOC_PD:
1102 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
1103 break;
1104 case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
1105 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1106 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
1107 break;
1108 case MLX5_CMD_OP_CREATE_RMP:
1109 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
1110 break;
1111 case MLX5_CMD_OP_CREATE_SQ:
1112 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
1113 break;
1114 case MLX5_CMD_OP_CREATE_RQ:
1115 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
1116 break;
1117 case MLX5_CMD_OP_CREATE_RQT:
1118 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
1119 break;
1120 case MLX5_CMD_OP_CREATE_TIR:
Olivier Deprez0e641232021-09-23 10:07:05 +02001121 *obj_id = MLX5_GET(create_tir_out, out, tirn);
1122 MLX5_SET(destroy_tir_in, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
1123 MLX5_SET(destroy_tir_in, din, tirn, *obj_id);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001124 break;
1125 case MLX5_CMD_OP_CREATE_TIS:
1126 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
1127 break;
1128 case MLX5_CMD_OP_ALLOC_Q_COUNTER:
1129 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1130 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
1131 break;
1132 case MLX5_CMD_OP_CREATE_FLOW_TABLE:
1133 *dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
1134 *obj_id = MLX5_GET(create_flow_table_out, out, table_id);
1135 MLX5_SET(destroy_flow_table_in, din, other_vport,
1136 MLX5_GET(create_flow_table_in, in, other_vport));
1137 MLX5_SET(destroy_flow_table_in, din, vport_number,
1138 MLX5_GET(create_flow_table_in, in, vport_number));
1139 MLX5_SET(destroy_flow_table_in, din, table_type,
1140 MLX5_GET(create_flow_table_in, in, table_type));
1141 MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
1142 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1143 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
1144 break;
1145 case MLX5_CMD_OP_CREATE_FLOW_GROUP:
1146 *dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
1147 *obj_id = MLX5_GET(create_flow_group_out, out, group_id);
1148 MLX5_SET(destroy_flow_group_in, din, other_vport,
1149 MLX5_GET(create_flow_group_in, in, other_vport));
1150 MLX5_SET(destroy_flow_group_in, din, vport_number,
1151 MLX5_GET(create_flow_group_in, in, vport_number));
1152 MLX5_SET(destroy_flow_group_in, din, table_type,
1153 MLX5_GET(create_flow_group_in, in, table_type));
1154 MLX5_SET(destroy_flow_group_in, din, table_id,
1155 MLX5_GET(create_flow_group_in, in, table_id));
1156 MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
1157 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1158 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
1159 break;
1160 case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
1161 *dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
1162 *obj_id = MLX5_GET(set_fte_in, in, flow_index);
1163 MLX5_SET(delete_fte_in, din, other_vport,
1164 MLX5_GET(set_fte_in, in, other_vport));
1165 MLX5_SET(delete_fte_in, din, vport_number,
1166 MLX5_GET(set_fte_in, in, vport_number));
1167 MLX5_SET(delete_fte_in, din, table_type,
1168 MLX5_GET(set_fte_in, in, table_type));
1169 MLX5_SET(delete_fte_in, din, table_id,
1170 MLX5_GET(set_fte_in, in, table_id));
1171 MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
1172 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1173 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
1174 break;
1175 case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
1176 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1177 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
1178 break;
David Brazdil0f672f62019-12-10 10:32:29 +00001179 case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001180 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
David Brazdil0f672f62019-12-10 10:32:29 +00001181 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001182 break;
1183 case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
1184 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1185 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
1186 break;
1187 case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
1188 *dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
1189 *obj_id = MLX5_GET(create_scheduling_element_out, out,
1190 scheduling_element_id);
1191 MLX5_SET(destroy_scheduling_element_in, din,
1192 scheduling_hierarchy,
1193 MLX5_GET(create_scheduling_element_in, in,
1194 scheduling_hierarchy));
1195 MLX5_SET(destroy_scheduling_element_in, din,
1196 scheduling_element_id, *obj_id);
1197 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1198 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
1199 break;
1200 case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
1201 *dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
1202 *obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
1203 MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
1204 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1205 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
1206 break;
1207 case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
1208 *dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
1209 *obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
1210 MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
1211 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1212 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
1213 break;
1214 case MLX5_CMD_OP_CREATE_QP:
1215 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_QP);
1216 break;
1217 case MLX5_CMD_OP_CREATE_SRQ:
1218 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
1219 break;
1220 case MLX5_CMD_OP_CREATE_XRC_SRQ:
1221 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1222 MLX5_CMD_OP_DESTROY_XRC_SRQ);
1223 break;
1224 case MLX5_CMD_OP_CREATE_DCT:
1225 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
1226 break;
1227 case MLX5_CMD_OP_CREATE_XRQ:
1228 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
1229 break;
1230 case MLX5_CMD_OP_ATTACH_TO_MCG:
1231 *dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
1232 MLX5_SET(detach_from_mcg_in, din, qpn,
1233 MLX5_GET(attach_to_mcg_in, in, qpn));
1234 memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
1235 MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
1236 MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
1237 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
1238 break;
1239 case MLX5_CMD_OP_ALLOC_XRCD:
1240 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
1241 break;
David Brazdil0f672f62019-12-10 10:32:29 +00001242 case MLX5_CMD_OP_CREATE_PSV:
1243 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1244 MLX5_CMD_OP_DESTROY_PSV);
1245 MLX5_SET(destroy_psv_in, din, psvn,
1246 MLX5_GET(create_psv_out, out, psv0_index));
1247 break;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001248 default:
1249 /* The entry must match to one of the devx_is_obj_create_cmd */
1250 WARN_ON(true);
1251 break;
1252 }
1253}
1254
David Brazdil0f672f62019-12-10 10:32:29 +00001255static int devx_handle_mkey_indirect(struct devx_obj *obj,
1256 struct mlx5_ib_dev *dev,
1257 void *in, void *out)
1258{
1259 struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr;
1260 struct mlx5_core_mkey *mkey;
1261 void *mkc;
1262 u8 key;
1263
1264 mkey = &devx_mr->mmkey;
1265 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1266 key = MLX5_GET(mkc, mkc, mkey_7_0);
1267 mkey->key = mlx5_idx_to_mkey(
1268 MLX5_GET(create_mkey_out, out, mkey_index)) | key;
1269 mkey->type = MLX5_MKEY_INDIRECT_DEVX;
1270 mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
1271 mkey->size = MLX5_GET64(mkc, mkc, len);
1272 mkey->pd = MLX5_GET(mkc, mkc, pd);
1273 devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
1274
1275 return xa_err(xa_store(&dev->mdev->priv.mkey_table,
1276 mlx5_base_mkey(mkey->key), mkey, GFP_KERNEL));
1277}
1278
1279static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
1280 struct devx_obj *obj,
1281 void *in, int in_len)
1282{
1283 int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) +
1284 MLX5_FLD_SZ_BYTES(create_mkey_in,
1285 memory_key_mkey_entry);
1286 void *mkc;
1287 u8 access_mode;
1288
1289 if (in_len < min_len)
1290 return -EINVAL;
1291
1292 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1293
1294 access_mode = MLX5_GET(mkc, mkc, access_mode_1_0);
1295 access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2;
1296
1297 if (access_mode == MLX5_MKC_ACCESS_MODE_KLMS ||
1298 access_mode == MLX5_MKC_ACCESS_MODE_KSM) {
1299 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
1300 obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY;
1301 return 0;
1302 }
1303
1304 MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
1305 return 0;
1306}
1307
1308static void devx_cleanup_subscription(struct mlx5_ib_dev *dev,
1309 struct devx_event_subscription *sub)
1310{
1311 struct devx_event *event;
1312 struct devx_obj_event *xa_val_level2;
1313
1314 if (sub->is_cleaned)
1315 return;
1316
1317 sub->is_cleaned = 1;
1318 list_del_rcu(&sub->xa_list);
1319
1320 if (list_empty(&sub->obj_list))
1321 return;
1322
1323 list_del_rcu(&sub->obj_list);
1324 /* check whether key level 1 for this obj_sub_list is empty */
1325 event = xa_load(&dev->devx_event_table.event_xa,
1326 sub->xa_key_level1);
1327 WARN_ON(!event);
1328
1329 xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2);
1330 if (list_empty(&xa_val_level2->obj_sub_list)) {
1331 xa_erase(&event->object_ids,
1332 sub->xa_key_level2);
1333 kfree_rcu(xa_val_level2, rcu);
1334 }
1335}
1336
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001337static int devx_obj_cleanup(struct ib_uobject *uobject,
David Brazdil0f672f62019-12-10 10:32:29 +00001338 enum rdma_remove_reason why,
1339 struct uverbs_attr_bundle *attrs)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001340{
1341 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
David Brazdil0f672f62019-12-10 10:32:29 +00001342 struct mlx5_devx_event_table *devx_event_table;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001343 struct devx_obj *obj = uobject->object;
David Brazdil0f672f62019-12-10 10:32:29 +00001344 struct devx_event_subscription *sub_entry, *tmp;
1345 struct mlx5_ib_dev *dev;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001346 int ret;
1347
David Brazdil0f672f62019-12-10 10:32:29 +00001348 dev = mlx5_udata_to_mdev(&attrs->driver_udata);
1349 if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
1350 /*
1351 * The pagefault_single_data_segment() does commands against
1352 * the mmkey, we must wait for that to stop before freeing the
1353 * mkey, as another allocation could get the same mkey #.
1354 */
1355 xa_erase(&obj->ib_dev->mdev->priv.mkey_table,
1356 mlx5_base_mkey(obj->devx_mr.mmkey.key));
1357 synchronize_srcu(&dev->mr_srcu);
1358 }
1359
1360 if (obj->flags & DEVX_OBJ_FLAGS_DCT)
1361 ret = mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
1362 else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
1363 ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
1364 else
1365 ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox,
1366 obj->dinlen, out, sizeof(out));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001367 if (ib_is_destroy_retryable(ret, why, uobject))
1368 return ret;
1369
David Brazdil0f672f62019-12-10 10:32:29 +00001370 devx_event_table = &dev->devx_event_table;
1371
1372 mutex_lock(&devx_event_table->event_xa_lock);
1373 list_for_each_entry_safe(sub_entry, tmp, &obj->event_sub, obj_list)
1374 devx_cleanup_subscription(dev, sub_entry);
1375 mutex_unlock(&devx_event_table->event_xa_lock);
1376
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001377 kfree(obj);
1378 return ret;
1379}
1380
David Brazdil0f672f62019-12-10 10:32:29 +00001381static void devx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
1382{
1383 struct devx_obj *obj = container_of(mcq, struct devx_obj, core_cq);
1384 struct mlx5_devx_event_table *table;
1385 struct devx_event *event;
1386 struct devx_obj_event *obj_event;
1387 u32 obj_id = mcq->cqn;
1388
1389 table = &obj->ib_dev->devx_event_table;
1390 rcu_read_lock();
1391 event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP);
1392 if (!event)
1393 goto out;
1394
1395 obj_event = xa_load(&event->object_ids, obj_id);
1396 if (!obj_event)
1397 goto out;
1398
1399 dispatch_event_fd(&obj_event->obj_sub_list, eqe);
1400out:
1401 rcu_read_unlock();
1402}
1403
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001404static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
David Brazdil0f672f62019-12-10 10:32:29 +00001405 struct uverbs_attr_bundle *attrs)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001406{
1407 void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
1408 int cmd_out_len = uverbs_attr_get_len(attrs,
1409 MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
David Brazdil0f672f62019-12-10 10:32:29 +00001410 int cmd_in_len = uverbs_attr_get_len(attrs,
1411 MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001412 void *cmd_out;
1413 struct ib_uobject *uobj = uverbs_attr_get_uobject(
1414 attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
David Brazdil0f672f62019-12-10 10:32:29 +00001415 struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1416 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001417 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
1418 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
1419 struct devx_obj *obj;
David Brazdil0f672f62019-12-10 10:32:29 +00001420 u16 obj_type = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001421 int err;
David Brazdil0f672f62019-12-10 10:32:29 +00001422 int uid;
1423 u32 obj_id;
1424 u16 opcode;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001425
David Brazdil0f672f62019-12-10 10:32:29 +00001426 if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1427 return -EINVAL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001428
David Brazdil0f672f62019-12-10 10:32:29 +00001429 uid = devx_get_uid(c, cmd_in);
1430 if (uid < 0)
1431 return uid;
1432
1433 if (!devx_is_obj_create_cmd(cmd_in, &opcode))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001434 return -EINVAL;
1435
1436 cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1437 if (IS_ERR(cmd_out))
1438 return PTR_ERR(cmd_out);
1439
1440 obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
1441 if (!obj)
1442 return -ENOMEM;
1443
David Brazdil0f672f62019-12-10 10:32:29 +00001444 MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1445 if (opcode == MLX5_CMD_OP_CREATE_MKEY) {
1446 err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len);
1447 if (err)
1448 goto obj_free;
1449 } else {
1450 devx_set_umem_valid(cmd_in);
1451 }
1452
1453 if (opcode == MLX5_CMD_OP_CREATE_DCT) {
1454 obj->flags |= DEVX_OBJ_FLAGS_DCT;
1455 err = mlx5_core_create_dct(dev->mdev, &obj->core_dct,
1456 cmd_in, cmd_in_len,
1457 cmd_out, cmd_out_len);
1458 } else if (opcode == MLX5_CMD_OP_CREATE_CQ) {
1459 obj->flags |= DEVX_OBJ_FLAGS_CQ;
1460 obj->core_cq.comp = devx_cq_comp;
1461 err = mlx5_core_create_cq(dev->mdev, &obj->core_cq,
1462 cmd_in, cmd_in_len, cmd_out,
1463 cmd_out_len);
1464 } else {
1465 err = mlx5_cmd_exec(dev->mdev, cmd_in,
1466 cmd_in_len,
1467 cmd_out, cmd_out_len);
1468 }
1469
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001470 if (err)
1471 goto obj_free;
1472
1473 uobj->object = obj;
David Brazdil0f672f62019-12-10 10:32:29 +00001474 INIT_LIST_HEAD(&obj->event_sub);
1475 obj->ib_dev = dev;
1476 devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
1477 &obj_id);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001478 WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));
1479
1480 err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
1481 if (err)
1482 goto obj_destroy;
1483
David Brazdil0f672f62019-12-10 10:32:29 +00001484 if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
1485 obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);
1486 obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);
1487
1488 if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
1489 err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
1490 if (err)
1491 goto obj_destroy;
1492 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001493 return 0;
1494
1495obj_destroy:
David Brazdil0f672f62019-12-10 10:32:29 +00001496 if (obj->flags & DEVX_OBJ_FLAGS_DCT)
1497 mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
1498 else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
1499 mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
1500 else
1501 mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out,
1502 sizeof(out));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001503obj_free:
1504 kfree(obj);
1505 return err;
1506}
1507
1508static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
David Brazdil0f672f62019-12-10 10:32:29 +00001509 struct uverbs_attr_bundle *attrs)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001510{
1511 void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
1512 int cmd_out_len = uverbs_attr_get_len(attrs,
1513 MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
1514 struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
1515 MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
David Brazdil0f672f62019-12-10 10:32:29 +00001516 struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1517 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1518 struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001519 void *cmd_out;
1520 int err;
David Brazdil0f672f62019-12-10 10:32:29 +00001521 int uid;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001522
David Brazdil0f672f62019-12-10 10:32:29 +00001523 if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1524 return -EINVAL;
1525
1526 uid = devx_get_uid(c, cmd_in);
1527 if (uid < 0)
1528 return uid;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001529
1530 if (!devx_is_obj_modify_cmd(cmd_in))
1531 return -EINVAL;
1532
David Brazdil0f672f62019-12-10 10:32:29 +00001533 if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001534 return -EINVAL;
1535
1536 cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1537 if (IS_ERR(cmd_out))
1538 return PTR_ERR(cmd_out);
1539
David Brazdil0f672f62019-12-10 10:32:29 +00001540 MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1541 devx_set_umem_valid(cmd_in);
1542
1543 err = mlx5_cmd_exec(mdev->mdev, cmd_in,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001544 uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
1545 cmd_out, cmd_out_len);
1546 if (err)
1547 return err;
1548
1549 return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
1550 cmd_out, cmd_out_len);
1551}
1552
1553static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
David Brazdil0f672f62019-12-10 10:32:29 +00001554 struct uverbs_attr_bundle *attrs)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001555{
1556 void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
1557 int cmd_out_len = uverbs_attr_get_len(attrs,
1558 MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
1559 struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
1560 MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
David Brazdil0f672f62019-12-10 10:32:29 +00001561 struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1562 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001563 void *cmd_out;
1564 int err;
David Brazdil0f672f62019-12-10 10:32:29 +00001565 int uid;
1566 struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001567
David Brazdil0f672f62019-12-10 10:32:29 +00001568 if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1569 return -EINVAL;
1570
1571 uid = devx_get_uid(c, cmd_in);
1572 if (uid < 0)
1573 return uid;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001574
1575 if (!devx_is_obj_query_cmd(cmd_in))
1576 return -EINVAL;
1577
David Brazdil0f672f62019-12-10 10:32:29 +00001578 if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001579 return -EINVAL;
1580
1581 cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1582 if (IS_ERR(cmd_out))
1583 return PTR_ERR(cmd_out);
1584
1585	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1586 err = mlx5_cmd_exec(mdev->mdev, cmd_in,
1587			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
1588 cmd_out, cmd_out_len);
1589 if (err)
1590 return err;
1591
1592 return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
1593 cmd_out, cmd_out_len);
1594}
1595
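/*
 * Asynchronous command completions are delivered through a dedicated FD.
 * Each FD owns an event queue protected by a spinlock; readers block on
 * poll_wait and bytes_in_use bounds the amount of queued completion data.
 */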
1596struct devx_async_event_queue {
1597 spinlock_t lock;
1598 wait_queue_head_t poll_wait;
1599 struct list_head event_list;
1600 atomic_t bytes_in_use;
1601 u8 is_destroyed:1;
1602};
1603
1604struct devx_async_cmd_event_file {
1605 struct ib_uobject uobj;
1606 struct devx_async_event_queue ev_queue;
1607 struct mlx5_async_ctx async_ctx;
1608};
1609
1610static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
1611{
1612 spin_lock_init(&ev_queue->lock);
1613 INIT_LIST_HEAD(&ev_queue->event_list);
1614 init_waitqueue_head(&ev_queue->poll_wait);
1615 atomic_set(&ev_queue->bytes_in_use, 0);
1616 ev_queue->is_destroyed = 0;
1617}
1618
1619static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
1620 struct uverbs_attr_bundle *attrs)
1621{
1622 struct devx_async_cmd_event_file *ev_file;
1623
1624 struct ib_uobject *uobj = uverbs_attr_get_uobject(
1625 attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
1626 struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
1627
1628 ev_file = container_of(uobj, struct devx_async_cmd_event_file,
1629 uobj);
1630 devx_init_event_queue(&ev_file->ev_queue);
1631 mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
1632 return 0;
1633}
1634
1635static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC)(
1636 struct uverbs_attr_bundle *attrs)
1637{
1638 struct ib_uobject *uobj = uverbs_attr_get_uobject(
1639 attrs, MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE);
1640 struct devx_async_event_file *ev_file;
1641 struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1642 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1643 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
1644 u32 flags;
1645 int err;
1646
1647 err = uverbs_get_flags32(&flags, attrs,
1648 MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
1649 MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA);
1650
1651 if (err)
1652 return err;
1653
1654 ev_file = container_of(uobj, struct devx_async_event_file,
1655 uobj);
1656 spin_lock_init(&ev_file->lock);
1657 INIT_LIST_HEAD(&ev_file->event_list);
1658 init_waitqueue_head(&ev_file->poll_wait);
1659 if (flags & MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA)
1660 ev_file->omit_data = 1;
1661 INIT_LIST_HEAD(&ev_file->subscribed_events_list);
1662 ev_file->dev = dev;
1663 get_device(&dev->ib_dev.dev);
1664 return 0;
1665}
1666
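/*
 * Runs from the mlx5 async command machinery once firmware completes a
 * queued query: link the prepared devx_async_data onto the FD's event list,
 * wake any reader and drop the file reference taken when the command was
 * issued.
 */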
1667static void devx_query_callback(int status, struct mlx5_async_work *context)
1668{
1669 struct devx_async_data *async_data =
1670 container_of(context, struct devx_async_data, cb_work);
1671 struct ib_uobject *fd_uobj = async_data->fd_uobj;
1672 struct devx_async_cmd_event_file *ev_file;
1673 struct devx_async_event_queue *ev_queue;
1674 unsigned long flags;
1675
1676 ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
1677 uobj);
1678 ev_queue = &ev_file->ev_queue;
1679
1680 spin_lock_irqsave(&ev_queue->lock, flags);
1681 list_add_tail(&async_data->list, &ev_queue->event_list);
1682 spin_unlock_irqrestore(&ev_queue->lock, flags);
1683
1684 wake_up_interruptible(&ev_queue->poll_wait);
1685 fput(fd_uobj->object);
1686}
1687
1688#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */
1689
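/*
 * Issue a query asynchronously: validate the mailbox and object id, charge
 * the expected output size against the FD's 1MB budget, pin the FD with
 * get_file() and hand the command to mlx5_cmd_exec_cb(); the result is
 * consumed later via read() on the async command FD.
 */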
1690static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
1691 struct uverbs_attr_bundle *attrs)
1692{
1693 void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
1694 MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
1695 struct ib_uobject *uobj = uverbs_attr_get_uobject(
1696 attrs,
1697 MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
1698 u16 cmd_out_len;
1699 struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1700 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1701 struct ib_uobject *fd_uobj;
1702 int err;
1703 int uid;
1704 struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
1705 struct devx_async_cmd_event_file *ev_file;
1706 struct devx_async_data *async_data;
1707
1708 if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1709 return -EINVAL;
1710
1711 uid = devx_get_uid(c, cmd_in);
1712 if (uid < 0)
1713 return uid;
1714
1715 if (!devx_is_obj_query_cmd(cmd_in))
1716 return -EINVAL;
1717
1718 err = uverbs_get_const(&cmd_out_len, attrs,
1719 MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
1720 if (err)
1721 return err;
1722
1723 if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
1724 return -EINVAL;
1725
1726 fd_uobj = uverbs_attr_get_uobject(attrs,
1727 MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
1728 if (IS_ERR(fd_uobj))
1729 return PTR_ERR(fd_uobj);
1730
1731 ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
1732 uobj);
1733
1734 if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
1735 MAX_ASYNC_BYTES_IN_USE) {
1736 atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
1737 return -EAGAIN;
1738 }
1739
1740 async_data = kvzalloc(struct_size(async_data, hdr.out_data,
1741 cmd_out_len), GFP_KERNEL);
1742 if (!async_data) {
1743 err = -ENOMEM;
1744 goto sub_bytes;
1745 }
1746
1747 err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
1748 MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
1749 if (err)
1750 goto free_async;
1751
1752 async_data->cmd_out_len = cmd_out_len;
1753 async_data->mdev = mdev;
1754 async_data->fd_uobj = fd_uobj;
1755
1756 get_file(fd_uobj->object);
1757 MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1758 err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
1759 uverbs_attr_get_len(attrs,
1760 MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
1761 async_data->hdr.out_data,
1762 async_data->cmd_out_len,
1763 devx_query_callback, &async_data->cb_work);
1764
1765 if (err)
1766 goto cb_err;
1767
1768 return 0;
1769
1770cb_err:
1771 fput(fd_uobj->object);
1772free_async:
1773 kvfree(async_data);
1774sub_bytes:
1775 atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
1776 return err;
1777}
1778
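/*
 * Event subscriptions are kept in a two-level XArray: level 1 is keyed by
 * the event type (optionally combined with the object type), level 2 by the
 * object id for affiliated events. The helpers below allocate and roll back
 * those entries around a subscription attempt.
 */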
1779static void
1780subscribe_event_xa_dealloc(struct mlx5_devx_event_table *devx_event_table,
1781 u32 key_level1,
1782 bool is_level2,
1783 u32 key_level2)
1784{
1785 struct devx_event *event;
1786 struct devx_obj_event *xa_val_level2;
1787
1788 /* Level 1 is valid for future use, no need to free */
1789 if (!is_level2)
1790 return;
1791
1792 event = xa_load(&devx_event_table->event_xa, key_level1);
1793 WARN_ON(!event);
1794
1795 xa_val_level2 = xa_load(&event->object_ids,
1796 key_level2);
1797 if (list_empty(&xa_val_level2->obj_sub_list)) {
1798 xa_erase(&event->object_ids,
1799 key_level2);
1800 kfree_rcu(xa_val_level2, rcu);
1801 }
1802}
1803
1804static int
1805subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
1806 u32 key_level1,
1807 bool is_level2,
1808 u32 key_level2)
1809{
1810 struct devx_obj_event *obj_event;
1811 struct devx_event *event;
1812 int err;
1813
1814 event = xa_load(&devx_event_table->event_xa, key_level1);
1815 if (!event) {
1816 event = kzalloc(sizeof(*event), GFP_KERNEL);
1817 if (!event)
1818 return -ENOMEM;
1819
1820 INIT_LIST_HEAD(&event->unaffiliated_list);
1821 xa_init(&event->object_ids);
1822
1823 err = xa_insert(&devx_event_table->event_xa,
1824 key_level1,
1825 event,
1826 GFP_KERNEL);
1827 if (err) {
1828 kfree(event);
1829 return err;
1830 }
1831 }
1832
1833 if (!is_level2)
1834 return 0;
1835
1836 obj_event = xa_load(&event->object_ids, key_level2);
1837 if (!obj_event) {
1838 obj_event = kzalloc(sizeof(*obj_event), GFP_KERNEL);
1839 if (!obj_event)
1840 /* Level1 is valid for future use, no need to free */
1841 return -ENOMEM;
1842
1843 err = xa_insert(&event->object_ids,
1844 key_level2,
1845 obj_event,
1846 GFP_KERNEL);
1847 if (err)
1848 return err;
1849 INIT_LIST_HEAD(&obj_event->obj_sub_list);
1850 }
1851
1852 return 0;
1853}
1854
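/*
 * Requested event numbers are validated against the device capabilities:
 * devices exposing event_cap advertise 256-bit masks of user affiliated and
 * unaffiliated events (CQ completion is always allowed for object-bound
 * subscriptions), while older devices fall back to a fixed legacy list.
 */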
1855static bool is_valid_events_legacy(int num_events, u16 *event_type_num_list,
1856 struct devx_obj *obj)
1857{
1858 int i;
1859
1860 for (i = 0; i < num_events; i++) {
1861 if (obj) {
1862 if (!is_legacy_obj_event_num(event_type_num_list[i]))
1863 return false;
1864 } else if (!is_legacy_unaffiliated_event_num(
1865 event_type_num_list[i])) {
1866 return false;
1867 }
1868 }
1869
1870 return true;
1871}
1872
1873#define MAX_SUPP_EVENT_NUM 255
1874static bool is_valid_events(struct mlx5_core_dev *dev,
1875 int num_events, u16 *event_type_num_list,
1876 struct devx_obj *obj)
1877{
1878 __be64 *aff_events;
1879 __be64 *unaff_events;
1880 int mask_entry;
1881 int mask_bit;
1882 int i;
1883
1884 if (MLX5_CAP_GEN(dev, event_cap)) {
1885 aff_events = MLX5_CAP_DEV_EVENT(dev,
1886 user_affiliated_events);
1887 unaff_events = MLX5_CAP_DEV_EVENT(dev,
1888 user_unaffiliated_events);
1889 } else {
1890 return is_valid_events_legacy(num_events, event_type_num_list,
1891 obj);
1892 }
1893
1894 for (i = 0; i < num_events; i++) {
1895 if (event_type_num_list[i] > MAX_SUPP_EVENT_NUM)
1896 return false;
1897
1898 mask_entry = event_type_num_list[i] / 64;
1899 mask_bit = event_type_num_list[i] % 64;
1900
1901 if (obj) {
1902 /* CQ completion */
1903 if (event_type_num_list[i] == 0)
1904 continue;
1905
1906 if (!(be64_to_cpu(aff_events[mask_entry]) &
1907 (1ull << mask_bit)))
1908 return false;
1909
1910 continue;
1911 }
1912
1913 if (!(be64_to_cpu(unaff_events[mask_entry]) &
1914 (1ull << mask_bit)))
1915 return false;
1916 }
1917
1918 return true;
1919}
1920
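/*
 * Subscribe an FD (or an eventfd) to a list of device events, optionally
 * scoped to a single DEVX object. The handler first allocates every needed
 * XArray entry and subscription under event_xa_lock, and only then links
 * the subscriptions onto the file and event lists, so the second phase
 * cannot fail midway.
 */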
1921#define MAX_NUM_EVENTS 16
1922static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
1923 struct uverbs_attr_bundle *attrs)
1924{
1925 struct ib_uobject *devx_uobj = uverbs_attr_get_uobject(
1926 attrs,
1927 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE);
1928 struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1929 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1930 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
1931 struct ib_uobject *fd_uobj;
1932 struct devx_obj *obj = NULL;
1933 struct devx_async_event_file *ev_file;
1934 struct mlx5_devx_event_table *devx_event_table = &dev->devx_event_table;
1935 u16 *event_type_num_list;
1936 struct devx_event_subscription *event_sub, *tmp_sub;
1937 struct list_head sub_list;
1938 int redirect_fd;
1939 bool use_eventfd = false;
1940 int num_events;
1941 int num_alloc_xa_entries = 0;
1942 u16 obj_type = 0;
1943 u64 cookie = 0;
1944 u32 obj_id = 0;
1945 int err;
1946 int i;
1947
1948 if (!c->devx_uid)
1949 return -EINVAL;
1950
1951 if (!IS_ERR(devx_uobj)) {
1952 obj = (struct devx_obj *)devx_uobj->object;
1953 if (obj)
1954 obj_id = get_dec_obj_id(obj->obj_id);
1955 }
1956
1957 fd_uobj = uverbs_attr_get_uobject(attrs,
1958 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE);
1959 if (IS_ERR(fd_uobj))
1960 return PTR_ERR(fd_uobj);
1961
1962 ev_file = container_of(fd_uobj, struct devx_async_event_file,
1963 uobj);
1964
1965 if (uverbs_attr_is_valid(attrs,
1966 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM)) {
1967 err = uverbs_copy_from(&redirect_fd, attrs,
1968 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM);
1969 if (err)
1970 return err;
1971
1972 use_eventfd = true;
1973 }
1974
1975 if (uverbs_attr_is_valid(attrs,
1976 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE)) {
1977 if (use_eventfd)
1978 return -EINVAL;
1979
1980 err = uverbs_copy_from(&cookie, attrs,
1981 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE);
1982 if (err)
1983 return err;
1984 }
1985
1986 num_events = uverbs_attr_ptr_get_array_size(
1987 attrs, MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
1988 sizeof(u16));
1989
1990 if (num_events < 0)
1991 return num_events;
1992
1993 if (num_events > MAX_NUM_EVENTS)
1994 return -EINVAL;
1995
1996 event_type_num_list = uverbs_attr_get_alloced_ptr(attrs,
1997 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST);
1998
1999 if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj))
2000 return -EINVAL;
2001
2002 INIT_LIST_HEAD(&sub_list);
2003
2004	/* Serialize against concurrent subscriptions to the same XA entries
2005	 * so that both can succeed
2006 */
2007 mutex_lock(&devx_event_table->event_xa_lock);
2008 for (i = 0; i < num_events; i++) {
2009 u32 key_level1;
2010
2011 if (obj)
2012 obj_type = get_dec_obj_type(obj,
2013 event_type_num_list[i]);
2014 key_level1 = event_type_num_list[i] | obj_type << 16;
2015
2016 err = subscribe_event_xa_alloc(devx_event_table,
2017 key_level1,
2018 obj,
2019 obj_id);
2020 if (err)
2021 goto err;
2022
2023 num_alloc_xa_entries++;
2024 event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
2025		if (!event_sub) {
2026			err = -ENOMEM;
2027			goto err;
2028		}
2029
2030 list_add_tail(&event_sub->event_list, &sub_list);
2031 if (use_eventfd) {
2032 event_sub->eventfd =
2033 eventfd_ctx_fdget(redirect_fd);
2034
2035 if (IS_ERR(event_sub->eventfd)) {
2036 err = PTR_ERR(event_sub->eventfd);
2037 event_sub->eventfd = NULL;
2038 goto err;
2039 }
2040 }
2041
2042 event_sub->cookie = cookie;
2043 event_sub->ev_file = ev_file;
2044 event_sub->filp = fd_uobj->object;
2045		/* May be needed when cleaning up the devx object/subscription */
2046 event_sub->xa_key_level1 = key_level1;
2047 event_sub->xa_key_level2 = obj_id;
2048 INIT_LIST_HEAD(&event_sub->obj_list);
2049 }
2050
2051	/* Once all the allocations and XA insertions have succeeded, we can
2052	 * add all the subscriptions to the relevant lists without any risk
2053	 * of failure.
2054 */
2055 list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
2056 struct devx_event *event;
2057 struct devx_obj_event *obj_event;
2058
2059 list_del_init(&event_sub->event_list);
2060
2061 spin_lock_irq(&ev_file->lock);
2062 list_add_tail_rcu(&event_sub->file_list,
2063 &ev_file->subscribed_events_list);
2064 spin_unlock_irq(&ev_file->lock);
2065
2066 event = xa_load(&devx_event_table->event_xa,
2067 event_sub->xa_key_level1);
2068 WARN_ON(!event);
2069
2070 if (!obj) {
2071 list_add_tail_rcu(&event_sub->xa_list,
2072 &event->unaffiliated_list);
2073 continue;
2074 }
2075
2076 obj_event = xa_load(&event->object_ids, obj_id);
2077 WARN_ON(!obj_event);
2078 list_add_tail_rcu(&event_sub->xa_list,
2079 &obj_event->obj_sub_list);
2080 list_add_tail_rcu(&event_sub->obj_list,
2081 &obj->event_sub);
2082 }
2083
2084 mutex_unlock(&devx_event_table->event_xa_lock);
2085 return 0;
2086
2087err:
2088 list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
2089 list_del(&event_sub->event_list);
2090
2091 subscribe_event_xa_dealloc(devx_event_table,
2092 event_sub->xa_key_level1,
2093 obj,
2094 obj_id);
2095
2096 if (event_sub->eventfd)
2097 eventfd_ctx_put(event_sub->eventfd);
2098
2099 kfree(event_sub);
2100 }
2101
2102 mutex_unlock(&devx_event_table->event_xa_lock);
2103 return err;
2104}
2105
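/*
 * Pin a user memory region for DEVX: copy the address/length/access flags
 * from the attrs, pin the pages with ib_umem_get() and compute the page
 * shift, page count and page offset that the UMEM firmware object needs.
 */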
2106static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
2107 struct uverbs_attr_bundle *attrs,
2108 struct devx_umem *obj)
2109{
2110 u64 addr;
2111 size_t size;
2112 u32 access;
2113 int npages;
2114 int err;
2115 u32 page_mask;
2116
2117 if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
2118 uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
2119 return -EFAULT;
2120
2121 err = uverbs_get_flags32(&access, attrs,
2122 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
2123				 IB_ACCESS_LOCAL_WRITE |
2124 IB_ACCESS_REMOTE_WRITE |
2125 IB_ACCESS_REMOTE_READ);
2126	if (err)
2127 return err;
2128
2129 err = ib_check_mr_access(access);
2130 if (err)
2131 return err;
2132
2133	obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access, 0);
2134	if (IS_ERR(obj->umem))
2135 return PTR_ERR(obj->umem);
2136
2137 mlx5_ib_cont_pages(obj->umem, obj->umem->address,
2138 MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
2139 &obj->page_shift, &obj->ncont, NULL);
2140
2141 if (!npages) {
2142 ib_umem_release(obj->umem);
2143 return -EINVAL;
2144 }
2145
2146 page_mask = (1 << obj->page_shift) - 1;
2147 obj->page_offset = obj->umem->address & page_mask;
2148
2149 return 0;
2150}
2151
2152static int devx_umem_reg_cmd_alloc(struct uverbs_attr_bundle *attrs,
2153 struct devx_umem *obj,
2154 struct devx_umem_reg_cmd *cmd)
2155{
2156 cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
2157 (MLX5_ST_SZ_BYTES(mtt) * obj->ncont);
2158 cmd->in = uverbs_zalloc(attrs, cmd->inlen);
2159 return PTR_ERR_OR_ZERO(cmd->in);
2160}
2161
2162static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
2163 struct devx_umem *obj,
2164 struct devx_umem_reg_cmd *cmd)
2165{
2166 void *umem;
2167 __be64 *mtt;
2168
2169 umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
2170 mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);
2171
2172	MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
2173	MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
2174 MLX5_SET(umem, umem, log_page_size, obj->page_shift -
2175 MLX5_ADAPTER_PAGE_SHIFT);
2176 MLX5_SET(umem, umem, page_offset, obj->page_offset);
2177 mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
2178 (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
2179 MLX5_IB_MTT_READ);
2180}
2181
2182static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
2183	struct uverbs_attr_bundle *attrs)
2184{
2185 struct devx_umem_reg_cmd cmd;
2186 struct devx_umem *obj;
2187 struct ib_uobject *uobj = uverbs_attr_get_uobject(
2188 attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
2189 u32 obj_id;
2190	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
2191 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
2192	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
2193 int err;
2194
2195 if (!c->devx_uid)
2196		return -EINVAL;
2197
2198 obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
2199 if (!obj)
2200 return -ENOMEM;
2201
2202 err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
2203 if (err)
2204 goto err_obj_free;
2205
2206 err = devx_umem_reg_cmd_alloc(attrs, obj, &cmd);
2207 if (err)
2208 goto err_umem_release;
2209
2210 devx_umem_reg_cmd_build(dev, obj, &cmd);
2211
2212	MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
2213	err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
2214 sizeof(cmd.out));
2215 if (err)
2216 goto err_umem_release;
2217
2218 obj->mdev = dev->mdev;
2219 uobj->object = obj;
2220 devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
2221 err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, sizeof(obj_id));
2222 if (err)
2223 goto err_umem_destroy;
2224
2225 return 0;
2226
2227err_umem_destroy:
2228 mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out));
2229err_umem_release:
2230 ib_umem_release(obj->umem);
2231err_obj_free:
2232 kfree(obj);
2233 return err;
2234}
2235
2236static int devx_umem_cleanup(struct ib_uobject *uobject,
2237			     enum rdma_remove_reason why,
2238			     struct uverbs_attr_bundle *attrs)
2239{
2240 struct devx_umem *obj = uobject->object;
2241 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
2242 int err;
2243
2244 err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
2245 if (ib_is_destroy_retryable(err, why, uobject))
2246 return err;
2247
2248 ib_umem_release(obj->umem);
2249 kfree(obj);
2250 return 0;
2251}
2252
2253static bool is_unaffiliated_event(struct mlx5_core_dev *dev,
2254 unsigned long event_type)
2255{
2256 __be64 *unaff_events;
2257 int mask_entry;
2258 int mask_bit;
2259
2260 if (!MLX5_CAP_GEN(dev, event_cap))
2261 return is_legacy_unaffiliated_event_num(event_type);
2262
2263 unaff_events = MLX5_CAP_DEV_EVENT(dev,
2264 user_unaffiliated_events);
2265 WARN_ON(event_type > MAX_SUPP_EVENT_NUM);
2266
2267 mask_entry = event_type / 64;
2268 mask_bit = event_type % 64;
2269
2270 if (!(be64_to_cpu(unaff_events[mask_entry]) & (1ull << mask_bit)))
2271 return false;
2272
2273 return true;
2274}
2275
2276static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data)
2277{
2278 struct mlx5_eqe *eqe = data;
2279 u32 obj_id = 0;
2280
2281 switch (event_type) {
2282 case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
2283 case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
2284 case MLX5_EVENT_TYPE_PATH_MIG:
2285 case MLX5_EVENT_TYPE_COMM_EST:
2286 case MLX5_EVENT_TYPE_SQ_DRAINED:
2287 case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
2288 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
2289 case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
2290 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
2291 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
2292 obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
2293 break;
2294 case MLX5_EVENT_TYPE_XRQ_ERROR:
2295 obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff;
2296 break;
2297 case MLX5_EVENT_TYPE_DCT_DRAINED:
2298 case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
2299 obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
2300 break;
2301 case MLX5_EVENT_TYPE_CQ_ERROR:
2302 obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
2303 break;
2304 default:
2305 obj_id = MLX5_GET(affiliated_event_header, &eqe->data, obj_id);
2306 break;
2307 }
2308
2309 return obj_id;
2310}
2311
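/*
 * Queue one event towards a subscriber. In omit_data mode only the cookie
 * is reported, so a subscription that is already queued is not added twice;
 * otherwise the EQE payload is copied into a freshly allocated entry and an
 * allocation failure is recorded as an overflow for the next read().
 */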
2312static int deliver_event(struct devx_event_subscription *event_sub,
2313 const void *data)
2314{
2315 struct devx_async_event_file *ev_file;
2316 struct devx_async_event_data *event_data;
2317 unsigned long flags;
2318
2319 ev_file = event_sub->ev_file;
2320
2321 if (ev_file->omit_data) {
2322 spin_lock_irqsave(&ev_file->lock, flags);
2323 if (!list_empty(&event_sub->event_list)) {
2324 spin_unlock_irqrestore(&ev_file->lock, flags);
2325 return 0;
2326 }
2327
2328 list_add_tail(&event_sub->event_list, &ev_file->event_list);
2329 spin_unlock_irqrestore(&ev_file->lock, flags);
2330 wake_up_interruptible(&ev_file->poll_wait);
2331 return 0;
2332 }
2333
2334 event_data = kzalloc(sizeof(*event_data) + sizeof(struct mlx5_eqe),
2335 GFP_ATOMIC);
2336 if (!event_data) {
2337 spin_lock_irqsave(&ev_file->lock, flags);
2338 ev_file->is_overflow_err = 1;
2339 spin_unlock_irqrestore(&ev_file->lock, flags);
2340 return -ENOMEM;
2341 }
2342
2343 event_data->hdr.cookie = event_sub->cookie;
2344 memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe));
2345
2346 spin_lock_irqsave(&ev_file->lock, flags);
2347 list_add_tail(&event_data->list, &ev_file->event_list);
2348 spin_unlock_irqrestore(&ev_file->lock, flags);
2349 wake_up_interruptible(&ev_file->poll_wait);
2350
2351 return 0;
2352}
2353
2354static void dispatch_event_fd(struct list_head *fd_list,
2355 const void *data)
2356{
2357 struct devx_event_subscription *item;
2358
2359 list_for_each_entry_rcu(item, fd_list, xa_list) {
2360 if (!get_file_rcu(item->filp))
2361 continue;
2362
2363 if (item->eventfd) {
2364 eventfd_signal(item->eventfd, 1);
2365 fput(item->filp);
2366 continue;
2367 }
2368
2369 deliver_event(item, data);
2370 fput(item->filp);
2371 }
2372}
2373
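/*
 * EQ notifier entry point for DEVX subscriptions: command and page-request
 * events, which fire frequently and are handled in the kernel, are filtered
 * out up front. The (event type, object type) key then selects the matching
 * entry under RCU and the event is dispatched to the unaffiliated list or
 * to the subscriber list of the affected object id.
 */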
2374static int devx_event_notifier(struct notifier_block *nb,
2375 unsigned long event_type, void *data)
2376{
2377 struct mlx5_devx_event_table *table;
2378 struct mlx5_ib_dev *dev;
2379 struct devx_event *event;
2380 struct devx_obj_event *obj_event;
2381 u16 obj_type = 0;
2382 bool is_unaffiliated;
2383 u32 obj_id;
2384
2385	/* Explicitly filter out kernel events that may occur frequently */
2386 if (event_type == MLX5_EVENT_TYPE_CMD ||
2387 event_type == MLX5_EVENT_TYPE_PAGE_REQUEST)
2388 return NOTIFY_OK;
2389
2390 table = container_of(nb, struct mlx5_devx_event_table, devx_nb.nb);
2391 dev = container_of(table, struct mlx5_ib_dev, devx_event_table);
2392 is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type);
2393
2394 if (!is_unaffiliated)
2395 obj_type = get_event_obj_type(event_type, data);
2396
2397 rcu_read_lock();
2398 event = xa_load(&table->event_xa, event_type | (obj_type << 16));
2399 if (!event) {
2400 rcu_read_unlock();
2401 return NOTIFY_DONE;
2402 }
2403
2404 if (is_unaffiliated) {
2405 dispatch_event_fd(&event->unaffiliated_list, data);
2406 rcu_read_unlock();
2407 return NOTIFY_OK;
2408 }
2409
2410 obj_id = devx_get_obj_id_from_event(event_type, data);
2411 obj_event = xa_load(&event->object_ids, obj_id);
2412 if (!obj_event) {
2413 rcu_read_unlock();
2414 return NOTIFY_DONE;
2415 }
2416
2417 dispatch_event_fd(&obj_event->obj_sub_list, data);
2418
2419 rcu_read_unlock();
2420 return NOTIFY_OK;
2421}
2422
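/*
 * Device init/teardown of the DEVX event table: register an EQ notifier for
 * all event types and, on cleanup, release any subscriptions still on the
 * unaffiliated lists of the per event-type entries before destroying the
 * XArray.
 */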
2423void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev)
2424{
2425 struct mlx5_devx_event_table *table = &dev->devx_event_table;
2426
2427 xa_init(&table->event_xa);
2428 mutex_init(&table->event_xa_lock);
2429 MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
2430 mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
2431}
2432
2433void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev)
2434{
2435 struct mlx5_devx_event_table *table = &dev->devx_event_table;
2436 struct devx_event_subscription *sub, *tmp;
2437 struct devx_event *event;
2438 void *entry;
2439 unsigned long id;
2440
2441 mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
2442 mutex_lock(&dev->devx_event_table.event_xa_lock);
2443 xa_for_each(&table->event_xa, id, entry) {
2444 event = entry;
2445 list_for_each_entry_safe(sub, tmp, &event->unaffiliated_list,
2446 xa_list)
2447 devx_cleanup_subscription(dev, sub);
2448 kfree(entry);
2449 }
2450 mutex_unlock(&dev->devx_event_table.event_xa_lock);
2451 xa_destroy(&table->event_xa);
2452}
2453
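/*
 * read() for the async command FD: block (unless O_NONBLOCK) until a
 * completion is queued or the FD is being destroyed, require a buffer large
 * enough for the whole event, copy it to user space and return the queued
 * bytes to the FD's budget.
 */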
2454static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
2455 size_t count, loff_t *pos)
2456{
2457 struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
2458 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2459 struct devx_async_data *event;
2460 int ret = 0;
2461 size_t eventsz;
2462
2463 spin_lock_irq(&ev_queue->lock);
2464
2465 while (list_empty(&ev_queue->event_list)) {
2466 spin_unlock_irq(&ev_queue->lock);
2467
2468 if (filp->f_flags & O_NONBLOCK)
2469 return -EAGAIN;
2470
2471 if (wait_event_interruptible(
2472 ev_queue->poll_wait,
2473 (!list_empty(&ev_queue->event_list) ||
2474 ev_queue->is_destroyed))) {
2475 return -ERESTARTSYS;
2476 }
2477
2478 if (list_empty(&ev_queue->event_list) &&
2479 ev_queue->is_destroyed)
2480 return -EIO;
2481
2482 spin_lock_irq(&ev_queue->lock);
2483 }
2484
2485 event = list_entry(ev_queue->event_list.next,
2486 struct devx_async_data, list);
2487 eventsz = event->cmd_out_len +
2488 sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);
2489
2490 if (eventsz > count) {
2491 spin_unlock_irq(&ev_queue->lock);
2492 return -ENOSPC;
2493 }
2494
2495 list_del(ev_queue->event_list.next);
2496 spin_unlock_irq(&ev_queue->lock);
2497
2498 if (copy_to_user(buf, &event->hdr, eventsz))
2499 ret = -EFAULT;
2500 else
2501 ret = eventsz;
2502
2503 atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
2504 kvfree(event);
2505 return ret;
2506}
2507
2508static int devx_async_cmd_event_close(struct inode *inode, struct file *filp)
2509{
2510 struct ib_uobject *uobj = filp->private_data;
2511 struct devx_async_cmd_event_file *comp_ev_file = container_of(
2512 uobj, struct devx_async_cmd_event_file, uobj);
2513 struct devx_async_data *entry, *tmp;
2514
2515 spin_lock_irq(&comp_ev_file->ev_queue.lock);
2516 list_for_each_entry_safe(entry, tmp,
2517 &comp_ev_file->ev_queue.event_list, list)
2518 kvfree(entry);
2519 spin_unlock_irq(&comp_ev_file->ev_queue.lock);
2520
2521 uverbs_close_fd(filp);
2522 return 0;
2523}
2524
2525static __poll_t devx_async_cmd_event_poll(struct file *filp,
2526 struct poll_table_struct *wait)
2527{
2528 struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
2529 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2530 __poll_t pollflags = 0;
2531
2532 poll_wait(filp, &ev_queue->poll_wait, wait);
2533
2534 spin_lock_irq(&ev_queue->lock);
2535 if (ev_queue->is_destroyed)
2536 pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
2537 else if (!list_empty(&ev_queue->event_list))
2538 pollflags = EPOLLIN | EPOLLRDNORM;
2539 spin_unlock_irq(&ev_queue->lock);
2540
2541 return pollflags;
2542}
2543
2544static const struct file_operations devx_async_cmd_event_fops = {
2545 .owner = THIS_MODULE,
2546 .read = devx_async_cmd_event_read,
2547 .poll = devx_async_cmd_event_poll,
2548 .release = devx_async_cmd_event_close,
2549 .llseek = no_llseek,
2550};
2551
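/*
 * read() for the async event FD: a pending overflow or a destroyed file is
 * reported first; in omit_data mode only the 8-byte cookie of the oldest
 * ready subscription is returned, otherwise a full EQE plus header is
 * copied out and freed.
 */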
2552static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
2553 size_t count, loff_t *pos)
2554{
2555 struct devx_async_event_file *ev_file = filp->private_data;
2556 struct devx_event_subscription *event_sub;
2557 struct devx_async_event_data *uninitialized_var(event);
2558 int ret = 0;
2559 size_t eventsz;
2560 bool omit_data;
2561 void *event_data;
2562
2563 omit_data = ev_file->omit_data;
2564
2565 spin_lock_irq(&ev_file->lock);
2566
2567 if (ev_file->is_overflow_err) {
2568 ev_file->is_overflow_err = 0;
2569 spin_unlock_irq(&ev_file->lock);
2570 return -EOVERFLOW;
2571 }
2572
2573 if (ev_file->is_destroyed) {
2574 spin_unlock_irq(&ev_file->lock);
2575 return -EIO;
2576 }
2577
2578 while (list_empty(&ev_file->event_list)) {
2579 spin_unlock_irq(&ev_file->lock);
2580
2581 if (filp->f_flags & O_NONBLOCK)
2582 return -EAGAIN;
2583
2584 if (wait_event_interruptible(ev_file->poll_wait,
2585 (!list_empty(&ev_file->event_list) ||
2586 ev_file->is_destroyed))) {
2587 return -ERESTARTSYS;
2588 }
2589
2590 spin_lock_irq(&ev_file->lock);
2591 if (ev_file->is_destroyed) {
2592 spin_unlock_irq(&ev_file->lock);
2593 return -EIO;
2594 }
2595 }
2596
2597 if (omit_data) {
2598 event_sub = list_first_entry(&ev_file->event_list,
2599 struct devx_event_subscription,
2600 event_list);
2601 eventsz = sizeof(event_sub->cookie);
2602 event_data = &event_sub->cookie;
2603 } else {
2604 event = list_first_entry(&ev_file->event_list,
2605 struct devx_async_event_data, list);
2606 eventsz = sizeof(struct mlx5_eqe) +
2607 sizeof(struct mlx5_ib_uapi_devx_async_event_hdr);
2608 event_data = &event->hdr;
2609 }
2610
2611 if (eventsz > count) {
2612 spin_unlock_irq(&ev_file->lock);
2613 return -EINVAL;
2614 }
2615
2616 if (omit_data)
2617 list_del_init(&event_sub->event_list);
2618 else
2619 list_del(&event->list);
2620
2621 spin_unlock_irq(&ev_file->lock);
2622
2623 if (copy_to_user(buf, event_data, eventsz))
2624 /* This points to an application issue, not a kernel concern */
2625 ret = -EFAULT;
2626 else
2627 ret = eventsz;
2628
2629 if (!omit_data)
2630 kfree(event);
2631 return ret;
2632}
2633
2634static __poll_t devx_async_event_poll(struct file *filp,
2635 struct poll_table_struct *wait)
2636{
2637 struct devx_async_event_file *ev_file = filp->private_data;
2638 __poll_t pollflags = 0;
2639
2640 poll_wait(filp, &ev_file->poll_wait, wait);
2641
2642 spin_lock_irq(&ev_file->lock);
2643 if (ev_file->is_destroyed)
2644 pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
2645 else if (!list_empty(&ev_file->event_list))
2646 pollflags = EPOLLIN | EPOLLRDNORM;
2647 spin_unlock_irq(&ev_file->lock);
2648
2649 return pollflags;
2650}
2651
2652static int devx_async_event_close(struct inode *inode, struct file *filp)
2653{
2654 struct devx_async_event_file *ev_file = filp->private_data;
2655 struct devx_event_subscription *event_sub, *event_sub_tmp;
2656 struct devx_async_event_data *entry, *tmp;
2657 struct mlx5_ib_dev *dev = ev_file->dev;
2658
2659 mutex_lock(&dev->devx_event_table.event_xa_lock);
2660 /* delete the subscriptions which are related to this FD */
2661 list_for_each_entry_safe(event_sub, event_sub_tmp,
2662 &ev_file->subscribed_events_list, file_list) {
2663 devx_cleanup_subscription(dev, event_sub);
2664 if (event_sub->eventfd)
2665 eventfd_ctx_put(event_sub->eventfd);
2666
2667 list_del_rcu(&event_sub->file_list);
2668 /* subscription may not be used by the read API any more */
2669 kfree_rcu(event_sub, rcu);
2670 }
2671
2672 mutex_unlock(&dev->devx_event_table.event_xa_lock);
2673
2674 /* free the pending events allocation */
2675 if (!ev_file->omit_data) {
2676 spin_lock_irq(&ev_file->lock);
2677 list_for_each_entry_safe(entry, tmp,
2678 &ev_file->event_list, list)
2679 kfree(entry); /* read can't come any more */
2680 spin_unlock_irq(&ev_file->lock);
2681 }
2682
2683 uverbs_close_fd(filp);
2684 put_device(&dev->ib_dev.dev);
2685 return 0;
2686}
2687
2688static const struct file_operations devx_async_event_fops = {
2689 .owner = THIS_MODULE,
2690 .read = devx_async_event_read,
2691 .poll = devx_async_event_poll,
2692 .release = devx_async_event_close,
2693 .llseek = no_llseek,
2694};
2695
2696static int devx_hot_unplug_async_cmd_event_file(struct ib_uobject *uobj,
2697 enum rdma_remove_reason why)
2698{
2699 struct devx_async_cmd_event_file *comp_ev_file =
2700 container_of(uobj, struct devx_async_cmd_event_file,
2701 uobj);
2702 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2703
2704 spin_lock_irq(&ev_queue->lock);
2705 ev_queue->is_destroyed = 1;
2706 spin_unlock_irq(&ev_queue->lock);
2707
2708 if (why == RDMA_REMOVE_DRIVER_REMOVE)
2709 wake_up_interruptible(&ev_queue->poll_wait);
2710
2711 mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
2712 return 0;
2713};
2714
2715static int devx_hot_unplug_async_event_file(struct ib_uobject *uobj,
2716 enum rdma_remove_reason why)
2717{
2718 struct devx_async_event_file *ev_file =
2719 container_of(uobj, struct devx_async_event_file,
2720 uobj);
2721
2722 spin_lock_irq(&ev_file->lock);
2723 ev_file->is_destroyed = 1;
2724 spin_unlock_irq(&ev_file->lock);
2725
2726 wake_up_interruptible(&ev_file->poll_wait);
2727 return 0;
2728};
2729
2730DECLARE_UVERBS_NAMED_METHOD(
2731 MLX5_IB_METHOD_DEVX_UMEM_REG,
2732 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
2733 MLX5_IB_OBJECT_DEVX_UMEM,
2734 UVERBS_ACCESS_NEW,
2735 UA_MANDATORY),
2736 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
2737 UVERBS_ATTR_TYPE(u64),
2738 UA_MANDATORY),
2739 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
2740 UVERBS_ATTR_TYPE(u64),
2741 UA_MANDATORY),
2742 UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
2743 enum ib_access_flags),
2744 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
2745 UVERBS_ATTR_TYPE(u32),
2746 UA_MANDATORY));
2747
2748DECLARE_UVERBS_NAMED_METHOD_DESTROY(
2749 MLX5_IB_METHOD_DEVX_UMEM_DEREG,
2750 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
2751 MLX5_IB_OBJECT_DEVX_UMEM,
2752 UVERBS_ACCESS_DESTROY,
2753 UA_MANDATORY));
2754
2755DECLARE_UVERBS_NAMED_METHOD(
2756 MLX5_IB_METHOD_DEVX_QUERY_EQN,
2757 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
2758 UVERBS_ATTR_TYPE(u32),
2759 UA_MANDATORY),
2760 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
2761 UVERBS_ATTR_TYPE(u32),
2762 UA_MANDATORY));
2763
2764DECLARE_UVERBS_NAMED_METHOD(
2765 MLX5_IB_METHOD_DEVX_QUERY_UAR,
2766 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
2767 UVERBS_ATTR_TYPE(u32),
2768 UA_MANDATORY),
2769 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
2770 UVERBS_ATTR_TYPE(u32),
2771 UA_MANDATORY));
2772
2773DECLARE_UVERBS_NAMED_METHOD(
2774 MLX5_IB_METHOD_DEVX_OTHER,
2775 UVERBS_ATTR_PTR_IN(
2776 MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
2777 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2778 UA_MANDATORY,
2779 UA_ALLOC_AND_COPY),
2780 UVERBS_ATTR_PTR_OUT(
2781 MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
2782 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2783 UA_MANDATORY));
2784
2785DECLARE_UVERBS_NAMED_METHOD(
2786 MLX5_IB_METHOD_DEVX_OBJ_CREATE,
2787 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
2788 MLX5_IB_OBJECT_DEVX_OBJ,
2789 UVERBS_ACCESS_NEW,
2790 UA_MANDATORY),
2791 UVERBS_ATTR_PTR_IN(
2792 MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
2793 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2794 UA_MANDATORY,
2795 UA_ALLOC_AND_COPY),
2796 UVERBS_ATTR_PTR_OUT(
2797 MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
2798 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2799 UA_MANDATORY));
2800
2801DECLARE_UVERBS_NAMED_METHOD_DESTROY(
2802 MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
2803 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
2804 MLX5_IB_OBJECT_DEVX_OBJ,
2805 UVERBS_ACCESS_DESTROY,
2806 UA_MANDATORY));
2807
2808DECLARE_UVERBS_NAMED_METHOD(
2809 MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
2810 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
2811			UVERBS_IDR_ANY_OBJECT,
2812			UVERBS_ACCESS_WRITE,
2813 UA_MANDATORY),
2814 UVERBS_ATTR_PTR_IN(
2815 MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
2816 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2817 UA_MANDATORY,
2818 UA_ALLOC_AND_COPY),
2819 UVERBS_ATTR_PTR_OUT(
2820 MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
2821 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2822 UA_MANDATORY));
2823
2824DECLARE_UVERBS_NAMED_METHOD(
2825 MLX5_IB_METHOD_DEVX_OBJ_QUERY,
2826 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
2827			UVERBS_IDR_ANY_OBJECT,
2828			UVERBS_ACCESS_READ,
2829 UA_MANDATORY),
2830 UVERBS_ATTR_PTR_IN(
2831 MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
2832 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2833 UA_MANDATORY,
2834 UA_ALLOC_AND_COPY),
2835 UVERBS_ATTR_PTR_OUT(
2836 MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
2837 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2838 UA_MANDATORY));
2839
2840DECLARE_UVERBS_NAMED_METHOD(
2841 MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
2842 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
2843 UVERBS_IDR_ANY_OBJECT,
2844 UVERBS_ACCESS_READ,
2845 UA_MANDATORY),
2846 UVERBS_ATTR_PTR_IN(
2847 MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
2848 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2849 UA_MANDATORY,
2850 UA_ALLOC_AND_COPY),
2851 UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
2852 u16, UA_MANDATORY),
2853 UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
2854 MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
2855 UVERBS_ACCESS_READ,
2856 UA_MANDATORY),
2857 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
2858 UVERBS_ATTR_TYPE(u64),
2859 UA_MANDATORY));
2860
2861DECLARE_UVERBS_NAMED_METHOD(
2862 MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT,
2863 UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE,
2864 MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
2865 UVERBS_ACCESS_READ,
2866 UA_MANDATORY),
2867 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE,
2868 MLX5_IB_OBJECT_DEVX_OBJ,
2869 UVERBS_ACCESS_READ,
2870 UA_OPTIONAL),
2871 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
2872 UVERBS_ATTR_MIN_SIZE(sizeof(u16)),
2873 UA_MANDATORY,
2874 UA_ALLOC_AND_COPY),
2875 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE,
2876 UVERBS_ATTR_TYPE(u64),
2877 UA_OPTIONAL),
2878 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM,
2879 UVERBS_ATTR_TYPE(u32),
2880 UA_OPTIONAL));
2881
2882DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
2883 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
2884 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
2885			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN),
2886 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT));
2887
2888DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
2889 UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
2890 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
2891 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
2892 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
2893			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
2894			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));
2895
2896DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
2897 UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
2898 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
2899 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));
2900
2901
2902DECLARE_UVERBS_NAMED_METHOD(
2903 MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
2904 UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
2905 MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
2906 UVERBS_ACCESS_NEW,
2907 UA_MANDATORY));
2908
2909DECLARE_UVERBS_NAMED_OBJECT(
2910 MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
2911 UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
2912 devx_hot_unplug_async_cmd_event_file,
2913 &devx_async_cmd_event_fops, "[devx_async_cmd]",
2914 O_RDONLY),
2915 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));
2916
2917DECLARE_UVERBS_NAMED_METHOD(
2918 MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC,
2919 UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE,
2920 MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
2921 UVERBS_ACCESS_NEW,
2922 UA_MANDATORY),
2923 UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
2924 enum mlx5_ib_uapi_devx_create_event_channel_flags,
2925 UA_MANDATORY));
2926
2927DECLARE_UVERBS_NAMED_OBJECT(
2928 MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
2929 UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_event_file),
2930 devx_hot_unplug_async_event_file,
2931 &devx_async_event_fops, "[devx_async_event]",
2932 O_RDONLY),
2933 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC));
2934
2935static bool devx_is_supported(struct ib_device *device)
2936{
2937	struct mlx5_ib_dev *dev = to_mdev(device);
2938
2939	return MLX5_CAP_GEN(dev->mdev, log_max_uctx);
2940}
2941
2942const struct uapi_definition mlx5_ib_devx_defs[] = {
2943 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
2944 MLX5_IB_OBJECT_DEVX,
2945 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
2946 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
2947 MLX5_IB_OBJECT_DEVX_OBJ,
2948 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
2949 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
2950 MLX5_IB_OBJECT_DEVX_UMEM,
2951 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
2952 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
2953 MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
2954 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
2955 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
2956 MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
2957 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
2958 {},
2959};