blob: fddefb29efd7694f6a9a24a6c78b4dc1fca8281e [file] [log] [blame]
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2/*
3 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
4 */
5
6#include <rdma/ib_user_verbs.h>
7#include <rdma/ib_verbs.h>
8#include <rdma/uverbs_types.h>
9#include <rdma/uverbs_ioctl.h>
David Brazdil0f672f62019-12-10 10:32:29 +000010#include <rdma/uverbs_std_types.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000011#include <rdma/mlx5_user_ioctl_cmds.h>
David Brazdil0f672f62019-12-10 10:32:29 +000012#include <rdma/mlx5_user_ioctl_verbs.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000013#include <rdma/ib_umem.h>
14#include <linux/mlx5/driver.h>
15#include <linux/mlx5/fs.h>
Olivier Deprez0e641232021-09-23 10:07:05 +020016#include <linux/mlx5/eswitch.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000017#include "mlx5_ib.h"
18
19#define UVERBS_MODULE_NAME mlx5_ib
20#include <rdma/uverbs_named_ioctl.h>
21
David Brazdil0f672f62019-12-10 10:32:29 +000022static int
23mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type,
24 enum mlx5_flow_namespace_type *namespace)
25{
26 switch (table_type) {
27 case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX:
28 *namespace = MLX5_FLOW_NAMESPACE_BYPASS;
29 break;
30 case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX:
31 *namespace = MLX5_FLOW_NAMESPACE_EGRESS;
32 break;
33 case MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB:
34 *namespace = MLX5_FLOW_NAMESPACE_FDB;
35 break;
36 case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX:
37 *namespace = MLX5_FLOW_NAMESPACE_RDMA_RX;
38 break;
39 default:
40 return -EINVAL;
41 }
42
43 return 0;
44}
45
/*
 * Per-enum attribute specs for MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE.
 * Only the NORMAL type carries payload (a u16 priority); the other
 * types take no data.
 */
static const struct uverbs_attr_spec mlx5_ib_flow_type[] = {
	[MLX5_IB_FLOW_TYPE_NORMAL] = {
		.type = UVERBS_ATTR_TYPE_PTR_IN,
		.u.ptr = {
			.len = sizeof(u16), /* data is priority */
			.min_len = sizeof(u16),
		}
	},
	[MLX5_IB_FLOW_TYPE_SNIFFER] = {
		.type = UVERBS_ATTR_TYPE_PTR_IN,
		UVERBS_ATTR_NO_DATA(),
	},
	[MLX5_IB_FLOW_TYPE_ALL_DEFAULT] = {
		.type = UVERBS_ATTR_TYPE_PTR_IN,
		UVERBS_ATTR_NO_DATA(),
	},
	[MLX5_IB_FLOW_TYPE_MC_DEFAULT] = {
		.type = UVERBS_ATTR_TYPE_PTR_IN,
		UVERBS_ATTR_NO_DATA(),
	},
};
67
/* Upper bound on flow-action uobjects accepted per created flow */
#define MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS 2
/*
 * ioctl handler for MLX5_IB_METHOD_CREATE_FLOW: validate the requested
 * destination against the matcher's namespace, collect optional counters
 * and flow actions, then install a raw flow-steering rule.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_flow_context flow_context = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
	struct mlx5_ib_flow_handler *flow_handler;
	struct mlx5_ib_flow_matcher *fs_matcher;
	struct ib_uobject **arr_flow_actions;
	struct ib_uflow_resources *uflow_res;
	struct mlx5_flow_act flow_act = {};
	void *devx_obj;
	int dest_id, dest_type;
	void *cmd_in;
	int inlen;
	bool dest_devx, dest_qp;
	struct ib_qp *qp = NULL;
	struct ib_uobject *uobj =
		uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_CREATE_FLOW_HANDLE);
	struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	int len, ret, i;
	u32 counter_id = 0;

	/* Raw flow steering bypasses the normal stack; require CAP_NET_RAW */
	if (!capable(CAP_NET_RAW))
		return -EPERM;

	dest_devx =
		uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
	dest_qp = uverbs_attr_is_valid(attrs,
				       MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);

	fs_matcher = uverbs_attr_get_obj(attrs,
					 MLX5_IB_ATTR_CREATE_FLOW_MATCHER);
	/* BYPASS requires exactly one of DEVX / QP as the destination */
	if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS &&
	    ((dest_devx && dest_qp) || (!dest_devx && !dest_qp)))
		return -EINVAL;

	/* Allow only DEVX object as dest when inserting to FDB */
	if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && !dest_devx)
		return -EINVAL;

	/* Allow only DEVX object or QP as dest when inserting to RDMA_RX */
	if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) &&
	    ((!dest_devx && !dest_qp) || (dest_devx && dest_qp)))
		return -EINVAL;

	if (dest_devx) {
		devx_obj = uverbs_attr_get_obj(
			attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
		if (IS_ERR(devx_obj))
			return PTR_ERR(devx_obj);

		/* Verify that the given DEVX object is a flow
		 * steering destination.
		 */
		if (!mlx5_ib_devx_is_flow_dest(devx_obj, &dest_id, &dest_type))
			return -EINVAL;
		/* Allow only flow table as dest when inserting to FDB or RDMA_RX */
		if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB ||
		     fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) &&
		    dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
			return -EINVAL;
	} else if (dest_qp) {
		struct mlx5_ib_qp *mqp;

		qp = uverbs_attr_get_obj(attrs,
					 MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
		if (IS_ERR(qp))
			return PTR_ERR(qp);

		/* Only raw-packet QPs can terminate steered traffic */
		if (qp->qp_type != IB_QPT_RAW_PACKET)
			return -EINVAL;

		mqp = to_mqp(qp);
		if (mqp->flags & MLX5_IB_QP_RSS)
			dest_id = mqp->rss_qp.tirn;
		else
			dest_id = mqp->raw_packet_qp.rq.tirn;
		dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	} else {
		/* No explicit destination: egress to the port.
		 * NOTE(review): dest_id stays uninitialized here — presumably
		 * mlx5_ib_raw_fs_rule_add() ignores it for PORT; confirm.
		 */
		dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
	}

	/* Optional DEVX counter (array bounded to one entry by the spec) */
	len = uverbs_attr_get_uobjs_arr(attrs,
		MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX, &arr_flow_actions);
	if (len) {
		devx_obj = arr_flow_actions[0]->object;

		if (!mlx5_ib_devx_is_flow_counter(devx_obj, &counter_id))
			return -EINVAL;
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
	}

	/* A TIR destination is receive-side only; reject it for EGRESS */
	if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
	    fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS)
		return -EINVAL;

	/* Match value was copied into a kernel buffer by UA_ALLOC_AND_COPY */
	cmd_in = uverbs_attr_get_alloced_ptr(
		attrs, MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
	inlen = uverbs_attr_get_len(attrs,
				    MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);

	uflow_res = flow_resources_alloc(MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS);
	if (!uflow_res)
		return -ENOMEM;

	/* Fold each supplied flow action into flow_act and track the uobject
	 * so its lifetime is tied to the flow.
	 */
	len = uverbs_attr_get_uobjs_arr(attrs,
		MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS, &arr_flow_actions);
	for (i = 0; i < len; i++) {
		struct mlx5_ib_flow_action *maction =
			to_mflow_act(arr_flow_actions[i]->object);

		ret = parse_flow_flow_action(maction, false, &flow_act);
		if (ret)
			goto err_out;
		flow_resources_add(uflow_res, IB_FLOW_SPEC_ACTION_HANDLE,
				   arr_flow_actions[i]->object);
	}

	/* Optional flow tag: copy succeeds only if the attr was supplied */
	ret = uverbs_copy_from(&flow_context.flow_tag, attrs,
			       MLX5_IB_ATTR_CREATE_FLOW_TAG);
	if (!ret) {
		if (flow_context.flow_tag >= BIT(24)) {
			ret = -EINVAL;
			goto err_out;
		}
		flow_context.flags |= FLOW_CONTEXT_HAS_TAG;
	}

	flow_handler = mlx5_ib_raw_fs_rule_add(dev, fs_matcher,
					       &flow_context,
					       &flow_act,
					       counter_id,
					       cmd_in, inlen,
					       dest_id, dest_type);
	if (IS_ERR(flow_handler)) {
		ret = PTR_ERR(flow_handler);
		goto err_out;
	}

	/* Publish the flow on the uobject; ownership of uflow_res moves too */
	ib_set_flow(uobj, &flow_handler->ibflow, qp, &dev->ib_dev, uflow_res);

	return 0;
err_out:
	ib_uverbs_flow_resources_free(uflow_res);
	return ret;
}
214
215static int flow_matcher_cleanup(struct ib_uobject *uobject,
David Brazdil0f672f62019-12-10 10:32:29 +0000216 enum rdma_remove_reason why,
217 struct uverbs_attr_bundle *attrs)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000218{
219 struct mlx5_ib_flow_matcher *obj = uobject->object;
220 int ret;
221
222 ret = ib_destroy_usecnt(&obj->usecnt, why, uobject);
223 if (ret)
224 return ret;
225
226 kfree(obj);
227 return 0;
228}
229
/*
 * Determine the flow namespace (obj->ns_type) for a new matcher from the
 * user-supplied attributes. FT_TYPE (new API) takes precedence over
 * FLOW_FLAGS (legacy); supplying both is rejected. With neither, the
 * default is the NIC RX bypass namespace.
 */
static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs,
			      struct mlx5_ib_flow_matcher *obj)
{
	enum mlx5_ib_uapi_flow_table_type ft_type =
		MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX;
	u32 flags;
	int err;

	/* New users should use MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE and older
	 * users should switch to it. We leave this to not break userspace
	 */
	if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE) &&
	    uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS))
		return -EINVAL;

	if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE)) {
		err = uverbs_get_const(&ft_type, attrs,
				       MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE);
		if (err)
			return err;

		err = mlx5_ib_ft_type_to_namespace(ft_type, &obj->ns_type);
		if (err)
			return err;

		return 0;
	}

	if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS)) {
		/* Legacy path: only the EGRESS flag is recognized */
		err = uverbs_get_flags32(&flags, attrs,
					 MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
					 IB_FLOW_ATTR_FLAGS_EGRESS);
		if (err)
			return err;

		if (flags) {
			/* Return value ignored: NIC_TX is a constant input
			 * that always maps successfully.
			 */
			mlx5_ib_ft_type_to_namespace(
				MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX,
				&obj->ns_type);
			return 0;
		}
	}

	/* Neither attribute supplied: default to NIC RX bypass */
	obj->ns_type = MLX5_FLOW_NAMESPACE_BYPASS;

	return 0;
}
277
/*
 * ioctl handler for MLX5_IB_METHOD_FLOW_MATCHER_CREATE: allocate a flow
 * matcher object, copy in the match mask/criteria and flow type, resolve
 * its namespace, and attach it to the new uobject.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE);
	struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	struct mlx5_ib_flow_matcher *obj;
	int err;

	obj = kzalloc(sizeof(struct mlx5_ib_flow_matcher), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/* Actual mask length may be shorter than the full match params */
	obj->mask_len = uverbs_attr_get_len(
		attrs, MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
	err = uverbs_copy_from(&obj->matcher_mask,
			       attrs,
			       MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
	if (err)
		goto end;

	obj->flow_type = uverbs_attr_get_enum_id(
		attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE);

	/* Only the NORMAL flow type carries data (a u16 priority) */
	if (obj->flow_type == MLX5_IB_FLOW_TYPE_NORMAL) {
		err = uverbs_copy_from(&obj->priority,
				       attrs,
				       MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE);
		if (err)
			goto end;
	}

	err = uverbs_copy_from(&obj->match_criteria_enable,
			       attrs,
			       MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA);
	if (err)
		goto end;

	err = mlx5_ib_matcher_ns(attrs, obj);
	if (err)
		goto end;

	/* FDB steering is only meaningful with eswitch offloads enabled */
	if (obj->ns_type == MLX5_FLOW_NAMESPACE_FDB &&
	    mlx5_eswitch_mode(dev->mdev->priv.eswitch) !=
	    MLX5_ESWITCH_OFFLOADS) {
		err = -EINVAL;
		goto end;
	}

	uobj->object = obj;
	obj->mdev = dev->mdev;
	atomic_set(&obj->usecnt, 0);
	return 0;

end:
	kfree(obj);
	return err;
}
336
David Brazdil0f672f62019-12-10 10:32:29 +0000337void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
338{
339 switch (maction->flow_action_raw.sub_type) {
340 case MLX5_IB_FLOW_ACTION_MODIFY_HEADER:
341 mlx5_modify_header_dealloc(maction->flow_action_raw.dev->mdev,
342 maction->flow_action_raw.modify_hdr);
343 break;
344 case MLX5_IB_FLOW_ACTION_PACKET_REFORMAT:
345 mlx5_packet_reformat_dealloc(maction->flow_action_raw.dev->mdev,
346 maction->flow_action_raw.pkt_reformat);
347 break;
348 case MLX5_IB_FLOW_ACTION_DECAP:
349 break;
350 default:
351 break;
352 }
353}
354
355static struct ib_flow_action *
356mlx5_ib_create_modify_header(struct mlx5_ib_dev *dev,
357 enum mlx5_ib_uapi_flow_table_type ft_type,
358 u8 num_actions, void *in)
359{
360 enum mlx5_flow_namespace_type namespace;
361 struct mlx5_ib_flow_action *maction;
362 int ret;
363
364 ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace);
365 if (ret)
366 return ERR_PTR(-EINVAL);
367
368 maction = kzalloc(sizeof(*maction), GFP_KERNEL);
369 if (!maction)
370 return ERR_PTR(-ENOMEM);
371
372 maction->flow_action_raw.modify_hdr =
373 mlx5_modify_header_alloc(dev->mdev, namespace, num_actions, in);
374
375 if (IS_ERR(maction->flow_action_raw.modify_hdr)) {
376 ret = PTR_ERR(maction->flow_action_raw.modify_hdr);
377 kfree(maction);
378 return ERR_PTR(ret);
379 }
380 maction->flow_action_raw.sub_type =
381 MLX5_IB_FLOW_ACTION_MODIFY_HEADER;
382 maction->flow_action_raw.dev = dev;
383
384 return &maction->ib_action;
385}
386
387static bool mlx5_ib_modify_header_supported(struct mlx5_ib_dev *dev)
388{
389 return MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
390 max_modify_header_actions) ||
391 MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, max_modify_header_actions);
392}
393
/*
 * ioctl handler for MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER:
 * validate device support, size the user-supplied PRM action array, and
 * create the modify-header flow action on the new uobject.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE);
	struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
	enum mlx5_ib_uapi_flow_table_type ft_type;
	struct ib_flow_action *action;
	int num_actions;
	void *in;
	int ret;

	if (!mlx5_ib_modify_header_supported(mdev))
		return -EOPNOTSUPP;

	/* Mandatory UA_ALLOC_AND_COPY attr: data already in kernel memory */
	in = uverbs_attr_get_alloced_ptr(attrs,
		MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM);

	/* Number of fixed-size PRM action entries in the buffer */
	num_actions = uverbs_attr_ptr_get_array_size(
		attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
		MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto));
	if (num_actions < 0)
		return num_actions;

	ret = uverbs_get_const(&ft_type, attrs,
			       MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE);
	if (ret)
		return ret;
	action = mlx5_ib_create_modify_header(mdev, ft_type, num_actions, in);
	if (IS_ERR(action))
		return PTR_ERR(action);

	uverbs_flow_action_fill_action(action, uobj, &mdev->ib_dev,
				       IB_FLOW_ACTION_UNSPECIFIED);

	return 0;
}
431
432static bool mlx5_ib_flow_action_packet_reformat_valid(struct mlx5_ib_dev *ibdev,
433 u8 packet_reformat_type,
434 u8 ft_type)
435{
436 switch (packet_reformat_type) {
437 case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
438 if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX)
439 return MLX5_CAP_FLOWTABLE(ibdev->mdev,
440 encap_general_header);
441 break;
442 case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
443 if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX)
444 return MLX5_CAP_FLOWTABLE_NIC_TX(ibdev->mdev,
445 reformat_l2_to_l3_tunnel);
446 break;
447 case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
448 if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX)
449 return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev,
450 reformat_l3_tunnel_to_l2);
451 break;
452 case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2:
453 if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX)
454 return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev, decap);
455 break;
456 default:
457 break;
458 }
459
460 return false;
461}
462
463static int mlx5_ib_dv_to_prm_packet_reforamt_type(u8 dv_prt, u8 *prm_prt)
464{
465 switch (dv_prt) {
466 case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
467 *prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL;
468 break;
469 case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
470 *prm_prt = MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
471 break;
472 case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
473 *prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
474 break;
475 default:
476 return -EINVAL;
477 }
478
479 return 0;
480}
481
482static int mlx5_ib_flow_action_create_packet_reformat_ctx(
483 struct mlx5_ib_dev *dev,
484 struct mlx5_ib_flow_action *maction,
485 u8 ft_type, u8 dv_prt,
486 void *in, size_t len)
487{
488 enum mlx5_flow_namespace_type namespace;
489 u8 prm_prt;
490 int ret;
491
492 ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace);
493 if (ret)
494 return ret;
495
496 ret = mlx5_ib_dv_to_prm_packet_reforamt_type(dv_prt, &prm_prt);
497 if (ret)
498 return ret;
499
500 maction->flow_action_raw.pkt_reformat =
501 mlx5_packet_reformat_alloc(dev->mdev, prm_prt, len,
502 in, namespace);
503 if (IS_ERR(maction->flow_action_raw.pkt_reformat)) {
504 ret = PTR_ERR(maction->flow_action_raw.pkt_reformat);
505 return ret;
506 }
507
508 maction->flow_action_raw.sub_type =
509 MLX5_IB_FLOW_ACTION_PACKET_REFORMAT;
510 maction->flow_action_raw.dev = dev;
511
512 return 0;
513}
514
/*
 * ioctl handler for MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT:
 * create an encap/decap flow action. Pure L2 decap needs no firmware
 * context; the other reformat types allocate one from the user-supplied
 * header buffer.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
		MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE);
	struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
	enum mlx5_ib_uapi_flow_action_packet_reformat_type dv_prt;
	enum mlx5_ib_uapi_flow_table_type ft_type;
	struct mlx5_ib_flow_action *maction;
	int ret;

	ret = uverbs_get_const(&ft_type, attrs,
			       MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE);
	if (ret)
		return ret;

	ret = uverbs_get_const(&dv_prt, attrs,
			       MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE);
	if (ret)
		return ret;

	/* Reject combinations the device caps do not support */
	if (!mlx5_ib_flow_action_packet_reformat_valid(mdev, dv_prt, ft_type))
		return -EOPNOTSUPP;

	maction = kzalloc(sizeof(*maction), GFP_KERNEL);
	if (!maction)
		return -ENOMEM;

	if (dv_prt ==
	    MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2) {
		/* Plain decap: no firmware reformat context is needed */
		maction->flow_action_raw.sub_type =
			MLX5_IB_FLOW_ACTION_DECAP;
		maction->flow_action_raw.dev = mdev;
	} else {
		void *in;
		int len;

		/* Optional attr: may be an ERR_PTR if absent/invalid */
		in = uverbs_attr_get_alloced_ptr(attrs,
			MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF);
		if (IS_ERR(in)) {
			ret = PTR_ERR(in);
			goto free_maction;
		}

		len = uverbs_attr_get_len(attrs,
			MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF);

		ret = mlx5_ib_flow_action_create_packet_reformat_ctx(mdev,
			maction, ft_type, dv_prt, in, len);
		if (ret)
			goto free_maction;
	}

	uverbs_flow_action_fill_action(&maction->ib_action, uobj, &mdev->ib_dev,
				       IB_FLOW_ACTION_UNSPECIFIED);
	return 0;

free_maction:
	kfree(maction);
	return ret;
}
576
/*
 * Attribute layout for MLX5_IB_METHOD_CREATE_FLOW: a new flow handle,
 * a mandatory match value and matcher, an optional destination (QP or
 * DEVX object), plus optional flow actions, flow tag, and one DEVX
 * counter.
 */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_CREATE_FLOW,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
			UVERBS_OBJECT_FLOW,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE,
		UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_MATCHER,
			MLX5_IB_OBJECT_FLOW_MATCHER,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_QP,
			UVERBS_OBJECT_QP,
			UVERBS_ACCESS_READ),
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_READ),
	UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS,
			     UVERBS_OBJECT_FLOW_ACTION,
			     UVERBS_ACCESS_READ, 1,
			     MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS,
			     UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_TAG,
			   UVERBS_ATTR_TYPE(u32),
			   UA_OPTIONAL),
	UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX,
			     MLX5_IB_OBJECT_DEVX_OBJ,
			     UVERBS_ACCESS_READ, 1, 1,
			     UA_OPTIONAL));

/* Destroy method: takes only the flow handle to tear down */
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DESTROY_FLOW,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
			UVERBS_OBJECT_FLOW,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

/* Attach the driver's create/destroy methods to the core FLOW object */
ADD_UVERBS_METHODS(mlx5_ib_fs,
		   UVERBS_OBJECT_FLOW,
		   &UVERBS_METHOD(MLX5_IB_METHOD_CREATE_FLOW),
		   &UVERBS_METHOD(MLX5_IB_METHOD_DESTROY_FLOW));
622
/*
 * Modify-header flow action: new handle, PRM-encoded action array
 * (variable length, whole multiples of one action entry), and the
 * target flow table type.
 */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE,
			UVERBS_OBJECT_FLOW_ACTION,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
			   UVERBS_ATTR_MIN_SIZE(MLX5_UN_SZ_BYTES(
				   set_action_in_add_action_in_auto)),
			   UA_MANDATORY,
			   UA_ALLOC_AND_COPY),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE,
			     enum mlx5_ib_uapi_flow_table_type,
			     UA_MANDATORY));

/*
 * Packet-reformat flow action: new handle, optional header data buffer
 * (not needed for plain decap), reformat type, and flow table type.
 */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE,
			UVERBS_OBJECT_FLOW_ACTION,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF,
			   UVERBS_ATTR_MIN_SIZE(1),
			   UA_ALLOC_AND_COPY,
			   UA_OPTIONAL),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE,
			     enum mlx5_ib_uapi_flow_action_packet_reformat_type,
			     UA_MANDATORY),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE,
			     enum mlx5_ib_uapi_flow_table_type,
			     UA_MANDATORY));

/* Attach both driver-specific methods to the core FLOW_ACTION object */
ADD_UVERBS_METHODS(
	mlx5_ib_flow_actions,
	UVERBS_OBJECT_FLOW_ACTION,
	&UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER),
	&UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT));
660
/*
 * Flow matcher creation: new handle, match mask and criteria, a flow
 * type (enum-typed attr, see mlx5_ib_flow_type above), and optionally
 * either legacy flow flags or the newer flow table type — the handler
 * rejects supplying both.
 */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_FLOW_MATCHER_CREATE,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE,
			MLX5_IB_OBJECT_FLOW_MATCHER,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK,
		UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)),
		UA_MANDATORY),
	UVERBS_ATTR_ENUM_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE,
			    mlx5_ib_flow_type,
			    UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA,
			   UVERBS_ATTR_TYPE(u8),
			   UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
			     enum ib_flow_flags,
			     UA_OPTIONAL),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE,
			     enum mlx5_ib_uapi_flow_table_type,
			     UA_OPTIONAL));

/* Matcher destroy: handle only */
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_FLOW_MATCHER_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_DESTROY_HANDLE,
			MLX5_IB_OBJECT_FLOW_MATCHER,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

/* Driver-private uverbs object backing flow matchers */
DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_FLOW_MATCHER,
			    UVERBS_TYPE_ALLOC_IDR(flow_matcher_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_CREATE),
			    &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_DESTROY));

/*
 * uapi definition chain exported to the uverbs core: the matcher
 * object plus the driver method trees grafted onto the core FLOW and
 * FLOW_ACTION objects. Zero-terminated.
 */
const struct uapi_definition mlx5_ib_flow_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_FLOW_MATCHER),
	UAPI_DEF_CHAIN_OBJ_TREE(
		UVERBS_OBJECT_FLOW,
		&mlx5_ib_fs),
	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
				&mlx5_ib_flow_actions),
	{},
};