/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _MLX5_FS_
#define _MLX5_FS_

#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>

#define MLX5_FS_DEFAULT_FLOW_TAG 0x0

enum {
	MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO	= 1 << 16,
	MLX5_FLOW_CONTEXT_ACTION_ENCRYPT	= 1 << 17,
	MLX5_FLOW_CONTEXT_ACTION_DECRYPT	= 1 << 18,
};

enum {
	MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT = BIT(0),
	MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1),
	MLX5_FLOW_TABLE_TERMINATION = BIT(2),
};

#define LEFTOVERS_RULE_NUM	2
static inline void build_leftovers_ft_param(int *priority,
					    int *n_ent,
					    int *n_grp)
{
	*priority = 0; /* Priority of leftovers_prio-0 */
	*n_ent = LEFTOVERS_RULE_NUM;
	*n_grp = LEFTOVERS_RULE_NUM;
}

enum mlx5_flow_namespace_type {
	MLX5_FLOW_NAMESPACE_BYPASS,
	MLX5_FLOW_NAMESPACE_LAG,
	MLX5_FLOW_NAMESPACE_OFFLOADS,
	MLX5_FLOW_NAMESPACE_ETHTOOL,
	MLX5_FLOW_NAMESPACE_KERNEL,
	MLX5_FLOW_NAMESPACE_LEFTOVERS,
	MLX5_FLOW_NAMESPACE_ANCHOR,
	MLX5_FLOW_NAMESPACE_FDB,
	MLX5_FLOW_NAMESPACE_ESW_EGRESS,
	MLX5_FLOW_NAMESPACE_ESW_INGRESS,
	MLX5_FLOW_NAMESPACE_SNIFFER_RX,
	MLX5_FLOW_NAMESPACE_SNIFFER_TX,
	MLX5_FLOW_NAMESPACE_EGRESS,
	MLX5_FLOW_NAMESPACE_RDMA_RX,
	MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL,
};

enum {
	FDB_BYPASS_PATH,
	FDB_FAST_PATH,
	FDB_SLOW_PATH,
};

struct mlx5_pkt_reformat;
struct mlx5_modify_hdr;
struct mlx5_flow_table;
struct mlx5_flow_group;
struct mlx5_flow_namespace;
struct mlx5_flow_handle;

enum {
	FLOW_CONTEXT_HAS_TAG = BIT(0),
};

struct mlx5_flow_context {
	u32 flags;
	u32 flow_tag;
	u32 flow_source;
};

struct mlx5_flow_spec {
	u8 match_criteria_enable;
	u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
	u32 match_value[MLX5_ST_SZ_DW(fte_match_param)];
	struct mlx5_flow_context flow_context;
};

enum {
	MLX5_FLOW_DEST_VPORT_VHCA_ID = BIT(0),
	MLX5_FLOW_DEST_VPORT_REFORMAT_ID = BIT(1),
};

struct mlx5_flow_destination {
	enum mlx5_flow_destination_type type;
	union {
		u32 tir_num;
		u32 ft_num;
		struct mlx5_flow_table *ft;
		u32 counter_id;
		struct {
			u16 num;
			u16 vhca_id;
			struct mlx5_pkt_reformat *pkt_reformat;
			u8 flags;
		} vport;
	};
};

struct mod_hdr_tbl {
	struct mutex lock; /* protects hlist */
	DECLARE_HASHTABLE(hlist, 8);
};

struct mlx5_flow_namespace *
mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev, int n);
struct mlx5_flow_namespace *
mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
			enum mlx5_flow_namespace_type type);
struct mlx5_flow_namespace *
mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
				  enum mlx5_flow_namespace_type type,
				  int vport);

struct mlx5_flow_table *
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
				    int prio,
				    int num_flow_table_entries,
				    int max_num_groups,
				    u32 level,
				    u32 flags);

struct mlx5_flow_table_attr {
	int prio;
	int max_fte;
	u32 level;
	u32 flags;
};

struct mlx5_flow_table *
mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
		       struct mlx5_flow_table_attr *ft_attr);
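
/*
 * Usage sketch (illustrative only, not part of this header's API contract):
 * creating a table in the kernel RX namespace. The prio/level/max_fte
 * values below are arbitrary and error handling is abbreviated.
 *
 *	struct mlx5_flow_table_attr ft_attr = {};
 *	struct mlx5_flow_namespace *ns;
 *	struct mlx5_flow_table *ft;
 *
 *	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_KERNEL);
 *	if (!ns)
 *		return -EOPNOTSUPP;
 *	ft_attr.prio = 0;
 *	ft_attr.max_fte = 64;
 *	ft_attr.level = 0;
 *	ft = mlx5_create_flow_table(ns, &ft_attr);
 *	if (IS_ERR(ft))
 *		return PTR_ERR(ft);
 */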

struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
			     int prio,
			     int num_flow_table_entries,
			     u32 level, u16 vport);
struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
					       struct mlx5_flow_namespace *ns,
					       int prio, u32 level);
int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);

/* inbox should be set with the following values:
 * start_flow_index
 * end_flow_index
 * match_criteria_enable
 * match_criteria
 */
struct mlx5_flow_group *
mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
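
/*
 * Usage sketch (illustrative only): building the inbox for a group that
 * matches on the outer Ethertype and spans the first two entries of "ft".
 * The index range and match criteria are arbitrary examples.
 *
 *	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
 *	struct mlx5_flow_group *fg;
 *	void *match_criteria;
 *	u32 *in;
 *
 *	in = kvzalloc(inlen, GFP_KERNEL);
 *	if (!in)
 *		return -ENOMEM;
 *	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
 *		 MLX5_MATCH_OUTER_HEADERS);
 *	match_criteria = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
 *	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
 *			 outer_headers.ethertype);
 *	MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
 *	MLX5_SET(create_flow_group_in, in, end_flow_index, 1);
 *	fg = mlx5_create_flow_group(ft, in);
 *	kvfree(in);
 */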

struct mlx5_fs_vlan {
	u16 ethtype;
	u16 vid;
	u8 prio;
};

#define MLX5_FS_VLAN_DEPTH	2

enum {
	FLOW_ACT_NO_APPEND = BIT(0),
};

struct mlx5_flow_act {
	u32 action;
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5_pkt_reformat *pkt_reformat;
	uintptr_t esp_id;
	u32 flags;
	struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
	struct ib_counters *counters;
};

#define MLX5_DECLARE_FLOW_ACT(name) \
	struct mlx5_flow_act name = { .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\
				      .flags = 0, }

/* Single destination per rule.
 * Group ID is implied by the match criteria.
 */
struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		    const struct mlx5_flow_spec *spec,
		    struct mlx5_flow_act *flow_act,
		    struct mlx5_flow_destination *dest,
		    int num_dest);
void mlx5_del_flow_rules(struct mlx5_flow_handle *fr);
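
/*
 * Usage sketch (illustrative only): steering UDP traffic in "ft" to a TIR.
 * "tirn" stands for a TIR number obtained elsewhere; error handling is
 * abbreviated.
 *
 *	struct mlx5_flow_destination dest = {};
 *	MLX5_DECLARE_FLOW_ACT(flow_act);
 *	struct mlx5_flow_handle *rule;
 *	struct mlx5_flow_spec *spec;
 *
 *	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 *	if (!spec)
 *		return -ENOMEM;
 *	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 *	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 *			 outer_headers.ip_protocol);
 *	MLX5_SET(fte_match_param, spec->match_value,
 *		 outer_headers.ip_protocol, IPPROTO_UDP);
 *	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
 *	dest.tir_num = tirn;
 *	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
 *	kvfree(spec);
 */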

int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
				 struct mlx5_flow_destination *new_dest,
				 struct mlx5_flow_destination *old_dest);

struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
			  u64 *bytes, u64 *packets, u64 *lastuse);
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
		  u64 *packets, u64 *bytes);
u32 mlx5_fc_id(struct mlx5_fc *counter);
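
/*
 * Usage sketch (illustrative only): counting dropped packets. The counter is
 * attached as the rule's destination together with the DROP | COUNT actions,
 * and the cached statistics are read back later (e.g. from periodic stats
 * work). "ft" and "spec" are assumed to exist as in the sketches above.
 *
 *	struct mlx5_flow_destination dest = {};
 *	struct mlx5_flow_act flow_act = {};
 *	struct mlx5_flow_handle *rule;
 *	struct mlx5_fc *counter;
 *	u64 bytes, packets, lastuse;
 *
 *	counter = mlx5_fc_create(dev, true);
 *	if (IS_ERR(counter))
 *		return PTR_ERR(counter);
 *	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP |
 *			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
 *	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
 *	dest.counter_id = mlx5_fc_id(counter);
 *	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
 *	...
 *	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
 */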

int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);

struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
						 u8 ns_type, u8 num_actions,
						 void *modify_actions);
void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
				struct mlx5_modify_hdr *modify_hdr);

struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
						     int reformat_type,
						     size_t size,
						     void *reformat_data,
						     enum mlx5_flow_namespace_type ns_type);
void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
				  struct mlx5_pkt_reformat *reformat);
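
/*
 * Usage sketch (illustrative only): allocating an encap context and hanging
 * it off a flow action. The reformat type is one example of the
 * MLX5_REFORMAT_TYPE_* values from mlx5_ifc.h; "encap_header"/"encap_size"
 * are placeholders for a caller-built tunnel header.
 *
 *	struct mlx5_pkt_reformat *reformat;
 *
 *	reformat = mlx5_packet_reformat_alloc(dev,
 *					      MLX5_REFORMAT_TYPE_L2_TO_VXLAN,
 *					      encap_size, encap_header,
 *					      MLX5_FLOW_NAMESPACE_FDB);
 *	if (IS_ERR(reformat))
 *		return PTR_ERR(reformat);
 *	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
 *	flow_act.pkt_reformat = reformat;
 */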

#endif