/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>
#include <net/devlink.h>

#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
#include "fs_ft_pool.h"
#include "diag/fs_tracepoint.h"
#include "devlink.h"

#define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
					 sizeof(struct init_tree_node))

#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
		 ...) {.type = FS_TYPE_PRIO,\
	.min_ft_level = min_level_val,\
	.num_levels = num_levels_val,\
	.num_leaf_prios = num_prios_val,\
	.caps = caps_val,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
	ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
		 __VA_ARGS__)\

#define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE,	\
	.def_miss_action = def_miss_act,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
				   sizeof(long))

#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))

#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
			       .caps = (long[]) {__VA_ARGS__} }

#define FS_CHAINING_CAPS  FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
					   FS_CAP(flow_table_properties_nic_receive.modify_root), \
					   FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
					   FS_CAP(flow_table_properties_nic_receive.flow_table_modify))

#define FS_CHAINING_CAPS_EGRESS                                                \
	FS_REQUIRED_CAPS(                                                      \
		FS_CAP(flow_table_properties_nic_transmit.flow_modify_en),     \
		FS_CAP(flow_table_properties_nic_transmit.modify_root),        \
		FS_CAP(flow_table_properties_nic_transmit                      \
			       .identified_miss_table_mode),                   \
		FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))

#define FS_CHAINING_CAPS_RDMA_TX                                                \
	FS_REQUIRED_CAPS(                                                       \
		FS_CAP(flow_table_properties_nic_transmit_rdma.flow_modify_en), \
		FS_CAP(flow_table_properties_nic_transmit_rdma.modify_root),    \
		FS_CAP(flow_table_properties_nic_transmit_rdma                  \
			       .identified_miss_table_mode),                    \
		FS_CAP(flow_table_properties_nic_transmit_rdma                  \
			       .flow_table_modify))

#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1

#define RDMA_RX_COUNTERS_PRIO_NUM_LEVELS 1
#define RDMA_TX_COUNTERS_PRIO_NUM_LEVELS 1

#define BY_PASS_PRIO_NUM_LEVELS 1
#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
			   LEFTOVERS_NUM_PRIOS)

#define KERNEL_RX_MACSEC_NUM_PRIOS  1
#define KERNEL_RX_MACSEC_NUM_LEVELS 2
#define KERNEL_RX_MACSEC_MIN_LEVEL (BY_PASS_MIN_LEVEL + KERNEL_RX_MACSEC_NUM_PRIOS)

#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy,
 * IPsec RoCE policy
 */
#define KERNEL_NIC_PRIO_NUM_LEVELS 9
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)

#define KERNEL_NIC_TC_NUM_PRIOS  1
#define KERNEL_NIC_TC_NUM_LEVELS 3

#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)

#define OFFLOADS_MAX_FT 2
#define OFFLOADS_NUM_PRIOS 2
#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS)

#define LAG_PRIO_NUM_LEVELS 1
#define LAG_NUM_PRIOS 1
#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1)

#define KERNEL_TX_IPSEC_NUM_PRIOS  1
#define KERNEL_TX_IPSEC_NUM_LEVELS 2
#define KERNEL_TX_IPSEC_MIN_LEVEL        (KERNEL_TX_IPSEC_NUM_LEVELS)

#define KERNEL_TX_MACSEC_NUM_PRIOS  1
#define KERNEL_TX_MACSEC_NUM_LEVELS 2
#define KERNEL_TX_MACSEC_MIN_LEVEL       (KERNEL_TX_IPSEC_MIN_LEVEL + KERNEL_TX_MACSEC_NUM_PRIOS)

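/* Each *_MIN_LEVEL above reserves a distinct range of flow table levels
 * for its priority, so that tables created in a later priority sit at
 * higher levels than those of the priorities chained before them.
 */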
struct node_caps {
	size_t	arr_sz;
	long	*caps;
};

static struct init_tree_node {
	enum fs_node_type	type;
	struct init_tree_node *children;
	int ar_size;
	struct node_caps caps;
	int min_ft_level;
	int num_leaf_prios;
	int prio;
	int num_levels;
	enum mlx5_flow_table_miss_action def_miss_action;
} root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 8,
	  .children = (struct init_tree_node[]){
		  ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				  ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						    BY_PASS_PRIO_NUM_LEVELS))),
		  ADD_PRIO(0, KERNEL_RX_MACSEC_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				  ADD_MULTIPLE_PRIO(KERNEL_RX_MACSEC_NUM_PRIOS,
						    KERNEL_RX_MACSEC_NUM_LEVELS))),
		  ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				  ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
						    LAG_PRIO_NUM_LEVELS))),
		  ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				  ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
						    OFFLOADS_MAX_FT))),
		  ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				  ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
						    ETHTOOL_PRIO_NUM_LEVELS))),
		  ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				  ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
						    KERNEL_NIC_TC_NUM_LEVELS),
				  ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
						    KERNEL_NIC_PRIO_NUM_LEVELS))),
		  ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				  ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS,
						    LEFTOVERS_NUM_LEVELS))),
		  ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				  ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS,
						    ANCHOR_NUM_LEVELS))),
	}
};

static struct init_tree_node egress_root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 3,
	.children = (struct init_tree_node[]) {
		ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
			 FS_CHAINING_CAPS_EGRESS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, KERNEL_TX_IPSEC_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS_EGRESS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
						  KERNEL_TX_IPSEC_NUM_LEVELS))),
		ADD_PRIO(0, KERNEL_TX_MACSEC_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS_EGRESS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(KERNEL_TX_MACSEC_NUM_PRIOS,
						  KERNEL_TX_MACSEC_NUM_LEVELS))),
	}
};

enum {
	RDMA_RX_IPSEC_PRIO,
	RDMA_RX_COUNTERS_PRIO,
	RDMA_RX_BYPASS_PRIO,
	RDMA_RX_KERNEL_PRIO,
};

#define RDMA_RX_IPSEC_NUM_PRIOS 1
#define RDMA_RX_IPSEC_NUM_LEVELS 2
#define RDMA_RX_IPSEC_MIN_LEVEL  (RDMA_RX_IPSEC_NUM_LEVELS)

#define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS
#define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1)
#define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2)

static struct init_tree_node rdma_rx_root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 4,
	.children = (struct init_tree_node[]) {
		[RDMA_RX_IPSEC_PRIO] =
		ADD_PRIO(0, RDMA_RX_IPSEC_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(RDMA_RX_IPSEC_NUM_PRIOS,
						  RDMA_RX_IPSEC_NUM_LEVELS))),
		[RDMA_RX_COUNTERS_PRIO] =
		ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_RDMA_RX_NUM_COUNTERS_PRIOS,
						  RDMA_RX_COUNTERS_PRIO_NUM_LEVELS))),
		[RDMA_RX_BYPASS_PRIO] =
		ADD_PRIO(0, RDMA_RX_BYPASS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
		[RDMA_RX_KERNEL_PRIO] =
		ADD_PRIO(0, RDMA_RX_KERNEL_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
				ADD_MULTIPLE_PRIO(1, 1))),
	}
};

enum {
	RDMA_TX_COUNTERS_PRIO,
	RDMA_TX_IPSEC_PRIO,
	RDMA_TX_BYPASS_PRIO,
};

#define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS
#define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1)

#define RDMA_TX_IPSEC_NUM_PRIOS 1
#define RDMA_TX_IPSEC_PRIO_NUM_LEVELS 1
#define RDMA_TX_IPSEC_MIN_LEVEL  (RDMA_TX_COUNTERS_MIN_LEVEL + RDMA_TX_IPSEC_NUM_PRIOS)

static struct init_tree_node rdma_tx_root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 3,
	.children = (struct init_tree_node[]) {
		[RDMA_TX_COUNTERS_PRIO] =
		ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_RDMA_TX_NUM_COUNTERS_PRIOS,
						  RDMA_TX_COUNTERS_PRIO_NUM_LEVELS))),
		[RDMA_TX_IPSEC_PRIO] =
		ADD_PRIO(0, RDMA_TX_IPSEC_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(RDMA_TX_IPSEC_NUM_PRIOS,
						  RDMA_TX_IPSEC_PRIO_NUM_LEVELS))),

		[RDMA_TX_BYPASS_PRIO] =
		ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS_RDMA_TX,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(RDMA_TX_BYPASS_MIN_LEVEL,
						  BY_PASS_PRIO_NUM_LEVELS))),
	}
};

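/* Lockdep subclasses for the nested rw_semaphore locking used when
 * several fs_nodes along one path (grandparent -> parent -> child) must
 * be held at the same time.
 */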
enum fs_i_lock_class {
	FS_LOCK_GRANDPARENT,
	FS_LOCK_PARENT,
	FS_LOCK_CHILD
};

static const struct rhashtable_params rhash_fte = {
	.key_len = sizeof_field(struct fs_fte, val),
	.key_offset = offsetof(struct fs_fte, val),
	.head_offset = offsetof(struct fs_fte, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

static const struct rhashtable_params rhash_fg = {
	.key_len = sizeof_field(struct mlx5_flow_group, mask),
	.key_offset = offsetof(struct mlx5_flow_group, mask),
	.head_offset = offsetof(struct mlx5_flow_group, hash),
	.automatic_shrinking = true,
	.min_size = 1,

};

static void del_hw_flow_table(struct fs_node *node);
static void del_hw_flow_group(struct fs_node *node);
static void del_hw_fte(struct fs_node *node);
static void del_sw_flow_table(struct fs_node *node);
static void del_sw_flow_group(struct fs_node *node);
static void del_sw_fte(struct fs_node *node);
static void del_sw_prio(struct fs_node *node);
static void del_sw_ns(struct fs_node *node);
/* Deleting a rule (a destination) is a special case that requires
 * holding the FTE lock for the whole deletion process.
 */
static void del_sw_hw_rule(struct fs_node *node);
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2);
static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
static struct mlx5_flow_rule *
find_flow_rule(struct fs_fte *fte,
	       struct mlx5_flow_destination *dest);

static void tree_init_node(struct fs_node *node,
			   void (*del_hw_func)(struct fs_node *),
			   void (*del_sw_func)(struct fs_node *))
{
	refcount_set(&node->refcount, 1);
	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->children);
	init_rwsem(&node->lock);
	node->del_hw_func = del_hw_func;
	node->del_sw_func = del_sw_func;
	node->active = false;
}

static void tree_add_node(struct fs_node *node, struct fs_node *parent)
{
	if (parent)
		refcount_inc(&parent->refcount);
	node->parent = parent;

	/* Parent is the root */
	if (!parent)
		node->root = node;
	else
		node->root = parent->root;
}

static int tree_get_node(struct fs_node *node)
{
	return refcount_inc_not_zero(&node->refcount);
}

static void nested_down_read_ref_node(struct fs_node *node,
				      enum fs_i_lock_class class)
{
	if (node) {
		down_read_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void nested_down_write_ref_node(struct fs_node *node,
				       enum fs_i_lock_class class)
{
	if (node) {
		down_write_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void down_write_ref_node(struct fs_node *node, bool locked)
{
	if (node) {
		if (!locked)
			down_write(&node->lock);
		refcount_inc(&node->refcount);
	}
}

static void up_read_ref_node(struct fs_node *node)
{
	refcount_dec(&node->refcount);
	up_read(&node->lock);
}

static void up_write_ref_node(struct fs_node *node, bool locked)
{
	refcount_dec(&node->refcount);
	if (!locked)
		up_write(&node->lock);
}

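/* Drop a reference to the node. On the last put the HW object is
 * destroyed first (del_hw_func), the node is unlinked from its parent
 * under the parent's lock, the SW object is freed (del_sw_func), and
 * finally the reference held on the parent is dropped, possibly tearing
 * the parent down in turn.
 */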
static void tree_put_node(struct fs_node *node, bool locked)
{
	struct fs_node *parent_node = node->parent;

	if (refcount_dec_and_test(&node->refcount)) {
		if (node->del_hw_func)
			node->del_hw_func(node);
		if (parent_node) {
			down_write_ref_node(parent_node, locked);
			list_del_init(&node->list);
		}
		node->del_sw_func(node);
		if (parent_node)
			up_write_ref_node(parent_node, locked);
		node = NULL;
	}
	if (!node && parent_node)
		tree_put_node(parent_node, locked);
}

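/* Remove the node only when the caller holds the last reference;
 * otherwise just drop that reference and return -EEXIST.
 */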
static int tree_remove_node(struct fs_node *node, bool locked)
{
	if (refcount_read(&node->refcount) > 1) {
		refcount_dec(&node->refcount);
		return -EEXIST;
	}
	tree_put_node(node, locked);
	return 0;
}

static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
				 unsigned int prio)
{
	struct fs_prio *iter_prio;

	fs_for_each_prio(iter_prio, ns) {
		if (iter_prio->prio == prio)
			return iter_prio;
	}

	return NULL;
}

static bool is_fwd_next_action(u32 action)
{
	return action & (MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
			 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
}

static bool is_fwd_dest_type(enum mlx5_flow_destination_type type)
{
	return type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM ||
		type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE ||
		type == MLX5_FLOW_DESTINATION_TYPE_UPLINK ||
		type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
		type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER ||
		type == MLX5_FLOW_DESTINATION_TYPE_TIR ||
		type == MLX5_FLOW_DESTINATION_TYPE_RANGE ||
		type == MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
}

static bool check_valid_spec(const struct mlx5_flow_spec *spec)
{
	int i;

	for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
		if (spec->match_value[i] & ~spec->match_criteria[i]) {
			pr_warn("mlx5_core: match_value differs from match_criteria\n");
			return false;
		}

	return true;
}

struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
{
	struct fs_node *root;
	struct mlx5_flow_namespace *ns;

	root = node->root;

	if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
		pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
		return NULL;
	}

	ns = container_of(root, struct mlx5_flow_namespace, node);
	return container_of(ns, struct mlx5_flow_root_namespace, ns);
}

static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev->priv.steering;
	return NULL;
}

static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev;
	return NULL;
}

static void del_sw_ns(struct fs_node *node)
{
	kfree(node);
}

static void del_sw_prio(struct fs_node *node)
{
	kfree(node);
}

static void del_hw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;
	int err;

	fs_get_obj(ft, node);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
	trace_mlx5_fs_del_ft(ft);

	if (node->active) {
		err = root->cmds->destroy_flow_table(root, ft);
		if (err)
			mlx5_core_warn(dev, "flow steering can't destroy ft\n");
	}
}

static void del_sw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_table *ft;
	struct fs_prio *prio;

	fs_get_obj(ft, node);

	rhltable_destroy(&ft->fgs_hash);
	if (ft->node.parent) {
		fs_get_obj(prio, ft->node.parent);
		prio->num_ft--;
	}
	kfree(ft);
}

static void modify_fte(struct fs_fte *fte)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	int err;

	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&fte->node);

	root = find_root(&ft->node);
	err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
	if (err)
		mlx5_core_warn(dev,
			       "%s can't del rule fg id=%d fte_index=%d\n",
			       __func__, fg->id, fte->index);
	fte->modify_mask = 0;
}

static void del_sw_hw_rule(struct fs_node *node)
{
	struct mlx5_flow_rule *rule;
	struct fs_fte *fte;

	fs_get_obj(rule, node);
	fs_get_obj(fte, rule->node.parent);
	trace_mlx5_fs_del_rule(rule);
	if (is_fwd_next_action(rule->sw_action)) {
		mutex_lock(&rule->dest_attr.ft->lock);
		list_del(&rule->next_ft);
		mutex_unlock(&rule->dest_attr.ft->lock);
	}

	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) {
		--fte->dests_size;
		fte->modify_mask |=
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
		goto out;
	}

	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT) {
		--fte->dests_size;
		fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
		goto out;
	}

	if (is_fwd_dest_type(rule->dest_attr.type)) {
		--fte->dests_size;
		--fte->fwd_dests;

		if (!fte->fwd_dests)
			fte->action.action &=
				~MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		fte->modify_mask |=
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
		goto out;
	}
out:
	kfree(rule);
}

static void del_hw_fte(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	trace_mlx5_fs_del_fte(fte);
	WARN_ON(fte->dests_size);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
	if (node->active) {
		err = root->cmds->delete_fte(root, ft, fte);
		if (err)
			mlx5_core_warn(dev,
				       "flow steering can't delete fte in index %d of flow group id %d\n",
				       fte->index, fg->id);
		node->active = false;
	}
}

static void del_sw_fte(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);

	err = rhashtable_remove_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	WARN_ON(err);
	ida_free(&fg->fte_allocator, fte->index - fg->start_index);
	kmem_cache_free(steering->ftes_cache, fte);
}

static void del_hw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&ft->node);
	trace_mlx5_fs_del_fg(fg);

	root = find_root(&ft->node);
	if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
		mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
			       fg->id, ft->id);
}

static void del_sw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	int err;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);

	rhashtable_destroy(&fg->ftes_hash);
	ida_destroy(&fg->fte_allocator);
	if (ft->autogroup.active &&
	    fg->max_ftes == ft->autogroup.group_size &&
	    fg->start_index < ft->autogroup.max_fte)
		ft->autogroup.num_groups--;
	err = rhltable_remove(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	WARN_ON(err);
	kmem_cache_free(steering->fgs_cache, fg);
}

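/* Allocate a free FTE index inside the flow group and hook the FTE into
 * the group's hash table and child list.
 */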
static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
{
	int index;
	int ret;

	index = ida_alloc_max(&fg->fte_allocator, fg->max_ftes - 1, GFP_KERNEL);
	if (index < 0)
		return index;

	fte->index = index + fg->start_index;
	ret = rhashtable_insert_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	if (ret)
		goto err_ida_remove;

	tree_add_node(&fte->node, &fg->node);
	list_add_tail(&fte->node.list, &fg->node.children);
	return 0;

err_ida_remove:
	ida_free(&fg->fte_allocator, index);
	return ret;
}

static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
				const struct mlx5_flow_spec *spec,
				struct mlx5_flow_act *flow_act)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct fs_fte *fte;

	fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
	if (!fte)
		return ERR_PTR(-ENOMEM);

	memcpy(fte->val, &spec->match_value, sizeof(fte->val));
	fte->node.type =  FS_TYPE_FLOW_ENTRY;
	fte->action = *flow_act;
	fte->flow_context = spec->flow_context;

	tree_init_node(&fte->node, del_hw_fte, del_sw_fte);

	return fte;
}

static void dealloc_flow_group(struct mlx5_flow_steering *steering,
			       struct mlx5_flow_group *fg)
{
	rhashtable_destroy(&fg->ftes_hash);
	kmem_cache_free(steering->fgs_cache, fg);
}

static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
						u8 match_criteria_enable,
						const void *match_criteria,
						int start_index,
						int end_index)
{
	struct mlx5_flow_group *fg;
	int ret;

	fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
	if (!fg)
		return ERR_PTR(-ENOMEM);

	ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
	if (ret) {
		kmem_cache_free(steering->fgs_cache, fg);
		return ERR_PTR(ret);
	}

	ida_init(&fg->fte_allocator);
	fg->mask.match_criteria_enable = match_criteria_enable;
	memcpy(&fg->mask.match_criteria, match_criteria,
	       sizeof(fg->mask.match_criteria));
	fg->node.type =  FS_TYPE_FLOW_GROUP;
	fg->start_index = start_index;
	fg->max_ftes = end_index - start_index + 1;

	return fg;
}

static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
						       u8 match_criteria_enable,
						       const void *match_criteria,
						       int start_index,
						       int end_index,
						       struct list_head *prev)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *fg;
	int ret;

	fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
			      start_index, end_index);
	if (IS_ERR(fg))
		return fg;

	/* initialize refcnt, add to parent list */
	ret = rhltable_insert(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	if (ret) {
		dealloc_flow_group(steering, fg);
		return ERR_PTR(ret);
	}

	tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
	tree_add_node(&fg->node, &ft->node);
	/* Add node to group list */
	list_add(&fg->node.list, prev);
	atomic_inc(&ft->node.version);

	return fg;
}

static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
						enum fs_flow_table_type table_type,
						enum fs_flow_table_op_mod op_mod,
						u32 flags)
{
	struct mlx5_flow_table *ft;
	int ret;

	ft  = kzalloc(sizeof(*ft), GFP_KERNEL);
	if (!ft)
		return ERR_PTR(-ENOMEM);

	ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
	if (ret) {
		kfree(ft);
		return ERR_PTR(ret);
	}

	ft->level = level;
	ft->node.type = FS_TYPE_FLOW_TABLE;
	ft->op_mod = op_mod;
	ft->type = table_type;
	ft->vport = vport;
	ft->flags = flags;
	INIT_LIST_HEAD(&ft->fwd_rules);
	mutex_init(&ft->lock);

	return ft;
}

/* If reverse is false, then we search for the first flow table in the
 * root sub-tree from start (closest from right), else we search for the
 * last flow table in the root sub-tree till start (closest from left).
 */
static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node  *root,
							 struct list_head *start,
							 bool reverse)
{
#define list_advance_entry(pos, reverse)		\
	((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))

#define list_for_each_advance_continue(pos, head, reverse)	\
	for (pos = list_advance_entry(pos, reverse);		\
	     &pos->list != (head);				\
	     pos = list_advance_entry(pos, reverse))

	struct fs_node *iter = list_entry(start, struct fs_node, list);
	struct mlx5_flow_table *ft = NULL;

	if (!root || root->type == FS_TYPE_PRIO_CHAINS)
		return NULL;

	list_for_each_advance_continue(iter, &root->children, reverse) {
		if (iter->type == FS_TYPE_FLOW_TABLE) {
			fs_get_obj(ft, iter);
			return ft;
		}
		ft = find_closest_ft_recursive(iter, &iter->children, reverse);
		if (ft)
			return ft;
	}

	return ft;
}

/* If reverse is false then return the first flow table in next priority of
 * prio in the tree, else return the last flow table in the previous priority
 * of prio in the tree.
 */
static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
{
	struct mlx5_flow_table *ft = NULL;
	struct fs_node *curr_node;
	struct fs_node *parent;

	parent = prio->node.parent;
	curr_node = &prio->node;
	while (!ft && parent) {
		ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
		curr_node = parent;
		parent = curr_node->parent;
	}
	return ft;
}

/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, false);
}

/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, true);
}

static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
						struct mlx5_flow_act *flow_act)
{
	struct fs_prio *prio;
	bool next_ns;

	next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
	fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);

	return find_next_chained_ft(prio);
}

static int connect_fts_in_prio(struct mlx5_core_dev *dev,
			       struct fs_prio *prio,
			       struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_flow_table *iter;
	int err;

	fs_for_each_ft(iter, prio) {
		err = root->cmds->modify_flow_table(root, iter, ft);
		if (err) {
			mlx5_core_err(dev,
				      "Failed to modify flow table id %d, type %d, err %d\n",
				      iter->id, iter->type, err);
			/* The driver is out of sync with the FW */
			return err;
		}
	}
	return 0;
}

/* Connect flow tables from previous priority of prio to ft */
static int connect_prev_fts(struct mlx5_core_dev *dev,
			    struct mlx5_flow_table *ft,
			    struct fs_prio *prio)
{
	struct mlx5_flow_table *prev_ft;

	prev_ft = find_prev_chained_ft(prio);
	if (prev_ft) {
		struct fs_prio *prev_prio;

		fs_get_obj(prev_prio, prev_ft->node.parent);
		return connect_fts_in_prio(dev, prev_prio, ft);
	}
	return 0;
}

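/* If the new table sits at a lower level than the current root flow
 * table, make it the new root: update the root once per underlay QPN, or
 * once with QPN 0 when the underlay list is empty.
 */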
static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
				 *prio)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_ft_underlay_qp *uqp;
	int min_level = INT_MAX;
	int err = 0;
	u32 qpn;

	if (root->root_ft)
		min_level = root->root_ft->level;

	if (ft->level >= min_level)
		return 0;

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root, ft, qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root, ft,
							 qpn, false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = ft;

	return err;
}

static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
					 struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int err = 0;

	fs_get_obj(fte, rule->node.parent);
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return -EINVAL;
	down_write_ref_node(&fte->node, false);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	memcpy(&rule->dest_attr, dest, sizeof(*dest));
	root = find_root(&ft->node);
	err = root->cmds->update_fte(root, ft, fg,
				     modify_mask, fte);
	up_write_ref_node(&fte->node, false);

	return err;
}

int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
				 struct mlx5_flow_destination *new_dest,
				 struct mlx5_flow_destination *old_dest)
{
	int i;

	if (!old_dest) {
		if (handle->num_rules != 1)
			return -EINVAL;
		return _mlx5_modify_rule_destination(handle->rule[0],
						     new_dest);
	}

	for (i = 0; i < handle->num_rules; i++) {
		if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
			return _mlx5_modify_rule_destination(handle->rule[i],
							     new_dest);
	}

	return -EINVAL;
}

/* Modify/set FWD rules that point on old_next_ft to point on new_next_ft  */
static int connect_fwd_rules(struct mlx5_core_dev *dev,
			     struct mlx5_flow_table *new_next_ft,
			     struct mlx5_flow_table *old_next_ft)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_rule *iter;
	int err = 0;

	/* new_next_ft and old_next_ft could be NULL only
	 * when we create/destroy the anchor flow table.
	 */
	if (!new_next_ft || !old_next_ft)
		return 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = new_next_ft;

	mutex_lock(&old_next_ft->lock);
	list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
	mutex_unlock(&old_next_ft->lock);
	list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
		if ((iter->sw_action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) &&
		    iter->ft->ns == new_next_ft->ns)
			continue;

		err = _mlx5_modify_rule_destination(iter, &dest);
		if (err)
			pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
			       new_next_ft->id);
	}
	return 0;
}

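/* Wire a newly created table into the chain: when it becomes the first
 * table of its priority, tables of the previous priority are pointed at
 * it and forward rules that targeted the old next table are re-pointed;
 * the root table is updated if the device supports modify_root.
 */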
static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
			      struct fs_prio *prio)
{
	struct mlx5_flow_table *next_ft, *first_ft;
	int err = 0;

	/* Connect_prev_fts and update_root_ft_create are mutually exclusive */

	first_ft = list_first_entry_or_null(&prio->node.children,
					    struct mlx5_flow_table, node.list);
	if (!first_ft || first_ft->level > ft->level) {
		err = connect_prev_fts(dev, ft, prio);
		if (err)
			return err;

		next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
		err = connect_fwd_rules(dev, ft, next_ft);
		if (err)
			return err;
	}

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.modify_root))
		err = update_root_ft_create(ft, prio);
	return err;
}

static void list_add_flow_table(struct mlx5_flow_table *ft,
				struct fs_prio *prio)
{
	struct list_head *prev = &prio->node.children;
	struct mlx5_flow_table *iter;

	fs_for_each_ft(iter, prio) {
		if (iter->level > ft->level)
			break;
		prev = &iter->node.list;
	}
	list_add(&ft->node.list, prev);
}

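/* Common creation path for all flow table types: resolve the fs_prio,
 * translate the caller's level into an absolute one (unless the table is
 * unmanaged), create the table in FW and connect it into the tree under
 * the chain lock.
 */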
static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
							struct mlx5_flow_table_attr *ft_attr,
							enum fs_flow_table_op_mod op_mod,
							u16 vport)
{
	struct mlx5_flow_root_namespace *root = find_root(&ns->node);
	bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED;
	struct mlx5_flow_table *next_ft;
	struct fs_prio *fs_prio = NULL;
	struct mlx5_flow_table *ft;
	int err;

	if (!root) {
		pr_err("mlx5: flow steering failed to find root of namespace\n");
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&root->chain_lock);
	fs_prio = find_prio(ns, ft_attr->prio);
	if (!fs_prio) {
		err = -EINVAL;
		goto unlock_root;
	}
	if (!unmanaged) {
		/* The level is related to the
		 * priority level range.
		 */
		if (ft_attr->level >= fs_prio->num_levels) {
			err = -ENOSPC;
			goto unlock_root;
		}

		ft_attr->level += fs_prio->start_level;
	}

	/* The level is related to the
	 * priority level range.
	 */
	ft = alloc_flow_table(ft_attr->level,
			      vport,
			      root->table_type,
			      op_mod, ft_attr->flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto unlock_root;
	}

	tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
	next_ft = unmanaged ? ft_attr->next_ft :
			      find_next_chained_ft(fs_prio);
	ft->def_miss_action = ns->def_miss_action;
	ft->ns = ns;
	err = root->cmds->create_flow_table(root, ft, ft_attr, next_ft);
	if (err)
		goto free_ft;

	if (!unmanaged) {
		err = connect_flow_table(root->dev, ft, fs_prio);
		if (err)
			goto destroy_ft;
	}

	ft->node.active = true;
	down_write_ref_node(&fs_prio->node, false);
	if (!unmanaged) {
		tree_add_node(&ft->node, &fs_prio->node);
		list_add_flow_table(ft, fs_prio);
	} else {
		ft->node.root = fs_prio->node.root;
	}
	fs_prio->num_ft++;
	up_write_ref_node(&fs_prio->node, false);
	mutex_unlock(&root->chain_lock);
	trace_mlx5_fs_add_ft(ft);
	return ft;
destroy_ft:
	root->cmds->destroy_flow_table(root, ft);
free_ft:
	rhltable_destroy(&ft->fgs_hash);
	kfree(ft);
unlock_root:
	mutex_unlock(&root->chain_lock);
	return ERR_PTR(err);
}

struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
					       struct mlx5_flow_table_attr *ft_attr)
{
	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
}
EXPORT_SYMBOL(mlx5_create_flow_table);

u32 mlx5_flow_table_id(struct mlx5_flow_table *ft)
{
	return ft->id;
}
EXPORT_SYMBOL(mlx5_flow_table_id);

struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
			     struct mlx5_flow_table_attr *ft_attr, u16 vport)
{
	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, vport);
}

struct mlx5_flow_table*
mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
				 int prio, u32 level)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.level = level;
	ft_attr.prio  = prio;
	ft_attr.max_fte = 1;

	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
}
EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);

#define MAX_FLOW_GROUP_SIZE BIT(24)
struct mlx5_flow_table*
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
				    struct mlx5_flow_table_attr *ft_attr)
{
	int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
	int max_num_groups = ft_attr->autogroup.max_num_groups;
	struct mlx5_flow_table *ft;
	int autogroups_max_fte;

	ft = mlx5_create_flow_table(ns, ft_attr);
	if (IS_ERR(ft))
		return ft;

	autogroups_max_fte = ft->max_fte - num_reserved_entries;
	if (max_num_groups > autogroups_max_fte)
		goto err_validate;
	if (num_reserved_entries > ft->max_fte)
		goto err_validate;

	/* Align the number of groups according to the largest group size */
	if (autogroups_max_fte / (max_num_groups + 1) > MAX_FLOW_GROUP_SIZE)
		max_num_groups = (autogroups_max_fte / MAX_FLOW_GROUP_SIZE) - 1;

	ft->autogroup.active = true;
	ft->autogroup.required_groups = max_num_groups;
	ft->autogroup.max_fte = autogroups_max_fte;
	/* We save place for flow groups in addition to max types */
	ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);

	return ft;

err_validate:
	mlx5_destroy_flow_table(ft);
	return ERR_PTR(-ENOSPC);
}
EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);

struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
					       u32 *fg_in)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    fg_in, match_criteria);
	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
					    fg_in,
					    match_criteria_enable);
	int start_index = MLX5_GET(create_flow_group_in, fg_in,
				   start_flow_index);
	int end_index = MLX5_GET(create_flow_group_in, fg_in,
				 end_flow_index);
	struct mlx5_flow_group *fg;
	int err;

	if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
		return ERR_PTR(-EPERM);

	down_write_ref_node(&ft->node, false);
	fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
				     start_index, end_index,
				     ft->node.children.prev);
	up_write_ref_node(&ft->node, false);
	if (IS_ERR(fg))
		return fg;

	err = root->cmds->create_flow_group(root, ft, fg_in, fg);
	if (err) {
		tree_put_node(&fg->node, false);
		return ERR_PTR(err);
	}
	trace_mlx5_fs_add_fg(fg);
	fg->node.active = true;

	return fg;
}
EXPORT_SYMBOL(mlx5_create_flow_group);

static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return NULL;

	INIT_LIST_HEAD(&rule->next_ft);
	rule->node.type = FS_TYPE_FLOW_DEST;
	if (dest)
		memcpy(&rule->dest_attr, dest, sizeof(*dest));
	else
		rule->dest_attr.type = MLX5_FLOW_DESTINATION_TYPE_NONE;

	return rule;
}

static struct mlx5_flow_handle *alloc_handle(int num_rules)
{
	struct mlx5_flow_handle *handle;

	handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
	if (!handle)
		return NULL;

	handle->num_rules = num_rules;

	return handle;
}

static void destroy_flow_handle(struct fs_fte *fte,
				struct mlx5_flow_handle *handle,
				struct mlx5_flow_destination *dest,
				int i)
{
	for (; --i >= 0;) {
		if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
			fte->dests_size--;
			list_del(&handle->rule[i]->node.list);
			kfree(handle->rule[i]);
		}
	}
	kfree(handle);
}

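/* Build a handle with one rule per destination. A destination that
 * already has a rule on this FTE reuses it (only the refcount is
 * bumped); new destinations get a fresh rule and set the relevant bits
 * in the FTE modify mask.
 */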
static struct mlx5_flow_handle *
create_flow_handle(struct fs_fte *fte,
		   struct mlx5_flow_destination *dest,
		   int dest_num,
		   int *modify_mask,
		   bool *new_rule)
{
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_rule *rule = NULL;
	static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
	static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int type;
	int i = 0;

	handle = alloc_handle((dest_num) ? dest_num : 1);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	do {
		if (dest) {
			rule = find_flow_rule(fte, dest + i);
			if (rule) {
				refcount_inc(&rule->node.refcount);
				goto rule_found;
			}
		}

		*new_rule = true;
		rule = alloc_rule(dest + i);
		if (!rule)
			goto free_rules;

		/* Add dest to the dests list - flow tables need to be at
		 * the end of the list for forward-to-next-prio rules.
		 */
		tree_init_node(&rule->node, NULL, del_sw_hw_rule);
		if (dest &&
		    dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
			list_add(&rule->node.list, &fte->node.children);
		else
			list_add_tail(&rule->node.list, &fte->node.children);
		if (dest) {
			fte->dests_size++;

			if (is_fwd_dest_type(dest[i].type))
				fte->fwd_dests++;

			type = dest[i].type ==
				MLX5_FLOW_DESTINATION_TYPE_COUNTER;
			*modify_mask |= type ? count : dst;
		}
rule_found:
		handle->rule[i] = rule;
	} while (++i < dest_num);

	return handle;

free_rules:
	destroy_flow_handle(fte, handle, dest, i);
	return ERR_PTR(-ENOMEM);
}

/* fte should not be deleted while calling this function */
static struct mlx5_flow_handle *
add_rule_fte(struct fs_fte *fte,
	     struct mlx5_flow_group *fg,
	     struct mlx5_flow_destination *dest,
	     int dest_num,
	     bool update_action)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_table *ft;
	int modify_mask = 0;
	int err;
	bool new_rule = false;

	handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
				    &new_rule);
	if (IS_ERR(handle) || !new_rule)
		goto out;

	if (update_action)
		modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);

	fs_get_obj(ft, fg->node.parent);
	root = find_root(&fg->node);
	if (!(fte->status & FS_FTE_STATUS_EXISTING))
		err = root->cmds->create_fte(root, ft, fg, fte);
	else
		err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
	if (err)
		goto free_handle;

	fte->node.active = true;
	fte->status |= FS_FTE_STATUS_EXISTING;
	atomic_inc(&fg->node.version);

out:
	return handle;

free_handle:
	destroy_flow_handle(fte, handle, dest, handle->num_rules);
	return ERR_PTR(err);
}

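/* Pick a free index range for a new autogroup by walking the existing
 * groups, which are kept sorted by start_index, and insert the group
 * there.
 */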
static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table  *ft,
						     const struct mlx5_flow_spec *spec)
{
	struct list_head *prev = &ft->node.children;
	u32 max_fte = ft->autogroup.max_fte;
	unsigned int candidate_index = 0;
	unsigned int group_size = 0;
	struct mlx5_flow_group *fg;

	if (!ft->autogroup.active)
		return ERR_PTR(-ENOENT);

	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
		group_size = ft->autogroup.group_size;

	/*  max_fte == ft->autogroup.max_types */
	if (group_size == 0)
		group_size = 1;

	/* sorted by start_index */
	fs_for_each_fg(fg, ft) {
		if (candidate_index + group_size > fg->start_index)
			candidate_index = fg->start_index + fg->max_ftes;
		else
			break;
		prev = &fg->node.list;
	}

	if (candidate_index + group_size > max_fte)
		return ERR_PTR(-ENOSPC);

	fg = alloc_insert_flow_group(ft,
				     spec->match_criteria_enable,
				     spec->match_criteria,
				     candidate_index,
				     candidate_index + group_size - 1,
				     prev);
	if (IS_ERR(fg))
		goto out;

	if (group_size == ft->autogroup.group_size)
		ft->autogroup.num_groups++;

out:
	return fg;
}

static int create_auto_flow_group(struct mlx5_flow_table *ft,
				  struct mlx5_flow_group *fg)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *match_criteria_addr;
	u8 src_esw_owner_mask_on;
	void *misc;
	int err;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 fg->mask.match_criteria_enable);
	MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
	MLX5_SET(create_flow_group_in, in, end_flow_index,   fg->start_index +
		 fg->max_ftes - 1);

	misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
			    misc_parameters);
	src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);
	MLX5_SET(create_flow_group_in, in,
		 source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);

	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
					   in, match_criteria);
	memcpy(match_criteria_addr, fg->mask.match_criteria,
	       sizeof(fg->mask.match_criteria));

	err = root->cmds->create_flow_group(root, ft, in, fg);
	if (!err) {
		fg->node.active = true;
		trace_mlx5_fs_add_fg(fg);
	}

	kvfree(in);
	return err;
}

static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2)
{
	if (d1->type == d2->type) {
		if (((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
		      d1->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
		     d1->vport.num == d2->vport.num &&
		     d1->vport.flags == d2->vport.flags &&
		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
		      (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
		      (d1->vport.pkt_reformat->id ==
		       d2->vport.pkt_reformat->id) : true)) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
		     d1->ft == d2->ft) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
		     d1->tir_num == d2->tir_num) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
		     d1->ft_num == d2->ft_num) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER &&
		     d1->sampler_id == d2->sampler_id) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_RANGE &&
		     d1->range.field == d2->range.field &&
		     d1->range.hit_ft == d2->range.hit_ft &&
		     d1->range.miss_ft == d2->range.miss_ft &&
		     d1->range.min == d2->range.min &&
		     d1->range.max == d2->range.max))
			return true;
	}

	return false;
}

static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
					     struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	list_for_each_entry(rule, &fte->node.children, node.list) {
		if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
			return rule;
	}
	return NULL;
}

static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0,
					   const struct mlx5_fs_vlan *vlan1)
{
	return vlan0->ethtype != vlan1->ethtype ||
	       vlan0->vid != vlan1->vid ||
	       vlan0->prio != vlan1->prio;
}

1643 static bool check_conflicting_actions(const struct mlx5_flow_act *act1,
1644 				      const struct mlx5_flow_act *act2)
1645 {
1646 	u32 action1 = act1->action;
1647 	u32 action2 = act2->action;
1648 	u32 xored_actions;
1649 
1650 	xored_actions = action1 ^ action2;
1651 
1652 	/* if one rule only wants to count, it's ok */
1653 	if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
1654 	    action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
1655 		return false;
1656 
1657 	if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP  |
1658 			     MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
1659 			     MLX5_FLOW_CONTEXT_ACTION_DECAP |
1660 			     MLX5_FLOW_CONTEXT_ACTION_MOD_HDR  |
1661 			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
1662 			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
1663 			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
1664 			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
1665 		return true;
1666 
1667 	if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
1668 	    act1->pkt_reformat != act2->pkt_reformat)
1669 		return true;
1670 
1671 	if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
1672 	    act1->modify_hdr != act2->modify_hdr)
1673 		return true;
1674 
1675 	if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
1676 	    check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0]))
1677 		return true;
1678 
1679 	if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 &&
1680 	    check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1]))
1681 		return true;
1682 
1683 	return false;
1684 }
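
/*
 * Illustrative sketch (not part of the driver): how the conflict rules
 * above compose. A count-only action never conflicts, while actions that
 * differ in DROP do. The example_ name is hypothetical.
 */
static inline bool example_actions_conflict(void)
{
	struct mlx5_flow_act fwd = { .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST };
	struct mlx5_flow_act drop = { .action = MLX5_FLOW_CONTEXT_ACTION_DROP };
	struct mlx5_flow_act cnt = { .action = MLX5_FLOW_CONTEXT_ACTION_COUNT };

	/* FWD vs DROP differ in a conflicting bit; FWD vs count-only is fine */
	return check_conflicting_actions(&fwd, &drop) &&
	       !check_conflicting_actions(&fwd, &cnt);
}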
1685 
1686 static int check_conflicting_ftes(struct fs_fte *fte,
1687 				  const struct mlx5_flow_context *flow_context,
1688 				  const struct mlx5_flow_act *flow_act)
1689 {
1690 	if (check_conflicting_actions(flow_act, &fte->action)) {
1691 		mlx5_core_warn(get_dev(&fte->node),
1692 			       "Found two FTEs with conflicting actions\n");
1693 		return -EEXIST;
1694 	}
1695 
1696 	if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
1697 	    fte->flow_context.flow_tag != flow_context->flow_tag) {
1698 		mlx5_core_warn(get_dev(&fte->node),
1699 			       "FTE flow tag %u already exists with different flow tag %u\n",
1700 			       fte->flow_context.flow_tag,
1701 			       flow_context->flow_tag);
1702 		return -EEXIST;
1703 	}
1704 
1705 	return 0;
1706 }
1707 
1708 static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
1709 					    const struct mlx5_flow_spec *spec,
1710 					    struct mlx5_flow_act *flow_act,
1711 					    struct mlx5_flow_destination *dest,
1712 					    int dest_num,
1713 					    struct fs_fte *fte)
1714 {
1715 	struct mlx5_flow_handle *handle;
1716 	int old_action;
1717 	int i;
1718 	int ret;
1719 
1720 	ret = check_conflicting_ftes(fte, &spec->flow_context, flow_act);
1721 	if (ret)
1722 		return ERR_PTR(ret);
1723 
1724 	old_action = fte->action.action;
1725 	fte->action.action |= flow_act->action;
1726 	handle = add_rule_fte(fte, fg, dest, dest_num,
1727 			      old_action != flow_act->action);
1728 	if (IS_ERR(handle)) {
1729 		fte->action.action = old_action;
1730 		return handle;
1731 	}
1732 	trace_mlx5_fs_set_fte(fte, false);
1733 
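	/*
	 * Rules created just now by add_rule_fte() still hold a single
	 * reference; only those are attached under the FTE, so rules reused
	 * from an earlier identical destination are not re-parented.
	 */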
1734 	for (i = 0; i < handle->num_rules; i++) {
1735 		if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
1736 			tree_add_node(&handle->rule[i]->node, &fte->node);
1737 			trace_mlx5_fs_add_rule(handle->rule[i]);
1738 		}
1739 	}
1740 	return handle;
1741 }
1742 
1743 static bool counter_is_valid(u32 action)
1744 {
1745 	return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
1746 			  MLX5_FLOW_CONTEXT_ACTION_ALLOW |
1747 			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
1748 }
1749 
1750 static bool dest_is_valid(struct mlx5_flow_destination *dest,
1751 			  struct mlx5_flow_act *flow_act,
1752 			  struct mlx5_flow_table *ft)
1753 {
1754 	bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
1755 	u32 action = flow_act->action;
1756 
1757 	if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
1758 		return counter_is_valid(action);
1759 
1760 	if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
1761 		return true;
1762 
1763 	if (ignore_level) {
1764 		if (ft->type != FS_FT_FDB &&
1765 		    ft->type != FS_FT_NIC_RX)
1766 			return false;
1767 
1768 		if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1769 		    ft->type != dest->ft->type)
1770 			return false;
1771 	}
1772 
1773 	if (!dest || ((dest->type ==
1774 	    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
1775 	    (dest->ft->level <= ft->level && !ignore_level)))
1776 		return false;
1777 	return true;
1778 }
1779 
1780 struct match_list {
1781 	struct list_head	list;
1782 	struct mlx5_flow_group *g;
1783 };
1784 
1785 static void free_match_list(struct match_list *head, bool ft_locked)
1786 {
1787 	struct match_list *iter, *match_tmp;
1788 
1789 	list_for_each_entry_safe(iter, match_tmp, &head->list,
1790 				 list) {
1791 		tree_put_node(&iter->g->node, ft_locked);
1792 		list_del(&iter->list);
1793 		kfree(iter);
1794 	}
1795 }
1796 
1797 static int build_match_list(struct match_list *match_head,
1798 			    struct mlx5_flow_table *ft,
1799 			    const struct mlx5_flow_spec *spec,
1800 			    struct mlx5_flow_group *fg,
1801 			    bool ft_locked)
1802 {
1803 	struct rhlist_head *tmp, *list;
1804 	struct mlx5_flow_group *g;
1805 
1806 	rcu_read_lock();
1807 	INIT_LIST_HEAD(&match_head->list);
1808 	/* Collect all fgs which have a matching match_criteria */
1809 	list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
1810 	/* RCU read-side is atomic context, we can't execute FW commands here */
1811 	rhl_for_each_entry_rcu(g, tmp, list, hash) {
1812 		struct match_list *curr_match;
1813 
1814 		if (fg && fg != g)
1815 			continue;
1816 
1817 		if (unlikely(!tree_get_node(&g->node)))
1818 			continue;
1819 
1820 		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
1821 		if (!curr_match) {
1822 			rcu_read_unlock();
1823 			free_match_list(match_head, ft_locked);
1824 			return -ENOMEM;
1825 		}
1826 		curr_match->g = g;
1827 		list_add_tail(&curr_match->list, &match_head->list);
1828 	}
1829 	rcu_read_unlock();
1830 	return 0;
1831 }
1832 
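/*
 * Sum the version counters of all matched flow groups. The aggregate is
 * sampled before and after the unlocked FTE search in
 * try_add_to_existing_fg(); any concurrent FTE insertion bumps a group
 * version, changes the sum and forces a retry under the write lock.
 */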
1833 static u64 matched_fgs_get_version(struct list_head *match_head)
1834 {
1835 	struct match_list *iter;
1836 	u64 version = 0;
1837 
1838 	list_for_each_entry(iter, match_head, list)
1839 		version += (u64)atomic_read(&iter->g->node.version);
1840 	return version;
1841 }
1842 
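/*
 * Look up an FTE by match value under the group lock. The first pass takes
 * the group read lock; callers retry with take_write == true once the group
 * versions changed, holding off concurrent inserters. On success the FTE is
 * returned referenced and write-locked.
 */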
1843 static struct fs_fte *
1844 lookup_fte_locked(struct mlx5_flow_group *g,
1845 		  const u32 *match_value,
1846 		  bool take_write)
1847 {
1848 	struct fs_fte *fte_tmp;
1849 
1850 	if (take_write)
1851 		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1852 	else
1853 		nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
1854 	fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
1855 					 rhash_fte);
1856 	if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
1857 		fte_tmp = NULL;
1858 		goto out;
1859 	}
1860 	if (!fte_tmp->node.active) {
1861 		tree_put_node(&fte_tmp->node, false);
1862 		fte_tmp = NULL;
1863 		goto out;
1864 	}
1865 
1866 	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
1867 out:
1868 	if (take_write)
1869 		up_write_ref_node(&g->node, false);
1870 	else
1871 		up_read_ref_node(&g->node);
1872 	return fte_tmp;
1873 }
1874 
1875 static struct mlx5_flow_handle *
1876 try_add_to_existing_fg(struct mlx5_flow_table *ft,
1877 		       struct list_head *match_head,
1878 		       const struct mlx5_flow_spec *spec,
1879 		       struct mlx5_flow_act *flow_act,
1880 		       struct mlx5_flow_destination *dest,
1881 		       int dest_num,
1882 		       int ft_version)
1883 {
1884 	struct mlx5_flow_steering *steering = get_steering(&ft->node);
1885 	struct mlx5_flow_group *g;
1886 	struct mlx5_flow_handle *rule;
1887 	struct match_list *iter;
1888 	bool take_write = false;
1889 	struct fs_fte *fte;
1890 	u64  version = 0;
1891 	int err;
1892 
1893 	fte = alloc_fte(ft, spec, flow_act);
1894 	if (IS_ERR(fte))
1895 		return ERR_PTR(-ENOMEM);
1896 
1897 search_again_locked:
1898 	if (flow_act->flags & FLOW_ACT_NO_APPEND)
1899 		goto skip_search;
1900 	version = matched_fgs_get_version(match_head);
1901 	/* Try to find an fte with an identical match value and attempt to
1902 	 * update its action.
1903 	 */
1904 	list_for_each_entry(iter, match_head, list) {
1905 		struct fs_fte *fte_tmp;
1906 
1907 		g = iter->g;
1908 		fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
1909 		if (!fte_tmp)
1910 			continue;
1911 		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
1912 		/* No error check needed here, because insert_fte() is not called */
1913 		up_write_ref_node(&fte_tmp->node, false);
1914 		tree_put_node(&fte_tmp->node, false);
1915 		kmem_cache_free(steering->ftes_cache, fte);
1916 		return rule;
1917 	}
1918 
1919 skip_search:
1920 	/* No group with matching fte found, or we skipped the search.
1921 	 * Try to add a new fte to any matching fg.
1922 	 */
1923 
1924 	/* Check the ft version, in case a new flow group
1925 	 * was added while the fgs weren't locked
1926 	 */
1927 	if (atomic_read(&ft->node.version) != ft_version) {
1928 		rule = ERR_PTR(-EAGAIN);
1929 		goto out;
1930 	}
1931 
1932 	/* Check the fgs version. If the version has changed, it could be that an
1933 	 * FTE with the same match value was added while the fgs weren't
1934 	 * locked.
1935 	 */
1936 	if (!(flow_act->flags & FLOW_ACT_NO_APPEND) &&
1937 	    version != matched_fgs_get_version(match_head)) {
1938 		take_write = true;
1939 		goto search_again_locked;
1940 	}
1941 
1942 	list_for_each_entry(iter, match_head, list) {
1943 		g = iter->g;
1944 
1945 		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1946 
1947 		if (!g->node.active) {
1948 			up_write_ref_node(&g->node, false);
1949 			continue;
1950 		}
1951 
1952 		err = insert_fte(g, fte);
1953 		if (err) {
1954 			up_write_ref_node(&g->node, false);
1955 			if (err == -ENOSPC)
1956 				continue;
1957 			kmem_cache_free(steering->ftes_cache, fte);
1958 			return ERR_PTR(err);
1959 		}
1960 
1961 		nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
1962 		up_write_ref_node(&g->node, false);
1963 		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
1964 		up_write_ref_node(&fte->node, false);
1965 		if (IS_ERR(rule))
1966 			tree_put_node(&fte->node, false);
1967 		return rule;
1968 	}
1969 	rule = ERR_PTR(-ENOENT);
1970 out:
1971 	kmem_cache_free(steering->ftes_cache, fte);
1972 	return rule;
1973 }
1974 
1975 static struct mlx5_flow_handle *
1976 _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
1977 		     const struct mlx5_flow_spec *spec,
1978 		     struct mlx5_flow_act *flow_act,
1979 		     struct mlx5_flow_destination *dest,
1980 		     int dest_num)
1981 
1982 {
1983 	struct mlx5_flow_steering *steering = get_steering(&ft->node);
1984 	struct mlx5_flow_handle *rule;
1985 	struct match_list match_head;
1986 	struct mlx5_flow_group *g;
1987 	bool take_write = false;
1988 	struct fs_fte *fte;
1989 	int version;
1990 	int err;
1991 	int i;
1992 
1993 	if (!check_valid_spec(spec))
1994 		return ERR_PTR(-EINVAL);
1995 
1996 	if (flow_act->fg && ft->autogroup.active)
1997 		return ERR_PTR(-EINVAL);
1998 
1999 	if (dest && dest_num <= 0)
2000 		return ERR_PTR(-EINVAL);
2001 
2002 	for (i = 0; i < dest_num; i++) {
2003 		if (!dest_is_valid(&dest[i], flow_act, ft))
2004 			return ERR_PTR(-EINVAL);
2005 	}
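	/*
	 * Optimistic fast path: search the matching groups under the read
	 * lock first; only when no existing FTE can be reused (or the table
	 * version moved) upgrade to the write lock and create a new
	 * group/FTE below.
	 */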
2006 	nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
2007 search_again_locked:
2008 	version = atomic_read(&ft->node.version);
2009 
2010 	/* Collect all fgs which have a matching match_criteria */
2011 	err = build_match_list(&match_head, ft, spec, flow_act->fg, take_write);
2012 	if (err) {
2013 		if (take_write)
2014 			up_write_ref_node(&ft->node, false);
2015 		else
2016 			up_read_ref_node(&ft->node);
2017 		return ERR_PTR(err);
2018 	}
2019 
2020 	if (!take_write)
2021 		up_read_ref_node(&ft->node);
2022 
2023 	rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
2024 				      dest_num, version);
2025 	free_match_list(&match_head, take_write);
2026 	if (!IS_ERR(rule) ||
2027 	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
2028 		if (take_write)
2029 			up_write_ref_node(&ft->node, false);
2030 		return rule;
2031 	}
2032 
2033 	if (!take_write) {
2034 		nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
2035 		take_write = true;
2036 	}
2037 
2038 	if (PTR_ERR(rule) == -EAGAIN ||
2039 	    version != atomic_read(&ft->node.version))
2040 		goto search_again_locked;
2041 
2042 	g = alloc_auto_flow_group(ft, spec);
2043 	if (IS_ERR(g)) {
2044 		rule = ERR_CAST(g);
2045 		up_write_ref_node(&ft->node, false);
2046 		return rule;
2047 	}
2048 
2049 	fte = alloc_fte(ft, spec, flow_act);
2050 	if (IS_ERR(fte)) {
2051 		up_write_ref_node(&ft->node, false);
2052 		err = PTR_ERR(fte);
2053 		goto err_alloc_fte;
2054 	}
2055 
2056 	nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
2057 	up_write_ref_node(&ft->node, false);
2058 
2059 	err = create_auto_flow_group(ft, g);
2060 	if (err)
2061 		goto err_release_fg;
2062 
2063 	err = insert_fte(g, fte);
2064 	if (err)
2065 		goto err_release_fg;
2066 
2067 	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
2068 	up_write_ref_node(&g->node, false);
2069 	rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
2070 	up_write_ref_node(&fte->node, false);
2071 	if (IS_ERR(rule))
2072 		tree_put_node(&fte->node, false);
2073 	tree_put_node(&g->node, false);
2074 	return rule;
2075 
2076 err_release_fg:
2077 	up_write_ref_node(&g->node, false);
2078 	kmem_cache_free(steering->ftes_cache, fte);
2079 err_alloc_fte:
2080 	tree_put_node(&g->node, false);
2081 	return ERR_PTR(err);
2082 }
2083 
2084 static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
2085 {
2086 	return ((ft->type == FS_FT_NIC_RX) &&
2087 		(MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
2088 }
2089 
2090 struct mlx5_flow_handle *
2091 mlx5_add_flow_rules(struct mlx5_flow_table *ft,
2092 		    const struct mlx5_flow_spec *spec,
2093 		    struct mlx5_flow_act *flow_act,
2094 		    struct mlx5_flow_destination *dest,
2095 		    int num_dest)
2096 {
2097 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2098 	static const struct mlx5_flow_spec zero_spec = {};
2099 	struct mlx5_flow_destination *gen_dest = NULL;
2100 	struct mlx5_flow_table *next_ft = NULL;
2101 	struct mlx5_flow_handle *handle = NULL;
2102 	u32 sw_action = flow_act->action;
2103 	int i;
2104 
2105 	if (!spec)
2106 		spec = &zero_spec;
2107 
2108 	if (!is_fwd_next_action(sw_action))
2109 		return _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
2110 
2111 	if (!fwd_next_prio_supported(ft))
2112 		return ERR_PTR(-EOPNOTSUPP);
2113 
2114 	mutex_lock(&root->chain_lock);
2115 	next_ft = find_next_fwd_ft(ft, flow_act);
2116 	if (!next_ft) {
2117 		handle = ERR_PTR(-EOPNOTSUPP);
2118 		goto unlock;
2119 	}
2120 
2121 	gen_dest = kcalloc(num_dest + 1, sizeof(*dest),
2122 			   GFP_KERNEL);
2123 	if (!gen_dest) {
2124 		handle = ERR_PTR(-ENOMEM);
2125 		goto unlock;
2126 	}
2127 	for (i = 0; i < num_dest; i++)
2128 		gen_dest[i] = dest[i];
2129 	gen_dest[i].type =
2130 		MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2131 	gen_dest[i].ft = next_ft;
2132 	dest = gen_dest;
2133 	num_dest++;
2134 	flow_act->action &= ~(MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
2135 			      MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
2136 	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2137 	handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
2138 	if (IS_ERR(handle))
2139 		goto unlock;
2140 
2141 	if (list_empty(&handle->rule[num_dest - 1]->next_ft)) {
2142 		mutex_lock(&next_ft->lock);
2143 		list_add(&handle->rule[num_dest - 1]->next_ft,
2144 			 &next_ft->fwd_rules);
2145 		mutex_unlock(&next_ft->lock);
2146 		handle->rule[num_dest - 1]->sw_action = sw_action;
2147 		handle->rule[num_dest - 1]->ft = ft;
2148 	}
2149 unlock:
2150 	mutex_unlock(&root->chain_lock);
2151 	kfree(gen_dest);
2152 	return handle;
2153 }
2154 EXPORT_SYMBOL(mlx5_add_flow_rules);
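
/*
 * Illustrative sketch (not part of the driver): a typical caller installing
 * a catch-all drop rule that is also counted. The example_ helper name is
 * hypothetical; mlx5_fc_id() is the flow counter API from
 * <linux/mlx5/fs.h>. Passing a NULL spec matches all packets (a zeroed
 * spec is substituted internally above).
 */
static inline struct mlx5_flow_handle *
example_add_counted_drop(struct mlx5_flow_table *ft, struct mlx5_fc *counter)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};

	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter_id = mlx5_fc_id(counter);
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;

	/* Tear down later with mlx5_del_flow_rules() on the returned handle */
	return mlx5_add_flow_rules(ft, NULL, &flow_act, &dest, 1);
}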
2155 
2156 void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
2157 {
2158 	struct fs_fte *fte;
2159 	int i;
2160 
2161 	/* To consolidate the HW changes we lock the FTE against other
2162 	 * changes and take a reference on it, so that its "del" callbacks
2163 	 * are not invoked while the handle's rules are removed here.
2164 	 * The removal of the rules is done under the locked FTE.
2165 	 * After removing all the handle's rules, if other rules
2166 	 * remain, we only need to modify the FTE in FW, then
2167 	 * unlock it and drop the reference taken above.
2168 	 * Otherwise the FTE should be deleted: first delete the
2169 	 * FTE in FW, then unlock it and call tree_put_node() on
2170 	 * it, which handles the final reference drop as
2171 	 * well as the required handling of its parent.
2172 	 */
2173 	fs_get_obj(fte, handle->rule[0]->node.parent);
2174 	down_write_ref_node(&fte->node, false);
2175 	for (i = handle->num_rules - 1; i >= 0; i--)
2176 		tree_remove_node(&handle->rule[i]->node, true);
2177 	if (list_empty(&fte->node.children)) {
2178 		fte->node.del_hw_func(&fte->node);
2179 		/* Avoid double call to del_hw_fte */
2180 		fte->node.del_hw_func = NULL;
2181 		up_write_ref_node(&fte->node, false);
2182 		tree_put_node(&fte->node, false);
2183 	} else if (fte->dests_size) {
2184 		if (fte->modify_mask)
2185 			modify_fte(fte);
2186 		up_write_ref_node(&fte->node, false);
2187 	} else {
2188 		up_write_ref_node(&fte->node, false);
2189 	}
2190 	kfree(handle);
2191 }
2192 EXPORT_SYMBOL(mlx5_del_flow_rules);
2193 
2194 /* Assuming prio->node.children (flow tables) is sorted by level */
2195 static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
2196 {
2197 	struct fs_prio *prio;
2198 
2199 	fs_get_obj(prio, ft->node.parent);
2200 
2201 	if (!list_is_last(&ft->node.list, &prio->node.children))
2202 		return list_next_entry(ft, node.list);
2203 	return find_next_chained_ft(prio);
2204 }
2205 
2206 static int update_root_ft_destroy(struct mlx5_flow_table *ft)
2207 {
2208 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2209 	struct mlx5_ft_underlay_qp *uqp;
2210 	struct mlx5_flow_table *new_root_ft = NULL;
2211 	int err = 0;
2212 	u32 qpn;
2213 
2214 	if (root->root_ft != ft)
2215 		return 0;
2216 
2217 	new_root_ft = find_next_ft(ft);
2218 	if (!new_root_ft) {
2219 		root->root_ft = NULL;
2220 		return 0;
2221 	}
2222 
2223 	if (list_empty(&root->underlay_qpns)) {
2224 		/* Don't set any QPN (zero) when the QPN list is empty */
2225 		qpn = 0;
2226 		err = root->cmds->update_root_ft(root, new_root_ft,
2227 						 qpn, false);
2228 	} else {
2229 		list_for_each_entry(uqp, &root->underlay_qpns, list) {
2230 			qpn = uqp->qpn;
2231 			err = root->cmds->update_root_ft(root,
2232 							 new_root_ft, qpn,
2233 							 false);
2234 			if (err)
2235 				break;
2236 		}
2237 	}
2238 
2239 	if (err)
2240 		mlx5_core_warn(root->dev,
2241 			       "Update root flow table of id(%u) qpn(%d) failed\n",
2242 			       ft->id, qpn);
2243 	else
2244 		root->root_ft = new_root_ft;
2245 
2246 	return 0;
2247 }
2248 
2249 /* Connect flow tables from the previous priority to
2250  * the next flow table.
2251  */
2252 static int disconnect_flow_table(struct mlx5_flow_table *ft)
2253 {
2254 	struct mlx5_core_dev *dev = get_dev(&ft->node);
2255 	struct mlx5_flow_table *next_ft;
2256 	struct fs_prio *prio;
2257 	int err = 0;
2258 
2259 	err = update_root_ft_destroy(ft);
2260 	if (err)
2261 		return err;
2262 
2263 	fs_get_obj(prio, ft->node.parent);
2264 	if (list_first_entry(&prio->node.children,
2265 			     struct mlx5_flow_table,
2266 			     node.list) != ft)
2267 		return 0;
2268 
2269 	next_ft = find_next_ft(ft);
2270 	err = connect_fwd_rules(dev, next_ft, ft);
2271 	if (err)
2272 		return err;
2273 
2274 	err = connect_prev_fts(dev, next_ft, prio);
2275 	if (err)
2276 		mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
2277 			       ft->id);
2278 	return err;
2279 }
2280 
2281 int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
2282 {
2283 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2284 	int err = 0;
2285 
2286 	mutex_lock(&root->chain_lock);
2287 	if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
2288 		err = disconnect_flow_table(ft);
2289 	if (err) {
2290 		mutex_unlock(&root->chain_lock);
2291 		return err;
2292 	}
2293 	if (tree_remove_node(&ft->node, false))
2294 		mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
2295 			       ft->id);
2296 	mutex_unlock(&root->chain_lock);
2297 
2298 	return err;
2299 }
2300 EXPORT_SYMBOL(mlx5_destroy_flow_table);
2301 
2302 void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
2303 {
2304 	if (tree_remove_node(&fg->node, false))
2305 		mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
2306 			       fg->id);
2307 }
2308 EXPORT_SYMBOL(mlx5_destroy_flow_group);
2309 
2310 struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
2311 						int n)
2312 {
2313 	struct mlx5_flow_steering *steering = dev->priv.steering;
2314 
2315 	if (!steering || !steering->fdb_sub_ns)
2316 		return NULL;
2317 
2318 	return steering->fdb_sub_ns[n];
2319 }
2320 EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);
2321 
2322 static bool is_nic_rx_ns(enum mlx5_flow_namespace_type type)
2323 {
2324 	switch (type) {
2325 	case MLX5_FLOW_NAMESPACE_BYPASS:
2326 	case MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC:
2327 	case MLX5_FLOW_NAMESPACE_LAG:
2328 	case MLX5_FLOW_NAMESPACE_OFFLOADS:
2329 	case MLX5_FLOW_NAMESPACE_ETHTOOL:
2330 	case MLX5_FLOW_NAMESPACE_KERNEL:
2331 	case MLX5_FLOW_NAMESPACE_LEFTOVERS:
2332 	case MLX5_FLOW_NAMESPACE_ANCHOR:
2333 		return true;
2334 	default:
2335 		return false;
2336 	}
2337 }
2338 
2339 struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
2340 						    enum mlx5_flow_namespace_type type)
2341 {
2342 	struct mlx5_flow_steering *steering = dev->priv.steering;
2343 	struct mlx5_flow_root_namespace *root_ns;
2344 	int prio = 0;
2345 	struct fs_prio *fs_prio;
2346 	struct mlx5_flow_namespace *ns;
2347 
2348 	if (!steering)
2349 		return NULL;
2350 
2351 	switch (type) {
2352 	case MLX5_FLOW_NAMESPACE_FDB:
2353 		if (steering->fdb_root_ns)
2354 			return &steering->fdb_root_ns->ns;
2355 		return NULL;
2356 	case MLX5_FLOW_NAMESPACE_PORT_SEL:
2357 		if (steering->port_sel_root_ns)
2358 			return &steering->port_sel_root_ns->ns;
2359 		return NULL;
2360 	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
2361 		if (steering->sniffer_rx_root_ns)
2362 			return &steering->sniffer_rx_root_ns->ns;
2363 		return NULL;
2364 	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
2365 		if (steering->sniffer_tx_root_ns)
2366 			return &steering->sniffer_tx_root_ns->ns;
2367 		return NULL;
2368 	case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
2369 		root_ns = steering->fdb_root_ns;
2370 		prio =  FDB_BYPASS_PATH;
2371 		break;
2372 	case MLX5_FLOW_NAMESPACE_EGRESS:
2373 	case MLX5_FLOW_NAMESPACE_EGRESS_IPSEC:
2374 	case MLX5_FLOW_NAMESPACE_EGRESS_MACSEC:
2375 		root_ns = steering->egress_root_ns;
2376 		prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
2377 		break;
2378 	case MLX5_FLOW_NAMESPACE_RDMA_RX:
2379 		root_ns = steering->rdma_rx_root_ns;
2380 		prio = RDMA_RX_BYPASS_PRIO;
2381 		break;
2382 	case MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL:
2383 		root_ns = steering->rdma_rx_root_ns;
2384 		prio = RDMA_RX_KERNEL_PRIO;
2385 		break;
2386 	case MLX5_FLOW_NAMESPACE_RDMA_TX:
2387 		root_ns = steering->rdma_tx_root_ns;
2388 		break;
2389 	case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS:
2390 		root_ns = steering->rdma_rx_root_ns;
2391 		prio = RDMA_RX_COUNTERS_PRIO;
2392 		break;
2393 	case MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS:
2394 		root_ns = steering->rdma_tx_root_ns;
2395 		prio = RDMA_TX_COUNTERS_PRIO;
2396 		break;
2397 	case MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC:
2398 		root_ns = steering->rdma_rx_root_ns;
2399 		prio = RDMA_RX_IPSEC_PRIO;
2400 		break;
2401 	case MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC:
2402 		root_ns = steering->rdma_tx_root_ns;
2403 		prio = RDMA_TX_IPSEC_PRIO;
2404 		break;
2405 	default: /* Must be NIC RX */
2406 		WARN_ON(!is_nic_rx_ns(type));
2407 		root_ns = steering->root_ns;
2408 		prio = type;
2409 		break;
2410 	}
2411 
2412 	if (!root_ns)
2413 		return NULL;
2414 
2415 	fs_prio = find_prio(&root_ns->ns, prio);
2416 	if (!fs_prio)
2417 		return NULL;
2418 
2419 	ns = list_first_entry(&fs_prio->node.children,
2420 			      typeof(*ns),
2421 			      node.list);
2422 
2423 	return ns;
2424 }
2425 EXPORT_SYMBOL(mlx5_get_flow_namespace);
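
/*
 * Illustrative sketch (not part of the driver): the usual lookup-then-create
 * pattern against a NIC RX namespace. The example_ name is hypothetical and
 * the attr values are arbitrary; mlx5_create_flow_table() is declared in
 * <linux/mlx5/fs.h>.
 */
static inline struct mlx5_flow_table *
example_create_bypass_table(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_BYPASS);
	if (!ns)
		return ERR_PTR(-EOPNOTSUPP);

	ft_attr.prio = 0;		/* first prio within the namespace */
	ft_attr.max_fte = 64;		/* arbitrary table size for the sketch */
	return mlx5_create_flow_table(ns, &ft_attr);
}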
2426 
2427 struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
2428 							      enum mlx5_flow_namespace_type type,
2429 							      int vport)
2430 {
2431 	struct mlx5_flow_steering *steering = dev->priv.steering;
2432 
2433 	if (!steering)
2434 		return NULL;
2435 
2436 	switch (type) {
2437 	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
2438 		if (vport >= steering->esw_egress_acl_vports)
2439 			return NULL;
2440 		if (steering->esw_egress_root_ns &&
2441 		    steering->esw_egress_root_ns[vport])
2442 			return &steering->esw_egress_root_ns[vport]->ns;
2443 		else
2444 			return NULL;
2445 	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
2446 		if (vport >= steering->esw_ingress_acl_vports)
2447 			return NULL;
2448 		if (steering->esw_ingress_root_ns &&
2449 		    steering->esw_ingress_root_ns[vport])
2450 			return &steering->esw_ingress_root_ns[vport]->ns;
2451 		else
2452 			return NULL;
2453 	default:
2454 		return NULL;
2455 	}
2456 }
2457 
2458 static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
2459 				       unsigned int prio,
2460 				       int num_levels,
2461 				       enum fs_node_type type)
2462 {
2463 	struct fs_prio *fs_prio;
2464 
2465 	fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
2466 	if (!fs_prio)
2467 		return ERR_PTR(-ENOMEM);
2468 
2469 	fs_prio->node.type = type;
2470 	tree_init_node(&fs_prio->node, NULL, del_sw_prio);
2471 	tree_add_node(&fs_prio->node, &ns->node);
2472 	fs_prio->num_levels = num_levels;
2473 	fs_prio->prio = prio;
2474 	list_add_tail(&fs_prio->node.list, &ns->node.children);
2475 
2476 	return fs_prio;
2477 }
2478 
2479 static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
2480 					      unsigned int prio,
2481 					      int num_levels)
2482 {
2483 	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
2484 }
2485 
2486 static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
2487 				      unsigned int prio, int num_levels)
2488 {
2489 	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
2490 }
2491 
2492 static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
2493 						     *ns)
2494 {
2495 	ns->node.type = FS_TYPE_NAMESPACE;
2496 
2497 	return ns;
2498 }
2499 
2500 static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
2501 						       int def_miss_act)
2502 {
2503 	struct mlx5_flow_namespace	*ns;
2504 
2505 	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
2506 	if (!ns)
2507 		return ERR_PTR(-ENOMEM);
2508 
2509 	fs_init_namespace(ns);
2510 	ns->def_miss_action = def_miss_act;
2511 	tree_init_node(&ns->node, NULL, del_sw_ns);
2512 	tree_add_node(&ns->node, &prio->node);
2513 	list_add_tail(&ns->node.list, &prio->node.children);
2514 
2515 	return ns;
2516 }
2517 
2518 static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
2519 			     struct init_tree_node *prio_metadata)
2520 {
2521 	struct fs_prio *fs_prio;
2522 	int i;
2523 
2524 	for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
2525 		fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
2526 		if (IS_ERR(fs_prio))
2527 			return PTR_ERR(fs_prio);
2528 	}
2529 	return 0;
2530 }
2531 
2532 #define FLOW_TABLE_BIT_SZ 1
2533 #define GET_FLOW_TABLE_CAP(dev, offset) \
2534 	((be32_to_cpu(*((__be32 *)(dev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur) +	\
2535 			offset / 32)) >>					\
2536 	  (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
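/*
 * GET_FLOW_TABLE_CAP() extracts one capability bit from the cached HCA
 * flow-table capability block: 'offset' is a bit offset produced by
 * FS_CAP()/__mlx5_bit_off(), offset / 32 selects the big-endian dword and
 * the shift/mask isolate the requested bit.
 */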
2537 static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
2538 {
2539 	int i;
2540 
2541 	for (i = 0; i < caps->arr_sz; i++) {
2542 		if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
2543 			return false;
2544 	}
2545 	return true;
2546 }
2547 
2548 static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
2549 				    struct init_tree_node *init_node,
2550 				    struct fs_node *fs_parent_node,
2551 				    struct init_tree_node *init_parent_node,
2552 				    int prio)
2553 {
2554 	int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
2555 					      flow_table_properties_nic_receive.
2556 					      max_ft_level);
2557 	struct mlx5_flow_namespace *fs_ns;
2558 	struct fs_prio *fs_prio;
2559 	struct fs_node *base;
2560 	int i;
2561 	int err;
2562 
2563 	if (init_node->type == FS_TYPE_PRIO) {
2564 		if ((init_node->min_ft_level > max_ft_level) ||
2565 		    !has_required_caps(steering->dev, &init_node->caps))
2566 			return 0;
2567 
2568 		fs_get_obj(fs_ns, fs_parent_node);
2569 		if (init_node->num_leaf_prios)
2570 			return create_leaf_prios(fs_ns, prio, init_node);
2571 		fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
2572 		if (IS_ERR(fs_prio))
2573 			return PTR_ERR(fs_prio);
2574 		base = &fs_prio->node;
2575 	} else if (init_node->type == FS_TYPE_NAMESPACE) {
2576 		fs_get_obj(fs_prio, fs_parent_node);
2577 		fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
2578 		if (IS_ERR(fs_ns))
2579 			return PTR_ERR(fs_ns);
2580 		base = &fs_ns->node;
2581 	} else {
2582 		return -EINVAL;
2583 	}
2584 	prio = 0;
2585 	for (i = 0; i < init_node->ar_size; i++) {
2586 		err = init_root_tree_recursive(steering, &init_node->children[i],
2587 					       base, init_node, prio);
2588 		if (err)
2589 			return err;
2590 		if (init_node->children[i].type == FS_TYPE_PRIO &&
2591 		    init_node->children[i].num_leaf_prios) {
2592 			prio += init_node->children[i].num_leaf_prios;
2593 		}
2594 	}
2595 
2596 	return 0;
2597 }
2598 
2599 static int init_root_tree(struct mlx5_flow_steering *steering,
2600 			  struct init_tree_node *init_node,
2601 			  struct fs_node *fs_parent_node)
2602 {
2603 	int err;
2604 	int i;
2605 
2606 	for (i = 0; i < init_node->ar_size; i++) {
2607 		err = init_root_tree_recursive(steering, &init_node->children[i],
2608 					       fs_parent_node,
2609 					       init_node, i);
2610 		if (err)
2611 			return err;
2612 	}
2613 	return 0;
2614 }
2615 
2616 static void del_sw_root_ns(struct fs_node *node)
2617 {
2618 	struct mlx5_flow_root_namespace *root_ns;
2619 	struct mlx5_flow_namespace *ns;
2620 
2621 	fs_get_obj(ns, node);
2622 	root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns);
2623 	mutex_destroy(&root_ns->chain_lock);
2624 	kfree(node);
2625 }
2626 
2627 static struct mlx5_flow_root_namespace
2628 *create_root_ns(struct mlx5_flow_steering *steering,
2629 		enum fs_flow_table_type table_type)
2630 {
2631 	const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
2632 	struct mlx5_flow_root_namespace *root_ns;
2633 	struct mlx5_flow_namespace *ns;
2634 
2635 	/* Create the root namespace */
2636 	root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
2637 	if (!root_ns)
2638 		return NULL;
2639 
2640 	root_ns->dev = steering->dev;
2641 	root_ns->table_type = table_type;
2642 	root_ns->cmds = cmds;
2643 
2644 	INIT_LIST_HEAD(&root_ns->underlay_qpns);
2645 
2646 	ns = &root_ns->ns;
2647 	fs_init_namespace(ns);
2648 	mutex_init(&root_ns->chain_lock);
2649 	tree_init_node(&ns->node, NULL, del_sw_root_ns);
2650 	tree_add_node(&ns->node, NULL);
2651 
2652 	return root_ns;
2653 }
2654 
2655 static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);
2656 
2657 static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
2658 {
2659 	struct fs_prio *prio;
2660 
2661 	fs_for_each_prio(prio, ns) {
2662 		 /* This updates prio start_level and num_levels */
2663 		set_prio_attrs_in_prio(prio, acc_level);
2664 		acc_level += prio->num_levels;
2665 	}
2666 	return acc_level;
2667 }
2668 
2669 static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
2670 {
2671 	struct mlx5_flow_namespace *ns;
2672 	int acc_level_ns = acc_level;
2673 
2674 	prio->start_level = acc_level;
2675 	fs_for_each_ns(ns, prio) {
2676 		/* This updates start_level and num_levels of ns's priority descendants */
2677 		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
2678 
2679 		/* If this is a prio with chains, we can jump from one chain
2680 		 * (namespace) to another, so we accumulate the levels
2681 		 */
2682 		if (prio->node.type == FS_TYPE_PRIO_CHAINS)
2683 			acc_level = acc_level_ns;
2684 	}
2685 
2686 	if (!prio->num_levels)
2687 		prio->num_levels = acc_level_ns - prio->start_level;
2688 	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
2689 }
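
/*
 * Worked example (illustrative numbers): a prio starting at level 10 that
 * holds two namespaces of 3 levels each. For a plain FS_TYPE_PRIO the
 * namespaces overlay the same range, so num_levels ends up 3; for
 * FS_TYPE_PRIO_CHAINS the ranges are stacked and num_levels becomes 6.
 */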
2690 
2691 static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
2692 {
2693 	struct mlx5_flow_namespace *ns = &root_ns->ns;
2694 	struct fs_prio *prio;
2695 	int start_level = 0;
2696 
2697 	fs_for_each_prio(prio, ns) {
2698 		set_prio_attrs_in_prio(prio, start_level);
2699 		start_level += prio->num_levels;
2700 	}
2701 }
2702 
2703 #define ANCHOR_PRIO 0
2704 #define ANCHOR_SIZE 1
2705 #define ANCHOR_LEVEL 0
2706 static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
2707 {
2708 	struct mlx5_flow_namespace *ns = NULL;
2709 	struct mlx5_flow_table_attr ft_attr = {};
2710 	struct mlx5_flow_table *ft;
2711 
2712 	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
2713 	if (WARN_ON(!ns))
2714 		return -EINVAL;
2715 
2716 	ft_attr.max_fte = ANCHOR_SIZE;
2717 	ft_attr.level   = ANCHOR_LEVEL;
2718 	ft_attr.prio    = ANCHOR_PRIO;
2719 
2720 	ft = mlx5_create_flow_table(ns, &ft_attr);
2721 	if (IS_ERR(ft)) {
2722 		mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
2723 		return PTR_ERR(ft);
2724 	}
2725 	return 0;
2726 }
2727 
2728 static int init_root_ns(struct mlx5_flow_steering *steering)
2729 {
2730 	int err;
2731 
2732 	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
2733 	if (!steering->root_ns)
2734 		return -ENOMEM;
2735 
2736 	err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
2737 	if (err)
2738 		goto out_err;
2739 
2740 	set_prio_attrs(steering->root_ns);
2741 	err = create_anchor_flow_table(steering);
2742 	if (err)
2743 		goto out_err;
2744 
2745 	return 0;
2746 
2747 out_err:
2748 	cleanup_root_ns(steering->root_ns);
2749 	steering->root_ns = NULL;
2750 	return err;
2751 }
2752 
2753 static void clean_tree(struct fs_node *node)
2754 {
2755 	if (node) {
2756 		struct fs_node *iter;
2757 		struct fs_node *temp;
2758 
2759 		tree_get_node(node);
2760 		list_for_each_entry_safe(iter, temp, &node->children, list)
2761 			clean_tree(iter);
2762 		tree_put_node(node, false);
2763 		tree_remove_node(node, false);
2764 	}
2765 }
2766 
2767 static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
2768 {
2769 	if (!root_ns)
2770 		return;
2771 
2772 	clean_tree(&root_ns->ns.node);
2773 }
2774 
2775 static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
2776 {
2777 	struct fs_prio *prio;
2778 
2779 	steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
2780 	if (!steering->sniffer_tx_root_ns)
2781 		return -ENOMEM;
2782 
2783 	/* Create single prio */
2784 	prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
2785 	return PTR_ERR_OR_ZERO(prio);
2786 }
2787 
2788 static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
2789 {
2790 	struct fs_prio *prio;
2791 
2792 	steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
2793 	if (!steering->sniffer_rx_root_ns)
2794 		return -ENOMEM;
2795 
2796 	/* Create single prio */
2797 	prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
2798 	return PTR_ERR_OR_ZERO(prio);
2799 }
2800 
2801 #define PORT_SEL_NUM_LEVELS 3
2802 static int init_port_sel_root_ns(struct mlx5_flow_steering *steering)
2803 {
2804 	struct fs_prio *prio;
2805 
2806 	steering->port_sel_root_ns = create_root_ns(steering, FS_FT_PORT_SEL);
2807 	if (!steering->port_sel_root_ns)
2808 		return -ENOMEM;
2809 
2810 	/* Create single prio */
2811 	prio = fs_create_prio(&steering->port_sel_root_ns->ns, 0,
2812 			      PORT_SEL_NUM_LEVELS);
2813 	return PTR_ERR_OR_ZERO(prio);
2814 }
2815 
2816 static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
2817 {
2818 	int err;
2819 
2820 	steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
2821 	if (!steering->rdma_rx_root_ns)
2822 		return -ENOMEM;
2823 
2824 	err = init_root_tree(steering, &rdma_rx_root_fs,
2825 			     &steering->rdma_rx_root_ns->ns.node);
2826 	if (err)
2827 		goto out_err;
2828 
2829 	set_prio_attrs(steering->rdma_rx_root_ns);
2830 
2831 	return 0;
2832 
2833 out_err:
2834 	cleanup_root_ns(steering->rdma_rx_root_ns);
2835 	steering->rdma_rx_root_ns = NULL;
2836 	return err;
2837 }
2838 
2839 static int init_rdma_tx_root_ns(struct mlx5_flow_steering *steering)
2840 {
2841 	int err;
2842 
2843 	steering->rdma_tx_root_ns = create_root_ns(steering, FS_FT_RDMA_TX);
2844 	if (!steering->rdma_tx_root_ns)
2845 		return -ENOMEM;
2846 
2847 	err = init_root_tree(steering, &rdma_tx_root_fs,
2848 			     &steering->rdma_tx_root_ns->ns.node);
2849 	if (err)
2850 		goto out_err;
2851 
2852 	set_prio_attrs(steering->rdma_tx_root_ns);
2853 
2854 	return 0;
2855 
2856 out_err:
2857 	cleanup_root_ns(steering->rdma_tx_root_ns);
2858 	steering->rdma_tx_root_ns = NULL;
2859 	return err;
2860 }
2861 
2862 /* FT and TC chains are stored in the same array so we can reuse
2863  * mlx5_get_fdb_sub_ns() and the TC API for FT chains.
2864  * When creating a new ns for each chain, store it in the first available slot.
2865  * Assume TC chains are created and stored first and only then the FT chain.
2866  */
2867 static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2868 					struct mlx5_flow_namespace *ns)
2869 {
2870 	int chain = 0;
2871 
2872 	while (steering->fdb_sub_ns[chain])
2873 		++chain;
2874 
2875 	steering->fdb_sub_ns[chain] = ns;
2876 }
2877 
2878 static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2879 					struct fs_prio *maj_prio)
2880 {
2881 	struct mlx5_flow_namespace *ns;
2882 	struct fs_prio *min_prio;
2883 	int prio;
2884 
2885 	ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
2886 	if (IS_ERR(ns))
2887 		return PTR_ERR(ns);
2888 
2889 	for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
2890 		min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
2891 		if (IS_ERR(min_prio))
2892 			return PTR_ERR(min_prio);
2893 	}
2894 
2895 	store_fdb_sub_ns_prio_chain(steering, ns);
2896 
2897 	return 0;
2898 }
2899 
2900 static int create_fdb_chains(struct mlx5_flow_steering *steering,
2901 			     int fs_prio,
2902 			     int chains)
2903 {
2904 	struct fs_prio *maj_prio;
2905 	int levels;
2906 	int chain;
2907 	int err;
2908 
2909 	levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
2910 	maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
2911 					  fs_prio,
2912 					  levels);
2913 	if (IS_ERR(maj_prio))
2914 		return PTR_ERR(maj_prio);
2915 
2916 	for (chain = 0; chain < chains; chain++) {
2917 		err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
2918 		if (err)
2919 			return err;
2920 	}
2921 
2922 	return 0;
2923 }
2924 
2925 static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
2926 {
2927 	int err;
2928 
2929 	steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
2930 				       sizeof(*steering->fdb_sub_ns),
2931 				       GFP_KERNEL);
2932 	if (!steering->fdb_sub_ns)
2933 		return -ENOMEM;
2934 
2935 	err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
2936 	if (err)
2937 		return err;
2938 
2939 	err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
2940 	if (err)
2941 		return err;
2942 
2943 	return 0;
2944 }
2945 
2946 static int create_fdb_bypass(struct mlx5_flow_steering *steering)
2947 {
2948 	struct mlx5_flow_namespace *ns;
2949 	struct fs_prio *prio;
2950 	int i;
2951 
2952 	prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH, 0);
2953 	if (IS_ERR(prio))
2954 		return PTR_ERR(prio);
2955 
2956 	ns = fs_create_namespace(prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
2957 	if (IS_ERR(ns))
2958 		return PTR_ERR(ns);
2959 
2960 	for (i = 0; i < MLX5_BY_PASS_NUM_REGULAR_PRIOS; i++) {
2961 		prio = fs_create_prio(ns, i, 1);
2962 		if (IS_ERR(prio))
2963 			return PTR_ERR(prio);
2964 	}
2965 	return 0;
2966 }
2967 
2968 static void cleanup_fdb_root_ns(struct mlx5_flow_steering *steering)
2969 {
2970 	cleanup_root_ns(steering->fdb_root_ns);
2971 	steering->fdb_root_ns = NULL;
2972 	kfree(steering->fdb_sub_ns);
2973 	steering->fdb_sub_ns = NULL;
2974 }
2975 
2976 static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
2977 {
2978 	struct fs_prio *maj_prio;
2979 	int err;
2980 
2981 	steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
2982 	if (!steering->fdb_root_ns)
2983 		return -ENOMEM;
2984 
2985 	err = create_fdb_bypass(steering);
2986 	if (err)
2987 		goto out_err;
2988 
2989 	err = create_fdb_fast_path(steering);
2990 	if (err)
2991 		goto out_err;
2992 
2993 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_TC_MISS, 1);
2994 	if (IS_ERR(maj_prio)) {
2995 		err = PTR_ERR(maj_prio);
2996 		goto out_err;
2997 	}
2998 
2999 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BR_OFFLOAD, 3);
3000 	if (IS_ERR(maj_prio)) {
3001 		err = PTR_ERR(maj_prio);
3002 		goto out_err;
3003 	}
3004 
3005 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
3006 	if (IS_ERR(maj_prio)) {
3007 		err = PTR_ERR(maj_prio);
3008 		goto out_err;
3009 	}
3010 
3011 	/* We put this priority last, knowing that nothing will get here
3012 	 * unless explicitly forwarded to. This is possible because the
3013 	 * slow path tables have catch-all rules and nothing gets past
3014 	 * those tables.
3015 	 */
3016 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1);
3017 	if (IS_ERR(maj_prio)) {
3018 		err = PTR_ERR(maj_prio);
3019 		goto out_err;
3020 	}
3021 
3022 	set_prio_attrs(steering->fdb_root_ns);
3023 	return 0;
3024 
3025 out_err:
3026 	cleanup_fdb_root_ns(steering);
3027 	return err;
3028 }
3029 
3030 static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
3031 {
3032 	struct fs_prio *prio;
3033 
3034 	steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
3035 	if (!steering->esw_egress_root_ns[vport])
3036 		return -ENOMEM;
3037 
3038 	/* Create single prio */
3039 	prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
3040 	return PTR_ERR_OR_ZERO(prio);
3041 }
3042 
3043 static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
3044 {
3045 	struct fs_prio *prio;
3046 
3047 	steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
3048 	if (!steering->esw_ingress_root_ns[vport])
3049 		return -ENOMEM;
3050 
3051 	/* Create single prio */
3052 	prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
3053 	return PTR_ERR_OR_ZERO(prio);
3054 }
3055 
3056 int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports)
3057 {
3058 	struct mlx5_flow_steering *steering = dev->priv.steering;
3059 	int err;
3060 	int i;
3061 
3062 	steering->esw_egress_root_ns =
3063 			kcalloc(total_vports,
3064 				sizeof(*steering->esw_egress_root_ns),
3065 				GFP_KERNEL);
3066 	if (!steering->esw_egress_root_ns)
3067 		return -ENOMEM;
3068 
3069 	for (i = 0; i < total_vports; i++) {
3070 		err = init_egress_acl_root_ns(steering, i);
3071 		if (err)
3072 			goto cleanup_root_ns;
3073 	}
3074 	steering->esw_egress_acl_vports = total_vports;
3075 	return 0;
3076 
3077 cleanup_root_ns:
3078 	for (i--; i >= 0; i--)
3079 		cleanup_root_ns(steering->esw_egress_root_ns[i]);
3080 	kfree(steering->esw_egress_root_ns);
3081 	steering->esw_egress_root_ns = NULL;
3082 	return err;
3083 }
3084 
3085 void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev)
3086 {
3087 	struct mlx5_flow_steering *steering = dev->priv.steering;
3088 	int i;
3089 
3090 	if (!steering->esw_egress_root_ns)
3091 		return;
3092 
3093 	for (i = 0; i < steering->esw_egress_acl_vports; i++)
3094 		cleanup_root_ns(steering->esw_egress_root_ns[i]);
3095 
3096 	kfree(steering->esw_egress_root_ns);
3097 	steering->esw_egress_root_ns = NULL;
3098 }
3099 
3100 int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports)
3101 {
3102 	struct mlx5_flow_steering *steering = dev->priv.steering;
3103 	int err;
3104 	int i;
3105 
3106 	steering->esw_ingress_root_ns =
3107 			kcalloc(total_vports,
3108 				sizeof(*steering->esw_ingress_root_ns),
3109 				GFP_KERNEL);
3110 	if (!steering->esw_ingress_root_ns)
3111 		return -ENOMEM;
3112 
3113 	for (i = 0; i < total_vports; i++) {
3114 		err = init_ingress_acl_root_ns(steering, i);
3115 		if (err)
3116 			goto cleanup_root_ns;
3117 	}
3118 	steering->esw_ingress_acl_vports = total_vports;
3119 	return 0;
3120 
3121 cleanup_root_ns:
3122 	for (i--; i >= 0; i--)
3123 		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
3124 	kfree(steering->esw_ingress_root_ns);
3125 	steering->esw_ingress_root_ns = NULL;
3126 	return err;
3127 }
3128 
3129 void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
3130 {
3131 	struct mlx5_flow_steering *steering = dev->priv.steering;
3132 	int i;
3133 
3134 	if (!steering->esw_ingress_root_ns)
3135 		return;
3136 
3137 	for (i = 0; i < steering->esw_ingress_acl_vports; i++)
3138 		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
3139 
3140 	kfree(steering->esw_ingress_root_ns);
3141 	steering->esw_ingress_root_ns = NULL;
3142 }
3143 
3144 u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type)
3145 {
3146 	struct mlx5_flow_root_namespace *root;
3147 	struct mlx5_flow_namespace *ns;
3148 
3149 	ns = mlx5_get_flow_namespace(dev, type);
3150 	if (!ns)
3151 		return 0;
3152 
3153 	root = find_root(&ns->node);
3154 	if (!root)
3155 		return 0;
3156 
3157 	return root->cmds->get_capabilities(root, root->table_type);
3158 }
3159 
3160 static int init_egress_root_ns(struct mlx5_flow_steering *steering)
3161 {
3162 	int err;
3163 
3164 	steering->egress_root_ns = create_root_ns(steering,
3165 						  FS_FT_NIC_TX);
3166 	if (!steering->egress_root_ns)
3167 		return -ENOMEM;
3168 
3169 	err = init_root_tree(steering, &egress_root_fs,
3170 			     &steering->egress_root_ns->ns.node);
3171 	if (err)
3172 		goto cleanup;
3173 	set_prio_attrs(steering->egress_root_ns);
3174 	return 0;
3175 cleanup:
3176 	cleanup_root_ns(steering->egress_root_ns);
3177 	steering->egress_root_ns = NULL;
3178 	return err;
3179 }
3180 
3181 static int mlx5_fs_mode_validate(struct devlink *devlink, u32 id,
3182 				 union devlink_param_value val,
3183 				 struct netlink_ext_ack *extack)
3184 {
3185 	struct mlx5_core_dev *dev = devlink_priv(devlink);
3186 	char *value = val.vstr;
3187 	int err = 0;
3188 
3189 	if (!strcmp(value, "dmfs")) {
3190 		return 0;
3191 	} else if (!strcmp(value, "smfs")) {
3192 		u8 eswitch_mode;
3193 		bool smfs_cap;
3194 
3195 		eswitch_mode = mlx5_eswitch_mode(dev);
3196 		smfs_cap = mlx5_fs_dr_is_supported(dev);
3197 
3198 		if (!smfs_cap) {
3199 			err = -EOPNOTSUPP;
3200 			NL_SET_ERR_MSG_MOD(extack,
3201 					   "Software managed steering is not supported by current device");
3202 		} else if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
3205 			NL_SET_ERR_MSG_MOD(extack,
3206 					   "Software managed steering is not supported when eswitch offloads enabled.");
3207 			err = -EOPNOTSUPP;
3208 		}
3209 	} else {
3210 		NL_SET_ERR_MSG_MOD(extack,
3211 				   "Bad parameter: supported values are [\"dmfs\", \"smfs\"]");
3212 		err = -EINVAL;
3213 	}
3214 
3215 	return err;
3216 }
3217 
3218 static int mlx5_fs_mode_set(struct devlink *devlink, u32 id,
3219 			    struct devlink_param_gset_ctx *ctx)
3220 {
3221 	struct mlx5_core_dev *dev = devlink_priv(devlink);
3222 	enum mlx5_flow_steering_mode mode;
3223 
3224 	if (!strcmp(ctx->val.vstr, "smfs"))
3225 		mode = MLX5_FLOW_STEERING_MODE_SMFS;
3226 	else
3227 		mode = MLX5_FLOW_STEERING_MODE_DMFS;
3228 	dev->priv.steering->mode = mode;
3229 
3230 	return 0;
3231 }
3232 
3233 static int mlx5_fs_mode_get(struct devlink *devlink, u32 id,
3234 			    struct devlink_param_gset_ctx *ctx)
3235 {
3236 	struct mlx5_core_dev *dev = devlink_priv(devlink);
3237 
3238 	if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS)
3239 		strcpy(ctx->val.vstr, "smfs");
3240 	else
3241 		strcpy(ctx->val.vstr, "dmfs");
3242 	return 0;
3243 }
3244 
3245 static const struct devlink_param mlx5_fs_params[] = {
3246 	DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
3247 			     "flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
3248 			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
3249 			     mlx5_fs_mode_get, mlx5_fs_mode_set,
3250 			     mlx5_fs_mode_validate),
3251 };
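
/*
 * The steering mode is runtime-configurable through devlink, e.g. (sketch,
 * device address is arbitrary):
 *   devlink dev param set pci/0000:06:00.0 \
 *           name flow_steering_mode value smfs cmode runtime
 */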
3252 
3253 void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
3254 {
3255 	struct mlx5_flow_steering *steering = dev->priv.steering;
3256 
3257 	cleanup_root_ns(steering->root_ns);
3258 	cleanup_fdb_root_ns(steering);
3259 	cleanup_root_ns(steering->port_sel_root_ns);
3260 	cleanup_root_ns(steering->sniffer_rx_root_ns);
3261 	cleanup_root_ns(steering->sniffer_tx_root_ns);
3262 	cleanup_root_ns(steering->rdma_rx_root_ns);
3263 	cleanup_root_ns(steering->rdma_tx_root_ns);
3264 	cleanup_root_ns(steering->egress_root_ns);
3265 
3266 	devl_params_unregister(priv_to_devlink(dev), mlx5_fs_params,
3267 			       ARRAY_SIZE(mlx5_fs_params));
3268 }
3269 
3270 int mlx5_fs_core_init(struct mlx5_core_dev *dev)
3271 {
3272 	struct mlx5_flow_steering *steering = dev->priv.steering;
3273 	int err;
3274 
3275 	err = devl_params_register(priv_to_devlink(dev), mlx5_fs_params,
3276 				   ARRAY_SIZE(mlx5_fs_params));
3277 	if (err)
3278 		return err;
3279 
3280 	if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
3281 	      (MLX5_CAP_GEN(dev, nic_flow_table))) ||
3282 	     ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
3283 	      MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
3284 	    MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
3285 		err = init_root_ns(steering);
3286 		if (err)
3287 			goto err;
3288 	}
3289 
3290 	if (MLX5_ESWITCH_MANAGER(dev)) {
3291 		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
3292 			err = init_fdb_root_ns(steering);
3293 			if (err)
3294 				goto err;
3295 		}
3296 	}
3297 
3298 	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
3299 		err = init_sniffer_rx_root_ns(steering);
3300 		if (err)
3301 			goto err;
3302 	}
3303 
3304 	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
3305 		err = init_sniffer_tx_root_ns(steering);
3306 		if (err)
3307 			goto err;
3308 	}
3309 
3310 	if (MLX5_CAP_FLOWTABLE_PORT_SELECTION(dev, ft_support)) {
3311 		err = init_port_sel_root_ns(steering);
3312 		if (err)
3313 			goto err;
3314 	}
3315 
3316 	if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
3317 	    MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
3318 		err = init_rdma_rx_root_ns(steering);
3319 		if (err)
3320 			goto err;
3321 	}
3322 
3323 	if (MLX5_CAP_FLOWTABLE_RDMA_TX(dev, ft_support)) {
3324 		err = init_rdma_tx_root_ns(steering);
3325 		if (err)
3326 			goto err;
3327 	}
3328 
3329 	if (MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
3330 		err = init_egress_root_ns(steering);
3331 		if (err)
3332 			goto err;
3333 	}
3334 
3335 	return 0;
3336 
3337 err:
3338 	mlx5_fs_core_cleanup(dev);
3339 	return err;
3340 }
3341 
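/* Release what mlx5_fs_core_alloc() built: the FTE and flow-group
 * caches, the steering context itself, the flow-table pool and the
 * flow counter statistics.
 */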
void mlx5_fs_core_free(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	kmem_cache_destroy(steering->ftes_cache);
	kmem_cache_destroy(steering->fgs_cache);
	kfree(steering);
	mlx5_ft_pool_destroy(dev);
	mlx5_cleanup_fc_stats(dev);
}

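/* Allocate the flow steering context for @dev and pick the initial
 * steering mode: software-managed (SMFS) when the device supports it,
 * otherwise device/firmware-managed (DMFS).
 */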
int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering;
	int err = 0;

	err = mlx5_init_fc_stats(dev);
	if (err)
		return err;

	err = mlx5_ft_pool_init(dev);
	if (err)
		goto err;

	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
	if (!steering) {
		err = -ENOMEM;
		goto err;
	}

	steering->dev = dev;
	dev->priv.steering = steering;

	if (mlx5_fs_dr_is_supported(dev))
		steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
	else
		steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;

	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
						sizeof(struct mlx5_flow_group), 0,
						0, NULL);
	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
						 0, NULL);
	if (!steering->ftes_cache || !steering->fgs_cache) {
		err = -ENOMEM;
		goto err;
	}

	return 0;

err:
	mlx5_fs_core_free(dev);
	return err;
}

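/* Point the RX root flow table at an underlay QP (used, for instance,
 * by enhanced-offloads IPoIB) and remember the QPN so it can be
 * removed later. chain_lock keeps the update atomic with respect to
 * root table changes.
 */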
int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *new_uqp;
	int err = 0;

	new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
	if (!new_uqp)
		return -ENOMEM;

	mutex_lock(&root->chain_lock);

	if (!root->root_ft) {
		err = -EINVAL;
		goto update_ft_fail;
	}

	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
					 false);
	if (err) {
		mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
			       underlay_qpn, err);
		goto update_ft_fail;
	}

	new_uqp->qpn = underlay_qpn;
	list_add_tail(&new_uqp->list, &root->underlay_qpns);

	mutex_unlock(&root->chain_lock);

	return 0;

update_ft_fail:
	mutex_unlock(&root->chain_lock);
	kfree(new_uqp);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);

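/* Reverse of mlx5_fs_add_rx_underlay_qpn(): detach the underlay QPN
 * from the root flow table and drop it from the tracking list.
 */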
int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *uqp;
	bool found = false;
	int err = 0;

	mutex_lock(&root->chain_lock);
	list_for_each_entry(uqp, &root->underlay_qpns, list) {
		if (uqp->qpn == underlay_qpn) {
			found = true;
			break;
		}
	}

	if (!found) {
		mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
			       underlay_qpn);
		err = -EINVAL;
		goto out;
	}

	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
					 true);
	if (err)
		mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
			       underlay_qpn, err);

	list_del(&uqp->list);
	mutex_unlock(&root->chain_lock);
	kfree(uqp);

	return 0;

out:
	mutex_unlock(&root->chain_lock);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);

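/* Resolve a namespace type to its root namespace. ACL namespaces are
 * looked up per vport (vport 0 here); everything else goes through the
 * generic namespace lookup.
 */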
static struct mlx5_flow_root_namespace
*get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
{
	struct mlx5_flow_namespace *ns;

	if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
	    ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
		ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
	else
		ns = mlx5_get_flow_namespace(dev, ns_type);
	if (!ns)
		return NULL;

	return find_root(&ns->node);
}

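/* Allocate a modify-header context that flow rules can reference via
 * MLX5_FLOW_CONTEXT_ACTION_MOD_HDR. Returns an ERR_PTR on failure.
 * Typical usage (sketch; "actions" and "num_actions" stand for a
 * caller-built action array):
 *
 *	struct mlx5_modify_hdr *mh;
 *
 *	mh = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
 *				      num_actions, actions);
 *	if (IS_ERR(mh))
 *		return PTR_ERR(mh);
 *	...
 *	mlx5_modify_header_dealloc(dev, mh);
 */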
struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
						 u8 ns_type, u8 num_actions,
						 void *modify_actions)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_modify_hdr *modify_hdr;
	int err;

	root = get_root_namespace(dev, ns_type);
	if (!root)
		return ERR_PTR(-EOPNOTSUPP);

	modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL);
	if (!modify_hdr)
		return ERR_PTR(-ENOMEM);

	modify_hdr->ns_type = ns_type;
	err = root->cmds->modify_header_alloc(root, ns_type, num_actions,
					      modify_actions, modify_hdr);
	if (err) {
		kfree(modify_hdr);
		return ERR_PTR(err);
	}

	return modify_hdr;
}
EXPORT_SYMBOL(mlx5_modify_header_alloc);

void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
				struct mlx5_modify_hdr *modify_hdr)
{
	struct mlx5_flow_root_namespace *root;

	root = get_root_namespace(dev, modify_hdr->ns_type);
	if (WARN_ON(!root))
		return;
	root->cmds->modify_header_dealloc(root, modify_hdr);
	kfree(modify_hdr);
}
EXPORT_SYMBOL(mlx5_modify_header_dealloc);

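/* Allocate a packet reformat (e.g. encap/decap) context described by
 * @params, usable by rules in the given namespace. Mirrors the
 * modify-header API: check the result with IS_ERR() and release it
 * with mlx5_packet_reformat_dealloc().
 */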
struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
						     struct mlx5_pkt_reformat_params *params,
						     enum mlx5_flow_namespace_type ns_type)
{
	struct mlx5_pkt_reformat *pkt_reformat;
	struct mlx5_flow_root_namespace *root;
	int err;

	root = get_root_namespace(dev, ns_type);
	if (!root)
		return ERR_PTR(-EOPNOTSUPP);

	pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL);
	if (!pkt_reformat)
		return ERR_PTR(-ENOMEM);

	pkt_reformat->ns_type = ns_type;
	pkt_reformat->reformat_type = params->type;
	err = root->cmds->packet_reformat_alloc(root, params, ns_type,
						pkt_reformat);
	if (err) {
		kfree(pkt_reformat);
		return ERR_PTR(err);
	}

	return pkt_reformat;
}
EXPORT_SYMBOL(mlx5_packet_reformat_alloc);

void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
				  struct mlx5_pkt_reformat *pkt_reformat)
{
	struct mlx5_flow_root_namespace *root;

	root = get_root_namespace(dev, pkt_reformat->ns_type);
	if (WARN_ON(!root))
		return;
	root->cmds->packet_reformat_dealloc(root, pkt_reformat);
	kfree(pkt_reformat);
}
EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);

int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer)
{
	return definer->id;
}

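/* Create a match definer from a format id and match mask; the id
 * returned by mlx5_get_match_definer_id() can then be referenced by
 * definer-based flow groups. Released via mlx5_destroy_match_definer().
 */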
struct mlx5_flow_definer *
mlx5_create_match_definer(struct mlx5_core_dev *dev,
			  enum mlx5_flow_namespace_type ns_type, u16 format_id,
			  u32 *match_mask)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_definer *definer;
	int id;

	root = get_root_namespace(dev, ns_type);
	if (!root)
		return ERR_PTR(-EOPNOTSUPP);

	definer = kzalloc(sizeof(*definer), GFP_KERNEL);
	if (!definer)
		return ERR_PTR(-ENOMEM);

	definer->ns_type = ns_type;
	id = root->cmds->create_match_definer(root, format_id, match_mask);
	if (id < 0) {
		mlx5_core_warn(root->dev, "Failed to create match definer (%d)\n", id);
		kfree(definer);
		return ERR_PTR(id);
	}
	definer->id = id;
	return definer;
}

void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
				struct mlx5_flow_definer *definer)
{
	struct mlx5_flow_root_namespace *root;

	root = get_root_namespace(dev, definer->ns_type);
	if (WARN_ON(!root))
		return;

	root->cmds->destroy_match_definer(root, definer->id);
	kfree(definer);
}

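/* Pair this root namespace with a peer (or unpair when @peer_ns is
 * NULL). Both sides must run the same steering mode.
 */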
int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
				 struct mlx5_flow_root_namespace *peer_ns)
{
	if (peer_ns && ns->mode != peer_ns->mode) {
		mlx5_core_err(ns->dev,
			      "Can't peer namespaces of different steering modes\n");
		return -EINVAL;
	}

	return ns->cmds->set_peer(ns, peer_ns);
}

/* This function should be called only at init stage of the namespace.
 * It is not safe to call this function while steering operations
 * are executed in the namespace.
 */
int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
				 enum mlx5_flow_steering_mode mode)
{
	struct mlx5_flow_root_namespace *root;
	const struct mlx5_flow_cmds *cmds;
	int err;

	root = find_root(&ns->node);
	if (&root->ns != ns)
		/* Can't set cmds to non root namespace */
		return -EINVAL;

	if (root->table_type != FS_FT_FDB)
		return -EOPNOTSUPP;

	if (root->mode == mode)
		return 0;

	if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
		cmds = mlx5_fs_cmd_get_dr_cmds();
	else
		cmds = mlx5_fs_cmd_get_fw_cmds();
	if (!cmds)
		return -EOPNOTSUPP;

	err = cmds->create_ns(root);
	if (err) {
		mlx5_core_err(root->dev, "Failed to create flow namespace (%d)\n",
			      err);
		return err;
	}

	root->cmds->destroy_ns(root);
	root->cmds = cmds;
	root->mode = mode;

	return 0;
}