/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/hash.h>
#include <linux/mlx5/fs.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include "en.h"

#define ARFS_HASH_SHIFT BITS_PER_BYTE
#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)

struct arfs_table {
	struct mlx5e_flow_table  ft;
	struct mlx5_flow_handle  *default_rule;
	struct hlist_head        rules_hash[ARFS_HASH_SIZE];
};

enum arfs_type {
	ARFS_IPV4_TCP,
	ARFS_IPV6_TCP,
	ARFS_IPV4_UDP,
	ARFS_IPV6_UDP,
	ARFS_NUM_TYPES,
};

struct mlx5e_arfs_tables {
	struct arfs_table arfs_tables[ARFS_NUM_TYPES];
	/* Protect aRFS rules list */
	spinlock_t              arfs_lock;
	int                     last_filter_id;
	struct workqueue_struct *wq;
};

struct arfs_tuple {
	__be16 etype;
	u8     ip_proto;
	union {
		__be32 src_ipv4;
		struct in6_addr src_ipv6;
	};
	union {
		__be32 dst_ipv4;
		struct in6_addr dst_ipv6;
	};
	__be16 src_port;
	__be16 dst_port;
};

struct arfs_rule {
	struct mlx5e_priv       *priv;
	struct work_struct      arfs_work;
	struct mlx5_flow_handle *rule;
	struct hlist_node       hlist;
	int                     rxq;
	/* Flow ID passed to ndo_rx_flow_steer */
	int                     flow_id;
	/* Filter ID returned by ndo_rx_flow_steer */
	int                     filter_id;
	struct arfs_tuple       tuple;
};

#define mlx5e_for_each_arfs_rule(hn, tmp, arfs_tables, i, j) \
	for (i = 0; i < ARFS_NUM_TYPES; i++) \
		mlx5e_for_each_hash_arfs_rule(hn, tmp, arfs_tables[i].rules_hash, j)

#define mlx5e_for_each_hash_arfs_rule(hn, tmp, hash, j) \
	for (j = 0; j < ARFS_HASH_SIZE; j++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[j], hlist)

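/* Map an aRFS table type to the corresponding TTC traffic type. */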
static enum mlx5_traffic_types arfs_get_tt(enum arfs_type type)
{
	switch (type) {
	case ARFS_IPV4_TCP:
		return MLX5_TT_IPV4_TCP;
	case ARFS_IPV4_UDP:
		return MLX5_TT_IPV4_UDP;
	case ARFS_IPV6_TCP:
		return MLX5_TT_IPV6_TCP;
	case ARFS_IPV6_UDP:
		return MLX5_TT_IPV6_UDP;
	default:
		return -EINVAL;
	}
}

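/* Point each TTC rule back at its default destination, detaching the aRFS tables. */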
static int arfs_disable(struct mlx5e_flow_steering *fs)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
	int err, i;

	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		/* Modify ttc rules destination back to their default */
		err = mlx5_ttc_fwd_default_dest(ttc, arfs_get_tt(i));
		if (err) {
			fs_err(fs,
			       "%s: modify ttc[%d] default destination failed, err(%d)\n",
			       __func__, arfs_get_tt(i), err);
			return err;
		}
	}
	return 0;
}

static void arfs_del_rules(struct mlx5e_flow_steering *fs);

int mlx5e_arfs_disable(struct mlx5e_flow_steering *fs)
{
	arfs_del_rules(fs);

	return arfs_disable(fs);
}

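/* Redirect each TTC traffic type (TCP/UDP over IPv4/IPv6) into its aRFS flow table. */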
int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
	struct mlx5_flow_destination dest = {};
	int err, i;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		dest.ft = arfs->arfs_tables[i].ft.t;
		/* Modify ttc rules destination to point to the aRFS FTs */
		err = mlx5_ttc_fwd_dest(ttc, arfs_get_tt(i), &dest);
		if (err) {
			fs_err(fs, "%s: modify ttc[%d] dest to arfs, failed err(%d)\n",
			       __func__, arfs_get_tt(i), err);
			arfs_disable(fs);
			return err;
		}
	}
	return 0;
}

static void arfs_destroy_table(struct arfs_table *arfs_t)
{
	mlx5_del_flow_rules(arfs_t->default_rule);
	mlx5e_destroy_flow_table(&arfs_t->ft);
}

static void _mlx5e_cleanup_tables(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
	int i;

	arfs_del_rules(fs);
	destroy_workqueue(arfs->wq);
	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		if (!IS_ERR_OR_NULL(arfs->arfs_tables[i].ft.t))
			arfs_destroy_table(&arfs->arfs_tables[i]);
	}
}

void mlx5e_arfs_destroy_tables(struct mlx5e_flow_steering *fs, bool ntuple)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);

	if (!ntuple)
		return;

	_mlx5e_cleanup_tables(fs);
	mlx5e_fs_set_arfs(fs, NULL);
	kvfree(arfs);
}

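/* Install the catch-all rule of an aRFS table: traffic that hits no 5-tuple
 * rule is forwarded to the RSS TIR of the corresponding traffic type.
 */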
static int arfs_add_default_rule(struct mlx5e_flow_steering *fs,
				 struct mlx5e_rx_res *rx_res,
				 enum arfs_type type)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
	struct arfs_table *arfs_t = &arfs->arfs_tables[type];
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	enum mlx5_traffic_types tt;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	tt = arfs_get_tt(type);
	if (tt == -EINVAL) {
		fs_err(fs, "%s: bad arfs_type: %d\n", __func__, type);
		return -EINVAL;
	}

	/* FIXME: Must use mlx5_ttc_get_default_dest(),
	 * but can't since TTC default is not set up yet!
	 */
	dest.tir_num = mlx5e_rx_res_get_tirn_rss(rx_res, tt);
	arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, NULL,
						   &flow_act,
						   &dest, 1);
	if (IS_ERR(arfs_t->default_rule)) {
		err = PTR_ERR(arfs_t->default_rule);
		arfs_t->default_rule = NULL;
		fs_err(fs, "%s: add rule failed, arfs type=%d\n", __func__, type);
	}

	return err;
}

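/* Each aRFS table has two flow groups: a large group whose entries match the
 * full 5-tuple, and a single-entry group (no match criteria) that holds the
 * catch-all default rule.
 */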
#define MLX5E_ARFS_NUM_GROUPS	2
#define MLX5E_ARFS_GROUP1_SIZE	(BIT(16) - 1)
#define MLX5E_ARFS_GROUP2_SIZE	BIT(0)
#define MLX5E_ARFS_TABLE_SIZE	(MLX5E_ARFS_GROUP1_SIZE +\
				 MLX5E_ARFS_GROUP2_SIZE)
static int arfs_create_groups(struct mlx5e_flow_table *ft,
			      enum arfs_type type)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *outer_headers_c;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
			sizeof(*ft->g), GFP_KERNEL);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in || !ft->g) {
		kfree(ft->g);
		kvfree(in);
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc,
				       outer_headers);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ethertype);
	switch (type) {
	case ARFS_IPV4_TCP:
	case ARFS_IPV6_TCP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport);
		break;
	case ARFS_IPV4_UDP:
	case ARFS_IPV6_UDP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_sport);
		break;
	default:
		err = -EINVAL;
		goto out;
	}

	switch (type) {
	case ARFS_IPV4_TCP:
	case ARFS_IPV4_UDP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
				 src_ipv4_src_ipv6.ipv4_layout.ipv4);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
				 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
		break;
	case ARFS_IPV6_TCP:
	case ARFS_IPV6_UDP:
		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		break;
	default:
		err = -EINVAL;
		goto out;
	}

	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_ARFS_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_ARFS_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
out:
	kvfree(in);

	return err;
}

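/* Create one aRFS flow table, its two flow groups, and its default rule. */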
static int arfs_create_table(struct mlx5e_flow_steering *fs,
			     struct mlx5e_rx_res *rx_res,
			     enum arfs_type type)
{
	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
	struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_ARFS_TABLE_SIZE;
	ft_attr.level = MLX5E_ARFS_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = arfs_create_groups(ft, type);
	if (err)
		goto err;

	err = arfs_add_default_rule(fs, rx_res, type);
	if (err)
		goto err;

	return 0;
err:
	mlx5e_destroy_flow_table(ft);
	return err;
}

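/* Allocate the aRFS context, its workqueue, and the four per-type flow tables. */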
int mlx5e_arfs_create_tables(struct mlx5e_flow_steering *fs,
			     struct mlx5e_rx_res *rx_res, bool ntuple)
{
	struct mlx5e_arfs_tables *arfs;
	int err = -ENOMEM;
	int i;

	if (!ntuple)
		return 0;

	arfs = kvzalloc(sizeof(*arfs), GFP_KERNEL);
	if (!arfs)
		return -ENOMEM;

	spin_lock_init(&arfs->arfs_lock);
	arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
	if (!arfs->wq)
		goto err;

	mlx5e_fs_set_arfs(fs, arfs);

	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		err = arfs_create_table(fs, rx_res, i);
		if (err)
			goto err_des;
	}
	return 0;

err_des:
	_mlx5e_cleanup_tables(fs);
err:
	mlx5e_fs_set_arfs(fs, NULL);
	kvfree(arfs);
	return err;
}

#define MLX5E_ARFS_EXPIRY_QUOTA 60

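/* Scan all aRFS rules and delete those that RPS reports as expired; the
 * number of deletions per scan is bounded by MLX5E_ARFS_EXPIRY_QUOTA.
 */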
static void arfs_may_expire_flow(struct mlx5e_priv *priv)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(priv->fs);
	struct arfs_rule *arfs_rule;
	struct hlist_node *htmp;
	HLIST_HEAD(del_list);
	int quota = 0;
	int i;
	int j;

	spin_lock_bh(&arfs->arfs_lock);
	mlx5e_for_each_arfs_rule(arfs_rule, htmp, arfs->arfs_tables, i, j) {
		if (!work_pending(&arfs_rule->arfs_work) &&
		    rps_may_expire_flow(priv->netdev,
					arfs_rule->rxq, arfs_rule->flow_id,
					arfs_rule->filter_id)) {
			hlist_del_init(&arfs_rule->hlist);
			hlist_add_head(&arfs_rule->hlist, &del_list);
			if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
				break;
		}
	}
	spin_unlock_bh(&arfs->arfs_lock);
	hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
		if (arfs_rule->rule)
			mlx5_del_flow_rules(arfs_rule->rule);
		hlist_del(&arfs_rule->hlist);
		kfree(arfs_rule);
	}
}

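/* Move every aRFS rule onto a private list under the lock, then cancel its
 * work and delete its hardware rule outside the lock.
 */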
static void arfs_del_rules(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
	struct hlist_node *htmp;
	struct arfs_rule *rule;
	HLIST_HEAD(del_list);
	int i;
	int j;

	spin_lock_bh(&arfs->arfs_lock);
	mlx5e_for_each_arfs_rule(rule, htmp, arfs->arfs_tables, i, j) {
		hlist_del_init(&rule->hlist);
		hlist_add_head(&rule->hlist, &del_list);
	}
	spin_unlock_bh(&arfs->arfs_lock);

	hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
		cancel_work_sync(&rule->arfs_work);
		if (rule->rule)
			mlx5_del_flow_rules(rule->rule);
		hlist_del(&rule->hlist);
		kfree(rule);
	}
}

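/* Hash the source and destination ports into one of the ARFS_HASH_SIZE buckets. */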
static struct hlist_head *
arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
		 __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);

	bucket_idx = hash_long(l, ARFS_HASH_SHIFT);

	return &arfs_t->rules_hash[bucket_idx];
}

static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
					 u8 ip_proto, __be16 etype)
{
	if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_TCP)
		return &arfs->arfs_tables[ARFS_IPV4_TCP];
	if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_UDP)
		return &arfs->arfs_tables[ARFS_IPV4_UDP];
	if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_TCP)
		return &arfs->arfs_tables[ARFS_IPV6_TCP];
	if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_UDP)
		return &arfs->arfs_tables[ARFS_IPV6_UDP];

	return NULL;
}

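/* Build a 5-tuple match spec from the rule's flow tuple and install a steering
 * rule that forwards the flow to the direct TIR of the requested RX queue.
 */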
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
					      struct arfs_rule *arfs_rule)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(priv->fs);
	struct arfs_tuple *tuple = &arfs_rule->tuple;
	struct mlx5_flow_handle *rule = NULL;
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct arfs_table *arfs_table;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.ethertype);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype,
		 ntohs(tuple->etype));
	arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype);
	if (!arfs_table) {
		err = -EINVAL;
		goto out;
	}

	ft = arfs_table->ft.t;
	if (tuple->ip_proto == IPPROTO_TCP) {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.tcp_dport);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.tcp_sport);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_dport,
			 ntohs(tuple->dst_port));
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_sport,
			 ntohs(tuple->src_port));
	} else {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.udp_dport);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.udp_sport);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport,
			 ntohs(tuple->dst_port));
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_sport,
			 ntohs(tuple->src_port));
	}
	if (tuple->etype == htons(ETH_P_IP)) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &tuple->src_ipv4,
		       4);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &tuple->dst_ipv4,
		       4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	} else {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &tuple->src_ipv6,
		       16);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &tuple->dst_ipv6,
		       16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       0xff,
		       16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       0xff,
		       16);
	}
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, arfs_rule->rxq);
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		priv->channel_stats[arfs_rule->rxq]->rq.arfs_err++;
		mlx5e_dbg(HW, priv,
			  "%s: add rule(filter id=%d, rq idx=%d, ip proto=0x%x) failed, err=%d\n",
			  __func__, arfs_rule->filter_id, arfs_rule->rxq,
			  tuple->ip_proto, err);
	}

out:
	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

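/* Re-point an existing aRFS rule at the direct TIR of a new RX queue. */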
static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
				struct mlx5_flow_handle *rule, u16 rxq)
{
	struct mlx5_flow_destination dst = {};
	int err = 0;

	dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dst.tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, rxq);
	err = mlx5_modify_rule_destination(rule, &dst, NULL);
	if (err)
		netdev_warn(priv->netdev,
			    "Failed to modify aRFS rule destination to rq=%d\n", rxq);
}

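/* Deferred work: add the hardware rule for a new flow, or retarget an existing
 * rule to its new RX queue, then run the expiry scan.
 */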
static void arfs_handle_work(struct work_struct *work)
{
	struct arfs_rule *arfs_rule = container_of(work,
						   struct arfs_rule,
						   arfs_work);
	struct mlx5e_priv *priv = arfs_rule->priv;
	struct mlx5e_arfs_tables *arfs;
	struct mlx5_flow_handle *rule;

	arfs = mlx5e_fs_get_arfs(priv->fs);
	mutex_lock(&priv->state_lock);
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		spin_lock_bh(&arfs->arfs_lock);
		hlist_del(&arfs_rule->hlist);
		spin_unlock_bh(&arfs->arfs_lock);

		mutex_unlock(&priv->state_lock);
		kfree(arfs_rule);
		goto out;
	}
	mutex_unlock(&priv->state_lock);

	if (!arfs_rule->rule) {
		rule = arfs_add_rule(priv, arfs_rule);
		if (IS_ERR(rule))
			goto out;
		arfs_rule->rule = rule;
	} else {
		arfs_modify_rule_rq(priv, arfs_rule->rule,
				    arfs_rule->rxq);
	}
out:
	arfs_may_expire_flow(priv);
}

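/* Allocate a new aRFS rule from atomic context, fill its tuple from the
 * dissected flow keys, assign a filter ID, and link it into the hash bucket.
 */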
static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
					 struct arfs_table *arfs_t,
					 const struct flow_keys *fk,
					 u16 rxq, u32 flow_id)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(priv->fs);
	struct arfs_rule *rule;
	struct arfs_tuple *tuple;

	rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
	if (!rule)
		return NULL;

	rule->priv = priv;
	rule->rxq = rxq;
	INIT_WORK(&rule->arfs_work, arfs_handle_work);

	tuple = &rule->tuple;
	tuple->etype = fk->basic.n_proto;
	tuple->ip_proto = fk->basic.ip_proto;
	if (tuple->etype == htons(ETH_P_IP)) {
		tuple->src_ipv4 = fk->addrs.v4addrs.src;
		tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
	} else {
		memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
	}
	tuple->src_port = fk->ports.src;
	tuple->dst_port = fk->ports.dst;

	rule->flow_id = flow_id;
	rule->filter_id = arfs->last_filter_id++ % RPS_NO_FILTER;

	hlist_add_head(&rule->hlist,
		       arfs_hash_bucket(arfs_t, tuple->src_port,
					tuple->dst_port));
	return rule;
}

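/* Return true if the stored tuple matches the dissected flow keys. */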
static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
{
	if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
		return false;
	if (tuple->etype != fk->basic.n_proto)
		return false;
	if (tuple->etype == htons(ETH_P_IP))
		return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
		       tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
	if (tuple->etype == htons(ETH_P_IPV6))
		return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
			       sizeof(struct in6_addr)) &&
		       !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
			       sizeof(struct in6_addr));
	return false;
}

static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
					const struct flow_keys *fk)
{
	struct arfs_rule *arfs_rule;
	struct hlist_head *head;

	head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
	hlist_for_each_entry(arfs_rule, head, hlist) {
		if (arfs_cmp(&arfs_rule->tuple, fk))
			return arfs_rule;
	}

	return NULL;
}

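/* ndo_rx_flow_steer() callback: dissect the packet, look up or allocate the
 * matching aRFS rule, and queue work to program or retarget the hardware rule.
 * Returns the filter ID that the stack later passes to rps_may_expire_flow().
 */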
int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
			u16 rxq_index, u32 flow_id)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_arfs_tables *arfs;
	struct arfs_rule *arfs_rule;
	struct arfs_table *arfs_t;
	struct flow_keys fk;

	arfs = mlx5e_fs_get_arfs(priv->fs);
	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;

	if (fk.basic.n_proto != htons(ETH_P_IP) &&
	    fk.basic.n_proto != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
	if (!arfs_t)
		return -EPROTONOSUPPORT;

	spin_lock_bh(&arfs->arfs_lock);
	arfs_rule = arfs_find_rule(arfs_t, &fk);
	if (arfs_rule) {
		if (arfs_rule->rxq == rxq_index) {
			spin_unlock_bh(&arfs->arfs_lock);
			return arfs_rule->filter_id;
		}
		arfs_rule->rxq = rxq_index;
	} else {
		arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
		if (!arfs_rule) {
			spin_unlock_bh(&arfs->arfs_lock);
			return -ENOMEM;
		}
	}
	queue_work(arfs->wq, &arfs_rule->arfs_work);
	spin_unlock_bh(&arfs->arfs_lock);
	return arfs_rule->filter_id;
}