// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */

#include "prestera.h"
#include "prestera_acl.h"
#include "prestera_flow.h"
#include "prestera_flower.h"
#include "prestera_matchall.h"

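/* Per-chain flower template state. Each entry keeps a reference to the
 * ACL ruleset created for a given chain so that rules added later reuse
 * the same ruleset and its keymask.
 */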
struct prestera_flower_template {
	struct prestera_acl_ruleset *ruleset;
	struct list_head list;
	u32 chain_index;
};

static void
prestera_flower_template_free(struct prestera_flower_template *template)
{
	prestera_acl_ruleset_put(template->ruleset);
	list_del(&template->list);
	kfree(template);
}

void prestera_flower_template_cleanup(struct prestera_flow_block *block)
{
	struct prestera_flower_template *template, *tmp;

	/* Put the reference to all rulesets kept by template create. */
	list_for_each_entry_safe(template, tmp, &block->template_list, list)
		prestera_flower_template_free(template);
}

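/* Map a tc "goto chain" action onto a hardware jump: resolve (or create)
 * the ruleset backing the target chain and record its index in the rule
 * entry arguments. Only forward jumps are allowed.
 */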
static int
prestera_flower_parse_goto_action(struct prestera_flow_block *block,
				  struct prestera_acl_rule *rule,
				  u32 chain_index,
				  const struct flow_action_entry *act)
{
	struct prestera_acl_ruleset *ruleset;

	if (act->chain_index <= chain_index)
		/* we can jump only forward */
		return -EINVAL;

	if (rule->re_arg.jump.valid)
		return -EEXIST;

	ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
					   act->chain_index);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule->re_arg.jump.valid = 1;
	rule->re_arg.jump.i.index = prestera_acl_ruleset_index_get(ruleset);

	rule->jump_ruleset = ruleset;

	return 0;
}

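/* Translate the tc action list into rule entry arguments. Each supported
 * action (accept/drop/trap/police/goto) may appear at most once; a
 * duplicate is rejected with -EEXIST.
 */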
static int prestera_flower_parse_actions(struct prestera_flow_block *block,
					 struct prestera_acl_rule *rule,
					 struct flow_action *flow_action,
					 u32 chain_index,
					 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int err, i;

	/* whole struct (rule->re_arg) must be initialized with 0 */
	if (!flow_action_has_entries(flow_action))
		return 0;

	if (!flow_action_mixed_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

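	/* Only "disabled" and "delayed" HW stats are supported; delayed
	 * stats require a counter bound to this chain's counter client.
	 */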
	act = flow_action_first_entry_get(flow_action);
	if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) {
		/* Nothing to do */
	} else if (act->hw_stats & FLOW_ACTION_HW_STATS_DELAYED) {
		/* setup counter first */
		rule->re_arg.count.valid = true;
		err = prestera_acl_chain_to_client(chain_index, block->ingress,
						   &rule->re_arg.count.client);
		if (err)
			return err;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			if (rule->re_arg.accept.valid)
				return -EEXIST;

			rule->re_arg.accept.valid = 1;
			break;
		case FLOW_ACTION_DROP:
			if (rule->re_arg.drop.valid)
				return -EEXIST;

			rule->re_arg.drop.valid = 1;
			break;
		case FLOW_ACTION_TRAP:
			if (rule->re_arg.trap.valid)
				return -EEXIST;

			rule->re_arg.trap.valid = 1;
			break;
		case FLOW_ACTION_POLICE:
			if (rule->re_arg.police.valid)
				return -EEXIST;

			rule->re_arg.police.valid = 1;
			rule->re_arg.police.rate =
				act->police.rate_bytes_ps;
			rule->re_arg.police.burst = act->police.burst;
			rule->re_arg.police.ingress = block->ingress;
			break;
		case FLOW_ACTION_GOTO:
			err = prestera_flower_parse_goto_action(block, rule,
								chain_index,
								act);
			if (err)
				return err;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			pr_err("Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

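/* Match on the ingress port from the META dissector key: the netdev's
 * hw_id is encoded into the SYS_PORT field (shifted left by 3, under a
 * 13-bit mask) and its dev_id into the SYS_DEV field (10-bit mask).
 */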
static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
				      struct flow_cls_offload *f,
				      struct prestera_flow_block *block)
{
	struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
	struct prestera_acl_match *r_match = &rule->re_key.match;
	struct prestera_port *port;
	struct net_device *ingress_dev;
	struct flow_match_meta match;
	__be16 key, mask;

	flow_rule_match_meta(f_rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(f->common.extack,
				   "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(block->net,
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(f->common.extack,
				   "Can't find specified ingress port to match on");
		return -EINVAL;
	}

	if (!prestera_netdev_check(ingress_dev)) {
		NL_SET_ERR_MSG_MOD(f->common.extack,
				   "Can't match on non-switchdev ingress port");
		return -EINVAL;
	}
	port = netdev_priv(ingress_dev);

	mask = htons(0x1FFF << 3);
	key = htons(port->hw_id << 3);
	rule_match_set(r_match->key, SYS_PORT, key);
	rule_match_set(r_match->mask, SYS_PORT, mask);

	mask = htons(0x3FF);
	key = htons(port->dev_id);
	rule_match_set(r_match->key, SYS_DEV, key);
	rule_match_set(r_match->mask, SYS_DEV, mask);

	return 0;
}

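/* Build the hardware match key/mask from the flower dissector keys and
 * then parse the action list. Unsupported dissector keys are rejected
 * up front.
 */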
static int prestera_flower_parse(struct prestera_flow_block *block,
				 struct prestera_acl_rule *rule,
				 struct flow_cls_offload *f)
{
	struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = f_rule->match.dissector;
	struct prestera_acl_match *r_match = &rule->re_key.match;
	__be16 n_proto_mask = 0;
	__be16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ICMP) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS_RANGE) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	prestera_acl_rule_priority_set(rule, f->common.prio);

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_META)) {
		err = prestera_flower_parse_meta(rule, f, block);
		if (err)
			return err;
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(f_rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(f_rule, &match);
		n_proto_key = match.key->n_proto;
		n_proto_mask = match.mask->n_proto;

		if (ntohs(match.key->n_proto) == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}

		rule_match_set(r_match->key, ETH_TYPE, n_proto_key);
		rule_match_set(r_match->mask, ETH_TYPE, n_proto_mask);

		rule_match_set(r_match->key, IP_PROTO, match.key->ip_proto);
		rule_match_set(r_match->mask, IP_PROTO, match.mask->ip_proto);
		ip_proto = match.key->ip_proto;
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(f_rule, &match);

		/* DA key, mask */
		rule_match_set_n(r_match->key,
				 ETH_DMAC_0, &match.key->dst[0], 4);
		rule_match_set_n(r_match->key,
				 ETH_DMAC_1, &match.key->dst[4], 2);

		rule_match_set_n(r_match->mask,
				 ETH_DMAC_0, &match.mask->dst[0], 4);
		rule_match_set_n(r_match->mask,
				 ETH_DMAC_1, &match.mask->dst[4], 2);

		/* SA key, mask */
		rule_match_set_n(r_match->key,
				 ETH_SMAC_0, &match.key->src[0], 4);
		rule_match_set_n(r_match->key,
				 ETH_SMAC_1, &match.key->src[4], 2);

		rule_match_set_n(r_match->mask,
				 ETH_SMAC_0, &match.mask->src[0], 4);
		rule_match_set_n(r_match->mask,
				 ETH_SMAC_1, &match.mask->src[4], 2);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(f_rule, &match);

		rule_match_set(r_match->key, IP_SRC, match.key->src);
		rule_match_set(r_match->mask, IP_SRC, match.mask->src);

		rule_match_set(r_match->key, IP_DST, match.key->dst);
		rule_match_set(r_match->mask, IP_DST, match.mask->dst);
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
			NL_SET_ERR_MSG_MOD(f->common.extack,
					   "Only UDP and TCP keys are supported");
			return -EINVAL;
		}

		flow_rule_match_ports(f_rule, &match);

		rule_match_set(r_match->key, L4_PORT_SRC, match.key->src);
		rule_match_set(r_match->mask, L4_PORT_SRC, match.mask->src);

		rule_match_set(r_match->key, L4_PORT_DST, match.key->dst);
		rule_match_set(r_match->mask, L4_PORT_DST, match.mask->dst);
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS_RANGE)) {
		struct flow_match_ports_range match;
		__be32 tp_key, tp_mask;

		flow_rule_match_ports_range(f_rule, &match);

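		/* Each range is packed into one 32-bit field: min port in
		 * the low 16 bits, max port in the high 16 bits, stored in
		 * network byte order.
		 */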
		/* src port range (min, max) */
		tp_key = htonl(ntohs(match.key->tp_min.src) |
			       (ntohs(match.key->tp_max.src) << 16));
		tp_mask = htonl(ntohs(match.mask->tp_min.src) |
				(ntohs(match.mask->tp_max.src) << 16));
		rule_match_set(r_match->key, L4_PORT_RANGE_SRC, tp_key);
		rule_match_set(r_match->mask, L4_PORT_RANGE_SRC, tp_mask);

		/* dst port range (min, max) */
		tp_key = htonl(ntohs(match.key->tp_min.dst) |
			       (ntohs(match.key->tp_max.dst) << 16));
		tp_mask = htonl(ntohs(match.mask->tp_min.dst) |
				(ntohs(match.mask->tp_max.dst) << 16));
		rule_match_set(r_match->key, L4_PORT_RANGE_DST, tp_key);
		rule_match_set(r_match->mask, L4_PORT_RANGE_DST, tp_mask);
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(f_rule, &match);

		if (match.mask->vlan_id != 0) {
			__be16 key = cpu_to_be16(match.key->vlan_id);
			__be16 mask = cpu_to_be16(match.mask->vlan_id);

			rule_match_set(r_match->key, VLAN_ID, key);
			rule_match_set(r_match->mask, VLAN_ID, mask);
		}

		rule_match_set(r_match->key, VLAN_TPID, match.key->vlan_tpid);
		rule_match_set(r_match->mask, VLAN_TPID, match.mask->vlan_tpid);
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_match_icmp match;

		flow_rule_match_icmp(f_rule, &match);

		rule_match_set(r_match->key, ICMP_TYPE, match.key->type);
		rule_match_set(r_match->mask, ICMP_TYPE, match.mask->type);

		rule_match_set(r_match->key, ICMP_CODE, match.key->code);
		rule_match_set(r_match->mask, ICMP_CODE, match.mask->code);
	}

	return prestera_flower_parse_actions(block, rule, &f->rule->action,
					     f->common.chain_index,
					     f->common.extack);
}

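/* Flower rules must not be interleaved with matchall rules: on ingress a
 * flower rule has to sit behind (at a numerically higher priority than)
 * every matchall rule, on egress in front of them.
 */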
static int prestera_flower_prio_check(struct prestera_flow_block *block,
				      struct flow_cls_offload *f)
{
	u32 mall_prio_min;
	u32 mall_prio_max;
	int err;

	err = prestera_mall_prio_get(block, &mall_prio_min, &mall_prio_max);
	if (err == -ENOENT)
		/* No matchall filters installed on this chain. */
		return 0;

	if (err) {
		NL_SET_ERR_MSG(f->common.extack, "Failed to get matchall priorities");
		return err;
	}

	if (f->common.prio <= mall_prio_max && block->ingress) {
		NL_SET_ERR_MSG(f->common.extack,
			       "Failed to add in front of existing matchall rules");
		return -EOPNOTSUPP;
	}
	if (f->common.prio >= mall_prio_min && !block->ingress) {
		NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing matchall rules");
		return -EOPNOTSUPP;
	}

	return 0;
}

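/* Report the min/max priority currently in use by flower rules on the
 * given chain; used by the matchall code for the complementary check.
 */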
int prestera_flower_prio_get(struct prestera_flow_block *block, u32 chain_index,
			     u32 *prio_min, u32 *prio_max)
{
	struct prestera_acl_ruleset *ruleset;

	ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block, chain_index);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	prestera_acl_ruleset_prio_get(ruleset, prio_min, prio_max);
	return 0;
}

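/* Install a flower rule: take a ruleset reference for the chain, create
 * and parse the rule, lazily offload the ruleset on first use, then add
 * the rule to hardware. The local ruleset reference is dropped on both
 * success and failure; the rule keeps its own reference.
 */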
int prestera_flower_replace(struct prestera_flow_block *block,
			    struct flow_cls_offload *f)
{
	struct prestera_acl_ruleset *ruleset;
	struct prestera_acl *acl = block->sw->acl;
	struct prestera_acl_rule *rule;
	int err;

	err = prestera_flower_prio_check(block, f);
	if (err)
		return err;

	ruleset = prestera_acl_ruleset_get(acl, block, f->common.chain_index);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	/* increments the ruleset reference */
	rule = prestera_acl_rule_create(ruleset, f->cookie,
					f->common.chain_index);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	err = prestera_flower_parse(block, rule, f);
	if (err)
		goto err_rule_add;

	if (!prestera_acl_ruleset_is_offload(ruleset)) {
		err = prestera_acl_ruleset_offload(ruleset);
		if (err)
			goto err_ruleset_offload;
	}

	err = prestera_acl_rule_add(block->sw, rule);
	if (err)
		goto err_rule_add;

	prestera_acl_ruleset_put(ruleset);
	return 0;

err_ruleset_offload:
err_rule_add:
	prestera_acl_rule_destroy(rule);
err_rule_create:
	prestera_acl_ruleset_put(ruleset);
	return err;
}

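/* Remove a flower rule by its tc cookie. The ruleset lookup takes a
 * reference, which is put again once the rule has been deleted.
 */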
void prestera_flower_destroy(struct prestera_flow_block *block,
			     struct flow_cls_offload *f)
{
	struct prestera_acl_ruleset *ruleset;
	struct prestera_acl_rule *rule;

	ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
					      f->common.chain_index);
	if (IS_ERR(ruleset))
		return;

	rule = prestera_acl_rule_lookup(ruleset, f->cookie);
	if (rule) {
		prestera_acl_rule_del(block->sw, rule);
		prestera_acl_rule_destroy(rule);
	}
	prestera_acl_ruleset_put(ruleset);
}

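/* Handle a chain template: parse the template match into a throwaway
 * on-stack rule only to derive the keymask, fix that keymask on the
 * chain's ruleset, and keep a reference to the ruleset until the
 * template (or the block) goes away.
 */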
int prestera_flower_tmplt_create(struct prestera_flow_block *block,
				 struct flow_cls_offload *f)
{
	struct prestera_flower_template *template;
	struct prestera_acl_ruleset *ruleset;
	struct prestera_acl_rule rule;
	int err;

	memset(&rule, 0, sizeof(rule));
	err = prestera_flower_parse(block, &rule, f);
	if (err)
		return err;

	template = kmalloc(sizeof(*template), GFP_KERNEL);
	if (!template) {
		err = -ENOMEM;
		goto err_malloc;
	}

	prestera_acl_rule_keymask_pcl_id_set(&rule, 0);
	ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
					   f->common.chain_index);
	if (IS_ERR_OR_NULL(ruleset)) {
		err = -EINVAL;
		goto err_ruleset_get;
	}

	/* preserve keymask/template to this ruleset */
	err = prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask);
	if (err)
		goto err_ruleset_keymask_set;

	/* Skip the error: a template operation cannot be rejected, so keep
	 * the reference to the ruleset for rules to be added to it later.
	 * If the offload fails here, the ruleset will be offloaded again
	 * when a new rule is added. It is also unlikely that the ruleset
	 * is already offloaded at this stage.
	 */
	prestera_acl_ruleset_offload(ruleset);

	/* keep the reference to the ruleset */
	template->ruleset = ruleset;
	template->chain_index = f->common.chain_index;
	list_add_rcu(&template->list, &block->template_list);
	return 0;

err_ruleset_keymask_set:
	prestera_acl_ruleset_put(ruleset);
err_ruleset_get:
	kfree(template);
err_malloc:
	NL_SET_ERR_MSG_MOD(f->common.extack, "Create chain template failed");
	return err;
}

void prestera_flower_tmplt_destroy(struct prestera_flow_block *block,
				   struct flow_cls_offload *f)
{
	struct prestera_flower_template *template, *tmp;

	list_for_each_entry_safe(template, tmp, &block->template_list, list)
		if (template->chain_index == f->common.chain_index) {
			/* put the reference to the ruleset kept in create */
			prestera_flower_template_free(template);
			return;
		}
}

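/* Report rule statistics back to tc. Stats are read from the counter
 * attached to the rule and reported as FLOW_ACTION_HW_STATS_DELAYED.
 */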
int prestera_flower_stats(struct prestera_flow_block *block,
			  struct flow_cls_offload *f)
{
	struct prestera_acl_ruleset *ruleset;
	struct prestera_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
					      f->common.chain_index);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = prestera_acl_rule_lookup(ruleset, f->cookie);
	if (!rule) {
		err = -EINVAL;
		goto err_rule_get_stats;
	}

	err = prestera_acl_rule_get_stats(block->sw->acl, rule, &packets,
					  &bytes, &lastuse);
	if (err)
		goto err_rule_get_stats;

	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
			  FLOW_ACTION_HW_STATS_DELAYED);

err_rule_get_stats:
	prestera_acl_ruleset_put(ruleset);
	return err;
}