// SPDX-License-Identifier: GPL-2.0+
/* Microchip VCAP API
 *
 * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
 */

#include <net/tc_act/tc_gate.h>
#include <net/tcp.h>

#include "sparx5_tc.h"
#include "vcap_api.h"
#include "vcap_api_client.h"
#include "vcap_tc.h"
#include "sparx5_main.h"
#include "sparx5_vcap_impl.h"

#define SPX5_MAX_RULE_SIZE 13 /* allows X1, X2, X4, X6 and X12 rules */

/* Collect keysets and type ids for multiple rules per size */
struct sparx5_wildcard_rule {
	bool selected;
	u8 value;
	u8 mask;
	enum vcap_keyfield_set keyset;
};

struct sparx5_multiple_rules {
	struct sparx5_wildcard_rule rule[SPX5_MAX_RULE_SIZE];
};

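/* Add the ES0 TPID selection key based on the vlan protocol of the filter */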
static int
sparx5_tc_flower_es0_tpid(struct vcap_tc_flower_parse_usage *st)
{
	int err = 0;

	switch (st->tpid) {
	case ETH_P_8021Q:
		err = vcap_rule_add_key_u32(st->vrule,
					    VCAP_KF_8021Q_TPID,
					    SPX5_TPID_SEL_8100, ~0);
		break;
	case ETH_P_8021AD:
		err = vcap_rule_add_key_u32(st->vrule,
					    VCAP_KF_8021Q_TPID,
					    SPX5_TPID_SEL_88A8, ~0);
		break;
	default:
		NL_SET_ERR_MSG_MOD(st->fco->common.extack,
				   "Invalid vlan proto");
		err = -EINVAL;
		break;
	}
	return err;
}

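/* Handle the basic flower dissector: add ethertype and IP protocol keys */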
static int
sparx5_tc_flower_handler_basic_usage(struct vcap_tc_flower_parse_usage *st)
{
	struct flow_match_basic mt;
	int err = 0;

	flow_rule_match_basic(st->frule, &mt);

	if (mt.mask->n_proto) {
		st->l3_proto = be16_to_cpu(mt.key->n_proto);
		if (!sparx5_vcap_is_known_etype(st->admin, st->l3_proto)) {
			err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
						    st->l3_proto, ~0);
			if (err)
				goto out;
		} else if (st->l3_proto == ETH_P_IP) {
			err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
						    VCAP_BIT_1);
			if (err)
				goto out;
		} else if (st->l3_proto == ETH_P_IPV6) {
			err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
						    VCAP_BIT_0);
			if (err)
				goto out;
			if (st->admin->vtype == VCAP_TYPE_IS0) {
				err = vcap_rule_add_key_bit(st->vrule,
							    VCAP_KF_IP_SNAP_IS,
							    VCAP_BIT_1);
				if (err)
					goto out;
			}
		}
	}

	if (mt.mask->ip_proto) {
		st->l4_proto = mt.key->ip_proto;
		if (st->l4_proto == IPPROTO_TCP) {
			err = vcap_rule_add_key_bit(st->vrule,
						    VCAP_KF_TCP_IS,
						    VCAP_BIT_1);
			if (err)
				goto out;
		} else if (st->l4_proto == IPPROTO_UDP) {
			err = vcap_rule_add_key_bit(st->vrule,
						    VCAP_KF_TCP_IS,
						    VCAP_BIT_0);
			if (err)
				goto out;
			if (st->admin->vtype == VCAP_TYPE_IS0) {
				err = vcap_rule_add_key_bit(st->vrule,
							    VCAP_KF_TCP_UDP_IS,
							    VCAP_BIT_1);
				if (err)
					goto out;
			}
		} else {
			err = vcap_rule_add_key_u32(st->vrule,
						    VCAP_KF_L3_IP_PROTO,
						    st->l4_proto, ~0);
			if (err)
				goto out;
		}
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_proto parse error");
	return err;
}

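/* Handle the control flower dissector: map the IP fragment flags to the
 * L3_FRAGMENT_TYPE key
 */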
static int
sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
{
	struct flow_match_control mt;
	u32 value, mask;
	int err = 0;

	flow_rule_match_control(st->frule, &mt);

	if (mt.mask->flags) {
		if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
			if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
				value = 1; /* initial fragment */
				mask = 0x3;
			} else {
				if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
					value = 3; /* follow up fragment */
					mask = 0x3;
				} else {
					value = 0; /* no fragment */
					mask = 0x3;
				}
			}
		} else {
			if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
				value = 3; /* follow up fragment */
				mask = 0x3;
			} else {
				value = 0; /* no fragment */
				mask = 0x3;
			}
		}

		err = vcap_rule_add_key_u32(st->vrule,
					    VCAP_KF_L3_FRAGMENT_TYPE,
					    value, mask);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
	return err;
}

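/* The cvlan (inner tag) key is only supported by the IS0 VCAP instance */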
static int
sparx5_tc_flower_handler_cvlan_usage(struct vcap_tc_flower_parse_usage *st)
{
	if (st->admin->vtype != VCAP_TYPE_IS0) {
		NL_SET_ERR_MSG_MOD(st->fco->common.extack,
				   "cvlan not supported in this VCAP");
		return -EINVAL;
	}

	return vcap_tc_flower_handler_cvlan_usage(st);
}

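/* Handle the vlan flower dissector: use the VCAP instance specific vid/pcp
 * keys and add the TPID key in ES0
 */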
static int
sparx5_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st)
{
	enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS;
	enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS;
	int err;

	if (st->admin->vtype == VCAP_TYPE_IS0) {
		vid_key = VCAP_KF_8021Q_VID0;
		pcp_key = VCAP_KF_8021Q_PCP0;
	}

	err = vcap_tc_flower_handler_vlan_usage(st, vid_key, pcp_key);
	if (err)
		return err;

	if (st->admin->vtype == VCAP_TYPE_ES0 && st->tpid)
		err = sparx5_tc_flower_es0_tpid(st);

	return err;
}

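/* Dispatch table from flower dissector keys to usage handlers */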
static int (*sparx5_tc_flower_usage_handlers[])(struct vcap_tc_flower_parse_usage *st) = {
	[FLOW_DISSECTOR_KEY_ETH_ADDRS] = vcap_tc_flower_handler_ethaddr_usage,
	[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = vcap_tc_flower_handler_ipv4_usage,
	[FLOW_DISSECTOR_KEY_IPV6_ADDRS] = vcap_tc_flower_handler_ipv6_usage,
	[FLOW_DISSECTOR_KEY_CONTROL] = sparx5_tc_flower_handler_control_usage,
	[FLOW_DISSECTOR_KEY_PORTS] = vcap_tc_flower_handler_portnum_usage,
	[FLOW_DISSECTOR_KEY_BASIC] = sparx5_tc_flower_handler_basic_usage,
	[FLOW_DISSECTOR_KEY_CVLAN] = sparx5_tc_flower_handler_cvlan_usage,
	[FLOW_DISSECTOR_KEY_VLAN] = sparx5_tc_flower_handler_vlan_usage,
	[FLOW_DISSECTOR_KEY_TCP] = vcap_tc_flower_handler_tcp_usage,
	[FLOW_DISSECTOR_KEY_ARP] = vcap_tc_flower_handler_arp_usage,
	[FLOW_DISSECTOR_KEY_IP] = vcap_tc_flower_handler_ip_usage,
};

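/* Run the handler for each dissector key used by the filter and verify that
 * every used key was recognized
 */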
static int sparx5_tc_use_dissectors(struct vcap_tc_flower_parse_usage *st,
				    struct vcap_admin *admin,
				    struct vcap_rule *vrule)
{
	int idx, err = 0;

	for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_flower_usage_handlers); ++idx) {
		if (!flow_rule_match_key(st->frule, idx))
			continue;
		if (!sparx5_tc_flower_usage_handlers[idx])
			continue;
		err = sparx5_tc_flower_usage_handlers[idx](st);
		if (err)
			return err;
	}

	if (st->frule->match.dissector->used_keys ^ st->used_keys) {
		NL_SET_ERR_MSG_MOD(st->fco->common.extack,
				   "Unsupported match item");
		return -ENOENT;
	}

	return err;
}

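/* Check that the tc actions are supported and not combined in invalid ways */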
static int sparx5_tc_flower_action_check(struct vcap_control *vctrl,
					 struct net_device *ndev,
					 struct flow_cls_offload *fco,
					 bool ingress)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(fco);
	struct flow_action_entry *actent, *last_actent = NULL;
	struct flow_action *act = &rule->action;
	u64 action_mask = 0;
	int idx;

	if (!flow_action_has_entries(act)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack, "No actions");
		return -EINVAL;
	}

	if (!flow_action_basic_hw_stats_check(act, fco->common.extack))
		return -EOPNOTSUPP;

	flow_action_for_each(idx, actent, act) {
		if (action_mask & BIT(actent->id)) {
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "More actions of the same type");
			return -EINVAL;
		}
		action_mask |= BIT(actent->id);
		last_actent = actent; /* Save last action for later check */
	}

	/* Check if last action is a goto
	 * The last chain/lookup does not need to have a goto action
	 */
	if (last_actent->id == FLOW_ACTION_GOTO) {
		/* Check if the destination chain is in one of the VCAPs */
		if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
					 last_actent->chain_index)) {
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "Invalid goto chain");
			return -EINVAL;
		}
	} else if (!vcap_is_last_chain(vctrl, fco->common.chain_index,
				       ingress)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Last action must be 'goto'");
		return -EINVAL;
	}

	/* Catch unsupported combinations of actions */
	if (action_mask & BIT(FLOW_ACTION_TRAP) &&
	    action_mask & BIT(FLOW_ACTION_ACCEPT)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Cannot combine pass and trap action");
		return -EOPNOTSUPP;
	}

	if (action_mask & BIT(FLOW_ACTION_VLAN_PUSH) &&
	    action_mask & BIT(FLOW_ACTION_VLAN_POP)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Cannot combine vlan push and pop action");
		return -EOPNOTSUPP;
	}

	if (action_mask & BIT(FLOW_ACTION_VLAN_PUSH) &&
	    action_mask & BIT(FLOW_ACTION_VLAN_MANGLE)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Cannot combine vlan push and modify action");
		return -EOPNOTSUPP;
	}

	if (action_mask & BIT(FLOW_ACTION_VLAN_POP) &&
	    action_mask & BIT(FLOW_ACTION_VLAN_MANGLE)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Cannot combine vlan pop and modify action");
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Add a rule counter action */
static int sparx5_tc_add_rule_counter(struct vcap_admin *admin,
				      struct vcap_rule *vrule)
{
	int err;

	switch (admin->vtype) {
	case VCAP_TYPE_IS0:
		break;
	case VCAP_TYPE_ES0:
		err = vcap_rule_mod_action_u32(vrule, VCAP_AF_ESDX,
					       vrule->id);
		if (err)
			return err;
		vcap_rule_set_counter_id(vrule, vrule->id);
		break;
	case VCAP_TYPE_IS2:
	case VCAP_TYPE_ES2:
		err = vcap_rule_mod_action_u32(vrule, VCAP_AF_CNT_ID,
					       vrule->id);
		if (err)
			return err;
		vcap_rule_set_counter_id(vrule, vrule->id);
		break;
	default:
		pr_err("%s:%d: vcap type: %d not supported\n",
		       __func__, __LINE__, admin->vtype);
		break;
	}
	return 0;
}

/* Collect all port keysets and apply the first of them, possibly wildcarded */
static int sparx5_tc_select_protocol_keyset(struct net_device *ndev,
					    struct vcap_rule *vrule,
					    struct vcap_admin *admin,
					    u16 l3_proto,
					    struct sparx5_multiple_rules *multi)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_keyset_list portkeysetlist = {};
	enum vcap_keyfield_set portkeysets[10] = {};
	struct vcap_keyset_list matches = {};
	enum vcap_keyfield_set keysets[10];
	int idx, jdx, err = 0, count = 0;
	struct sparx5_wildcard_rule *mru;
	const struct vcap_set *kinfo;
	struct vcap_control *vctrl;

	vctrl = port->sparx5->vcap_ctrl;

	/* Find the keysets that the rule can use */
	matches.keysets = keysets;
	matches.max = ARRAY_SIZE(keysets);
	if (vcap_rule_find_keysets(vrule, &matches) == 0)
		return -EINVAL;

	/* Find the keysets that the port configuration supports */
	portkeysetlist.max = ARRAY_SIZE(portkeysets);
	portkeysetlist.keysets = portkeysets;
	err = sparx5_vcap_get_port_keyset(ndev,
					  admin, vrule->vcap_chain_id,
					  l3_proto,
					  &portkeysetlist);
	if (err)
		return err;

	/* Find the intersection of the two sets of keyset */
	for (idx = 0; idx < portkeysetlist.cnt; ++idx) {
		kinfo = vcap_keyfieldset(vctrl, admin->vtype,
					 portkeysetlist.keysets[idx]);
		if (!kinfo)
			continue;

		/* Find a port keyset that matches the required keys
		 * If there are multiple keysets then compose a type id mask
		 */
		for (jdx = 0; jdx < matches.cnt; ++jdx) {
			if (portkeysetlist.keysets[idx] != matches.keysets[jdx])
				continue;

			mru = &multi->rule[kinfo->sw_per_item];
			if (!mru->selected) {
				mru->selected = true;
				mru->keyset = portkeysetlist.keysets[idx];
				mru->value = kinfo->type_id;
			}
			mru->value &= kinfo->type_id;
			mru->mask |= kinfo->type_id;
			++count;
		}
	}
	if (count == 0)
		return -EPROTO;

	if (l3_proto == ETH_P_ALL && count < portkeysetlist.cnt)
		return -ENOENT;

	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		mru = &multi->rule[idx];
		if (!mru->selected)
			continue;

		/* Align the mask to the combined value */
		mru->mask ^= mru->value;
	}

	/* Set the chosen keyset on the rule and set a wildcarded type if there
	 * are more than one keyset
	 */
	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		mru = &multi->rule[idx];
		if (!mru->selected)
			continue;

		vcap_set_rule_set_keyset(vrule, mru->keyset);
		if (count > 1)
			/* Some keysets do not have a type field */
			vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE,
					      mru->value,
					      ~mru->mask);
		mru->selected = false; /* mark as done */
		break; /* Stop here and add more rules later */
	}
	return err;
}

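/* Add a copy of the rule using a different keyset, linked to the original rule
 * via the same cookie
 */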
static int sparx5_tc_add_rule_copy(struct vcap_control *vctrl,
				   struct flow_cls_offload *fco,
				   struct vcap_rule *erule,
				   struct vcap_admin *admin,
				   struct sparx5_wildcard_rule *rule)
{
	enum vcap_key_field keylist[] = {
		VCAP_KF_IF_IGR_PORT_MASK,
		VCAP_KF_IF_IGR_PORT_MASK_SEL,
		VCAP_KF_IF_IGR_PORT_MASK_RNG,
		VCAP_KF_LOOKUP_FIRST_IS,
		VCAP_KF_TYPE,
	};
	struct vcap_rule *vrule;
	int err;

	/* Add an extra rule with a special user and the new keyset */
	erule->user = VCAP_USER_TC_EXTRA;
	vrule = vcap_copy_rule(erule);
	if (IS_ERR(vrule))
		return PTR_ERR(vrule);

	/* Link the new rule to the existing rule with the cookie */
	vrule->cookie = erule->cookie;
	vcap_filter_rule_keys(vrule, keylist, ARRAY_SIZE(keylist), true);
	err = vcap_set_rule_set_keyset(vrule, rule->keyset);
	if (err) {
		pr_err("%s:%d: could not set keyset %s in rule: %u\n",
		       __func__, __LINE__,
		       vcap_keyset_name(vctrl, rule->keyset),
		       vrule->id);
		goto out;
	}

	/* Some keysets do not have a type field, so ignore return value */
	vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE, rule->value, ~rule->mask);

	err = vcap_set_rule_set_actionset(vrule, erule->actionset);
	if (err)
		goto out;

	err = sparx5_tc_add_rule_counter(admin, vrule);
	if (err)
		goto out;

	err = vcap_val_rule(vrule, ETH_P_ALL);
	if (err) {
		pr_err("%s:%d: could not validate rule: %u\n",
		       __func__, __LINE__, vrule->id);
		vcap_set_tc_exterr(fco, vrule);
		goto out;
	}
	err = vcap_add_rule(vrule);
	if (err) {
		pr_err("%s:%d: could not add rule: %u\n",
		       __func__, __LINE__, vrule->id);
		goto out;
	}
out:
	vcap_free_rule(vrule);
	return err;
}

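/* Add rule copies for all remaining selected keysets of a wildcarded rule */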
static int sparx5_tc_add_remaining_rules(struct vcap_control *vctrl,
					 struct flow_cls_offload *fco,
					 struct vcap_rule *erule,
					 struct vcap_admin *admin,
					 struct sparx5_multiple_rules *multi)
{
	int idx, err = 0;

	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		if (!multi->rule[idx].selected)
			continue;

		err = sparx5_tc_add_rule_copy(vctrl, fco, erule, admin,
					      &multi->rule[idx]);
		if (err)
			break;
	}
	return err;
}

/* Add the actionset that is the default for the VCAP type */
static int sparx5_tc_set_actionset(struct vcap_admin *admin,
				   struct vcap_rule *vrule)
{
	enum vcap_actionfield_set aset;
	int err = 0;

	switch (admin->vtype) {
	case VCAP_TYPE_IS0:
		aset = VCAP_AFS_CLASSIFICATION;
		break;
	case VCAP_TYPE_IS2:
		aset = VCAP_AFS_BASE_TYPE;
		break;
	case VCAP_TYPE_ES0:
		aset = VCAP_AFS_ES0;
		break;
	case VCAP_TYPE_ES2:
		aset = VCAP_AFS_BASE_TYPE;
		break;
	default:
		pr_err("%s:%d: %s\n", __func__, __LINE__, "Invalid VCAP type");
		return -EINVAL;
	}
	/* Do not overwrite any current actionset */
	if (vrule->actionset == VCAP_AFS_NO_VALUE)
		err = vcap_set_rule_set_actionset(vrule, aset);
	return err;
}

/* Add the VCAP key to match on for a rule target value */
static int sparx5_tc_add_rule_link_target(struct vcap_admin *admin,
					  struct vcap_rule *vrule,
					  int target_cid)
{
	int link_val = target_cid % VCAP_CID_LOOKUP_SIZE;
	int err;

	if (!link_val)
		return 0;

	switch (admin->vtype) {
	case VCAP_TYPE_IS0:
		/* Add NXT_IDX key for chaining rules between IS0 instances */
		err = vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX_SEL,
					    1, /* enable */
					    ~0);
		if (err)
			return err;
		return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX,
					     link_val, /* target */
					     ~0);
	case VCAP_TYPE_IS2:
		/* Add PAG key for chaining rules from IS0 */
		return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_PAG,
					     link_val, /* target */
					     ~0);
	case VCAP_TYPE_ES0:
	case VCAP_TYPE_ES2:
		/* Add ISDX key for chaining rules from IS0 */
		return vcap_rule_add_key_u32(vrule, VCAP_KF_ISDX_CLS, link_val,
					     ~0);
	default:
		break;
	}
	return 0;
}

/* Add the VCAP action that adds a target value to a rule */
static int sparx5_tc_add_rule_link(struct vcap_control *vctrl,
				   struct vcap_admin *admin,
				   struct vcap_rule *vrule,
				   int from_cid, int to_cid)
{
	struct vcap_admin *to_admin = vcap_find_admin(vctrl, to_cid);
	int diff, err = 0;

	if (!to_admin) {
		pr_err("%s:%d: unsupported chain direction: %d\n",
		       __func__, __LINE__, to_cid);
		return -EINVAL;
	}

	diff = vcap_chain_offset(vctrl, from_cid, to_cid);
	if (!diff)
		return 0;

	if (admin->vtype == VCAP_TYPE_IS0 &&
	    to_admin->vtype == VCAP_TYPE_IS0) {
		/* Between IS0 instances the G_IDX value is used */
		err = vcap_rule_add_action_u32(vrule, VCAP_AF_NXT_IDX, diff);
		if (err)
			goto out;
		err = vcap_rule_add_action_u32(vrule, VCAP_AF_NXT_IDX_CTRL,
					       1); /* Replace */
		if (err)
			goto out;
	} else if (admin->vtype == VCAP_TYPE_IS0 &&
		   to_admin->vtype == VCAP_TYPE_IS2) {
		/* Between IS0 and IS2 the PAG value is used */
		err = vcap_rule_add_action_u32(vrule, VCAP_AF_PAG_VAL, diff);
		if (err)
			goto out;
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_PAG_OVERRIDE_MASK,
					       0xff);
		if (err)
			goto out;
	} else if (admin->vtype == VCAP_TYPE_IS0 &&
		   (to_admin->vtype == VCAP_TYPE_ES0 ||
		    to_admin->vtype == VCAP_TYPE_ES2)) {
		/* Between IS0 and ES0/ES2 the ISDX value is used */
		err = vcap_rule_add_action_u32(vrule, VCAP_AF_ISDX_VAL,
					       diff);
		if (err)
			goto out;
		err = vcap_rule_add_action_bit(vrule,
					       VCAP_AF_ISDX_ADD_REPLACE_SEL,
					       VCAP_BIT_1);
		if (err)
			goto out;
	} else {
		pr_err("%s:%d: unsupported chain destination: %d\n",
		       __func__, __LINE__, to_cid);
		err = -EOPNOTSUPP;
	}
out:
	return err;
}

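/* Validate a tc gate action and collect it into a PSFP stream gate config */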
static int sparx5_tc_flower_parse_act_gate(struct sparx5_psfp_sg *sg,
					   struct flow_action_entry *act,
					   struct netlink_ext_ack *extack)
{
	int i;

	if (act->gate.prio < -1 || act->gate.prio > SPX5_PSFP_SG_MAX_IPV) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid gate priority");
		return -EINVAL;
	}

	if (act->gate.cycletime < SPX5_PSFP_SG_MIN_CYCLE_TIME_NS ||
	    act->gate.cycletime > SPX5_PSFP_SG_MAX_CYCLE_TIME_NS) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid gate cycletime");
		return -EINVAL;
	}

	if (act->gate.cycletimeext > SPX5_PSFP_SG_MAX_CYCLE_TIME_NS) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid gate cycletimeext");
		return -EINVAL;
	}

	if (act->gate.num_entries >= SPX5_PSFP_GCE_CNT) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid number of gate entries");
		return -EINVAL;
	}

	sg->gate_state = true;
	sg->ipv = act->gate.prio;
	sg->num_entries = act->gate.num_entries;
	sg->cycletime = act->gate.cycletime;
	sg->cycletimeext = act->gate.cycletimeext;

	for (i = 0; i < sg->num_entries; i++) {
		sg->gce[i].gate_state = !!act->gate.entries[i].gate_state;
		sg->gce[i].interval = act->gate.entries[i].interval;
		sg->gce[i].ipv = act->gate.entries[i].ipv;
		sg->gce[i].maxoctets = act->gate.entries[i].maxoctets;
	}

	return 0;
}

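/* Validate a tc police action and collect it into a service policer config */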
static int sparx5_tc_flower_parse_act_police(struct sparx5_policer *pol,
					     struct flow_action_entry *act,
					     struct netlink_ext_ack *extack)
{
	pol->type = SPX5_POL_SERVICE;
	pol->rate = div_u64(act->police.rate_bytes_ps, 1000) * 8;
	pol->burst = act->police.burst;
	pol->idx = act->hw_index;

	/* rate is now in kbit */
	if (pol->rate > DIV_ROUND_UP(SPX5_SDLB_GROUP_RATE_MAX, 1000)) {
		NL_SET_ERR_MSG_MOD(extack, "Maximum rate exceeded");
		return -EINVAL;
	}

	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack, "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	return 0;
}

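/* Allocate the PSFP stream gate, flow meter and stream filter and bind them to
 * the rule via the ISDX actions
 */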
static int sparx5_tc_flower_psfp_setup(struct sparx5 *sparx5,
				       struct vcap_rule *vrule, int sg_idx,
				       int pol_idx, struct sparx5_psfp_sg *sg,
				       struct sparx5_psfp_fm *fm,
				       struct sparx5_psfp_sf *sf)
{
	u32 psfp_sfid = 0, psfp_fmid = 0, psfp_sgid = 0;
	int ret;

	/* Must always have a stream gate - max sdu (filter option) is evaluated
	 * after frames have passed the gate, so in case of only a policer, we
	 * allocate a stream gate that is always open.
	 */
	if (sg_idx < 0) {
		sg_idx = sparx5_pool_idx_to_id(SPX5_PSFP_SG_OPEN);
		sg->ipv = 0; /* Disabled */
		sg->cycletime = SPX5_PSFP_SG_CYCLE_TIME_DEFAULT;
		sg->num_entries = 1;
		sg->gate_state = 1; /* Open */
		sg->gate_enabled = 1;
		sg->gce[0].gate_state = 1;
		sg->gce[0].interval = SPX5_PSFP_SG_CYCLE_TIME_DEFAULT;
		sg->gce[0].ipv = 0;
		sg->gce[0].maxoctets = 0; /* Disabled */
	}

	ret = sparx5_psfp_sg_add(sparx5, sg_idx, sg, &psfp_sgid);
	if (ret < 0)
		return ret;

	if (pol_idx >= 0) {
		/* Add new flow-meter */
		ret = sparx5_psfp_fm_add(sparx5, pol_idx, fm, &psfp_fmid);
		if (ret < 0)
			return ret;
	}

	/* Map stream filter to stream gate */
	sf->sgid = psfp_sgid;

	/* Add new stream-filter and map it to a stream gate */
	ret = sparx5_psfp_sf_add(sparx5, sf, &psfp_sfid);
	if (ret < 0)
		return ret;

	/* Streams are classified by ISDX - map ISDX 1:1 to sfid for now. */
	sparx5_isdx_conf_set(sparx5, psfp_sfid, psfp_sfid, psfp_fmid);

	ret = vcap_rule_add_action_bit(vrule, VCAP_AF_ISDX_ADD_REPLACE_SEL,
				       VCAP_BIT_1);
	if (ret)
		return ret;

	ret = vcap_rule_add_action_u32(vrule, VCAP_AF_ISDX_VAL, psfp_sfid);
	if (ret)
		return ret;

	return 0;
}

/* Handle the action trap for a VCAP rule */
static int sparx5_tc_action_trap(struct vcap_admin *admin,
				 struct vcap_rule *vrule,
				 struct flow_cls_offload *fco)
{
	int err = 0;

	switch (admin->vtype) {
	case VCAP_TYPE_IS2:
		err = vcap_rule_add_action_bit(vrule,
					       VCAP_AF_CPU_COPY_ENA,
					       VCAP_BIT_1);
		if (err)
			break;
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_CPU_QUEUE_NUM, 0);
		if (err)
			break;
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_MASK_MODE,
					       SPX5_PMM_REPLACE_ALL);
		break;
	case VCAP_TYPE_ES0:
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_FWD_SEL,
					       SPX5_FWSEL_REDIRECT_TO_LOOPBACK);
		break;
	case VCAP_TYPE_ES2:
		err = vcap_rule_add_action_bit(vrule,
					       VCAP_AF_CPU_COPY_ENA,
					       VCAP_BIT_1);
		if (err)
			break;
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_CPU_QUEUE_NUM, 0);
		break;
	default:
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Trap action not supported in this VCAP");
		err = -EOPNOTSUPP;
		break;
	}
	return err;
}

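/* Handle the vlan pop action: configure ES0 to send the frame untagged */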
static int sparx5_tc_action_vlan_pop(struct vcap_admin *admin,
				     struct vcap_rule *vrule,
				     struct flow_cls_offload *fco,
				     u16 tpid)
{
	int err = 0;

	switch (admin->vtype) {
	case VCAP_TYPE_ES0:
		break;
	default:
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "VLAN pop action not supported in this VCAP");
		return -EOPNOTSUPP;
	}

	switch (tpid) {
	case ETH_P_8021Q:
	case ETH_P_8021AD:
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_PUSH_OUTER_TAG,
					       SPX5_OTAG_UNTAG);
		break;
	default:
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Invalid vlan proto");
		err = -EINVAL;
	}
	return err;
}

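/* Handle the vlan modify action: rewrite the outer tag using ES0 tag A */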
static int sparx5_tc_action_vlan_modify(struct vcap_admin *admin,
					struct vcap_rule *vrule,
					struct flow_cls_offload *fco,
					struct flow_action_entry *act,
					u16 tpid)
{
	int err = 0;

	switch (admin->vtype) {
	case VCAP_TYPE_ES0:
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_PUSH_OUTER_TAG,
					       SPX5_OTAG_TAG_A);
		if (err)
			return err;
		break;
	default:
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "VLAN modify action not supported in this VCAP");
		return -EOPNOTSUPP;
	}

	switch (tpid) {
	case ETH_P_8021Q:
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_TAG_A_TPID_SEL,
					       SPX5_TPID_A_8100);
		break;
	case ETH_P_8021AD:
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_TAG_A_TPID_SEL,
					       SPX5_TPID_A_88A8);
		break;
	default:
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Invalid vlan proto");
		err = -EINVAL;
	}
	if (err)
		return err;

	err = vcap_rule_add_action_u32(vrule,
				       VCAP_AF_TAG_A_VID_SEL,
				       SPX5_VID_A_VAL);
	if (err)
		return err;

	err = vcap_rule_add_action_u32(vrule,
				       VCAP_AF_VID_A_VAL,
				       act->vlan.vid);
	if (err)
		return err;

	err = vcap_rule_add_action_u32(vrule,
				       VCAP_AF_TAG_A_PCP_SEL,
				       SPX5_PCP_A_VAL);
	if (err)
		return err;

	err = vcap_rule_add_action_u32(vrule,
				       VCAP_AF_PCP_A_VAL,
				       act->vlan.prio);
	if (err)
		return err;

	return vcap_rule_add_action_u32(vrule,
					VCAP_AF_TAG_A_DEI_SEL,
					SPX5_DEI_A_CLASSIFIED);
}

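/* Handle the vlan push action: push a new outer tag in ES0 and, for 802.1ad,
 * push the classified tag as inner tag
 */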
static int sparx5_tc_action_vlan_push(struct vcap_admin *admin,
				      struct vcap_rule *vrule,
				      struct flow_cls_offload *fco,
				      struct flow_action_entry *act,
				      u16 tpid)
{
	u16 act_tpid = be16_to_cpu(act->vlan.proto);
	int err = 0;

	switch (admin->vtype) {
	case VCAP_TYPE_ES0:
		break;
	default:
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "VLAN push action not supported in this VCAP");
		return -EOPNOTSUPP;
	}

	if (tpid == ETH_P_8021AD) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Cannot push on double tagged frames");
		return -EOPNOTSUPP;
	}

	err = sparx5_tc_action_vlan_modify(admin, vrule, fco, act, act_tpid);
	if (err)
		return err;

	switch (act_tpid) {
	case ETH_P_8021Q:
		break;
	case ETH_P_8021AD:
		/* Push classified tag as inner tag */
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_PUSH_INNER_TAG,
					       SPX5_ITAG_PUSH_B_TAG);
		if (err)
			break;
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_TAG_B_TPID_SEL,
					       SPX5_TPID_B_CLASSIFIED);
		break;
	default:
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Invalid vlan proto");
		err = -EINVAL;
	}
	return err;
}

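/* Add a tc flower filter as one or more VCAP rules, including the actions and
 * any PSFP resources
 */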
static int sparx5_tc_flower_replace(struct net_device *ndev,
				    struct flow_cls_offload *fco,
				    struct vcap_admin *admin,
				    bool ingress)
{
	struct sparx5_psfp_sf sf = { .max_sdu = SPX5_PSFP_SF_MAX_SDU };
	struct netlink_ext_ack *extack = fco->common.extack;
	int err, idx, tc_sg_idx = -1, tc_pol_idx = -1;
	struct vcap_tc_flower_parse_usage state = {
		.fco = fco,
		.l3_proto = ETH_P_ALL,
		.admin = admin,
	};
	struct sparx5_port *port = netdev_priv(ndev);
	struct sparx5_multiple_rules multi = {};
	struct sparx5 *sparx5 = port->sparx5;
	struct sparx5_psfp_sg sg = { 0 };
	struct sparx5_psfp_fm fm = { 0 };
	struct flow_action_entry *act;
	struct vcap_control *vctrl;
	struct flow_rule *frule;
	struct vcap_rule *vrule;

	vctrl = port->sparx5->vcap_ctrl;

	err = sparx5_tc_flower_action_check(vctrl, ndev, fco, ingress);
	if (err)
		return err;

	vrule = vcap_alloc_rule(vctrl, ndev, fco->common.chain_index, VCAP_USER_TC,
				fco->common.prio, 0);
	if (IS_ERR(vrule))
		return PTR_ERR(vrule);

	vrule->cookie = fco->cookie;

	state.vrule = vrule;
	state.frule = flow_cls_offload_flow_rule(fco);
	err = sparx5_tc_use_dissectors(&state, admin, vrule);
	if (err)
		goto out;

	err = sparx5_tc_add_rule_counter(admin, vrule);
	if (err)
		goto out;

	err = sparx5_tc_add_rule_link_target(admin, vrule,
					     fco->common.chain_index);
	if (err)
		goto out;

	frule = flow_cls_offload_flow_rule(fco);
	flow_action_for_each(idx, act, &frule->action) {
		switch (act->id) {
		case FLOW_ACTION_GATE: {
			err = sparx5_tc_flower_parse_act_gate(&sg, act, extack);
			if (err < 0)
				goto out;

			tc_sg_idx = act->hw_index;

			break;
		}
		case FLOW_ACTION_POLICE: {
			err = sparx5_tc_flower_parse_act_police(&fm.pol, act,
								extack);
			if (err < 0)
				goto out;

			tc_pol_idx = fm.pol.idx;
			sf.max_sdu = act->police.mtu;

			break;
		}
		case FLOW_ACTION_TRAP:
			err = sparx5_tc_action_trap(admin, vrule, fco);
			if (err)
				goto out;
			break;
		case FLOW_ACTION_ACCEPT:
			err = sparx5_tc_set_actionset(admin, vrule);
			if (err)
				goto out;
			break;
		case FLOW_ACTION_GOTO:
			err = sparx5_tc_set_actionset(admin, vrule);
			if (err)
				goto out;
			sparx5_tc_add_rule_link(vctrl, admin, vrule,
						fco->common.chain_index,
						act->chain_index);
			break;
		case FLOW_ACTION_VLAN_POP:
			err = sparx5_tc_action_vlan_pop(admin, vrule, fco,
							state.tpid);
			if (err)
				goto out;
			break;
		case FLOW_ACTION_VLAN_PUSH:
			err = sparx5_tc_action_vlan_push(admin, vrule, fco,
							 act, state.tpid);
			if (err)
				goto out;
			break;
		case FLOW_ACTION_VLAN_MANGLE:
			err = sparx5_tc_action_vlan_modify(admin, vrule, fco,
							   act, state.tpid);
			if (err)
				goto out;
			break;
		default:
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "Unsupported TC action");
			err = -EOPNOTSUPP;
			goto out;
		}
	}

	/* Setup PSFP */
	if (tc_sg_idx >= 0 || tc_pol_idx >= 0) {
		err = sparx5_tc_flower_psfp_setup(sparx5, vrule, tc_sg_idx,
						  tc_pol_idx, &sg, &fm, &sf);
		if (err)
			goto out;
	}

	err = sparx5_tc_select_protocol_keyset(ndev, vrule, admin,
					       state.l3_proto, &multi);
	if (err) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "No matching port keyset for filter protocol and keys");
		goto out;
	}

	/* provide the l3 protocol to guide the keyset selection */
	err = vcap_val_rule(vrule, state.l3_proto);
	if (err) {
		vcap_set_tc_exterr(fco, vrule);
		goto out;
	}
	err = vcap_add_rule(vrule);
	if (err)
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Could not add the filter");

	if (state.l3_proto == ETH_P_ALL)
		err = sparx5_tc_add_remaining_rules(vctrl, fco, vrule, admin,
						    &multi);

out:
	vcap_free_rule(vrule);
	return err;
}

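/* Free the PSFP stream gate, flow meter and stream filter attached to a rule */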
static void sparx5_tc_free_psfp_resources(struct sparx5 *sparx5,
					  struct vcap_rule *vrule)
{
	struct vcap_client_actionfield *afield;
	u32 isdx, sfid, sgid, fmid;

	/* Check if VCAP_AF_ISDX_VAL action is set for this rule - and if
	 * it is used for stream and/or flow-meter classification.
	 */
	afield = vcap_find_actionfield(vrule, VCAP_AF_ISDX_VAL);
	if (!afield)
		return;

	isdx = afield->data.u32.value;
	sfid = sparx5_psfp_isdx_get_sf(sparx5, isdx);

	if (!sfid)
		return;

	fmid = sparx5_psfp_isdx_get_fm(sparx5, isdx);
	sgid = sparx5_psfp_sf_get_sg(sparx5, sfid);

	if (fmid && sparx5_psfp_fm_del(sparx5, fmid) < 0)
		pr_err("%s:%d Could not delete invalid fmid: %d", __func__,
		       __LINE__, fmid);

	if (sgid && sparx5_psfp_sg_del(sparx5, sgid) < 0)
		pr_err("%s:%d Could not delete invalid sgid: %d", __func__,
		       __LINE__, sgid);

	if (sparx5_psfp_sf_del(sparx5, sfid) < 0)
		pr_err("%s:%d Could not delete invalid sfid: %d", __func__,
		       __LINE__, sfid);

	sparx5_isdx_conf_set(sparx5, isdx, 0, 0);
}

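/* Look up a rule and free the resources attached to it before deletion */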
static int sparx5_tc_free_rule_resources(struct net_device *ndev,
					 struct vcap_control *vctrl,
					 int rule_id)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct sparx5 *sparx5 = port->sparx5;
	struct vcap_rule *vrule;
	int ret = 0;

	vrule = vcap_get_rule(vctrl, rule_id);
	if (!vrule || IS_ERR(vrule))
		return -EINVAL;

	sparx5_tc_free_psfp_resources(sparx5, vrule);

	vcap_free_rule(vrule);
	return ret;
}

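/* Delete all VCAP rules created from the tc flower filter cookie */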
static int sparx5_tc_flower_destroy(struct net_device *ndev,
				    struct flow_cls_offload *fco,
				    struct vcap_admin *admin)
{
	struct sparx5_port *port = netdev_priv(ndev);
	int err = -ENOENT, count = 0, rule_id;
	struct vcap_control *vctrl;

	vctrl = port->sparx5->vcap_ctrl;
	while (true) {
		rule_id = vcap_lookup_rule_by_cookie(vctrl, fco->cookie);
		if (rule_id <= 0)
			break;
		if (count == 0) {
			/* Resources are attached to the first rule of
			 * a set of rules. Only works if the rules are
			 * in the correct order.
			 */
			err = sparx5_tc_free_rule_resources(ndev, vctrl,
							    rule_id);
			if (err)
				pr_err("%s:%d: could not free resources %d\n",
				       __func__, __LINE__, rule_id);
		}
		err = vcap_del_rule(vctrl, ndev, rule_id);
		if (err) {
			pr_err("%s:%d: could not delete rule %d\n",
			       __func__, __LINE__, rule_id);
			break;
		}
	}
	return err;
}

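/* Report the rule counter value for a tc flower filter */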
static int sparx5_tc_flower_stats(struct net_device *ndev,
				  struct flow_cls_offload *fco,
				  struct vcap_admin *admin)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_counter ctr = {};
	struct vcap_control *vctrl;
	ulong lastused = 0;
	int err;

	vctrl = port->sparx5->vcap_ctrl;
	err = vcap_get_rule_count_by_cookie(vctrl, &ctr, fco->cookie);
	if (err)
		return err;
	flow_stats_update(&fco->stats, 0x0, ctr.value, 0, lastused,
			  FLOW_ACTION_HW_STATS_IMMEDIATE);
	return err;
}

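/* Entry point for tc flower offload on a port: dispatch on the flower command */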
int sparx5_tc_flower(struct net_device *ndev, struct flow_cls_offload *fco,
		     bool ingress)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_control *vctrl;
	struct vcap_admin *admin;
	int err = -EINVAL;

	/* Get vcap instance from the chain id */
	vctrl = port->sparx5->vcap_ctrl;
	admin = vcap_find_admin(vctrl, fco->common.chain_index);
	if (!admin) {
		NL_SET_ERR_MSG_MOD(fco->common.extack, "Invalid chain");
		return err;
	}

	switch (fco->command) {
	case FLOW_CLS_REPLACE:
		return sparx5_tc_flower_replace(ndev, fco, admin, ingress);
	case FLOW_CLS_DESTROY:
		return sparx5_tc_flower_destroy(ndev, fco, admin);
	case FLOW_CLS_STATS:
		return sparx5_tc_flower_stats(ndev, fco, admin);
	default:
		return -EOPNOTSUPP;
	}
}