1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
3
4 #include <linux/netdevice.h>
5 #include "en.h"
6 #include "en/fs.h"
7 #include "ipsec.h"
8 #include "fs_core.h"
9 #include "lib/ipsec_fs_roce.h"
10
11 #define NUM_IPSEC_FTE BIT(15)
12
/* Pair of flow counters attached to IPsec steering rules */
struct mlx5e_ipsec_fc {
	struct mlx5_fc *cnt;	/* packets accepted/processed by IPsec rules */
	struct mlx5_fc *drop;	/* packets dropped by XFRM_POLICY_BLOCK rules */
};
17
/* Refcounted set of IPsec flow tables for one direction (RX or TX) */
struct mlx5e_ipsec_ft {
	struct mutex mutex; /* Protect changes to this struct */
	struct mlx5_flow_table *pol;	/* policy table, first hop */
	struct mlx5_flow_table *sa;	/* SA (state) table */
	struct mlx5_flow_table *status;	/* syndrome/status table, used on RX only */
	u32 refcnt;	/* number of rules/policies using these tables */
};
25
/* Catch-all rule installed in a table's last (reserved) flow entry,
 * together with the dedicated flow group holding it.
 */
struct mlx5e_ipsec_miss {
	struct mlx5_flow_group *group;
	struct mlx5_flow_handle *rule;
};
30
/* Per-family (IPv4/IPv6) RX IPsec steering state */
struct mlx5e_ipsec_rx {
	struct mlx5e_ipsec_ft ft;
	struct mlx5e_ipsec_miss pol;	/* miss rule of the policy table */
	struct mlx5e_ipsec_miss sa;	/* miss rule of the SA table */
	struct mlx5e_ipsec_rule status;	/* syndrome-copy rule in the status table */
	struct mlx5e_ipsec_fc *fc;	/* shared between IPv4 and IPv6 instances */
};
38
/* TX IPsec steering state (single instance, both address families) */
struct mlx5e_ipsec_tx {
	struct mlx5e_ipsec_ft ft;
	struct mlx5e_ipsec_miss pol;	/* miss rule of the policy table */
	struct mlx5_flow_namespace *ns;	/* egress IPsec namespace */
	struct mlx5e_ipsec_fc *fc;
};
45
46 /* IPsec RX flow steering */
/* Map an address family to the TTC traffic type used to steer ESP traffic. */
static enum mlx5_traffic_types family2tt(u32 family)
{
	return family == AF_INET ? MLX5_TT_IPV4_IPSEC_ESP :
				   MLX5_TT_IPV6_IPSEC_ESP;
}
53
ipsec_ft_create(struct mlx5_flow_namespace * ns,int level,int prio,int max_num_groups)54 static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
55 int level, int prio,
56 int max_num_groups)
57 {
58 struct mlx5_flow_table_attr ft_attr = {};
59
60 ft_attr.autogroup.num_reserved_entries = 1;
61 ft_attr.autogroup.max_num_groups = max_num_groups;
62 ft_attr.max_fte = NUM_IPSEC_FTE;
63 ft_attr.level = level;
64 ft_attr.prio = prio;
65
66 return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
67 }
68
/* Install the catch-all rule of the RX status table: copy the 7-bit IPsec
 * syndrome reported by HW into metadata regB[24:30], count the packet and
 * forward it to dest[0] (dest[1] is the flow counter).
 *
 * On success stores the rule and its modify header in rx->status.
 * Returns 0 or a negative errno.
 */
static int ipsec_status_rule(struct mlx5_core_dev *mdev,
			     struct mlx5e_ipsec_rx *rx,
			     struct mlx5_flow_destination *dest)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5_flow_handle *fte;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Action to copy 7 bit ipsec_syndrome to regB[24:30] */
	MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
	MLX5_SET(copy_action_in, action, src_offset, 0);
	MLX5_SET(copy_action_in, action, length, 7);
	MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	MLX5_SET(copy_action_in, action, dst_offset, 24);

	modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
					      1, action);

	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		mlx5_core_err(mdev,
			      "fail to alloc ipsec copy modify_header_id err=%d\n", err);
		goto out_spec;
	}

	/* create fte: empty spec, so every packet reaching this table matches */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
	flow_act.modify_hdr = modify_hdr;
	fte = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		mlx5_core_err(mdev, "fail to add ipsec rx err copy rule err=%d\n", err);
		goto out;
	}

	kvfree(spec);
	rx->status.rule = fte;
	rx->status.modify_hdr = modify_hdr;
	return 0;

out:
	mlx5_modify_header_dealloc(mdev, modify_hdr);
out_spec:
	kvfree(spec);
	return err;
}
125
/* Install a miss (catch-all) rule in @ft's last flow entry, which
 * ipsec_ft_create() reserved for this purpose.  Packets matching no
 * other rule in the table are forwarded to @dest.
 *
 * On success the group and rule are stored in @miss.  Returns 0 or a
 * negative errno; on rule failure the group is destroyed here, so the
 * caller never needs partial cleanup of @miss.
 */
static int ipsec_miss_create(struct mlx5_core_dev *mdev,
			     struct mlx5_flow_table *ft,
			     struct mlx5e_ipsec_miss *miss,
			     struct mlx5_flow_destination *dest)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!flow_group_in || !spec) {
		err = -ENOMEM;
		goto out;
	}

	/* Create miss_group spanning only the table's last entry */
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
	miss->group = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(miss->group)) {
		err = PTR_ERR(miss->group);
		mlx5_core_err(mdev, "fail to create IPsec miss_group err=%d\n",
			      err);
		goto out;
	}

	/* Create miss rule: empty spec matches everything */
	miss->rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(miss->rule)) {
		mlx5_destroy_flow_group(miss->group);
		err = PTR_ERR(miss->rule);
		mlx5_core_err(mdev, "fail to create IPsec miss_rule err=%d\n",
			      err);
		goto out;
	}
out:
	kvfree(flow_group_in);
	kvfree(spec);
	return err;
}
169
/* Tear down the RX steering chain built by rx_create(), in reverse
 * creation order: policy table, SA table, status table, then the RoCE
 * tables for @family.  Caller must hold rx->ft.mutex (see rx_ft_put()).
 */
static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		       struct mlx5e_ipsec_rx *rx, u32 family)
{
	mlx5_del_flow_rules(rx->pol.rule);
	mlx5_destroy_flow_group(rx->pol.group);
	mlx5_destroy_flow_table(rx->ft.pol);

	mlx5_del_flow_rules(rx->sa.rule);
	mlx5_destroy_flow_group(rx->sa.group);
	mlx5_destroy_flow_table(rx->ft.sa);

	mlx5_del_flow_rules(rx->status.rule);
	mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
	mlx5_destroy_flow_table(rx->ft.status);

	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family);
}
187
/* Build the RX steering chain for @family:
 *
 *   pol table --miss--> sa table --SA rules--> status table --> RoCE ft
 *                                                               or TTC
 *                                                               default dest
 *
 * Tables are created bottom-up (status, sa, pol) so each table's miss/
 * status rule can point at an already-existing destination.  Unwound in
 * reverse via the goto chain on any failure.  Returns 0 or negative errno.
 */
static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		     struct mlx5e_ipsec_rx *rx, u32 family)
{
	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false);
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
	struct mlx5_flow_destination default_dest;
	struct mlx5_flow_destination dest[2];
	struct mlx5_flow_table *ft;
	int err;

	default_dest = mlx5_ttc_get_default_dest(ttc, family2tt(family));
	err = mlx5_ipsec_fs_roce_rx_create(mdev, ipsec->roce, ns, &default_dest,
					   family, MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
					   MLX5E_NIC_PRIO);
	if (err)
		return err;

	/* Status table: single auto-group, holds only the syndrome rule */
	ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
			     MLX5E_NIC_PRIO, 1);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_fs_ft_status;
	}

	rx->ft.status = ft;

	/* Forward decrypted traffic to the RoCE table if one exists for
	 * this family, otherwise to the TTC default destination.
	 */
	ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
	if (ft) {
		dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[0].ft = ft;
	} else {
		dest[0] = default_dest;
	}

	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
	err = ipsec_status_rule(mdev, rx, dest);
	if (err)
		goto err_add;

	/* Create SA (state) FT */
	ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_LEVEL, MLX5E_NIC_PRIO,
			     2);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_fs_ft;
	}
	rx->ft.sa = ft;

	/* SA-table miss keeps forwarding to the status-table destinations */
	err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, dest);
	if (err)
		goto err_fs;

	/* Policy table; its miss rule forwards to the SA table */
	ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_POL_FT_LEVEL, MLX5E_NIC_PRIO,
			     2);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_pol_ft;
	}
	rx->ft.pol = ft;
	memset(dest, 0x00, 2 * sizeof(*dest));
	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[0].ft = rx->ft.sa;
	err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, dest);
	if (err)
		goto err_pol_miss;

	return 0;

err_pol_miss:
	mlx5_destroy_flow_table(rx->ft.pol);
err_pol_ft:
	mlx5_del_flow_rules(rx->sa.rule);
	mlx5_destroy_flow_group(rx->sa.group);
err_fs:
	mlx5_destroy_flow_table(rx->ft.sa);
err_fs_ft:
	mlx5_del_flow_rules(rx->status.rule);
	mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
err_add:
	mlx5_destroy_flow_table(rx->ft.status);
err_fs_ft_status:
	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family);
	return err;
}
273
/* Take a reference on the RX steering tables for @family, creating them
 * and connecting the TTC ESP traffic type to the policy table on first
 * use.  Serialized by rx->ft.mutex.
 *
 * Returns the rx instance, or ERR_PTR on creation failure.
 */
static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
					struct mlx5e_ipsec *ipsec, u32 family)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
	struct mlx5_flow_destination dest = {};
	struct mlx5e_ipsec_rx *rx;
	int err = 0;

	if (family == AF_INET)
		rx = ipsec->rx_ipv4;
	else
		rx = ipsec->rx_ipv6;

	mutex_lock(&rx->ft.mutex);
	if (rx->ft.refcnt)
		goto skip;

	/* create FT */
	err = rx_create(mdev, ipsec, rx, family);
	if (err)
		goto out;

	/* connect: steer the family's ESP traffic into our policy table */
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = rx->ft.pol;
	mlx5_ttc_fwd_dest(ttc, family2tt(family), &dest);

skip:
	rx->ft.refcnt++;
out:
	mutex_unlock(&rx->ft.mutex);
	if (err)
		return ERR_PTR(err);
	return rx;
}
309
/* Drop a reference taken by rx_ft_get(); on the last put, restore the
 * TTC default destination and destroy the RX steering tables.
 */
static void rx_ft_put(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		      u32 family)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
	struct mlx5e_ipsec_rx *rx;

	if (family == AF_INET)
		rx = ipsec->rx_ipv4;
	else
		rx = ipsec->rx_ipv6;

	mutex_lock(&rx->ft.mutex);
	rx->ft.refcnt--;
	if (rx->ft.refcnt)
		goto out;

	/* disconnect before destroying so no traffic hits dying tables */
	mlx5_ttc_fwd_default_dest(ttc, family2tt(family));

	/* remove FT */
	rx_destroy(mdev, ipsec, rx, family);

out:
	mutex_unlock(&rx->ft.mutex);
}
335
336 /* IPsec TX flow steering */
/* Build the TX steering chain in tx->ns:
 *
 *   pol table --miss--> sa table, plus the RoCE TX table pointing at pol.
 *
 * The SA table is created first so the policy miss rule can forward to
 * it.  Unwound in reverse on failure.  Returns 0 or a negative errno.
 */
static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
		     struct mlx5_ipsec_fs *roce)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_table *ft;
	int err;

	/* SA table at level 1, policy table above it at level 0 */
	ft = ipsec_ft_create(tx->ns, 1, 0, 4);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	tx->ft.sa = ft;

	ft = ipsec_ft_create(tx->ns, 0, 0, 2);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_pol_ft;
	}
	tx->ft.pol = ft;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = tx->ft.sa;
	err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest);
	if (err)
		goto err_pol_miss;

	err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx->ft.pol);
	if (err)
		goto err_roce;
	return 0;

err_roce:
	mlx5_del_flow_rules(tx->pol.rule);
	mlx5_destroy_flow_group(tx->pol.group);
err_pol_miss:
	mlx5_destroy_flow_table(tx->ft.pol);
err_pol_ft:
	mlx5_destroy_flow_table(tx->ft.sa);
	return err;
}
376
/* Take a reference on the TX steering tables, creating them on first
 * use.  Serialized by tx->ft.mutex.  Returns tx or ERR_PTR on failure.
 */
static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
					struct mlx5e_ipsec *ipsec)
{
	struct mlx5e_ipsec_tx *tx = ipsec->tx;
	int err = 0;

	mutex_lock(&tx->ft.mutex);
	if (tx->ft.refcnt)
		goto skip;

	err = tx_create(mdev, tx, ipsec->roce);
	if (err)
		goto out;

skip:
	tx->ft.refcnt++;
out:
	mutex_unlock(&tx->ft.mutex);
	if (err)
		return ERR_PTR(err);
	return tx;
}
399
/* Drop a reference taken by tx_ft_get(); on the last put, tear down the
 * RoCE TX table, the policy miss rule/group and both TX flow tables.
 */
static void tx_ft_put(struct mlx5e_ipsec *ipsec)
{
	struct mlx5e_ipsec_tx *tx = ipsec->tx;

	mutex_lock(&tx->ft.mutex);
	tx->ft.refcnt--;
	if (tx->ft.refcnt)
		goto out;

	mlx5_ipsec_fs_roce_tx_destroy(ipsec->roce);
	mlx5_del_flow_rules(tx->pol.rule);
	mlx5_destroy_flow_group(tx->pol.group);
	mlx5_destroy_flow_table(tx->ft.pol);
	mlx5_destroy_flow_table(tx->ft.sa);
out:
	mutex_unlock(&tx->ft.mutex);
}
417
/* Add an exact IPv4 source/destination address match to @spec. */
static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
			    __be32 *daddr)
{
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	/* IPv4 packets only */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);

	/* Full-mask match on both addresses */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
}
435
/* Add an exact IPv6 source/destination address match to @spec. */
static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
			    __be32 *daddr)
{
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	/* IPv6 packets only */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);

	/* Full-mask match on both 128-bit addresses */
	memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16);
	memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16);
	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
}
453
setup_fte_esp(struct mlx5_flow_spec * spec)454 static void setup_fte_esp(struct mlx5_flow_spec *spec)
455 {
456 /* ESP header */
457 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
458
459 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
460 MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
461 }
462
/* Add an exact match on the ESP SPI (a misc-parameters field). */
static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi)
{
	/* SPI number */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 misc_parameters.outer_esp_spi);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters.outer_esp_spi, spi);
}
471
setup_fte_no_frags(struct mlx5_flow_spec * spec)472 static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
473 {
474 /* Non fragmented */
475 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
476
477 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
478 MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
479 }
480
setup_fte_reg_a(struct mlx5_flow_spec * spec)481 static void setup_fte_reg_a(struct mlx5_flow_spec *spec)
482 {
483 /* Add IPsec indicator in metadata_reg_a */
484 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
485
486 MLX5_SET(fte_match_param, spec->match_criteria,
487 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
488 MLX5_SET(fte_match_param, spec->match_value,
489 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
490 }
491
/* Match on reqid written to metadata reg C0 by the TX policy rule
 * (setup_modify_header()), proving the packet passed the policy check
 * before an SA is selected.
 */
static void setup_fte_reg_c0(struct mlx5_flow_spec *spec, u32 reqid)
{
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, reqid);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters_2.metadata_reg_c_0, reqid);
}
502
setup_fte_upper_proto_match(struct mlx5_flow_spec * spec,struct upspec * upspec)503 static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upspec *upspec)
504 {
505 if (upspec->proto != IPPROTO_UDP)
506 return;
507
508 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
509 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
510 MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, upspec->proto);
511 if (upspec->dport) {
512 MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria, udp_dport,
513 upspec->dport_mask);
514 MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, udp_dport, upspec->dport);
515 }
516
517 if (upspec->sport) {
518 MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria, udp_dport,
519 upspec->sport_mask);
520 MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, udp_dport, upspec->sport);
521 }
522 }
523
/* Allocate a modify-header action that writes @val into a metadata
 * register: regB for RX (read back from the CQE) or reg C0 for TX
 * (matched later by setup_fte_reg_c0()).
 *
 * On success attaches the action to @flow_act and sets MOD_HDR in its
 * action mask.  Returns 0, -EINVAL for an unknown @dir, or the
 * mlx5_modify_header_alloc() error.
 */
static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
			       struct mlx5_flow_act *flow_act)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5_modify_hdr *modify_hdr;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	switch (dir) {
	case XFRM_DEV_OFFLOAD_IN:
		MLX5_SET(set_action_in, action, field,
			 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
		ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
		break;
	case XFRM_DEV_OFFLOAD_OUT:
		MLX5_SET(set_action_in, action, field,
			 MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
		ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
		break;
	default:
		return -EINVAL;
	}

	/* Overwrite the full 32-bit register with @val */
	MLX5_SET(set_action_in, action, data, val);
	MLX5_SET(set_action_in, action, offset, 0);
	MLX5_SET(set_action_in, action, length, 32);

	modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, 1, action);
	if (IS_ERR(modify_hdr)) {
		mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
			      PTR_ERR(modify_hdr));
		return PTR_ERR(modify_hdr);
	}

	flow_act->modify_hdr = modify_hdr;
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	return 0;
}
562
/* Allocate the packet-reformat action for full (packet) offload:
 * RX strips the ESP transport header; TX inserts one, seeded with the
 * SPI (network order, first 4 bytes of the reformat buffer) and the
 * ICV size (param_0).
 *
 * On success attaches the reformat to @flow_act and sets
 * PACKET_REFORMAT in its action mask.  Returns 0 or a negative errno.
 */
static int setup_pkt_reformat(struct mlx5_core_dev *mdev,
			      struct mlx5_accel_esp_xfrm_attrs *attrs,
			      struct mlx5_flow_act *flow_act)
{
	enum mlx5_flow_namespace_type ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
	struct mlx5_pkt_reformat_params reformat_params = {};
	struct mlx5_pkt_reformat *pkt_reformat;
	u8 reformatbf[16] = {};
	__be32 spi;

	if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
		/* RX: remove the ESP transport header, no extra data needed */
		reformat_params.type = MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
		ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
		goto cmd;
	}

	if (attrs->family == AF_INET)
		reformat_params.type =
			MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
	else
		reformat_params.type =
			MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;

	/* convert to network format */
	spi = htonl(attrs->spi);
	memcpy(reformatbf, &spi, 4);

	reformat_params.param_0 = attrs->authsize;
	reformat_params.size = sizeof(reformatbf);
	reformat_params.data = &reformatbf;

cmd:
	pkt_reformat =
		mlx5_packet_reformat_alloc(mdev, &reformat_params, ns_type);
	if (IS_ERR(pkt_reformat))
		return PTR_ERR(pkt_reformat);

	flow_act->pkt_reformat = pkt_reformat;
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
	return 0;
}
604
/* Install the RX SA rule: match addresses + SPI + ESP, decrypt with the
 * SA's crypto object, tag regB with the object id (bit 31 marks an
 * IPsec-offloaded packet) and forward to the status table.  Takes a
 * reference on the RX tables; released on error or by
 * mlx5e_accel_ipsec_fs_del_rule().  Returns 0 or a negative errno.
 */
static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_ipsec_rx *rx;
	int err;

	rx = rx_ft_get(mdev, ipsec, attrs->family);
	if (IS_ERR(rx))
		return PTR_ERR(rx);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto err_alloc;
	}

	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	setup_fte_spi(spec, attrs->spi);
	setup_fte_esp(spec);
	setup_fte_no_frags(spec);

	/* regB = object id | BIT(31), read back in the CQE handler */
	err = setup_modify_header(mdev, sa_entry->ipsec_obj_id | BIT(31),
				  XFRM_DEV_OFFLOAD_IN, &flow_act);
	if (err)
		goto err_mod_header;

	switch (attrs->type) {
	case XFRM_DEV_OFFLOAD_PACKET:
		/* Full offload also strips the ESP header in HW */
		err = setup_pkt_reformat(mdev, attrs, &flow_act);
		if (err)
			goto err_pkt_reformat;
		break;
	default:
		break;
	}

	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
	flow_act.flags |= FLOW_ACT_NO_APPEND;
	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			   MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = rx->ft.status;
	rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
		goto err_add_flow;
	}
	kvfree(spec);

	sa_entry->ipsec_rule.rule = rule;
	sa_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
	sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
	return 0;

err_add_flow:
	/* pkt_reformat is only set on the PACKET-offload path */
	if (flow_act.pkt_reformat)
		mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
err_pkt_reformat:
	mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
err_mod_header:
	kvfree(spec);
err_alloc:
	rx_ft_put(mdev, ipsec, attrs->family);
	return err;
}
682
/* Install the TX SA rule: match addresses (plus SPI/ESP/reg_a for
 * crypto offload, or reg C0 reqid for packet offload), encrypt with the
 * SA's crypto object and count the packet.  Takes a reference on the TX
 * tables; released on error or by mlx5e_accel_ipsec_fs_del_rule().
 * Returns 0 or a negative errno.
 */
static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_ipsec_tx *tx;
	int err = 0;

	tx = tx_ft_get(mdev, ipsec);
	if (IS_ERR(tx))
		return PTR_ERR(tx);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto err_alloc;
	}

	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	setup_fte_no_frags(spec);
	setup_fte_upper_proto_match(spec, &attrs->upspec);

	switch (attrs->type) {
	case XFRM_DEV_OFFLOAD_CRYPTO:
		/* Crypto offload: stack built the ESP header, match it */
		setup_fte_spi(spec, attrs->spi);
		setup_fte_esp(spec);
		setup_fte_reg_a(spec);
		break;
	case XFRM_DEV_OFFLOAD_PACKET:
		/* Packet offload: match the reqid the policy rule wrote
		 * to reg C0, and let HW insert the ESP header.
		 */
		setup_fte_reg_c0(spec, attrs->reqid);
		err = setup_pkt_reformat(mdev, attrs, &flow_act);
		if (err)
			goto err_pkt_reformat;
		break;
	default:
		break;
	}

	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
	flow_act.flags |= FLOW_ACT_NO_APPEND;
	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW |
			   MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
			   MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter_id = mlx5_fc_id(tx->fc->cnt);
	rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
		goto err_add_flow;
	}

	kvfree(spec);
	sa_entry->ipsec_rule.rule = rule;
	sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
	return 0;

err_add_flow:
	if (flow_act.pkt_reformat)
		mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
err_pkt_reformat:
	kvfree(spec);
err_alloc:
	tx_ft_put(ipsec);
	return err;
}
758
/* Install a TX policy rule: match the selector, stamp reg C0 with the
 * policy's reqid (consumed by the SA rule), then either forward to the
 * SA table (ALLOW) or drop-and-count (BLOCK).  Takes a reference on the
 * TX tables; released on error or by mlx5e_accel_ipsec_fs_del_pol().
 * Returns 0 or a negative errno.
 */
static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_ipsec_tx *tx;
	int err, dstn = 0;

	tx = tx_ft_get(mdev, pol_entry->ipsec);
	if (IS_ERR(tx))
		return PTR_ERR(tx);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto err_alloc;
	}

	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	setup_fte_no_frags(spec);
	setup_fte_upper_proto_match(spec, &attrs->upspec);

	err = setup_modify_header(mdev, attrs->reqid, XFRM_DEV_OFFLOAD_OUT,
				  &flow_act);
	if (err)
		goto err_mod_header;

	switch (attrs->action) {
	case XFRM_POLICY_ALLOW:
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		break;
	case XFRM_POLICY_BLOCK:
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
				   MLX5_FLOW_CONTEXT_ACTION_COUNT;
		dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop);
		dstn++;
		break;
	default:
		WARN_ON(true);
		err = -EINVAL;
		goto err_action;
	}

	/* NOTE(review): the SA-table forward dest is appended for BLOCK
	 * policies too; presumably the DROP action takes precedence in
	 * HW — confirm against firmware behavior.
	 */
	flow_act.flags |= FLOW_ACT_NO_APPEND;
	dest[dstn].ft = tx->ft.sa;
	dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dstn++;
	rule = mlx5_add_flow_rules(tx->ft.pol, spec, &flow_act, dest, dstn);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
		goto err_action;
	}

	kvfree(spec);
	pol_entry->ipsec_rule.rule = rule;
	pol_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
	return 0;

err_action:
	mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
err_mod_header:
	kvfree(spec);
err_alloc:
	tx_ft_put(pol_entry->ipsec);
	return err;
}
834
/* Install an RX policy rule: match the selector and either forward to
 * the SA table (ALLOW) or drop-and-count (BLOCK).  Takes a reference on
 * the RX tables for the policy's family; released on error or by
 * mlx5e_accel_ipsec_fs_del_pol().  Returns 0 or a negative errno.
 */
static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
	struct mlx5_flow_destination dest[2];
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_ipsec_rx *rx;
	int err, dstn = 0;

	rx = rx_ft_get(mdev, pol_entry->ipsec, attrs->family);
	if (IS_ERR(rx))
		return PTR_ERR(rx);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto err_alloc;
	}

	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	setup_fte_no_frags(spec);

	switch (attrs->action) {
	case XFRM_POLICY_ALLOW:
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		break;
	case XFRM_POLICY_BLOCK:
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
		dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dstn].counter_id = mlx5_fc_id(rx->fc->drop);
		dstn++;
		break;
	default:
		WARN_ON(true);
		err = -EINVAL;
		goto err_action;
	}

	flow_act.flags |= FLOW_ACT_NO_APPEND;
	dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[dstn].ft = rx->ft.sa;
	dstn++;
	rule = mlx5_add_flow_rules(rx->ft.pol, spec, &flow_act, dest, dstn);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Fail to add RX IPsec policy rule err=%d\n", err);
		goto err_action;
	}

	kvfree(spec);
	pol_entry->ipsec_rule.rule = rule;
	return 0;

err_action:
	kvfree(spec);
err_alloc:
	rx_ft_put(mdev, pol_entry->ipsec, attrs->family);
	return err;
}
900
/* Free the TX and RX flow counters created by ipsec_fs_init_counters().
 * Only rx_ipv4's struct is freed: rx_ipv6->fc aliases the same
 * allocation (see ipsec_fs_init_counters()).
 */
static void ipsec_fs_destroy_counters(struct mlx5e_ipsec *ipsec)
{
	struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_tx *tx = ipsec->tx;

	mlx5_fc_destroy(mdev, tx->fc->drop);
	mlx5_fc_destroy(mdev, tx->fc->cnt);
	kfree(tx->fc);
	mlx5_fc_destroy(mdev, rx_ipv4->fc->drop);
	mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
	kfree(rx_ipv4->fc);
}
914
/* Allocate the cnt/drop flow-counter pairs for RX (shared by IPv4 and
 * IPv6) and TX.  Unwinds everything allocated so far on failure.
 * Returns 0 or a negative errno.
 */
static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec)
{
	struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
	struct mlx5e_ipsec_rx *rx_ipv6 = ipsec->rx_ipv6;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_tx *tx = ipsec->tx;
	struct mlx5e_ipsec_fc *fc;
	struct mlx5_fc *counter;
	int err;

	fc = kzalloc(sizeof(*rx_ipv4->fc), GFP_KERNEL);
	if (!fc)
		return -ENOMEM;

	/* Both IPv4 and IPv6 point to same flow counters struct. */
	rx_ipv4->fc = fc;
	rx_ipv6->fc = fc;
	counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_rx_cnt;
	}

	fc->cnt = counter;
	counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_rx_drop;
	}

	fc->drop = counter;
	/* Separate counter pair for the TX direction */
	fc = kzalloc(sizeof(*tx->fc), GFP_KERNEL);
	if (!fc) {
		err = -ENOMEM;
		goto err_tx_fc;
	}

	tx->fc = fc;
	counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_tx_cnt;
	}

	fc->cnt = counter;
	counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_tx_drop;
	}

	fc->drop = counter;
	return 0;

err_tx_drop:
	mlx5_fc_destroy(mdev, tx->fc->cnt);
err_tx_cnt:
	kfree(tx->fc);
err_tx_fc:
	mlx5_fc_destroy(mdev, rx_ipv4->fc->drop);
err_rx_drop:
	mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
err_rx_cnt:
	kfree(rx_ipv4->fc);
	return err;
}
981
/* Fill @ipsec_stats (a struct mlx5e_ipsec_hw_stats) from the RX and TX
 * flow counters.  All fields are zeroed first, so they stay zero if a
 * query fails (mlx5_fc_query() return values are intentionally ignored).
 */
void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_ipsec *ipsec = priv->ipsec;
	struct mlx5e_ipsec_hw_stats *stats;
	struct mlx5e_ipsec_fc *fc;

	stats = (struct mlx5e_ipsec_hw_stats *)ipsec_stats;

	stats->ipsec_rx_pkts = 0;
	stats->ipsec_rx_bytes = 0;
	stats->ipsec_rx_drop_pkts = 0;
	stats->ipsec_rx_drop_bytes = 0;
	stats->ipsec_tx_pkts = 0;
	stats->ipsec_tx_bytes = 0;
	stats->ipsec_tx_drop_pkts = 0;
	stats->ipsec_tx_drop_bytes = 0;

	/* rx_ipv4->fc aggregates both families (shared with rx_ipv6) */
	fc = ipsec->rx_ipv4->fc;
	mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_rx_pkts, &stats->ipsec_rx_bytes);
	mlx5_fc_query(mdev, fc->drop, &stats->ipsec_rx_drop_pkts,
		      &stats->ipsec_rx_drop_bytes);

	fc = ipsec->tx->fc;
	mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_tx_pkts, &stats->ipsec_tx_bytes);
	mlx5_fc_query(mdev, fc->drop, &stats->ipsec_tx_drop_pkts,
		      &stats->ipsec_tx_drop_bytes);
}
1010
mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry * sa_entry)1011 int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
1012 {
1013 if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
1014 return tx_add_rule(sa_entry);
1015
1016 return rx_add_rule(sa_entry);
1017 }
1018
/* Remove an SA's steering rule and release its resources in reverse
 * creation order, then drop the table reference for its direction.
 */
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);

	mlx5_del_flow_rules(ipsec_rule->rule);

	/* Only set for packet-offload SAs (see setup_pkt_reformat()) */
	if (ipsec_rule->pkt_reformat)
		mlx5_packet_reformat_dealloc(mdev, ipsec_rule->pkt_reformat);

	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) {
		tx_ft_put(sa_entry->ipsec);
		return;
	}

	/* RX rules additionally carry the regB modify header */
	mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
	rx_ft_put(mdev, sa_entry->ipsec, sa_entry->attrs.family);
}
1037
mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry * pol_entry)1038 int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
1039 {
1040 if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
1041 return tx_add_policy(pol_entry);
1042
1043 return rx_add_policy(pol_entry);
1044 }
1045
/* Remove a policy's steering rule and drop the table reference for its
 * direction.  Only TX policies carry a modify header (reg C0 reqid).
 */
void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &pol_entry->ipsec_rule;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);

	mlx5_del_flow_rules(ipsec_rule->rule);

	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
		rx_ft_put(mdev, pol_entry->ipsec, pol_entry->attrs.family);
		return;
	}

	mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
	tx_ft_put(pol_entry->ipsec);
}
1061
/* Undo mlx5e_accel_ipsec_fs_init(): free RoCE state, counters and the
 * TX/RX containers.  A no-op if init never ran (ipsec->tx == NULL).
 * The WARN_ONs catch rules/policies leaked past teardown.
 */
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
{
	if (!ipsec->tx)
		return;

	if (mlx5_ipsec_device_caps(ipsec->mdev) & MLX5_IPSEC_CAP_ROCE)
		mlx5_ipsec_fs_roce_cleanup(ipsec->roce);

	ipsec_fs_destroy_counters(ipsec);
	mutex_destroy(&ipsec->tx->ft.mutex);
	WARN_ON(ipsec->tx->ft.refcnt);
	kfree(ipsec->tx);

	mutex_destroy(&ipsec->rx_ipv4->ft.mutex);
	WARN_ON(ipsec->rx_ipv4->ft.refcnt);
	kfree(ipsec->rx_ipv4);

	mutex_destroy(&ipsec->rx_ipv6->ft.mutex);
	WARN_ON(ipsec->rx_ipv6->ft.refcnt);
	kfree(ipsec->rx_ipv6);
}
1083
/* Allocate and initialize the IPsec flow-steering containers (TX, RX
 * IPv4, RX IPv6), their counters and, when supported, the RoCE state.
 * Flow tables themselves are created lazily on first rule/policy add
 * (rx_ft_get()/tx_ft_get()).
 *
 * Returns 0, -EOPNOTSUPP when the egress IPsec namespace is missing,
 * or a negative errno.
 */
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_namespace *ns;
	int err = -ENOMEM;

	ns = mlx5_get_flow_namespace(ipsec->mdev,
				     MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
	if (!ns)
		return -EOPNOTSUPP;

	ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL);
	if (!ipsec->tx)
		return -ENOMEM;

	ipsec->rx_ipv4 = kzalloc(sizeof(*ipsec->rx_ipv4), GFP_KERNEL);
	if (!ipsec->rx_ipv4)
		goto err_rx_ipv4;

	ipsec->rx_ipv6 = kzalloc(sizeof(*ipsec->rx_ipv6), GFP_KERNEL);
	if (!ipsec->rx_ipv6)
		goto err_rx_ipv6;

	err = ipsec_fs_init_counters(ipsec);
	if (err)
		goto err_counters;

	mutex_init(&ipsec->tx->ft.mutex);
	mutex_init(&ipsec->rx_ipv4->ft.mutex);
	mutex_init(&ipsec->rx_ipv6->ft.mutex);
	ipsec->tx->ns = ns;

	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE)
		ipsec->roce = mlx5_ipsec_fs_roce_init(mdev);

	return 0;

err_counters:
	kfree(ipsec->rx_ipv6);
err_rx_ipv6:
	kfree(ipsec->rx_ipv4);
err_rx_ipv4:
	kfree(ipsec->tx);
	return err;
}
1129