/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <crypto/internal/geniv.h>
#include <crypto/aead.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>

#include "en.h"
#include "ipsec.h"
#include "ipsec_rxtx.h"

static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
{
	return (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
}

static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x)
{
	return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle;
}

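/* Decide whether the hardware ESN context must be refreshed. The
 * trigger/overlap pair tracks on which side of MLX5E_IPSEC_ESN_SCOPE_MID
 * the bottom of the replay window sits; when seq_bottom crosses that
 * boundary in either direction, flip the overlap bit and return true so
 * the caller pushes the new ESN value to the hardware. For the window
 * math below: with replay_esn->seq = 100 and a 32-entry window,
 * seq_bottom = 100 - 32 + 1 = 69.
 */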
static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_replay_state_esn *replay_esn;
	u32 seq_bottom = 0;
	u8 overlap;

	if (!(sa_entry->x->props.flags & XFRM_STATE_ESN)) {
		sa_entry->esn_state.trigger = 0;
		return false;
	}

	replay_esn = sa_entry->x->replay_esn;
	if (replay_esn->seq >= replay_esn->replay_window)
		seq_bottom = replay_esn->seq - replay_esn->replay_window + 1;

	overlap = sa_entry->esn_state.overlap;

	sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x,
						    htonl(seq_bottom));

	sa_entry->esn_state.trigger = 1;
	if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
		sa_entry->esn_state.overlap = 0;
		return true;
	} else if (unlikely(!overlap &&
			    (seq_bottom >= MLX5E_IPSEC_ESN_SCOPE_MID))) {
		sa_entry->esn_state.overlap = 1;
		return true;
	}

	return false;
}

static void mlx5e_ipsec_init_limits(struct mlx5e_ipsec_sa_entry *sa_entry,
				    struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct xfrm_state *x = sa_entry->x;

	attrs->hard_packet_limit = x->lft.hard_packet_limit;
	if (x->lft.soft_packet_limit == XFRM_INF)
		return;

	/* Hardware decrements the hard_packet_limit counter as it
	 * operates and fires an event when soft_packet_limit is
	 * reached. This means we have to translate the numbers so
	 * that the soft limit is counted correctly:
	 *
	 *   soft (HW comparator) = hard (user) - soft (user)
	 *
	 * As an example:
	 * The XFRM user sets a soft limit of 2 and a hard limit of 9,
	 * expecting a soft event after 2 packets and a hard event
	 * after 9 packets. In our case, the hard limit is set to 9
	 * and the soft limit comparator to 7, so the user gets the
	 * soft event after 2 packets.
	 */
	attrs->soft_packet_limit =
		x->lft.hard_packet_limit - x->lft.soft_packet_limit;
}

void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
					struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct xfrm_state *x = sa_entry->x;
	struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
	struct aead_geniv_ctx *geniv_ctx;
	struct crypto_aead *aead;
	unsigned int crypto_data_len, key_len;
	int ivsize;

	memset(attrs, 0, sizeof(*attrs));

	/* key */
	crypto_data_len = (x->aead->alg_key_len + 7) / 8;
	key_len = crypto_data_len - 4; /* 4 bytes salt at end */

	memcpy(aes_gcm->aes_key, x->aead->alg_key, key_len);
	aes_gcm->key_len = key_len * 8;

	/* salt and seq_iv */
	aead = x->data;
	geniv_ctx = crypto_aead_ctx(aead);
	ivsize = crypto_aead_ivsize(aead);
	memcpy(&aes_gcm->seq_iv, &geniv_ctx->salt, ivsize);
	memcpy(&aes_gcm->salt, x->aead->alg_key + key_len,
	       sizeof(aes_gcm->salt));

	attrs->authsize = crypto_aead_authsize(aead) / 4; /* in dwords */

	/* ICV length */
	aes_gcm->icv_len = x->aead->alg_icv_len;

	/* esn */
	if (sa_entry->esn_state.trigger) {
		attrs->esn_trigger = true;
		attrs->esn = sa_entry->esn_state.esn;
		attrs->esn_overlap = sa_entry->esn_state.overlap;
		attrs->replay_window = x->replay_esn->replay_window;
	}

	attrs->dir = x->xso.dir;
	/* spi */
	attrs->spi = be32_to_cpu(x->id.spi);

	/* source and destination IPs */
	memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
	memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
	attrs->family = x->props.family;
	attrs->type = x->xso.type;
	attrs->reqid = x->props.reqid;
	attrs->upspec.dport = ntohs(x->sel.dport);
	attrs->upspec.dport_mask = ntohs(x->sel.dport_mask);
	attrs->upspec.sport = ntohs(x->sel.sport);
	attrs->upspec.sport_mask = ntohs(x->sel.sport_mask);
	attrs->upspec.proto = x->sel.proto;

	mlx5e_ipsec_init_limits(sa_entry, attrs);
}

static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
				     struct xfrm_state *x,
				     struct netlink_ext_ack *extack)
{
	if (x->props.aalgo != SADB_AALG_NONE) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload authenticated xfrm states");
		return -EINVAL;
	}
	if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
		NL_SET_ERR_MSG_MOD(extack, "Only AES-GCM-ICV16 xfrm state may be offloaded");
		return -EINVAL;
	}
	if (x->props.calgo != SADB_X_CALG_NONE) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload compressed xfrm states");
		return -EINVAL;
	}
	if (x->props.flags & XFRM_STATE_ESN &&
	    !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESN)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload ESN xfrm states");
		return -EINVAL;
	}
	if (x->props.family != AF_INET &&
	    x->props.family != AF_INET6) {
		NL_SET_ERR_MSG_MOD(extack, "Only IPv4/6 xfrm states may be offloaded");
		return -EINVAL;
	}
	if (x->id.proto != IPPROTO_ESP) {
		NL_SET_ERR_MSG_MOD(extack, "Only ESP xfrm state may be offloaded");
		return -EINVAL;
	}
	if (x->encap) {
		NL_SET_ERR_MSG_MOD(extack, "Encapsulated xfrm state may not be offloaded");
		return -EINVAL;
	}
	if (!x->aead) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without aead");
		return -EINVAL;
	}
	if (x->aead->alg_icv_len != 128) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD ICV length other than 128bit");
		return -EINVAL;
	}
	if ((x->aead->alg_key_len != 128 + 32) &&
	    (x->aead->alg_key_len != 256 + 32)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD key length other than 128/256 bit");
		return -EINVAL;
	}
	if (x->tfcpad) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with tfc padding");
		return -EINVAL;
	}
	if (!x->geniv) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without geniv");
		return -EINVAL;
	}
	if (strcmp(x->geniv, "seqiv")) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with geniv other than seqiv");
		return -EINVAL;
	}

	if (x->sel.proto != IPPROTO_IP &&
	    (x->sel.proto != IPPROTO_UDP || x->xso.dir != XFRM_DEV_OFFLOAD_OUT)) {
		NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than UDP, and only Tx direction");
		return -EINVAL;
	}

	switch (x->xso.type) {
	case XFRM_DEV_OFFLOAD_CRYPTO:
		if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO)) {
			NL_SET_ERR_MSG_MOD(extack, "Crypto offload is not supported");
			return -EINVAL;
		}

		if (x->props.mode != XFRM_MODE_TRANSPORT &&
		    x->props.mode != XFRM_MODE_TUNNEL) {
			NL_SET_ERR_MSG_MOD(extack, "Only transport and tunnel xfrm states may be offloaded");
			return -EINVAL;
		}
		break;
	case XFRM_DEV_OFFLOAD_PACKET:
		if (!(mlx5_ipsec_device_caps(mdev) &
		      MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
			NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported");
			return -EINVAL;
		}

		if (x->props.mode != XFRM_MODE_TRANSPORT) {
			NL_SET_ERR_MSG_MOD(extack, "Only transport xfrm states may be offloaded in packet mode");
			return -EINVAL;
		}

		if (x->replay_esn && x->replay_esn->replay_window != 32 &&
		    x->replay_esn->replay_window != 64 &&
		    x->replay_esn->replay_window != 128 &&
		    x->replay_esn->replay_window != 256) {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported replay window size");
			return -EINVAL;
		}

		if (!x->props.reqid) {
			NL_SET_ERR_MSG_MOD(extack, "Cannot offload without reqid");
			return -EINVAL;
		}

		if (x->lft.hard_byte_limit != XFRM_INF ||
		    x->lft.soft_byte_limit != XFRM_INF) {
			NL_SET_ERR_MSG_MOD(extack, "Device doesn't support limits in bytes");
			return -EINVAL;
		}

		if (x->lft.soft_packet_limit >= x->lft.hard_packet_limit &&
		    x->lft.hard_packet_limit != XFRM_INF) {
			/* XFRM stack doesn't prevent such configuration :(. */
			NL_SET_ERR_MSG_MOD(extack, "Hard packet limit must be greater than soft one");
			return -EINVAL;
		}
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
		return -EINVAL;
	}
	return 0;
}

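/* Deferred state update: runs on ipsec->wq, queued by
 * mlx5e_xfrm_advance_esn_state() with a refreshed copy of the attrs,
 * so that the firmware modify command (which may sleep) is not issued
 * from the caller's context.
 */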
static void _update_xfrm_state(struct work_struct *work)
{
	struct mlx5e_ipsec_modify_state_work *modify_work =
		container_of(work, struct mlx5e_ipsec_modify_state_work, work);
	struct mlx5e_ipsec_sa_entry *sa_entry = container_of(
		modify_work, struct mlx5e_ipsec_sa_entry, modify_work);

	mlx5_accel_esp_modify_xfrm(sa_entry, &modify_work->attrs);
}

static int mlx5e_xfrm_add_state(struct xfrm_state *x,
				struct netlink_ext_ack *extack)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
	struct net_device *netdev = x->xso.real_dev;
	struct mlx5e_ipsec *ipsec;
	struct mlx5e_priv *priv;
	int err;

	priv = netdev_priv(netdev);
	if (!priv->ipsec)
		return -EOPNOTSUPP;

	ipsec = priv->ipsec;
	err = mlx5e_xfrm_validate_state(priv->mdev, x, extack);
	if (err)
		return err;

	sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
	if (!sa_entry)
		return -ENOMEM;

	sa_entry->x = x;
	sa_entry->ipsec = ipsec;

	/* check esn */
	mlx5e_ipsec_update_esn_state(sa_entry);

	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs);
	/* create hw context */
	err = mlx5_ipsec_create_sa_ctx(sa_entry);
	if (err)
		goto err_xfrm;

	err = mlx5e_accel_ipsec_fs_add_rule(sa_entry);
	if (err)
		goto err_hw_ctx;

	/* We use the *_bh() variant because xfrm_timer_handler(), which
	 * runs in softirq context, can reach our state delete logic and
	 * we need xa_erase_bh() there.
	 */
	err = xa_insert_bh(&ipsec->sadb, sa_entry->ipsec_obj_id, sa_entry,
			   GFP_KERNEL);
	if (err)
		goto err_add_rule;

	if (x->xso.dir == XFRM_DEV_OFFLOAD_OUT)
		sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
				mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;

	INIT_WORK(&sa_entry->modify_work.work, _update_xfrm_state);
	x->xso.offload_handle = (unsigned long)sa_entry;
	return 0;

err_add_rule:
	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
err_hw_ctx:
	mlx5_ipsec_free_sa_ctx(sa_entry);
err_xfrm:
	kfree(sa_entry);
	NL_SET_ERR_MSG_MOD(extack, "Device failed to offload this state");
	return err;
}

static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_sa_entry *old;

	old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
	WARN_ON(old != sa_entry);
}

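/* By the time the xfrm core calls this, mlx5e_xfrm_del_state() has
 * already removed the entry from the SADB; cancel any modify work
 * still in flight before tearing down the steering rule, the HW
 * context and the entry itself.
 */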
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);

	cancel_work_sync(&sa_entry->modify_work.work);
	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
	mlx5_ipsec_free_sa_ctx(sa_entry);
	kfree(sa_entry);
}

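/* Allocate the per-netdev IPsec context: the SADB xarray, an ordered
 * workqueue for deferred state updates, the ASO object (packet-offload
 * capable devices only) and the flow-steering tables.
 */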
void mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec;
	int ret = -ENOMEM;

	if (!mlx5_ipsec_device_caps(priv->mdev)) {
		netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
		return;
	}

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		return;

	xa_init_flags(&ipsec->sadb, XA_FLAGS_ALLOC);
	ipsec->mdev = priv->mdev;
	ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
					    priv->netdev->name);
	if (!ipsec->wq)
		goto err_wq;

	if (mlx5_ipsec_device_caps(priv->mdev) &
	    MLX5_IPSEC_CAP_PACKET_OFFLOAD) {
		ret = mlx5e_ipsec_aso_init(ipsec);
		if (ret)
			goto err_aso;
	}

	ret = mlx5e_accel_ipsec_fs_init(ipsec);
	if (ret)
		goto err_fs_init;

	ipsec->fs = priv->fs;
	priv->ipsec = ipsec;
	netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
	return;

err_fs_init:
	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
		mlx5e_ipsec_aso_cleanup(ipsec);
err_aso:
	destroy_workqueue(ipsec->wq);
err_wq:
	kfree(ipsec);
	mlx5_core_err(priv->mdev, "IPSec initialization failed, %d\n", ret);
	return;
}

void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec = priv->ipsec;

	if (!ipsec)
		return;

	mlx5e_accel_ipsec_fs_cleanup(ipsec);
	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
		mlx5e_ipsec_aso_cleanup(ipsec);
	destroy_workqueue(ipsec->wq);
	kfree(ipsec);
	priv->ipsec = NULL;
}

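/* xdo_dev_offload_ok callback: per-skb check; returning false makes
 * the stack fall back to the software IPsec path for this packet.
 */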
static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	if (x->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
		if (ip_hdr(skb)->ihl > 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}

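/* xdo_dev_state_advance_esn callback. It may run in atomic context,
 * so when the ESN scope flips, the hardware update is deferred to the
 * ordered workqueue instead of being issued here.
 */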
static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec_modify_state_work *modify_work =
		&sa_entry->modify_work;
	bool need_update;

	need_update = mlx5e_ipsec_update_esn_state(sa_entry);
	if (!need_update)
		return;

	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &modify_work->attrs);
	queue_work(sa_entry->ipsec->wq, &modify_work->work);
}

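/* xdo_dev_state_update_curlft callback: refresh x->curlft.packets
 * from the hardware ASO counters; runs under x->lock (see the lockdep
 * assertion below).
 */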
static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	int err;

	lockdep_assert_held(&x->lock);

	if (sa_entry->attrs.soft_packet_limit == XFRM_INF)
		/* Limits are not configured, as the soft limit
		 * must be lower than the hard limit.
		 */
		return;

	err = mlx5e_ipsec_aso_query(sa_entry, NULL);
	if (err)
		return;

	mlx5e_ipsec_aso_update_curlft(sa_entry, &x->curlft.packets);
}

static int mlx5e_xfrm_validate_policy(struct xfrm_policy *x,
				      struct netlink_ext_ack *extack)
{
	if (x->type != XFRM_POLICY_TYPE_MAIN) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload non-main policy types");
		return -EINVAL;
	}

	/* Note that we support only a single template */
	if (x->xfrm_nr > 1) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload more than one template");
		return -EINVAL;
	}

	if (x->xdo.dir != XFRM_DEV_OFFLOAD_IN &&
	    x->xdo.dir != XFRM_DEV_OFFLOAD_OUT) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload forward policy");
		return -EINVAL;
	}

	if (!x->xfrm_vec[0].reqid) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload policy without reqid");
		return -EINVAL;
	}

	if (x->xdo.type != XFRM_DEV_OFFLOAD_PACKET) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
		return -EINVAL;
	}

	if (x->selector.proto != IPPROTO_IP &&
	    (x->selector.proto != IPPROTO_UDP || x->xdo.dir != XFRM_DEV_OFFLOAD_OUT)) {
		NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than UDP, and only Tx direction");
		return -EINVAL;
	}

	return 0;
}

static void
mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
				  struct mlx5_accel_pol_xfrm_attrs *attrs)
{
	struct xfrm_policy *x = pol_entry->x;
	struct xfrm_selector *sel;

	sel = &x->selector;
	memset(attrs, 0, sizeof(*attrs));

	memcpy(&attrs->saddr, sel->saddr.a6, sizeof(attrs->saddr));
	memcpy(&attrs->daddr, sel->daddr.a6, sizeof(attrs->daddr));
	attrs->family = sel->family;
	attrs->dir = x->xdo.dir;
	attrs->action = x->action;
	attrs->type = XFRM_DEV_OFFLOAD_PACKET;
	attrs->reqid = x->xfrm_vec[0].reqid;
	attrs->upspec.dport = ntohs(sel->dport);
	attrs->upspec.dport_mask = ntohs(sel->dport_mask);
	attrs->upspec.sport = ntohs(sel->sport);
	attrs->upspec.sport_mask = ntohs(sel->sport_mask);
	attrs->upspec.proto = sel->proto;
}

static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
				 struct netlink_ext_ack *extack)
{
	struct net_device *netdev = x->xdo.real_dev;
	struct mlx5e_ipsec_pol_entry *pol_entry;
	struct mlx5e_priv *priv;
	int err;

	priv = netdev_priv(netdev);
	if (!priv->ipsec) {
		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPsec packet offload");
		return -EOPNOTSUPP;
	}

	err = mlx5e_xfrm_validate_policy(x, extack);
	if (err)
		return err;

	pol_entry = kzalloc(sizeof(*pol_entry), GFP_KERNEL);
	if (!pol_entry)
		return -ENOMEM;

	pol_entry->x = x;
	pol_entry->ipsec = priv->ipsec;

	mlx5e_ipsec_build_accel_pol_attrs(pol_entry, &pol_entry->attrs);
	err = mlx5e_accel_ipsec_fs_add_pol(pol_entry);
	if (err)
		goto err_fs;

	x->xdo.offload_handle = (unsigned long)pol_entry;
	return 0;

err_fs:
	kfree(pol_entry);
	NL_SET_ERR_MSG_MOD(extack, "Device failed to offload this policy");
	return err;
}

static void mlx5e_xfrm_free_policy(struct xfrm_policy *x)
{
	struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x);

	mlx5e_accel_ipsec_fs_del_pol(pol_entry);
	kfree(pol_entry);
}

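/* Two flavours of xfrmdev_ops: crypto-only devices get the base set,
 * while packet-offload capable devices additionally get policy
 * add/free and lifetime accounting; mlx5e_ipsec_build_netdev() picks
 * the right one based on the device caps.
 */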
static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
	.xdo_dev_state_add	= mlx5e_xfrm_add_state,
	.xdo_dev_state_delete	= mlx5e_xfrm_del_state,
	.xdo_dev_state_free	= mlx5e_xfrm_free_state,
	.xdo_dev_offload_ok	= mlx5e_ipsec_offload_ok,
	.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
};

static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = {
	.xdo_dev_state_add	= mlx5e_xfrm_add_state,
	.xdo_dev_state_delete	= mlx5e_xfrm_del_state,
	.xdo_dev_state_free	= mlx5e_xfrm_free_state,
	.xdo_dev_offload_ok	= mlx5e_ipsec_offload_ok,
	.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,

	.xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
	.xdo_dev_policy_add = mlx5e_xfrm_add_policy,
	.xdo_dev_policy_free = mlx5e_xfrm_free_policy,
};

void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct net_device *netdev = priv->netdev;

	if (!mlx5_ipsec_device_caps(mdev))
		return;

	mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");

	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
		netdev->xfrmdev_ops = &mlx5e_ipsec_packet_xfrmdev_ops;
	else
		netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;

	netdev->features |= NETIF_F_HW_ESP;
	netdev->hw_enc_features |= NETIF_F_HW_ESP;

	if (!MLX5_CAP_ETH(mdev, swp_csum)) {
		mlx5_core_dbg(mdev, "mlx5e: SWP checksum not supported\n");
		return;
	}

	netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
	netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;

	if (!MLX5_CAP_ETH(mdev, swp_lso)) {
		mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
		return;
	}

	netdev->gso_partial_features |= NETIF_F_GSO_ESP;
	mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
	netdev->features |= NETIF_F_GSO_ESP;
	netdev->hw_features |= NETIF_F_GSO_ESP;
	netdev->hw_enc_features |= NETIF_F_GSO_ESP;
}