// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */

#include "mlx5_core.h"
#include "en.h"
#include "ipsec.h"
#include "lib/crypto.h"

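/* Offset of the remove_flow_pkt_cnt field within the IPsec ASO object;
 * used as the ASO WQE data offset in mlx5e_ipsec_aso_update_esn().
 */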
enum {
	MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
};

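/**
 * mlx5_ipsec_device_caps - derive the IPsec offload capabilities of a device
 * @mdev: mlx5 core device
 *
 * Returns a bitmask of MLX5_IPSEC_CAP_* flags, or 0 when the device lacks
 * baseline IPsec offload, DEK or general-object support. A hypothetical
 * caller sketch (not taken from this file) would gate feature setup on the
 * result:
 *
 *	u32 caps = mlx5_ipsec_device_caps(mdev);
 *
 *	if (!(caps & MLX5_IPSEC_CAP_PACKET_OFFLOAD))
 *		return -EOPNOTSUPP;
 */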
u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
	u32 caps = 0;

	if (!MLX5_CAP_GEN(mdev, ipsec_offload))
		return 0;

	if (!MLX5_CAP_GEN(mdev, log_max_dek))
		return 0;

	if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
	    MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
		return 0;

	if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) ||
	    !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt))
		return 0;

	if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) ||
	    !MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
		return 0;

	if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) &&
	    MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
		caps |= MLX5_IPSEC_CAP_CRYPTO;

	if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) &&
	    MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_esp_trasport) &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_del_esp_trasport) &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
		caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;

	if (mlx5_get_roce_state(mdev) &&
	    MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_RX_2_NIC_RX_RDMA &&
	    MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_TX_RDMA_2_NIC_TX)
		caps |= MLX5_IPSEC_CAP_ROCE;

	if (!caps)
		return 0;

	if (MLX5_CAP_IPSEC(mdev, ipsec_esn))
		caps |= MLX5_IPSEC_CAP_ESN;

	/* We can accommodate up to 2^24 different IPsec objects
	 * because we use up to 24 bits of flow table metadata
	 * to hold the IPsec object's unique handle.
	 */
	WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24);
	return caps;
}
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);

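/* Fill the ASO context of a packet-offload IPsec object: arm ESN events
 * and replay protection on RX, sequence-number increment on TX, and the
 * hard/soft packet lifetime counters when the limits are finite.
 */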
static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
				     struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	void *aso_ctx;

	aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
	if (attrs->esn_trigger) {
		MLX5_SET(ipsec_aso, aso_ctx, esn_event_arm, 1);

		if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
			MLX5_SET(ipsec_aso, aso_ctx, window_sz,
				 attrs->replay_window / 64);
			MLX5_SET(ipsec_aso, aso_ctx, mode,
				 MLX5_IPSEC_ASO_REPLAY_PROTECTION);
		}
	}

	/* ASO context */
	MLX5_SET(ipsec_obj, obj, ipsec_aso_access_pd, pdn);
	MLX5_SET(ipsec_obj, obj, full_offload, 1);
	MLX5_SET(ipsec_aso, aso_ctx, valid, 1);
	/* MLX5_IPSEC_ASO_REG_C_4_5 is a type C register that is used
	 * in flow steering to perform matching against. Note that
	 * this register was chosen arbitrarily and can't be used in
	 * other places as long as IPsec packet offload is active.
	 */
	MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
	if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
		MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);

	if (attrs->hard_packet_limit != XFRM_INF) {
		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
			 lower_32_bits(attrs->hard_packet_limit));
		MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
	}

	if (attrs->soft_packet_limit != XFRM_INF) {
		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_soft_lft,
			 lower_32_bits(attrs->soft_packet_limit));

		MLX5_SET(ipsec_aso, aso_ctx, soft_lft_arm, 1);
	}
}

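/* Create the IPsec general object in FW for this SA: program the salt,
 * implicit IV, ICV length and DEK index, plus the ESN and ASO state for
 * packet offload. On success the FW-assigned object id is stored in
 * sa_entry->ipsec_obj_id.
 */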
static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
	void *obj, *salt_p, *salt_iv_p;
	struct mlx5e_hw_objs *res;
	int err;

	obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object);

	/* salt and seq_iv */
	salt_p = MLX5_ADDR_OF(ipsec_obj, obj, salt);
	memcpy(salt_p, &aes_gcm->salt, sizeof(aes_gcm->salt));

	MLX5_SET(ipsec_obj, obj, icv_length, MLX5_IPSEC_OBJECT_ICV_LEN_16B);
	salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
	memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
	/* esn */
	if (attrs->esn_trigger) {
		MLX5_SET(ipsec_obj, obj, esn_en, 1);
		MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn);
		MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->esn_overlap);
	}

	MLX5_SET(ipsec_obj, obj, dekn, sa_entry->enc_key_id);

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_IPSEC);

	res = &mdev->mlx5e_res.hw_objs;
	if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
		mlx5e_ipsec_packet_setup(obj, res->pdn, attrs);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (!err)
		sa_entry->ipsec_obj_id =
			MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return err;
}

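/* Destroy the FW IPsec object created by mlx5_create_ipsec_obj(). */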
static void mlx5_destroy_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_IPSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);

	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

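/* Build the full HW context for an SA: create the DEK from the AES-GCM
 * key material, then the IPsec object referencing it. The DEK is torn
 * down again if object creation fails.
 */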
int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct aes_gcm_keymat *aes_gcm = &sa_entry->attrs.aes_gcm;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	int err;

	/* key */
	err = mlx5_create_encryption_key(mdev, aes_gcm->aes_key,
					 aes_gcm->key_len / BITS_PER_BYTE,
					 MLX5_ACCEL_OBJ_IPSEC_KEY,
					 &sa_entry->enc_key_id);
	if (err) {
		mlx5_core_dbg(mdev, "Failed to create encryption key (err = %d)\n", err);
		return err;
	}

	err = mlx5_create_ipsec_obj(sa_entry);
	if (err) {
		mlx5_core_dbg(mdev, "Failed to create IPsec object (err = %d)\n", err);
		goto err_enc_key;
	}

	return 0;

err_enc_key:
	mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
	return err;
}

void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);

	mlx5_destroy_ipsec_obj(sa_entry);
	mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
}

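/* Update the ESN state (esn_msb/esn_overlap) of an existing IPsec object.
 * The object is queried first so we can verify that FW allows modifying
 * both ESN fields before issuing the modify command.
 */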
static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
				 const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	u32 in[MLX5_ST_SZ_DW(modify_ipsec_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_ipsec_obj_out)];
	u64 modify_field_select = 0;
	u64 general_obj_types;
	void *obj;
	int err;

	if (!attrs->esn_trigger)
		return 0;

	general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
	if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
		return -EINVAL;

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);
	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err) {
		mlx5_core_err(mdev, "Query IPsec object failed (Object id %d), err = %d\n",
			      sa_entry->ipsec_obj_id, err);
		return err;
	}

	obj = MLX5_ADDR_OF(query_ipsec_obj_out, out, ipsec_object);
	modify_field_select = MLX5_GET64(ipsec_obj, obj, modify_field_select);

	/* esn */
	if (!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP) ||
	    !(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB))
		return -EOPNOTSUPP;

	obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object);
	MLX5_SET64(ipsec_obj, obj, modify_field_select,
		   MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP |
			   MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB);
	MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn);
	MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->esn_overlap);

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
				const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	int err;

	err = mlx5_modify_ipsec_obj(sa_entry, attrs);
	if (err)
		return;

	memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
}

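/* Re-arm the ESN event after the SW state has been advanced. This posts
 * a bitwise 64-bit ASO modify where data and mask select a single bit of
 * the ASO context (presumably the esn_event_arm bit, given BIT_ULL(54)
 * below), so FW raises the next event when the window progresses again.
 */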
static void
mlx5e_ipsec_aso_update_esn(struct mlx5e_ipsec_sa_entry *sa_entry,
			   const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct mlx5_wqe_aso_ctrl_seg data = {};

	data.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT << 6;
	data.condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE | MLX5_ASO_ALWAYS_TRUE
								    << 4;
	data.data_offset_condition_operand = MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
	data.bitwise_data = cpu_to_be64(BIT_ULL(54));
	data.data_mask = data.bitwise_data;

	mlx5e_ipsec_aso_query(sa_entry, &data);
}

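/* Advance the SW ESN state when FW signals window movement: when the
 * reported mode parameter is below MLX5E_IPSEC_ESN_SCOPE_MID, increment
 * the ESN MSB and clear the overlap bit, otherwise only mark the
 * overlap. The new state is then pushed to FW and the event re-armed.
 */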
static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry,
					 u32 mode_param)
{
	struct mlx5_accel_esp_xfrm_attrs attrs = {};

	if (mode_param < MLX5E_IPSEC_ESN_SCOPE_MID) {
		sa_entry->esn_state.esn++;
		sa_entry->esn_state.overlap = 0;
	} else {
		sa_entry->esn_state.overlap = 1;
	}

	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
	mlx5_accel_esp_modify_xfrm(sa_entry, &attrs);
	mlx5e_ipsec_aso_update_esn(sa_entry, &attrs);
}

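/* Workqueue handler for IPsec object-change events: look up the SA, read
 * back the ASO context and, under the xfrm state lock, process ESN
 * window movement and soft/hard packet-lifetime expiration.
 */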
static void mlx5e_ipsec_handle_event(struct work_struct *_work)
{
	struct mlx5e_ipsec_work *work =
		container_of(_work, struct mlx5e_ipsec_work, work);
	struct mlx5_accel_esp_xfrm_attrs *attrs;
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct mlx5e_ipsec_aso *aso;
	struct mlx5e_ipsec *ipsec;
	int ret;

	sa_entry = xa_load(&work->ipsec->sadb, work->id);
	if (!sa_entry)
		goto out;

	ipsec = sa_entry->ipsec;
	aso = ipsec->aso;
	attrs = &sa_entry->attrs;

	spin_lock(&sa_entry->x->lock);
	ret = mlx5e_ipsec_aso_query(sa_entry, NULL);
	if (ret)
		goto unlock;

	if (attrs->esn_trigger &&
	    !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) {
		u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter);

		mlx5e_ipsec_update_esn_state(sa_entry, mode_param);
	}

	if (attrs->soft_packet_limit != XFRM_INF)
		if (!MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm) ||
		    !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm))
			xfrm_state_check_expire(sa_entry->x);

unlock:
	spin_unlock(&sa_entry->x->lock);
out:
	kfree(work);
}

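/* EQ notifier callback, running in atomic context: filter for IPsec
 * object-change events and defer the actual handling to the IPsec
 * workqueue, allocating the work item with GFP_ATOMIC.
 */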
static int mlx5e_ipsec_event(struct notifier_block *nb, unsigned long event,
			     void *data)
{
	struct mlx5e_ipsec *ipsec = container_of(nb, struct mlx5e_ipsec, nb);
	struct mlx5_eqe_obj_change *object;
	struct mlx5e_ipsec_work *work;
	struct mlx5_eqe *eqe = data;
	u16 type;

	if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
		return NOTIFY_DONE;

	object = &eqe->data.obj_change;
	type = be16_to_cpu(object->obj_type);

	if (type != MLX5_GENERAL_OBJECT_TYPES_IPSEC)
		return NOTIFY_DONE;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return NOTIFY_DONE;

	INIT_WORK(&work->work, mlx5e_ipsec_handle_event);
	work->ipsec = ipsec;
	work->id = be32_to_cpu(object->obj_id);

	queue_work(ipsec->wq, &work->work);
	return NOTIFY_OK;
}

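/* Set up the ASO infrastructure: DMA-map the ASO context buffer that FW
 * writes query results into, create the ASO instance and register the
 * event notifier. Resources are unwound in reverse order on failure.
 */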
int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_aso *aso;
	struct mlx5e_hw_objs *res;
	struct device *pdev;
	int err;

	aso = kzalloc(sizeof(*ipsec->aso), GFP_KERNEL);
	if (!aso)
		return -ENOMEM;

	res = &mdev->mlx5e_res.hw_objs;

	pdev = mlx5_core_dma_dev(mdev);
	aso->dma_addr = dma_map_single(pdev, aso->ctx, sizeof(aso->ctx),
				       DMA_BIDIRECTIONAL);
	err = dma_mapping_error(pdev, aso->dma_addr);
	if (err)
		goto err_dma;

	aso->aso = mlx5_aso_create(mdev, res->pdn);
	if (IS_ERR(aso->aso)) {
		err = PTR_ERR(aso->aso);
		goto err_aso_create;
	}

	spin_lock_init(&aso->lock);
	ipsec->nb.notifier_call = mlx5e_ipsec_event;
	mlx5_notifier_register(mdev, &ipsec->nb);

	ipsec->aso = aso;
	return 0;

err_aso_create:
	dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
			 DMA_BIDIRECTIONAL);
err_dma:
	kfree(aso);
	return err;
}

void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_aso *aso;
	struct device *pdev;

	aso = ipsec->aso;
	pdev = mlx5_core_dma_dev(mdev);

	mlx5_notifier_unregister(mdev, &ipsec->nb);
	mlx5_aso_destroy(aso->aso);
	dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
			 DMA_BIDIRECTIONAL);
	kfree(aso);
}

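/* Copy the caller-supplied modify parameters into the WQE control
 * segment; a NULL @data leaves the WQE as a pure read of the ASO state.
 */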
static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
				 struct mlx5_wqe_aso_ctrl_seg *data)
{
	if (!data)
		return;

	ctrl->data_mask_mode = data->data_mask_mode;
	ctrl->condition_1_0_operand = data->condition_1_0_operand;
	ctrl->condition_1_0_offset = data->condition_1_0_offset;
	ctrl->data_offset_condition_operand = data->data_offset_condition_operand;
	ctrl->condition_0_data = data->condition_0_data;
	ctrl->condition_0_mask = data->condition_0_mask;
	ctrl->condition_1_data = data->condition_1_data;
	ctrl->condition_1_mask = data->condition_1_mask;
	ctrl->bitwise_data = data->bitwise_data;
	ctrl->data_mask = data->data_mask;
}

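/* Post a single ASO WQE against the SA's IPsec object and poll for its
 * completion. The WQE always reads the current ASO state into the
 * DMA-mapped aso->ctx buffer (ASO_CTRL_READ_EN); if @data is non-NULL it
 * also applies the caller's modify operation. Called with the xfrm state
 * lock held; aso->lock serializes access to the shared WQE machinery and
 * context buffer.
 */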
int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
			  struct mlx5_wqe_aso_ctrl_seg *data)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_aso *aso = ipsec->aso;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_wqe_aso_ctrl_seg *ctrl;
	struct mlx5e_hw_objs *res;
	struct mlx5_aso_wqe *wqe;
	u8 ds_cnt;
	int ret;

	lockdep_assert_held(&sa_entry->x->lock);
	res = &mdev->mlx5e_res.hw_objs;

	spin_lock_bh(&aso->lock);
	memset(aso->ctx, 0, sizeof(aso->ctx));
	wqe = mlx5_aso_get_wqe(aso->aso);
	ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
	mlx5_aso_build_wqe(aso->aso, ds_cnt, wqe, sa_entry->ipsec_obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_IPSEC);

	ctrl = &wqe->aso_ctrl;
	ctrl->va_l =
		cpu_to_be32(lower_32_bits(aso->dma_addr) | ASO_CTRL_READ_EN);
	ctrl->va_h = cpu_to_be32(upper_32_bits(aso->dma_addr));
	ctrl->l_key = cpu_to_be32(res->mkey);
	mlx5e_ipsec_aso_copy(ctrl, data);

	mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
	ret = mlx5_aso_poll_cq(aso->aso, false);
	spin_unlock_bh(&aso->lock);
	return ret;
}

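/* Convert the HW hard-lifetime countdown into the cumulative packet
 * count expected by the xfrm layer. Assumes a fresh ASO query has
 * populated aso->ctx for this SA.
 */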
void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
				   u64 *packets)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_aso *aso = ipsec->aso;
	u64 hard_cnt;

	hard_cnt = MLX5_GET(ipsec_aso, aso->ctx, remove_flow_pkt_cnt);
	/* HW decreases the limit till it reaches zero to fire an event.
	 * We need to fix the calculation, so the returned count is the
	 * total number of passed packets and not how many are left.
	 */
	*packets = sa_entry->attrs.hard_packet_limit - hard_cnt;
}