1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3
4 #include <linux/types.h>
5 #include <linux/crc32.h>
6 #include "dr_ste.h"
7
/* Layout of a full-sized HW STE: control area followed by the
 * match tag and the per-byte bit mask it is compared under.
 */
struct dr_hw_ste_format {
	u8 ctrl[DR_STE_SIZE_CTRL];
	u8 tag[DR_STE_SIZE_TAG];
	u8 mask[DR_STE_SIZE_MASK];
};
13
dr_ste_crc32_calc(const void * input_data,size_t length)14 static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
15 {
16 u32 crc = crc32(0, input_data, length);
17
18 return (__force u32)htonl(crc);
19 }
20
/* Whether the device supports checksum recalculation on TTL modify;
 * per the capability check, only formats newer than ConnectX-5 do.
 */
bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps)
{
	return caps->sw_format_ver > MLX5_STEERING_FORMAT_CONNECTX_5;
}
25
mlx5dr_ste_calc_hash_index(u8 * hw_ste_p,struct mlx5dr_ste_htbl * htbl)26 u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
27 {
28 u32 num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(htbl->chunk);
29 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
30 u8 masked[DR_STE_SIZE_TAG] = {};
31 u32 crc32, index;
32 u16 bit;
33 int i;
34
35 /* Don't calculate CRC if the result is predicted */
36 if (num_entries == 1 || htbl->byte_mask == 0)
37 return 0;
38
39 /* Mask tag using byte mask, bit per byte */
40 bit = 1 << (DR_STE_SIZE_TAG - 1);
41 for (i = 0; i < DR_STE_SIZE_TAG; i++) {
42 if (htbl->byte_mask & bit)
43 masked[i] = hw_ste->tag[i];
44
45 bit = bit >> 1;
46 }
47
48 crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG);
49 index = crc32 & (num_entries - 1);
50
51 return index;
52 }
53
mlx5dr_ste_conv_bit_to_byte_mask(u8 * bit_mask)54 u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
55 {
56 u16 byte_mask = 0;
57 int i;
58
59 for (i = 0; i < DR_STE_SIZE_MASK; i++) {
60 byte_mask = byte_mask << 1;
61 if (bit_mask[i] == 0xff)
62 byte_mask |= 1;
63 }
64 return byte_mask;
65 }
66
/* Return a pointer to the tag area inside a raw STE buffer */
static u8 *dr_ste_get_tag(u8 *hw_ste_p)
{
	return ((struct dr_hw_ste_format *)hw_ste_p)->tag;
}
73
/* Install @bit_mask into the mask area of a full-sized STE buffer */
void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
{
	struct dr_hw_ste_format *ste = (struct dr_hw_ste_format *)hw_ste_p;

	memcpy(ste->mask, bit_mask, DR_STE_SIZE_MASK);
}
80
dr_ste_set_always_hit(struct dr_hw_ste_format * hw_ste)81 static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
82 {
83 memset(&hw_ste->tag, 0, sizeof(hw_ste->tag));
84 memset(&hw_ste->mask, 0, sizeof(hw_ste->mask));
85 }
86
/* Mark the STE as never-match so every packet takes the miss path.
 * NOTE(review): a non-zero tag byte with a cleared mask byte appears to
 * be the driver's "can't match" marker; 0xdc itself looks arbitrary —
 * confirm against the STE format spec.
 */
static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
{
	hw_ste->tag[0] = 0xdc;
	hw_ste->mask[0] = 0;
}
92
mlx5dr_ste_is_miss_addr_set(struct mlx5dr_ste_ctx * ste_ctx,u8 * hw_ste_p)93 bool mlx5dr_ste_is_miss_addr_set(struct mlx5dr_ste_ctx *ste_ctx,
94 u8 *hw_ste_p)
95 {
96 if (!ste_ctx->is_miss_addr_set)
97 return false;
98
99 /* check if miss address is already set for this type of STE */
100 return ste_ctx->is_miss_addr_set(hw_ste_p);
101 }
102
/* Set the STE miss address via the format-specific callback */
void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
			      u8 *hw_ste_p, u64 miss_addr)
{
	ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
}
108
/* Turn @hw_ste into an "always miss" entry that forwards to @miss_addr:
 * don't-care next lookup, miss address set, and a never-matching tag/mask.
 * @hw_ste must be a full-sized (DR_STE_SIZE) buffer — the mask area is touched.
 */
static void dr_ste_always_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
				    u8 *hw_ste, u64 miss_addr)
{
	ste_ctx->set_next_lu_type(hw_ste, MLX5DR_STE_LU_TYPE_DONT_CARE);
	ste_ctx->set_miss_addr(hw_ste, miss_addr);
	dr_ste_set_always_miss((struct dr_hw_ste_format *)hw_ste);
}
116
/* Set the STE hit address (next table ICM address + size) via the
 * format-specific callback.
 */
void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
			     u8 *hw_ste, u64 icm_addr, u32 ht_size)
{
	ste_ctx->set_hit_addr(hw_ste, icm_addr, ht_size);
}
122
mlx5dr_ste_get_icm_addr(struct mlx5dr_ste * ste)123 u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
124 {
125 u64 base_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(ste->htbl->chunk);
126 u32 index = ste - ste->htbl->chunk->ste_arr;
127
128 return base_icm_addr + DR_STE_SIZE * index;
129 }
130
mlx5dr_ste_get_mr_addr(struct mlx5dr_ste * ste)131 u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
132 {
133 u32 index = ste - ste->htbl->chunk->ste_arr;
134
135 return mlx5dr_icm_pool_get_chunk_mr_addr(ste->htbl->chunk) + DR_STE_SIZE * index;
136 }
137
mlx5dr_ste_get_hw_ste(struct mlx5dr_ste * ste)138 u8 *mlx5dr_ste_get_hw_ste(struct mlx5dr_ste *ste)
139 {
140 u64 index = ste - ste->htbl->chunk->ste_arr;
141
142 return ste->htbl->chunk->hw_ste_arr + DR_STE_SIZE_REDUCED * index;
143 }
144
mlx5dr_ste_get_miss_list(struct mlx5dr_ste * ste)145 struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
146 {
147 u32 index = ste - ste->htbl->chunk->ste_arr;
148
149 return &ste->htbl->chunk->miss_list[index];
150 }
151
/* Turn @hw_ste into an "always hit" entry that forwards every packet to
 * @next_htbl: program next table's byte mask/lookup type and hit address,
 * then clear tag+mask so any packet matches.
 * @hw_ste must be full-sized (DR_STE_SIZE) — the mask area is touched.
 */
static void dr_ste_always_hit_htbl(struct mlx5dr_ste_ctx *ste_ctx,
				   u8 *hw_ste,
				   struct mlx5dr_ste_htbl *next_htbl)
{
	struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;

	ste_ctx->set_byte_mask(hw_ste, next_htbl->byte_mask);
	ste_ctx->set_next_lu_type(hw_ste, next_htbl->lu_type);
	ste_ctx->set_hit_addr(hw_ste, mlx5dr_icm_pool_get_chunk_icm_addr(chunk),
			      mlx5dr_icm_pool_get_chunk_num_of_entries(chunk));

	dr_ste_set_always_hit((struct dr_hw_ste_format *)hw_ste);
}
165
/* True when @ste_location (1-based chain position) is the matcher's
 * final builder, i.e. the last STE of the rule.
 */
bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
				u8 ste_location)
{
	return ste_location == nic_matcher->num_of_builders;
}
171
172 /* Replace relevant fields, except of:
173 * htbl - keep the origin htbl
174 * miss_list + list - already took the src from the list.
175 * icm_addr/mr_addr - depends on the hosting table.
176 *
177 * Before:
178 * | a | -> | b | -> | c | ->
179 *
180 * After:
181 * | a | -> | c | ->
182 * While the data that was in b copied to a.
183 */
static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
{
	/* Copy the reduced HW image of src over dst */
	memcpy(mlx5dr_ste_get_hw_ste(dst), mlx5dr_ste_get_hw_ste(src),
	       DR_STE_SIZE_REDUCED);
	dst->next_htbl = src->next_htbl;
	/* If src pointed at a next table, re-parent it to dst */
	if (dst->next_htbl)
		dst->next_htbl->pointing_ste = dst;

	dst->refcount = src->refcount;
}
194
195 /* Free ste which is the head and the only one in miss_list */
196 static void
dr_ste_remove_head_ste(struct mlx5dr_ste_ctx * ste_ctx,struct mlx5dr_ste * ste,struct mlx5dr_matcher_rx_tx * nic_matcher,struct mlx5dr_ste_send_info * ste_info_head,struct list_head * send_ste_list,struct mlx5dr_ste_htbl * stats_tbl)197 dr_ste_remove_head_ste(struct mlx5dr_ste_ctx *ste_ctx,
198 struct mlx5dr_ste *ste,
199 struct mlx5dr_matcher_rx_tx *nic_matcher,
200 struct mlx5dr_ste_send_info *ste_info_head,
201 struct list_head *send_ste_list,
202 struct mlx5dr_ste_htbl *stats_tbl)
203 {
204 u8 tmp_data_ste[DR_STE_SIZE] = {};
205 u64 miss_addr;
206
207 miss_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
208
209 /* Use temp ste because dr_ste_always_miss_addr
210 * touches bit_mask area which doesn't exist at ste->hw_ste.
211 * Need to use a full-sized (DR_STE_SIZE) hw_ste.
212 */
213 memcpy(tmp_data_ste, mlx5dr_ste_get_hw_ste(ste), DR_STE_SIZE_REDUCED);
214 dr_ste_always_miss_addr(ste_ctx, tmp_data_ste, miss_addr);
215 memcpy(mlx5dr_ste_get_hw_ste(ste), tmp_data_ste, DR_STE_SIZE_REDUCED);
216
217 list_del_init(&ste->miss_list_node);
218
219 /* Write full STE size in order to have "always_miss" */
220 mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
221 0, tmp_data_ste,
222 ste_info_head,
223 send_ste_list,
224 true /* Copy data */);
225
226 stats_tbl->ctrl.num_of_valid_entries--;
227 }
228
229 /* Free ste which is the head but NOT the only one in miss_list:
230 * |_ste_| --> |_next_ste_| -->|__| -->|__| -->/0
231 */
232 static void
dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx * nic_matcher,struct mlx5dr_ste * ste,struct mlx5dr_ste * next_ste,struct mlx5dr_ste_send_info * ste_info_head,struct list_head * send_ste_list,struct mlx5dr_ste_htbl * stats_tbl)233 dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher,
234 struct mlx5dr_ste *ste,
235 struct mlx5dr_ste *next_ste,
236 struct mlx5dr_ste_send_info *ste_info_head,
237 struct list_head *send_ste_list,
238 struct mlx5dr_ste_htbl *stats_tbl)
239
240 {
241 struct mlx5dr_ste_htbl *next_miss_htbl;
242 u8 hw_ste[DR_STE_SIZE] = {};
243 int sb_idx;
244
245 next_miss_htbl = next_ste->htbl;
246
247 /* Remove from the miss_list the next_ste before copy */
248 list_del_init(&next_ste->miss_list_node);
249
250 /* Move data from next into ste */
251 dr_ste_replace(ste, next_ste);
252
253 /* Update the rule on STE change */
254 mlx5dr_rule_set_last_member(next_ste->rule_rx_tx, ste, false);
255
256 /* Copy all 64 hw_ste bytes */
257 memcpy(hw_ste, mlx5dr_ste_get_hw_ste(ste), DR_STE_SIZE_REDUCED);
258 sb_idx = ste->ste_chain_location - 1;
259 mlx5dr_ste_set_bit_mask(hw_ste,
260 nic_matcher->ste_builder[sb_idx].bit_mask);
261
262 /* Del the htbl that contains the next_ste.
263 * The origin htbl stay with the same number of entries.
264 */
265 mlx5dr_htbl_put(next_miss_htbl);
266
267 mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
268 0, hw_ste,
269 ste_info_head,
270 send_ste_list,
271 true /* Copy data */);
272
273 stats_tbl->ctrl.num_of_collisions--;
274 stats_tbl->ctrl.num_of_valid_entries--;
275 }
276
277 /* Free ste that is located in the middle of the miss list:
278 * |__| -->|_prev_ste_|->|_ste_|-->|_next_ste_|
279 */
static void dr_ste_remove_middle_ste(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste *ste,
				     struct mlx5dr_ste_send_info *ste_info,
				     struct list_head *send_ste_list,
				     struct mlx5dr_ste_htbl *stats_tbl)
{
	struct mlx5dr_ste *prev_ste;
	u64 miss_addr;

	prev_ste = list_prev_entry(ste, miss_list_node);
	if (WARN_ON(!prev_ste))
		return;

	/* Bridge over the removed entry: prev's miss address takes over
	 * ste's miss address.
	 */
	miss_addr = ste_ctx->get_miss_addr(mlx5dr_ste_get_hw_ste(ste));
	ste_ctx->set_miss_addr(mlx5dr_ste_get_hw_ste(prev_ste), miss_addr);

	/* Only the control area changed, so send just DR_STE_SIZE_CTRL */
	mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_CTRL, 0,
						  mlx5dr_ste_get_hw_ste(prev_ste),
						  ste_info, send_ste_list,
						  true /* Copy data*/);

	list_del_init(&ste->miss_list_node);

	stats_tbl->ctrl.num_of_valid_entries--;
	stats_tbl->ctrl.num_of_collisions--;
}
306
/* Release @ste: unlink it from its miss list, patch the affected HW
 * entries so the remaining chain stays reachable, and drop the hash
 * table reference unless the head-replace flow already consumed it.
 */
void mlx5dr_ste_free(struct mlx5dr_ste *ste,
		     struct mlx5dr_matcher *matcher,
		     struct mlx5dr_matcher_rx_tx *nic_matcher)
{
	struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_send_info ste_info_head;
	struct mlx5dr_ste *next_ste, *first_ste;
	bool put_on_origin_table = true;
	struct mlx5dr_ste_htbl *stats_tbl;
	LIST_HEAD(send_ste_list);

	first_ste = list_first_entry(mlx5dr_ste_get_miss_list(ste),
				     struct mlx5dr_ste, miss_list_node);
	/* Collision statistics are accounted on the head's table */
	stats_tbl = first_ste->htbl;

	/* Two options:
	 * 1. ste is head:
	 *	a. head ste is the only ste in the miss list
	 *	b. head ste is not the only ste in the miss-list
	 * 2. ste is not head
	 */
	if (first_ste == ste) { /* Ste is the head */
		struct mlx5dr_ste *last_ste;

		last_ste = list_last_entry(mlx5dr_ste_get_miss_list(ste),
					   struct mlx5dr_ste, miss_list_node);
		if (last_ste == first_ste)
			next_ste = NULL;
		else
			next_ste = list_next_entry(ste, miss_list_node);

		if (!next_ste) {
			/* One and only entry in the list */
			dr_ste_remove_head_ste(ste_ctx, ste,
					       nic_matcher,
					       &ste_info_head,
					       &send_ste_list,
					       stats_tbl);
		} else {
			/* First but not only entry in the list */
			dr_ste_replace_head_ste(nic_matcher, ste,
						next_ste, &ste_info_head,
						&send_ste_list, stats_tbl);
			/* Head replacement already released the next_ste's
			 * table; don't drop the origin table's reference.
			 */
			put_on_origin_table = false;
		}
	} else { /* Ste in the middle of the list */
		dr_ste_remove_middle_ste(ste_ctx, ste,
					 &ste_info_head, &send_ste_list,
					 stats_tbl);
	}

	/* Update HW */
	list_for_each_entry_safe(cur_ste_info, tmp_ste_info,
				 &send_ste_list, send_list) {
		list_del(&cur_ste_info->send_list);
		mlx5dr_send_postsend_ste(dmn, cur_ste_info->ste,
					 cur_ste_info->data, cur_ste_info->size,
					 cur_ste_info->offset);
	}

	if (put_on_origin_table)
		mlx5dr_htbl_put(ste->htbl);
}
372
mlx5dr_ste_equal_tag(void * src,void * dst)373 bool mlx5dr_ste_equal_tag(void *src, void *dst)
374 {
375 struct dr_hw_ste_format *s_hw_ste = (struct dr_hw_ste_format *)src;
376 struct dr_hw_ste_format *d_hw_ste = (struct dr_hw_ste_format *)dst;
377
378 return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG);
379 }
380
/* Program @hw_ste's hit address to point at @next_htbl's ICM chunk */
void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
					  u8 *hw_ste,
					  struct mlx5dr_ste_htbl *next_htbl)
{
	struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;

	ste_ctx->set_hit_addr(hw_ste,
			      mlx5dr_icm_pool_get_chunk_icm_addr(chunk),
			      mlx5dr_icm_pool_get_chunk_num_of_entries(chunk));
}
391
/* Give the STE format a chance to fix up the buffer before it is
 * written to HW; optional callback, not all formats need it.
 */
void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
				     u8 *hw_ste_p, u32 ste_size)
{
	if (ste_ctx->prepare_for_postsend)
		ste_ctx->prepare_for_postsend(hw_ste_p, ste_size);
}
398
399 /* Init one ste as a pattern for ste data array */
/* Init one ste as a pattern for ste data array */
void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
				  u16 gvmi,
				  enum mlx5dr_domain_nic_type nic_type,
				  struct mlx5dr_ste_htbl *htbl,
				  u8 *formatted_ste,
				  struct mlx5dr_htbl_connect_info *connect_info)
{
	u8 scratch_ste[DR_STE_SIZE] = {0};
	bool is_rx;

	is_rx = (nic_type == DR_DOMAIN_NIC_TYPE_RX);
	ste_ctx->ste_init(formatted_ste, htbl->lu_type, is_rx, gvmi);

	/* The always-hit/always-miss helpers touch the bit-mask area, which
	 * only exists in a full-sized STE, so build the pattern in a
	 * DR_STE_SIZE scratch buffer and copy the reduced part back.
	 */
	memcpy(scratch_ste, formatted_ste, DR_STE_SIZE_REDUCED);

	if (connect_info->type == CONNECT_HIT)
		dr_ste_always_hit_htbl(ste_ctx, scratch_ste,
				       connect_info->hit_next_htbl);
	else
		dr_ste_always_miss_addr(ste_ctx, scratch_ste,
					connect_info->miss_icm_addr);

	memcpy(formatted_ste, scratch_ste, DR_STE_SIZE_REDUCED);
}
425
/* Build the formatted STE pattern for @htbl and write the whole table
 * to HW. Returns 0 on success or a negative errno from the send path.
 */
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn,
				      struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_htbl_connect_info *connect_info,
				      bool update_hw_ste)
{
	u8 pattern_ste[DR_STE_SIZE] = {};

	mlx5dr_ste_set_formatted_ste(dmn->ste_ctx, dmn->info.caps.gvmi,
				     nic_dmn->type, htbl, pattern_ste,
				     connect_info);

	return mlx5dr_send_postsend_formatted_htbl(dmn, htbl, pattern_ste,
						   update_hw_ste);
}
443
/* Allocate and connect the next hash table for @ste, unless it is the
 * last STE in the rule chain (in which case nothing is done).
 * The new table's lookup type and byte mask are taken from the current
 * STE; its miss path points at the matcher's end anchor.
 * Returns 0 on success, negative errno on failure.
 */
int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
				struct mlx5dr_matcher_rx_tx *nic_matcher,
				struct mlx5dr_ste *ste,
				u8 *cur_hw_ste,
				enum mlx5dr_icm_chunk_size log_table_size)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_ste_htbl *next_htbl;

	if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
		u16 next_lu_type;
		u16 byte_mask;

		next_lu_type = ste_ctx->get_next_lu_type(cur_hw_ste);
		byte_mask = ste_ctx->get_byte_mask(cur_hw_ste);

		next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
						  log_table_size,
						  next_lu_type,
						  byte_mask);
		if (!next_htbl) {
			mlx5dr_dbg(dmn, "Failed allocating table\n");
			return -ENOMEM;
		}

		/* Write new table to HW */
		info.type = CONNECT_MISS;
		info.miss_icm_addr =
			mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
		if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl,
						      &info, false)) {
			mlx5dr_info(dmn, "Failed writing table to HW\n");
			goto free_table;
		}

		/* Link the current STE to the new table in both directions */
		mlx5dr_ste_set_hit_addr_by_next_htbl(ste_ctx,
						     cur_hw_ste, next_htbl);
		ste->next_htbl = next_htbl;
		next_htbl->pointing_ste = ste;
	}

	return 0;

free_table:
	mlx5dr_ste_htbl_free(next_htbl);
	return -ENOENT;
}
494
/* Allocate an STE hash table backed by an ICM chunk of @chunk_size.
 * Every entry and its miss list are initialized empty; the table starts
 * with refcount 0 and no pointing STE.
 * Returns the table or NULL on allocation failure.
 */
struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
					      enum mlx5dr_icm_chunk_size chunk_size,
					      u16 lu_type, u16 byte_mask)
{
	struct mlx5dr_icm_chunk *chunk;
	struct mlx5dr_ste_htbl *htbl;
	u32 num_entries;
	u32 i;

	htbl = mlx5dr_icm_pool_alloc_htbl(pool);
	if (!htbl)
		return NULL;

	chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size);
	if (!chunk)
		goto out_free_htbl;

	htbl->chunk = chunk;
	htbl->lu_type = lu_type;
	htbl->byte_mask = byte_mask;
	htbl->refcount = 0;
	htbl->pointing_ste = NULL;
	htbl->ctrl.num_of_valid_entries = 0;
	htbl->ctrl.num_of_collisions = 0;
	num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);

	/* Use an unsigned index to match num_entries (u32) and avoid a
	 * signed/unsigned comparison in the loop condition.
	 */
	for (i = 0; i < num_entries; i++) {
		struct mlx5dr_ste *ste = &chunk->ste_arr[i];

		ste->htbl = htbl;
		ste->refcount = 0;
		INIT_LIST_HEAD(&ste->miss_list_node);
		INIT_LIST_HEAD(&chunk->miss_list[i]);
	}

	return htbl;

out_free_htbl:
	mlx5dr_icm_pool_free_htbl(pool, htbl);
	return NULL;
}
536
mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl * htbl)537 int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
538 {
539 struct mlx5dr_icm_pool *pool = htbl->chunk->buddy_mem->pool;
540
541 if (htbl->refcount)
542 return -EBUSY;
543
544 mlx5dr_icm_free_chunk(htbl->chunk);
545 mlx5dr_icm_pool_free_htbl(pool, htbl);
546
547 return 0;
548 }
549
/* Build the TX action STEs via the format-specific callback;
 * @added_stes is updated with the number of extra STEs consumed.
 */
void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *hw_ste_arr,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes)
{
	ste_ctx->set_actions_tx(dmn, action_type_set, ste_ctx->actions_caps,
				hw_ste_arr, attr, added_stes);
}
560
/* Build the RX action STEs via the format-specific callback;
 * @added_stes is updated with the number of extra STEs consumed.
 */
void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *hw_ste_arr,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes)
{
	ste_ctx->set_actions_rx(dmn, action_type_set, ste_ctx->actions_caps,
				hw_ste_arr, attr, added_stes);
}
571
572 const struct mlx5dr_ste_action_modify_field *
mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx * ste_ctx,u16 sw_field)573 mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field)
574 {
575 const struct mlx5dr_ste_action_modify_field *hw_field;
576
577 if (sw_field >= ste_ctx->modify_field_arr_sz)
578 return NULL;
579
580 hw_field = &ste_ctx->modify_field_arr[sw_field];
581 if (!hw_field->end && !hw_field->start)
582 return NULL;
583
584 return hw_field;
585 }
586
/* Encode a "set field" modify-header action into @hw_action via the
 * format-specific callback.
 */
void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
			       __be64 *hw_action,
			       u8 hw_field,
			       u8 shifter,
			       u8 length,
			       u32 data)
{
	ste_ctx->set_action_set((u8 *)hw_action,
				hw_field, shifter, length, data);
}
597
/* Encode an "add to field" modify-header action into @hw_action via the
 * format-specific callback.
 */
void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
			       __be64 *hw_action,
			       u8 hw_field,
			       u8 shifter,
			       u8 length,
			       u32 data)
{
	ste_ctx->set_action_add((u8 *)hw_action,
				hw_field, shifter, length, data);
}
608
/* Encode a "copy field to field" modify-header action into @hw_action
 * via the format-specific callback.
 */
void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
				__be64 *hw_action,
				u8 dst_hw_field,
				u8 dst_shifter,
				u8 dst_len,
				u8 src_hw_field,
				u8 src_shifter)
{
	ste_ctx->set_action_copy((u8 *)hw_action,
				 dst_hw_field, dst_shifter, dst_len,
				 src_hw_field, src_shifter);
}
621
/* Build the HW action list for an L3 decap with inline L2 header data.
 * Returns -EINVAL for unsupported header lengths, otherwise the result
 * of the format-specific callback; *used_hw_action_num is set to the
 * number of actions consumed.
 */
int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
					void *data, u32 data_sz,
					u8 *hw_action, u32 hw_action_sz,
					u16 *used_hw_action_num)
{
	/* Only Ethernet frame is supported, with VLAN (18) or without (14) */
	if (data_sz != HDR_LEN_L2 && data_sz != HDR_LEN_L2_W_VLAN)
		return -EINVAL;

	return ste_ctx->set_action_decap_l3_list(data, data_sz,
						 hw_action, hw_action_sz,
						 used_hw_action_num);
}
635
dr_ste_build_pre_check_spec(struct mlx5dr_domain * dmn,struct mlx5dr_match_spec * spec)636 static int dr_ste_build_pre_check_spec(struct mlx5dr_domain *dmn,
637 struct mlx5dr_match_spec *spec)
638 {
639 if (spec->ip_version) {
640 if (spec->ip_version != 0xf) {
641 mlx5dr_err(dmn,
642 "Partial ip_version mask with src/dst IP is not supported\n");
643 return -EINVAL;
644 }
645 } else if (spec->ethertype != 0xffff &&
646 (DR_MASK_IS_SRC_IP_SET(spec) || DR_MASK_IS_DST_IP_SET(spec))) {
647 mlx5dr_err(dmn,
648 "Partial/no ethertype mask with src/dst IP is not supported\n");
649 return -EINVAL;
650 }
651
652 return 0;
653 }
654
/* Validate a matcher's mask before building STEs.
 * Checks run only on the mask (when @value is NULL); a non-NULL @value
 * skips validation entirely.
 * Returns 0 when supported, -EINVAL otherwise.
 */
int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
			       u8 match_criteria,
			       struct mlx5dr_match_param *mask,
			       struct mlx5dr_match_param *value)
{
	if (value)
		return 0;

	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
		/* source_port and vhca_id may be matched fully or not at all */
		if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
			mlx5dr_err(dmn,
				   "Partial mask source_port is not supported\n");
			return -EINVAL;
		}
		if (mask->misc.source_eswitch_owner_vhca_id &&
		    mask->misc.source_eswitch_owner_vhca_id != 0xffff) {
			mlx5dr_err(dmn,
				   "Partial mask source_eswitch_owner_vhca_id is not supported\n");
			return -EINVAL;
		}
	}

	if ((match_criteria & DR_MATCHER_CRITERIA_OUTER) &&
	    dr_ste_build_pre_check_spec(dmn, &mask->outer))
		return -EINVAL;

	if ((match_criteria & DR_MATCHER_CRITERIA_INNER) &&
	    dr_ste_build_pre_check_spec(dmn, &mask->inner))
		return -EINVAL;

	return 0;
}
687
/* Build the full chain of STEs for @value into @ste_arr, one STE per
 * matcher builder. Each STE gets its lookup type, bit mask and tag;
 * all but the last are also linked to the following builder.
 * Returns 0 on success or a negative errno from validation/tag build.
 */
int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
			     struct mlx5dr_matcher_rx_tx *nic_matcher,
			     struct mlx5dr_match_param *value,
			     u8 *ste_arr)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	bool is_rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_build *sb;
	int ret, i;

	ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
					 &matcher->mask, value);
	if (ret)
		return ret;

	sb = nic_matcher->ste_builder;
	for (i = 0; i < nic_matcher->num_of_builders; i++) {
		ste_ctx->ste_init(ste_arr,
				  sb->lu_type,
				  is_rx,
				  dmn->info.caps.gvmi);

		mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);

		ret = sb->ste_build_tag_func(value, sb, dr_ste_get_tag(ste_arr));
		if (ret)
			return ret;

		/* Connect the STEs */
		if (i < (nic_matcher->num_of_builders - 1)) {
			/* Need the next builder for these fields,
			 * not relevant for the last ste in the chain.
			 */
			sb++;
			ste_ctx->set_next_lu_type(ste_arr, sb->lu_type);
			ste_ctx->set_byte_mask(ste_arr, sb->byte_mask);
		}
		/* ste_arr holds full-sized STEs back to back */
		ste_arr += DR_STE_SIZE;
	}
	return 0;
}
731
/* Read field @fld from match buffer @p and, when @clear is set, zero it
 * in place — letting callers detect leftover (unconsumed) fields later.
 * Evaluates to the value read.
 */
#define IFC_GET_CLR(typ, p, fld, clear) ({ \
	void *__p = (p); \
	u32 __t = MLX5_GET(typ, __p, fld); \
	if (clear) \
		MLX5_SET(typ, __p, fld, 0); \
	__t; \
})
739
/* memcpy @len bytes from @from to @to and, when @clear is set, zero the
 * source afterwards — the bulk-copy analogue of IFC_GET_CLR.
 */
#define memcpy_and_clear(to, from, len, clear) ({ \
	void *__to = (to), *__from = (from); \
	size_t __len = (len); \
	memcpy(__to, __from, __len); \
	if (clear) \
		memset(__from, 0, __len); \
})
747
/* Unpack the fte_match_set_misc fields from the raw @mask buffer into
 * @spec; when @clr is set, each consumed field is zeroed in the buffer.
 */
static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec, bool clr)
{
	spec->gre_c_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_c_present, clr);
	spec->gre_k_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_k_present, clr);
	spec->gre_s_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_s_present, clr);
	spec->source_vhca_port = IFC_GET_CLR(fte_match_set_misc, mask, source_vhca_port, clr);
	spec->source_sqn = IFC_GET_CLR(fte_match_set_misc, mask, source_sqn, clr);

	spec->source_port = IFC_GET_CLR(fte_match_set_misc, mask, source_port, clr);
	spec->source_eswitch_owner_vhca_id =
		IFC_GET_CLR(fte_match_set_misc, mask, source_eswitch_owner_vhca_id, clr);

	spec->outer_second_prio = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_prio, clr);
	spec->outer_second_cfi = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_cfi, clr);
	spec->outer_second_vid = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_vid, clr);
	spec->inner_second_prio = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_prio, clr);
	spec->inner_second_cfi = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_cfi, clr);
	spec->inner_second_vid = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_vid, clr);

	spec->outer_second_cvlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, outer_second_cvlan_tag, clr);
	spec->inner_second_cvlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, inner_second_cvlan_tag, clr);
	spec->outer_second_svlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, outer_second_svlan_tag, clr);
	spec->inner_second_svlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, inner_second_svlan_tag, clr);
	spec->gre_protocol = IFC_GET_CLR(fte_match_set_misc, mask, gre_protocol, clr);

	spec->gre_key_h = IFC_GET_CLR(fte_match_set_misc, mask, gre_key.nvgre.hi, clr);
	spec->gre_key_l = IFC_GET_CLR(fte_match_set_misc, mask, gre_key.nvgre.lo, clr);

	spec->vxlan_vni = IFC_GET_CLR(fte_match_set_misc, mask, vxlan_vni, clr);

	spec->geneve_vni = IFC_GET_CLR(fte_match_set_misc, mask, geneve_vni, clr);
	spec->geneve_tlv_option_0_exist =
		IFC_GET_CLR(fte_match_set_misc, mask, geneve_tlv_option_0_exist, clr);
	spec->geneve_oam = IFC_GET_CLR(fte_match_set_misc, mask, geneve_oam, clr);

	spec->outer_ipv6_flow_label =
		IFC_GET_CLR(fte_match_set_misc, mask, outer_ipv6_flow_label, clr);

	spec->inner_ipv6_flow_label =
		IFC_GET_CLR(fte_match_set_misc, mask, inner_ipv6_flow_label, clr);

	spec->geneve_opt_len = IFC_GET_CLR(fte_match_set_misc, mask, geneve_opt_len, clr);
	spec->geneve_protocol_type =
		IFC_GET_CLR(fte_match_set_misc, mask, geneve_protocol_type, clr);

	spec->bth_dst_qp = IFC_GET_CLR(fte_match_set_misc, mask, bth_dst_qp, clr);
}
799
/* Unpack the fte_match_set_lyr_2_4 (L2-L4) fields from the raw @mask
 * buffer into @spec; when @clr is set, each consumed field is zeroed in
 * the buffer. IPv6 addresses are copied raw and split into four
 * host-order 32-bit words.
 */
static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec, bool clr)
{
	__be32 raw_ip[4];

	spec->smac_47_16 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, smac_47_16, clr);

	spec->smac_15_0 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, smac_15_0, clr);
	spec->ethertype = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ethertype, clr);

	spec->dmac_47_16 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, dmac_47_16, clr);

	spec->dmac_15_0 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, dmac_15_0, clr);
	spec->first_prio = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_prio, clr);
	spec->first_cfi = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_cfi, clr);
	spec->first_vid = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_vid, clr);

	spec->ip_protocol = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_protocol, clr);
	spec->ip_dscp = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_dscp, clr);
	spec->ip_ecn = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_ecn, clr);
	spec->cvlan_tag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, cvlan_tag, clr);
	spec->svlan_tag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, svlan_tag, clr);
	spec->frag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, frag, clr);
	spec->ip_version = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_version, clr);
	spec->tcp_flags = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_flags, clr);
	spec->tcp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_sport, clr);
	spec->tcp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_dport, clr);

	spec->ipv4_ihl = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ipv4_ihl, clr);
	spec->ttl_hoplimit = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ttl_hoplimit, clr);

	spec->udp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_sport, clr);
	spec->udp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_dport, clr);

	memcpy_and_clear(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
					      src_ipv4_src_ipv6.ipv6_layout.ipv6),
			 sizeof(raw_ip), clr);

	spec->src_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->src_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->src_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->src_ip_31_0 = be32_to_cpu(raw_ip[3]);

	memcpy_and_clear(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
					      dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
			 sizeof(raw_ip), clr);

	spec->dst_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->dst_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->dst_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->dst_ip_31_0 = be32_to_cpu(raw_ip[3]);
}
851
/* Copy the misc2 section of a match mask from the FW/IFC buffer layout
 * (@mask points at an fte_match_set_misc2) into the SW-steering
 * representation @spec. IFC_GET_CLR (defined earlier in this file)
 * extracts each field and, when @clr is set, presumably also clears it in
 * the source buffer -- confirm against the macro definition.
 */
static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec, bool clr)
{
	/* First MPLS label-stack entry of the outer header */
	spec->outer_first_mpls_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_label, clr);
	spec->outer_first_mpls_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp, clr);
	spec->outer_first_mpls_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos, clr);
	spec->outer_first_mpls_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl, clr);
	/* First MPLS label-stack entry of the inner header */
	spec->inner_first_mpls_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_label, clr);
	spec->inner_first_mpls_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp, clr);
	spec->inner_first_mpls_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos, clr);
	spec->inner_first_mpls_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl, clr);
	/* First MPLS entry carried over a GRE tunnel */
	spec->outer_first_mpls_over_gre_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label, clr);
	spec->outer_first_mpls_over_gre_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp, clr);
	spec->outer_first_mpls_over_gre_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos, clr);
	spec->outer_first_mpls_over_gre_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl, clr);
	/* First MPLS entry carried over a UDP tunnel */
	spec->outer_first_mpls_over_udp_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label, clr);
	spec->outer_first_mpls_over_udp_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp, clr);
	spec->outer_first_mpls_over_udp_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos, clr);
	spec->outer_first_mpls_over_udp_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl, clr);
	/* Metadata registers c7..c0 and register a */
	spec->metadata_reg_c_7 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_7, clr);
	spec->metadata_reg_c_6 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_6, clr);
	spec->metadata_reg_c_5 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_5, clr);
	spec->metadata_reg_c_4 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_4, clr);
	spec->metadata_reg_c_3 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_3, clr);
	spec->metadata_reg_c_2 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_2, clr);
	spec->metadata_reg_c_1 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_1, clr);
	spec->metadata_reg_c_0 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_0, clr);
	spec->metadata_reg_a = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_a, clr);
}
896
/* Copy the misc3 section of a match mask from the FW/IFC buffer layout
 * (@mask points at an fte_match_set_misc3) into the SW-steering
 * representation @spec. IFC_GET_CLR (defined earlier in this file)
 * extracts each field and, when @clr is set, presumably also clears it in
 * the source buffer -- confirm against the macro definition.
 */
static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec, bool clr)
{
	spec->inner_tcp_seq_num = IFC_GET_CLR(fte_match_set_misc3, mask, inner_tcp_seq_num, clr);
	spec->outer_tcp_seq_num = IFC_GET_CLR(fte_match_set_misc3, mask, outer_tcp_seq_num, clr);
	spec->inner_tcp_ack_num = IFC_GET_CLR(fte_match_set_misc3, mask, inner_tcp_ack_num, clr);
	spec->outer_tcp_ack_num = IFC_GET_CLR(fte_match_set_misc3, mask, outer_tcp_ack_num, clr);
	spec->outer_vxlan_gpe_vni =
		IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_vni, clr);
	spec->outer_vxlan_gpe_next_protocol =
		IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol, clr);
	spec->outer_vxlan_gpe_flags =
		IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_flags, clr);
	/* The ICMPv4 fields are named "icmp_*" (no v4 suffix) in the IFC
	 * layout, while ICMPv6 has its own explicitly-named fields.
	 */
	spec->icmpv4_header_data = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_header_data, clr);
	spec->icmpv6_header_data =
		IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_header_data, clr);
	spec->icmpv4_type = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_type, clr);
	spec->icmpv4_code = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_code, clr);
	spec->icmpv6_type = IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_type, clr);
	spec->icmpv6_code = IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_code, clr);
	spec->geneve_tlv_option_0_data =
		IFC_GET_CLR(fte_match_set_misc3, mask, geneve_tlv_option_0_data, clr);
	/* GTP-U tunnel header fields */
	spec->gtpu_teid = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_teid, clr);
	spec->gtpu_msg_flags = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_msg_flags, clr);
	spec->gtpu_msg_type = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_msg_type, clr);
	spec->gtpu_dw_0 = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_dw_0, clr);
	spec->gtpu_dw_2 = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_dw_2, clr);
	spec->gtpu_first_ext_dw_0 =
		IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_first_ext_dw_0, clr);
}
926
/* Copy the misc4 section of a match mask (programmable/flex-parser sample
 * field id/value pairs 0..3) from the FW/IFC buffer layout (@mask, an
 * fte_match_set_misc4) into the SW-steering representation @spec.
 * IFC_GET_CLR extracts each field and, when @clr is set, presumably also
 * clears it in the source buffer -- confirm against the macro definition.
 */
static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec, bool clr)
{
	spec->prog_sample_field_id_0 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_0, clr);
	spec->prog_sample_field_value_0 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_0, clr);
	spec->prog_sample_field_id_1 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_1, clr);
	spec->prog_sample_field_value_1 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_1, clr);
	spec->prog_sample_field_id_2 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_2, clr);
	spec->prog_sample_field_value_2 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_2, clr);
	spec->prog_sample_field_id_3 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_3, clr);
	spec->prog_sample_field_value_3 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_3, clr);
}
946
/* Copy the misc5 section of a match mask (MACsec tag words 0..3 and tunnel
 * header words 0..3) from the FW/IFC buffer layout (@mask, an
 * fte_match_set_misc5) into the SW-steering representation @spec.
 * IFC_GET_CLR extracts each field and, when @clr is set, presumably also
 * clears it in the source buffer -- confirm against the macro definition.
 */
static void dr_ste_copy_mask_misc5(char *mask, struct mlx5dr_match_misc5 *spec, bool clr)
{
	spec->macsec_tag_0 =
		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_0, clr);
	spec->macsec_tag_1 =
		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_1, clr);
	spec->macsec_tag_2 =
		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_2, clr);
	spec->macsec_tag_3 =
		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_3, clr);
	spec->tunnel_header_0 =
		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_0, clr);
	spec->tunnel_header_1 =
		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_1, clr);
	spec->tunnel_header_2 =
		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_2, clr);
	spec->tunnel_header_3 =
		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_3, clr);
}
966
/* dr_ste_copy_tail_param() - resolve which buffer a mask section is read from.
 * @data:           start of the caller-provided match mask buffer
 * @param_location: byte offset of this section within @data
 * @param_sz:       full size of this section
 * @match_sz:       number of valid bytes in @data
 * @tail_param:     scratch buffer used to zero-pad a truncated section
 * @tail_sz:        size of @tail_param
 *
 * Returns a pointer to a full-size, readable copy of the section: directly
 * into @data when enough bytes are present, otherwise @tail_param holding
 * whatever bytes are available, zero-padded to the section size.
 */
static void *dr_ste_copy_tail_param(u8 *data, size_t param_location,
				    size_t param_sz, size_t match_sz,
				    u8 *tail_param, size_t tail_sz)
{
	size_t avail;

	if (match_sz >= param_location + param_sz)
		return data + param_location;

	/* Re-zero the scratch buffer - it may still hold bytes staged for a
	 * previously truncated section.
	 */
	memset(tail_param, 0, tail_sz);

	/* Guard the subtraction: without it, match_sz < param_location makes
	 * (match_sz - param_location) underflow to a huge size_t and memcpy
	 * reads/writes far out of bounds. Also clamp to the scratch size.
	 */
	avail = match_sz > param_location ? match_sz - param_location : 0;
	if (avail > tail_sz)
		avail = tail_sz;
	memcpy(tail_param, data + param_location, avail);

	return tail_param;
}

/* mlx5dr_ste_copy_param() - parse a caller match mask into @set_param.
 * @match_criteria: bitmap of DR_MATCHER_CRITERIA_* sections to parse
 * @set_param:      SW-steering match param to fill
 * @mask:           caller buffer (match_buf/match_sz); the sections are laid
 *                  out back to back in fixed order: outer spec, misc, inner
 *                  spec, misc2, misc3, misc4, misc5
 * @clr:            forwarded to the copy helpers; when set they presumably
 *                  also clear the consumed fields in the source buffer
 *
 * The caller's buffer may end mid-section; such a section is staged in a
 * zero-padded scratch buffer before parsing instead of reading past the end.
 */
void mlx5dr_ste_copy_param(u8 match_criteria,
			   struct mlx5dr_match_param *set_param,
			   struct mlx5dr_match_parameters *mask,
			   bool clr)
{
	u8 tail_param[MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)] = {};
	u8 *data = (u8 *)mask->match_buf;
	size_t param_location = 0;
	void *buff;

	if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
		buff = dr_ste_copy_tail_param(data, param_location,
					      sizeof(struct mlx5dr_match_spec),
					      mask->match_sz, tail_param,
					      sizeof(tail_param));
		dr_ste_copy_mask_spec(buff, &set_param->outer, clr);
	}
	param_location = sizeof(struct mlx5dr_match_spec);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
		buff = dr_ste_copy_tail_param(data, param_location,
					      sizeof(struct mlx5dr_match_misc),
					      mask->match_sz, tail_param,
					      sizeof(tail_param));
		dr_ste_copy_mask_misc(buff, &set_param->misc, clr);
	}
	param_location += sizeof(struct mlx5dr_match_misc);

	if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
		buff = dr_ste_copy_tail_param(data, param_location,
					      sizeof(struct mlx5dr_match_spec),
					      mask->match_sz, tail_param,
					      sizeof(tail_param));
		dr_ste_copy_mask_spec(buff, &set_param->inner, clr);
	}
	param_location += sizeof(struct mlx5dr_match_spec);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
		buff = dr_ste_copy_tail_param(data, param_location,
					      sizeof(struct mlx5dr_match_misc2),
					      mask->match_sz, tail_param,
					      sizeof(tail_param));
		dr_ste_copy_mask_misc2(buff, &set_param->misc2, clr);
	}
	param_location += sizeof(struct mlx5dr_match_misc2);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
		buff = dr_ste_copy_tail_param(data, param_location,
					      sizeof(struct mlx5dr_match_misc3),
					      mask->match_sz, tail_param,
					      sizeof(tail_param));
		dr_ste_copy_mask_misc3(buff, &set_param->misc3, clr);
	}
	param_location += sizeof(struct mlx5dr_match_misc3);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
		buff = dr_ste_copy_tail_param(data, param_location,
					      sizeof(struct mlx5dr_match_misc4),
					      mask->match_sz, tail_param,
					      sizeof(tail_param));
		dr_ste_copy_mask_misc4(buff, &set_param->misc4, clr);
	}
	param_location += sizeof(struct mlx5dr_match_misc4);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC5) {
		buff = dr_ste_copy_tail_param(data, param_location,
					      sizeof(struct mlx5dr_match_misc5),
					      mask->match_sz, tail_param,
					      sizeof(tail_param));
		dr_ste_copy_mask_misc5(buff, &set_param->misc5, clr);
	}
}
1068
/* Record the inner/rx placement flags on @sb, then let the device-specific
 * STE context install its eth_l2_src_dst builder callbacks.
 */
void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask,
				     bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l2_src_dst_init(sb, mask);
}
1078
/* Record the inner/rx placement flags on @sb, then let the device-specific
 * STE context install its eth_l3_ipv6_dst builder callbacks.
 */
void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
				      struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l3_ipv6_dst_init(sb, mask);
}
1088
/* Record the inner/rx placement flags on @sb, then let the device-specific
 * STE context install its eth_l3_ipv6_src builder callbacks.
 */
void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
				      struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l3_ipv6_src_init(sb, mask);
}
1098
/* Record the inner/rx placement flags on @sb, then let the device-specific
 * STE context install its eth_l3_ipv4_5_tuple builder callbacks.
 */
void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
					  struct mlx5dr_ste_build *sb,
					  struct mlx5dr_match_param *mask,
					  bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l3_ipv4_5_tuple_init(sb, mask);
}
1108
/* Record the inner/rx placement flags on @sb, then let the device-specific
 * STE context install its eth_l2_src builder callbacks.
 */
void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l2_src_init(sb, mask);
}
1118
/* Record the inner/rx placement flags on @sb, then let the device-specific
 * STE context install its eth_l2_dst builder callbacks.
 */
void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l2_dst_init(sb, mask);
}
1128
/* Record the inner/rx placement flags on @sb, then let the device-specific
 * STE context install its eth_l2_tnl builder callbacks.
 */
void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask, bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l2_tnl_init(sb, mask);
}
1137
/* Record the inner/rx placement flags on @sb, then let the device-specific
 * STE context install its eth_l3_ipv4_misc builder callbacks.
 */
void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
				       struct mlx5dr_ste_build *sb,
				       struct mlx5dr_match_param *mask,
				       bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l3_ipv4_misc_init(sb, mask);
}
1147
/* Record the inner/rx placement flags on @sb, then let the device-specific
 * STE context install its eth_ipv6_l3_l4 builder callbacks.
 */
void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask,
				     bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_ipv6_l3_l4_init(sb, mask);
}
1157
/* Tag callback for the "empty always hit" STE builder: the builder uses a
 * don't-care lookup type with a zero byte mask (see
 * mlx5dr_ste_build_empty_always_hit below), so there is nothing to encode
 * into @tag; always succeed.
 */
static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
					     struct mlx5dr_ste_build *sb,
					     u8 *tag)
{
	return 0;
}
1164
/* Configure @sb as a catch-all builder: don't-care lookup type, an empty
 * byte mask, and a tag callback that writes nothing.
 */
void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
{
	sb->lu_type = MLX5DR_STE_LU_TYPE_DONT_CARE;
	sb->ste_build_tag_func = dr_ste_build_empty_always_hit_tag;
	sb->byte_mask = 0;
	sb->rx = rx;
}
1172
/* Record the inner/rx placement flags on @sb, then let the device-specific
 * STE context install its MPLS builder callbacks.
 */
void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
			   struct mlx5dr_ste_build *sb,
			   struct mlx5dr_match_param *mask,
			   bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_mpls_init(sb, mask);
}
1182
/* Record the inner/rx placement flags on @sb, then let the device-specific
 * STE context install its GRE tunnel builder callbacks.
 */
void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
			      struct mlx5dr_ste_build *sb,
			      struct mlx5dr_match_param *mask,
			      bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_tnl_gre_init(sb, mask);
}
1192
/* Record the placement flags and device caps on @sb, then let the
 * device-specific STE context install its MPLS-over-GRE builder callbacks.
 *
 * Fix: the original had "return ste_ctx->...init(sb, mask);" - a return
 * statement with an expression is not allowed in a void function
 * (ISO C11 6.8.6.4) and was inconsistent with every sibling wrapper.
 */
void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
					struct mlx5dr_ste_build *sb,
					struct mlx5dr_match_param *mask,
					struct mlx5dr_cmd_caps *caps,
					bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	sb->caps = caps;
	ste_ctx->build_tnl_mpls_over_gre_init(sb, mask);
}
1204
/* Record the placement flags and device caps on @sb, then let the
 * device-specific STE context install its MPLS-over-UDP builder callbacks.
 *
 * Fix: the original had "return ste_ctx->...init(sb, mask);" - a return
 * statement with an expression is not allowed in a void function
 * (ISO C11 6.8.6.4) and was inconsistent with every sibling wrapper.
 */
void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx,
					struct mlx5dr_ste_build *sb,
					struct mlx5dr_match_param *mask,
					struct mlx5dr_cmd_caps *caps,
					bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	sb->caps = caps;
	ste_ctx->build_tnl_mpls_over_udp_init(sb, mask);
}
1216
/* Record the placement flags and device caps on @sb, then let the
 * device-specific STE context install its ICMP builder callbacks.
 */
void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
			   struct mlx5dr_ste_build *sb,
			   struct mlx5dr_match_param *mask,
			   struct mlx5dr_cmd_caps *caps,
			   bool inner, bool rx)
{
	sb->caps = caps;
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_icmp_init(sb, mask);
}
1228
/* Record the inner/rx placement flags on @sb, then let the device-specific
 * STE context install its general-purpose builder callbacks.
 */
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
				      struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_general_purpose_init(sb, mask);
}
1238
/* Record the inner/rx placement flags on @sb, then let the device-specific
 * STE context install its eth_l4_misc builder callbacks.
 */
void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
				  struct mlx5dr_ste_build *sb,
				  struct mlx5dr_match_param *mask,
				  bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l4_misc_init(sb, mask);
}
1248
/* Record the inner/rx placement flags on @sb, then let the device-specific
 * STE context install its VXLAN-GPE tunnel builder callbacks.
 */
void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
				    struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask,
				    bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_tnl_vxlan_gpe_init(sb, mask);
}
1258
/* Record the inner/rx placement flags on @sb, then let the device-specific
 * STE context install its GENEVE tunnel builder callbacks.
 */
void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_tnl_geneve_init(sb, mask);
}
1268
/* Record the placement flags and device caps on @sb, then let the
 * device-specific STE context install its GENEVE TLV-option builder
 * callbacks.
 */
void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
					 struct mlx5dr_ste_build *sb,
					 struct mlx5dr_match_param *mask,
					 struct mlx5dr_cmd_caps *caps,
					 bool inner, bool rx)
{
	sb->inner = inner;
	sb->caps = caps;
	sb->rx = rx;
	ste_ctx->build_tnl_geneve_tlv_opt_init(sb, mask);
}
1280
/* Record the placement flags and device caps on @sb, then let the
 * device-specific STE context install its GENEVE TLV-option-exist builder
 * callbacks. Silently a no-op when this STE context version does not
 * provide that builder.
 */
void mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(struct mlx5dr_ste_ctx *ste_ctx,
					       struct mlx5dr_ste_build *sb,
					       struct mlx5dr_match_param *mask,
					       struct mlx5dr_cmd_caps *caps,
					       bool inner, bool rx)
{
	if (!ste_ctx->build_tnl_geneve_tlv_opt_exist_init)
		return;

	sb->inner = inner;
	sb->caps = caps;
	sb->rx = rx;
	ste_ctx->build_tnl_geneve_tlv_opt_exist_init(sb, mask);
}
1295
/* Record the inner/rx placement flags on @sb, then let the device-specific
 * STE context install its GTP-U tunnel builder callbacks.
 */
void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_ste_build *sb,
			       struct mlx5dr_match_param *mask,
			       bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_tnl_gtpu_init(sb, mask);
}
1305
/* Record the placement flags and device caps on @sb, then let the
 * device-specific STE context install its GTP-U flex-parser-0 builder
 * callbacks.
 */
void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
					     struct mlx5dr_ste_build *sb,
					     struct mlx5dr_match_param *mask,
					     struct mlx5dr_cmd_caps *caps,
					     bool inner, bool rx)
{
	sb->inner = inner;
	sb->caps = caps;
	sb->rx = rx;
	ste_ctx->build_tnl_gtpu_flex_parser_0_init(sb, mask);
}
1317
/* Record the placement flags and device caps on @sb, then let the
 * device-specific STE context install its GTP-U flex-parser-1 builder
 * callbacks.
 */
void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
					     struct mlx5dr_ste_build *sb,
					     struct mlx5dr_match_param *mask,
					     struct mlx5dr_cmd_caps *caps,
					     bool inner, bool rx)
{
	sb->inner = inner;
	sb->caps = caps;
	sb->rx = rx;
	ste_ctx->build_tnl_gtpu_flex_parser_1_init(sb, mask);
}
1329
/* Record the inner/rx placement flags on @sb, then let the device-specific
 * STE context install its register-0 builder callbacks.
 */
void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_register_0_init(sb, mask);
}
1339
/* Record the inner/rx placement flags on @sb, then let the device-specific
 * STE context install its register-1 builder callbacks.
 */
void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_register_1_init(sb, mask);
}
1349
/* Record domain and placement info on @sb, then let the device-specific
 * STE context install its src_gvmi_qpn builder callbacks.
 */
void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
				   struct mlx5dr_ste_build *sb,
				   struct mlx5dr_match_param *mask,
				   struct mlx5dr_domain *dmn,
				   bool inner, bool rx)
{
	/* Must latch vhca_id_valid from the mask BEFORE the init callback,
	 * which resets source_eswitch_owner_vhca_id.
	 */
	sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;

	sb->inner = inner;
	sb->dmn = dmn;
	sb->rx = rx;
	ste_ctx->build_src_gvmi_qpn_init(sb, mask);
}
1364
/* Record the inner/rx placement flags on @sb, then let the device-specific
 * STE context install its flex-parser-0 builder callbacks.
 */
void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
				    struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask,
				    bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_flex_parser_0_init(sb, mask);
}
1374
/* Record the inner/rx placement flags on @sb, then let the device-specific
 * STE context install its flex-parser-1 builder callbacks.
 */
void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
				    struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask,
				    bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_flex_parser_1_init(sb, mask);
}
1384
/* Record the inner/rx placement flags on @sb, then let the device-specific
 * STE context install its tunnel-header words 0/1 builder callbacks.
 */
void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask,
				     bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_tnl_header_0_1_init(sb, mask);
}
1394
/* Map a device steering format version to its STE context implementation.
 * Returns NULL for unknown/unsupported versions.
 */
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
{
	switch (version) {
	case MLX5_STEERING_FORMAT_CONNECTX_5:
		return mlx5dr_ste_get_ctx_v0();
	case MLX5_STEERING_FORMAT_CONNECTX_6DX:
		return mlx5dr_ste_get_ctx_v1();
	case MLX5_STEERING_FORMAT_CONNECTX_7:
		return mlx5dr_ste_get_ctx_v2();
	default:
		return NULL;
	}
}
1406