1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3
4 #include <linux/types.h>
5 #include <linux/crc32.h>
6 #include "dr_ste.h"
7
/* In-memory layout of a full hardware STE (Steering Table Entry):
 * control bits, the match tag, and the per-byte match mask.
 * Array sizes are defined in dr_ste.h.
 */
struct dr_hw_ste_format {
	u8 ctrl[DR_STE_SIZE_CTRL];
	u8 tag[DR_STE_SIZE_TAG];
	u8 mask[DR_STE_SIZE_MASK];
};
13
dr_ste_crc32_calc(const void * input_data,size_t length)14 static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
15 {
16 u32 crc = crc32(0, input_data, length);
17
18 return (__force u32)htonl(crc);
19 }
20
/* Return true when the device steering format (post ConnectX-5) supports
 * checksum recalculation on TTL modification.
 */
bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps)
{
	return caps->sw_format_ver > MLX5_STEERING_FORMAT_CONNECTX_5;
}
25
mlx5dr_ste_calc_hash_index(u8 * hw_ste_p,struct mlx5dr_ste_htbl * htbl)26 u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
27 {
28 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
29 u8 masked[DR_STE_SIZE_TAG] = {};
30 u32 crc32, index;
31 u16 bit;
32 int i;
33
34 /* Don't calculate CRC if the result is predicted */
35 if (htbl->chunk->num_of_entries == 1 || htbl->byte_mask == 0)
36 return 0;
37
38 /* Mask tag using byte mask, bit per byte */
39 bit = 1 << (DR_STE_SIZE_TAG - 1);
40 for (i = 0; i < DR_STE_SIZE_TAG; i++) {
41 if (htbl->byte_mask & bit)
42 masked[i] = hw_ste->tag[i];
43
44 bit = bit >> 1;
45 }
46
47 crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG);
48 index = crc32 & (htbl->chunk->num_of_entries - 1);
49
50 return index;
51 }
52
mlx5dr_ste_conv_bit_to_byte_mask(u8 * bit_mask)53 u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
54 {
55 u16 byte_mask = 0;
56 int i;
57
58 for (i = 0; i < DR_STE_SIZE_MASK; i++) {
59 byte_mask = byte_mask << 1;
60 if (bit_mask[i] == 0xff)
61 byte_mask |= 1;
62 }
63 return byte_mask;
64 }
65
/* Return a pointer to the tag area inside a raw STE buffer. */
static u8 *dr_ste_get_tag(u8 *hw_ste_p)
{
	return ((struct dr_hw_ste_format *)hw_ste_p)->tag;
}
72
/* Copy @bit_mask into the mask area of the raw STE buffer @hw_ste_p. */
void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
{
	struct dr_hw_ste_format *ste_layout;

	ste_layout = (struct dr_hw_ste_format *)hw_ste_p;
	memcpy(ste_layout->mask, bit_mask, DR_STE_SIZE_MASK);
}
79
/* Turn the STE into an "always hit" entry: an all-zero tag and mask
 * match every packet.
 */
static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
{
	memset(hw_ste->tag, 0, sizeof(hw_ste->tag));
	memset(hw_ste->mask, 0, sizeof(hw_ste->mask));
}
85
/* Turn the STE into an "always miss" entry: a non-zero tag byte with a
 * zero mask can never match, so every packet takes the miss path.
 */
static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
{
	hw_ste->tag[0] = 0xdc;
	hw_ste->mask[0] = 0;
}
91
/* Set the miss address of a raw STE via the format-specific STE context. */
void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
			      u8 *hw_ste_p, u64 miss_addr)
{
	ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
}
97
/* Make @ste an "always miss" entry that redirects to @miss_addr:
 * clear the next lookup type, program the miss address, and spoil the
 * tag/mask so no packet can match.
 */
static void dr_ste_always_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
				    struct mlx5dr_ste *ste, u64 miss_addr)
{
	u8 *hw_ste_p = ste->hw_ste;

	ste_ctx->set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
	ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
	dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
}
107
/* Set the hit address and hash-table size of a raw STE via the
 * format-specific STE context.
 */
void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
			     u8 *hw_ste, u64 icm_addr, u32 ht_size)
{
	ste_ctx->set_hit_addr(hw_ste, icm_addr, ht_size);
}
113
mlx5dr_ste_get_icm_addr(struct mlx5dr_ste * ste)114 u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
115 {
116 u32 index = ste - ste->htbl->ste_arr;
117
118 return ste->htbl->chunk->icm_addr + DR_STE_SIZE * index;
119 }
120
mlx5dr_ste_get_mr_addr(struct mlx5dr_ste * ste)121 u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
122 {
123 u32 index = ste - ste->htbl->ste_arr;
124
125 return ste->htbl->chunk->mr_addr + DR_STE_SIZE * index;
126 }
127
/* Return the miss (collision) list head that corresponds to @ste's slot
 * in its hash table.
 */
struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
{
	u32 index = ste - ste->htbl->ste_arr;

	return &ste->htbl->miss_list[index];
}
134
/* Make @ste an "always hit" entry that jumps into @next_htbl:
 * program the next table's byte mask, lookup type and hit address,
 * then clear tag/mask so every packet matches.
 */
static void dr_ste_always_hit_htbl(struct mlx5dr_ste_ctx *ste_ctx,
				   struct mlx5dr_ste *ste,
				   struct mlx5dr_ste_htbl *next_htbl)
{
	struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
	u8 *hw_ste = ste->hw_ste;

	ste_ctx->set_byte_mask(hw_ste, next_htbl->byte_mask);
	ste_ctx->set_next_lu_type(hw_ste, next_htbl->lu_type);
	ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);

	dr_ste_set_always_hit((struct dr_hw_ste_format *)ste->hw_ste);
}
148
/* True when @ste_location (1-based chain position) is the final STE
 * built for this matcher.
 */
bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
				u8 ste_location)
{
	return ste_location == nic_matcher->num_of_builders;
}
154
/* Replace relevant fields, except for:
 * htbl - keep the origin htbl
 * miss_list + list - already took the src from the list.
 * icm_addr/mr_addr - depends on the hosting table.
 *
 * Before:
 * | a | -> | b | -> | c | ->
 *
 * After:
 * | a | -> | c | ->
 * while the data that was in b is copied into a.
 */
static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
{
	/* Only DR_STE_SIZE_REDUCED bytes are stored per cached entry */
	memcpy(dst->hw_ste, src->hw_ste, DR_STE_SIZE_REDUCED);
	dst->next_htbl = src->next_htbl;
	if (dst->next_htbl)
		/* dst is now the STE that points at the moved next table */
		dst->next_htbl->pointing_ste = dst;

	dst->refcount = src->refcount;
}
176
/* Free ste which is the head and the only one in miss_list */
static void
dr_ste_remove_head_ste(struct mlx5dr_ste_ctx *ste_ctx,
		       struct mlx5dr_ste *ste,
		       struct mlx5dr_matcher_rx_tx *nic_matcher,
		       struct mlx5dr_ste_send_info *ste_info_head,
		       struct list_head *send_ste_list,
		       struct mlx5dr_ste_htbl *stats_tbl)
{
	u8 tmp_data_ste[DR_STE_SIZE] = {};
	struct mlx5dr_ste tmp_ste = {};
	u64 miss_addr;

	tmp_ste.hw_ste = tmp_data_ste;

	/* Use temp ste because dr_ste_always_miss_addr
	 * touches bit_mask area which doesn't exist at ste->hw_ste.
	 */
	memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
	/* Redirect misses to the matcher's end-anchor table */
	miss_addr = nic_matcher->e_anchor->chunk->icm_addr;
	dr_ste_always_miss_addr(ste_ctx, &tmp_ste, miss_addr);
	/* Copy the reduced part back to the cached copy */
	memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED);

	list_del_init(&ste->miss_list_node);

	/* Write full STE size in order to have "always_miss" */
	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
						  0, tmp_data_ste,
						  ste_info_head,
						  send_ste_list,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_valid_entries--;
}
211
/* Free ste which is the head but NOT the only one in miss_list:
 * |_ste_| --> |_next_ste_| -->|__| -->|__| -->/0
 * The head slot must stay at its hash bucket, so next_ste's content is
 * copied into it and next_ste's own slot is released.
 */
static void
dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher,
			struct mlx5dr_ste *ste,
			struct mlx5dr_ste *next_ste,
			struct mlx5dr_ste_send_info *ste_info_head,
			struct list_head *send_ste_list,
			struct mlx5dr_ste_htbl *stats_tbl)

{
	struct mlx5dr_ste_htbl *next_miss_htbl;
	u8 hw_ste[DR_STE_SIZE] = {};
	int sb_idx;

	next_miss_htbl = next_ste->htbl;

	/* Remove from the miss_list the next_ste before copy */
	list_del_init(&next_ste->miss_list_node);

	/* Move data from next into ste */
	dr_ste_replace(ste, next_ste);

	/* Update the rule on STE change */
	mlx5dr_rule_set_last_member(next_ste->rule_rx_tx, ste, false);

	/* Build the full-size STE: reduced part from the cached copy,
	 * bit mask re-derived from the matcher's builder for this
	 * chain location.
	 */
	memcpy(hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
	sb_idx = ste->ste_chain_location - 1;
	mlx5dr_ste_set_bit_mask(hw_ste,
				nic_matcher->ste_builder[sb_idx].bit_mask);

	/* Del the htbl that contains the next_ste.
	 * The origin htbl stay with the same number of entries.
	 */
	mlx5dr_htbl_put(next_miss_htbl);

	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
						  0, hw_ste,
						  ste_info_head,
						  send_ste_list,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_collisions--;
	stats_tbl->ctrl.num_of_valid_entries--;
}
259
/* Free ste that is located in the middle of the miss list:
 * |__| -->|_prev_ste_|->|_ste_|-->|_next_ste_|
 * Unlinks @ste by making prev's miss address skip over it.
 */
static void dr_ste_remove_middle_ste(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste *ste,
				     struct mlx5dr_ste_send_info *ste_info,
				     struct list_head *send_ste_list,
				     struct mlx5dr_ste_htbl *stats_tbl)
{
	struct mlx5dr_ste *prev_ste;
	u64 miss_addr;

	prev_ste = list_prev_entry(ste, miss_list_node);
	if (WARN_ON(!prev_ste))
		return;

	/* prev inherits ste's miss address, bypassing ste in HW */
	miss_addr = ste_ctx->get_miss_addr(ste->hw_ste);
	ste_ctx->set_miss_addr(prev_ste->hw_ste, miss_addr);

	/* Only the control part of prev changed, so send just that */
	mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_CTRL, 0,
						  prev_ste->hw_ste, ste_info,
						  send_ste_list, true /* Copy data*/);

	list_del_init(&ste->miss_list_node);

	stats_tbl->ctrl.num_of_valid_entries--;
	stats_tbl->ctrl.num_of_collisions--;
}
288
/* Release @ste: unlink it from its miss list (handling head/middle
 * cases), flush the accumulated STE updates to HW, and drop the
 * hash-table reference unless ownership moved during head replacement.
 */
void mlx5dr_ste_free(struct mlx5dr_ste *ste,
		     struct mlx5dr_matcher *matcher,
		     struct mlx5dr_matcher_rx_tx *nic_matcher)
{
	struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_send_info ste_info_head;
	struct mlx5dr_ste *next_ste, *first_ste;
	bool put_on_origin_table = true;
	struct mlx5dr_ste_htbl *stats_tbl;
	LIST_HEAD(send_ste_list);

	first_ste = list_first_entry(mlx5dr_ste_get_miss_list(ste),
				     struct mlx5dr_ste, miss_list_node);
	/* Statistics are tracked on the table hosting the list head */
	stats_tbl = first_ste->htbl;

	/* Two options:
	 * 1. ste is head:
	 *	a. head ste is the only ste in the miss list
	 *	b. head ste is not the only ste in the miss-list
	 * 2. ste is not head
	 */
	if (first_ste == ste) { /* Ste is the head */
		struct mlx5dr_ste *last_ste;

		last_ste = list_last_entry(mlx5dr_ste_get_miss_list(ste),
					   struct mlx5dr_ste, miss_list_node);
		if (last_ste == first_ste)
			next_ste = NULL;
		else
			next_ste = list_next_entry(ste, miss_list_node);

		if (!next_ste) {
			/* One and only entry in the list */
			dr_ste_remove_head_ste(ste_ctx, ste,
					       nic_matcher,
					       &ste_info_head,
					       &send_ste_list,
					       stats_tbl);
		} else {
			/* First but not only entry in the list */
			dr_ste_replace_head_ste(nic_matcher, ste,
						next_ste, &ste_info_head,
						&send_ste_list, stats_tbl);
			/* Head slot stays in use; keep its htbl reference */
			put_on_origin_table = false;
		}
	} else { /* Ste in the middle of the list */
		dr_ste_remove_middle_ste(ste_ctx, ste,
					 &ste_info_head, &send_ste_list,
					 stats_tbl);
	}

	/* Update HW */
	list_for_each_entry_safe(cur_ste_info, tmp_ste_info,
				 &send_ste_list, send_list) {
		list_del(&cur_ste_info->send_list);
		mlx5dr_send_postsend_ste(dmn, cur_ste_info->ste,
					 cur_ste_info->data, cur_ste_info->size,
					 cur_ste_info->offset);
	}

	if (put_on_origin_table)
		mlx5dr_htbl_put(ste->htbl);
}
354
/* Compare the tag areas of two raw STE buffers; true when equal. */
bool mlx5dr_ste_equal_tag(void *src, void *dst)
{
	struct dr_hw_ste_format *s = (struct dr_hw_ste_format *)src;
	struct dr_hw_ste_format *d = (struct dr_hw_ste_format *)dst;

	return memcmp(s->tag, d->tag, DR_STE_SIZE_TAG) == 0;
}
362
/* Point a raw STE's hit address at @next_htbl's ICM chunk. */
void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
					  u8 *hw_ste,
					  struct mlx5dr_ste_htbl *next_htbl)
{
	struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;

	ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
}
371
/* Give the STE format a chance to fix up the buffer before it is sent
 * to HW; optional hook, present only on formats that need it.
 */
void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
				     u8 *hw_ste_p, u32 ste_size)
{
	if (ste_ctx->prepare_for_postsend)
		ste_ctx->prepare_for_postsend(hw_ste_p, ste_size);
}
378
/* Init one ste as a pattern for ste data array: initialize
 * @formatted_ste with the table's lookup type and gvmi, then connect it
 * either to the next table (CONNECT_HIT) or to a miss address.
 */
void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
				  u16 gvmi,
				  enum mlx5dr_domain_nic_type nic_type,
				  struct mlx5dr_ste_htbl *htbl,
				  u8 *formatted_ste,
				  struct mlx5dr_htbl_connect_info *connect_info)
{
	bool is_rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;
	/* Temporary wrapper so the always-hit/miss helpers can be reused */
	struct mlx5dr_ste ste = {};

	ste_ctx->ste_init(formatted_ste, htbl->lu_type, is_rx, gvmi);
	ste.hw_ste = formatted_ste;

	if (connect_info->type == CONNECT_HIT)
		dr_ste_always_hit_htbl(ste_ctx, &ste, connect_info->hit_next_htbl);
	else
		dr_ste_always_miss_addr(ste_ctx, &ste, connect_info->miss_icm_addr);
}
398
/* Build the pattern STE for @htbl and write the initialized table to HW.
 * Returns 0 on success or the postsend error code.
 */
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn,
				      struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_htbl_connect_info *connect_info,
				      bool update_hw_ste)
{
	u8 formatted_ste[DR_STE_SIZE] = {};

	mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
				     dmn->info.caps.gvmi,
				     nic_dmn->type,
				     htbl,
				     formatted_ste,
				     connect_info);

	return mlx5dr_send_postsend_formatted_htbl(dmn, htbl, formatted_ste, update_hw_ste);
}
416
/* Allocate the next hash table in the rule chain (unless @ste is the
 * last builder), write it to HW connected to the matcher's end anchor,
 * and link @ste's hit address to it.
 * Returns 0 on success, -ENOMEM on allocation failure, -ENOENT if the
 * HW write failed.
 */
int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
				struct mlx5dr_matcher_rx_tx *nic_matcher,
				struct mlx5dr_ste *ste,
				u8 *cur_hw_ste,
				enum mlx5dr_icm_chunk_size log_table_size)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_ste_htbl *next_htbl;

	if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
		u16 next_lu_type;
		u16 byte_mask;

		/* Next table inherits lookup params from the current STE */
		next_lu_type = ste_ctx->get_next_lu_type(cur_hw_ste);
		byte_mask = ste_ctx->get_byte_mask(cur_hw_ste);

		next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
						  log_table_size,
						  next_lu_type,
						  byte_mask);
		if (!next_htbl) {
			mlx5dr_dbg(dmn, "Failed allocating table\n");
			return -ENOMEM;
		}

		/* Write new table to HW */
		info.type = CONNECT_MISS;
		info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
		if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl,
						      &info, false)) {
			mlx5dr_info(dmn, "Failed writing table to HW\n");
			goto free_table;
		}

		mlx5dr_ste_set_hit_addr_by_next_htbl(ste_ctx,
						     cur_hw_ste, next_htbl);
		ste->next_htbl = next_htbl;
		next_htbl->pointing_ste = ste;
	}

	return 0;

free_table:
	mlx5dr_ste_htbl_free(next_htbl);
	return -ENOENT;
}
466
/* Allocate an STE hash table backed by an ICM chunk of @chunk_size and
 * initialize every entry (cached hw_ste pointer, back-pointer to the
 * table, empty miss list). Returns NULL on allocation failure.
 * The returned table starts with refcount 0; the caller owns it and
 * frees it with mlx5dr_ste_htbl_free().
 */
struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
					      enum mlx5dr_icm_chunk_size chunk_size,
					      u16 lu_type, u16 byte_mask)
{
	struct mlx5dr_icm_chunk *chunk;
	struct mlx5dr_ste_htbl *htbl;
	int i;

	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
	if (!htbl)
		return NULL;

	chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size);
	if (!chunk)
		goto out_free_htbl;

	htbl->chunk = chunk;
	htbl->lu_type = lu_type;
	htbl->byte_mask = byte_mask;
	htbl->ste_arr = chunk->ste_arr;
	htbl->hw_ste_arr = chunk->hw_ste_arr;
	htbl->miss_list = chunk->miss_list;
	htbl->refcount = 0;

	for (i = 0; i < chunk->num_of_entries; i++) {
		struct mlx5dr_ste *ste = &htbl->ste_arr[i];

		/* Cached copies are stored reduced-size, back to back */
		ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
		ste->htbl = htbl;
		ste->refcount = 0;
		INIT_LIST_HEAD(&ste->miss_list_node);
		INIT_LIST_HEAD(&htbl->miss_list[i]);
	}

	htbl->chunk_size = chunk_size;
	return htbl;

out_free_htbl:
	kfree(htbl);
	return NULL;
}
508
/* Free @htbl and its ICM chunk. Returns -EBUSY without freeing when the
 * table is still referenced.
 */
int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
{
	if (htbl->refcount)
		return -EBUSY;

	mlx5dr_icm_free_chunk(htbl->chunk);
	kfree(htbl);
	return 0;
}
518
/* Program TX actions into @hw_ste_arr via the format-specific STE
 * context; @added_stes is updated with any extra STEs the format needed.
 */
void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *hw_ste_arr,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes)
{
	ste_ctx->set_actions_tx(dmn, action_type_set, hw_ste_arr,
				attr, added_stes);
}
529
/* Program RX actions into @hw_ste_arr via the format-specific STE
 * context; @added_stes is updated with any extra STEs the format needed.
 */
void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *hw_ste_arr,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes)
{
	ste_ctx->set_actions_rx(dmn, action_type_set, hw_ste_arr,
				attr, added_stes);
}
540
/* Translate a SW modify-header field id to the format-specific HW field
 * descriptor. Returns NULL when the field is out of range or has no HW
 * mapping (an all-zero table entry).
 */
const struct mlx5dr_ste_action_modify_field *
mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field)
{
	const struct mlx5dr_ste_action_modify_field *hw_field;

	if (sw_field >= ste_ctx->modify_field_arr_sz)
		return NULL;

	hw_field = &ste_ctx->modify_field_arr[sw_field];
	/* Unmapped fields are left zeroed in the translation table */
	if (!hw_field->end && !hw_field->start)
		return NULL;

	return hw_field;
}
555
/* Encode a modify-header SET action into @hw_action via the
 * format-specific STE context.
 */
void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
			       __be64 *hw_action,
			       u8 hw_field,
			       u8 shifter,
			       u8 length,
			       u32 data)
{
	ste_ctx->set_action_set((u8 *)hw_action,
				hw_field, shifter, length, data);
}
566
/* Encode a modify-header ADD action into @hw_action via the
 * format-specific STE context.
 */
void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
			       __be64 *hw_action,
			       u8 hw_field,
			       u8 shifter,
			       u8 length,
			       u32 data)
{
	ste_ctx->set_action_add((u8 *)hw_action,
				hw_field, shifter, length, data);
}
577
/* Encode a modify-header COPY action (src field -> dst field) into
 * @hw_action via the format-specific STE context.
 */
void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
				__be64 *hw_action,
				u8 dst_hw_field,
				u8 dst_shifter,
				u8 dst_len,
				u8 src_hw_field,
				u8 src_shifter)
{
	ste_ctx->set_action_copy((u8 *)hw_action,
				 dst_hw_field, dst_shifter, dst_len,
				 src_hw_field, src_shifter);
}
590
/* Build the HW action list for an L3 decap with inner L2 rewrite.
 * @data holds the new L2 header; its size must be exactly a plain or
 * VLAN-tagged Ethernet header. Returns -EINVAL otherwise.
 */
int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
					void *data, u32 data_sz,
					u8 *hw_action, u32 hw_action_sz,
					u16 *used_hw_action_num)
{
	/* Only Ethernet frame is supported, with VLAN (18) or without (14) */
	if (data_sz != HDR_LEN_L2 && data_sz != HDR_LEN_L2_W_VLAN)
		return -EINVAL;

	return ste_ctx->set_action_decap_l3_list(data, data_sz,
						 hw_action, hw_action_sz,
						 used_hw_action_num);
}
604
/* Validate a match mask before building STEs. When called on a mask
 * only (@value == NULL), reject partial masks on fields that HW can
 * only match exactly. Returns 0 when the mask is supported.
 */
int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
			       u8 match_criteria,
			       struct mlx5dr_match_param *mask,
			       struct mlx5dr_match_param *value)
{
	if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) {
		if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
			mlx5dr_err(dmn,
				   "Partial mask source_port is not supported\n");
			return -EINVAL;
		}
		if (mask->misc.source_eswitch_owner_vhca_id &&
		    mask->misc.source_eswitch_owner_vhca_id != 0xffff) {
			mlx5dr_err(dmn,
				   "Partial mask source_eswitch_owner_vhca_id is not supported\n");
			return -EINVAL;
		}
	}

	return 0;
}
626
/* Build the full STE chain for a rule into @ste_arr: one DR_STE_SIZE
 * entry per matcher builder, each initialized, masked, tagged from
 * @value and chained to the next. Returns 0 on success or a negative
 * errno from the pre-check / tag builder.
 */
int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
			     struct mlx5dr_matcher_rx_tx *nic_matcher,
			     struct mlx5dr_match_param *value,
			     u8 *ste_arr)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	bool is_rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_build *sb;
	int ret, i;

	ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
					 &matcher->mask, value);
	if (ret)
		return ret;

	sb = nic_matcher->ste_builder;
	for (i = 0; i < nic_matcher->num_of_builders; i++) {
		ste_ctx->ste_init(ste_arr,
				  sb->lu_type,
				  is_rx,
				  dmn->info.caps.gvmi);

		mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);

		/* Fill the tag from the match value per this builder */
		ret = sb->ste_build_tag_func(value, sb, dr_ste_get_tag(ste_arr));
		if (ret)
			return ret;

		/* Connect the STEs */
		if (i < (nic_matcher->num_of_builders - 1)) {
			/* Need the next builder for these fields,
			 * not relevant for the last ste in the chain.
			 */
			sb++;
			ste_ctx->set_next_lu_type(ste_arr, sb->lu_type);
			ste_ctx->set_byte_mask(ste_arr, sb->byte_mask);
		}
		ste_arr += DR_STE_SIZE;
	}
	return 0;
}
670
/* Read field @fld of type @typ from match buffer @p; when @clear is
 * set, zero the field in @p afterwards. Evaluates to the value read.
 */
#define IFC_GET_CLR(typ, p, fld, clear) ({ \
	void *__p = (p); \
	u32 __t = MLX5_GET(typ, __p, fld); \
	if (clear) \
		MLX5_SET(typ, __p, fld, 0); \
	__t; \
})
678
/* Copy @len bytes from @from to @to; when @clear is set, zero the
 * source buffer afterwards.
 */
#define memcpy_and_clear(to, from, len, clear) ({ \
	void *__to = (to), *__from = (from); \
	size_t __len = (len); \
	memcpy(__to, __from, __len); \
	if (clear) \
		memset(__from, 0, __len); \
})
686
/* Copy the fte_match_set_misc fields from the raw @mask buffer into the
 * SW-steering representation @spec; when @clr is set, the consumed
 * fields are zeroed in @mask (so leftovers can be detected by callers).
 */
static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec, bool clr)
{
	spec->gre_c_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_c_present, clr);
	spec->gre_k_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_k_present, clr);
	spec->gre_s_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_s_present, clr);
	spec->source_vhca_port = IFC_GET_CLR(fte_match_set_misc, mask, source_vhca_port, clr);
	spec->source_sqn = IFC_GET_CLR(fte_match_set_misc, mask, source_sqn, clr);

	spec->source_port = IFC_GET_CLR(fte_match_set_misc, mask, source_port, clr);
	spec->source_eswitch_owner_vhca_id =
		IFC_GET_CLR(fte_match_set_misc, mask, source_eswitch_owner_vhca_id, clr);

	spec->outer_second_prio = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_prio, clr);
	spec->outer_second_cfi = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_cfi, clr);
	spec->outer_second_vid = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_vid, clr);
	spec->inner_second_prio = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_prio, clr);
	spec->inner_second_cfi = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_cfi, clr);
	spec->inner_second_vid = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_vid, clr);

	spec->outer_second_cvlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, outer_second_cvlan_tag, clr);
	spec->inner_second_cvlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, inner_second_cvlan_tag, clr);
	spec->outer_second_svlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, outer_second_svlan_tag, clr);
	spec->inner_second_svlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, inner_second_svlan_tag, clr);
	spec->gre_protocol = IFC_GET_CLR(fte_match_set_misc, mask, gre_protocol, clr);

	spec->gre_key_h = IFC_GET_CLR(fte_match_set_misc, mask, gre_key.nvgre.hi, clr);
	spec->gre_key_l = IFC_GET_CLR(fte_match_set_misc, mask, gre_key.nvgre.lo, clr);

	spec->vxlan_vni = IFC_GET_CLR(fte_match_set_misc, mask, vxlan_vni, clr);

	spec->geneve_vni = IFC_GET_CLR(fte_match_set_misc, mask, geneve_vni, clr);
	spec->geneve_oam = IFC_GET_CLR(fte_match_set_misc, mask, geneve_oam, clr);

	spec->outer_ipv6_flow_label =
		IFC_GET_CLR(fte_match_set_misc, mask, outer_ipv6_flow_label, clr);

	spec->inner_ipv6_flow_label =
		IFC_GET_CLR(fte_match_set_misc, mask, inner_ipv6_flow_label, clr);

	spec->geneve_opt_len = IFC_GET_CLR(fte_match_set_misc, mask, geneve_opt_len, clr);
	spec->geneve_protocol_type =
		IFC_GET_CLR(fte_match_set_misc, mask, geneve_protocol_type, clr);

	spec->bth_dst_qp = IFC_GET_CLR(fte_match_set_misc, mask, bth_dst_qp, clr);
}
736
/* Copy the fte_match_set_lyr_2_4 (L2-L4 headers) fields from the raw
 * @mask buffer into @spec; when @clr is set, consumed fields are zeroed
 * in @mask. IPv6 addresses are copied as 4 big-endian words and stored
 * as host-endian 32-bit chunks.
 */
static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec, bool clr)
{
	__be32 raw_ip[4];

	spec->smac_47_16 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, smac_47_16, clr);

	spec->smac_15_0 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, smac_15_0, clr);
	spec->ethertype = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ethertype, clr);

	spec->dmac_47_16 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, dmac_47_16, clr);

	spec->dmac_15_0 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, dmac_15_0, clr);
	spec->first_prio = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_prio, clr);
	spec->first_cfi = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_cfi, clr);
	spec->first_vid = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_vid, clr);

	spec->ip_protocol = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_protocol, clr);
	spec->ip_dscp = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_dscp, clr);
	spec->ip_ecn = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_ecn, clr);
	spec->cvlan_tag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, cvlan_tag, clr);
	spec->svlan_tag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, svlan_tag, clr);
	spec->frag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, frag, clr);
	spec->ip_version = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_version, clr);
	spec->tcp_flags = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_flags, clr);
	spec->tcp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_sport, clr);
	spec->tcp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_dport, clr);

	spec->ttl_hoplimit = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ttl_hoplimit, clr);

	spec->udp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_sport, clr);
	spec->udp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_dport, clr);

	/* Source IP: a single 128-bit layout covers both IPv4 and IPv6 */
	memcpy_and_clear(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
					      src_ipv4_src_ipv6.ipv6_layout.ipv6),
			 sizeof(raw_ip), clr);

	spec->src_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->src_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->src_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->src_ip_31_0 = be32_to_cpu(raw_ip[3]);

	memcpy_and_clear(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
					      dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
			 sizeof(raw_ip), clr);

	spec->dst_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->dst_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->dst_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->dst_ip_31_0 = be32_to_cpu(raw_ip[3]);
}
787
/* Copy the fte_match_set_misc2 (MPLS and metadata register) fields from
 * the raw @mask buffer into @spec; when @clr is set, consumed fields
 * are zeroed in @mask.
 */
static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec, bool clr)
{
	spec->outer_first_mpls_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_label, clr);
	spec->outer_first_mpls_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp, clr);
	spec->outer_first_mpls_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos, clr);
	spec->outer_first_mpls_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl, clr);
	spec->inner_first_mpls_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_label, clr);
	spec->inner_first_mpls_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp, clr);
	spec->inner_first_mpls_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos, clr);
	spec->inner_first_mpls_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl, clr);
	spec->outer_first_mpls_over_gre_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label, clr);
	spec->outer_first_mpls_over_gre_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp, clr);
	spec->outer_first_mpls_over_gre_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos, clr);
	spec->outer_first_mpls_over_gre_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl, clr);
	spec->outer_first_mpls_over_udp_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label, clr);
	spec->outer_first_mpls_over_udp_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp, clr);
	spec->outer_first_mpls_over_udp_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos, clr);
	spec->outer_first_mpls_over_udp_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl, clr);
	spec->metadata_reg_c_7 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_7, clr);
	spec->metadata_reg_c_6 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_6, clr);
	spec->metadata_reg_c_5 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_5, clr);
	spec->metadata_reg_c_4 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_4, clr);
	spec->metadata_reg_c_3 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_3, clr);
	spec->metadata_reg_c_2 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_2, clr);
	spec->metadata_reg_c_1 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_1, clr);
	spec->metadata_reg_c_0 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_0, clr);
	spec->metadata_reg_a = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_a, clr);
}
832
/* Copy the fte_match_set_misc3 fields of a device-layout match mask into
 * the SW-steering representation @spec.
 * When @clr is true the fields are presumably also cleared in @mask as they
 * are consumed (IFC_GET_CLR naming) — confirm against the macro definition.
 */
static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec, bool clr)
{
	spec->inner_tcp_seq_num = IFC_GET_CLR(fte_match_set_misc3, mask, inner_tcp_seq_num, clr);
	spec->outer_tcp_seq_num = IFC_GET_CLR(fte_match_set_misc3, mask, outer_tcp_seq_num, clr);
	spec->inner_tcp_ack_num = IFC_GET_CLR(fte_match_set_misc3, mask, inner_tcp_ack_num, clr);
	spec->outer_tcp_ack_num = IFC_GET_CLR(fte_match_set_misc3, mask, outer_tcp_ack_num, clr);
	spec->outer_vxlan_gpe_vni =
		IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_vni, clr);
	spec->outer_vxlan_gpe_next_protocol =
		IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol, clr);
	spec->outer_vxlan_gpe_flags =
		IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_flags, clr);
	/* The device exposes a single ICMPv4 header-data/type/code set named
	 * "icmp_*"; SW steering stores it under the icmpv4_* names.
	 */
	spec->icmpv4_header_data = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_header_data, clr);
	spec->icmpv6_header_data =
		IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_header_data, clr);
	spec->icmpv4_type = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_type, clr);
	spec->icmpv4_code = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_code, clr);
	spec->icmpv6_type = IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_type, clr);
	spec->icmpv6_code = IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_code, clr);
	spec->geneve_tlv_option_0_data =
		IFC_GET_CLR(fte_match_set_misc3, mask, geneve_tlv_option_0_data, clr);
	spec->gtpu_teid = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_teid, clr);
	spec->gtpu_msg_flags = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_msg_flags, clr);
	spec->gtpu_msg_type = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_msg_type, clr);
	spec->gtpu_dw_0 = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_dw_0, clr);
	spec->gtpu_dw_2 = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_dw_2, clr);
	spec->gtpu_first_ext_dw_0 =
		IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_first_ext_dw_0, clr);
}
862
/* Copy the fte_match_set_misc4 (programmable/flex sample fields) of a
 * device-layout match mask into the SW-steering representation @spec.
 * When @clr is true the fields are presumably also cleared in @mask as they
 * are consumed (IFC_GET_CLR naming) — confirm against the macro definition.
 */
static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec, bool clr)
{
	spec->prog_sample_field_id_0 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_0, clr);
	spec->prog_sample_field_value_0 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_0, clr);
	spec->prog_sample_field_id_1 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_1, clr);
	spec->prog_sample_field_value_1 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_1, clr);
	spec->prog_sample_field_id_2 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_2, clr);
	spec->prog_sample_field_value_2 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_2, clr);
	spec->prog_sample_field_id_3 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_3, clr);
	spec->prog_sample_field_value_3 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_3, clr);
}
882
/* Return the buffer a match-parameter section should be parsed from.
 *
 * The caller's mask buffer holds the sections back to back; the buffer may
 * end inside (or before) the section at @offset.  When the section is not
 * fully present, the available bytes are copied into @tail_param, which is
 * zeroed first so the parser always sees @param_sz valid, zero-padded bytes
 * (and never stale bytes from a previous partial section).
 *
 * Guarding the subtraction also prevents the size_t underflow the open-coded
 * version had: with data_sz < offset, "data_sz - offset" wrapped to a huge
 * memcpy length and read/wrote far out of bounds.
 */
static void *dr_ste_get_section_buff(u8 *data, size_t data_sz,
				     size_t offset, size_t param_sz,
				     u8 *tail_param)
{
	if (data_sz >= offset + param_sz)
		return data + offset;

	memset(tail_param, 0, param_sz);
	if (data_sz > offset)
		memcpy(tail_param, data + offset, data_sz - offset);

	return tail_param;
}

/* Parse the caller-provided match mask buffer into @set_param.
 *
 * @match_criteria: bitmap of DR_MATCHER_CRITERIA_* sections present.
 * @set_param:      SW-steering match parameters to fill.
 * @mask:           raw device-layout mask buffer and its size in bytes.
 * @clr:            forwarded to the per-section copy helpers (IFC_GET_CLR).
 *
 * Sections are laid out consecutively: outer spec, misc, inner spec, misc2,
 * misc3, misc4.  A truncated trailing section is parsed zero-padded.
 */
void mlx5dr_ste_copy_param(u8 match_criteria,
			   struct mlx5dr_match_param *set_param,
			   struct mlx5dr_match_parameters *mask,
			   bool clr)
{
	u8 tail_param[MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)] = {};
	u8 *data = (u8 *)mask->match_buf;
	size_t param_location;
	void *buff;

	if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
		buff = dr_ste_get_section_buff(data, mask->match_sz, 0,
					       sizeof(struct mlx5dr_match_spec),
					       tail_param);
		dr_ste_copy_mask_spec(buff, &set_param->outer, clr);
	}
	param_location = sizeof(struct mlx5dr_match_spec);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
		buff = dr_ste_get_section_buff(data, mask->match_sz,
					       param_location,
					       sizeof(struct mlx5dr_match_misc),
					       tail_param);
		dr_ste_copy_mask_misc(buff, &set_param->misc, clr);
	}
	param_location += sizeof(struct mlx5dr_match_misc);

	if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
		buff = dr_ste_get_section_buff(data, mask->match_sz,
					       param_location,
					       sizeof(struct mlx5dr_match_spec),
					       tail_param);
		dr_ste_copy_mask_spec(buff, &set_param->inner, clr);
	}
	param_location += sizeof(struct mlx5dr_match_spec);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
		buff = dr_ste_get_section_buff(data, mask->match_sz,
					       param_location,
					       sizeof(struct mlx5dr_match_misc2),
					       tail_param);
		dr_ste_copy_mask_misc2(buff, &set_param->misc2, clr);
	}
	param_location += sizeof(struct mlx5dr_match_misc2);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
		buff = dr_ste_get_section_buff(data, mask->match_sz,
					       param_location,
					       sizeof(struct mlx5dr_match_misc3),
					       tail_param);
		dr_ste_copy_mask_misc3(buff, &set_param->misc3, clr);
	}
	param_location += sizeof(struct mlx5dr_match_misc3);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
		buff = dr_ste_get_section_buff(data, mask->match_sz,
					       param_location,
					       sizeof(struct mlx5dr_match_misc4),
					       tail_param);
		dr_ste_copy_mask_misc4(buff, &set_param->misc4, clr);
	}
}
970
/* Prepare @sb for building the L2 source+destination MAC STE: record the
 * header layer and rule direction, then hand off to the format-specific
 * init callback.
 */
void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask,
				     bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l2_src_dst_init(sb, mask);
}
980
/* Prepare @sb for building the IPv6 destination-address STE: record the
 * header layer and rule direction, then hand off to the format-specific
 * init callback.
 */
void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
				      struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l3_ipv6_dst_init(sb, mask);
}
990
/* Prepare @sb for building the IPv6 source-address STE: record the header
 * layer and rule direction, then hand off to the format-specific init
 * callback.
 */
void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
				      struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l3_ipv6_src_init(sb, mask);
}
1000
/* Prepare @sb for building the IPv4 5-tuple STE: record the header layer
 * and rule direction, then hand off to the format-specific init callback.
 */
void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
					  struct mlx5dr_ste_build *sb,
					  struct mlx5dr_match_param *mask,
					  bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l3_ipv4_5_tuple_init(sb, mask);
}
1010
/* Prepare @sb for building the L2 source STE: record the header layer and
 * rule direction, then hand off to the format-specific init callback.
 */
void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l2_src_init(sb, mask);
}
1020
/* Prepare @sb for building the L2 destination STE: record the header layer
 * and rule direction, then hand off to the format-specific init callback.
 */
void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l2_dst_init(sb, mask);
}
1030
/* Prepare @sb for building the L2 tunneling STE: record the header layer
 * and rule direction, then hand off to the format-specific init callback.
 */
void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask, bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l2_tnl_init(sb, mask);
}
1039
/* Prepare @sb for building the IPv4 misc-fields STE: record the header
 * layer and rule direction, then hand off to the format-specific init
 * callback.
 */
void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
				       struct mlx5dr_ste_build *sb,
				       struct mlx5dr_match_param *mask,
				       bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l3_ipv4_misc_init(sb, mask);
}
1049
/* Prepare @sb for building the IPv6 L3/L4 STE: record the header layer and
 * rule direction, then hand off to the format-specific init callback.
 */
void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask,
				     bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_ipv6_l3_l4_init(sb, mask);
}
1059
/* Tag builder for the "always hit" STE: nothing is matched, so there is
 * nothing to write into the tag.  Always succeeds.
 */
static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
					     struct mlx5dr_ste_build *sb,
					     u8 *tag)
{
	return 0;
}
1066
/* Configure @sb as a catch-all builder: a don't-care lookup type with an
 * empty byte mask, so the resulting STE hits for every packet.
 */
void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
{
	sb->rx = rx;
	sb->byte_mask = 0;
	sb->lu_type = MLX5DR_STE_LU_TYPE_DONT_CARE;
	sb->ste_build_tag_func = dr_ste_build_empty_always_hit_tag;
}
1074
/* Prepare @sb for building the MPLS STE: record the header layer and rule
 * direction, then hand off to the format-specific init callback.
 */
void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
			   struct mlx5dr_ste_build *sb,
			   struct mlx5dr_match_param *mask,
			   bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_mpls_init(sb, mask);
}
1084
/* Prepare @sb for building the GRE tunnel STE: record the header layer and
 * rule direction, then hand off to the format-specific init callback.
 */
void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
			      struct mlx5dr_ste_build *sb,
			      struct mlx5dr_match_param *mask,
			      bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_tnl_gre_init(sb, mask);
}
1094
/* Prepare @sb for building the MPLS-over-GRE tunnel STE: record the header
 * layer, rule direction and device caps, then hand off to the
 * format-specific init callback.
 *
 * Note: the stray "return <void expression>;" was dropped — it is a C11
 * constraint violation in a void function and inconsistent with every
 * sibling builder wrapper.
 */
void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
					struct mlx5dr_ste_build *sb,
					struct mlx5dr_match_param *mask,
					struct mlx5dr_cmd_caps *caps,
					bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	sb->caps = caps;
	ste_ctx->build_tnl_mpls_over_gre_init(sb, mask);
}
1106
/* Prepare @sb for building the MPLS-over-UDP tunnel STE: record the header
 * layer, rule direction and device caps, then hand off to the
 * format-specific init callback.
 *
 * Note: the stray "return <void expression>;" was dropped — it is a C11
 * constraint violation in a void function and inconsistent with every
 * sibling builder wrapper.
 */
void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx,
					struct mlx5dr_ste_build *sb,
					struct mlx5dr_match_param *mask,
					struct mlx5dr_cmd_caps *caps,
					bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	sb->caps = caps;
	ste_ctx->build_tnl_mpls_over_udp_init(sb, mask);
}
1118
/* Prepare @sb for building the ICMP STE: record the header layer, rule
 * direction and device caps, then hand off to the format-specific init
 * callback.
 */
void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
			   struct mlx5dr_ste_build *sb,
			   struct mlx5dr_match_param *mask,
			   struct mlx5dr_cmd_caps *caps,
			   bool inner, bool rx)
{
	sb->caps = caps;
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_icmp_init(sb, mask);
}
1130
/* Prepare @sb for building the general-purpose STE: record the header
 * layer and rule direction, then hand off to the format-specific init
 * callback.
 */
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
				      struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_general_purpose_init(sb, mask);
}
1140
/* Prepare @sb for building the L4 misc-fields STE: record the header layer
 * and rule direction, then hand off to the format-specific init callback.
 */
void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
				  struct mlx5dr_ste_build *sb,
				  struct mlx5dr_match_param *mask,
				  bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l4_misc_init(sb, mask);
}
1150
/* Prepare @sb for building the VXLAN-GPE tunnel STE: record the header
 * layer and rule direction, then hand off to the format-specific init
 * callback.
 */
void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
				    struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask,
				    bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_tnl_vxlan_gpe_init(sb, mask);
}
1160
/* Prepare @sb for building the GENEVE tunnel STE: record the header layer
 * and rule direction, then hand off to the format-specific init callback.
 */
void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_tnl_geneve_init(sb, mask);
}
1170
/* Prepare @sb for building the GENEVE TLV-option STE: record the header
 * layer, rule direction and device caps, then hand off to the
 * format-specific init callback.
 */
void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
					 struct mlx5dr_ste_build *sb,
					 struct mlx5dr_match_param *mask,
					 struct mlx5dr_cmd_caps *caps,
					 bool inner, bool rx)
{
	sb->inner = inner;
	sb->caps = caps;
	sb->rx = rx;
	ste_ctx->build_tnl_geneve_tlv_opt_init(sb, mask);
}
1182
/* Prepare @sb for building the GTP-U tunnel STE: record the header layer
 * and rule direction, then hand off to the format-specific init callback.
 */
void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_ste_build *sb,
			       struct mlx5dr_match_param *mask,
			       bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_tnl_gtpu_init(sb, mask);
}
1192
/* Prepare @sb for building the GTP-U flex-parser-0 STE: record the header
 * layer, rule direction and device caps, then hand off to the
 * format-specific init callback.
 */
void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
					     struct mlx5dr_ste_build *sb,
					     struct mlx5dr_match_param *mask,
					     struct mlx5dr_cmd_caps *caps,
					     bool inner, bool rx)
{
	sb->inner = inner;
	sb->caps = caps;
	sb->rx = rx;
	ste_ctx->build_tnl_gtpu_flex_parser_0_init(sb, mask);
}
1204
/* Prepare @sb for building the GTP-U flex-parser-1 STE: record the header
 * layer, rule direction and device caps, then hand off to the
 * format-specific init callback.
 */
void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
					     struct mlx5dr_ste_build *sb,
					     struct mlx5dr_match_param *mask,
					     struct mlx5dr_cmd_caps *caps,
					     bool inner, bool rx)
{
	sb->inner = inner;
	sb->caps = caps;
	sb->rx = rx;
	ste_ctx->build_tnl_gtpu_flex_parser_1_init(sb, mask);
}
1216
/* Prepare @sb for building the metadata-register-0 STE: record the header
 * layer and rule direction, then hand off to the format-specific init
 * callback.
 */
void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_register_0_init(sb, mask);
}
1226
/* Prepare @sb for building the metadata-register-1 STE: record the header
 * layer and rule direction, then hand off to the format-specific init
 * callback.
 */
void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_register_1_init(sb, mask);
}
1236
/* Prepare @sb for building the source GVMI/QPN STE: record direction,
 * domain and header layer, then hand off to the format-specific init
 * callback.
 */
void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
				   struct mlx5dr_ste_build *sb,
				   struct mlx5dr_match_param *mask,
				   struct mlx5dr_domain *dmn,
				   bool inner, bool rx)
{
	/* Latch vhca_id_valid first: the init callback below resets
	 * source_eswitch_owner_vhca_id in the mask.
	 */
	sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;

	sb->inner = inner;
	sb->dmn = dmn;
	sb->rx = rx;
	ste_ctx->build_src_gvmi_qpn_init(sb, mask);
}
1251
/* Prepare @sb for building the flex-parser-0 STE: record the header layer
 * and rule direction, then hand off to the format-specific init callback.
 */
void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
				    struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask,
				    bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_flex_parser_0_init(sb, mask);
}
1261
/* Prepare @sb for building the flex-parser-1 STE: record the header layer
 * and rule direction, then hand off to the format-specific init callback.
 */
void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
				    struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask,
				    bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_flex_parser_1_init(sb, mask);
}
1271
/* Per-device STE format contexts, indexed by MLX5_STEERING_FORMAT_* value. */
static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
	[MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0,
	[MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1,
};
1276
/* Return the STE format context matching the device's steering format
 * @version, or NULL when the version is not supported by this driver.
 */
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
{
	/* Bound the lookup by the context table itself, so adding a new
	 * format version to the table cannot leave this check stale and
	 * index out of bounds.
	 */
	if (version >= sizeof(mlx5dr_ste_ctx_arr) / sizeof(mlx5dr_ste_ctx_arr[0]))
		return NULL;

	return mlx5dr_ste_ctx_arr[version];
}
1284