1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2 /* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
3 
4 #ifndef ACTION_STE_POOL_H_
5 #define ACTION_STE_POOL_H_
6 
/* log2 sizes controlling how action STE tables grow: start at 2^10 entries,
 * grow in 2^1 steps, never exceed 2^20 entries per table.
 */
#define MLX5HWS_ACTION_STE_TABLE_INIT_LOG_SZ 10
#define MLX5HWS_ACTION_STE_TABLE_STEP_LOG_SZ 1
#define MLX5HWS_ACTION_STE_TABLE_MAX_LOG_SZ 20

/* Garbage collection timing: how often cleanup runs, and how long a table
 * must sit unused before it is considered expired and eligible for freeing.
 * NOTE(review): exact GC semantics live in the .c file — confirm there.
 */
#define MLX5HWS_ACTION_STE_POOL_CLEANUP_SECONDS 300
#define MLX5HWS_ACTION_STE_POOL_EXPIRE_SECONDS 300
13 
14 struct mlx5hws_action_ste_pool_element;
15 
/* One action STE table: an RTC pair plus the STE range backing it, and the
 * STC that match STEs use to jump into this table. Tables are linked on
 * their parent element's `available` or `full` list (see
 * struct mlx5hws_action_ste_pool_element).
 */
struct mlx5hws_action_ste_table {
	/* The pool element (per-optimization bucket) this table belongs to. */
	struct mlx5hws_action_ste_pool_element *parent_elem;
	/* Wraps the RTC and STE range for this given action. */
	struct mlx5hws_pool *pool;
	/* Match STEs use this STC to jump to this pool's RTC. */
	struct mlx5hws_pool_chunk stc;
	/* IDs of the two RTCs — presumably RX/TX mirrors, matching the
	 * skip_rx/skip_tx parameters of mlx5hws_action_ste_chunk_alloc().
	 * TODO(review): confirm against the allocation code.
	 */
	u32 rtc_0_id;
	u32 rtc_1_id;
	/* Node on the parent element's `available` or `full` list. */
	struct list_head list_node;
	/* Timestamp of last use, consulted by the periodic garbage
	 * collection (cf. MLX5HWS_ACTION_STE_POOL_EXPIRE_SECONDS).
	 */
	unsigned long last_used;
};
27 
/* Per-optimization-flavor bucket inside an action STE pool. Tracks all the
 * action STE tables created for this flavor, split into those with free
 * space (`available`) and those with none (`full`).
 */
struct mlx5hws_action_ste_pool_element {
	/* Owning HWS context. */
	struct mlx5hws_context *ctx;
	/* The per-queue pool this element is embedded in. */
	struct mlx5hws_action_ste_pool *parent_pool;
	size_t log_sz;  /* Size of the largest table so far. */
	/* Which pool optimization this element represents; also the index
	 * of this element in parent_pool->elems[].
	 */
	enum mlx5hws_pool_optimize opt;
	/* Tables that still have free STEs to hand out. */
	struct list_head available;
	/* Tables with no free STEs left. */
	struct list_head full;
};
36 
/* Central repository of action STEs. The context contains one of these pools
 * per queue.
 */
struct mlx5hws_action_ste_pool {
	/* Protects the entire pool. We have one pool per queue and only one
	 * operation can be active per rule at a given time. Thus this lock
	 * protects solely against concurrent garbage collection and we expect
	 * very little contention.
	 */
	struct mutex lock;
	/* One element per pool optimization flavor. */
	struct mlx5hws_action_ste_pool_element elems[MLX5HWS_POOL_OPTIMIZE_MAX];
};
49 
/* A chunk of STEs and the table it was allocated from. Used by rules. */
struct mlx5hws_action_ste_chunk {
	/* Table that backs this chunk; needed to return the STEs on free. */
	struct mlx5hws_action_ste_table *action_tbl;
	/* The allocated STE range within action_tbl. */
	struct mlx5hws_pool_chunk ste;
};
55 
/* Allocate and initialize the context's per-queue action STE pools.
 * Returns 0 on success or a negative errno on failure.
 */
int mlx5hws_action_ste_pool_init(struct mlx5hws_context *ctx);

/* Tear down the pools created by mlx5hws_action_ste_pool_init(). */
void mlx5hws_action_ste_pool_uninit(struct mlx5hws_context *ctx);

/* Allocate a chunk of action STEs from @pool.
 *
 * Callers are expected to fill chunk->ste.order. On success, this function
 * populates chunk->action_tbl and chunk->ste.offset.
 *
 * @skip_rx / @skip_tx request skipping the RX/TX direction — presumably
 * selecting which RTC the chunk is wired to; confirm in the implementation.
 * Returns 0 on success or a negative errno on failure.
 */
int mlx5hws_action_ste_chunk_alloc(struct mlx5hws_action_ste_pool *pool,
				   bool skip_rx, bool skip_tx,
				   struct mlx5hws_action_ste_chunk *chunk);

/* Return @chunk's STEs to the table they were allocated from. */
void mlx5hws_action_ste_chunk_free(struct mlx5hws_action_ste_chunk *chunk);
68 
69 #endif /* ACTION_STE_POOL_H_ */
70