// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA CORPORATION. All rights reserved. */

#include "internal.h"

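/* True if the device can reparse selectively, controlled per STC. */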
bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx)
{
	return IS_BIT_SET(ctx->caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_BY_STC);
}

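/* Pick the RTC reparse mode based on the device's reparse capabilities. */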
u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx)
{
	/* Prefer to use dynamic reparse, reparse only specific actions */
	if (mlx5hws_context_cap_dynamic_reparse(ctx))
		return MLX5_IFC_RTC_REPARSE_NEVER;

	/* Otherwise use less efficient static */
	return MLX5_IFC_RTC_REPARSE_ALWAYS;
}

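/* Initialize the per-context caches (pattern, definer) and the STC pool. */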
static int hws_context_pools_init(struct mlx5hws_context *ctx)
{
	struct mlx5hws_pool_attr pool_attr = {0};
	u8 max_log_sz;
	int ret;

	ret = mlx5hws_pat_init_pattern_cache(&ctx->pattern_cache);
	if (ret)
		return ret;

	ret = mlx5hws_definer_init_cache(&ctx->definer_cache);
	if (ret)
		goto uninit_pat_cache;

	/* Create an STC pool per FT type */
	pool_attr.pool_type = MLX5HWS_POOL_TYPE_STC;
	max_log_sz = min(MLX5HWS_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max);
	pool_attr.alloc_log_sz = max(max_log_sz, ctx->caps->stc_alloc_log_gran);

	pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB;
	ctx->stc_pool = mlx5hws_pool_create(ctx, &pool_attr);
	if (!ctx->stc_pool) {
		mlx5hws_err(ctx, "Failed to allocate STC pool\n");
		ret = -ENOMEM;
		goto uninit_cache;
	}

	return 0;

uninit_cache:
	mlx5hws_definer_uninit_cache(ctx->definer_cache);
uninit_pat_cache:
	mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
	return ret;
}

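/* Release the STC pool and the per-context caches. */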
static void hws_context_pools_uninit(struct mlx5hws_context *ctx)
{
	if (ctx->stc_pool)
		mlx5hws_pool_destroy(ctx->stc_pool);

	mlx5hws_definer_uninit_cache(ctx->definer_cache);
	mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
}

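/* Allocate a protection domain owned by this context. */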
static int hws_context_init_pd(struct mlx5hws_context *ctx)
{
	int ret = 0;

	ret = mlx5_core_alloc_pd(ctx->mdev, &ctx->pd_num);
	if (ret) {
		mlx5hws_err(ctx, "Failed to allocate PD\n");
		return ret;
	}

	ctx->flags |= MLX5HWS_CONTEXT_FLAG_PRIVATE_PD;

	return 0;
}

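/* Release the PD only if it was allocated by this context. */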
static int hws_context_uninit_pd(struct mlx5hws_context *ctx)
{
	if (ctx->flags & MLX5HWS_CONTEXT_FLAG_PRIVATE_PD)
		mlx5_core_dealloc_pd(ctx->mdev, ctx->pd_num);

	return 0;
}

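/* Set MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT only if every capability required
 * for hardware steering is reported by the device/FW.
 */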
static void hws_context_check_hws_supp(struct mlx5hws_context *ctx)
{
	struct mlx5hws_cmd_query_caps *caps = ctx->caps;

	/* HWS not supported on device / FW */
	if (!caps->wqe_based_update) {
		mlx5hws_err(ctx, "Required HWS WQE based insertion cap not supported\n");
		return;
	}

	if (!caps->eswitch_manager) {
		mlx5hws_err(ctx, "HWS is not supported for non eswitch manager port\n");
		return;
	}

	/* Current solution requires all rules to set reparse bit */
	if ((!caps->nic_ft.reparse ||
	     (!caps->fdb_ft.reparse && caps->eswitch_manager)) ||
	    !IS_BIT_SET(caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS)) {
		mlx5hws_err(ctx, "Required HWS reparse cap not supported\n");
		return;
	}

	/* FW/HW must support 8DW STE */
	if (!IS_BIT_SET(caps->ste_format, MLX5_IFC_RTC_STE_FORMAT_8DW)) {
		mlx5hws_err(ctx, "Required HWS STE format not supported\n");
		return;
	}

	/* Adding rules by hash and by offset are requirements */
	if (!IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH) ||
	    !IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET)) {
		mlx5hws_err(ctx, "Required HWS RTC update mode not supported\n");
		return;
	}

	/* Support for SELECT definer ID is required */
	if (!IS_BIT_SET(caps->definer_format_sup, MLX5_IFC_DEFINER_FORMAT_ID_SELECT)) {
		mlx5hws_err(ctx, "Required HWS Dynamic definer not supported\n");
		return;
	}

	ctx->flags |= MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT;
}

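/* Set up all HWS resources for the context: PD, pools, send queues and
 * the action STE pool. Does nothing if HWS is not supported.
 */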
static int hws_context_init_hws(struct mlx5hws_context *ctx,
				struct mlx5hws_context_attr *attr)
{
	int ret;

	hws_context_check_hws_supp(ctx);

	if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT))
		return 0;

	ret = hws_context_init_pd(ctx);
	if (ret)
		return ret;

	ret = hws_context_pools_init(ctx);
	if (ret)
		goto uninit_pd;

	/* Context has support for backward compatible API,
	 * and does not have support for native HWS API.
	 */
	ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;

	ret = mlx5hws_send_queues_open(ctx, attr->queues, attr->queue_size);
	if (ret)
		goto pools_uninit;

	ret = mlx5hws_action_ste_pool_init(ctx);
	if (ret)
		goto close_queues;

	INIT_LIST_HEAD(&ctx->tbl_list);

	return 0;

close_queues:
	mlx5hws_send_queues_close(ctx);
pools_uninit:
	hws_context_pools_uninit(ctx);
uninit_pd:
	hws_context_uninit_pd(ctx);
	return ret;
}

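/* Tear down the HWS resources in reverse order of initialization. */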
static void hws_context_uninit_hws(struct mlx5hws_context *ctx)
{
	if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT))
		return;

	mlx5hws_action_ste_pool_uninit(ctx);
	mlx5hws_send_queues_close(ctx);
	hws_context_pools_uninit(ctx);
	hws_context_uninit_pd(ctx);
}

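/* Create a context on the given mlx5 core device: query device caps,
 * init vports and, when the device supports HWS, the HWS resources.
 * Returns NULL on failure.
 */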
struct mlx5hws_context *mlx5hws_context_open(struct mlx5_core_dev *mdev,
					     struct mlx5hws_context_attr *attr)
{
	struct mlx5hws_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->mdev = mdev;

	mutex_init(&ctx->ctrl_lock);
	xa_init(&ctx->peer_ctx_xa);

	ctx->caps = kzalloc(sizeof(*ctx->caps), GFP_KERNEL);
	if (!ctx->caps)
		goto free_ctx;

	ret = mlx5hws_cmd_query_caps(mdev, ctx->caps);
	if (ret)
		goto free_caps;

	ret = mlx5hws_vport_init_vports(ctx);
	if (ret)
		goto free_caps;

	ret = hws_context_init_hws(ctx, attr);
	if (ret)
		goto uninit_vports;

	mlx5hws_debug_init_dump(ctx);

	return ctx;

uninit_vports:
	mlx5hws_vport_uninit_vports(ctx);
free_caps:
	kfree(ctx->caps);
free_ctx:
	xa_destroy(&ctx->peer_ctx_xa);
	mutex_destroy(&ctx->ctrl_lock);
	kfree(ctx);
	return NULL;
}

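/* Destroy a context created by mlx5hws_context_open() and all its resources. */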
int mlx5hws_context_close(struct mlx5hws_context *ctx)
{
	mlx5hws_debug_uninit_dump(ctx);
	hws_context_uninit_hws(ctx);
	mlx5hws_vport_uninit_vports(ctx);
	kfree(ctx->caps);
	xa_destroy(&ctx->peer_ctx_xa);
	mutex_destroy(&ctx->ctrl_lock);
	kfree(ctx);
	return 0;
}

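/* Record the peer context for the given peer vhca ID under ctrl_lock. */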
void mlx5hws_context_set_peer(struct mlx5hws_context *ctx,
			      struct mlx5hws_context *peer_ctx,
			      u16 peer_vhca_id)
{
	mutex_lock(&ctx->ctrl_lock);

	if (xa_err(xa_store(&ctx->peer_ctx_xa, peer_vhca_id, peer_ctx, GFP_KERNEL)))
		pr_warn("HWS: failed storing peer vhca ID in peer xarray\n");

	mutex_unlock(&ctx->ctrl_lock);
}