// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/mlx5/eswitch.h>
#include <linux/err.h>
#include "dr_types.h"

#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
        ((dmn)->info.caps.dmn_type##_sw_owner || \
         ((dmn)->info.caps.dmn_type##_sw_owner_v2 && \
          (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_7))

static void dr_domain_init_csum_recalc_fts(struct mlx5dr_domain *dmn)
{
        /* Per vport cached FW FT for checksum recalculation, this
         * recalculation is needed due to a HW bug in STEv0.
         */
        xa_init(&dmn->csum_fts_xa);
}

static void dr_domain_uninit_csum_recalc_fts(struct mlx5dr_domain *dmn)
{
        struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
        unsigned long i;

        xa_for_each(&dmn->csum_fts_xa, i, recalc_cs_ft) {
                if (recalc_cs_ft)
                        mlx5dr_fw_destroy_recalc_cs_ft(dmn, recalc_cs_ft);
        }

        xa_destroy(&dmn->csum_fts_xa);
}

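/* Get the RX ICM address of the per-vport checksum-recalculation FW flow
 * table, creating and caching the table on first use for that vport.
 */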
int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
                                        u16 vport_num,
                                        u64 *rx_icm_addr)
{
        struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
        int ret;

        recalc_cs_ft = xa_load(&dmn->csum_fts_xa, vport_num);
        if (!recalc_cs_ft) {
                /* Table hasn't been created yet */
                recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num);
                if (!recalc_cs_ft)
                        return -EINVAL;

                ret = xa_err(xa_store(&dmn->csum_fts_xa, vport_num,
                                      recalc_cs_ft, GFP_KERNEL));
                if (ret)
                        return ret;
        }

        *rx_icm_addr = recalc_cs_ft->rx_icm_addr;

        return 0;
}

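/* Create the domain's memory resources: kmem caches for ICM chunks and STE
 * hash tables, the STE and modify-action ICM pools, and the send-info pool.
 */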
static int dr_domain_init_mem_resources(struct mlx5dr_domain *dmn)
{
        int ret;

        dmn->chunks_kmem_cache = kmem_cache_create("mlx5_dr_chunks",
                                                   sizeof(struct mlx5dr_icm_chunk), 0,
                                                   SLAB_HWCACHE_ALIGN, NULL);
        if (!dmn->chunks_kmem_cache) {
                mlx5dr_err(dmn, "Couldn't create chunks kmem_cache\n");
                return -ENOMEM;
        }

        dmn->htbls_kmem_cache = kmem_cache_create("mlx5_dr_htbls",
                                                  sizeof(struct mlx5dr_ste_htbl), 0,
                                                  SLAB_HWCACHE_ALIGN, NULL);
        if (!dmn->htbls_kmem_cache) {
                mlx5dr_err(dmn, "Couldn't create hash tables kmem_cache\n");
                ret = -ENOMEM;
                goto free_chunks_kmem_cache;
        }

        dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
        if (!dmn->ste_icm_pool) {
                mlx5dr_err(dmn, "Couldn't get icm memory\n");
                ret = -ENOMEM;
                goto free_htbls_kmem_cache;
        }

        dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION);
        if (!dmn->action_icm_pool) {
                mlx5dr_err(dmn, "Couldn't get action icm memory\n");
                ret = -ENOMEM;
                goto free_ste_icm_pool;
        }

        ret = mlx5dr_send_info_pool_create(dmn);
        if (ret) {
                mlx5dr_err(dmn, "Couldn't create send info pool\n");
                goto free_action_icm_pool;
        }

        return 0;

free_action_icm_pool:
        mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
free_ste_icm_pool:
        mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
free_htbls_kmem_cache:
        kmem_cache_destroy(dmn->htbls_kmem_cache);
free_chunks_kmem_cache:
        kmem_cache_destroy(dmn->chunks_kmem_cache);

        return ret;
}

static void dr_domain_uninit_mem_resources(struct mlx5dr_domain *dmn)
{
        mlx5dr_send_info_pool_destroy(dmn);
        mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
        mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
        kmem_cache_destroy(dmn->htbls_kmem_cache);
        kmem_cache_destroy(dmn->chunks_kmem_cache);
}

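/* Allocate the per-domain resources: the STE context matching the device's
 * steering format version, a PD, a UAR page, the memory resources above and
 * the send ring.
 */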
static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
{
        int ret;

        dmn->ste_ctx = mlx5dr_ste_get_ctx(dmn->info.caps.sw_format_ver);
        if (!dmn->ste_ctx) {
                mlx5dr_err(dmn, "SW Steering on this device is unsupported\n");
                return -EOPNOTSUPP;
        }

        ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
        if (ret) {
                mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d", ret);
                return ret;
        }

        dmn->uar = mlx5_get_uars_page(dmn->mdev);
        if (IS_ERR(dmn->uar)) {
                mlx5dr_err(dmn, "Couldn't allocate UAR\n");
                ret = PTR_ERR(dmn->uar);
                goto clean_pd;
        }

        ret = dr_domain_init_mem_resources(dmn);
        if (ret) {
                mlx5dr_err(dmn, "Couldn't create domain memory resources\n");
                goto clean_uar;
        }

        ret = mlx5dr_send_ring_alloc(dmn);
        if (ret) {
                mlx5dr_err(dmn, "Couldn't create send-ring\n");
                goto clean_mem_resources;
        }

        return 0;

clean_mem_resources:
        dr_domain_uninit_mem_resources(dmn);
clean_uar:
        mlx5_put_uars_page(dmn->mdev, dmn->uar);
clean_pd:
        mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);

        return ret;
}

static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
{
        mlx5dr_send_ring_free(dmn, dmn->send_ring);
        dr_domain_uninit_mem_resources(dmn);
        mlx5_put_uars_page(dmn->mdev, dmn->uar);
        mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
}

static void dr_domain_fill_uplink_caps(struct mlx5dr_domain *dmn,
                                       struct mlx5dr_cmd_vport_cap *uplink_vport)
{
        struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;

        uplink_vport->num = MLX5_VPORT_UPLINK;
        uplink_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
        uplink_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
        uplink_vport->vport_gvmi = 0;
        uplink_vport->vhca_gvmi = dmn->info.caps.gvmi;
}

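/* Query a vport's RX/TX ICM addresses and GVMI from FW into vport_caps */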
static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
                                 u16 vport_number,
                                 bool other_vport,
                                 struct mlx5dr_cmd_vport_cap *vport_caps)
{
        int ret;

        ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
                                                 other_vport,
                                                 vport_number,
                                                 &vport_caps->icm_address_rx,
                                                 &vport_caps->icm_address_tx);
        if (ret)
                return ret;

        ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
                                    other_vport,
                                    vport_number,
                                    &vport_caps->vport_gvmi);
        if (ret)
                return ret;

        vport_caps->num = vport_number;
        vport_caps->vhca_gvmi = dmn->info.caps.gvmi;

        return 0;
}

static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn)
{
        return dr_domain_query_vport(dmn, 0, false,
                                     &dmn->info.caps.vports.esw_manager_caps);
}

static void dr_domain_query_uplink(struct mlx5dr_domain *dmn)
{
        dr_domain_fill_uplink_caps(dmn, &dmn->info.caps.vports.uplink_caps);
}

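/* Query the caps of a vport that isn't cached yet and insert them into the
 * vports xarray. Returns NULL on query/allocation failure, or ERR_PTR(-EBUSY)
 * if another thread inserted the entry first.
 */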
static struct mlx5dr_cmd_vport_cap *
dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
{
        struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
        struct mlx5dr_cmd_vport_cap *vport_caps;
        int ret;

        vport_caps = kvzalloc(sizeof(*vport_caps), GFP_KERNEL);
        if (!vport_caps)
                return NULL;

        ret = dr_domain_query_vport(dmn, vport, true, vport_caps);
        if (ret) {
                kvfree(vport_caps);
                return NULL;
        }

        ret = xa_insert(&caps->vports.vports_caps_xa, vport,
                        vport_caps, GFP_KERNEL);
        if (ret) {
                mlx5dr_dbg(dmn, "Couldn't insert new vport into xarray (%d)\n", ret);
                kvfree(vport_caps);
                return ERR_PTR(ret);
        }

        return vport_caps;
}

static bool dr_domain_is_esw_mgr_vport(struct mlx5dr_domain *dmn, u16 vport)
{
        struct mlx5dr_cmd_caps *caps = &dmn->info.caps;

        return (caps->is_ecpf && vport == MLX5_VPORT_ECPF) ||
               (!caps->is_ecpf && vport == 0);
}

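/* Return the caps for the given vport: eswitch manager and uplink caps are
 * kept on the domain, other vports are queried on demand and cached in the
 * vports xarray.
 */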
struct mlx5dr_cmd_vport_cap *
mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
{
        struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
        struct mlx5dr_cmd_vport_cap *vport_caps;

        if (dr_domain_is_esw_mgr_vport(dmn, vport))
                return &caps->vports.esw_manager_caps;

        if (vport == MLX5_VPORT_UPLINK)
                return &caps->vports.uplink_caps;

vport_load:
        vport_caps = xa_load(&caps->vports.vports_caps_xa, vport);
        if (vport_caps)
                return vport_caps;

        vport_caps = dr_domain_add_vport_cap(dmn, vport);
        if (PTR_ERR(vport_caps) == -EBUSY)
                /* caps were already stored by another thread */
                goto vport_load;

        return vport_caps;
}

static void dr_domain_clear_vports(struct mlx5dr_domain *dmn)
{
        struct mlx5dr_cmd_vport_cap *vport_caps;
        unsigned long i;

        xa_for_each(&dmn->info.caps.vports.vports_caps_xa, i, vport_caps) {
                vport_caps = xa_erase(&dmn->info.caps.vports.vports_caps_xa, i);
                kvfree(vport_caps);
        }
}

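/* Query FDB-related caps: eswitch caps, FDB SW-owner bits, eswitch drop ICM
 * addresses, and the eswitch manager and uplink vport caps.
 */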
static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
                                    struct mlx5dr_domain *dmn)
{
        int ret;

        if (!dmn->info.caps.eswitch_manager)
                return -EOPNOTSUPP;

        ret = mlx5dr_cmd_query_esw_caps(mdev, &dmn->info.caps.esw_caps);
        if (ret)
                return ret;

        dmn->info.caps.fdb_sw_owner = dmn->info.caps.esw_caps.sw_owner;
        dmn->info.caps.fdb_sw_owner_v2 = dmn->info.caps.esw_caps.sw_owner_v2;
        dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
        dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;

        xa_init(&dmn->info.caps.vports.vports_caps_xa);

        /* Query eswitch manager and uplink vports only. Rest of the
         * vports (vport 0, VFs and SFs) will be queried dynamically.
         */

        ret = dr_domain_query_esw_mngr(dmn);
        if (ret) {
                mlx5dr_err(dmn, "Failed to query eswitch manager vport caps (err: %d)", ret);
                goto free_vports_caps_xa;
        }

        dr_domain_query_uplink(dmn);

        return 0;

free_vports_caps_xa:
        xa_destroy(&dmn->info.caps.vports.vports_caps_xa);

        return ret;
}

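/* Query device and FDB caps and set the per-domain-type defaults:
 * default/drop ICM addresses and SW steering support.
 */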
static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
                               struct mlx5dr_domain *dmn)
{
        struct mlx5dr_cmd_vport_cap *vport_cap;
        int ret;

        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) {
                mlx5dr_err(dmn, "Failed to allocate domain, bad link type\n");
                return -EOPNOTSUPP;
        }

        ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps);
        if (ret)
                return ret;

        ret = dr_domain_query_fdb_caps(mdev, dmn);
        if (ret)
                return ret;

        switch (dmn->type) {
        case MLX5DR_DOMAIN_TYPE_NIC_RX:
                if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx))
                        return -ENOTSUPP;

                dmn->info.supp_sw_steering = true;
                dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
                dmn->info.rx.default_icm_addr = dmn->info.caps.nic_rx_drop_address;
                dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address;
                break;
        case MLX5DR_DOMAIN_TYPE_NIC_TX:
                if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, tx))
                        return -ENOTSUPP;

                dmn->info.supp_sw_steering = true;
                dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
                dmn->info.tx.default_icm_addr = dmn->info.caps.nic_tx_allow_address;
                dmn->info.tx.drop_icm_addr = dmn->info.caps.nic_tx_drop_address;
                break;
        case MLX5DR_DOMAIN_TYPE_FDB:
                if (!dmn->info.caps.eswitch_manager)
                        return -ENOTSUPP;

                if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb))
                        return -ENOTSUPP;

                dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
                dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
                vport_cap = &dmn->info.caps.vports.esw_manager_caps;

                dmn->info.supp_sw_steering = true;
                dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx;
                dmn->info.rx.default_icm_addr = vport_cap->icm_address_rx;
                dmn->info.rx.drop_icm_addr = dmn->info.caps.esw_rx_drop_address;
                dmn->info.tx.drop_icm_addr = dmn->info.caps.esw_tx_drop_address;
                break;
        default:
                mlx5dr_err(dmn, "Invalid domain\n");
                ret = -EINVAL;
                break;
        }

        return ret;
}

static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn)
{
        dr_domain_clear_vports(dmn);
        xa_destroy(&dmn->info.caps.vports.vports_caps_xa);
}

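/* Create an SW steering domain of the given type (NIC RX, NIC TX or FDB)
 * over mdev. Returns the new domain, or NULL on failure.
 */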
struct mlx5dr_domain *
mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
{
        struct mlx5dr_domain *dmn;
        int ret;

        if (type > MLX5DR_DOMAIN_TYPE_FDB)
                return NULL;

        dmn = kzalloc(sizeof(*dmn), GFP_KERNEL);
        if (!dmn)
                return NULL;

        dmn->mdev = mdev;
        dmn->type = type;
        refcount_set(&dmn->refcount, 1);
        mutex_init(&dmn->info.rx.mutex);
        mutex_init(&dmn->info.tx.mutex);
        xa_init(&dmn->definers_xa);

        if (dr_domain_caps_init(mdev, dmn)) {
                mlx5dr_err(dmn, "Failed init domain, no caps\n");
                goto def_xa_destroy;
        }

        dmn->info.max_log_action_icm_sz = DR_CHUNK_SIZE_4K;
        dmn->info.max_log_sw_icm_sz = min_t(u32, DR_CHUNK_SIZE_1024K,
                                            dmn->info.caps.log_icm_size);

        if (!dmn->info.supp_sw_steering) {
                mlx5dr_err(dmn, "SW steering is not supported\n");
                goto uninit_caps;
        }

        /* Allocate resources */
        ret = dr_domain_init_resources(dmn);
        if (ret) {
                mlx5dr_err(dmn, "Failed init domain resources\n");
                goto uninit_caps;
        }

        dr_domain_init_csum_recalc_fts(dmn);
        mlx5dr_dbg_init_dump(dmn);
        return dmn;

uninit_caps:
        dr_domain_caps_uninit(dmn);
def_xa_destroy:
        xa_destroy(&dmn->definers_xa);
        kfree(dmn);
        return NULL;
}

/* Assure synchronization of the device steering tables with updates made by SW
 * insertion.
 */
int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
{
        int ret = 0;

        if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
                mlx5dr_domain_lock(dmn);
                ret = mlx5dr_send_ring_force_drain(dmn);
                mlx5dr_domain_unlock(dmn);
                if (ret) {
                        mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n",
                                   flags, ret);
                        return ret;
                }
        }

        if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
                ret = mlx5dr_cmd_sync_steering(dmn->mdev);

        return ret;
}

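/* Destroy the domain and release its resources. Fails with -EBUSY if the
 * domain is still referenced.
 */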
int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
{
        if (WARN_ON_ONCE(refcount_read(&dmn->refcount) > 1))
                return -EBUSY;

        /* make sure resources are not used by the hardware */
        mlx5dr_cmd_sync_steering(dmn->mdev);
        mlx5dr_dbg_uninit_dump(dmn);
        dr_domain_uninit_csum_recalc_fts(dmn);
        dr_domain_uninit_resources(dmn);
        dr_domain_caps_uninit(dmn);
        xa_destroy(&dmn->definers_xa);
        mutex_destroy(&dmn->info.tx.mutex);
        mutex_destroy(&dmn->info.rx.mutex);
        kfree(dmn);
        return 0;
}

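/* Replace the domain's peer domain, dropping the reference on the previous
 * peer (if any) and taking one on the new peer (which may be NULL).
 */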
void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
                            struct mlx5dr_domain *peer_dmn)
{
        mlx5dr_domain_lock(dmn);

        if (dmn->peer_dmn)
                refcount_dec(&dmn->peer_dmn->refcount);

        dmn->peer_dmn = peer_dmn;

        if (dmn->peer_dmn)
                refcount_inc(&dmn->peer_dmn->refcount);

        mlx5dr_domain_unlock(dmn);
}