/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/mlx5_ifc_vdpa.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"

/* intf dev list mutex */
static DEFINE_MUTEX(mlx5_intf_mutex);
static DEFINE_IDA(mlx5_adev_ida);

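/* Ethernet representors require eswitch support compiled in, a device
 * that manages the eswitch, and the eswitch to be in switchdev (rather
 * than legacy) mode.
 */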
static bool is_eth_rep_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_ESWITCH))
		return false;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return false;

	if (!is_mdev_switchdev_mode(dev))
		return false;

	return true;
}

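/* Check the HW and FW capabilities the netdev driver depends on. Hard
 * requirements (checksum offload, LSO, VLAN offload, RSS indirection
 * table, a NIC flow table with at least three levels) fail the check;
 * missing self-loopback prevention or CQ moderation only warns.
 */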
bool mlx5_eth_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_CORE_EN))
		return false;

	if (mlx5_core_is_management_pf(dev))
		return false;

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return false;

	if (!MLX5_CAP_GEN(dev, eth_net_offloads)) {
		mlx5_core_warn(dev, "Missing eth_net_offloads capability\n");
		return false;
	}

	if (!MLX5_CAP_GEN(dev, nic_flow_table)) {
		mlx5_core_warn(dev, "Missing nic_flow_table capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, csum_cap)) {
		mlx5_core_warn(dev, "Missing csum_cap capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, max_lso_cap)) {
		mlx5_core_warn(dev, "Missing max_lso_cap capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, vlan_cap)) {
		mlx5_core_warn(dev, "Missing vlan_cap capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, rss_ind_tbl_cap)) {
		mlx5_core_warn(dev, "Missing rss_ind_tbl_cap capability\n");
		return false;
	}

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.max_ft_level) < 3) {
		mlx5_core_warn(dev, "max_ft_level < 3\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, self_lb_en_modifiable))
		mlx5_core_warn(dev, "Self loop back prevention is not supported\n");
	if (!MLX5_CAP_GEN(dev, cq_moderation))
		mlx5_core_warn(dev, "CQ moderation is not supported\n");

	return true;
}

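/* The is_*_enabled() helpers read the generic devlink "driverinit"
 * parameters (enable_eth/enable_vnet/enable_rdma), which let the
 * administrator opt individual auxiliary devices in or out before the
 * driver is (re)initialized. On error the device is treated as disabled.
 */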
static bool is_eth_enabled(struct mlx5_core_dev *dev)
{
	union devlink_param_value val;
	int err;

	err = devl_param_driverinit_value_get(priv_to_devlink(dev),
					      DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
					      &val);
	return err ? false : val.vbool;
}

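/* vDPA virtio-net emulation is only exposed on non-PF functions and
 * requires the virtio-net queue general object, QP-based event mode and
 * an Ethernet frame offload type in the VDPA emulation capabilities.
 */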
bool mlx5_vnet_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_VDPA_NET))
		return false;

	if (mlx5_core_is_pf(dev))
		return false;

	if (!(MLX5_CAP_GEN_64(dev, general_obj_types) &
	      MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q))
		return false;

	if (!(MLX5_CAP_DEV_VDPA_EMULATION(dev, event_mode) &
	      MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE))
		return false;

	if (!MLX5_CAP_DEV_VDPA_EMULATION(dev, eth_frame_offload_type))
		return false;

	return true;
}

static bool is_vnet_enabled(struct mlx5_core_dev *dev)
{
	union devlink_param_value val;
	int err;

	err = devl_param_driverinit_value_get(priv_to_devlink(dev),
					      DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
					      &val);
	return err ? false : val.vbool;
}

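/* InfiniBand representors build on the Ethernet representor requirements
 * and are additionally excluded when IB auxiliary devices are disabled
 * via MLX5_PRIV_FLAGS_DISABLE_IB_ADEV or multi-port mode is enabled.
 */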
static bool is_ib_rep_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
		return false;

	if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
		return false;

	if (!is_eth_rep_supported(dev))
		return false;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return false;

	if (!is_mdev_switchdev_mode(dev))
		return false;

	if (mlx5_core_mp_enabled(dev))
		return false;

	return true;
}

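/* The "multiport" (MPIB) auxiliary device is created only on the slave
 * function of a multi-port Ethernet device; the master function carries
 * the regular "rdma" device instead (see mlx5_rdma_supported()).
 */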
static bool is_mp_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
		return false;

	if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
		return false;

	if (is_ib_rep_supported(dev))
		return false;

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return false;

	if (!mlx5_core_is_mp_slave(dev))
		return false;

	return true;
}

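/* The plain "rdma" device is the default IB interface: it is created when
 * the function is neither a representor nor an MPIB slave, IB auxiliary
 * devices are not disabled, and the function is not a management PF.
 */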
bool mlx5_rdma_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
		return false;

	if (mlx5_core_is_management_pf(dev))
		return false;

	if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
		return false;

	if (is_ib_rep_supported(dev))
		return false;

	if (is_mp_supported(dev))
		return false;

	return true;
}

static bool is_ib_enabled(struct mlx5_core_dev *dev)
{
	union devlink_param_value val;
	int err;

	err = devl_param_driverinit_value_get(priv_to_devlink(dev),
					      DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
					      &val);
	return err ? false : val.vbool;
}

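/* The enum below indexes mlx5_adev_devices[], the registry of auxiliary
 * devices mlx5_core can spawn. Array order is the probe order used by
 * mlx5_attach_device() and add_drivers(); teardown walks the array in
 * reverse. .is_supported gates on HW/FW capabilities, and the optional
 * .is_enabled consults the devlink driverinit parameters above.
 */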
enum {
	MLX5_INTERFACE_PROTOCOL_ETH,
	MLX5_INTERFACE_PROTOCOL_ETH_REP,

	MLX5_INTERFACE_PROTOCOL_IB,
	MLX5_INTERFACE_PROTOCOL_IB_REP,
	MLX5_INTERFACE_PROTOCOL_MPIB,

	MLX5_INTERFACE_PROTOCOL_VNET,
};

static const struct mlx5_adev_device {
	const char *suffix;
	bool (*is_supported)(struct mlx5_core_dev *dev);
	bool (*is_enabled)(struct mlx5_core_dev *dev);
} mlx5_adev_devices[] = {
	[MLX5_INTERFACE_PROTOCOL_VNET] = { .suffix = "vnet",
					   .is_supported = &mlx5_vnet_supported,
					   .is_enabled = &is_vnet_enabled },
	[MLX5_INTERFACE_PROTOCOL_IB] = { .suffix = "rdma",
					 .is_supported = &mlx5_rdma_supported,
					 .is_enabled = &is_ib_enabled },
	[MLX5_INTERFACE_PROTOCOL_ETH] = { .suffix = "eth",
					  .is_supported = &mlx5_eth_supported,
					  .is_enabled = &is_eth_enabled },
	[MLX5_INTERFACE_PROTOCOL_ETH_REP] = { .suffix = "eth-rep",
					      .is_supported = &is_eth_rep_supported },
	[MLX5_INTERFACE_PROTOCOL_IB_REP] = { .suffix = "rdma-rep",
					     .is_supported = &is_ib_rep_supported },
	[MLX5_INTERFACE_PROTOCOL_MPIB] = { .suffix = "multiport",
					   .is_supported = &is_mp_supported },
};
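
/* Each entry becomes an auxiliary device named "mlx5_core.<suffix>.<id>"
 * (this driver's module name plus the suffix; the id comes from
 * mlx5_adev_ida below). A consumer binds with an auxiliary_driver whose
 * id_table names the suffix it serves -- a minimal sketch, using
 * hypothetical my_* names; the in-tree consumers (mlx5e, mlx5_ib,
 * mlx5_vdpa) follow the same pattern:
 *
 *	static const struct auxiliary_device_id my_id_table[] = {
 *		{ .name = MLX5_ADEV_NAME ".eth" },
 *		{},
 *	};
 *	MODULE_DEVICE_TABLE(auxiliary, my_id_table);
 *
 *	static struct auxiliary_driver my_adrv = {
 *		.name = "eth",
 *		.probe = my_probe,	(receives the mlx5_adev)
 *		.remove = my_remove,
 *		.id_table = my_id_table,
 *	};
 */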

int mlx5_adev_idx_alloc(void)
{
	return ida_alloc(&mlx5_adev_ida, GFP_KERNEL);
}

void mlx5_adev_idx_free(int idx)
{
	ida_free(&mlx5_adev_ida, idx);
}

int mlx5_adev_init(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	priv->adev = kcalloc(ARRAY_SIZE(mlx5_adev_devices),
			     sizeof(struct mlx5_adev *), GFP_KERNEL);
	if (!priv->adev)
		return -ENOMEM;

	return 0;
}

void mlx5_adev_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	kfree(priv->adev);
}

static void adev_release(struct device *dev)
{
	struct mlx5_adev *mlx5_adev =
		container_of(dev, struct mlx5_adev, adev.dev);
	struct mlx5_priv *priv = &mlx5_adev->mdev->priv;
	int idx = mlx5_adev->idx;

	kfree(mlx5_adev);
	priv->adev[idx] = NULL;
}

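/* Allocate and register one auxiliary device. If auxiliary_device_init()
 * fails, the madev must be freed by hand; once init has succeeded,
 * auxiliary_device_uninit() drops the last reference and frees it through
 * adev_release(), which also clears the priv->adev[] slot.
 */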
static struct mlx5_adev *add_adev(struct mlx5_core_dev *dev, int idx)
{
	const char *suffix = mlx5_adev_devices[idx].suffix;
	struct auxiliary_device *adev;
	struct mlx5_adev *madev;
	int ret;

	madev = kzalloc(sizeof(*madev), GFP_KERNEL);
	if (!madev)
		return ERR_PTR(-ENOMEM);

	adev = &madev->adev;
	adev->id = dev->priv.adev_idx;
	adev->name = suffix;
	adev->dev.parent = dev->device;
	adev->dev.release = adev_release;
	madev->mdev = dev;
	madev->idx = idx;

	ret = auxiliary_device_init(adev);
	if (ret) {
		kfree(madev);
		return ERR_PTR(ret);
	}

	ret = auxiliary_device_add(adev);
	if (ret) {
		auxiliary_device_uninit(adev);
		return ERR_PTR(ret);
	}
	return madev;
}

static void del_adev(struct auxiliary_device *adev)
{
	auxiliary_device_delete(adev);
	auxiliary_device_uninit(adev);
}

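/* (Re)attach all enabled and supported auxiliary devices. Devices that
 * already exist are resumed through their auxiliary driver instead of
 * being recreated; the loop stops at the first failure.
 */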
int mlx5_attach_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct auxiliary_device *adev;
	struct auxiliary_driver *adrv;
	int ret = 0, i;

	devl_assert_locked(priv_to_devlink(dev));
	mutex_lock(&mlx5_intf_mutex);
	priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
	for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
		if (!priv->adev[i]) {
			bool is_supported = false;

			if (mlx5_adev_devices[i].is_enabled) {
				bool enabled;

				enabled = mlx5_adev_devices[i].is_enabled(dev);
				if (!enabled)
					continue;
			}

			if (mlx5_adev_devices[i].is_supported)
				is_supported = mlx5_adev_devices[i].is_supported(dev);

			if (!is_supported)
				continue;

			priv->adev[i] = add_adev(dev, i);
			if (IS_ERR(priv->adev[i])) {
				ret = PTR_ERR(priv->adev[i]);
				priv->adev[i] = NULL;
			}
		} else {
			adev = &priv->adev[i]->adev;

			/* Note that this is the auxiliary driver bound to
			 * the device, not the PCI driver that mlx5_core_dev
			 * is attached to.
			 */
			if (!adev->dev.driver)
				continue;
			adrv = to_auxiliary_drv(adev->dev.driver);

			if (adrv->resume)
				ret = adrv->resume(adev);
		}
		if (ret) {
			mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n",
				       i, mlx5_adev_devices[i].suffix);
			break;
		}
	}
	mutex_unlock(&mlx5_intf_mutex);
	return ret;
}

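/* Tear down (or, when @suspend is set, suspend) the auxiliary devices in
 * reverse probe order. Devices that are administratively disabled, or
 * whose driver was unbound, are deleted rather than suspended.
 */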
void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend)
{
	struct mlx5_priv *priv = &dev->priv;
	struct auxiliary_device *adev;
	struct auxiliary_driver *adrv;
	pm_message_t pm = {};
	int i;

	devl_assert_locked(priv_to_devlink(dev));
	mutex_lock(&mlx5_intf_mutex);
	for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
		if (!priv->adev[i])
			continue;

		if (mlx5_adev_devices[i].is_enabled) {
			bool enabled;

			enabled = mlx5_adev_devices[i].is_enabled(dev);
			if (!enabled)
				goto skip_suspend;
		}

		adev = &priv->adev[i]->adev;
		/* The auxiliary driver may have been unbound manually
		 * through sysfs.
		 */
		if (!adev->dev.driver)
			goto skip_suspend;

		adrv = to_auxiliary_drv(adev->dev.driver);

		if (adrv->suspend && suspend) {
			adrv->suspend(adev, pm);
			continue;
		}

skip_suspend:
		del_adev(&priv->adev[i]->adev);
		priv->adev[i] = NULL;
	}
	priv->flags |= MLX5_PRIV_FLAGS_DETACH;
	mutex_unlock(&mlx5_intf_mutex);
}

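/* mlx5_register_device()/mlx5_unregister_device() flip the "disable all
 * auxiliary devices" flag and rescan, creating or destroying the adevs
 * accordingly. Registration rolls itself back if the rescan fails.
 */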
int mlx5_register_device(struct mlx5_core_dev *dev)
{
	int ret;

	devl_assert_locked(priv_to_devlink(dev));
	mutex_lock(&mlx5_intf_mutex);
	dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
	ret = mlx5_rescan_drivers_locked(dev);
	mutex_unlock(&mlx5_intf_mutex);
	if (ret)
		mlx5_unregister_device(dev);

	return ret;
}

void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
	devl_assert_locked(priv_to_devlink(dev));
	mutex_lock(&mlx5_intf_mutex);
	dev->priv.flags = MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
	mlx5_rescan_drivers_locked(dev);
	mutex_unlock(&mlx5_intf_mutex);
}

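/* Create any supported auxiliary devices that do not exist yet. Unlike
 * mlx5_attach_device(), this keeps going after a failure so the remaining
 * devices are still probed.
 */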
static int add_drivers(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
		bool is_supported = false;

		if (priv->adev[i])
			continue;

		if (mlx5_adev_devices[i].is_supported)
			is_supported = mlx5_adev_devices[i].is_supported(dev);

		if (!is_supported)
			continue;

		priv->adev[i] = add_adev(dev, i);
		if (IS_ERR(priv->adev[i])) {
			mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n",
				       i, mlx5_adev_devices[i].suffix);
			/* Keep rescanning the remaining drivers and let the
			 * caller decide whether to release everything or to
			 * continue.
			 */
			ret = PTR_ERR(priv->adev[i]);
			priv->adev[i] = NULL;
		}
	}
	return ret;
}

static void delete_drivers(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	bool delete_all;
	int i;

	delete_all = priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;

	for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
		bool is_supported = false;

		if (!priv->adev[i])
			continue;

		if (mlx5_adev_devices[i].is_enabled) {
			bool enabled;

			enabled = mlx5_adev_devices[i].is_enabled(dev);
			if (!enabled)
				goto del_adev;
		}

		if (mlx5_adev_devices[i].is_supported && !delete_all)
			is_supported = mlx5_adev_devices[i].is_supported(dev);

		if (is_supported)
			continue;

del_adev:
		del_adev(&priv->adev[i]->adev);
		priv->adev[i] = NULL;
	}
}

/* Re-evaluate which auxiliary devices should exist after mlx5_core_dev is
 * reconfigured: stale devices are deleted first, then missing ones are
 * added (unless all adevs are disabled or the device is detached).
 */
int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	lockdep_assert_held(&mlx5_intf_mutex);
	if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
		return 0;

	delete_drivers(dev);
	if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
		return 0;

	return add_drivers(dev);
}

bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev)
{
	u64 fsystem_guid, psystem_guid;

	fsystem_guid = mlx5_query_nic_system_image_guid(dev);
	psystem_guid = mlx5_query_nic_system_image_guid(peer_dev);

	return (fsystem_guid && psystem_guid && fsystem_guid == psystem_guid);
}

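/* Build a device identifier from the PCI domain, bus and slot, ignoring
 * the function number, so that the two PFs of a dual-port adapter compare
 * equal.
 */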
static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev)
{
	return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
		     (dev->pdev->bus->number << 8) |
		     PCI_SLOT(dev->pdev->devfn));
}

static int _next_phys_dev(struct mlx5_core_dev *mdev,
			  const struct mlx5_core_dev *curr)
{
	if (!mlx5_core_is_pf(mdev))
		return 0;

	if (mdev == curr)
		return 0;

	if (!mlx5_same_hw_devs(mdev, (struct mlx5_core_dev *)curr) &&
	    mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr))
		return 0;

	return 1;
}

static void *pci_get_other_drvdata(struct device *this, struct device *other)
{
	if (this->driver != other->driver)
		return NULL;

	return pci_get_drvdata(to_pci_dev(other));
}

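/* bus_find_device() match callback: accept another mlx5 PF on the same
 * silicon (or in the same PCI slot) that can act as a LAG port, i.e. a
 * vport group manager and LAG master with a sane num_lag_ports.
 */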
static int next_phys_dev_lag(struct device *dev, const void *data)
{
	struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;

	mdev = pci_get_other_drvdata(this->device, dev);
	if (!mdev)
		return 0;

	if (!MLX5_CAP_GEN(mdev, vport_group_manager) ||
	    !MLX5_CAP_GEN(mdev, lag_master) ||
	    (MLX5_CAP_GEN(mdev, num_lag_ports) > MLX5_MAX_PORTS ||
	     MLX5_CAP_GEN(mdev, num_lag_ports) <= 1))
		return 0;

	return _next_phys_dev(mdev, data);
}

static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev,
					       int (*match)(struct device *dev, const void *data))
{
	struct device *next;

	if (!mlx5_core_is_pf(dev))
		return NULL;

	next = bus_find_device(&pci_bus_type, NULL, dev, match);
	if (!next)
		return NULL;

	put_device(next);
	return pci_get_drvdata(to_pci_dev(next));
}

/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev)
{
	lockdep_assert_held(&mlx5_intf_mutex);
	return mlx5_get_next_dev(dev, &next_phys_dev_lag);
}

void mlx5_dev_list_lock(void)
{
	mutex_lock(&mlx5_intf_mutex);
}

void mlx5_dev_list_unlock(void)
{
	mutex_unlock(&mlx5_intf_mutex);
}

int mlx5_dev_list_trylock(void)
{
	return mutex_trylock(&mlx5_intf_mutex);
}