Lines matching refs: ibdev
90 dev = mpi->ibdev; in mlx5_ib_get_ibdev_from_mpi()
117 static int get_port_state(struct ib_device *ibdev, in get_port_state() argument
125 ret = ibdev->ops.query_port(ibdev, port_num, &attr); in get_port_state()
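
The two hits above come from a small helper that reads a port's logical state by calling the driver's own query_port op directly, bypassing the ib_query_port() cache. A minimal sketch, reconstructed around those two lines; everything outside them is assumed:

static int get_port_state(struct ib_device *ibdev, u32 port_num,
                          enum ib_port_state *state)
{
        struct ib_port_attr attr;
        int ret;

        memset(&attr, 0, sizeof(attr));
        /* Invoke the device op directly rather than ib_query_port(). */
        ret = ibdev->ops.query_port(ibdev, port_num, &attr);
        if (!ret)
                *state = attr.state;
        return ret;
}
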
188 static struct net_device *mlx5_ib_get_rep_uplink_netdev(struct mlx5_ib_dev *ibdev) in mlx5_ib_get_rep_uplink_netdev() argument
193 for (i = 0; i < ibdev->num_ports; i++) { in mlx5_ib_get_rep_uplink_netdev()
194 port = &ibdev->port[i]; in mlx5_ib_get_rep_uplink_netdev()
196 return ib_device_get_netdev(&ibdev->ib_dev, i + 1); in mlx5_ib_get_rep_uplink_netdev()
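
In switchdev (representor) mode, the uplink netdev is found by scanning the device's ports for the uplink representor; ib_device_get_netdev() returns the bound netdev with a reference held, and IB ports are 1-based (hence i + 1). A hedged sketch, assuming the usual port->rep layout:

static struct net_device *
mlx5_ib_get_rep_uplink_netdev(struct mlx5_ib_dev *ibdev)
{
        struct mlx5_ib_port *port;
        int i;

        for (i = 0; i < ibdev->num_ports; i++) {
                port = &ibdev->port[i];
                /* Assumption: uplink rep identified by its vport number. */
                if (port->rep && port->rep->vport == MLX5_VPORT_UPLINK)
                        return ib_device_get_netdev(&ibdev->ib_dev, i + 1);
        }
        return NULL;
}
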
211 struct mlx5_ib_dev *ibdev; in mlx5_netdev_event() local
213 ibdev = roce->dev; in mlx5_netdev_event()
214 mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL); in mlx5_netdev_event()
221 if (ibdev->is_rep) in mlx5_netdev_event()
224 ib_ndev = ib_device_get_netdev(&ibdev->ib_dev, port_num); in mlx5_netdev_event()
230 ib_device_set_netdev(&ibdev->ib_dev, ndev, port_num); in mlx5_netdev_event()
235 if (ibdev->is_rep) in mlx5_netdev_event()
237 ib_ndev = ib_device_get_netdev(&ibdev->ib_dev, port_num); in mlx5_netdev_event()
239 ib_device_set_netdev(&ibdev->ib_dev, NULL, port_num); in mlx5_netdev_event()
255 lag_ndev = ib_device_get_netdev(&ibdev->ib_dev, 1); in mlx5_netdev_event()
257 lag_ndev = mlx5_ib_get_rep_uplink_netdev(ibdev); in mlx5_netdev_event()
267 if (ibdev->is_rep) in mlx5_netdev_event()
268 roce = mlx5_get_rep_roce(ibdev, ndev, upper, &port_num); in mlx5_netdev_event()
272 ib_ndev = ib_device_get_netdev(&ibdev->ib_dev, port_num); in mlx5_netdev_event()
274 if (mlx5_netdev_send_event(ibdev, ndev, upper, ib_ndev)) { in mlx5_netdev_event()
278 if (get_port_state(&ibdev->ib_dev, port_num, in mlx5_netdev_event()
286 ibev.device = &ibdev->ib_dev; in mlx5_netdev_event()
306 mlx5_ib_put_native_port_mdev(ibdev, port_num); in mlx5_netdev_event()
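
The hits above trace the netdevice notifier: resolve the backing core device, bind/unbind the netdev on REGISTER/UNREGISTER, and translate link changes into IB port events. A skeleton of that flow, assuming the usual mlx5_roce container layout; representor, LAG and last-port-state filtering are elided:

static int mlx5_netdev_event(struct notifier_block *this,
                             unsigned long event, void *ptr)
{
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
        struct mlx5_ib_dev *ibdev = roce->dev;
        u32 port_num = roce->native_port_num;
        struct mlx5_core_dev *mdev;

        mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
        if (!mdev)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_REGISTER:
                /* Real code also checks is_rep and the netdev's parent. */
                ib_device_set_netdev(&ibdev->ib_dev, ndev, port_num);
                break;
        case NETDEV_UNREGISTER:
                ib_device_set_netdev(&ibdev->ib_dev, NULL, port_num);
                break;
        case NETDEV_CHANGE:
        case NETDEV_UP:
        case NETDEV_DOWN: {
                struct ib_event ibev = {};
                enum ib_port_state state;

                if (get_port_state(&ibdev->ib_dev, port_num, &state))
                        break;
                ibev.device = &ibdev->ib_dev;
                ibev.element.port_num = port_num;
                ibev.event = (state == IB_PORT_ACTIVE) ?
                        IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
                ib_dispatch_event(&ibev);
                break;
        }
        default:
                break;
        }

        mlx5_ib_put_native_port_mdev(ibdev, port_num);
        return NOTIFY_DONE;
}
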
310 struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev, in mlx5_ib_get_native_port_mdev() argument
314 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev, in mlx5_ib_get_native_port_mdev()
320 if (ibdev->ib_dev.type == RDMA_DEVICE_TYPE_SMI) { in mlx5_ib_get_native_port_mdev()
322 *native_port_num = smi_to_native_portnum(ibdev, in mlx5_ib_get_native_port_mdev()
324 return ibdev->mdev; in mlx5_ib_get_native_port_mdev()
328 if (!mlx5_core_mp_enabled(ibdev->mdev) || in mlx5_ib_get_native_port_mdev()
332 return ibdev->mdev; in mlx5_ib_get_native_port_mdev()
338 port = &ibdev->port[ib_port_num - 1]; in mlx5_ib_get_native_port_mdev()
340 mpi = ibdev->port[ib_port_num - 1].mp.mpi; in mlx5_ib_get_native_port_mdev()
354 void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u32 port_num) in mlx5_ib_put_native_port_mdev() argument
356 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev, in mlx5_ib_put_native_port_mdev()
361 if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET) in mlx5_ib_put_native_port_mdev()
364 port = &ibdev->port[port_num - 1]; in mlx5_ib_put_native_port_mdev()
367 mpi = ibdev->port[port_num - 1].mp.mpi; in mlx5_ib_put_native_port_mdev()
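
On multi-port HCAs, an IB port can be backed by a slave mlx5_core_dev rather than the master; the get/put pair brackets access to it with a refcount taken under mpi_lock. A hedged sketch assembled from the hits (the SMI special case and the unaffiliate wakeup are elided):

struct mlx5_core_dev *
mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev, u32 ib_port_num,
                             u32 *native_port_num)
{
        enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
                                                          ib_port_num);
        struct mlx5_ib_multiport_info *mpi;
        struct mlx5_core_dev *mdev = NULL;
        struct mlx5_ib_port *port;

        if (!mlx5_core_mp_enabled(ibdev->mdev) ||
            ll != IB_LINK_LAYER_ETHERNET) {
                if (native_port_num)
                        *native_port_num = ib_port_num;
                return ibdev->mdev;
        }

        if (native_port_num)
                *native_port_num = 1;

        port = &ibdev->port[ib_port_num - 1];
        spin_lock(&port->mp.mpi_lock);
        mpi = ibdev->port[ib_port_num - 1].mp.mpi;
        if (mpi && !mpi->unaffiliate) {
                mdev = mpi->mdev;
                mpi->mdev_refcnt++;        /* dropped by the put below */
        }
        spin_unlock(&port->mp.mpi_lock);

        return mdev;
}

void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u32 port_num)
{
        enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
                                                          port_num);
        struct mlx5_ib_multiport_info *mpi;
        struct mlx5_ib_port *port;

        if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
                return;

        port = &ibdev->port[port_num - 1];
        spin_lock(&port->mp.mpi_lock);
        mpi = ibdev->port[port_num - 1].mp.mpi;
        if (mpi)
                mpi->mdev_refcnt--;        /* unaffiliate wakeup elided */
        spin_unlock(&port->mp.mpi_lock);
}
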
716 static int mlx5_get_vport_access_method(struct ib_device *ibdev) in mlx5_get_vport_access_method() argument
718 if (mlx5_use_mad_ifc(to_mdev(ibdev))) in mlx5_get_vport_access_method()
721 if (mlx5_ib_port_link_layer(ibdev, 1) == in mlx5_get_vport_access_method()
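
Nearly every query path below funnels through this tri-state dispatch: MAD interface for classic IB, NIC vport for Ethernet/RoCE, HCA vport otherwise. Reconstructed around the two hits; the enum names are the driver's own:

static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
        if (mlx5_use_mad_ifc(to_mdev(ibdev)))
                return MLX5_VPORT_ACCESS_METHOD_MAD;

        if (mlx5_ib_port_link_layer(ibdev, 1) ==
            IB_LINK_LAYER_ETHERNET)
                return MLX5_VPORT_ACCESS_METHOD_NIC;

        return MLX5_VPORT_ACCESS_METHOD_HCA;
}
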
758 static int mlx5_query_system_image_guid(struct ib_device *ibdev, in mlx5_query_system_image_guid() argument
761 struct mlx5_ib_dev *dev = to_mdev(ibdev); in mlx5_query_system_image_guid()
766 switch (mlx5_get_vport_access_method(ibdev)) { in mlx5_query_system_image_guid()
768 return mlx5_query_mad_ifc_system_image_guid(ibdev, in mlx5_query_system_image_guid()
790 static int mlx5_query_max_pkeys(struct ib_device *ibdev, in mlx5_query_max_pkeys() argument
793 struct mlx5_ib_dev *dev = to_mdev(ibdev); in mlx5_query_max_pkeys()
796 switch (mlx5_get_vport_access_method(ibdev)) { in mlx5_query_max_pkeys()
798 return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys); in mlx5_query_max_pkeys()
811 static int mlx5_query_vendor_id(struct ib_device *ibdev, in mlx5_query_vendor_id() argument
814 struct mlx5_ib_dev *dev = to_mdev(ibdev); in mlx5_query_vendor_id()
816 switch (mlx5_get_vport_access_method(ibdev)) { in mlx5_query_vendor_id()
818 return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id); in mlx5_query_vendor_id()
886 static int mlx5_ib_query_device(struct ib_device *ibdev, in mlx5_ib_query_device() argument
891 struct mlx5_ib_dev *dev = to_mdev(ibdev); in mlx5_ib_query_device()
913 err = mlx5_query_system_image_guid(ibdev, in mlx5_ib_query_device()
920 err = mlx5_query_vendor_id(ibdev, &props->vendor_id); in mlx5_ib_query_device()
1107 if (mlx5_ib_port_link_layer(ibdev, 1) == in mlx5_ib_query_device()
1308 static void translate_active_width(struct ib_device *ibdev, u16 active_width, in translate_active_width() argument
1311 struct mlx5_ib_dev *dev = to_mdev(ibdev); in translate_active_width()
1366 static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap, in translate_max_vl_num() argument
1393 static int mlx5_query_hca_port(struct ib_device *ibdev, u32 port, in mlx5_query_hca_port() argument
1396 struct mlx5_ib_dev *dev = to_mdev(ibdev); in mlx5_query_hca_port()
1413 if (ibdev->type == RDMA_DEVICE_TYPE_SMI) { in mlx5_query_hca_port()
1433 } else if (ibdev->type == RDMA_DEVICE_TYPE_SMI) in mlx5_query_hca_port()
1452 translate_active_width(ibdev, ib_link_width_oper, &props->active_width); in mlx5_query_hca_port()
1466 err = translate_max_vl_num(ibdev, vl_hw_cap, in mlx5_query_hca_port()
1473 int mlx5_ib_query_port(struct ib_device *ibdev, u32 port, in mlx5_ib_query_port() argument
1479 switch (mlx5_get_vport_access_method(ibdev)) { in mlx5_ib_query_port()
1481 ret = mlx5_query_mad_ifc_port(ibdev, port, props); in mlx5_ib_query_port()
1485 ret = mlx5_query_hca_port(ibdev, port, props); in mlx5_ib_query_port()
1489 ret = mlx5_query_port_roce(ibdev, port, props); in mlx5_ib_query_port()
1497 struct mlx5_ib_dev *dev = to_mdev(ibdev); in mlx5_ib_query_port()
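
A representative consumer of that dispatch; a hedged sketch around the hits (the RoCE QP-counter bookkeeping that follows in the real function, where the to_mdev(ibdev) hit comes from, is elided):

int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
                       struct ib_port_attr *props)
{
        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_port(ibdev, port, props);
        case MLX5_VPORT_ACCESS_METHOD_HCA:
                return mlx5_query_hca_port(ibdev, port, props);
        case MLX5_VPORT_ACCESS_METHOD_NIC:
                return mlx5_query_port_roce(ibdev, port, props);
        default:
                return -EINVAL;
        }
}
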
1518 static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u32 port, in mlx5_ib_rep_query_port() argument
1521 return mlx5_query_port_roce(ibdev, port, props); in mlx5_ib_rep_query_port()
1524 static int mlx5_ib_rep_query_pkey(struct ib_device *ibdev, u32 port, u16 index, in mlx5_ib_rep_query_pkey() argument
1534 static int mlx5_ib_query_gid(struct ib_device *ibdev, u32 port, int index, in mlx5_ib_query_gid() argument
1537 struct mlx5_ib_dev *dev = to_mdev(ibdev); in mlx5_ib_query_gid()
1540 switch (mlx5_get_vport_access_method(ibdev)) { in mlx5_ib_query_gid()
1542 return mlx5_query_mad_ifc_gids(ibdev, port, index, gid); in mlx5_ib_query_gid()
1553 static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u32 port, in mlx5_query_hca_nic_pkey() argument
1556 struct mlx5_ib_dev *dev = to_mdev(ibdev); in mlx5_query_hca_nic_pkey()
1580 static int mlx5_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, in mlx5_ib_query_pkey() argument
1583 switch (mlx5_get_vport_access_method(ibdev)) { in mlx5_ib_query_pkey()
1585 return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey); in mlx5_ib_query_pkey()
1589 return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey); in mlx5_ib_query_pkey()
1595 static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask, in mlx5_ib_modify_device() argument
1598 struct mlx5_ib_dev *dev = to_mdev(ibdev); in mlx5_ib_modify_device()
1619 memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX); in mlx5_ib_modify_device()
1658 static int mlx5_ib_modify_port(struct ib_device *ibdev, u32 port, int mask, in mlx5_ib_modify_port() argument
1661 struct mlx5_ib_dev *dev = to_mdev(ibdev); in mlx5_ib_modify_port()
1667 bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) == in mlx5_ib_modify_port()
1684 err = ib_query_port(ibdev, port, &attr); in mlx5_ib_modify_port()
1903 struct ib_device *ibdev = uctx->device; in set_ucontext_resp() local
1904 struct mlx5_ib_dev *dev = to_mdev(ibdev); in set_ucontext_resp()
1936 if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) { in set_ucontext_resp()
1987 struct ib_device *ibdev = uctx->device; in mlx5_ib_alloc_ucontext() local
1988 struct mlx5_ib_dev *dev = to_mdev(ibdev); in mlx5_ib_alloc_ucontext()
2506 struct ib_device *ibdev = ibpd->device; in mlx5_ib_alloc_pd() local
2518 err = mlx5_cmd_exec_inout(to_mdev(ibdev)->mdev, alloc_pd, in, out); in mlx5_ib_alloc_pd()
2527 mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid); in mlx5_ib_alloc_pd()
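
PD allocation issues the ALLOC_PD firmware command, then unwinds it if the response cannot be copied back to user space. A hedged sketch around the two command hits; uid derivation from the ucontext is assumed away:

static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
        struct ib_device *ibdev = ibpd->device;
        struct mlx5_ib_pd *pd = to_mpd(ibpd);
        u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
        u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
        u16 uid = 0;        /* real code: taken from the ucontext's devx_uid */
        int err;

        MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
        MLX5_SET(alloc_pd_in, in, uid, uid);
        err = mlx5_cmd_exec_inout(to_mdev(ibdev)->mdev, alloc_pd, in, out);
        if (err)
                return err;

        pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
        pd->uid = uid;

        if (udata) {
                struct mlx5_ib_alloc_pd_resp resp = { .pdn = pd->pdn };

                if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
                        /* Roll back the firmware object on copyout failure. */
                        mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn,
                                            uid);
                        return -EFAULT;
                }
        }
        return 0;
}
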
2675 static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev) in mlx5_ib_handle_internal_error() argument
2688 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags); in mlx5_ib_handle_internal_error()
2689 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) { in mlx5_ib_handle_internal_error()
2731 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags); in mlx5_ib_handle_internal_error()
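
On a firmware internal error the driver walks every QP it owns under reset_flow_resource_lock and forces them (and their CQs) into the error/flushed state. Skeleton only; the per-QP work between the two locking hits is elided:

static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
{
        struct mlx5_ib_qp *mqp;
        unsigned long flags;

        spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
        list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
                /* Move the QP to error / generate flushed CQEs (elided). */
        }
        spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
}
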
2753 static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe, in handle_general_event() argument
2760 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) == in handle_general_event()
2762 schedule_work(&ibdev->delay_drop.delay_drop_work); in handle_general_event()
2769 static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe, in handle_port_change() argument
2783 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) == in handle_port_change()
2797 schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work); in handle_port_change()
2818 struct mlx5_ib_dev *ibdev; in mlx5_ib_handle_event() local
2823 ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi); in mlx5_ib_handle_event()
2824 if (!ibdev) in mlx5_ib_handle_event()
2827 ibdev = work->dev; in mlx5_ib_handle_event()
2833 mlx5_ib_handle_internal_error(ibdev); in mlx5_ib_handle_event()
2838 if (handle_port_change(ibdev, work->param, &ibev)) in mlx5_ib_handle_event()
2842 handle_general_event(ibdev, work->param, &ibev); in mlx5_ib_handle_event()
2848 ibev.device = &ibdev->ib_dev; in mlx5_ib_handle_event()
2850 if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) { in mlx5_ib_handle_event()
2851 mlx5_ib_warn(ibdev, "warning: event on port %d\n", ibev.element.port_num); in mlx5_ib_handle_event()
2855 if (ibdev->ib_active) in mlx5_ib_handle_event()
2859 ibdev->ib_active = false; in mlx5_ib_handle_event()
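
The deferred event worker first resolves which mlx5_ib_dev the event targets (a slave's mpi or the master itself), translates the firmware event, validates the port, and dispatches; a fatal error additionally clears ib_active. A hedged skeleton assembled from the hits above:

static void mlx5_ib_handle_event(struct work_struct *_work)
{
        struct mlx5_ib_event_work *work =
                container_of(_work, struct mlx5_ib_event_work, work);
        struct mlx5_ib_dev *ibdev;
        struct ib_event ibev;
        bool fatal = false;

        if (work->is_slave) {
                ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi);
                if (!ibdev)
                        goto out;
        } else {
                ibdev = work->dev;
        }

        switch (work->event) {
        case MLX5_DEV_EVENT_SYS_ERROR:
                ibev.event = IB_EVENT_DEVICE_FATAL;
                mlx5_ib_handle_internal_error(ibdev);
                ibev.element.port_num = (u8)(unsigned long)work->param;
                fatal = true;
                break;
        case MLX5_EVENT_TYPE_PORT_CHANGE:
                if (handle_port_change(ibdev, work->param, &ibev))
                        goto out;
                break;
        case MLX5_EVENT_TYPE_GENERAL_EVENT:
                handle_general_event(ibdev, work->param, &ibev);
                goto out;
        default:
                goto out;
        }

        ibev.device = &ibdev->ib_dev;

        if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) {
                mlx5_ib_warn(ibdev, "warning: event on port %d\n",
                             ibev.element.port_num);
                goto out;
        }

        if (ibdev->ib_active)
                ib_dispatch_event(&ibev);

        if (fatal)
                ibdev->ib_active = false;
out:
        kfree(work);
}
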
2976 struct ib_device *ibdev; in mlx5_ib_dev_res_cq_init() local
2993 ibdev = &dev->ib_dev; in mlx5_ib_dev_res_cq_init()
2994 pd = ib_alloc_pd(ibdev, 0); in mlx5_ib_dev_res_cq_init()
3001 cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr); in mlx5_ib_dev_res_cq_init()
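
Device-resource init allocates an internal PD and CQ through the regular verbs entry points against the driver's own ib_device. A hedged sketch; the real function stores both in dev->devr under a mutex:

static int mlx5_ib_dev_res_cq_init(struct mlx5_ib_dev *dev)
{
        struct ib_cq_init_attr cq_attr = { .cqe = 1 };
        struct ib_device *ibdev = &dev->ib_dev;
        struct ib_pd *pd;
        struct ib_cq *cq;

        pd = ib_alloc_pd(ibdev, 0);
        if (IS_ERR(pd))
                return PTR_ERR(pd);

        cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
        if (IS_ERR(cq)) {
                ib_dealloc_pd(pd);
                return PTR_ERR(cq);
        }

        /* Stashing pd/cq in dev->devr (and the locking around it) elided. */
        return 0;
}
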
3169 static u32 get_core_cap_flags(struct ib_device *ibdev, in get_core_cap_flags() argument
3172 struct mlx5_ib_dev *dev = to_mdev(ibdev); in get_core_cap_flags()
3173 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1); in get_core_cap_flags()
3186 else if (ibdev->type == RDMA_DEVICE_TYPE_SMI) in get_core_cap_flags()
3210 static int mlx5_port_immutable(struct ib_device *ibdev, u32 port_num, in mlx5_port_immutable() argument
3214 struct mlx5_ib_dev *dev = to_mdev(ibdev); in mlx5_port_immutable()
3215 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num); in mlx5_port_immutable()
3219 err = ib_query_port(ibdev, port_num, &attr); in mlx5_port_immutable()
3224 if (ibdev->type == RDMA_DEVICE_TYPE_SMI) in mlx5_port_immutable()
3235 immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep); in mlx5_port_immutable()
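
The immutable-port callback queries the port once and caches the table sizes plus the core capability flags. A hedged sketch; on the IB link layer the real code fills 'rep' via mlx5_query_hca_vport_context() before the get_core_cap_flags() call, and the SMI device type is special-cased:

static int mlx5_port_immutable(struct ib_device *ibdev, u32 port_num,
                               struct ib_port_immutable *immutable)
{
        struct mlx5_hca_vport_context rep = {};
        struct ib_port_attr attr;
        int err;

        err = ib_query_port(ibdev, port_num, &attr);
        if (err)
                return err;

        immutable->pkey_tbl_len = attr.pkey_tbl_len;
        immutable->gid_tbl_len = attr.gid_tbl_len;
        immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
        immutable->max_mad_size = IB_MGMT_MAD_SIZE;

        return 0;
}
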
3241 static int mlx5_port_rep_immutable(struct ib_device *ibdev, u32 port_num, in mlx5_port_rep_immutable() argument
3249 err = ib_query_port(ibdev, port_num, &attr); in mlx5_port_rep_immutable()
3260 static void get_dev_fw_str(struct ib_device *ibdev, char *str) in get_dev_fw_str() argument
3263 container_of(ibdev, struct mlx5_ib_dev, ib_dev); in get_dev_fw_str()
3274 struct ib_device *ibdev = &dev->ib_dev; in lag_event() local
3297 old_ndev = ib_device_get_netdev(ibdev, portnum + 1); in lag_event()
3298 ret = ib_device_set_netdev(ibdev, ndev, portnum + 1); in lag_event()
3303 roce_del_all_netdev_gids(ibdev, portnum + 1, in lag_event()
3305 rdma_roce_rescan_port(ibdev, portnum + 1); in lag_event()
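
On a LAG membership change the handler swaps the netdev bound to the port and forces a GID-table rebuild: GIDs created against the old netdev are dropped, then the port is rescanned. A hedged fragment (lag_rebind_port is a hypothetical name for this slice of lag_event(); event filtering and locking are elided):

static void lag_rebind_port(struct mlx5_ib_dev *dev, u32 portnum,
                            struct net_device *ndev)
{
        struct ib_device *ibdev = &dev->ib_dev;
        struct net_device *old_ndev;

        old_ndev = ib_device_get_netdev(ibdev, portnum + 1);
        if (ib_device_set_netdev(ibdev, ndev, portnum + 1))
                goto out;

        if (old_ndev)
                /* Drop GID entries created against the previous netdev... */
                roce_del_all_netdev_gids(ibdev, portnum + 1, old_ndev);
        /* ...then repopulate the table from the new one. */
        rdma_roce_rescan_port(ibdev, portnum + 1);
out:
        if (old_ndev)
                dev_put(old_ndev);
}
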
3515 static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, in mlx5_ib_unbind_slave_port() argument
3519 struct mlx5_ib_port *port = &ibdev->port[port_num]; in mlx5_ib_unbind_slave_port()
3526 mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev); in mlx5_ib_unbind_slave_port()
3528 mlx5_core_mp_event_replay(ibdev->mdev, in mlx5_ib_unbind_slave_port()
3535 mlx5_ib_cleanup_cong_debugfs(ibdev, port_num); in mlx5_ib_unbind_slave_port()
3538 if (!mpi->ibdev) { in mlx5_ib_unbind_slave_port()
3543 mpi->ibdev = NULL; in mlx5_ib_unbind_slave_port()
3549 mlx5_mdev_netdev_untrack(ibdev, port_num); in mlx5_ib_unbind_slave_port()
3571 mlx5_ib_dbg(ibdev, "unaffiliated port %u\n", port_num + 1); in mlx5_ib_unbind_slave_port()
3576 mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n", in mlx5_ib_unbind_slave_port()
3579 ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN; in mlx5_ib_unbind_slave_port()
3582 static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev, in mlx5_ib_bind_slave_port() argument
3591 spin_lock(&ibdev->port[port_num].mp.mpi_lock); in mlx5_ib_bind_slave_port()
3592 if (ibdev->port[port_num].mp.mpi) { in mlx5_ib_bind_slave_port()
3593 mlx5_ib_dbg(ibdev, "port %u already affiliated.\n", in mlx5_ib_bind_slave_port()
3595 spin_unlock(&ibdev->port[port_num].mp.mpi_lock); in mlx5_ib_bind_slave_port()
3599 ibdev->port[port_num].mp.mpi = mpi; in mlx5_ib_bind_slave_port()
3600 mpi->ibdev = ibdev; in mlx5_ib_bind_slave_port()
3602 spin_unlock(&ibdev->port[port_num].mp.mpi_lock); in mlx5_ib_bind_slave_port()
3604 err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev); in mlx5_ib_bind_slave_port()
3608 mlx5_mdev_netdev_track(ibdev, port_num); in mlx5_ib_bind_slave_port()
3613 mlx5_ib_init_cong_debugfs(ibdev, port_num); in mlx5_ib_bind_slave_port()
3619 mlx5_core_mp_event_replay(ibdev->mdev, in mlx5_ib_bind_slave_port()
3623 err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev); in mlx5_ib_bind_slave_port()
3630 mlx5_ib_unbind_slave_port(ibdev, mpi); in mlx5_ib_bind_slave_port()
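
Affiliating a slave port publishes the pairing in both directions (port->mp.mpi and mpi->ibdev) under mpi_lock, then performs firmware affiliation, netdev tracking and debugfs setup; any failure unwinds through mlx5_ib_unbind_slave_port(), which tears the same steps down in reverse. A hedged skeleton (the MP event replay and LB enablement steps are compressed into the single unwind):

static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
                                    struct mlx5_ib_multiport_info *mpi)
{
        u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
        int err;

        spin_lock(&ibdev->port[port_num].mp.mpi_lock);
        if (ibdev->port[port_num].mp.mpi) {
                mlx5_ib_dbg(ibdev, "port %u already affiliated.\n",
                            port_num + 1);
                spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
                return false;
        }
        /* Publish the pairing in both directions under the lock. */
        ibdev->port[port_num].mp.mpi = mpi;
        mpi->ibdev = ibdev;
        spin_unlock(&ibdev->port[port_num].mp.mpi_lock);

        err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
        if (err)
                goto unbind;

        mlx5_mdev_netdev_track(ibdev, port_num);
        mlx5_ib_init_cong_debugfs(ibdev, port_num);
        return true;

unbind:
        mlx5_ib_unbind_slave_port(ibdev, mpi);
        return false;
}
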
3707 mpi->ibdev = dev; in mlx5_ib_init_multiport_master()
4610 void mlx5_ib_data_direct_bind(struct mlx5_ib_dev *ibdev, in mlx5_ib_data_direct_bind() argument
4613 mutex_lock(&ibdev->data_direct_lock); in mlx5_ib_data_direct_bind()
4614 ibdev->data_direct_dev = dev; in mlx5_ib_data_direct_bind()
4615 mutex_unlock(&ibdev->data_direct_lock); in mlx5_ib_data_direct_bind()
4618 void mlx5_ib_data_direct_unbind(struct mlx5_ib_dev *ibdev) in mlx5_ib_data_direct_unbind() argument
4620 mutex_lock(&ibdev->data_direct_lock); in mlx5_ib_data_direct_unbind()
4621 mlx5_ib_revoke_data_direct_mrs(ibdev); in mlx5_ib_data_direct_unbind()
4622 ibdev->data_direct_dev = NULL; in mlx5_ib_data_direct_unbind()
4623 mutex_unlock(&ibdev->data_direct_lock); in mlx5_ib_data_direct_unbind()
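
The data-direct pair is a pointer swap under data_direct_lock; unbind additionally revokes MRs created against the outgoing device before clearing the pointer. Assembled almost verbatim from the hits above:

void mlx5_ib_data_direct_bind(struct mlx5_ib_dev *ibdev,
                              struct mlx5_data_direct_dev *dev)
{
        mutex_lock(&ibdev->data_direct_lock);
        ibdev->data_direct_dev = dev;
        mutex_unlock(&ibdev->data_direct_lock);
}

void mlx5_ib_data_direct_unbind(struct mlx5_ib_dev *ibdev)
{
        mutex_lock(&ibdev->data_direct_lock);
        /* Invalidate MRs referencing the device before clearing it. */
        mlx5_ib_revoke_data_direct_mrs(ibdev);
        ibdev->data_direct_dev = NULL;
        mutex_unlock(&ibdev->data_direct_lock);
}
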
4904 mpi->ibdev->ib_active = true; in mlx5r_mp_probe()
4926 if (mpi->ibdev) in mlx5r_mp_remove()
4927 mlx5_ib_unbind_slave_port(mpi->ibdev, mpi); in mlx5r_mp_remove()