Lines Matching refs:cma_dev
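(The matches below are from the RDMA connection manager core, drivers/infiniband/core/cma.c in the Linux kernel. The leading number on each match is the line number in that file; the trailing "in foo()" note names the enclosing function, with "argument"/"local" flagging where cma_dev is a parameter or a local variable.)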
247 void cma_dev_get(struct cma_device *cma_dev) in cma_dev_get() argument
249 refcount_inc(&cma_dev->refcount); in cma_dev_get()
252 void cma_dev_put(struct cma_device *cma_dev) in cma_dev_put() argument
254 if (refcount_dec_and_test(&cma_dev->refcount)) in cma_dev_put()
255 complete(&cma_dev->comp); in cma_dev_put()
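The two matches above are the whole of the reference-counting pair: cma_dev_get() bumps the refcount, and the final cma_dev_put() fires cma_dev->comp, which cma_process_remove() (further down in this listing) waits on. A minimal sketch reconstructed from these matches:

void cma_dev_get(struct cma_device *cma_dev)
{
        refcount_inc(&cma_dev->refcount);
}

void cma_dev_put(struct cma_device *cma_dev)
{
        /* The completion is what cma_process_remove() sleeps on. */
        if (refcount_dec_and_test(&cma_dev->refcount))
                complete(&cma_dev->comp);
}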
261 struct cma_device *cma_dev; in cma_enum_devices_by_ibdev() local
266 list_for_each_entry(cma_dev, &dev_list, list) in cma_enum_devices_by_ibdev()
267 if (filter(cma_dev->device, cookie)) { in cma_enum_devices_by_ibdev()
268 found_cma_dev = cma_dev; in cma_enum_devices_by_ibdev()
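The cma_enum_devices_by_ibdev() matches show a filtered walk of the global dev_list. A plausible reconstruction around them, assuming the usual cma_device_filter callback type and the global `lock` mutex that guards dev_list:

struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
                                             void *cookie)
{
        struct cma_device *cma_dev;
        struct cma_device *found_cma_dev = NULL;

        mutex_lock(&lock);
        list_for_each_entry(cma_dev, &dev_list, list)
                if (filter(cma_dev->device, cookie)) {
                        found_cma_dev = cma_dev;
                        break;
                }
        if (found_cma_dev)
                cma_dev_get(found_cma_dev);     /* hand the caller a reference */
        mutex_unlock(&lock);
        return found_cma_dev;
}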
278 int cma_get_default_gid_type(struct cma_device *cma_dev, in cma_get_default_gid_type() argument
281 if (!rdma_is_port_valid(cma_dev->device, port)) in cma_get_default_gid_type()
284 return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)]; in cma_get_default_gid_type()
287 int cma_set_default_gid_type(struct cma_device *cma_dev, in cma_set_default_gid_type() argument
293 if (!rdma_is_port_valid(cma_dev->device, port)) in cma_set_default_gid_type()
297 rdma_protocol_roce_eth_encap(cma_dev->device, port)) in cma_set_default_gid_type()
300 supported_gids = roce_gid_type_mask_support(cma_dev->device, port); in cma_set_default_gid_type()
305 cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] = in cma_set_default_gid_type()
311 int cma_get_default_roce_tos(struct cma_device *cma_dev, u32 port) in cma_get_default_roce_tos() argument
313 if (!rdma_is_port_valid(cma_dev->device, port)) in cma_get_default_roce_tos()
316 return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)]; in cma_get_default_roce_tos()
319 int cma_set_default_roce_tos(struct cma_device *cma_dev, u32 port, in cma_set_default_roce_tos() argument
322 if (!rdma_is_port_valid(cma_dev->device, port)) in cma_set_default_roce_tos()
325 cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)] = in cma_set_default_roce_tos()
330 struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev) in cma_get_ib_dev() argument
332 return cma_dev->device; in cma_get_ib_dev()
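All four default_gid_type/default_roce_tos accessors above validate the port with rdma_is_port_valid() and then index a per-port array by `port - rdma_start_port(device)` (the arrays are sized to phys_port_cnt in cma_add_one(), near the end of this listing). The simplest of them, sketched from the matched lines:

int cma_get_default_roce_tos(struct cma_device *cma_dev, u32 port)
{
        if (!rdma_is_port_valid(cma_dev->device, port))
                return -EINVAL;

        /* Per-port array, indexed from the device's first port. */
        return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)];
}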
575 struct cma_device *cma_dev) in _cma_attach_to_dev() argument
577 cma_dev_get(cma_dev); in _cma_attach_to_dev()
578 id_priv->cma_dev = cma_dev; in _cma_attach_to_dev()
579 id_priv->id.device = cma_dev->device; in _cma_attach_to_dev()
581 rdma_node_get_transport(cma_dev->device->node_type); in _cma_attach_to_dev()
582 list_add_tail(&id_priv->device_item, &cma_dev->id_list); in _cma_attach_to_dev()
584 trace_cm_id_attach(id_priv, cma_dev->device); in _cma_attach_to_dev()
588 struct cma_device *cma_dev) in cma_attach_to_dev() argument
590 _cma_attach_to_dev(id_priv, cma_dev); in cma_attach_to_dev()
592 cma_dev->default_gid_type[id_priv->id.port_num - in cma_attach_to_dev()
593 rdma_start_port(cma_dev->device)]; in cma_attach_to_dev()
600 cma_dev_put(id_priv->cma_dev); in cma_release_dev()
601 id_priv->cma_dev = NULL; in cma_release_dev()
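The _cma_attach_to_dev()/cma_attach_to_dev()/cma_release_dev() matches form a pair: attaching takes a cma_dev reference and links the ID onto cma_dev->id_list; releasing undoes both. An abbreviated sketch (tracing, locking and sgid_attr cleanup elided; the target of the transport assignment is assumed from the rdma_node_get_transport() match):

static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
                               struct cma_device *cma_dev)
{
        cma_dev_get(cma_dev);
        id_priv->cma_dev = cma_dev;
        id_priv->id.device = cma_dev->device;
        id_priv->id.route.addr.dev_addr.transport =
                rdma_node_get_transport(cma_dev->device->node_type);
        list_add_tail(&id_priv->device_item, &cma_dev->id_list);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
                              struct cma_device *cma_dev)
{
        _cma_attach_to_dev(id_priv, cma_dev);
        /* Seed the ID's GID type from the device's per-port default. */
        id_priv->gid_type =
                cma_dev->default_gid_type[id_priv->id.port_num -
                                          rdma_start_port(cma_dev->device)];
}

static void cma_release_dev(struct rdma_id_private *id_priv)
{
        /* ... removal from cma_dev->id_list under the global lock elided ... */
        cma_dev_put(id_priv->cma_dev);
        id_priv->cma_dev = NULL;
}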
779 struct cma_device *cma_dev; in cma_acquire_dev_by_src_ip() local
795 list_for_each_entry(cma_dev, &dev_list, list) { in cma_acquire_dev_by_src_ip()
796 rdma_for_each_port (cma_dev->device, port) { in cma_acquire_dev_by_src_ip()
797 gidp = rdma_protocol_roce(cma_dev->device, port) ? in cma_acquire_dev_by_src_ip()
799 gid_type = cma_dev->default_gid_type[port - 1]; in cma_acquire_dev_by_src_ip()
800 sgid_attr = cma_validate_port(cma_dev->device, port, in cma_acquire_dev_by_src_ip()
805 cma_attach_to_dev(id_priv, cma_dev); in cma_acquire_dev_by_src_ip()
847 gid_type = listen_id_priv->cma_dev->default_gid_type[req->port - 1]; in cma_ib_acquire_dev()
860 cma_attach_to_dev(id_priv, listen_id_priv->cma_dev); in cma_ib_acquire_dev()
871 struct cma_device *cma_dev; in cma_iw_acquire_dev() local
886 cma_dev = listen_id_priv->cma_dev; in cma_iw_acquire_dev()
889 sgid_attr = cma_validate_port(cma_dev->device, port, in cma_iw_acquire_dev()
898 list_for_each_entry(cma_dev, &dev_list, list) { in cma_iw_acquire_dev()
899 rdma_for_each_port (cma_dev->device, port) { in cma_iw_acquire_dev()
900 if (listen_id_priv->cma_dev == cma_dev && in cma_iw_acquire_dev()
904 gid_type = cma_dev->default_gid_type[port - 1]; in cma_iw_acquire_dev()
905 sgid_attr = cma_validate_port(cma_dev->device, port, in cma_iw_acquire_dev()
918 cma_attach_to_dev(id_priv, cma_dev); in cma_iw_acquire_dev()
931 struct cma_device *cma_dev, *cur_dev; in cma_resolve_ib_dev() local
940 cma_dev = NULL; in cma_resolve_ib_dev()
965 cma_dev = cur_dev; in cma_resolve_ib_dev()
971 if (!cma_dev && (gid.global.subnet_prefix == in cma_resolve_ib_dev()
974 cma_dev = cur_dev; in cma_resolve_ib_dev()
986 cma_attach_to_dev(id_priv, cma_dev); in cma_resolve_ib_dev()
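cma_acquire_dev_by_src_ip(), cma_ib_acquire_dev(), cma_iw_acquire_dev() and cma_resolve_ib_dev() all end the same way, in cma_attach_to_dev(); what differs is how the device and port are chosen. A condensed sketch of the common shape, modelled on the cma_acquire_dev_by_src_ip() matches (GID construction and sgid_attr bookkeeping elided; cma_validate_port() is assumed to return the matching GID attribute or an ERR_PTR):

static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv)
{
        const struct ib_gid_attr *sgid_attr;
        union ib_gid gid, iboe_gid, *gidp;
        struct cma_device *cma_dev;
        enum ib_gid_type gid_type;
        int ret = -ENODEV;
        u32 port;

        /* ... derive 'gid' and 'iboe_gid' from the bound source address ... */

        mutex_lock(&lock);
        list_for_each_entry(cma_dev, &dev_list, list) {
                rdma_for_each_port (cma_dev->device, port) {
                        /* RoCE ports match on the IP-derived GID. */
                        gidp = rdma_protocol_roce(cma_dev->device, port) ?
                               &iboe_gid : &gid;
                        gid_type = cma_dev->default_gid_type[port - 1];
                        sgid_attr = cma_validate_port(cma_dev->device, port,
                                                      gid_type, gidp, id_priv);
                        if (!IS_ERR(sgid_attr)) {
                                id_priv->id.port_num = port;
                                /* ... keep sgid_attr for the ID ... */
                                cma_attach_to_dev(id_priv, cma_dev);
                                ret = 0;
                                goto out;
                        }
                }
        }
out:
        mutex_unlock(&lock);
        return ret;
}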
1199 BUG_ON(id_priv->cma_dev->device != id_priv->id.device); in cma_modify_qp_rtr()
1978 if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev) in cma_cancel_operation()
2023 gid_type = id_priv->cma_dev->default_gid_type in destroy_mc()
2026 id_priv->cma_dev->device)]; in destroy_mc()
2057 if (id_priv->cma_dev) { in _destroy_id()
2713 struct cma_device *cma_dev, in cma_listen_on_dev() argument
2723 if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) in cma_listen_on_dev()
2736 _cma_attach_to_dev(dev_id_priv, cma_dev); in cma_listen_on_dev()
2754 dev_warn(&cma_dev->device->dev, "RDMA CMA: %s, error %d\n", __func__, ret); in cma_listen_on_dev()
2761 struct cma_device *cma_dev; in cma_listen_on_all() local
2766 list_for_each_entry(cma_dev, &dev_list, list) { in cma_listen_on_all()
2767 ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy); in cma_listen_on_all()
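cma_listen_on_all() is just a walk of dev_list calling cma_listen_on_dev() once per device; the AF_IB check in cma_listen_on_dev() skips devices whose port 1 lacks IB CM support. A condensed sketch of the loop (registration on the wildcard-listen list and the error unwinding are elided):

static int cma_listen_on_all(struct rdma_id_private *id_priv)
{
        struct rdma_id_private *to_destroy;
        struct cma_device *cma_dev;
        int ret;

        mutex_lock(&lock);
        /* ... record id_priv on the wildcard-listen list ... */
        list_for_each_entry(cma_dev, &dev_list, list) {
                ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
                if (ret)
                        goto err;
        }
        mutex_unlock(&lock);
        return 0;

err:
        mutex_unlock(&lock);
        /* ... tear down the per-device listen IDs created so far ... */
        return ret;
}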
3298 u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num - in cma_resolve_iboe_route()
3299 rdma_start_port(id_priv->cma_dev->device)]; in cma_resolve_iboe_route()
3437 struct cma_device *cma_dev, *cur_dev; in cma_bind_loopback() local
3444 cma_dev = NULL; in cma_bind_loopback()
3451 if (!cma_dev) in cma_bind_loopback()
3452 cma_dev = cur_dev; in cma_bind_loopback()
3457 cma_dev = cur_dev; in cma_bind_loopback()
3463 if (!cma_dev) { in cma_bind_loopback()
3471 ret = rdma_query_gid(cma_dev->device, p, 0, &gid); in cma_bind_loopback()
3475 ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey); in cma_bind_loopback()
3480 (rdma_protocol_ib(cma_dev->device, p)) ? in cma_bind_loopback()
3486 cma_attach_to_dev(id_priv, cma_dev); in cma_bind_loopback()
3515 if (!status && !id_priv->cma_dev) { in addr_handler()
3554 if (!id_priv->cma_dev) { in cma_resolve_loopback()
3579 if (!id_priv->cma_dev) { in cma_resolve_ib_addr()
3966 if (id_priv->cma_dev) { in rdma_listen()
4049 if (id_priv->cma_dev) in rdma_bind_addr_dst()
4836 id_priv->cma_dev in cma_make_mc_event()
4839 id_priv->cma_dev->device)]; in cma_make_mc_event()
5001 gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num - in cma_iboe_join_multicast()
5002 rdma_start_port(id_priv->cma_dev->device)]; in cma_iboe_join_multicast()
5108 WARN_ON(id_priv->cma_dev->device != id->device); in rdma_leave_multicast()
5146 struct cma_device *cma_dev; in cma_netdev_callback() local
5157 list_for_each_entry(cma_dev, &dev_list, list) in cma_netdev_callback()
5158 list_for_each_entry(id_priv, &cma_dev->id_list, device_item) { in cma_netdev_callback()
5290 static void cma_process_remove(struct cma_device *cma_dev) in cma_process_remove() argument
5293 while (!list_empty(&cma_dev->id_list)) { in cma_process_remove()
5295 &cma_dev->id_list, struct rdma_id_private, device_item); in cma_process_remove()
5308 cma_dev_put(cma_dev); in cma_process_remove()
5309 wait_for_completion(&cma_dev->comp); in cma_process_remove()
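cma_process_remove() is where the refcount pair from the top of this listing pays off: it empties cma_dev->id_list, drops the initial reference set in cma_add_one(), and then sleeps on cma_dev->comp until the last outstanding cma_dev_put() completes it. A sketch along the lines of the matches, with the per-ID teardown reduced to a comment:

static void cma_process_remove(struct cma_device *cma_dev)
{
        mutex_lock(&lock);
        while (!list_empty(&cma_dev->id_list)) {
                struct rdma_id_private *id_priv = list_first_entry(
                        &cma_dev->id_list, struct rdma_id_private, device_item);

                list_del_init(&id_priv->device_item);
                mutex_unlock(&lock);
                /* ... deliver DEVICE_REMOVAL and destroy the ID outside the lock ... */
                mutex_lock(&lock);
        }
        mutex_unlock(&lock);

        cma_dev_put(cma_dev);                   /* drop the initial reference */
        wait_for_completion(&cma_dev->comp);    /* wait out everyone else's puts */
}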
5326 struct cma_device *cma_dev; in cma_add_one() local
5335 cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL); in cma_add_one()
5336 if (!cma_dev) in cma_add_one()
5339 cma_dev->device = device; in cma_add_one()
5340 cma_dev->default_gid_type = kcalloc(device->phys_port_cnt, in cma_add_one()
5341 sizeof(*cma_dev->default_gid_type), in cma_add_one()
5343 if (!cma_dev->default_gid_type) { in cma_add_one()
5348 cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt, in cma_add_one()
5349 sizeof(*cma_dev->default_roce_tos), in cma_add_one()
5351 if (!cma_dev->default_roce_tos) { in cma_add_one()
5360 cma_dev->default_gid_type[i - rdma_start_port(device)] = in cma_add_one()
5363 cma_dev->default_gid_type[i - rdma_start_port(device)] = in cma_add_one()
5365 cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0; in cma_add_one()
5368 init_completion(&cma_dev->comp); in cma_add_one()
5369 refcount_set(&cma_dev->refcount, 1); in cma_add_one()
5370 INIT_LIST_HEAD(&cma_dev->id_list); in cma_add_one()
5371 ib_set_client_data(device, &cma_client, cma_dev); in cma_add_one()
5374 list_add_tail(&cma_dev->list, &dev_list); in cma_add_one()
5376 ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy); in cma_add_one()
5386 list_del(&cma_dev->list); in cma_add_one()
5390 cma_process_remove(cma_dev); in cma_add_one()
5391 kfree(cma_dev->default_roce_tos); in cma_add_one()
5393 kfree(cma_dev->default_gid_type); in cma_add_one()
5396 kfree(cma_dev); in cma_add_one()
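cma_add_one() creates and publishes the cma_device: it sizes both per-port arrays from device->phys_port_cnt, seeds each port's defaults, initializes the completion and refcount, registers the pointer as client data, adds itself to dev_list, and attaches a listener for every wildcard-listening ID; the error labels unwind in reverse. A trimmed sketch (default-GID selection reduced to a comment; the name of the wildcard-listen list is an assumption):

static int cma_add_one(struct ib_device *device)
{
        struct rdma_id_private *id_priv, *to_destroy;
        struct cma_device *cma_dev;
        int ret;
        u32 i;

        cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL);
        if (!cma_dev)
                return -ENOMEM;

        cma_dev->device = device;
        cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
                                            sizeof(*cma_dev->default_gid_type),
                                            GFP_KERNEL);
        if (!cma_dev->default_gid_type) {
                ret = -ENOMEM;
                goto free_cma_dev;
        }
        cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt,
                                            sizeof(*cma_dev->default_roce_tos),
                                            GFP_KERNEL);
        if (!cma_dev->default_roce_tos) {
                ret = -ENOMEM;
                goto free_gid_type;
        }

        rdma_for_each_port (device, i) {
                /* ... pick default_gid_type[i - rdma_start_port(device)] from
                 *     roce_gid_type_mask_support(device, i) ... */
                cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0;
        }

        init_completion(&cma_dev->comp);
        refcount_set(&cma_dev->refcount, 1);    /* dropped by cma_process_remove() */
        INIT_LIST_HEAD(&cma_dev->id_list);
        ib_set_client_data(device, &cma_client, cma_dev);

        mutex_lock(&lock);
        list_add_tail(&cma_dev->list, &dev_list);
        list_for_each_entry(id_priv, &listen_any_list, listen_any_item) {
                ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
                if (ret)
                        goto free_listen;
        }
        mutex_unlock(&lock);
        return 0;

free_listen:
        list_del(&cma_dev->list);
        mutex_unlock(&lock);
        cma_process_remove(cma_dev);            /* also destroys to_destroy */
        kfree(cma_dev->default_roce_tos);
free_gid_type:
        kfree(cma_dev->default_gid_type);
free_cma_dev:
        kfree(cma_dev);
        return ret;
}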
5402 struct cma_device *cma_dev = client_data; in cma_remove_one() local
5407 list_del(&cma_dev->list); in cma_remove_one()
5410 cma_process_remove(cma_dev); in cma_remove_one()
5411 kfree(cma_dev->default_roce_tos); in cma_remove_one()
5412 kfree(cma_dev->default_gid_type); in cma_remove_one()
5413 kfree(cma_dev); in cma_remove_one()
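cma_remove_one() is the mirror image: fetch the cma_device back from the client data, unhook it from dev_list, let cma_process_remove() drain the IDs and wait out the refcount, then free the per-port arrays and the structure itself. Sketched from the matched lines:

static void cma_remove_one(struct ib_device *device, void *client_data)
{
        struct cma_device *cma_dev = client_data;

        mutex_lock(&lock);
        list_del(&cma_dev->list);
        mutex_unlock(&lock);

        cma_process_remove(cma_dev);
        kfree(cma_dev->default_roce_tos);
        kfree(cma_dev->default_gid_type);
        kfree(cma_dev);
}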