Searched refs:ib_dev (Results 1 – 25 of 100) sorted by relevance

/drivers/target/
target_core_iblock.c
63 if (!ib_dev) { in iblock_alloc_device()
69 ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug), in iblock_alloc_device()
71 if (!ib_dev->ibd_plug) in iblock_alloc_device()
76 return &ib_dev->dev; in iblock_alloc_device()
79 kfree(ib_dev); in iblock_alloc_device()
88 ib_dev->ibd_bd); in iblock_configure_unmap()
99 void *holder = ib_dev; in iblock_configure_device()
115 ib_dev->ibd_udev_path, ib_dev->ibd_exclusive); in iblock_configure_device()
191 kfree(ib_dev->ibd_plug); in iblock_dev_call_rcu()
192 kfree(ib_dev); in iblock_dev_call_rcu()
[all …]
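
The iblock hits follow the standard kernel pattern: the generic device is embedded in a driver-private struct, the alloc path hands back a pointer to the embedded member, and later callbacks recover the wrapper with container_of(). A minimal sketch of that shape, using illustrative struct and function names rather than the real target-core ones:

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <target/target_core_base.h>

/* Illustrative wrapper; the real iblock struct has more fields. */
struct my_ib_dev {
	struct se_device dev;	/* generic part handed back to the core */
	void *ibd_plug;		/* per-CPU plug array, as in the hits above */
};

static struct se_device *my_alloc_device(void)
{
	struct my_ib_dev *ib_dev = kzalloc(sizeof(*ib_dev), GFP_KERNEL);

	if (!ib_dev)
		return NULL;

	ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(void *), GFP_KERNEL);
	if (!ib_dev->ibd_plug) {
		kfree(ib_dev);
		return NULL;
	}

	return &ib_dev->dev;	/* the core only ever sees this member */
}

/* Callbacks recover the wrapper from the embedded member. */
static struct my_ib_dev *to_my_dev(struct se_device *dev)
{
	return container_of(dev, struct my_ib_dev, dev);
}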
/drivers/infiniband/core/
roce_gid_mgmt.c
87 if (!rdma_protocol_roce(ib_dev, port)) in roce_gid_type_mask_support()
110 ib_cache_gid_add(ib_dev, port, in update_gid()
114 ib_cache_gid_del(ib_dev, port, in update_gid()
283 struct ib_device *ib_dev, in update_gid_ip() argument
368 update_gid_ip(GID_ADD, ib_dev, port, ndev, in enum_netdev_ipv4_ips()
424 enum_netdev_ipv4_ips(ib_dev, port, ndev); in _add_netdev_ips()
426 enum_netdev_ipv6_ips(ib_dev, port, ndev); in _add_netdev_ips()
432 _add_netdev_ips(ib_dev, port, cookie); in add_netdev_ips()
499 _add_netdev_ips(ib_dev, port, ndev); in enum_all_gids_of_dev_cb()
529 if (rdma_protocol_roce(ib_dev, port)) { in rdma_roce_rescan_port()
[all …]
cache.c
128 event.device = ib_dev; in dispatch_gid_change_event()
574 attr->device = ib_dev; in __ib_cache_gid_add()
619 del_gid(ib_dev, port, table, ix); in _ib_cache_gid_del()
655 del_gid(ib_dev, port, table, ix); in ib_cache_gid_del_all_netdev_gids()
828 del_gid(ib_dev, port, table, i); in cleanup_gid_table_port()
885 rdma_for_each_port (ib_dev, p) { in gid_table_release_one()
886 release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid); in gid_table_release_one()
908 gid_table_release_one(ib_dev); in _gid_table_setup_one()
916 rdma_for_each_port (ib_dev, p) in gid_table_cleanup_one()
917 cleanup_gid_table_port(ib_dev, p, in gid_table_cleanup_one()
[all …]
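
cache.c walks the per-port GID tables with the rdma_for_each_port() iterator before releasing or cleaning each one. A hedged sketch of that teardown loop; release_one_table() is a stand-in for the file-local helper, and struct ib_gid_table is private to cache.c:

#include <rdma/ib_verbs.h>

/* Stand-in prototype for the cache.c-internal release helper. */
static void release_one_table(struct ib_device *ib_dev,
			      struct ib_gid_table *table);

static void gid_tables_release_all(struct ib_device *ib_dev)
{
	u32 p;

	/* rdma_for_each_port() visits every valid port number. */
	rdma_for_each_port(ib_dev, p)
		release_one_table(ib_dev, ib_dev->port_data[p].cache.gid);
}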
device.c
857 pdata->ib_dev = device; in alloc_port_data()
1540 ib_device_put(ib_dev); in __ib_unregister_device()
1555 disable_device(ib_dev); in __ib_unregister_device()
1559 free_netdevs(ib_dev); in __ib_unregister_device()
1562 device_del(&ib_dev->dev); in __ib_unregister_device()
1619 ib_device_put(ib_dev); in ib_unregister_device_and_put()
2259 if (xa_load(&devices, ib_dev->index) != ib_dev) in ib_device_set_netdev()
2274 if (!ib_dev->port_data) in free_netdevs()
2311 if (!ib_dev->port_data) in ib_device_get_netdev()
2321 res = ib_dev->ops.get_netdev(ib_dev, port); in ib_device_get_netdev()
[all …]
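
The device.c hits guard every netdev lookup twice: port_data must exist, and the optional get_netdev op is only called when the driver provides one. A simplified sketch of that shape (the real ib_device_get_netdev() also handles the cached-netdev path and reference counting):

#include <rdma/ib_verbs.h>

static struct net_device *netdev_for_port(struct ib_device *ib_dev, u32 port)
{
	/* Device not (or no longer) fully set up. */
	if (!ib_dev->port_data)
		return NULL;

	/* Drivers may supply their own lookup. */
	if (ib_dev->ops.get_netdev)
		return ib_dev->ops.get_netdev(ib_dev, port);

	return NULL;
}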
uverbs_std_types_cq.c
68 struct ib_device *ib_dev = attrs->context->device; in UVERBS_HANDLER() local
81 if ((!ib_dev->ops.create_cq && !ib_dev->ops.create_cq_umem) || !ib_dev->ops.destroy_cq) in UVERBS_HANDLER()
133 !ib_dev->ops.create_cq_umem) { in UVERBS_HANDLER()
138 umem = ib_umem_get(ib_dev, buffer_va, buffer_length, IB_ACCESS_LOCAL_WRITE); in UVERBS_HANDLER()
158 !ib_dev->ops.create_cq_umem) { in UVERBS_HANDLER()
163 umem_dmabuf = ib_umem_dmabuf_get_pinned(ib_dev, buffer_offset, buffer_length, in UVERBS_HANDLER()
172 !ib_dev->ops.create_cq) { in UVERBS_HANDLER()
177 cq = rdma_zalloc_drv_obj(ib_dev, ib_cq); in UVERBS_HANDLER()
184 cq->device = ib_dev; in UVERBS_HANDLER()
194 ret = umem ? ib_dev->ops.create_cq_umem(cq, &attr, umem, attrs) : in UVERBS_HANDLER()
[all …]
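
The CQ handler bails out early unless the driver implements one of the two create paths plus destroy, then allocates the driver object and routes to whichever op matches the request. A sketch mirroring the call shape above; the exact argument types are assumptions based on the visible calls:

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>

static int create_cq_sketch(struct ib_device *ib_dev,
			    const struct ib_cq_init_attr *attr,
			    struct ib_umem *umem,
			    struct uverbs_attr_bundle *attrs)
{
	struct ib_cq *cq;

	if ((!ib_dev->ops.create_cq && !ib_dev->ops.create_cq_umem) ||
	    !ib_dev->ops.destroy_cq)
		return -EOPNOTSUPP;

	/* Allocates the driver's CQ struct with the ib_cq embedded. */
	cq = rdma_zalloc_drv_obj(ib_dev, ib_cq);
	if (!cq)
		return -ENOMEM;

	cq->device = ib_dev;

	/* Route to the umem-based op when a buffer was supplied. */
	return umem ? ib_dev->ops.create_cq_umem(cq, attr, umem, attrs) :
		      ib_dev->ops.create_cq(cq, attr, attrs);
}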
uverbs_std_types_device.c
156 if (rdma_cap_opa_ah(ib_dev, port_num)) { in copy_port_attr_to_resp()
179 struct ib_device *ib_dev; in UVERBS_HANDLER() local
189 ib_dev = ucontext->device; in UVERBS_HANDLER()
192 if (!ib_dev->ops.query_port) in UVERBS_HANDLER()
246 struct ib_device *ib_dev; in UVERBS_HANDLER() local
253 ib_dev = ucontext->device; in UVERBS_HANDLER()
255 if (!ib_dev->ops.query_ucontext) in UVERBS_HANDLER()
318 struct ib_device *ib_dev; in UVERBS_HANDLER() local
347 ib_dev = ucontext->device; in UVERBS_HANDLER()
374 struct ib_device *ib_dev; in UVERBS_HANDLER() local
[all …]
uverbs_main.c
201 struct ib_device *ib_dev; in ib_uverbs_release_file() local
207 ib_dev = srcu_dereference(file->device->ib_dev, in ib_uverbs_release_file()
209 if (ib_dev && !ib_dev->ops.disassociate_ucontext) in ib_uverbs_release_file()
933 struct ib_device *ib_dev; in ib_uverbs_open() local
945 ib_dev = srcu_dereference(dev->ib_dev, in ib_uverbs_open()
947 if (!ib_dev) { in ib_uverbs_open()
1089 struct ib_device *ib_dev; in ibdev_show() local
1092 ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu); in ibdev_show()
1093 if (ib_dev) in ibdev_show()
1111 ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu); in abi_version_show()
[all …]
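
uverbs_main.c never touches the device pointer directly: it is protected by disassociate_srcu, so every reader takes the SRCU read lock, srcu_dereference()s, and treats NULL as an already-disassociated device. A minimal sketch of that access pattern; the predicate mirrors the check in ib_uverbs_release_file() above:

#include <linux/srcu.h>
#include <rdma/ib_verbs.h>

/* dev is the uverbs char-device wrapper; its ib_dev pointer and
 * disassociate_srcu member are the ones visible in the hits above. */
static bool device_still_attached(struct ib_uverbs_device *dev)
{
	struct ib_device *ib_dev;
	int srcu_key;
	bool ret;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
	ret = ib_dev && !ib_dev->ops.disassociate_ucontext;
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	return ret;
}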
uverbs_std_types_dm.c
56 struct ib_device *ib_dev = attrs->context->device; in UVERBS_HANDLER() local
60 if (!ib_dev->ops.alloc_dm) in UVERBS_HANDLER()
73 dm = ib_dev->ops.alloc_dm(ib_dev, attrs->context, &attr, attrs); in UVERBS_HANDLER()
77 dm->device = ib_dev; in UVERBS_HANDLER()
uverbs_std_types_mr.c
53 struct ib_device *ib_dev = pd->device; in UVERBS_HANDLER() local
60 if (!ib_dev->ops.advise_mr) in UVERBS_HANDLER()
93 struct ib_device *ib_dev = pd->device; in UVERBS_HANDLER() local
98 if (!ib_dev->ops.reg_dm_mr) in UVERBS_HANDLER()
119 ret = ib_check_mr_access(ib_dev, attr.access_flags); in UVERBS_HANDLER()
193 struct ib_device *ib_dev = pd->device; in UVERBS_HANDLER() local
200 if (!ib_dev->ops.reg_user_mr_dmabuf) in UVERBS_HANDLER()
236 ret = ib_check_mr_access(ib_dev, access_flags); in UVERBS_HANDLER()
278 struct ib_device *ib_dev = pd->device; in UVERBS_HANDLER() local
321 if (!ib_dev->ops.reg_user_mr_dmabuf) in UVERBS_HANDLER()
[all …]
uverbs_std_types_counters.c
60 struct ib_device *ib_dev = attrs->context->device; in UVERBS_HANDLER() local
69 if (!ib_dev->ops.create_counters) in UVERBS_HANDLER()
72 counters = rdma_zalloc_drv_obj(ib_dev, ib_counters); in UVERBS_HANDLER()
76 counters->device = ib_dev; in UVERBS_HANDLER()
81 ret = ib_dev->ops.create_counters(counters, attrs); in UVERBS_HANDLER()
core_priv.h
90 struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
93 void ib_enum_roce_netdev(struct ib_device *ib_dev,
129 void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port,
134 int ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
137 int ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
140 int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port,
146 unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u32 port);
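
core_priv.h is the core-internal contract these files share: GID cache manipulation is always device- and port-scoped. The listing truncates the trailing parameters; assuming the conventional gid/attr pair, the add/del dispatch seen in update_gid() above looks roughly like:

#include <rdma/ib_verbs.h>

/* Illustrative; mirrors the two calls in the roce_gid_mgmt.c hits. */
enum gid_op_type { GID_DEL, GID_ADD };

static void update_gid_sketch(enum gid_op_type op, struct ib_device *ib_dev,
			      u32 port, union ib_gid *gid,
			      struct ib_gid_attr *attr)
{
	if (op == GID_ADD)
		ib_cache_gid_add(ib_dev, port, gid, attr);	/* params assumed */
	else
		ib_cache_gid_del(ib_dev, port, gid, attr);	/* params assumed */
}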
uverbs_cmd.c
213 ib_dev = srcu_dereference(ufile->device->ib_dev, in ib_alloc_ucontext()
215 if (!ib_dev) in ib_alloc_ucontext()
461 pd->device = ib_dev; in ib_uverbs_alloc_pd()
626 &ib_dev); in ib_uverbs_open_xrcd()
955 mw->device = ib_dev; in ib_uverbs_alloc_mw()
1036 &ib_dev); in create_cq()
1330 &ib_dev); in create_qp()
1591 &ib_dev); in ib_uverbs_open_qp()
2929 &ib_dev); in ib_uverbs_ex_create_wq()
3401 &ib_dev); in __uverbs_create_xsrq()
[all …]
/drivers/infiniband/hw/mana/
device.c
83 for (i = 0; i < dev->ib_dev.phys_port_cnt; i++) in mana_ib_netdev_event()
119 dev = ib_alloc_device(mana_ib_dev, ib_dev); in mana_ib_probe()
124 dev->ib_dev.node_type = RDMA_NODE_IB_CA; in mana_ib_probe()
126 dev->ib_dev.dev.parent = gc->dev; in mana_ib_probe()
131 dev->ib_dev.phys_port_cnt = 1; in mana_ib_probe()
154 dev->ib_dev.phys_port_cnt = mc->num_ports; in mana_ib_probe()
160 ibdev_err(&dev->ib_dev, in mana_ib_probe()
185 dev->ib_dev.phys_port_cnt = mc->num_ports; in mana_ib_probe()
225 ib_dealloc_device(&dev->ib_dev); in mana_ib_probe()
233 ib_unregister_device(&dev->ib_dev); in mana_ib_remove()
[all …]
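
mana's probe path shows the canonical driver lifecycle: ib_alloc_device() allocates the driver struct with the generic ib_device embedded at the named member, the common fields are filled in, and teardown pairs ib_unregister_device() with ib_dealloc_device(). A trimmed sketch; registration and all mana-specific setup are omitted:

#include <rdma/ib_verbs.h>

static struct mana_ib_dev *probe_sketch(struct device *parent, u32 nports)
{
	struct mana_ib_dev *dev;

	/* Macro form: ib_alloc_device(driver_struct, ib_dev_member). */
	dev = ib_alloc_device(mana_ib_dev, ib_dev);
	if (!dev)
		return NULL;

	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.dev.parent = parent;
	dev->ib_dev.phys_port_cnt = nports;

	return dev;
}

static void remove_sketch(struct mana_ib_dev *dev)
{
	ib_unregister_device(&dev->ib_dev);
	ib_dealloc_device(&dev->ib_dev);
}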
qp.c
119 ibdev_dbg(&mdev->ib_dev, in mana_ib_create_qp_rss()
126 ibdev_dbg(&mdev->ib_dev, in mana_ib_create_qp_rss()
133 ibdev_dbg(&mdev->ib_dev, in mana_ib_create_qp_rss()
141 ibdev_dbg(&mdev->ib_dev, in mana_ib_create_qp_rss()
148 ibdev_dbg(&mdev->ib_dev, in mana_ib_create_qp_rss()
210 ibdev_dbg(&mdev->ib_dev, in mana_ib_create_qp_rss()
236 ibdev_dbg(&mdev->ib_dev, in mana_ib_create_qp_rss()
291 ibdev_dbg(&mdev->ib_dev, in mana_ib_create_qp_raw()
297 ibdev_dbg(&mdev->ib_dev, in mana_ib_create_qp_raw()
304 ibdev_dbg(&mdev->ib_dev, in mana_ib_create_qp_raw()
[all …]
main.c
15 ndev = mana_ib_get_netdev(&dev->ib_dev, port); in mana_ib_uncfg_vport()
36 ndev = mana_ib_get_netdev(&dev->ib_dev, port); in mana_ib_cfg_vport()
43 ibdev_dbg(&dev->ib_dev, in mana_ib_cfg_vport()
80 dev = container_of(ibdev, struct mana_ib_dev, ib_dev); in mana_ib_alloc_pd()
94 ibdev_dbg(&dev->ib_dev, in mana_ib_alloc_pd()
105 ibdev_dbg(&dev->ib_dev, "pd_handle 0x%llx pd_id %d\n", in mana_ib_alloc_pd()
134 ibdev_dbg(&dev->ib_dev, in mana_ib_dealloc_pd()
325 ibdev_dbg(&dev->ib_dev, in mana_ib_gd_first_dma_region()
358 ibdev_dbg(&dev->ib_dev, in mana_ib_gd_add_dma_region()
690 ibdev_err(&dev->ib_dev, in mana_ib_gd_query_adapter_caps()
[all …]
wq.c
13 container_of(pd->device, struct mana_ib_dev, ib_dev); in mana_ib_create_wq()
23 ibdev_dbg(&mdev->ib_dev, in mana_ib_create_wq()
32 ibdev_dbg(&mdev->ib_dev, "ucmd wq_buf_addr 0x%llx\n", ucmd.wq_buf_addr); in mana_ib_create_wq()
36 ibdev_dbg(&mdev->ib_dev, in mana_ib_create_wq()
62 struct ib_device *ib_dev = ibwq->device; in mana_ib_destroy_wq() local
65 mdev = container_of(ib_dev, struct mana_ib_dev, ib_dev); in mana_ib_destroy_wq()
/drivers/infiniband/hw/usnic/
usnic_ib_main.c
150 ib_event.device = &us_ibdev->ib_dev; in usnic_ib_handle_usdev_event()
158 dev_name(&us_ibdev->ib_dev.dev)); in usnic_ib_handle_usdev_event()
161 dev_name(&us_ibdev->ib_dev.dev), in usnic_ib_handle_usdev_event()
167 ib_event.device = &us_ibdev->ib_dev; in usnic_ib_handle_usdev_event()
176 dev_name(&us_ibdev->ib_dev.dev), in usnic_ib_handle_usdev_event()
188 dev_name(&us_ibdev->ib_dev.dev)); in usnic_ib_handle_usdev_event()
276 ib_event.device = &us_ibdev->ib_dev; in usnic_ib_handle_inet_event()
286 ib_event.device = &us_ibdev->ib_dev; in usnic_ib_handle_inet_event()
293 dev_name(&us_ibdev->ib_dev.dev)); in usnic_ib_handle_inet_event()
453 dev_name(&us_ibdev->ib_dev.dev), in usnic_ib_device_add()
[all …]
usnic_ib_sysfs.c
52 rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev); in board_id_show()
70 rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev); in config_show()
87 dev_name(&us_ibdev->ib_dev.dev), in config_show()
109 dev_name(&us_ibdev->ib_dev.dev)); in config_show()
122 rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev); in iface_show()
132 rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev); in max_vf_show()
142 rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev); in qp_per_vf_show()
156 rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev); in cq_per_vf_show()
255 kobject_get(&us_ibdev->ib_dev.dev.kobj); in usnic_ib_sysfs_register_usdev()
257 &us_ibdev->ib_dev.dev.kobj); in usnic_ib_sysfs_register_usdev()
[all …]
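
Every one of these sysfs show callbacks starts the same way: rdma_device_to_drv_device() maps the generic struct device back to the driver wrapper via the embedded ib_dev member. A sketch of that shape; the printed value is illustrative, not what board_id actually reports:

#include <linux/sysfs.h>
#include <rdma/ib_verbs.h>

static ssize_t board_id_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct usnic_ib_dev *us_ibdev =
		rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);

	return sysfs_emit(buf, "%s\n", dev_name(&us_ibdev->ib_dev.dev));
}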
/drivers/infiniband/hw/hns/
hns_roce_main.c
168 struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); in hns_roce_query_device()
223 struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); in hns_roce_query_port()
516 ret = ib_query_port(ib_dev, port_num, &attr); in hns_roce_port_immutable()
622 ib_unregister_device(&hr_dev->ib_dev); in hns_roce_unregister_device()
709 struct ib_device *ib_dev = NULL; in hns_roce_register_device() local
716 ib_dev = &hr_dev->ib_dev; in hns_roce_register_device()
718 ib_dev->node_type = RDMA_NODE_IB_CA; in hns_roce_register_device()
719 ib_dev->dev.parent = dev; in hns_roce_register_device()
721 ib_dev->phys_port_cnt = hr_dev->caps.num_ports; in hns_roce_register_device()
744 ib_set_device_ops(ib_dev, &hns_roce_dev_ops); in hns_roce_register_device()
[all …]
hns_roce_pd.c
47 struct ib_device *ib_dev = ibpd->device; in hns_roce_alloc_pd() local
48 struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); in hns_roce_alloc_pd()
57 ibdev_err(ib_dev, "failed to alloc pd, id = %d.\n", id); in hns_roce_alloc_pd()
69 ibdev_err(ib_dev, "failed to copy to udata, ret = %d\n", ret); in hns_roce_alloc_pd()
94 ibdev_err(&hr_dev->ib_dev, "failed to alloc uar id(%d).\n", id); in hns_roce_uar_alloc()
129 ibdev_err(&hr_dev->ib_dev, "failed to alloc xrcdn(%d).\n", id); in hns_roce_xrcd_alloc()
/drivers/infiniband/ulp/isert/
ib_isert.c
105 struct ib_device *ib_dev = device->ib_device; in isert_create_qp() local
149 struct ib_device *ib_dev = device->ib_device; in isert_alloc_rx_descriptors() local
166 if (ib_dma_mapping_error(ib_dev, dma_addr)) in isert_alloc_rx_descriptors()
215 struct ib_device *ib_dev = device->ib_device; in isert_create_device_ib_res() local
219 ib_dev->attrs.max_send_sge, ib_dev->attrs.max_recv_sge); in isert_create_device_ib_res()
222 device->pd = ib_alloc_pd(ib_dev, 0); in isert_create_device_ib_res()
332 struct ib_device *ib_dev) in isert_alloc_login_buf() argument
1524 isert_unmap_tx_desc(tx_desc, ib_dev); in isert_completion_put()
1678 ib_dev, false); in isert_do_control_comp()
1703 isert_unmap_tx_desc(tx_desc, ib_dev); in isert_login_send_done()
[all …]
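
isert's rx-descriptor setup maps each buffer through the ib_dma_* wrappers and backs out on ib_dma_mapping_error(), the check visible in the hits above. A minimal sketch of that map-and-verify step:

#include <linux/dma-direction.h>
#include <rdma/ib_verbs.h>

static int map_rx_buf(struct ib_device *ib_dev, void *buf, size_t len,
		      u64 *dma_addr)
{
	*dma_addr = ib_dma_map_single(ib_dev, buf, len, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ib_dev, *dma_addr))
		return -ENOMEM;	/* caller frees buf and unwinds */

	return 0;
}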
/drivers/infiniband/hw/mlx4/
mad.c
607 ib_dma_sync_single_for_cpu(&dev->ib_dev, in mlx4_ib_send_to_slave()
1180 struct mlx4_ib_dev *dev = ew->ib_dev; in handle_port_mgmt_change_event()
1284 event.device = &dev->ib_dev; in mlx4_ib_dispatch_event()
1419 ib_dma_sync_single_for_cpu(&dev->ib_dev, in mlx4_ib_send_to_wire()
1568 ah.ibah.device = ctx->ib_dev; in mlx4_ib_multiplex_mad()
1658 ib_dma_map_single(ctx->ib_dev, in mlx4_ib_alloc_pv_bufs()
1662 if (ib_dma_mapping_error(ctx->ib_dev, in mlx4_ib_alloc_pv_bufs()
1965 ctx->ib_dev = &dev->ib_dev; in alloc_pv_object()
2015 ctx->cq = ib_create_cq(ctx->ib_dev, in create_pv_resources()
2024 ctx->pd = ib_alloc_pd(ctx->ib_dev, 0); in create_pv_resources()
[all …]
/drivers/infiniband/sw/rxe/
rxe_net.c
151 ib_device_put(&rxe->ib_dev); in rxe_udp_encap_recv()
374 ib_device_put(&rxe->ib_dev); in rxe_skb_tx_dtor()
530 ndev = rxe_ib_device_get_netdev(&rxe->ib_dev); in rxe_parent_name()
544 rxe = ib_alloc_device(rxe_dev, ib_dev); in rxe_net_add()
548 ib_mark_name_assigned_by_user(&rxe->ib_dev); in rxe_net_add()
552 ib_dealloc_device(&rxe->ib_dev); in rxe_net_add()
564 ev.device = &rxe->ib_dev; in rxe_port_event()
575 dev_info(&rxe->ib_dev.dev, "set active\n"); in rxe_port_up()
583 dev_info(&rxe->ib_dev.dev, "set down\n"); in rxe_port_down()
614 ib_unregister_device_queued(&rxe->ib_dev); in rxe_notify()
[all …]
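
rxe_port_up()/rxe_port_down() report link changes by filling a struct ib_event that names the embedded ib_dev and handing it to the core, as rxe_port_event() does above. A short sketch, assuming rxe's single port:

#include <rdma/ib_verbs.h>

static void port_event_sketch(struct rxe_dev *rxe, enum ib_event_type type)
{
	struct ib_event ev;

	ev.device = &rxe->ib_dev;
	ev.element.port_num = 1;	/* rxe exposes a single port */
	ev.event = type;

	ib_dispatch_event(&ev);
}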
/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_main.c
152 ib_event.device = &dev->ib_dev; in pvrdma_dispatch_event()
249 dev->ib_dev.num_comp_vectors = 1; in pvrdma_register_device()
250 dev->ib_dev.dev.parent = &dev->pdev->dev; in pvrdma_register_device()
252 dev->ib_dev.node_type = RDMA_NODE_IB_CA; in pvrdma_register_device()
701 ib_device_set_netdev(&dev->ib_dev, NULL, 1); in pvrdma_netdevice_event_handle()
722 event, dev_name(&dev->ib_dev.dev)); in pvrdma_netdevice_event_handle()
780 dev = ib_alloc_device(pvrdma_dev, ib_dev); in pvrdma_pci_probe()
1030 ib_unregister_device(&dev->ib_dev); in pvrdma_pci_probe()
1063 ib_dealloc_device(&dev->ib_dev); in pvrdma_pci_probe()
1085 ib_unregister_device(&dev->ib_dev); in pvrdma_pci_remove()
[all …]
/drivers/infiniband/hw/mlx5/
ib_rep.c
26 return ib_device_set_netdev(&ibdev->ib_dev, ndev, vport_index + 1); in mlx5_ib_set_vport_rep()
91 ibdev = ib_alloc_device_with_net(mlx5_ib_dev, ib_dev, in mlx5_ib_vport_rep_load()
108 ibdev->ib_dev.phys_port_cnt = num_ports; in mlx5_ib_vport_rep_load()
109 ret = ib_device_set_netdev(&ibdev->ib_dev, in mlx5_ib_vport_rep_load()
129 ib_dealloc_device(&ibdev->ib_dev); in mlx5_ib_vport_rep_load()
168 ib_device_set_netdev(&dev->ib_dev, NULL, vport_index + 1); in mlx5_ib_vport_rep_unload()

Completed in 75 milliseconds
