
Searched refs:peer_dev (Results 1 – 11 of 11) sorted by relevance

/drivers/infiniband/hw/mlx5/
ib_rep.c
33 struct mlx5_core_dev *peer_dev; in mlx5_ib_num_ports_update() local
36 mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) { in mlx5_ib_num_ports_update()
37 u32 peer_num_ports = mlx5_eswitch_get_total_vports(peer_dev); in mlx5_ib_num_ports_update()
39 if (mlx5_lag_is_mpesw(peer_dev)) in mlx5_ib_num_ports_update()
53 struct mlx5_core_dev *peer_dev; in mlx5_ib_vport_rep_load() local
71 mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) { in mlx5_ib_vport_rep_load()
72 u32 peer_n_ports = mlx5_eswitch_get_total_vports(peer_dev); in mlx5_ib_vport_rep_load()
74 if (mlx5_lag_is_master(peer_dev)) in mlx5_ib_vport_rep_load()
75 lag_master = peer_dev; in mlx5_ib_vport_rep_load()
80 if (mlx5_get_dev_index(peer_dev) < mlx5_get_dev_index(dev)) in mlx5_ib_vport_rep_load()
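Both hits walk the LAG peers of a device with the mlx5_lag_for_each_peer_mdev()
iterator and size the port array from each peer's vport count. A minimal sketch
of that pattern, assuming kernel context; count_peer_ports() is a hypothetical
helper name, and the MPESW adjustment is an assumption since the hit truncates
that branch:

    #include <linux/mlx5/driver.h>
    #include <linux/mlx5/eswitch.h>

    /* Hypothetical helper: count the vports contributed by all LAG peers
     * of dev, mirroring the loop in mlx5_ib_num_ports_update(). */
    static u32 count_peer_ports(struct mlx5_core_dev *dev)
    {
            struct mlx5_core_dev *peer_dev;
            u32 ports = 0;
            int i;

            mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) {
                    u32 peer_num_ports = mlx5_eswitch_get_total_vports(peer_dev);

                    if (mlx5_lag_is_mpesw(peer_dev))
                            peer_num_ports = 1; /* assumption: hit truncates this branch */
                    ports += peer_num_ports;
            }
            return ports;
    }

The hit at ib_rep.c:74-80 shows the same loop also electing a LAG master via
mlx5_lag_is_master() and ordering devices by mlx5_get_dev_index().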
/drivers/net/ethernet/mellanox/mlx5/core/lib/
clock.c
1350 struct mlx5_core_dev *peer_dev, *next = NULL; in mlx5_shared_clock_register() local
1360 mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) { in mlx5_shared_clock_register()
1361 if (peer_dev->clock) { in mlx5_shared_clock_register()
1362 next = peer_dev; in mlx5_shared_clock_register()
1384 struct mlx5_core_dev *peer_dev, *next = NULL; in mlx5_shared_clock_unregister() local
1389 mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) { in mlx5_shared_clock_unregister()
1390 if (peer_dev->clock && peer_dev != mdev) { in mlx5_shared_clock_unregister()
1391 next = peer_dev; in mlx5_shared_clock_unregister()
1467 struct mlx5_core_dev *peer_dev, *next = NULL; in mlx5_clock_unload() local
1481 if (peer_dev->clock && peer_dev != mdev) { in mlx5_clock_unload()
[all …]
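All three clock hits share one scan: iterate the peers registered on the same
devcom component and remember a peer that already owns the shared clock. A
sketch of that walk, assembled from the hits; it assumes the caller holds the
devcom component lock, as the non-RCU peer iterators appear to require:

    struct mlx5_devcom_comp_dev *pos;
    struct mlx5_core_dev *peer_dev, *next = NULL;

    /* Find a peer other than ourselves that already has a clock,
     * e.g. to hand the shared clock over on unregister. */
    mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
            if (peer_dev->clock && peer_dev != mdev) {
                    next = peer_dev;
                    break;
            }
    }

The register-side hit at clock.c:1361 uses the same loop without the
peer_dev != mdev exclusion, since the device is not yet on the component.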
/drivers/net/netdevsim/
netdev.c
43 struct net_device *peer_dev; in nsim_start_peer_tx_queue() local
55 peer_dev = peer_ns->netdev; in nsim_start_peer_tx_queue()
56 if (dev->real_num_tx_queues != peer_dev->num_rx_queues) in nsim_start_peer_tx_queue()
59 txq = netdev_get_tx_queue(peer_dev, idx); in nsim_start_peer_tx_queue()
115 struct net_device *peer_dev; in nsim_start_xmit() local
130 peer_dev = peer_ns->netdev; in nsim_start_xmit()
132 if (rxq >= peer_dev->num_rx_queues) in nsim_start_xmit()
133 rxq = rxq % peer_dev->num_rx_queues; in nsim_start_xmit()
136 cfg = peer_dev->cfg; in nsim_start_xmit()
144 if (unlikely(nsim_forward_skb(dev, peer_dev, skb, rq) == NET_RX_DROP)) in nsim_start_xmit()
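netdevsim transmits by injecting the skb into its paired device's rx queue;
the hits at netdev.c:132-133 clamp the queue index when the peer has fewer rx
queues. A sketch of that mapping, assuming kernel context; nsim_peer_rxq() is
a hypothetical helper name:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Map our tx queue onto a valid rx queue of the peer, wrapping
     * around when the peer exposes fewer rx queues than we have
     * tx queues. */
    static u16 nsim_peer_rxq(struct sk_buff *skb, struct net_device *peer_dev)
    {
            u16 rxq = skb_get_queue_mapping(skb);

            if (rxq >= peer_dev->num_rx_queues)
                    rxq = rxq % peer_dev->num_rx_queues;
            return rxq;
    }

The companion check at netdev.c:56 refuses to wake peer tx queues unless the
tx/rx queue counts match exactly, so the wrap-around only matters on xmit.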
/drivers/gpu/drm/amd/amdkfd/
kfd_topology.c
1315 struct kfd_topology_device *peer_dev; in kfd_fill_iolink_non_crat_info() local
1324 peer_dev = kfd_topology_device_by_proximity_domain( in kfd_fill_iolink_non_crat_info()
1327 if (!peer_dev) in kfd_fill_iolink_non_crat_info()
1331 if (!peer_dev->gpu && in kfd_fill_iolink_non_crat_info()
1339 peer_dev->node_props.hive_id = dev->node_props.hive_id; in kfd_fill_iolink_non_crat_info()
1342 list_for_each_entry(inbound_link, &peer_dev->io_link_props, in kfd_fill_iolink_non_crat_info()
1348 kfd_set_iolink_no_atomics(peer_dev, dev, inbound_link); in kfd_fill_iolink_non_crat_info()
1349 kfd_set_iolink_non_coherent(peer_dev, link, inbound_link); in kfd_fill_iolink_non_crat_info()
1358 peer_dev = kfd_topology_device_by_proximity_domain( in kfd_fill_iolink_non_crat_info()
1361 if (!peer_dev) in kfd_fill_iolink_non_crat_info()
[all …]
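Here peer_dev is the other end of an I/O link: the code resolves the link
target to a kfd_topology_device by proximity domain, then walks the peer's
inbound links back to this node. A simplified sketch of that fix-up; the
lookup argument, the list member name, and the match condition are assumptions
since the hits truncate them:

    struct kfd_topology_device *peer_dev;
    struct kfd_iolink_properties *inbound_link;

    peer_dev = kfd_topology_device_by_proximity_domain(link->node_to);
    if (!peer_dev)
            return;

    /* Propagate atomics/coherency restrictions onto every inbound
     * link that points back at us. */
    list_for_each_entry(inbound_link, &peer_dev->io_link_props, list) {
            if (inbound_link->node_to != link->node_from)
                    continue;
            kfd_set_iolink_no_atomics(peer_dev, dev, inbound_link);
            kfd_set_iolink_non_coherent(peer_dev, link, inbound_link);
    }

The hit at kfd_topology.c:1339 additionally copies the hive_id onto CPU-only
peers so the whole hive is labeled consistently.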
kfd_crat.c
2241 struct kfd_topology_device *peer_dev; in kfd_create_vcrat_image_gpu() local
2359 peer_dev = kfd_topology_device_by_proximity_domain_no_lock(nid); in kfd_create_vcrat_image_gpu()
2360 if (!peer_dev->gpu) in kfd_create_vcrat_image_gpu()
2362 if (peer_dev->gpu->kfd->hive_id != kdev->kfd->hive_id) in kfd_create_vcrat_image_gpu()
2364 if (!amdgpu_xgmi_get_is_sharing_enabled(kdev->adev, peer_dev->gpu->adev)) in kfd_create_vcrat_image_gpu()
2370 &avail_size, kdev, peer_dev->gpu, in kfd_create_vcrat_image_gpu()
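When building the virtual CRAT for a GPU, a peer node only gets an XGMI iolink
if it is itself a GPU, sits in the same XGMI hive, and has sharing enabled.
Reassembled from the hits above, assuming the truncated lines continue to the
next loop iteration:

    peer_dev = kfd_topology_device_by_proximity_domain_no_lock(nid);
    if (!peer_dev->gpu)
            continue;
    if (peer_dev->gpu->kfd->hive_id != kdev->kfd->hive_id)
            continue;
    if (!amdgpu_xgmi_get_is_sharing_enabled(kdev->adev, peer_dev->gpu->adev))
            continue;
    /* ...emit the XGMI link; the helper call at kfd_crat.c:2370 passes
     * &avail_size, kdev, peer_dev->gpu, ... (name not shown in the hit). */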
/drivers/net/ethernet/mellanox/mlx5/core/
eswitch_offloads.c
1127 struct mlx5_core_dev *peer_dev, in peer_miss_rules_setup() argument
1145 MLX5_CAP_GEN(peer_dev, vhca_id)); in peer_miss_rules_setup()
1196 if (!MLX5_VPORT_MANAGER(peer_dev) && in esw_add_fdb_peer_miss_rules()
1230 if (mlx5_ecpf_vport_exists(peer_dev)) { in esw_add_fdb_peer_miss_rules()
1243 mlx5_core_max_vfs(peer_dev)) { in esw_add_fdb_peer_miss_rules()
1273 pfindex = mlx5_get_dev_index(peer_dev); in esw_add_fdb_peer_miss_rules()
1294 mlx5_core_max_vfs(peer_dev)) { in esw_add_fdb_peer_miss_rules()
1299 if (mlx5_ecpf_vport_exists(peer_dev)) { in esw_add_fdb_peer_miss_rules()
1317 struct mlx5_core_dev *peer_dev) in esw_del_fdb_peer_miss_rules() argument
1336 mlx5_core_max_vfs(peer_dev)) in esw_del_fdb_peer_miss_rules()
[all …]
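The peer miss rules steer packets arriving from the peer eswitch by matching
on the peer's vhca_id, read out of the device capabilities at
eswitch_offloads.c:1145. A sketch of that match setup, assuming a
mlx5_flow_spec allocated by the caller; the misc-parameter field name is an
assumption based on the standard fte_match_set_misc layout:

    void *misc;

    /* Mask: match on the source eswitch owner's vhca_id. */
    misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
    MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_eswitch_owner_vhca_id);

    /* Value: the peer device's vhca_id from its general caps. */
    misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
    MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
             MLX5_CAP_GEN(peer_dev, vhca_id));

    spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

The surrounding hits show the rules installed per peer vport class: the ECPF
if mlx5_ecpf_vport_exists(peer_dev), then one rule per VF up to
mlx5_core_max_vfs(peer_dev), indexed by mlx5_get_dev_index(peer_dev).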
fs_cmd.c
248 struct mlx5_core_dev *peer_dev; in mlx5_cmd_update_root_ft() local
251 mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) { in mlx5_cmd_update_root_ft()
252 err = mlx5_cmd_set_slave_root_fdb(dev, peer_dev, !disconnect, in mlx5_cmd_update_root_ft()
255 mlx5_lag_for_each_peer_mdev(dev, peer_dev, j) { in mlx5_cmd_update_root_ft()
257 mlx5_cmd_set_slave_root_fdb(dev, peer_dev, 1, in mlx5_cmd_update_root_ft()
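This hit is an all-or-nothing update across LAG peers: connect every peer's
slave FDB root, and on failure re-walk the peers already switched and restore
them (the second loop at fs_cmd.c:255-257). A hedged sketch of the shape;
mlx5_cmd_set_slave_root_fdb() is static to fs_cmd.c, both trailing arguments
are truncated in the hits, so ft->id and old_id are stand-in assumptions:

    mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) {
            err = mlx5_cmd_set_slave_root_fdb(dev, peer_dev, !disconnect,
                                              ft->id);
            if (err) {
                    /* Unwind only the peers updated before the failure. */
                    mlx5_lag_for_each_peer_mdev(dev, peer_dev, j) {
                            if (j >= i)
                                    break;
                            mlx5_cmd_set_slave_root_fdb(dev, peer_dev, 1,
                                                        old_id);
                    }
                    break;
            }
    }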
dev.c
565 bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev) in mlx5_same_hw_devs() argument
570 psystem_guid = mlx5_query_nic_system_image_guid(peer_dev); in mlx5_same_hw_devs()
mlx5_core.h
462 bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev);
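mlx5_same_hw_devs() decides whether two mlx5 devices are functions of the same
physical HCA by comparing their NIC system image GUIDs. A sketch consistent
with the hit at dev.c:570; the dev-side query and the final comparison are
filled in as assumptions:

    #include <linux/mlx5/driver.h>
    #include <linux/mlx5/vport.h>

    bool mlx5_same_hw_devs(struct mlx5_core_dev *dev,
                           struct mlx5_core_dev *peer_dev)
    {
            u64 fsystem_guid, psystem_guid;

            fsystem_guid = mlx5_query_nic_system_image_guid(dev);
            psystem_guid = mlx5_query_nic_system_image_guid(peer_dev);

            /* Same HW only if both GUIDs are valid and equal. */
            return fsystem_guid && psystem_guid && fsystem_guid == psystem_guid;
    }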
/drivers/net/
veth.c
906 struct net_device *peer_dev; in veth_xdp_rcv() local
911 peer_dev = rcu_dereference_check(priv->peer, rcu_read_lock_bh_held()); in veth_xdp_rcv()
912 peer_txq = peer_dev ? netdev_get_tx_queue(peer_dev, queue_idx) : NULL; in veth_xdp_rcv()
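veth looks up its paired device under RCU; rcu_dereference_check() with
rcu_read_lock_bh_held() documents that the XDP receive path may run in BH
context rather than a plain rcu_read_lock() section. The pattern from the hit,
with the NULL check covering pair teardown (priv and queue_idx are context
from the surrounding function):

    struct net_device *peer_dev;
    struct netdev_queue *peer_txq;

    /* priv->peer is RCU-protected; the peer can vanish while the
     * pair is being dismantled, hence the NULL check. */
    peer_dev = rcu_dereference_check(priv->peer, rcu_read_lock_bh_held());
    peer_txq = peer_dev ? netdev_get_tx_queue(peer_dev, queue_idx) : NULL;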
/drivers/net/ethernet/mellanox/mlx5/core/lag/
lag.c
1681 struct mlx5_core_dev *peer_dev = NULL; in mlx5_lag_get_next_peer_mdev() local
1703 peer_dev = ldev->pf[idx].dev; in mlx5_lag_get_next_peer_mdev()
1707 return peer_dev; in mlx5_lag_get_next_peer_mdev()
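mlx5_lag_get_next_peer_mdev() is the backend behind the
mlx5_lag_for_each_peer_mdev() loops seen throughout these results: it advances
a cursor over the LAG device array (ldev->pf[idx].dev at lag.c:1703) and
returns the next peer, or NULL when exhausted. A plausible expansion of the
iterator on top of it; for_each_lag_peer and the cursor-by-pointer signature
are assumptions, as the real macro lives in the mlx5 headers:

    #define for_each_lag_peer(dev, peer, i)                            \
            for (i = 0, peer = mlx5_lag_get_next_peer_mdev(dev, &i);   \
                 peer;                                                 \
                 peer = mlx5_lag_get_next_peer_mdev(dev, &i))

Writing the loop as a NULL-terminated cursor keeps callers free of any
knowledge of the LAG array size or which slots are currently populated.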
