Lines Matching refs:hive — cross-reference listing for the identifier "hive" in the amdgpu XGMI driver (drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c). Each entry gives the source line number, the matching source line, the enclosing function, and whether "hive" is a local variable or a function argument there. Multi-line statements are truncated at the matching line, so some calls appear without their trailing arguments.

404 struct amdgpu_hive_info *hive = container_of( in amdgpu_xgmi_show_attrs() local
408 return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id); in amdgpu_xgmi_show_attrs()
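
Taken together, lines 404 and 408 are the per-hive sysfs show callback. The hive's kobject is embedded in struct amdgpu_hive_info, so container_of() walks back from the kobject sysfs passes in to the hive that owns it. A minimal sketch of the reassembled callback, assuming the standard kobject attribute signature (the attr argument, unused here, selects which field to print in the full function):

    static ssize_t amdgpu_xgmi_show_attrs(struct kobject *kobj,
                                          struct attribute *attr, char *buf)
    {
            /* kobj is embedded in the hive; recover the container */
            struct amdgpu_hive_info *hive = container_of(
                    kobj, struct amdgpu_hive_info, kobj);

            /* hive_id is a 64-bit id; one value per sysfs file */
            return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);
    }
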
415 struct amdgpu_hive_info *hive = container_of( in amdgpu_xgmi_hive_release() local
418 amdgpu_reset_put_reset_domain(hive->reset_domain); in amdgpu_xgmi_hive_release()
419 hive->reset_domain = NULL; in amdgpu_xgmi_hive_release()
421 mutex_destroy(&hive->hive_lock); in amdgpu_xgmi_hive_release()
422 kfree(hive); in amdgpu_xgmi_hive_release()
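
Lines 415-422 are the hive kobject's release callback: when the last kobject_put() drops the reference count to zero, it releases the hive's reset domain, destroys the hive lock, and frees the structure. A sketch of the whole callback, assuming it is wired up through the hive's kobj_type in the usual way:

    static void amdgpu_xgmi_hive_release(struct kobject *kobj)
    {
            struct amdgpu_hive_info *hive = container_of(
                    kobj, struct amdgpu_hive_info, kobj);

            /* drop the hive's reference on its reset domain */
            amdgpu_reset_put_reset_domain(hive->reset_domain);
            hive->reset_domain = NULL;

            mutex_destroy(&hive->hive_lock);
            kfree(hive);
    }
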
564 struct amdgpu_hive_info *hive) in amdgpu_xgmi_sysfs_add_dev_info() argument
605 if (hive->kobj.parent != (&adev->dev->kobj)) { in amdgpu_xgmi_sysfs_add_dev_info()
606 ret = sysfs_create_link(&adev->dev->kobj, &hive->kobj, in amdgpu_xgmi_sysfs_add_dev_info()
614 sprintf(node, "node%d", atomic_read(&hive->number_devices)); in amdgpu_xgmi_sysfs_add_dev_info()
616 ret = sysfs_create_link(&hive->kobj, &adev->dev->kobj, node); in amdgpu_xgmi_sysfs_add_dev_info()
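
amdgpu_xgmi_sysfs_add_dev_info() (lines 564-616) links a device and its hive together in sysfs, in both directions. The listing truncates sysfs_create_link()'s third argument on line 606; "xgmi_hive_info" is the name used upstream and is assumed in this sketch:

    static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
                                              struct amdgpu_hive_info *hive)
    {
            char node[10];
            int ret;

            /* link device -> hive, unless the device already parents
             * the hive kobject directly */
            if (hive->kobj.parent != (&adev->dev->kobj)) {
                    ret = sysfs_create_link(&adev->dev->kobj, &hive->kobj,
                                            "xgmi_hive_info"); /* assumed name */
                    if (ret)
                            return ret;
            }

            /* back-link hive -> device, named after the device's slot
             * in the hive ("node0", "node1", ...) */
            sprintf(node, "node%d", atomic_read(&hive->number_devices));
            return sysfs_create_link(&hive->kobj, &adev->dev->kobj, node);
    }
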
642 struct amdgpu_hive_info *hive) in amdgpu_xgmi_sysfs_rem_dev_info() argument
655 if (hive->kobj.parent != (&adev->dev->kobj)) in amdgpu_xgmi_sysfs_rem_dev_info()
658 sprintf(node, "node%d", atomic_read(&hive->number_devices)); in amdgpu_xgmi_sysfs_rem_dev_info()
659 sysfs_remove_link(&hive->kobj, node); in amdgpu_xgmi_sysfs_rem_dev_info()
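
The removal path (lines 642-659) mirrors the setup: it regenerates the same "node%d" name from the device counter and removes the links. Both callers shown below hold hive_lock around these helpers, which keeps the regenerated name in step with the one created earlier. A sketch under the same naming assumption:

    static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
                                               struct amdgpu_hive_info *hive)
    {
            char node[10];

            if (hive->kobj.parent != (&adev->dev->kobj))
                    sysfs_remove_link(&adev->dev->kobj,
                                      "xgmi_hive_info"); /* assumed name */

            sprintf(node, "node%d", atomic_read(&hive->number_devices));
            sysfs_remove_link(&hive->kobj, node);
    }
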
667 struct amdgpu_hive_info *hive = NULL; in amdgpu_get_xgmi_hive() local
673 if (adev->hive) { in amdgpu_get_xgmi_hive()
674 kobject_get(&adev->hive->kobj); in amdgpu_get_xgmi_hive()
675 return adev->hive; in amdgpu_get_xgmi_hive()
680 list_for_each_entry(hive, &xgmi_hive_list, node) { in amdgpu_get_xgmi_hive()
681 if (hive->hive_id == adev->gmc.xgmi.hive_id) in amdgpu_get_xgmi_hive()
685 hive = kzalloc(sizeof(*hive), GFP_KERNEL); in amdgpu_get_xgmi_hive()
686 if (!hive) { in amdgpu_get_xgmi_hive()
689 hive = NULL; in amdgpu_get_xgmi_hive()
694 ret = kobject_init_and_add(&hive->kobj, in amdgpu_get_xgmi_hive()
700 kobject_put(&hive->kobj); in amdgpu_get_xgmi_hive()
701 hive = NULL; in amdgpu_get_xgmi_hive()
718 hive->reset_domain = in amdgpu_get_xgmi_hive()
720 if (!hive->reset_domain) { in amdgpu_get_xgmi_hive()
723 kobject_put(&hive->kobj); in amdgpu_get_xgmi_hive()
724 hive = NULL; in amdgpu_get_xgmi_hive()
729 hive->reset_domain = adev->reset_domain; in amdgpu_get_xgmi_hive()
733 hive->hive_id = adev->gmc.xgmi.hive_id; in amdgpu_get_xgmi_hive()
734 INIT_LIST_HEAD(&hive->device_list); in amdgpu_get_xgmi_hive()
735 INIT_LIST_HEAD(&hive->node); in amdgpu_get_xgmi_hive()
736 mutex_init(&hive->hive_lock); in amdgpu_get_xgmi_hive()
737 atomic_set(&hive->number_devices, 0); in amdgpu_get_xgmi_hive()
738 task_barrier_init(&hive->tb); in amdgpu_get_xgmi_hive()
739 hive->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN; in amdgpu_get_xgmi_hive()
740 hive->hi_req_gpu = NULL; in amdgpu_get_xgmi_hive()
741 atomic_set(&hive->requested_nps_mode, UNKNOWN_MEMORY_PARTITION_MODE); in amdgpu_get_xgmi_hive()
747 hive->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE; in amdgpu_get_xgmi_hive()
748 list_add_tail(&hive->node, &xgmi_hive_list); in amdgpu_get_xgmi_hive()
751 if (hive) in amdgpu_get_xgmi_hive()
752 kobject_get(&hive->kobj); in amdgpu_get_xgmi_hive()
754 return hive; in amdgpu_get_xgmi_hive()
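
Lines 667-754 are the lookup-or-create entry point. Every successful return passes back a kobject reference that the caller must later drop with amdgpu_put_xgmi_hive(). The sketch below condenses the matching lines into the overall shape; the global mutex name, the kobj_type, the "xgmi_hive_info" kobject name, the pro_end label, and the reset-domain branch condition are not among the matching lines and are assumed from the upstream driver:

    struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
    {
            struct amdgpu_hive_info *hive = NULL;
            int ret;

            /* fast path: the device already cached its hive */
            if (adev->hive) {
                    kobject_get(&adev->hive->kobj);
                    return adev->hive;
            }

            mutex_lock(&xgmi_mutex); /* assumed global list lock */

            /* reuse an existing hive with a matching id */
            list_for_each_entry(hive, &xgmi_hive_list, node) {
                    if (hive->hive_id == adev->gmc.xgmi.hive_id)
                            goto pro_end;
            }

            hive = kzalloc(sizeof(*hive), GFP_KERNEL);
            if (!hive)
                    goto pro_end;

            /* after kobject_init_and_add(), failure must go through
             * kobject_put() so amdgpu_xgmi_hive_release() frees it */
            ret = kobject_init_and_add(&hive->kobj, &amdgpu_xgmi_hive_type,
                                       &adev->dev->kobj, "%s",
                                       "xgmi_hive_info"); /* assumed names */
            if (ret) {
                    kobject_put(&hive->kobj);
                    hive = NULL;
                    goto pro_end;
            }

            /* lines 718-729: a bare-metal hive gets its own reset
             * domain (creation call truncated in the listing); other
             * setups share the device's existing domain */
            if (adev->reset_domain->type != XGMI_HIVE) { /* assumed test */
                    hive->reset_domain =
                            amdgpu_reset_create_reset_domain(XGMI_HIVE,
                                                             "amdgpu-reset-hive");
                    if (!hive->reset_domain) {
                            kobject_put(&hive->kobj);
                            hive = NULL;
                            goto pro_end;
                    }
            } else {
                    amdgpu_reset_get_reset_domain(adev->reset_domain);
                    hive->reset_domain = adev->reset_domain;
            }

            hive->hive_id = adev->gmc.xgmi.hive_id;
            INIT_LIST_HEAD(&hive->device_list);
            INIT_LIST_HEAD(&hive->node);
            mutex_init(&hive->hive_lock);
            atomic_set(&hive->number_devices, 0);
            task_barrier_init(&hive->tb);
            hive->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
            hive->hi_req_gpu = NULL;
            atomic_set(&hive->requested_nps_mode,
                       UNKNOWN_MEMORY_PARTITION_MODE);
            hive->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE;
            list_add_tail(&hive->node, &xgmi_hive_list);

    pro_end:
            /* every successful path returns one extra reference */
            if (hive)
                    kobject_get(&hive->kobj);
            mutex_unlock(&xgmi_mutex);
            return hive;
    }
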
757 void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive) in amdgpu_put_xgmi_hive() argument
759 if (hive) in amdgpu_put_xgmi_hive()
760 kobject_put(&hive->kobj); in amdgpu_put_xgmi_hive()
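
amdgpu_put_xgmi_hive() is the other half of the reference protocol: it only drops the kobject reference taken by amdgpu_get_xgmi_hive(); actual teardown is deferred to amdgpu_xgmi_hive_release() (lines 415-422) when the count reaches zero. The typical caller pattern, visible at lines 771-777:

    hive = amdgpu_get_xgmi_hive(adev);
    if (!hive)
            return 0;
    /* ... read what is needed while the reference is held ... */
    amdgpu_put_xgmi_hive(hive);
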
766 struct amdgpu_hive_info *hive; in amdgpu_xgmi_set_pstate() local
771 hive = amdgpu_get_xgmi_hive(adev); in amdgpu_xgmi_set_pstate()
772 if (!hive) in amdgpu_xgmi_set_pstate()
775 request_adev = hive->hi_req_gpu ? hive->hi_req_gpu : adev; in amdgpu_xgmi_set_pstate()
776 init_low = hive->pstate == AMDGPU_XGMI_PSTATE_UNKNOWN; in amdgpu_xgmi_set_pstate()
777 amdgpu_put_xgmi_hive(hive); in amdgpu_xgmi_set_pstate()
781 if (!hive || adev->asic_type != CHIP_VEGA20) in amdgpu_xgmi_set_pstate()
784 mutex_lock(&hive->hive_lock); in amdgpu_xgmi_set_pstate()
787 hive->hi_req_count++; in amdgpu_xgmi_set_pstate()
789 hive->hi_req_count--; in amdgpu_xgmi_set_pstate()
795 if (hive->pstate == pstate || in amdgpu_xgmi_set_pstate()
796 (!is_hi_req && hive->hi_req_count && !init_low)) in amdgpu_xgmi_set_pstate()
811 hive->pstate = hive->hi_req_count ? in amdgpu_xgmi_set_pstate()
812 hive->pstate : AMDGPU_XGMI_PSTATE_MIN; in amdgpu_xgmi_set_pstate()
814 hive->pstate = pstate; in amdgpu_xgmi_set_pstate()
815 hive->hi_req_gpu = pstate != AMDGPU_XGMI_PSTATE_MIN ? in amdgpu_xgmi_set_pstate()
819 mutex_unlock(&hive->hive_lock); in amdgpu_xgmi_set_pstate()
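
Lines 766-819 are the hive pstate arbiter: high-pstate requests are counted per hive, routed through the GPU that first asked for a high pstate, and only dropped back to minimum when no requester remains. Note that line 777 puts the hive while lines 784-819 keep using it; in the upstream driver an unconditional early return (a firmware workaround) sits between the two, leaving the rest unreachable. A sketch reconstructed on that assumption; the AMDGPU_XGMI_PSTATE_MAX_VEGA20 test and the amdgpu_dpm_set_xgmi_pstate() call are not among the matching lines:

    int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
    {
            struct amdgpu_hive_info *hive;
            struct amdgpu_device *request_adev;
            bool is_hi_req = pstate == AMDGPU_XGMI_PSTATE_MAX_VEGA20; /* assumed */
            bool init_low;
            int ret = 0;

            hive = amdgpu_get_xgmi_hive(adev);
            if (!hive)
                    return 0;

            /* route the request through the GPU currently holding the
             * high-pstate request, if any */
            request_adev = hive->hi_req_gpu ? hive->hi_req_gpu : adev;
            init_low = hive->pstate == AMDGPU_XGMI_PSTATE_UNKNOWN;
            amdgpu_put_xgmi_hive(hive);
            /* assumed: upstream returns here (pstate switching disabled
             * for a firmware issue), so the code below never touches
             * hive after the put */
            return 0;

            if (!hive || adev->asic_type != CHIP_VEGA20)
                    return 0;

            mutex_lock(&hive->hive_lock);

            if (is_hi_req)
                    hive->hi_req_count++;
            else
                    hive->hi_req_count--;

            /* already at the target, or another GPU still holds a
             * high-pstate request: nothing to do */
            if (hive->pstate == pstate ||
                (!is_hi_req && hive->hi_req_count && !init_low))
                    goto out;

            ret = amdgpu_dpm_set_xgmi_pstate(request_adev, pstate); /* assumed */
            if (ret)
                    goto out;

            if (init_low)
                    hive->pstate = hive->hi_req_count ?
                                   hive->pstate : AMDGPU_XGMI_PSTATE_MIN;
            else {
                    hive->pstate = pstate;
                    hive->hi_req_gpu = pstate != AMDGPU_XGMI_PSTATE_MIN ?
                                       adev : NULL;
            }
    out:
            mutex_unlock(&hive->hive_lock);
            return ret;
    }
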
823 int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev) in amdgpu_xgmi_update_topology() argument
832 atomic_read(&hive->number_devices), in amdgpu_xgmi_update_topology()
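
Lines 823-832 show the topology updater: each device's PSP is told the latest device count and topology. A minimal sketch, assuming the upstream psp_xgmi_set_topology_info() helper and the topology cached under adev->psp:

    int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive,
                                    struct amdgpu_device *adev)
    {
            /* each PSP needs to learn the latest hive topology */
            return psp_xgmi_set_topology_info(&adev->psp,
                                              atomic_read(&hive->number_devices),
                                              &adev->psp.xgmi_context.top_info);
    }
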
941 static int amdgpu_xgmi_initialize_hive_get_data_partition(struct amdgpu_hive_info *hive, in amdgpu_xgmi_initialize_hive_get_data_partition() argument
947 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_initialize_hive_get_data_partition()
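
Lines 941-947 open a helper that walks every device in the hive; judging by its name and its call sites at lines 1093 and 1111, it re-initializes each device's PSP XGMI session, once with and once without extended (data-partition) information. A sketch under that assumption, with the psp_xgmi_initialize() body assumed:

    static int amdgpu_xgmi_initialize_hive_get_data_partition(
            struct amdgpu_hive_info *hive, bool set_extended_data)
    {
            struct amdgpu_device *tmp_adev;
            int ret;

            list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
                    /* assumed body: re-run the PSP XGMI init per device */
                    ret = psp_xgmi_initialize(&tmp_adev->psp,
                                              set_extended_data, false);
                    if (ret)
                            return ret;
            }

            return 0;
    }
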
986 struct amdgpu_hive_info *hive; in amdgpu_xgmi_add_device() local
1021 hive = amdgpu_get_xgmi_hive(adev); in amdgpu_xgmi_add_device()
1022 if (!hive) { in amdgpu_xgmi_add_device()
1029 mutex_lock(&hive->hive_lock); in amdgpu_xgmi_add_device()
1033 list_add_tail(&adev->gmc.xgmi.head, &hive->device_list); in amdgpu_xgmi_add_device()
1034 list_for_each_entry(entry, &hive->device_list, head) in amdgpu_xgmi_add_device()
1037 atomic_set(&hive->number_devices, count); in amdgpu_xgmi_add_device()
1039 task_barrier_add_task(&hive->tb); in amdgpu_xgmi_add_device()
1042 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_add_device()
1050 ret = amdgpu_xgmi_update_topology(hive, tmp_adev); in amdgpu_xgmi_add_device()
1070 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_add_device()
1075 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_add_device()
1093 ret = amdgpu_xgmi_initialize_hive_get_data_partition(hive, true); in amdgpu_xgmi_add_device()
1098 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_add_device()
1111 ret = amdgpu_xgmi_initialize_hive_get_data_partition(hive, false); in amdgpu_xgmi_add_device()
1119 ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive); in amdgpu_xgmi_add_device()
1122 mutex_unlock(&hive->hive_lock); in amdgpu_xgmi_add_device()
1125 adev->hive = hive; in amdgpu_xgmi_add_device()
1129 amdgpu_put_xgmi_hive(hive); in amdgpu_xgmi_add_device()
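
amdgpu_xgmi_add_device() (lines 986-1129) is where a probing device joins its hive. Condensing the matching lines: join the list, recount members, register with the task barrier, push topology to every member, run the two data-partition passes, add the sysfs links, and either keep the hive reference in adev->hive or drop it on failure. The "all devices present" gate, peer-link queries, and error values are elided in the listing and assumed below:

    int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
    {
            struct amdgpu_hive_info *hive;
            struct amdgpu_device *tmp_adev;
            struct amdgpu_xgmi *entry;
            int count = 0, ret = 0;

            hive = amdgpu_get_xgmi_hive(adev);
            if (!hive)
                    return -EINVAL; /* assumed error value */

            mutex_lock(&hive->hive_lock);

            /* join the hive and recount its members (lines 1033-1037) */
            list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
            list_for_each_entry(entry, &hive->device_list, head)
                    count++;
            atomic_set(&hive->number_devices, count);

            task_barrier_add_task(&hive->tb);

            /* assumed gate: topology is only exchanged once the whole
             * hive has probed */
            list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
                    ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
                    if (ret)
                            goto exit_unlock;
            }

            /* the two data-partition passes (lines 1093 and 1111) */
            ret = amdgpu_xgmi_initialize_hive_get_data_partition(hive, true);
            if (ret)
                    goto exit_unlock;
            ret = amdgpu_xgmi_initialize_hive_get_data_partition(hive, false);
            if (ret)
                    goto exit_unlock;

            ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);

    exit_unlock:
            mutex_unlock(&hive->hive_lock);
            if (!ret)
                    adev->hive = hive; /* keep the get's reference */
            else
                    amdgpu_put_xgmi_hive(hive);
            return ret;
    }
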
1140 struct amdgpu_hive_info *hive = adev->hive; in amdgpu_xgmi_remove_device() local
1145 if (!hive) in amdgpu_xgmi_remove_device()
1148 mutex_lock(&hive->hive_lock); in amdgpu_xgmi_remove_device()
1149 task_barrier_rem_task(&hive->tb); in amdgpu_xgmi_remove_device()
1150 amdgpu_xgmi_sysfs_rem_dev_info(adev, hive); in amdgpu_xgmi_remove_device()
1151 if (hive->hi_req_gpu == adev) in amdgpu_xgmi_remove_device()
1152 hive->hi_req_gpu = NULL; in amdgpu_xgmi_remove_device()
1154 mutex_unlock(&hive->hive_lock); in amdgpu_xgmi_remove_device()
1156 amdgpu_put_xgmi_hive(hive); in amdgpu_xgmi_remove_device()
1157 adev->hive = NULL; in amdgpu_xgmi_remove_device()
1159 if (atomic_dec_return(&hive->number_devices) == 0) { in amdgpu_xgmi_remove_device()
1162 list_del(&hive->node); in amdgpu_xgmi_remove_device()
1165 amdgpu_put_xgmi_hive(hive); in amdgpu_xgmi_remove_device()
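
The removal path (lines 1140-1165) drops the hive reference twice: once for the device's own reference taken in amdgpu_xgmi_add_device(), and, when the last device leaves, once more for the hive's original reference, which finally triggers amdgpu_xgmi_hive_release(). Reading hive->number_devices after the first put (line 1159) is safe only because that original kobject reference is still outstanding. A sketch; the list_del() of the device and the global list lock are not among the matching lines and are assumed:

    int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
    {
            struct amdgpu_hive_info *hive = adev->hive;

            if (!hive)
                    return -EINVAL;

            mutex_lock(&hive->hive_lock);
            task_barrier_rem_task(&hive->tb);
            amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
            if (hive->hi_req_gpu == adev)
                    hive->hi_req_gpu = NULL;
            list_del(&adev->gmc.xgmi.head); /* assumed, elided in listing */
            mutex_unlock(&hive->hive_lock);

            /* drop this device's reference; the kobject's initial
             * reference keeps the hive alive for the check below */
            amdgpu_put_xgmi_hive(hive);
            adev->hive = NULL;

            if (atomic_dec_return(&hive->number_devices) == 0) {
                    /* last device out: unlink the hive from the global
                     * list (lock assumed) and drop the final reference,
                     * which runs the release callback */
                    list_del(&hive->node);
                    amdgpu_put_xgmi_hive(hive);
            }

            return 0;
    }
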
1645 struct amdgpu_hive_info *hive = in amdgpu_xgmi_reset_on_init_work() local
1652 mutex_lock(&hive->hive_lock); in amdgpu_xgmi_reset_on_init_work()
1655 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) in amdgpu_xgmi_reset_on_init_work()
1664 reset_context.hive = hive; in amdgpu_xgmi_reset_on_init_work()
1670 mutex_unlock(&hive->hive_lock); in amdgpu_xgmi_reset_on_init_work()
1673 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_reset_on_init_work()
1681 static void amdgpu_xgmi_schedule_reset_on_init(struct amdgpu_hive_info *hive) in amdgpu_xgmi_schedule_reset_on_init() argument
1683 INIT_WORK(&hive->reset_on_init_work, amdgpu_xgmi_reset_on_init_work); in amdgpu_xgmi_schedule_reset_on_init()
1684 amdgpu_reset_domain_schedule(hive->reset_domain, in amdgpu_xgmi_schedule_reset_on_init()
1685 &hive->reset_on_init_work); in amdgpu_xgmi_schedule_reset_on_init()
1690 struct amdgpu_hive_info *hive; in amdgpu_xgmi_reset_on_init() local
1694 hive = amdgpu_get_xgmi_hive(adev); in amdgpu_xgmi_reset_on_init()
1695 if (!hive) in amdgpu_xgmi_reset_on_init()
1698 mutex_lock(&hive->hive_lock); in amdgpu_xgmi_reset_on_init()
1699 num_devs = atomic_read(&hive->number_devices); in amdgpu_xgmi_reset_on_init()
1702 amdgpu_xgmi_schedule_reset_on_init(hive); in amdgpu_xgmi_reset_on_init()
1706 mutex_unlock(&hive->hive_lock); in amdgpu_xgmi_reset_on_init()
1707 amdgpu_put_xgmi_hive(hive); in amdgpu_xgmi_reset_on_init()
1710 flush_work(&hive->reset_on_init_work); in amdgpu_xgmi_reset_on_init()
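
Lines 1645-1710 cover reset-on-init: the work item (lines 1645-1673) performs the hive-wide reset with reset_context.hive set, amdgpu_xgmi_schedule_reset_on_init() queues it on the hive's reset domain, and the top-level entry point schedules it once the whole hive has probed. flush_work() at line 1710 runs after the hive reference is dropped; that is safe because the hive's member devices still hold references. A condensed sketch, with the device-count comparison and error value assumed:

    static void amdgpu_xgmi_schedule_reset_on_init(struct amdgpu_hive_info *hive)
    {
            INIT_WORK(&hive->reset_on_init_work, amdgpu_xgmi_reset_on_init_work);
            amdgpu_reset_domain_schedule(hive->reset_domain,
                                         &hive->reset_on_init_work);
    }

    int amdgpu_xgmi_reset_on_init(struct amdgpu_device *adev)
    {
            struct amdgpu_hive_info *hive;
            bool scheduled = false;
            int num_devs;

            hive = amdgpu_get_xgmi_hive(adev);
            if (!hive)
                    return -EINVAL; /* assumed error value */

            mutex_lock(&hive->hive_lock);
            num_devs = atomic_read(&hive->number_devices);
            /* assumed gate: only reset once the full hive has probed */
            if (num_devs == adev->gmc.xgmi.num_physical_nodes) {
                    amdgpu_xgmi_schedule_reset_on_init(hive);
                    scheduled = true;
            }
            mutex_unlock(&hive->hive_lock);

            amdgpu_put_xgmi_hive(hive);
            if (scheduled)
                    flush_work(&hive->reset_on_init_work);

            return 0;
    }
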
1716 struct amdgpu_hive_info *hive, in amdgpu_xgmi_request_nps_change() argument
1728 mutex_lock(&hive->hive_lock); in amdgpu_xgmi_request_nps_change()
1729 if (atomic_read(&hive->requested_nps_mode) == in amdgpu_xgmi_request_nps_change()
1732 mutex_unlock(&hive->hive_lock); in amdgpu_xgmi_request_nps_change()
1735 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_request_nps_change()
1746 tmp_adev, &hive->device_list, gmc.xgmi.head) in amdgpu_xgmi_request_nps_change()
1751 atomic_set(&hive->requested_nps_mode, UNKNOWN_MEMORY_PARTITION_MODE); in amdgpu_xgmi_request_nps_change()
1752 mutex_unlock(&hive->hive_lock); in amdgpu_xgmi_request_nps_change()
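
The NPS (memory partition) change request (lines 1716-1752) is placed once per hive: it walks every member device and, if any request fails, line 1746's continuation fragment shows the rollback, a list_for_each_entry_continue_reverse() that re-requests the previous mode on the devices already switched. A sketch of that all-or-nothing pattern; the guard's comparand at line 1729 is truncated in the listing, the flattened indentation does not show whether the atomic_set at line 1751 is failure-only, and the per-device gmc_funcs callbacks are assumed:

    int amdgpu_xgmi_request_nps_change(struct amdgpu_device *adev,
                                       struct amdgpu_hive_info *hive,
                                       int req_nps_mode)
    {
            struct amdgpu_device *tmp_adev;
            int cur_nps_mode, r = 0;

            mutex_lock(&hive->hive_lock);
            /* place the request only once per hive (comparand assumed) */
            if (atomic_read(&hive->requested_nps_mode) == req_nps_mode) {
                    mutex_unlock(&hive->hive_lock);
                    return 0;
            }

            list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
                    r = tmp_adev->gmc.gmc_funcs->request_mem_partition_mode(
                            tmp_adev, req_nps_mode); /* assumed callback */
                    if (r)
                            break;
            }

            if (r) {
                    /* roll back: restore the current mode on the devices
                     * already switched, then forget the hive-level
                     * request (lines 1746-1751) */
                    cur_nps_mode = tmp_adev->gmc.gmc_funcs->
                            query_mem_partition_mode(tmp_adev); /* assumed */
                    list_for_each_entry_continue_reverse(
                            tmp_adev, &hive->device_list, gmc.xgmi.head)
                            tmp_adev->gmc.gmc_funcs->request_mem_partition_mode(
                                    tmp_adev, cur_nps_mode);
                    atomic_set(&hive->requested_nps_mode,
                               UNKNOWN_MEMORY_PARTITION_MODE);
            }
            mutex_unlock(&hive->hive_lock);

            return r;
    }
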