Lines matching identifier con in amdgpu_ras.c. Each entry gives the source line number, the matching line, and the enclosing function; declaration sites are additionally tagged local or argument.

139 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
671 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_create_obj() local
674 if (!adev->ras_enabled || !con) in amdgpu_ras_create_obj()
684 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; in amdgpu_ras_create_obj()
686 obj = &con->objs[head->block]; in amdgpu_ras_create_obj()
697 list_add(&obj->node, &con->head); in amdgpu_ras_create_obj()
707 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_find_obj() local
711 if (!adev->ras_enabled || !con) in amdgpu_ras_find_obj()
722 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; in amdgpu_ras_find_obj()
724 obj = &con->objs[head->block]; in amdgpu_ras_find_obj()
730 obj = &con->objs[i]; in amdgpu_ras_find_obj()
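The two lookups above share one indexing scheme: whole-block managers live at con->objs[head->block], while sub-block entries are parked past the end of that range at AMDGPU_RAS_BLOCK__LAST + head->sub_block_index (lines 684/722 vs. 686/724). The condition that selects between the two slots is not visible in this listing, so the sub_block_index test below is an assumption, and the types and counts are stand-ins. A minimal userspace sketch of the layout:

    #include <stdio.h>

    /* Stand-ins for the amdgpu_ras.h enums and bounds (assumed). */
    enum { RAS_BLOCK_UMC, RAS_BLOCK_GFX, RAS_BLOCK_LAST };
    #define RAS_MAX_SUBBLOCKS 4

    struct ras_manager { int in_use; };

    struct ras_ctx {
        struct ras_manager objs[RAS_BLOCK_LAST + RAS_MAX_SUBBLOCKS];
    };

    /* Whole blocks index directly; sub-blocks index past RAS_BLOCK_LAST,
     * mirroring the two obj = &con->objs[...] forms in the listing. */
    static struct ras_manager *find_obj(struct ras_ctx *ctx, int block,
                                        int sub_block_index)
    {
        if (sub_block_index >= 0)   /* assumed selection condition */
            return &ctx->objs[RAS_BLOCK_LAST + sub_block_index];
        return &ctx->objs[block];
    }

    int main(void)
    {
        struct ras_ctx ctx = {0};
        printf("gfx slot:   %td\n", find_obj(&ctx, RAS_BLOCK_GFX, -1) - ctx.objs);
        printf("sub 2 slot: %td\n", find_obj(&ctx, RAS_BLOCK_UMC, 2) - ctx.objs);
        return 0;
    }

Keeping both kinds of manager in one array lets list_add(&obj->node, &con->head) at line 697 stay uniform regardless of which slot the object came from.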
750 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_is_feature_enabled() local
752 return con->features & BIT(head->block); in amdgpu_ras_is_feature_enabled()
762 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in __amdgpu_ras_feature_enable() local
783 con->features |= BIT(head->block); in __amdgpu_ras_feature_enable()
786 con->features &= ~BIT(head->block); in __amdgpu_ras_feature_enable()
798 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_feature_enable() local
802 if (!con) in amdgpu_ras_feature_enable()
855 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_feature_enable_on_boot() local
858 if (!con) in amdgpu_ras_feature_enable_on_boot()
861 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { in amdgpu_ras_feature_enable_on_boot()
889 con->features |= BIT(head->block); in amdgpu_ras_feature_enable_on_boot()
895 con->features &= ~BIT(head->block); in amdgpu_ras_feature_enable_on_boot()
906 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_disable_all_features() local
909 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_disable_all_features()
922 return con->features; in amdgpu_ras_disable_all_features()
928 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_enable_all_features() local
975 return con->features; in amdgpu_ras_enable_all_features()
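Feature state is a per-block bitmask: line 752 tests it, lines 783/786 set and clear it, and lines 889/895 do the same on the boot path. A minimal sketch of that bookkeeping (BIT is re-derived locally; the block ids are assumed):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    enum { RAS_BLOCK_UMC = 0, RAS_BLOCK_GFX = 1 };  /* assumed ids */

    struct ras_ctx { uint32_t features; };

    static void feature_set(struct ras_ctx *c, int block, int enable)
    {
        if (enable)
            c->features |= BIT(block);      /* mirrors line 783 */
        else
            c->features &= ~BIT(block);     /* mirrors line 786 */
    }

    static int feature_enabled(const struct ras_ctx *c, int block)
    {
        return !!(c->features & BIT(block));  /* mirrors line 752 */
    }

    int main(void)
    {
        struct ras_ctx c = { 0 };
        feature_set(&c, RAS_BLOCK_GFX, 1);
        printf("gfx enabled: %d, mask: 0x%x\n",
               feature_enabled(&c, RAS_BLOCK_GFX), c.features);
        return 0;
    }

Note that init appears to pack the socket id into the high bits of the same field (line 4068), so the mask read back through the sysfs features file (line 1780) carries both.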
1651 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_query_error_count() local
1656 if (!adev->ras_enabled || !con) in amdgpu_ras_query_error_count()
1668 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_query_error_count()
1746 struct amdgpu_ras *con = in amdgpu_ras_sysfs_badpages_read() local
1748 struct amdgpu_device *adev = con->adev; in amdgpu_ras_sysfs_badpages_read()
1777 struct amdgpu_ras *con = in amdgpu_ras_sysfs_features_read() local
1780 return sysfs_emit(buf, "feature mask: 0x%x\n", con->features); in amdgpu_ras_sysfs_features_read()
1786 struct amdgpu_ras *con = in amdgpu_ras_sysfs_version_show() local
1788 return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version); in amdgpu_ras_sysfs_version_show()
1794 struct amdgpu_ras *con = in amdgpu_ras_sysfs_schema_show() local
1796 return sysfs_emit(buf, "schema: 0x%x\n", con->schema); in amdgpu_ras_sysfs_schema_show()
1811 struct amdgpu_ras *con = in amdgpu_ras_sysfs_event_state_show() local
1813 struct ras_event_manager *event_mgr = con->event_mgr; in amdgpu_ras_sysfs_event_state_show()
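The initializers at lines 1746, 1777, 1786, 1794, and 1811 look truncated because they continue on the next source line, which does not match con; in each case the continuation recovers the context from the embedded attribute via container_of(). A userspace sketch of that recovery, with a stand-in struct layout:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct dev_attr { const char *name; };

    struct ras_ctx {
        unsigned int features;
        struct dev_attr features_attr;   /* embedded, like con->features_attr */
    };

    /* A show() callback only receives the attribute pointer; the owning
     * context is recovered by subtracting the member's offset. */
    static unsigned int show_features(struct dev_attr *attr)
    {
        struct ras_ctx *con = container_of(attr, struct ras_ctx, features_attr);
        return con->features;
    }

    int main(void)
    {
        struct ras_ctx c = { .features = 0x5, .features_attr = { "features" } };
        printf("feature mask: 0x%x\n", show_features(&c.features_attr));
        return 0;
    }

This is presumably one reason fs_init copies the attribute templates into the context (lines 2110-2127): each device needs its own embedded copy so the offset arithmetic lands in the right instance.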
1834 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_sysfs_remove_bad_page_node() local
1838 &con->badpages_attr.attr, in amdgpu_ras_sysfs_remove_bad_page_node()
1844 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_sysfs_remove_dev_attr_node() local
1846 &con->features_attr.attr, in amdgpu_ras_sysfs_remove_dev_attr_node()
1847 &con->version_attr.attr, in amdgpu_ras_sysfs_remove_dev_attr_node()
1848 &con->schema_attr.attr, in amdgpu_ras_sysfs_remove_dev_attr_node()
1849 &con->event_state_attr.attr, in amdgpu_ras_sysfs_remove_dev_attr_node()
1926 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_sysfs_remove_all() local
1929 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_sysfs_remove_all()
1963 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_debugfs_create_ctrl_node() local
1964 struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control; in amdgpu_ras_debugfs_create_ctrl_node()
1974 &con->bad_page_cnt_threshold); in amdgpu_ras_debugfs_create_ctrl_node()
1980 con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table", in amdgpu_ras_debugfs_create_ctrl_node()
1983 amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control); in amdgpu_ras_debugfs_create_ctrl_node()
1993 debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot); in amdgpu_ras_debugfs_create_ctrl_node()
2000 &con->disable_ras_err_cnt_harvest); in amdgpu_ras_debugfs_create_ctrl_node()
2043 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_debugfs_create_all() local
2052 if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con) in amdgpu_ras_debugfs_create_all()
2057 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_debugfs_create_all()
2090 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_fs_init() local
2095 &con->features_attr.attr, in amdgpu_ras_fs_init()
2096 &con->version_attr.attr, in amdgpu_ras_fs_init()
2097 &con->schema_attr.attr, in amdgpu_ras_fs_init()
2098 &con->event_state_attr.attr, in amdgpu_ras_fs_init()
2110 con->features_attr = dev_attr_features; in amdgpu_ras_fs_init()
2114 con->version_attr = dev_attr_version; in amdgpu_ras_fs_init()
2118 con->schema_attr = dev_attr_schema; in amdgpu_ras_fs_init()
2122 con->event_state_attr = dev_attr_event_state; in amdgpu_ras_fs_init()
2127 con->badpages_attr = bin_attr_gpu_vram_bad_pages; in amdgpu_ras_fs_init()
2128 sysfs_bin_attr_init(&con->badpages_attr); in amdgpu_ras_fs_init()
2129 bin_attrs[0] = &con->badpages_attr; in amdgpu_ras_fs_init()
2142 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_fs_fini() local
2146 list_for_each_entry_safe(con_obj, tmp, &con->head, node) { in amdgpu_ras_fs_fini()
2197 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_interrupt_poison_consumption_handler() local
2202 if (!block_obj || !con) in amdgpu_ras_interrupt_poison_consumption_handler()
2261 struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev); in amdgpu_ras_interrupt_poison_creation_handler() local
2263 atomic_inc(&con->page_retirement_req_cnt); in amdgpu_ras_interrupt_poison_creation_handler()
2264 atomic_inc(&con->poison_creation_count); in amdgpu_ras_interrupt_poison_creation_handler()
2266 wake_up(&con->page_retirement_wq); in amdgpu_ras_interrupt_poison_creation_handler()
2441 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_interrupt_remove_all() local
2444 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_interrupt_remove_all()
2455 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_log_on_err_counter() local
2458 if (!adev->ras_enabled || !con) in amdgpu_ras_log_on_err_counter()
2461 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_log_on_err_counter()
2530 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_query_err_status() local
2533 if (!adev->ras_enabled || !con) in amdgpu_ras_query_err_status()
2536 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_query_err_status()
2553 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_badpages_read() local
2558 if (!con || !con->eh_data || !bps || !count) in amdgpu_ras_badpages_read()
2561 mutex_lock(&con->recovery_lock); in amdgpu_ras_badpages_read()
2562 data = con->eh_data; in amdgpu_ras_badpages_read()
2591 mutex_unlock(&con->recovery_lock); in amdgpu_ras_badpages_read()
2812 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in __amdgpu_ras_restore_bad_pages() local
2813 struct ras_err_handler_data *data = con->eh_data; in __amdgpu_ras_restore_bad_pages()
2816 if (amdgpu_ras_check_bad_page_unlock(con, in __amdgpu_ras_restore_bad_pages()
2940 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_add_bad_pages() local
2948 if (!con || !con->eh_data || !bps || pages <= 0) in amdgpu_ras_add_bad_pages()
2964 mutex_lock(&con->recovery_lock); in amdgpu_ras_add_bad_pages()
2999 mutex_unlock(&con->recovery_lock); in amdgpu_ras_add_bad_pages()
3012 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_save_bad_pages() local
3017 if (!con || !con->eh_data) { in amdgpu_ras_save_bad_pages()
3024 if (!con->eeprom_control.is_eeprom_valid) { in amdgpu_ras_save_bad_pages()
3033 mutex_lock(&con->recovery_lock); in amdgpu_ras_save_bad_pages()
3034 control = &con->eeprom_control; in amdgpu_ras_save_bad_pages()
3035 data = con->eh_data; in amdgpu_ras_save_bad_pages()
3038 mutex_unlock(&con->recovery_lock); in amdgpu_ras_save_bad_pages()
3137 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, in amdgpu_ras_check_bad_page_unlock() argument
3140 struct ras_err_handler_data *data = con->eh_data; in amdgpu_ras_check_bad_page_unlock()
3159 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_check_bad_page() local
3162 if (!con || !con->eh_data) in amdgpu_ras_check_bad_page()
3165 mutex_lock(&con->recovery_lock); in amdgpu_ras_check_bad_page()
3166 ret = amdgpu_ras_check_bad_page_unlock(con, addr); in amdgpu_ras_check_bad_page()
3167 mutex_unlock(&con->recovery_lock); in amdgpu_ras_check_bad_page()
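Lines 3137-3167 show the locked/unlocked split used throughout this file: amdgpu_ras_check_bad_page_unlock() does the search and expects recovery_lock to be held, while amdgpu_ras_check_bad_page() is the public wrapper that takes and drops the lock. Sketched with a pthread mutex and the bad-page list reduced to a flat array:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct ras_ctx {
        pthread_mutex_t recovery_lock;
        uint64_t bad_addrs[8];
        int bad_count;
    };

    /* "_unlock" suffix: the caller must already hold recovery_lock. */
    static bool check_bad_page_unlock(struct ras_ctx *con, uint64_t addr)
    {
        for (int i = 0; i < con->bad_count; i++)
            if (con->bad_addrs[i] == addr)
                return true;
        return false;
    }

    /* Public wrapper takes the lock around the unlocked worker,
     * mirroring lines 3165-3167. */
    static bool check_bad_page(struct ras_ctx *con, uint64_t addr)
    {
        pthread_mutex_lock(&con->recovery_lock);
        bool ret = check_bad_page_unlock(con, addr);
        pthread_mutex_unlock(&con->recovery_lock);
        return ret;
    }

    int main(void)
    {
        static struct ras_ctx con = {
            .recovery_lock = PTHREAD_MUTEX_INITIALIZER,
            .bad_addrs = { 0x1000 }, .bad_count = 1,
        };
        printf("0x1000 bad? %d\n", check_bad_page(&con, 0x1000));
        return 0;
    }

The same wrapper shape recurs at lines 2561/2591 (badpages_read) and 3033/3038 (save_bad_pages), where recovery_lock brackets every access to con->eh_data.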
3174 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_validate_threshold() local
3196 con->bad_page_cnt_threshold = min(lower_32_bits(val), in amdgpu_ras_validate_threshold()
3199 con->bad_page_cnt_threshold = ((con->reserved_pages_in_bytes) >> 21) << 4; in amdgpu_ras_validate_threshold()
3201 con->bad_page_cnt_threshold = min_t(int, max_count, in amdgpu_ras_validate_threshold()
3212 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_put_poison_req() local
3221 ret = kfifo_put(&con->poison_fifo, poison_msg); in amdgpu_ras_put_poison_req()
3233 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_get_poison_req() local
3235 return kfifo_get(&con->poison_fifo, poison_msg); in amdgpu_ras_get_poison_req()
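Lines 3212-3235 pass poison messages through a kfifo: kfifo_put() on the producer side, kfifo_get() on the consumer side, each returning 1 on success and 0 when the queue is full or empty. A single-threaded ring with the same contract (size and message type are stand-ins; the real kfifo is additionally safe for one concurrent producer and one consumer):

    #include <stdio.h>

    #define FIFO_SIZE 8                    /* power of two, like kfifo */

    struct poison_msg { unsigned long addr; };

    struct fifo {
        struct poison_msg buf[FIFO_SIZE];
        unsigned int in, out;              /* free-running indices */
    };

    /* Returns 1 on success, 0 if full -- same contract as kfifo_put(). */
    static int fifo_put(struct fifo *f, struct poison_msg m)
    {
        if (f->in - f->out == FIFO_SIZE)
            return 0;
        f->buf[f->in++ & (FIFO_SIZE - 1)] = m;
        return 1;
    }

    /* Returns 1 on success, 0 if empty -- same contract as kfifo_get(). */
    static int fifo_get(struct fifo *f, struct poison_msg *m)
    {
        if (f->in == f->out)
            return 0;
        *m = f->buf[f->out++ & (FIFO_SIZE - 1)];
        return 1;
    }

    int main(void)
    {
        struct fifo f = {0};
        struct poison_msg m = { 0x1000 };
        printf("put: %d\n", fifo_put(&f, m));
        printf("get: %d addr: 0x%lx\n", fifo_get(&f, &m), m.addr);
        return 0;
    }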
3267 static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con, in amdgpu_ras_schedule_retirement_dwork() argument
3272 mutex_lock(&con->umc_ecc_log.lock); in amdgpu_ras_schedule_retirement_dwork()
3273 ret = radix_tree_tagged(&con->umc_ecc_log.de_page_tree, in amdgpu_ras_schedule_retirement_dwork()
3275 mutex_unlock(&con->umc_ecc_log.lock); in amdgpu_ras_schedule_retirement_dwork()
3278 schedule_delayed_work(&con->page_retirement_dwork, in amdgpu_ras_schedule_retirement_dwork()
3286 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, in amdgpu_ras_do_page_retirement() local
3288 struct amdgpu_device *adev = con->adev; in amdgpu_ras_do_page_retirement()
3294 amdgpu_ras_schedule_retirement_dwork(con, in amdgpu_ras_do_page_retirement()
3309 amdgpu_ras_schedule_retirement_dwork(con, in amdgpu_ras_do_page_retirement()
3367 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_clear_poison_fifo() local
3372 ret = kfifo_get(&con->poison_fifo, &msg); in amdgpu_ras_clear_poison_fifo()
3379 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_poison_consumption_handler() local
3406 flush_delayed_work(&con->page_retirement_dwork); in amdgpu_ras_poison_consumption_handler()
3408 con->gpu_reset_flags |= reset; in amdgpu_ras_poison_consumption_handler()
3414 flush_work(&con->recovery_work); in amdgpu_ras_poison_consumption_handler()
3423 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_page_retirement_thread() local
3430 wait_event_interruptible(con->page_retirement_wq, in amdgpu_ras_page_retirement_thread()
3432 atomic_read(&con->page_retirement_req_cnt)); in amdgpu_ras_page_retirement_thread()
3440 poison_creation_count = atomic_read(&con->poison_creation_count); in amdgpu_ras_page_retirement_thread()
3446 atomic_sub(poison_creation_count, &con->poison_creation_count); in amdgpu_ras_page_retirement_thread()
3447 atomic_sub(poison_creation_count, &con->page_retirement_req_cnt); in amdgpu_ras_page_retirement_thread()
3449 } while (atomic_read(&con->poison_creation_count)); in amdgpu_ras_page_retirement_thread()
3452 msg_count = kfifo_len(&con->poison_fifo); in amdgpu_ras_page_retirement_thread()
3458 atomic_sub(msg_count, &con->page_retirement_req_cnt); in amdgpu_ras_page_retirement_thread()
3465 atomic_set(&con->poison_creation_count, 0); in amdgpu_ras_page_retirement_thread()
3471 atomic_set(&con->page_retirement_req_cnt, 0); in amdgpu_ras_page_retirement_thread()
3480 schedule_delayed_work(&con->page_retirement_dwork, 0); in amdgpu_ras_page_retirement_thread()
3484 msg_count = kfifo_len(&con->poison_fifo); in amdgpu_ras_page_retirement_thread()
3487 atomic_sub(msg_count, &con->page_retirement_req_cnt); in amdgpu_ras_page_retirement_thread()
3491 schedule_delayed_work(&con->page_retirement_dwork, 0); in amdgpu_ras_page_retirement_thread()
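Lines 2263-2266 are the producer half of this machinery (bump page_retirement_req_cnt and poison_creation_count, wake the worker) and lines 3423-3491 are the consumer: sleep until the request counter is non-zero, drain the poison-creation work, then drain the fifo, subtracting exactly what was consumed. A compressed pthreads sketch of the handshake (build with -pthread); the kthread_should_stop() half of the real wait condition is omitted:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int req_cnt;             /* ~ con->page_retirement_req_cnt */
    static pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wq = PTHREAD_COND_INITIALIZER;  /* ~ page_retirement_wq */

    static void poison_creation_handler(void)  /* ~ lines 2263-2266 */
    {
        atomic_fetch_add(&req_cnt, 1);
        pthread_mutex_lock(&mu);
        pthread_cond_signal(&wq);          /* ~ wake_up(&con->page_retirement_wq) */
        pthread_mutex_unlock(&mu);
    }

    static void *retirement_thread(void *arg)  /* ~ lines 3423-3491 */
    {
        (void)arg;
        pthread_mutex_lock(&mu);
        while (atomic_load(&req_cnt) == 0) /* ~ wait_event_interruptible(...) */
            pthread_cond_wait(&wq, &mu);
        pthread_mutex_unlock(&mu);

        int n = atomic_load(&req_cnt);     /* snapshot, then consume */
        printf("retiring %d request(s)\n", n);
        atomic_fetch_sub(&req_cnt, n);     /* ~ atomic_sub(..., &req_cnt), line 3447 */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, retirement_thread, NULL);
        poison_creation_handler();
        pthread_join(t, NULL);
        return 0;
    }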
3500 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_init_badpage_info() local
3504 if (!con || amdgpu_sriov_vf(adev)) in amdgpu_ras_init_badpage_info()
3507 control = &con->eeprom_control; in amdgpu_ras_init_badpage_info()
3528 if (con->update_channel_flag == true) { in amdgpu_ras_init_badpage_info()
3531 con->update_channel_flag = false; in amdgpu_ras_init_badpage_info()
3547 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_recovery_init() local
3552 if (!con || amdgpu_sriov_vf(adev)) in amdgpu_ras_recovery_init()
3560 con->adev = adev; in amdgpu_ras_recovery_init()
3565 data = &con->eh_data; in amdgpu_ras_recovery_init()
3572 mutex_init(&con->recovery_lock); in amdgpu_ras_recovery_init()
3573 INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery); in amdgpu_ras_recovery_init()
3574 atomic_set(&con->in_recovery, 0); in amdgpu_ras_recovery_init()
3575 con->eeprom_control.bad_channel_bitmap = 0; in amdgpu_ras_recovery_init()
3577 max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control); in amdgpu_ras_recovery_init()
3586 mutex_init(&con->page_rsv_lock); in amdgpu_ras_recovery_init()
3587 INIT_KFIFO(con->poison_fifo); in amdgpu_ras_recovery_init()
3588 mutex_init(&con->page_retirement_lock); in amdgpu_ras_recovery_init()
3589 init_waitqueue_head(&con->page_retirement_wq); in amdgpu_ras_recovery_init()
3590 atomic_set(&con->page_retirement_req_cnt, 0); in amdgpu_ras_recovery_init()
3591 atomic_set(&con->poison_creation_count, 0); in amdgpu_ras_recovery_init()
3592 con->page_retirement_thread = in amdgpu_ras_recovery_init()
3594 if (IS_ERR(con->page_retirement_thread)) { in amdgpu_ras_recovery_init()
3595 con->page_retirement_thread = NULL; in amdgpu_ras_recovery_init()
3599 INIT_DELAYED_WORK(&con->page_retirement_dwork, amdgpu_ras_do_page_retirement); in amdgpu_ras_recovery_init()
3600 amdgpu_ras_ecc_log_init(&con->umc_ecc_log); in amdgpu_ras_recovery_init()
3611 con->eh_data = NULL; in amdgpu_ras_recovery_init()
3629 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_recovery_fini() local
3630 struct ras_err_handler_data *data = con->eh_data; in amdgpu_ras_recovery_fini()
3640 flush_delayed_work(&con->page_retirement_dwork); in amdgpu_ras_recovery_fini()
3641 ret = amdgpu_ras_schedule_retirement_dwork(con, 0); in amdgpu_ras_recovery_fini()
3644 if (con->page_retirement_thread) in amdgpu_ras_recovery_fini()
3645 kthread_stop(con->page_retirement_thread); in amdgpu_ras_recovery_fini()
3647 atomic_set(&con->page_retirement_req_cnt, 0); in amdgpu_ras_recovery_fini()
3648 atomic_set(&con->poison_creation_count, 0); in amdgpu_ras_recovery_fini()
3650 mutex_destroy(&con->page_rsv_lock); in amdgpu_ras_recovery_fini()
3652 cancel_work_sync(&con->recovery_work); in amdgpu_ras_recovery_fini()
3654 cancel_delayed_work_sync(&con->page_retirement_dwork); in amdgpu_ras_recovery_fini()
3656 amdgpu_ras_ecc_log_fini(&con->umc_ecc_log); in amdgpu_ras_recovery_fini()
3658 mutex_lock(&con->recovery_lock); in amdgpu_ras_recovery_fini()
3659 con->eh_data = NULL; in amdgpu_ras_recovery_fini()
3662 mutex_unlock(&con->recovery_lock); in amdgpu_ras_recovery_fini()
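Lines 3592-3597 rely on the kernel's ERR_PTR convention: kthread_run() returns either a valid task pointer or a small negative errno encoded in the pointer value, and on failure the field is reset to NULL so that the later if (con->page_retirement_thread) kthread_stop(...) in fini (lines 3644-3645) stays safe. A freestanding sketch of the idiom:

    #include <errno.h>
    #include <stdio.h>

    /* Minimal re-derivation of the kernel's ERR_PTR/IS_ERR helpers. */
    #define MAX_ERRNO 4095
    static inline void *ERR_PTR(long err) { return (void *)err; }
    static inline long PTR_ERR(const void *p) { return (long)p; }
    static inline int IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    struct task { int id; };

    static void *fake_thread_run(int fail)    /* stands in for kthread_run() */
    {
        static struct task t = { 1 };
        return fail ? ERR_PTR(-ENOMEM) : &t;
    }

    int main(void)
    {
        void *thr = fake_thread_run(1);
        if (IS_ERR(thr)) {                 /* mirrors lines 3594-3595 */
            printf("thread create failed: %ld\n", PTR_ERR(thr));
            thr = NULL;                    /* keep later NULL checks valid */
        }
        if (thr)                           /* mirrors lines 3644-3645 */
            printf("would kthread_stop() here\n");
        return 0;
    }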
3773 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_query_poison_mode() local
3777 if (amdgpu_sriov_vf(adev) || !con) in amdgpu_ras_query_poison_mode()
3784 con->poison_supported = true; in amdgpu_ras_query_poison_mode()
3796 con->poison_supported = true; in amdgpu_ras_query_poison_mode()
3869 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, in amdgpu_ras_counte_dw() local
3871 struct amdgpu_device *adev = con->adev; in amdgpu_ras_counte_dw()
3883 atomic_set(&con->ras_ce_count, ce_count); in amdgpu_ras_counte_dw()
3884 atomic_set(&con->ras_ue_count, ue_count); in amdgpu_ras_counte_dw()
3938 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_init_reserved_vram_size() local
3940 if (!con || (adev->flags & AMD_IS_APU)) in amdgpu_ras_init_reserved_vram_size()
3947 con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT; in amdgpu_ras_init_reserved_vram_size()
3950 con->reserved_pages_in_bytes = (AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT << 1); in amdgpu_ras_init_reserved_vram_size()
3959 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_init() local
3962 if (con) in amdgpu_ras_init()
3965 con = kzalloc(sizeof(*con) + in amdgpu_ras_init()
3969 if (!con) in amdgpu_ras_init()
3972 con->adev = adev; in amdgpu_ras_init()
3973 INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw); in amdgpu_ras_init()
3974 atomic_set(&con->ras_ce_count, 0); in amdgpu_ras_init()
3975 atomic_set(&con->ras_ue_count, 0); in amdgpu_ras_init()
3977 con->objs = (struct ras_manager *)(con + 1); in amdgpu_ras_init()
3979 amdgpu_ras_set_context(adev, con); in amdgpu_ras_init()
3988 con->features |= BIT(AMDGPU_RAS_BLOCK__GFX); in amdgpu_ras_init()
3997 con->update_channel_flag = false; in amdgpu_ras_init()
3998 con->features = 0; in amdgpu_ras_init()
3999 con->schema = 0; in amdgpu_ras_init()
4000 INIT_LIST_HEAD(&con->head); in amdgpu_ras_init()
4002 con->flags = RAS_DEFAULT_FLAGS; in amdgpu_ras_init()
4068 con->features |= ((adev->smuio.funcs->get_socket_id(adev)) << in amdgpu_ras_init()
4072 con->schema = amdgpu_get_ras_schema(adev); in amdgpu_ras_init()
4097 kfree(con); in amdgpu_ras_init()
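Lines 3965-3977 allocate the context and its ras_manager array in a single kzalloc(): the array is laid out immediately after the struct, so con->objs = (struct ras_manager *)(con + 1) simply points one past the header, and the lone kfree(con) at line 4097 (and again in fini, line 4402) releases everything. The same trick with calloc, with an assumed array length (the kernel sizes it from the block and sub-block counts):

    #include <stdio.h>
    #include <stdlib.h>

    struct ras_manager { int block; };

    struct ras_ctx {
        unsigned int features;
        struct ras_manager *objs;   /* points into the same allocation */
    };

    #define NUM_OBJS 16             /* assumed; the kernel derives this count */

    static struct ras_ctx *ras_ctx_alloc(void)
    {
        /* One allocation for header plus trailing array,
         * like kzalloc(sizeof(*con) + ...) at line 3965. */
        struct ras_ctx *con = calloc(1, sizeof(*con) +
                                        sizeof(struct ras_manager) * NUM_OBJS);
        if (!con)
            return NULL;
        con->objs = (struct ras_manager *)(con + 1);  /* mirrors line 3977 */
        return con;
    }

    int main(void)
    {
        struct ras_ctx *con = ras_ctx_alloc();
        if (!con)
            return 1;
        con->objs[3].block = 3;
        printf("objs starts %zu bytes after con\n",
               (size_t)((char *)con->objs - (char *)con));
        free(con);              /* single free releases header and array */
        return 0;
    }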
4131 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_is_poison_mode_supported() local
4133 if (!con) in amdgpu_ras_is_poison_mode_supported()
4136 return con->poison_supported; in amdgpu_ras_is_poison_mode_supported()
4144 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_block_late_init() local
4196 atomic_set(&con->ras_ce_count, ce_count); in amdgpu_ras_block_late_init()
4197 atomic_set(&con->ras_ue_count, ue_count); in amdgpu_ras_block_late_init()
4245 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_resume() local
4248 if (!adev->ras_enabled || !con) { in amdgpu_ras_resume()
4255 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { in amdgpu_ras_resume()
4267 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_resume()
4279 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_suspend() local
4281 if (!adev->ras_enabled || !con) in amdgpu_ras_suspend()
4286 if (AMDGPU_RAS_GET_FEATURES(con->features)) in amdgpu_ras_suspend()
4347 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_pre_fini() local
4349 if (!adev->ras_enabled || !con) in amdgpu_ras_pre_fini()
4354 if (AMDGPU_RAS_GET_FEATURES(con->features)) in amdgpu_ras_pre_fini()
4364 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_fini() local
4366 if (!adev->ras_enabled || !con) in amdgpu_ras_fini()
4394 WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared"); in amdgpu_ras_fini()
4396 if (AMDGPU_RAS_GET_FEATURES(con->features)) in amdgpu_ras_fini()
4399 cancel_delayed_work_sync(&con->ras_counte_delay_work); in amdgpu_ras_fini()
4402 kfree(con); in amdgpu_ras_fini()
4574 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_release_ras_context() local
4576 if (!con) in amdgpu_release_ras_context()
4579 if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) { in amdgpu_release_ras_context()
4580 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX); in amdgpu_release_ras_context()
4582 kfree(con); in amdgpu_release_ras_context()
4772 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_set_mca_debug_mode() local
4775 if (con) { in amdgpu_ras_set_mca_debug_mode()
4778 con->is_aca_debug_mode = enable; in amdgpu_ras_set_mca_debug_mode()
4786 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_set_aca_debug_mode() local
4789 if (con) { in amdgpu_ras_set_aca_debug_mode()
4795 con->is_aca_debug_mode = enable; in amdgpu_ras_set_aca_debug_mode()
4803 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_get_aca_debug_mode() local
4807 if (!con) in amdgpu_ras_get_aca_debug_mode()
4812 return con->is_aca_debug_mode; in amdgpu_ras_get_aca_debug_mode()
4820 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_get_error_query_mode() local
4824 if (!con) { in amdgpu_ras_get_error_query_mode()
4833 (con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY; in amdgpu_ras_get_error_query_mode()
5272 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_reserve_page() local
5277 mutex_lock(&con->page_rsv_lock); in amdgpu_ras_reserve_page()
5281 mutex_unlock(&con->page_rsv_lock); in amdgpu_ras_reserve_page()
5306 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_is_rma() local
5308 if (!con) in amdgpu_ras_is_rma()
5311 return con->is_rma; in amdgpu_ras_is_rma()