Lines matching references to con (the struct amdgpu_ras context pointer) in drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c. Each entry shows the file line number, the matching source line, and the enclosing function.
110 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
572 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_create_obj() local
575 if (!adev->ras_enabled || !con) in amdgpu_ras_create_obj()
585 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; in amdgpu_ras_create_obj()
587 obj = &con->objs[head->block]; in amdgpu_ras_create_obj()
595 list_add(&obj->node, &con->head); in amdgpu_ras_create_obj()
605 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_find_obj() local
609 if (!adev->ras_enabled || !con) in amdgpu_ras_find_obj()
620 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; in amdgpu_ras_find_obj()
622 obj = &con->objs[head->block]; in amdgpu_ras_find_obj()
628 obj = &con->objs[i]; in amdgpu_ras_find_obj()
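Note: amdgpu_ras_create_obj() and amdgpu_ras_find_obj() above share one indexing scheme: con->objs holds one ras_manager slot per block, with extra slots past AMDGPU_RAS_BLOCK__LAST reserved for sub-blocks. A minimal runnable userspace sketch of that scheme, with made-up names (pick_slot, BLOCK_*, SUB_BLOCK_SLOTS are illustrative, not amdgpu identifiers):

    #include <stdio.h>

    enum ras_block { BLOCK_UMC, BLOCK_GFX, BLOCK__LAST };
    #define SUB_BLOCK_SLOTS 4  /* extra slots after __LAST, as in con->objs */

    struct ras_manager { int in_use; };

    static struct ras_manager objs[BLOCK__LAST + SUB_BLOCK_SLOTS];

    /* Ordinary blocks index by block id; sub-blocks index past __LAST,
     * mirroring objs[head->block] vs objs[__LAST + sub_block_index]. */
    static struct ras_manager *pick_slot(enum ras_block block, int sub_block_index)
    {
        if (sub_block_index > 0)
            return &objs[BLOCK__LAST + sub_block_index];
        return &objs[block];
    }

    int main(void)
    {
        struct ras_manager *m = pick_slot(BLOCK_GFX, 0);
        m->in_use = 1;
        printf("slot %ld in use\n", (long)(m - objs));
        return 0;
    }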
648 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_is_feature_enabled() local
650 return con->features & BIT(head->block); in amdgpu_ras_is_feature_enabled()
660 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in __amdgpu_ras_feature_enable() local
681 con->features |= BIT(head->block); in __amdgpu_ras_feature_enable()
684 con->features &= ~BIT(head->block); in __amdgpu_ras_feature_enable()
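Note: the feature bookkeeping above is a plain bitmask, one bit per RAS block: ORed in on enable, masked out on disable, ANDed for the is-enabled test in amdgpu_ras_is_feature_enabled(). A runnable sketch, with BIT() spelled out since this is userspace rather than <linux/bits.h>:

    #include <stdio.h>
    #include <stdint.h>

    #define BIT(n) (1u << (n))

    enum ras_block { BLOCK_UMC, BLOCK_SDMA, BLOCK_GFX };

    int main(void)
    {
        uint32_t features = 0;

        features |= BIT(BLOCK_GFX);    /* enable, as in __amdgpu_ras_feature_enable() */
        features &= ~BIT(BLOCK_SDMA);  /* disable */

        printf("gfx enabled: %d\n", !!(features & BIT(BLOCK_GFX)));
        printf("feature mask: 0x%x\n", features);
        return 0;
    }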
696 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_feature_enable() local
700 if (!con) in amdgpu_ras_feature_enable()
745 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_feature_enable_on_boot() local
748 if (!con) in amdgpu_ras_feature_enable_on_boot()
751 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { in amdgpu_ras_feature_enable_on_boot()
779 con->features |= BIT(head->block); in amdgpu_ras_feature_enable_on_boot()
785 con->features &= ~BIT(head->block); in amdgpu_ras_feature_enable_on_boot()
796 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_disable_all_features() local
799 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_disable_all_features()
812 return con->features; in amdgpu_ras_disable_all_features()
818 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_enable_all_features() local
865 return con->features; in amdgpu_ras_enable_all_features()
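Note: the teardown loop in amdgpu_ras_disable_all_features() (and the later ones in amdgpu_ras_fs_fini() and amdgpu_ras_interrupt_remove_all()) uses list_for_each_entry_safe(), which caches the next entry so the current one can be unlinked and freed mid-walk. A hedged kernel-style fragment of the same pattern, with stand-in names (ras_obj, teardown_all are not amdgpu identifiers):

    #include <linux/list.h>
    #include <linux/slab.h>

    struct ras_obj { struct list_head node; };

    static void teardown_all(struct list_head *head)
    {
        struct ras_obj *obj, *tmp;

        list_for_each_entry_safe(obj, tmp, head, node) {
            list_del(&obj->node);  /* safe: tmp already caches the next entry */
            kfree(obj);
        }
    }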
1153 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_query_error_count() local
1157 if (!adev->ras_enabled || !con) in amdgpu_ras_query_error_count()
1167 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_query_error_count()
1244 struct amdgpu_ras *con = in amdgpu_ras_sysfs_badpages_read() local
1246 struct amdgpu_device *adev = con->adev; in amdgpu_ras_sysfs_badpages_read()
1275 struct amdgpu_ras *con = in amdgpu_ras_sysfs_features_read() local
1278 return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features); in amdgpu_ras_sysfs_features_read()
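Note: the sysfs handlers above never receive con directly; as in amdgpu_ras_sysfs_features_read(), they recover it with container_of() from the attribute embedded in the context. A runnable userspace sketch of that recovery (ras_context and show are illustrative names, and container_of is re-spelled here since <linux/kernel.h> is unavailable):

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct attribute { const char *name; };

    struct ras_context {
        unsigned int features;
        struct attribute features_attr;  /* embedded, like con->features_attr */
    };

    static void show(struct attribute *attr)
    {
        struct ras_context *con =
            container_of(attr, struct ras_context, features_attr);

        printf("feature mask: 0x%x\n", con->features);
    }

    int main(void)
    {
        struct ras_context con = { .features = 0x5,
                                   .features_attr = { "features" } };
        show(&con.features_attr);
        return 0;
    }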
1283 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_sysfs_remove_bad_page_node() local
1286 &con->badpages_attr.attr, in amdgpu_ras_sysfs_remove_bad_page_node()
1292 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_sysfs_remove_feature_node() local
1294 &con->features_attr.attr, in amdgpu_ras_sysfs_remove_feature_node()
1361 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_sysfs_remove_all() local
1364 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_sysfs_remove_all()
1398 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_debugfs_create_ctrl_node() local
1408 &con->bad_page_cnt_threshold); in amdgpu_ras_debugfs_create_ctrl_node()
1413 con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table", in amdgpu_ras_debugfs_create_ctrl_node()
1416 amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control); in amdgpu_ras_debugfs_create_ctrl_node()
1426 debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot); in amdgpu_ras_debugfs_create_ctrl_node()
1433 &con->disable_ras_err_cnt_harvest); in amdgpu_ras_debugfs_create_ctrl_node()
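Note: the excerpts above populate a debugfs control directory. A minimal kernel-module sketch of the same calls, assuming a standalone module named ras_demo (not the amdgpu code); it reuses debugfs_create_bool() as the auto_reboot line does, with 0666 as the octal equivalent of S_IWUGO | S_IRUGO:

    #include <linux/module.h>
    #include <linux/debugfs.h>

    static struct dentry *dir;
    static bool auto_reboot;

    static int __init ras_demo_init(void)
    {
        dir = debugfs_create_dir("ras_demo", NULL);
        /* world-readable/writable knob, mirroring the listing's mode bits */
        debugfs_create_bool("auto_reboot", 0666, dir, &auto_reboot);
        return 0;
    }

    static void __exit ras_demo_exit(void)
    {
        debugfs_remove_recursive(dir);
    }

    module_init(ras_demo_init);
    module_exit(ras_demo_exit);
    MODULE_LICENSE("GPL");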
1458 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_debugfs_create_all() local
1467 if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con) in amdgpu_ras_debugfs_create_all()
1472 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_debugfs_create_all()
1492 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_fs_init() local
1497 &con->features_attr.attr, in amdgpu_ras_fs_init()
1507 con->features_attr = dev_attr_features; in amdgpu_ras_fs_init()
1514 con->badpages_attr = bin_attr_gpu_vram_bad_pages; in amdgpu_ras_fs_init()
1515 bin_attrs[0] = &con->badpages_attr; in amdgpu_ras_fs_init()
1529 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_fs_fini() local
1533 list_for_each_entry_safe(con_obj, tmp, &con->head, node) { in amdgpu_ras_fs_fini()
1689 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_interrupt_remove_all() local
1692 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_interrupt_remove_all()
1706 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_log_on_err_counter() local
1709 if (!adev->ras_enabled || !con) in amdgpu_ras_log_on_err_counter()
1712 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_log_on_err_counter()
1756 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_query_err_status() local
1759 if (!adev->ras_enabled || !con) in amdgpu_ras_query_err_status()
1762 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_query_err_status()
1779 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_badpages_read() local
1784 if (!con || !con->eh_data || !bps || !count) in amdgpu_ras_badpages_read()
1787 mutex_lock(&con->recovery_lock); in amdgpu_ras_badpages_read()
1788 data = con->eh_data; in amdgpu_ras_badpages_read()
1818 mutex_unlock(&con->recovery_lock); in amdgpu_ras_badpages_read()
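Note: amdgpu_ras_badpages_read() brackets its walk of the shared eh_data with recovery_lock, copying what it needs out before unlocking. A runnable userspace sketch of that copy-out-under-lock pattern, with simplified stand-in types (err_data, badpages_read here are not the kernel structures):

    #include <stdio.h>
    #include <pthread.h>

    struct err_data { unsigned long addrs[8]; int count; };

    static pthread_mutex_t recovery_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct err_data eh_data = { { 0x1000, 0x2000 }, 2 };

    static int badpages_read(unsigned long *out, int max)
    {
        int i, n;

        pthread_mutex_lock(&recovery_lock);
        n = eh_data.count < max ? eh_data.count : max;
        for (i = 0; i < n; i++)  /* snapshot while the lock is held */
            out[i] = eh_data.addrs[i];
        pthread_mutex_unlock(&recovery_lock);
        return n;
    }

    int main(void)
    {
        unsigned long bps[8];
        int n = badpages_read(bps, 8);

        printf("read %d bad pages, first at 0x%lx\n", n, bps[0]);
        return 0;
    }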
1885 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_add_bad_pages() local
1890 if (!con || !con->eh_data || !bps || pages <= 0) in amdgpu_ras_add_bad_pages()
1893 mutex_lock(&con->recovery_lock); in amdgpu_ras_add_bad_pages()
1894 data = con->eh_data; in amdgpu_ras_add_bad_pages()
1899 if (amdgpu_ras_check_bad_page_unlock(con, in amdgpu_ras_add_bad_pages()
1919 mutex_unlock(&con->recovery_lock); in amdgpu_ras_add_bad_pages()
1930 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_save_bad_pages() local
1935 if (!con || !con->eh_data) in amdgpu_ras_save_bad_pages()
1938 control = &con->eeprom_control; in amdgpu_ras_save_bad_pages()
1939 data = con->eh_data; in amdgpu_ras_save_bad_pages()
1985 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, in amdgpu_ras_check_bad_page_unlock() argument
1988 struct ras_err_handler_data *data = con->eh_data; in amdgpu_ras_check_bad_page_unlock()
2007 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_check_bad_page() local
2010 if (!con || !con->eh_data) in amdgpu_ras_check_bad_page()
2013 mutex_lock(&con->recovery_lock); in amdgpu_ras_check_bad_page()
2014 ret = amdgpu_ras_check_bad_page_unlock(con, addr); in amdgpu_ras_check_bad_page()
2015 mutex_unlock(&con->recovery_lock); in amdgpu_ras_check_bad_page()
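Note: the _unlock suffix on amdgpu_ras_check_bad_page_unlock() follows the kernel convention that the caller already holds the lock; the plain-named amdgpu_ras_check_bad_page() wrapper takes and drops recovery_lock around it, as lines 2013-2015 show. A small illustrative userspace sketch of that split (all names made up):

    #include <stdio.h>
    #include <stdbool.h>
    #include <pthread.h>

    static pthread_mutex_t recovery_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Caller must hold recovery_lock; no locking inside. */
    static bool check_bad_page_unlock(unsigned long addr)
    {
        /* walk the bad-page list here */
        return addr == 0x1000;
    }

    static bool check_bad_page(unsigned long addr)
    {
        bool ret;

        pthread_mutex_lock(&recovery_lock);
        ret = check_bad_page_unlock(addr);
        pthread_mutex_unlock(&recovery_lock);
        return ret;
    }

    int main(void)
    {
        printf("0x1000 bad? %d\n", check_bad_page(0x1000));
        return 0;
    }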
2022 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_validate_threshold() local
2047 con->bad_page_cnt_threshold = min(lower_32_bits(val), in amdgpu_ras_validate_threshold()
2050 con->bad_page_cnt_threshold = min_t(int, max_count, in amdgpu_ras_validate_threshold()
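Note: amdgpu_ras_validate_threshold() clamps a 64-bit configured value into the 32-bit bad_page_cnt_threshold using lower_32_bits() and min()/min_t(). A runnable sketch with those helpers re-spelled for userspace (semantics follow the kernel macros; the values are made up):

    #include <stdio.h>
    #include <stdint.h>

    #define lower_32_bits(n) ((uint32_t)((n) & 0xffffffff))
    #define min(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        uint64_t val = 0x100000000ULL + 50;  /* e.g. derived from reserved VRAM */
        uint32_t max_count = 100;
        uint32_t threshold = min(lower_32_bits(val), max_count);

        printf("bad_page_cnt_threshold = %u\n", threshold);
        return 0;
    }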
2057 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_recovery_init() local
2063 if (!con) in amdgpu_ras_recovery_init()
2071 con->adev = adev; in amdgpu_ras_recovery_init()
2076 data = &con->eh_data; in amdgpu_ras_recovery_init()
2083 mutex_init(&con->recovery_lock); in amdgpu_ras_recovery_init()
2084 INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery); in amdgpu_ras_recovery_init()
2085 atomic_set(&con->in_recovery, 0); in amdgpu_ras_recovery_init()
2096 ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit); in amdgpu_ras_recovery_init()
2104 if (con->eeprom_control.ras_num_recs) { in amdgpu_ras_recovery_init()
2110 adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs); in amdgpu_ras_recovery_init()
2123 con->eh_data = NULL; in amdgpu_ras_recovery_init()
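Note: amdgpu_ras_recovery_init() wires up the three pieces the recovery path needs: a mutex guarding eh_data, a work item for deferred recovery, and an atomic flag so only one recovery runs at a time. A hedged kernel-style sketch with a simplified stand-in context (ras_demo and the demo_* functions are not amdgpu names):

    #include <linux/kernel.h>
    #include <linux/mutex.h>
    #include <linux/workqueue.h>
    #include <linux/atomic.h>

    struct ras_demo {
        struct mutex recovery_lock;
        struct work_struct recovery_work;
        atomic_t in_recovery;
    };

    static void demo_do_recovery(struct work_struct *work)
    {
        struct ras_demo *con = container_of(work, struct ras_demo,
                                            recovery_work);

        /* ... reset path would run here ... */
        atomic_set(&con->in_recovery, 0);
    }

    static void demo_recovery_init(struct ras_demo *con)
    {
        mutex_init(&con->recovery_lock);
        INIT_WORK(&con->recovery_work, demo_do_recovery);
        atomic_set(&con->in_recovery, 0);
    }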
2141 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_recovery_fini() local
2142 struct ras_err_handler_data *data = con->eh_data; in amdgpu_ras_recovery_fini()
2148 cancel_work_sync(&con->recovery_work); in amdgpu_ras_recovery_fini()
2150 mutex_lock(&con->recovery_lock); in amdgpu_ras_recovery_fini()
2151 con->eh_data = NULL; in amdgpu_ras_recovery_fini()
2154 mutex_unlock(&con->recovery_lock); in amdgpu_ras_recovery_fini()
2240 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, in amdgpu_ras_counte_dw() local
2242 struct amdgpu_device *adev = con->adev; in amdgpu_ras_counte_dw()
2254 atomic_set(&con->ras_ce_count, ce_count); in amdgpu_ras_counte_dw()
2255 atomic_set(&con->ras_ue_count, ue_count); in amdgpu_ras_counte_dw()
2265 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_init() local
2269 if (con) in amdgpu_ras_init()
2272 con = kmalloc(sizeof(struct amdgpu_ras) + in amdgpu_ras_init()
2276 if (!con) in amdgpu_ras_init()
2279 con->adev = adev; in amdgpu_ras_init()
2280 INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw); in amdgpu_ras_init()
2281 atomic_set(&con->ras_ce_count, 0); in amdgpu_ras_init()
2282 atomic_set(&con->ras_ue_count, 0); in amdgpu_ras_init()
2284 con->objs = (struct ras_manager *)(con + 1); in amdgpu_ras_init()
2286 amdgpu_ras_set_context(adev, con); in amdgpu_ras_init()
2295 con->features |= BIT(AMDGPU_RAS_BLOCK__GFX); in amdgpu_ras_init()
2304 con->features = 0; in amdgpu_ras_init()
2305 INIT_LIST_HEAD(&con->head); in amdgpu_ras_init()
2307 con->flags = RAS_DEFAULT_FLAGS; in amdgpu_ras_init()
2349 con->poison_supported = true; in amdgpu_ras_init()
2367 kfree(con); in amdgpu_ras_init()
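Note: amdgpu_ras_init() above makes a single allocation sized for the context plus its object array, then points con->objs at (con + 1), so one kfree() releases everything. A runnable userspace sketch of the same trick, with malloc standing in for kmalloc and a made-up object count:

    #include <stdio.h>
    #include <stdlib.h>

    struct ras_manager { int block; };

    struct ras_context {
        unsigned int features;
        struct ras_manager *objs;
    };

    int main(void)
    {
        /* in the driver, nobjs covers every block plus the sub-block slots */
        int nobjs = 4;
        struct ras_context *con =
            malloc(sizeof(*con) + nobjs * sizeof(struct ras_manager));

        if (!con)
            return 1;
        con->objs = (struct ras_manager *)(con + 1);  /* array right after header */
        con->objs[0].block = 0;
        printf("objs start %td bytes in\n", (char *)con->objs - (char *)con);
        free(con);  /* one allocation, one free */
        return 0;
    }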
2400 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_is_poison_mode_supported() local
2402 if (!con) in amdgpu_ras_is_poison_mode_supported()
2405 return con->poison_supported; in amdgpu_ras_is_poison_mode_supported()
2414 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_late_init() local
2454 atomic_set(&con->ras_ce_count, ce_count); in amdgpu_ras_late_init()
2455 atomic_set(&con->ras_ue_count, ue_count); in amdgpu_ras_late_init()
2488 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_resume() local
2491 if (!adev->ras_enabled || !con) { in amdgpu_ras_resume()
2498 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { in amdgpu_ras_resume()
2510 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_resume()
2522 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_suspend() local
2524 if (!adev->ras_enabled || !con) in amdgpu_ras_suspend()
2529 if (con->features) in amdgpu_ras_suspend()
2536 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_pre_fini() local
2538 if (!adev->ras_enabled || !con) in amdgpu_ras_pre_fini()
2550 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_fini() local
2552 if (!adev->ras_enabled || !con) in amdgpu_ras_fini()
2558 WARN(con->features, "Feature mask is not cleared"); in amdgpu_ras_fini()
2560 if (con->features) in amdgpu_ras_fini()
2563 cancel_delayed_work_sync(&con->ras_counte_delay_work); in amdgpu_ras_fini()
2566 kfree(con); in amdgpu_ras_fini()
2598 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_release_ras_context() local
2600 if (!con) in amdgpu_release_ras_context()
2603 if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) { in amdgpu_release_ras_context()
2604 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX); in amdgpu_release_ras_context()
2606 kfree(con); in amdgpu_release_ras_context()