/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>
#include <linux/pm_runtime.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "atom.h"
#include "amdgpu_reset.h"

#ifdef CONFIG_X86_MCE_AMD
#include <asm/mce.h>

static bool notifier_registered;
#endif
static const char *RAS_FS_NAME = "ras";

const char *ras_error_string[] = {
	"none",
	"parity",
	"single_correctable",
	"multi_uncorrectable",
	"poison",
};

const char *ras_block_string[] = {
	"umc",
	"sdma",
	"gfx",
	"mmhub",
	"athub",
	"pcie_bif",
	"hdp",
	"xgmi_wafl",
	"df",
	"smn",
	"sem",
	"mp0",
	"mp1",
	"fuse",
	"mca",
	"vcn",
	"jpeg",
};

const char *ras_mca_block_string[] = {
	"mca_mp0",
	"mca_mp1",
	"mca_mpio",
	"mca_iohc",
};

struct amdgpu_ras_block_list {
	/* ras block link */
	struct list_head node;

	struct amdgpu_ras_block_object *ras_obj;
};
const char *get_ras_block_str(struct ras_common_if *ras_block)
{
	if (!ras_block)
		return "NULL";

	if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT)
		return "OUT OF RANGE";

	if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
		return ras_mca_block_string[ras_block->sub_block_index];

	return ras_block_string[ras_block->block];
}

#define ras_block_str(_BLOCK_) \
	(((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")

#define ras_err_str(i) (ras_error_string[ffs(i)])

#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)

/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)

/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER		(100 * 1024 * 1024ULL)
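/* elsewhere the driver derives the default bad page threshold from this
 * cover rate and the VRAM size, e.g. a 16 GB board gives roughly
 * 16 GB / 100 MB = 163 retirable pages
 */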

enum amdgpu_ras_retire_page_reservation {
	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
	AMDGPU_RAS_RETIRE_PAGE_PENDING,
	AMDGPU_RAS_RETIRE_PAGE_FAULT,
};

atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);

static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
				uint64_t addr);
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
				uint64_t addr);
#ifdef CONFIG_X86_MCE_AMD
static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
struct mce_notifier_adev_list {
	struct amdgpu_device *devs[MAX_GPU_INSTANCE];
	int num_gpu;
};
static struct mce_notifier_adev_list mce_adev_list;
#endif

void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
{
	if (adev && amdgpu_ras_get_context(adev))
		amdgpu_ras_get_context(adev)->error_query_ready = ready;
}

static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
{
	if (adev && amdgpu_ras_get_context(adev))
		return amdgpu_ras_get_context(adev)->error_query_ready;

	return false;
}

static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
{
	struct ras_err_data err_data = {0, 0, 0, NULL};
	struct eeprom_table_record err_rec;

	if ((address >= adev->gmc.mc_vram_size) ||
	    (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
		dev_warn(adev->dev,
			 "RAS WARN: input address 0x%llx is invalid.\n",
			 address);
		return -EINVAL;
	}

	if (amdgpu_ras_check_bad_page(adev, address)) {
		dev_warn(adev->dev,
			 "RAS WARN: 0x%llx has already been marked as bad page!\n",
			 address);
		return 0;
	}

	memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
	err_data.err_addr = &err_rec;
	amdgpu_umc_fill_error_record(&err_data, address,
			(address >> AMDGPU_GPU_PAGE_SHIFT), 0, 0);

	if (amdgpu_bad_page_threshold != 0) {
		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
					 err_data.err_addr_cnt);
		amdgpu_ras_save_bad_pages(adev, NULL);
	}

	dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
	dev_warn(adev->dev, "Clear EEPROM:\n");
	dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");

	return 0;
}

static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
	struct ras_query_if info = {
		.head = obj->head,
	};
	ssize_t s;
	char val[128];

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	/* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
	if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
	    obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
			"ue", info.ue_count,
			"ce", info.ce_count);
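	/* serve partial reads from *pos; a read starting at or past the
	 * formatted length has nothing left to return
	 */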
	if (*pos >= s)
		return 0;

	s -= *pos;
	s = min_t(u64, s, size);

	if (copy_to_user(buf, &val[*pos], s))
		return -EINVAL;

	*pos += s;

	return s;
}

static const struct file_operations amdgpu_ras_debugfs_ops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ras_debugfs_read,
	.write = NULL,
	.llseek = default_llseek
};

static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
		*block_id = i;
		if (strcmp(name, ras_block_string[i]) == 0)
			return 0;
	}
	return -EINVAL;
}

static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
		const char __user *buf, size_t size,
		loff_t *pos, struct ras_debug_if *data)
{
	ssize_t s = min_t(u64, 64, size);
	char str[65];
	char block_name[33];
	char err[9] = "ue";
	int op = -1;
	int block_id;
	uint32_t sub_block;
	u64 address, value;

	if (*pos)
		return -EINVAL;
	*pos = size;

	memset(str, 0, sizeof(str));
	memset(data, 0, sizeof(*data));

	if (copy_from_user(str, buf, s))
		return -EINVAL;

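	/* try the human-readable command forms first; any other printable
	 * text is rejected, while a binary write is treated as a raw
	 * struct ras_debug_if below
	 */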
	if (sscanf(str, "disable %32s", block_name) == 1)
		op = 0;
	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
		op = 1;
	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
		op = 2;
	else if (strstr(str, "retire_page") != NULL)
		op = 3;
	else if (str[0] && str[1] && str[2] && str[3])
		/* ASCII string, but no command matched */
		return -EINVAL;

	if (op != -1) {
		if (op == 3) {
			if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
			    sscanf(str, "%*s %llu", &address) != 1)
				return -EINVAL;

			data->op = op;
			data->inject.address = address;

			return 0;
		}

		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
			return -EINVAL;

		data->head.block = block_id;
		/* only ue and ce errors are supported */
		if (!memcmp("ue", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		else if (!memcmp("ce", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
		else
			return -EINVAL;

		data->op = op;

		if (op == 2) {
			if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
				   &sub_block, &address, &value) != 3 &&
			    sscanf(str, "%*s %*s %*s %u %llu %llu",
				   &sub_block, &address, &value) != 3)
				return -EINVAL;
			data->head.sub_block_index = sub_block;
			data->inject.address = address;
			data->inject.value = value;
		}
	} else {
		if (size < sizeof(*data))
			return -EINVAL;

		if (copy_from_user(data, buf, sizeof(*data)))
			return -EINVAL;
	}

	return 0;
}

/**
 * DOC: AMDGPU RAS debugfs control interface
 *
 * The control interface accepts struct ras_debug_if which has two members.
 *
 * First member: ras_debug_if::head or ras_debug_if::inject.
 *
 * head is used to indicate which IP block will be under control.
 *
 * head has four members: block, type, sub_block_index, and name.
 * block: which IP will be under control.
 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have subcomponents, e.g. GFX, SDMA.
 * name: the name of the IP.
 *
 * inject has two more members than head: address and value.
 * As their names indicate, the inject operation will write the
 * value to the address.
 *
 * The second member: struct ras_debug_if::op.
 * It has three kinds of operations.
 *
 * - 0: disable RAS on the block. Take ::head as its data.
 * - 1: enable RAS on the block. Take ::head as its data.
 * - 2: inject errors on the block. Take ::inject as its data.
 *
 * How to use the interface?
 *
 * In a program
 *
 * Copy the struct ras_debug_if in your code and initialize it.
 * Write the struct to the control interface.
 *
 * From shell
 *
 * .. code-block:: bash
 *
 *	echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "inject <block> <error> <sub-block> <address> <value>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *
 * Where N is the card which you want to affect.
 *
 * "disable" requires only the block.
 * "enable" requires the block and error type.
 * "inject" requires the block, error type, address, and value.
 *
 * The block is one of: umc, sdma, gfx, etc.
 *	see ras_block_string[] for details
 *
 * The error type is one of: ue, ce, where,
 *	ue is multi-uncorrectable
 *	ce is single-correctable
 *
 * The sub-block is the sub-block index, pass 0 if there is no sub-block.
 * The address and value are hexadecimal numbers, leading 0x is optional.
 *
 * For instance,
 *
 * .. code-block:: bash
 *
 *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
 * How to check the result of the operation?
 *
 * To check disable/enable, see "ras" features at,
 * /sys/class/drm/card[0/1/2...]/device/ras/features
 *
 * To check inject, see the corresponding error count at,
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
 *
 * .. note::
 *	Operations are only allowed on blocks which are supported.
 *	Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
 *	to see which blocks support RAS on a particular asic.
 *
 */
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
					     const char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	struct ras_debug_if data;
	int ret = 0;

	if (!amdgpu_ras_get_error_query_ready(adev)) {
		dev_warn(adev->dev, "RAS WARN: error injection "
				"currently inaccessible\n");
		return size;
	}

	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
	if (ret)
		return ret;

	if (data.op == 3) {
		ret = amdgpu_reserve_page_direct(adev, data.inject.address);
		if (!ret)
			return size;
		else
			return ret;
	}

	if (!amdgpu_ras_is_supported(adev, data.head.block))
		return -EINVAL;

	switch (data.op) {
	case 0:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
		break;
	case 1:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
		break;
	case 2:
		if ((data.inject.address >= adev->gmc.mc_vram_size) ||
		    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
			dev_warn(adev->dev, "RAS WARN: input address "
					"0x%llx is invalid.",
					data.inject.address);
			ret = -EINVAL;
			break;
		}

		/* umc ce/ue error injection for a bad page is not allowed */
		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
			dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
					"already been marked as bad!\n",
					data.inject.address);
			break;
		}

		/* data.inject.address is an offset rather than an absolute gpu address */
		ret = amdgpu_ras_error_inject(adev, &data.inject);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	return size;
}

/**
 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
 *
 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages which have experienced ECC errors in vram. This interface provides
 * a way to reset the EEPROM, e.g., after testing error injection.
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo 1 > ../ras/ras_eeprom_reset
 *
 * will reset the EEPROM table to 0 entries.
 *
 */
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
					       const char __user *buf,
					       size_t size, loff_t *pos)
{
	struct amdgpu_device *adev =
		(struct amdgpu_device *)file_inode(f)->i_private;
	int ret;

	ret = amdgpu_ras_eeprom_reset_table(
		&(amdgpu_ras_get_context(adev)->eeprom_control));

	if (!ret) {
		/* Something was written to EEPROM.
		 */
		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
		return size;
	} else {
		return ret;
	}
}

static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_ctrl_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_eeprom_write,
	.llseek = default_llseek
};

/**
 * DOC: AMDGPU RAS sysfs Error Count Interface
 *
 * It allows the user to read the error count for each IP block on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * It outputs multiple lines which report the uncorrected (ue) and corrected
 * (ce) error counts.
 *
 * The format of one line is below,
 *
 * [ce|ue]: count
 *
 * Example:
 *
 * .. code-block:: bash
 *
 *	ue: 0
 *	ce: 1
 *
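 * For instance, counts like the above come from reading the per-block file
 * documented above (the card index varies per system):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/umc_err_count
 *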
 */
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
	struct ras_query_if info = {
		.head = obj->head,
	};

	if (!amdgpu_ras_get_error_query_ready(obj->adev))
		return sysfs_emit(buf, "Query currently inaccessible\n");

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
	    obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
			  "ce", info.ce_count);
}

/* obj begin */

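/* refcount helpers: a ras_manager obj is "alive" while its use count is
 * non-zero; the final put_obj() unlinks it from the context list
 */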
#define get_obj(obj) do { (obj)->use++; } while (0)
#define alive_obj(obj) ((obj)->use)

static inline void put_obj(struct ras_manager *obj)
{
	if (obj && (--obj->use == 0))
		list_del(&obj->node);
	if (obj && (obj->use < 0))
		DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", get_ras_block_str(&obj->head));
}

/* make one obj and return it. */
static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
		return NULL;

	if (head->block == AMDGPU_RAS_BLOCK__MCA) {
		if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
			return NULL;

		obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
	} else
		obj = &con->objs[head->block];

	/* already exists, do not create it again */
	if (alive_obj(obj))
		return NULL;

	obj->head = *head;
	obj->adev = adev;
	list_add(&obj->node, &con->head);
	get_obj(obj);

	return obj;
}

/* return an obj equal to head, or the first when head is NULL */
struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	int i;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head) {
		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
			return NULL;

		if (head->block == AMDGPU_RAS_BLOCK__MCA) {
			if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
				return NULL;

			obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
		} else
			obj = &con->objs[head->block];

		if (alive_obj(obj))
			return obj;
	} else {
		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
			obj = &con->objs[i];
			if (alive_obj(obj))
				return obj;
		}
	}

	return NULL;
}
/* obj end */

/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
					 struct ras_common_if *head)
{
	return adev->ras_hw_enabled & BIT(head->block);
}

static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	return con->features & BIT(head->block);
}

/*
 * if obj is not created, then create one.
 * set feature enable flag.
 */
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, int enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	/* If hardware does not support ras, then do not create obj.
	 * But if hardware does support ras, we can create the obj.
	 * The ras framework checks con->hw_supported to see if it needs
	 * to do the corresponding initialization.
	 * The IP checks con->support to see if it needs to disable ras.
	 */
	if (!amdgpu_ras_is_feature_allowed(adev, head))
		return 0;

	if (enable) {
		if (!obj) {
			obj = amdgpu_ras_create_obj(adev, head);
			if (!obj)
				return -EINVAL;
		} else {
			/* In case we create obj somewhere else */
			get_obj(obj);
		}
		con->features |= BIT(head->block);
	} else {
		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
			con->features &= ~BIT(head->block);
			put_obj(obj);
		}
	}

	return 0;
}

static int amdgpu_ras_check_feature_allowed(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	if (amdgpu_ras_is_feature_allowed(adev, head) ||
	    amdgpu_ras_is_poison_mode_supported(adev))
		return 1;
	else
		return 0;
}

/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	union ta_ras_cmd_input *info;
	int ret = 0;

	if (!con)
		return -EINVAL;

	if (head->block == AMDGPU_RAS_BLOCK__GFX) {
		info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		if (!enable) {
			info->disable_features = (struct ta_ras_disable_features_input) {
				.block_id = amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		} else {
			info->enable_features = (struct ta_ras_enable_features_input) {
				.block_id = amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		}
	}

	/* Do not enable if it is not allowed. */
	if (enable && !amdgpu_ras_check_feature_allowed(adev, head))
		goto out;

	/* Only enable ras feature operation handle on host side */
	if (head->block == AMDGPU_RAS_BLOCK__GFX &&
	    !amdgpu_sriov_vf(adev) &&
	    !amdgpu_ras_intr_triggered()) {
		ret = psp_ras_enable_features(&adev->psp, info, enable);
		if (ret) {
			dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
				enable ? "enable":"disable",
				get_ras_block_str(head),
				amdgpu_ras_is_poison_mode_supported(adev), ret);
			goto out;
		}
	}

	/* setup the obj */
	__amdgpu_ras_feature_enable(adev, head, enable);
out:
	if (head->block == AMDGPU_RAS_BLOCK__GFX)
		kfree(info);
	return ret;
}

/* Only used in device probe stage and called only once. */
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ret;

	if (!con)
		return -EINVAL;

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		if (enable) {
			/* There is no harm in issuing a ras TA cmd regardless of
			 * the current ras state.
			 * If current state == target state, it will do nothing.
			 * But sometimes it requests the driver to reset and repost
			 * with error code -EAGAIN.
			 */
			ret = amdgpu_ras_feature_enable(adev, head, 1);
			/* With an old ras TA, we might fail to enable ras.
			 * Log it and just set up the object.
			 * TODO: remove this WA in the future.
			 */
			if (ret == -EINVAL) {
				ret = __amdgpu_ras_feature_enable(adev, head, 1);
				if (!ret)
					dev_info(adev->dev,
						"RAS INFO: %s setup object\n",
						get_ras_block_str(head));
			}
		} else {
			/* setup the object then issue a ras TA disable cmd.*/
			ret = __amdgpu_ras_feature_enable(adev, head, 1);
			if (ret)
				return ret;

			/* the gfx block ras disable cmd must be sent to the ras-ta */
			if (head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features |= BIT(head->block);

			ret = amdgpu_ras_feature_enable(adev, head, 0);

			/* clean gfx block ras features flag */
			if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features &= ~BIT(head->block);
		}
	} else
		ret = amdgpu_ras_feature_enable(adev, head, enable);

	return ret;
}

static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		/* bypass psp, i.e. just release the obj and the
		 * corresponding flags
		 */
		if (bypass) {
			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		}
	}

	return con->features;
}

static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int i;
	const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;

	for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
		struct ras_common_if head = {
			.block = i,
			.type = default_ras_type,
			.sub_block_index = 0,
		};

		if (i == AMDGPU_RAS_BLOCK__MCA)
			continue;

		if (bypass) {
			/*
			 * bypass psp; the vbios has enabled ras for us,
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
		struct ras_common_if head = {
			.block = AMDGPU_RAS_BLOCK__MCA,
			.type = default_ras_type,
			.sub_block_index = i,
		};

		if (bypass) {
			/*
			 * bypass psp; the vbios has enabled ras for us,
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	return con->features;
}
/* feature ctl end */

static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
		enum amdgpu_ras_block block)
{
	if (!block_obj)
		return -EINVAL;

	if (block_obj->ras_comm.block == block)
		return 0;

	return -EINVAL;
}

static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
					enum amdgpu_ras_block block, uint32_t sub_block_index)
{
	struct amdgpu_ras_block_list *node, *tmp;
	struct amdgpu_ras_block_object *obj;

	if (block >= AMDGPU_RAS_BLOCK__LAST)
		return NULL;

	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
		if (!node->ras_obj) {
			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
			continue;
		}

		obj = node->ras_obj;
		if (obj->ras_block_match) {
			if (obj->ras_block_match(obj, block, sub_block_index) == 0)
				return obj;
		} else {
			if (amdgpu_ras_block_match_default(obj, block) == 0)
				return obj;
		}
	}

	return NULL;
}

static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	/*
	 * choose the right query method according to
	 * whether the smu supports querying error information
	 */
	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
	if (ret == -EOPNOTSUPP) {
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);

		/* umc query_ras_error_address is also responsible for clearing
		 * error status
		 */
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
	} else if (!ret) {
		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_count)
			adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);

		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_address)
			adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
	}
}

/* query/inject/cure begin */
int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
				  struct ras_query_if *info)
{
	struct amdgpu_ras_block_object *block_obj = NULL;
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_err_data err_data = {0, 0, 0, NULL};

	if (!obj)
		return -EINVAL;

	if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
		amdgpu_ras_get_ecc_info(adev, &err_data);
	} else {
		block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
		if (!block_obj || !block_obj->hw_ops) {
			dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
				     get_ras_block_str(&info->head));
			return -EINVAL;
		}

		if (block_obj->hw_ops->query_ras_error_count)
			block_obj->hw_ops->query_ras_error_count(adev, &err_data);

		if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
		    (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
		    (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
			if (block_obj->hw_ops->query_ras_error_status)
				block_obj->hw_ops->query_ras_error_status(adev);
		}
	}

	obj->err_data.ue_count += err_data.ue_count;
	obj->err_data.ce_count += err_data.ce_count;

	info->ue_count = obj->err_data.ue_count;
	info->ce_count = obj->err_data.ce_count;

	if (err_data.ce_count) {
		if (adev->smuio.funcs &&
		    adev->smuio.funcs->get_socket_id &&
		    adev->smuio.funcs->get_die_id) {
			dev_info(adev->dev, "socket: %d, die: %d "
					"%ld correctable hardware errors "
					"detected in %s block, no user "
					"action is needed.\n",
					adev->smuio.funcs->get_socket_id(adev),
					adev->smuio.funcs->get_die_id(adev),
					obj->err_data.ce_count,
					get_ras_block_str(&info->head));
		} else {
			dev_info(adev->dev, "%ld correctable hardware errors "
					"detected in %s block, no user "
					"action is needed.\n",
					obj->err_data.ce_count,
					get_ras_block_str(&info->head));
		}
	}
	if (err_data.ue_count) {
		if (adev->smuio.funcs &&
		    adev->smuio.funcs->get_socket_id &&
		    adev->smuio.funcs->get_die_id) {
			dev_info(adev->dev, "socket: %d, die: %d "
					"%ld uncorrectable hardware errors "
					"detected in %s block\n",
					adev->smuio.funcs->get_socket_id(adev),
					adev->smuio.funcs->get_die_id(adev),
					obj->err_data.ue_count,
					get_ras_block_str(&info->head));
		} else {
			dev_info(adev->dev, "%ld uncorrectable hardware errors "
					"detected in %s block\n",
					obj->err_data.ue_count,
					get_ras_block_str(&info->head));
		}
	}

	return 0;
}

int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
		enum amdgpu_ras_block block)
{
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);

	if (!amdgpu_ras_is_supported(adev, block))
		return -EINVAL;

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     ras_block_str(block));
		return -EINVAL;
	}

	if (block_obj->hw_ops->reset_ras_error_count)
		block_obj->hw_ops->reset_ras_error_count(adev);

	if ((block == AMDGPU_RAS_BLOCK__GFX) ||
	    (block == AMDGPU_RAS_BLOCK__MMHUB)) {
		if (block_obj->hw_ops->reset_ras_error_status)
			block_obj->hw_ops->reset_ras_error_status(adev);
	}

	return 0;
}

/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
		struct ras_inject_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ta_ras_trigger_error_input block_info = {
		.block_id = amdgpu_ras_block_to_ta(info->head.block),
		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
		.sub_block_index = info->head.sub_block_index,
		.address = info->address,
		.value = info->value,
	};
	int ret = -EINVAL;
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
							info->head.block,
							info->head.sub_block_index);

	/* injection on a guest isn't allowed, return success directly */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (!obj)
		return -EINVAL;

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     get_ras_block_str(&info->head));
		return -EINVAL;
	}

	/* Calculate XGMI relative offset */
	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		block_info.address =
			amdgpu_xgmi_get_relative_phy_addr(adev,
							  block_info.address);
	}

	if (info->head.block == AMDGPU_RAS_BLOCK__GFX) {
		if (block_obj->hw_ops->ras_error_inject)
			ret = block_obj->hw_ops->ras_error_inject(adev, info);
	} else {
		/* blocks that define their own ras_error_inject
		 * (e.g. xgmi) use it
		 */
		if (block_obj->hw_ops->ras_error_inject)
			ret = block_obj->hw_ops->ras_error_inject(adev, &block_info);
		else /* otherwise fall back to the default injection via psp */
			ret = psp_ras_trigger_error(&adev->psp, &block_info);
	}

	if (ret)
		dev_err(adev->dev, "ras inject %s failed %d\n",
			get_ras_block_str(&info->head), ret);

	return ret;
}

/**
 * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
 * @query_info: pointer to ras_query_if
 *
 * Return 0 on query success or if there is nothing to do, otherwise return
 * an error on failure.
 */
static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
					       unsigned long *ce_count,
					       unsigned long *ue_count,
					       struct ras_query_if *query_info)
{
	int ret;

	if (!query_info)
		/* do nothing if query_info is not specified */
		return 0;

	ret = amdgpu_ras_query_error_status(adev, query_info);
	if (ret)
		return ret;

	*ce_count += query_info->ce_count;
	*ue_count += query_info->ue_count;

	/* some hardware/IP supports read to clear,
	 * no need to explicitly reset the err status after the query call
	 */
	if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
	    adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
			dev_warn(adev->dev,
				 "Failed to reset error counter and error status\n");
	}

	return 0;
}

/**
 * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable
 * errors.
 * @query_info: pointer to ras_query_if if the query request is only for
 * specific ip block; if info is NULL, then the query request is for
 * all the ip blocks that support query ras error counters/status
 *
 * If set, @ce_count or @ue_count, count and return the corresponding
 * error counts in those integer pointers. Return 0 if the device
 * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
 */
int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
				 unsigned long *ce_count,
				 unsigned long *ue_count,
				 struct ras_query_if *query_info)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	unsigned long ce, ue;
	int ret = 0;

	if (!adev->ras_enabled || !con)
		return -EOPNOTSUPP;

	/* Don't count since no reporting.
	 */
	if (!ce_count && !ue_count)
		return 0;

	ce = 0;
	ue = 0;
	if (!query_info) {
		/* query all the ip blocks that support ras query interface */
		list_for_each_entry(obj, &con->head, node) {
			struct ras_query_if info = {
				.head = obj->head,
			};

			ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
		}
	} else {
		/* query specific ip block */
		ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
	}

	if (ret)
		return ret;

	if (ce_count)
		*ce_count = ce;

	if (ue_count)
		*ue_count = ue;

	return 0;
}
/* query/inject/cure end */


/* sysfs begin */

static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
		struct ras_badpage **bps, unsigned int *count);

static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
	switch (flags) {
	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
		return "R";
	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
		return "P";
	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
	default:
		return "F";
	}
}

/**
 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
 *
 * It allows the user to read the bad pages of vram on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
 *
 * It outputs multiple lines, and each line stands for one gpu page.
 *
 * The format of one line is below,
 * gpu pfn : gpu page size : flags
 *
 * gpu pfn and gpu page size are printed in hex format.
 * flags can be one of the characters below,
 *
 * R: reserved, this gpu page is reserved and not able to be used.
 *
 * P: pending for reserve, this gpu page is marked as bad, and will be
 * reserved in the next window of page_reserve.
 *
 * F: unable to reserve. this gpu page can't be reserved for some reason.
 *
 * Examples:
 *
 * .. code-block:: bash
 *
 *	0x00000001 : 0x00001000 : R
 *	0x00000002 : 0x00001000 : P
 *
 */

static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
		struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t ppos, size_t count)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, badpages_attr);
	struct amdgpu_device *adev = con->adev;
	const unsigned int element_size =
		sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
	unsigned int start = div64_ul(ppos + element_size - 1, element_size);
	unsigned int end = div64_ul(ppos + count - 1, element_size);
	ssize_t s = 0;
	struct ras_badpage *bps = NULL;
	unsigned int bps_count = 0;

	memset(buf, 0, count);

	if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
		return 0;

	for (; start < end && start < bps_count; start++)
		s += scnprintf(&buf[s], element_size + 1,
				"0x%08x : 0x%08x : %1s\n",
				bps[start].bp,
				bps[start].size,
				amdgpu_ras_badpage_flags_str(bps[start].flags));

	kfree(bps);

	return s;
}

static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, features_attr);

	return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
}

static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	sysfs_remove_file_from_group(&adev->dev->kobj,
				&con->badpages_attr.attr,
				RAS_FS_NAME);
}

static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute *attrs[] = {
		&con->features_attr.attr,
		NULL
	};
	struct attribute_group group = {
		.name = RAS_FS_NAME,
		.attrs = attrs,
	};

	sysfs_remove_group(&adev->dev->kobj, &group);

	return 0;
}

int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || obj->attr_inuse)
		return -EINVAL;

	get_obj(obj);

	snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
		"%s_err_count", head->name);

	obj->sysfs_attr = (struct device_attribute){
		.attr = {
			.name = obj->fs_data.sysfs_name,
			.mode = S_IRUGO,
		},
		.show = amdgpu_ras_sysfs_read,
	};
	sysfs_attr_init(&obj->sysfs_attr.attr);

	if (sysfs_add_file_to_group(&adev->dev->kobj,
				&obj->sysfs_attr.attr,
				RAS_FS_NAME)) {
		put_obj(obj);
		return -EINVAL;
	}

	obj->attr_inuse = 1;

	return 0;
}

int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || !obj->attr_inuse)
		return -EINVAL;

	sysfs_remove_file_from_group(&adev->dev->kobj,
				&obj->sysfs_attr.attr,
				RAS_FS_NAME);
	obj->attr_inuse = 0;
	put_obj(obj);

	return 0;
}

static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		amdgpu_ras_sysfs_remove(adev, &obj->head);
	}

	if (amdgpu_bad_page_threshold != 0)
		amdgpu_ras_sysfs_remove_bad_page_node(adev);

	amdgpu_ras_sysfs_remove_feature_node(adev);

	return 0;
}
/* sysfs end */

/**
 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
 *
 * Normally when there is an uncorrectable error, the driver will reset
 * the GPU to recover. However, in the event of an unrecoverable error,
 * the driver provides an interface to reboot the system automatically.
 *
 * The following file in debugfs provides that interface:
 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo true > .../ras/auto_reboot
 *
 */
/* debugfs begin */
static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *dir;

	dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_ctrl_ops);
	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_eeprom_ops);
	debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
			   &con->bad_page_cnt_threshold);
	debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
	debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
	debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_eeprom_size_ops);
	con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
						       S_IRUGO, dir, adev,
						       &amdgpu_ras_debugfs_eeprom_table_ops);
	amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);

	/*
	 * After one uncorrectable error happens, GPU recovery will usually
	 * be scheduled. But due to the known problem of GPU recovery failing
	 * to bring the GPU back, the interface below provides a direct way
	 * for the user to have the system rebooted automatically when an
	 * ERREVENT_ATHUB_INTERRUPT is generated. In that case the normal
	 * GPU recovery routine will never be called.
	 */
	debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);

	/*
	 * The user can set this so that the hardware error count registers
	 * of the RAS IPs are not cleaned up during ras recovery.
	 */
	debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
			    &con->disable_ras_err_cnt_harvest);
	return dir;
}

static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
				      struct ras_fs_if *head,
				      struct dentry *dir)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);

	if (!obj || !dir)
		return;

	get_obj(obj);

	memcpy(obj->fs_data.debugfs_name,
			head->debugfs_name,
			sizeof(obj->fs_data.debugfs_name));

	debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
			    obj, &amdgpu_ras_debugfs_ops);
}

void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct dentry *dir;
	struct ras_manager *obj;
	struct ras_fs_if fs_info;

	/*
	 * this won't be called in the resume path, so there is no need to
	 * check the suspend and gpu reset status
	 */
	if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
		return;

	dir = amdgpu_ras_debugfs_create_ctrl_node(adev);

	list_for_each_entry(obj, &con->head, node) {
		if (amdgpu_ras_is_supported(adev, obj->head.block) &&
		    (obj->attr_inuse == 1)) {
			sprintf(fs_info.debugfs_name, "%s_err_inject",
				get_ras_block_str(&obj->head));
			fs_info.head = obj->head;
			amdgpu_ras_debugfs_create(adev, &fs_info, dir);
		}
	}
}

/* debugfs end */

/* ras fs */
static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
		amdgpu_ras_sysfs_badpages_read, NULL, 0);
static DEVICE_ATTR(features, S_IRUGO,
		amdgpu_ras_sysfs_features_read, NULL);
static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute_group group = {
		.name = RAS_FS_NAME,
	};
	struct attribute *attrs[] = {
		&con->features_attr.attr,
		NULL
	};
	struct bin_attribute *bin_attrs[] = {
		NULL,
		NULL,
	};
	int r;

	/* add features entry */
	con->features_attr = dev_attr_features;
	group.attrs = attrs;
	sysfs_attr_init(attrs[0]);

	if (amdgpu_bad_page_threshold != 0) {
		/* add bad_page_features entry */
		bin_attr_gpu_vram_bad_pages.private = NULL;
		con->badpages_attr = bin_attr_gpu_vram_bad_pages;
		bin_attrs[0] = &con->badpages_attr;
		group.bin_attrs = bin_attrs;
		sysfs_bin_attr_init(bin_attrs[0]);
	}

	r = sysfs_create_group(&adev->dev->kobj, &group);
	if (r)
		dev_err(adev->dev, "Failed to create RAS sysfs group!");

	return 0;
}

static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *con_obj, *ip_obj, *tmp;

	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
			ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
			if (ip_obj)
				put_obj(ip_obj);
		}
	}

	amdgpu_ras_sysfs_remove_all(adev);
	return 0;
}
/* ras fs end */

/* ih begin */

/* For the hardware that cannot enable the bif ring for both ras_controller_irq
 * and ras_err_event_athub_irq ih cookies, the driver has to poll the status
 * register to check whether the interrupt is triggered or not, and properly
 * ack the interrupt if it is there
 */
void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
{
	/* Fatal error events are handled on host side */
	if (amdgpu_sriov_vf(adev) ||
	    !amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF))
		return;

	if (adev->nbio.ras &&
	    adev->nbio.ras->handle_ras_controller_intr_no_bifring)
		adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);

	if (adev->nbio.ras &&
	    adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
		adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
}

static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
				struct amdgpu_iv_entry *entry)
{
	bool poison_stat = false;
	struct amdgpu_device *adev = obj->adev;
	struct amdgpu_ras_block_object *block_obj =
		amdgpu_ras_get_ras_block(adev, obj->head.block, 0);

	if (!block_obj)
		return;

	/* both query_poison_status and handle_poison_consumption are optional,
	 * but at least one of them should be implemented if we need a poison
	 * consumption handler
	 */
	if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
		poison_stat = block_obj->hw_ops->query_poison_status(adev);
		if (!poison_stat) {
			/* Not a poison consumption interrupt, no need to handle it */
			dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
					block_obj->ras_comm.name);

			return;
		}
	}

	if (!adev->gmc.xgmi.connected_to_cpu)
		amdgpu_umc_poison_handler(adev, false);

	if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
		poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);

	/* gpu reset is fallback for failed and default cases */
	if (poison_stat) {
		dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n",
				block_obj->ras_comm.name);
		amdgpu_ras_reset_gpu(adev);
	} else {
		amdgpu_gfx_poison_consumption_handler(adev, entry);
	}
}

static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
				struct amdgpu_iv_entry *entry)
{
	dev_info(obj->adev->dev,
		"Poison is created, no user action is needed.\n");
}

static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
				struct amdgpu_iv_entry *entry)
{
	struct ras_ih_data *data = &obj->ih_data;
	struct ras_err_data err_data = {0, 0, 0, NULL};
	int ret;

	if (!data->cb)
		return;

	/* Let the IP handle its data; we may need the output from the
	 * callback to update the error type/count, etc.
	 */
	ret = data->cb(obj->adev, &err_data, entry);
	/* A ue will trigger an interrupt, and in that case
	 * we need a reset to recover the whole system.
	 * But leave the IP to do that recovery; here we just
	 * dispatch the error.
	 */
	if (ret == AMDGPU_RAS_SUCCESS) {
		/* these counts could be left as 0 if
		 * some blocks do not count the error number
		 */
		obj->err_data.ue_count += err_data.ue_count;
		obj->err_data.ce_count += err_data.ce_count;
	}
}

static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
{
	struct ras_ih_data *data = &obj->ih_data;
	struct amdgpu_iv_entry entry;

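	/* drain the IH ring: rptr chases wptr, one aligned entry at a time */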
	while (data->rptr != data->wptr) {
		rmb();
		memcpy(&entry, &data->ring[data->rptr],
				data->element_size);

		wmb();
		data->rptr = (data->aligned_element_size +
				data->rptr) % data->ring_size;

		if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
				amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
			else
				amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
		} else {
			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
				amdgpu_ras_interrupt_umc_handler(obj, &entry);
			else
				dev_warn(obj->adev->dev,
					"No RAS interrupt handler for non-UMC block with poison disabled.\n");
		}
	}
}

static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
{
	struct ras_ih_data *data =
		container_of(work, struct ras_ih_data, ih_work);
	struct ras_manager *obj =
		container_of(data, struct ras_manager, ih_data);

	amdgpu_ras_interrupt_handler(obj);
}

int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
		struct ras_dispatch_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_ih_data *data = &obj->ih_data;

	if (!obj)
		return -EINVAL;

	if (data->inuse == 0)
		return 0;

	/* Might be overflow... */
	memcpy(&data->ring[data->wptr], info->entry,
			data->element_size);

	wmb();
	data->wptr = (data->aligned_element_size +
			data->wptr) % data->ring_size;

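	/* actual processing happens in ih_work, off the hard IRQ path */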
	schedule_work(&data->ih_work);

	return 0;
}

int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
	struct ras_ih_data *data;

	if (!obj)
		return -EINVAL;

	data = &obj->ih_data;
	if (data->inuse == 0)
		return 0;

	cancel_work_sync(&data->ih_work);

	kfree(data->ring);
	memset(data, 0, sizeof(*data));
	put_obj(obj);

	return 0;
}
1774
1775 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
1776 struct ras_common_if *head)
1777 {
1778 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1779 struct ras_ih_data *data;
1780 struct amdgpu_ras_block_object *ras_obj;
1781
1782 if (!obj) {
1783 /* in case we register the IH before enabling the ras feature */
1784 obj = amdgpu_ras_create_obj(adev, head);
1785 if (!obj)
1786 return -EINVAL;
1787 } else
1788 get_obj(obj);
1789
1790 ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
1791
1792 data = &obj->ih_data;
1793 /* set up the callback, etc. */
1794 *data = (struct ras_ih_data) {
1795 .inuse = 0,
1796 .cb = ras_obj->ras_cb,
1797 .element_size = sizeof(struct amdgpu_iv_entry),
1798 .rptr = 0,
1799 .wptr = 0,
1800 };
1801
1802 INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
1803
1804 data->aligned_element_size = ALIGN(data->element_size, 8);
1805 /* the ring can store 64 iv entries. */
1806 data->ring_size = 64 * data->aligned_element_size;
1807 data->ring = kmalloc(data->ring_size, GFP_KERNEL);
1808 if (!data->ring) {
1809 put_obj(obj);
1810 return -ENOMEM;
1811 }
1812
1813 /* IH is ready */
1814 data->inuse = 1;
1815
1816 return 0;
1817 }
1818
1819 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
1820 {
1821 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1822 struct ras_manager *obj, *tmp;
1823
1824 list_for_each_entry_safe(obj, tmp, &con->head, node) {
1825 amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
1826 }
1827
1828 return 0;
1829 }
1830 /* ih end */
1831
1832 /* traverse all IPs except NBIO to query error counters */
1833 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
1834 {
1835 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1836 struct ras_manager *obj;
1837
1838 if (!adev->ras_enabled || !con)
1839 return;
1840
1841 list_for_each_entry(obj, &con->head, node) {
1842 struct ras_query_if info = {
1843 .head = obj->head,
1844 };
1845
1846 /*
1847 * The PCIE_BIF IP has its own ISR for the ras controller
1848 * interrupt, and the specific ras counter query is done
1849 * in that ISR. So skip this block in the common sync
1850 * flood interrupt ISR path.
1851 */
1852 if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
1853 continue;
1854
1855 /*
1856 * This is a workaround for aldebaran: skip sending the msg to
1857 * smu to get the ecc_info table, because smu currently fails
1858 * to handle that request.
1859 * It should be removed once smu fixes the ecc_info table handling.
1860 */
1861 if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
1862 (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)))
1863 continue;
1864
1865 amdgpu_ras_query_error_status(adev, &info);
1866
1867 if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
1868 adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4) &&
1869 adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)) {
1870 if (amdgpu_ras_reset_error_status(adev, info.head.block))
1871 dev_warn(adev->dev, "Failed to reset error counter and error status");
1872 }
1873 }
1874 }
1875
1876 /* Parse RdRspStatus and WrRspStatus */
1877 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
1878 struct ras_query_if *info)
1879 {
1880 struct amdgpu_ras_block_object *block_obj;
1881 /*
1882 * Only two blocks need to query read/write
1883 * RspStatus at the current state
1884 */
1885 if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
1886 (info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
1887 return;
1888
1889 block_obj = amdgpu_ras_get_ras_block(adev,
1890 info->head.block,
1891 info->head.sub_block_index);
1892
1893 if (!block_obj || !block_obj->hw_ops) {
1894 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1895 get_ras_block_str(&info->head));
1896 return;
1897 }
1898
1899 if (block_obj->hw_ops->query_ras_error_status)
1900 block_obj->hw_ops->query_ras_error_status(adev);
1901
1902 }
1903
1904 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
1905 {
1906 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1907 struct ras_manager *obj;
1908
1909 if (!adev->ras_enabled || !con)
1910 return;
1911
1912 list_for_each_entry(obj, &con->head, node) {
1913 struct ras_query_if info = {
1914 .head = obj->head,
1915 };
1916
1917 amdgpu_ras_error_status_query(adev, &info);
1918 }
1919 }
1920
1921 /* recovery begin */
1922
1923 /* return 0 on success.
1924 * the caller needs to free bps.
1925 */
1926 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1927 struct ras_badpage **bps, unsigned int *count)
1928 {
1929 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1930 struct ras_err_handler_data *data;
1931 int i = 0;
1932 int ret = 0, status;
1933
1934 if (!con || !con->eh_data || !bps || !count)
1935 return -EINVAL;
1936
1937 mutex_lock(&con->recovery_lock);
1938 data = con->eh_data;
1939 if (!data || data->count == 0) {
1940 *bps = NULL;
1941 ret = -EINVAL;
1942 goto out;
1943 }
1944
1945 *bps = kmalloc_array(data->count, sizeof(struct ras_badpage), GFP_KERNEL);
1946 if (!*bps) {
1947 ret = -ENOMEM;
1948 goto out;
1949 }
1950
1951 for (; i < data->count; i++) {
1952 (*bps)[i] = (struct ras_badpage){
1953 .bp = data->bps[i].retired_page,
1954 .size = AMDGPU_GPU_PAGE_SIZE,
1955 .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
1956 };
1957 status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
1958 data->bps[i].retired_page);
1959 if (status == -EBUSY)
1960 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
1961 else if (status == -ENOENT)
1962 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
1963 }
1964
1965 *count = data->count;
1966 out:
1967 mutex_unlock(&con->recovery_lock);
1968 return ret;
1969 }
1970
1971 static void amdgpu_ras_do_recovery(struct work_struct *work)
1972 {
1973 struct amdgpu_ras *ras =
1974 container_of(work, struct amdgpu_ras, recovery_work);
1975 struct amdgpu_device *remote_adev = NULL;
1976 struct amdgpu_device *adev = ras->adev;
1977 struct list_head device_list, *device_list_handle = NULL;
1978
1979 if (!ras->disable_ras_err_cnt_harvest) {
1980 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
1981
1982 /* Build list of devices to query RAS related errors */
1983 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
1984 device_list_handle = &hive->device_list;
1985 } else {
1986 INIT_LIST_HEAD(&device_list);
1987 list_add_tail(&adev->gmc.xgmi.head, &device_list);
1988 device_list_handle = &device_list;
1989 }
1990
1991 list_for_each_entry(remote_adev,
1992 device_list_handle, gmc.xgmi.head) {
1993 amdgpu_ras_query_err_status(remote_adev);
1994 amdgpu_ras_log_on_err_counter(remote_adev);
1995 }
1996
1997 amdgpu_put_xgmi_hive(hive);
1998 }
1999
2000 if (amdgpu_device_should_recover_gpu(ras->adev)) {
2001 struct amdgpu_reset_context reset_context;
2002 memset(&reset_context, 0, sizeof(reset_context));
2003
2004 reset_context.method = AMD_RESET_METHOD_NONE;
2005 reset_context.reset_req_dev = adev;
2006
2007 /* Perform full reset in fatal error mode */
2008 if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
2009 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2010 else
2011 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2012
2013 amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
2014 }
2015 atomic_set(&ras->in_recovery, 0);
2016 }
2017
2018 /* alloc/realloc bps array */
2019 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
2020 struct ras_err_handler_data *data, int pages)
2021 {
2022 unsigned int old_space = data->count + data->space_left;
2023 unsigned int new_space = old_space + pages;
2024 unsigned int align_space = ALIGN(new_space, 512);
2025 void *bps = kmalloc_array(align_space, sizeof(*data->bps), GFP_KERNEL);
2026
2027 if (!bps)
2028 return -ENOMEM;
2030
2031 if (data->bps) {
2032 memcpy(bps, data->bps,
2033 data->count * sizeof(*data->bps));
2034 kfree(data->bps);
2035 }
2036
2037 data->bps = bps;
2038 data->space_left += align_space - old_space;
2039 return 0;
2040 }
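
/*
 * Worked example of the growth arithmetic above, assuming an initially
 * empty table: with count = 0 and space_left = 0, a request for 256
 * pages gives old_space = 0, new_space = 256 and align_space =
 * ALIGN(256, 512) = 512, so 512 records are allocated and space_left
 * becomes 512. Once those fill up (count = 512), the next request for
 * 256 yields new_space = 768, which aligns up to 1024.
 */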
2041
2042 /* it deals with vram only. */
2043 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
2044 struct eeprom_table_record *bps, int pages)
2045 {
2046 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2047 struct ras_err_handler_data *data;
2048 int ret = 0;
2049 uint32_t i;
2050
2051 if (!con || !con->eh_data || !bps || pages <= 0)
2052 return 0;
2053
2054 mutex_lock(&con->recovery_lock);
2055 data = con->eh_data;
2056 if (!data)
2057 goto out;
2058
2059 for (i = 0; i < pages; i++) {
2060 if (amdgpu_ras_check_bad_page_unlock(con,
2061 bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2062 continue;
2063
2064 if (!data->space_left &&
2065 amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
2066 ret = -ENOMEM;
2067 goto out;
2068 }
2069
2070 amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
2071 bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
2072 AMDGPU_GPU_PAGE_SIZE);
2073
2074 memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
2075 data->count++;
2076 data->space_left--;
2077 }
2078 out:
2079 mutex_unlock(&con->recovery_lock);
2080
2081 return ret;
2082 }
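
/*
 * A minimal usage sketch for the function above; "faulty_addr" is a
 * hypothetical byte address of a bad VRAM page. Only retired_page is
 * shown here; real callers (the UMC error handlers) fill in more of
 * the eeprom_table_record fields.
 *
 *	struct eeprom_table_record bp = {
 *		// stored as a page frame number; amdgpu_ras_add_bad_pages()
 *		// shifts it left by AMDGPU_GPU_PAGE_SHIFT again
 *		.retired_page = faulty_addr >> AMDGPU_GPU_PAGE_SHIFT,
 *	};
 *
 *	amdgpu_ras_add_bad_pages(adev, &bp, 1);
 *	amdgpu_ras_save_bad_pages(adev, NULL);	// persist to EEPROM
 */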
2083
2084 /*
2085 * write the error record array to eeprom; the function should be
2086 * protected by recovery_lock
2087 * new_cnt: newly added UE count, excluding reserved bad pages; may be NULL
2088 */
2089 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
2090 unsigned long *new_cnt)
2091 {
2092 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2093 struct ras_err_handler_data *data;
2094 struct amdgpu_ras_eeprom_control *control;
2095 int save_count;
2096
2097 if (!con || !con->eh_data) {
2098 if (new_cnt)
2099 *new_cnt = 0;
2100
2101 return 0;
2102 }
2103
2104 mutex_lock(&con->recovery_lock);
2105 control = &con->eeprom_control;
2106 data = con->eh_data;
2107 save_count = data->count - control->ras_num_recs;
2108 mutex_unlock(&con->recovery_lock);
2109
2110 if (new_cnt)
2111 *new_cnt = save_count / adev->umc.retire_unit;
2112
2113 /* only new entries are saved */
2114 if (save_count > 0) {
2115 if (amdgpu_ras_eeprom_append(control,
2116 &data->bps[control->ras_num_recs],
2117 save_count)) {
2118 dev_err(adev->dev, "Failed to save EEPROM table data!");
2119 return -EIO;
2120 }
2121
2122 dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
2123 }
2124
2125 return 0;
2126 }
2127
2128 /*
2129 * read the error record array from eeprom and reserve enough space
2130 * for storing new bad pages
2131 */
2132 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
2133 {
2134 struct amdgpu_ras_eeprom_control *control =
2135 &adev->psp.ras_context.ras->eeprom_control;
2136 struct eeprom_table_record *bps;
2137 int ret;
2138
2139 /* no bad page record, skip eeprom access */
2140 if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
2141 return 0;
2142
2143 bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
2144 if (!bps)
2145 return -ENOMEM;
2146
2147 ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
2148 if (ret)
2149 dev_err(adev->dev, "Failed to load EEPROM table records!");
2150 else
2151 ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
2152
2153 kfree(bps);
2154 return ret;
2155 }
2156
2157 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
2158 uint64_t addr)
2159 {
2160 struct ras_err_handler_data *data = con->eh_data;
2161 int i;
2162
2163 addr >>= AMDGPU_GPU_PAGE_SHIFT;
2164 for (i = 0; i < data->count; i++)
2165 if (addr == data->bps[i].retired_page)
2166 return true;
2167
2168 return false;
2169 }
2170
2171 /*
2172 * check whether an address belongs to a bad page
2173 *
2174 * Note: this check is only for the umc block
2175 */
2176 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
2177 uint64_t addr)
2178 {
2179 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2180 bool ret = false;
2181
2182 if (!con || !con->eh_data)
2183 return ret;
2184
2185 mutex_lock(&con->recovery_lock);
2186 ret = amdgpu_ras_check_bad_page_unlock(con, addr);
2187 mutex_unlock(&con->recovery_lock);
2188 return ret;
2189 }
2190
2191 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
2192 uint32_t max_count)
2193 {
2194 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2195
2196 /*
2197 * Justification of the value of bad_page_cnt_threshold in the ras structure
2198 *
2199 * Generally, 0 <= amdgpu_bad_page_threshold <= max record length
2200 * in eeprom, or amdgpu_bad_page_threshold == -2, giving two
2201 * scenarios:
2202 *
2203 * Bad page retirement enabled:
2204 * - If amdgpu_bad_page_threshold = -2,
2205 * bad_page_cnt_threshold = typical value by formula.
2206 *
2207 * - If the value from the user satisfies 0 < amdgpu_bad_page_threshold <
2208 * max record length in eeprom, use it directly.
2209 *
2210 * Bad page retirement disabled:
2211 * - If amdgpu_bad_page_threshold = 0, bad page retirement
2212 * functionality is disabled, and bad_page_cnt_threshold
2213 * takes no effect.
2214 */
2215
2216 if (amdgpu_bad_page_threshold < 0) {
2217 u64 val = adev->gmc.mc_vram_size;
2218
2219 do_div(val, RAS_BAD_PAGE_COVER);
2220 con->bad_page_cnt_threshold = min(lower_32_bits(val),
2221 max_count);
2222 } else {
2223 con->bad_page_cnt_threshold = min_t(int, max_count,
2224 amdgpu_bad_page_threshold);
2225 }
2226 }
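
/*
 * Worked example of the default-threshold formula above, assuming a
 * 32 GiB VRAM card: with one tolerated bad page per RAS_BAD_PAGE_COVER
 * (100 MiB), val = 34359738368 / 104857600 = 327 (do_div truncates),
 * so bad_page_cnt_threshold = min(327, max_count).
 */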
2227
2228 int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
2229 {
2230 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2231 struct ras_err_handler_data **data;
2232 u32 max_eeprom_records_count = 0;
2233 bool exc_err_limit = false;
2234 int ret;
2235
2236 if (!con || amdgpu_sriov_vf(adev))
2237 return 0;
2238
2239 /* Allow access to RAS EEPROM via debugfs, when the ASIC
2240 * supports RAS and debugfs is enabled, but when
2241 * adev->ras_enabled is unset, i.e. when "ras_enable"
2242 * module parameter is set to 0.
2243 */
2244 con->adev = adev;
2245
2246 if (!adev->ras_enabled)
2247 return 0;
2248
2249 data = &con->eh_data;
2250 *data = kzalloc(sizeof(**data), GFP_KERNEL);
2251 if (!*data) {
2252 ret = -ENOMEM;
2253 goto out;
2254 }
2255
2256 mutex_init(&con->recovery_lock);
2257 INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
2258 atomic_set(&con->in_recovery, 0);
2259 con->eeprom_control.bad_channel_bitmap = 0;
2260
2261 max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count();
2262 amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
2263
2264 /* Todo: during testing, the SMU might fail to read the eeprom through I2C
2265 * when the GPU is pending an XGMI reset during probe time
2266 * (mostly after the second bus reset); skip it for now
2267 */
2268 if (adev->gmc.xgmi.pending_reset)
2269 return 0;
2270 ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
2271 /*
2272 * This call fails when exc_err_limit is true or
2273 * ret != 0.
2274 */
2275 if (exc_err_limit || ret)
2276 goto free;
2277
2278 if (con->eeprom_control.ras_num_recs) {
2279 ret = amdgpu_ras_load_bad_pages(adev);
2280 if (ret)
2281 goto free;
2282
2283 amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
2284
2285 if (con->update_channel_flag == true) {
2286 amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
2287 con->update_channel_flag = false;
2288 }
2289 }
2290
2291 #ifdef CONFIG_X86_MCE_AMD
2292 if ((adev->asic_type == CHIP_ALDEBARAN) &&
2293 (adev->gmc.xgmi.connected_to_cpu))
2294 amdgpu_register_bad_pages_mca_notifier(adev);
2295 #endif
2296 return 0;
2297
2298 free:
2299 kfree((*data)->bps);
2300 kfree(*data);
2301 con->eh_data = NULL;
2302 out:
2303 dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
2304
2305 /*
2306 * Except for the error-threshold-exceeded case, other failure cases
2307 * in this function do not fail amdgpu driver init.
2308 */
2309 if (!exc_err_limit)
2310 ret = 0;
2311 else
2312 ret = -EINVAL;
2313
2314 return ret;
2315 }
2316
2317 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
2318 {
2319 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2320 struct ras_err_handler_data *data = con->eh_data;
2321
2322 /* if recovery_init failed to init it, fini is useless */
2323 if (!data)
2324 return 0;
2325
2326 cancel_work_sync(&con->recovery_work);
2327
2328 mutex_lock(&con->recovery_lock);
2329 con->eh_data = NULL;
2330 kfree(data->bps);
2331 kfree(data);
2332 mutex_unlock(&con->recovery_lock);
2333
2334 return 0;
2335 }
2336 /* recovery end */
2337
2338 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
2339 {
2340 if (amdgpu_sriov_vf(adev)) {
2341 switch (adev->ip_versions[MP0_HWIP][0]) {
2342 case IP_VERSION(13, 0, 2):
2343 return true;
2344 default:
2345 return false;
2346 }
2347 }
2348
2349 if (adev->asic_type == CHIP_IP_DISCOVERY) {
2350 switch (adev->ip_versions[MP0_HWIP][0]) {
2351 case IP_VERSION(13, 0, 0):
2352 case IP_VERSION(13, 0, 10):
2353 return true;
2354 default:
2355 return false;
2356 }
2357 }
2358
2359 return adev->asic_type == CHIP_VEGA10 ||
2360 adev->asic_type == CHIP_VEGA20 ||
2361 adev->asic_type == CHIP_ARCTURUS ||
2362 adev->asic_type == CHIP_ALDEBARAN ||
2363 adev->asic_type == CHIP_SIENNA_CICHLID;
2364 }
2365
2366 /*
2367 * This is a workaround for the vega20 workstation sku:
2368 * force enable gfx ras and ignore the vbios gfx ras flag,
2369 * because GC EDC cannot be written
2370 */
2371 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
2372 {
2373 struct atom_context *ctx = adev->mode_info.atom_context;
2374
2375 if (!ctx)
2376 return;
2377
2378 if (strnstr(ctx->vbios_version, "D16406",
2379 sizeof(ctx->vbios_version)) ||
2380 strnstr(ctx->vbios_version, "D36002",
2381 sizeof(ctx->vbios_version)))
2382 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
2383 }
2384
2385 /*
2386 * check the hardware's ras ability, which will be saved in hw_supported.
2387 * if the hardware does not support ras, we can skip some ras initialization
2388 * and forbid some ras operations from IPs.
2389 * if software itself (say, a boot parameter) limits the ras ability, we
2390 * still need to allow IPs to do some limited operations, like disable. in
2391 * such a case, we have to initialize ras as normal, but check in each
2392 * function whether the operation is allowed or not.
2393 */
2394 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
2395 {
2396 adev->ras_hw_enabled = adev->ras_enabled = 0;
2397
2398 if (!adev->is_atom_fw ||
2399 !amdgpu_ras_asic_supported(adev))
2400 return;
2401
2402 if (!adev->gmc.xgmi.connected_to_cpu) {
2403 if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
2404 dev_info(adev->dev, "MEM ECC is active.\n");
2405 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
2406 1 << AMDGPU_RAS_BLOCK__DF);
2407 } else {
2408 dev_info(adev->dev, "MEM ECC is not presented.\n");
2409 }
2410
2411 if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
2412 dev_info(adev->dev, "SRAM ECC is active.\n");
2413 if (!amdgpu_sriov_vf(adev))
2414 adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
2415 1 << AMDGPU_RAS_BLOCK__DF);
2416 else
2417 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
2418 1 << AMDGPU_RAS_BLOCK__SDMA |
2419 1 << AMDGPU_RAS_BLOCK__GFX);
2420
2421 /* VCN/JPEG RAS can be supported on both bare metal and
2422 * SRIOV environment
2423 */
2424 if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0) ||
2425 adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 0))
2426 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
2427 1 << AMDGPU_RAS_BLOCK__JPEG);
2428 else
2429 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
2430 1 << AMDGPU_RAS_BLOCK__JPEG);
2431 } else {
2432 dev_info(adev->dev, "SRAM ECC is not presented.\n");
2433 }
2434 } else {
2435 /* the driver only manages the RAS features of a few IP blocks
2436 * when the GPU is connected to the CPU through XGMI */
2437 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
2438 1 << AMDGPU_RAS_BLOCK__SDMA |
2439 1 << AMDGPU_RAS_BLOCK__MMHUB);
2440 }
2441
2442 amdgpu_ras_get_quirks(adev);
2443
2444 /* hw_supported needs to be aligned with RAS block mask. */
2445 adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
2446
2447 adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
2448 adev->ras_hw_enabled & amdgpu_ras_mask;
2449 }
2450
2451 static void amdgpu_ras_counte_dw(struct work_struct *work)
2452 {
2453 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
2454 ras_counte_delay_work.work);
2455 struct amdgpu_device *adev = con->adev;
2456 struct drm_device *dev = adev_to_drm(adev);
2457 unsigned long ce_count, ue_count;
2458 int res;
2459
2460 res = pm_runtime_get_sync(dev->dev);
2461 if (res < 0)
2462 goto Out;
2463
2464 /* Cache new values.
2465 */
2466 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
2467 atomic_set(&con->ras_ce_count, ce_count);
2468 atomic_set(&con->ras_ue_count, ue_count);
2469 }
2470
2471 pm_runtime_mark_last_busy(dev->dev);
2472 Out:
2473 pm_runtime_put_autosuspend(dev->dev);
2474 }
2475
2476 static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
2477 {
2478 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2479 bool df_poison, umc_poison;
2480
2481 /* poison setting is useless on SRIOV guest */
2482 if (amdgpu_sriov_vf(adev) || !con)
2483 return;
2484
2485 /* Init poison supported flag, the default value is false */
2486 if (adev->gmc.xgmi.connected_to_cpu) {
2487 /* enabled by default when GPU is connected to CPU */
2488 con->poison_supported = true;
2489 } else if (adev->df.funcs &&
2490 adev->df.funcs->query_ras_poison_mode &&
2491 adev->umc.ras &&
2492 adev->umc.ras->query_ras_poison_mode) {
2493 df_poison =
2494 adev->df.funcs->query_ras_poison_mode(adev);
2495 umc_poison =
2496 adev->umc.ras->query_ras_poison_mode(adev);
2497
2498 /* Only if poison is set in both DF and UMC can we support it */
2499 if (df_poison && umc_poison)
2500 con->poison_supported = true;
2501 else if (df_poison != umc_poison)
2502 dev_warn(adev->dev,
2503 "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
2504 df_poison, umc_poison);
2505 }
2506 }
2507
2508 int amdgpu_ras_init(struct amdgpu_device *adev)
2509 {
2510 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2511 int r;
2512
2513 if (con)
2514 return 0;
2515
2516 con = kzalloc(sizeof(struct amdgpu_ras) +
2517 sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
2518 sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
2519 GFP_KERNEL);
2520 if (!con)
2521 return -ENOMEM;
2522
2523 con->adev = adev;
2524 INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
2525 atomic_set(&con->ras_ce_count, 0);
2526 atomic_set(&con->ras_ue_count, 0);
2527
2528 con->objs = (struct ras_manager *)(con + 1);
2529
2530 amdgpu_ras_set_context(adev, con);
2531
2532 amdgpu_ras_check_supported(adev);
2533
2534 if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
2535 /* set the gfx block ras context feature for VEGA20 Gaming, so a
2536 * ras disable cmd is sent to the ras ta during ras late init.
2537 */
2538 if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
2539 con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
2540
2541 return 0;
2542 }
2543
2544 r = 0;
2545 goto release_con;
2546 }
2547
2548 con->update_channel_flag = false;
2549 con->features = 0;
2550 INIT_LIST_HEAD(&con->head);
2551 /* Might need to get this flag from the vbios. */
2552 con->flags = RAS_DEFAULT_FLAGS;
2553
2554 /* initialize the nbio ras function ahead of any other
2555 * ras functions, so the hardware fatal error interrupt
2556 * can be enabled as early as possible */
2557 switch (adev->asic_type) {
2558 case CHIP_VEGA20:
2559 case CHIP_ARCTURUS:
2560 case CHIP_ALDEBARAN:
2561 if (!adev->gmc.xgmi.connected_to_cpu) {
2562 adev->nbio.ras = &nbio_v7_4_ras;
2563 amdgpu_ras_register_ras_block(adev, &adev->nbio.ras->ras_block);
2564 adev->nbio.ras_if = &adev->nbio.ras->ras_block.ras_comm;
2565 }
2566 break;
2567 default:
2568 /* nbio ras is not available */
2569 break;
2570 }
2571
2572 if (adev->nbio.ras &&
2573 adev->nbio.ras->init_ras_controller_interrupt) {
2574 r = adev->nbio.ras->init_ras_controller_interrupt(adev);
2575 if (r)
2576 goto release_con;
2577 }
2578
2579 if (adev->nbio.ras &&
2580 adev->nbio.ras->init_ras_err_event_athub_interrupt) {
2581 r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
2582 if (r)
2583 goto release_con;
2584 }
2585
2586 amdgpu_ras_query_poison_mode(adev);
2587
2588 if (amdgpu_ras_fs_init(adev)) {
2589 r = -EINVAL;
2590 goto release_con;
2591 }
2592
2593 dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
2594 "hardware ability[%x] ras_mask[%x]\n",
2595 adev->ras_hw_enabled, adev->ras_enabled);
2596
2597 return 0;
2598 release_con:
2599 amdgpu_ras_set_context(adev, NULL);
2600 kfree(con);
2601
2602 return r;
2603 }
2604
2605 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
2606 {
2607 if (adev->gmc.xgmi.connected_to_cpu)
2608 return 1;
2609 return 0;
2610 }
2611
2612 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
2613 struct ras_common_if *ras_block)
2614 {
2615 struct ras_query_if info = {
2616 .head = *ras_block,
2617 };
2618
2619 if (!amdgpu_persistent_edc_harvesting_supported(adev))
2620 return 0;
2621
2622 if (amdgpu_ras_query_error_status(adev, &info) != 0)
2623 DRM_WARN("RAS init harvest failure");
2624
2625 if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
2626 DRM_WARN("RAS init harvest reset failure");
2627
2628 return 0;
2629 }
2630
2631 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
2632 {
2633 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2634
2635 if (!con)
2636 return false;
2637
2638 return con->poison_supported;
2639 }
2640
2641 /* helper function to handle common stuff in ip late init phase */
2642 int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
2643 struct ras_common_if *ras_block)
2644 {
2645 struct amdgpu_ras_block_object *ras_obj = NULL;
2646 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2647 struct ras_query_if *query_info;
2648 unsigned long ue_count, ce_count;
2649 int r;
2650
2651 /* disable RAS feature per IP block if it is not supported */
2652 if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
2653 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
2654 return 0;
2655 }
2656
2657 r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
2658 if (r) {
2659 if (adev->in_suspend || amdgpu_in_reset(adev)) {
2660 /* in the resume phase, if enabling ras fails, clean
2661 * up all ras fs nodes and disable ras */
2662 goto cleanup;
2663 } else
2664 return r;
2665 }
2666
2667 /* check for errors on warm reset for ASICs supporting persistent EDC harvesting */
2668 amdgpu_persistent_edc_harvesting(adev, ras_block);
2669
2670 /* in resume phase, no need to create ras fs node */
2671 if (adev->in_suspend || amdgpu_in_reset(adev))
2672 return 0;
2673
2674 ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
2675 if (ras_obj->ras_cb || (ras_obj->hw_ops &&
2676 (ras_obj->hw_ops->query_poison_status ||
2677 ras_obj->hw_ops->handle_poison_consumption))) {
2678 r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
2679 if (r)
2680 goto cleanup;
2681 }
2682
2683 r = amdgpu_ras_sysfs_create(adev, ras_block);
2684 if (r)
2685 goto interrupt;
2686
2687 /* Those are the cached values at init.
2688 */
2689 query_info = kzalloc(sizeof(struct ras_query_if), GFP_KERNEL);
2690 if (!query_info)
2691 return -ENOMEM;
2692 memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
2693
2694 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
2695 atomic_set(&con->ras_ce_count, ce_count);
2696 atomic_set(&con->ras_ue_count, ue_count);
2697 }
2698
2699 kfree(query_info);
2700 return 0;
2701
2702 interrupt:
2703 if (ras_obj->ras_cb)
2704 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
2705 cleanup:
2706 amdgpu_ras_feature_enable(adev, ras_block, 0);
2707 return r;
2708 }
2709
2710 static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
2711 struct ras_common_if *ras_block)
2712 {
2713 return amdgpu_ras_block_late_init(adev, ras_block);
2714 }
2715
2716 /* helper function to remove ras fs node and interrupt handler */
2717 void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
2718 struct ras_common_if *ras_block)
2719 {
2720 struct amdgpu_ras_block_object *ras_obj;
2721 if (!ras_block)
2722 return;
2723
2724 amdgpu_ras_sysfs_remove(adev, ras_block);
2725
2726 ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
2727 if (ras_obj->ras_cb)
2728 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
2729 }
2730
2731 static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
2732 struct ras_common_if *ras_block)
2733 {
2734 return amdgpu_ras_block_late_fini(adev, ras_block);
2735 }
2736
2737 /* do some init work after IP late init, as a dependency.
2738 * it runs in the resume/gpu-reset/boot-up cases.
2739 */
2740 void amdgpu_ras_resume(struct amdgpu_device *adev)
2741 {
2742 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2743 struct ras_manager *obj, *tmp;
2744
2745 if (!adev->ras_enabled || !con) {
2746 /* clean the ras context for VEGA20 Gaming after sending the ras disable cmd */
2747 amdgpu_release_ras_context(adev);
2748
2749 return;
2750 }
2751
2752 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
2753 /* Set up all other IPs which are not implemented. There is a
2754 * tricky point: an IP's actual ras error type should be
2755 * MULTI_UNCORRECTABLE, but since the driver does not handle it,
2756 * ERROR_NONE makes sense anyway.
2757 */
2758 amdgpu_ras_enable_all_features(adev, 1);
2759
2760 /* We enable ras on all hw_supported blocks, but the boot
2761 * parameter might disable some of them, and one or more IPs
2762 * may not be implemented yet. So we disable those on their behalf.
2763 */
2764 list_for_each_entry_safe(obj, tmp, &con->head, node) {
2765 if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
2766 amdgpu_ras_feature_enable(adev, &obj->head, 0);
2767 /* there should not be any references left. */
2768 WARN_ON(alive_obj(obj));
2769 }
2770 }
2771 }
2772 }
2773
2774 void amdgpu_ras_suspend(struct amdgpu_device *adev)
2775 {
2776 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2777
2778 if (!adev->ras_enabled || !con)
2779 return;
2780
2781 amdgpu_ras_disable_all_features(adev, 0);
2782 /* Make sure all ras objects are disabled. */
2783 if (con->features)
2784 amdgpu_ras_disable_all_features(adev, 1);
2785 }
2786
2787 int amdgpu_ras_late_init(struct amdgpu_device *adev)
2788 {
2789 struct amdgpu_ras_block_list *node, *tmp;
2790 struct amdgpu_ras_block_object *obj;
2791 int r;
2792
2793 /* The guest side doesn't need to init the ras feature */
2794 if (amdgpu_sriov_vf(adev))
2795 return 0;
2796
2797 list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
2798 if (!node->ras_obj) {
2799 dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
2800 continue;
2801 }
2802
2803 obj = node->ras_obj;
2804 if (obj->ras_late_init) {
2805 r = obj->ras_late_init(adev, &obj->ras_comm);
2806 if (r) {
2807 dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
2808 obj->ras_comm.name, r);
2809 return r;
2810 }
2811 } else
2812 amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
2813 }
2814
2815 return 0;
2816 }
2817
2818 /* do some fini work before IP fini, as a dependency */
2819 int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
2820 {
2821 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2822
2823 if (!adev->ras_enabled || !con)
2824 return 0;
2825
2826
2827 /* Need to disable ras on all IPs here before IP [hw/sw]fini */
2828 if (con->features)
2829 amdgpu_ras_disable_all_features(adev, 0);
2830 amdgpu_ras_recovery_fini(adev);
2831 return 0;
2832 }
2833
2834 int amdgpu_ras_fini(struct amdgpu_device *adev)
2835 {
2836 struct amdgpu_ras_block_list *ras_node, *tmp;
2837 struct amdgpu_ras_block_object *obj = NULL;
2838 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2839
2840 if (!adev->ras_enabled || !con)
2841 return 0;
2842
2843 list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
2844 if (ras_node->ras_obj) {
2845 obj = ras_node->ras_obj;
2846 if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
2847 obj->ras_fini)
2848 obj->ras_fini(adev, &obj->ras_comm);
2849 else
2850 amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
2851 }
2852
2853 /* Clear ras blocks from ras_list and free ras block list node */
2854 list_del(&ras_node->node);
2855 kfree(ras_node);
2856 }
2857
2858 amdgpu_ras_fs_fini(adev);
2859 amdgpu_ras_interrupt_remove_all(adev);
2860
2861 WARN(con->features, "Feature mask is not cleared");
2862
2863 if (con->features)
2864 amdgpu_ras_disable_all_features(adev, 1);
2865
2866 cancel_delayed_work_sync(&con->ras_counte_delay_work);
2867
2868 amdgpu_ras_set_context(adev, NULL);
2869 kfree(con);
2870
2871 return 0;
2872 }
2873
2874 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
2875 {
2876 amdgpu_ras_check_supported(adev);
2877 if (!adev->ras_hw_enabled)
2878 return;
2879
2880 if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
2881 dev_info(adev->dev, "uncorrectable hardware error"
2882 "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
2883
2884 amdgpu_ras_reset_gpu(adev);
2885 }
2886 }
2887
2888 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
2889 {
2890 if (adev->asic_type == CHIP_VEGA20 &&
2891 adev->pm.fw_version <= 0x283400) {
2892 return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
2893 amdgpu_ras_intr_triggered();
2894 }
2895
2896 return false;
2897 }
2898
2899 void amdgpu_release_ras_context(struct amdgpu_device *adev)
2900 {
2901 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2902
2903 if (!con)
2904 return;
2905
2906 if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
2907 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
2908 amdgpu_ras_set_context(adev, NULL);
2909 kfree(con);
2910 }
2911 }
2912
2913 #ifdef CONFIG_X86_MCE_AMD
2914 static struct amdgpu_device *find_adev(uint32_t node_id)
2915 {
2916 int i;
2917 struct amdgpu_device *adev = NULL;
2918
2919 for (i = 0; i < mce_adev_list.num_gpu; i++) {
2920 adev = mce_adev_list.devs[i];
2921
2922 if (adev && adev->gmc.xgmi.connected_to_cpu &&
2923 adev->gmc.xgmi.physical_node_id == node_id)
2924 break;
2925 adev = NULL;
2926 }
2927
2928 return adev;
2929 }
2930
2931 #define GET_MCA_IPID_GPUID(m) (((m) >> 44) & 0xF)
2932 #define GET_UMC_INST(m) (((m) >> 21) & 0x7)
2933 #define GET_CHAN_INDEX(m) ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
2934 #define GPU_ID_OFFSET 8
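
/*
 * Worked example of the field extraction above, using a made-up
 * MCA_IPID value:
 *
 *	m->ipid = (9ULL << 44) | (5ULL << 21) | (1ULL << 20) | (2ULL << 12)
 *
 * GET_MCA_IPID_GPUID() yields 9, so gpu_id = 9 - GPU_ID_OFFSET = 1;
 * GET_UMC_INST() yields 5; GET_CHAN_INDEX() merges bit 20 (as bit 2)
 * with bits 13:12, yielding 0x4 | 0x2 = 6.
 */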
2935
2936 static int amdgpu_bad_page_notifier(struct notifier_block *nb,
2937 unsigned long val, void *data)
2938 {
2939 struct mce *m = (struct mce *)data;
2940 struct amdgpu_device *adev = NULL;
2941 uint32_t gpu_id = 0;
2942 uint32_t umc_inst = 0, ch_inst = 0;
2943
2944 /*
2945 * If the error was generated in UMC_V2, which belongs to GPU UMCs,
2946 * and the error occurred in DramECC (Extended error code = 0), then
2947 * process the error; otherwise bail out.
2948 */
2949 if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
2950 (XEC(m->status, 0x3f) == 0x0)))
2951 return NOTIFY_DONE;
2952
2953 /*
2954 * If it is a correctable error, return.
2955 */
2956 if (mce_is_correctable(m))
2957 return NOTIFY_OK;
2958
2959 /*
2960 * The GPU id is offset by GPU_ID_OFFSET in the MCA_IPID_UMC register.
2961 */
2962 gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
2963
2964 adev = find_adev(gpu_id);
2965 if (!adev) {
2966 DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
2967 gpu_id);
2968 return NOTIFY_DONE;
2969 }
2970
2971 /*
2972 * Since it is an uncorrectable error, find out the UMC instance and
2973 * channel index.
2974 */
2975 umc_inst = GET_UMC_INST(m->ipid);
2976 ch_inst = GET_CHAN_INDEX(m->ipid);
2977
2978 dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
2979 umc_inst, ch_inst);
2980
2981 if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
2982 return NOTIFY_OK;
2983 else
2984 return NOTIFY_DONE;
2985 }
2986
2987 static struct notifier_block amdgpu_bad_page_nb = {
2988 .notifier_call = amdgpu_bad_page_notifier,
2989 .priority = MCE_PRIO_UC,
2990 };
2991
2992 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
2993 {
2994 /*
2995 * Add the adev to the mce_adev_list.
2996 * During mode2 reset, the amdgpu device is temporarily
2997 * removed from the mgpu_info list, which can cause
2998 * page retirement to fail.
2999 * Use this list instead of mgpu_info to find the amdgpu
3000 * device on which the UMC error was reported.
3001 */
3002 mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
3003
3004 /*
3005 * Register the x86 notifier only once
3006 * with MCE subsystem.
3007 */
3008 if (notifier_registered == false) {
3009 mce_register_decode_chain(&amdgpu_bad_page_nb);
3010 notifier_registered = true;
3011 }
3012 }
3013 #endif
3014
3015 struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
3016 {
3017 if (!adev)
3018 return NULL;
3019
3020 return adev->psp.ras_context.ras;
3021 }
3022
3023 int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
3024 {
3025 if (!adev)
3026 return -EINVAL;
3027
3028 adev->psp.ras_context.ras = ras_con;
3029 return 0;
3030 }
3031
3032 /* check if ras is supported on a block, say, sdma or gfx */
3033 int amdgpu_ras_is_supported(struct amdgpu_device *adev,
3034 unsigned int block)
3035 {
3036 int ret = 0;
3037 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3038
3039 if (block >= AMDGPU_RAS_BLOCK_COUNT)
3040 return 0;
3041
3042 ret = ras && (adev->ras_enabled & (1 << block));
3043
3044 /* For the special asic with mem ecc enabled but sram ecc
3045 * not enabled, even if the ras block is not supported in
3046 * .ras_enabled, if the asic supports poison mode and the
3047 * ras block has a ras configuration, it can be considered
3048 * that the ras block supports the ras function.
3049 */
3050 if (!ret &&
3051 amdgpu_ras_is_poison_mode_supported(adev) &&
3052 amdgpu_ras_get_ras_block(adev, block, 0))
3053 ret = 1;
3054
3055 return ret;
3056 }
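
/*
 * A minimal sketch of the usual guard an IP block places in front of
 * its RAS-only paths (illustrative; the block chosen is arbitrary):
 *
 *	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA))
 *		return 0;	// skip RAS work for this block
 */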
3057
3058 int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
3059 {
3060 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3061
3062 if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
3063 amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
3064 return 0;
3065 }
3066
3067
3068 /* Register each ip ras block into amdgpu ras */
3069 int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
3070 struct amdgpu_ras_block_object *ras_block_obj)
3071 {
3072 struct amdgpu_ras_block_list *ras_node;
3073 if (!adev || !ras_block_obj)
3074 return -EINVAL;
3075
3076 if (!amdgpu_ras_asic_supported(adev))
3077 return 0;
3078
3079 ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
3080 if (!ras_node)
3081 return -ENOMEM;
3082
3083 INIT_LIST_HEAD(&ras_node->node);
3084 ras_node->ras_obj = ras_block_obj;
3085 list_add_tail(&ras_node->node, &adev->ras_list);
3086
3087 return 0;
3088 }
3089