// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_debugfs.h"

#include <linux/debugfs.h>
#include <linux/fault-inject.h>
#include <linux/string_helpers.h>

#include <drm/drm_debugfs.h>

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt_debugfs.h"
#include "xe_gt_printk.h"
#include "xe_guc_ads.h"
#include "xe_pm.h"
#include "xe_pxp_debugfs.h"
#include "xe_sriov.h"
#include "xe_sriov_pf.h"
#include "xe_step.h"
#include "xe_wa.h"

#ifdef CONFIG_DRM_XE_DEBUG
#include "xe_bo_evict.h"
#include "xe_migrate.h"
#include "xe_vm.h"
#endif

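/*
 * Fault-injection attribute backing the "fail_gt_reset" debugfs entry
 * created in xe_debugfs_register() below; it lets test tooling force GT
 * reset failures through the generic fault-injection debugfs interface.
 * The attribute is consumed by the GT reset path elsewhere in the driver.
 */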
DECLARE_FAULT_ATTR(gt_reset_failure);

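/* Resolve the struct xe_device that owns a drm_info_node's DRM minor. */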
static struct xe_device *node_to_xe(struct drm_info_node *node)
{
	return to_xe_device(node->minor->dev);
}

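/*
 * "info" node: dumps static device information (IP versions, stepping,
 * platform/subplatform, device/revision IDs, tile count, feature flags)
 * plus the current force-wake reference and engine mask of each GT.
 * A runtime PM reference is held while the GT state is sampled.
 */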
static int info(struct seq_file *m, void *data)
{
	struct xe_device *xe = node_to_xe(m->private);
	struct drm_printer p = drm_seq_file_printer(m);
	struct xe_gt *gt;
	u8 id;

	xe_pm_runtime_get(xe);

	drm_printf(&p, "graphics_verx100 %d\n", xe->info.graphics_verx100);
	drm_printf(&p, "media_verx100 %d\n", xe->info.media_verx100);
	drm_printf(&p, "stepping G:%s M:%s B:%s\n",
		   xe_step_name(xe->info.step.graphics),
		   xe_step_name(xe->info.step.media),
		   xe_step_name(xe->info.step.basedie));
	drm_printf(&p, "is_dgfx %s\n", str_yes_no(xe->info.is_dgfx));
	drm_printf(&p, "platform %d\n", xe->info.platform);
	drm_printf(&p, "subplatform %d\n",
		   xe->info.subplatform > XE_SUBPLATFORM_NONE ? xe->info.subplatform : 0);
	drm_printf(&p, "devid 0x%x\n", xe->info.devid);
	drm_printf(&p, "revid %d\n", xe->info.revid);
	drm_printf(&p, "tile_count %d\n", xe->info.tile_count);
	drm_printf(&p, "vm_max_level %d\n", xe->info.vm_max_level);
	drm_printf(&p, "force_execlist %s\n", str_yes_no(xe->info.force_execlist));
	drm_printf(&p, "has_flat_ccs %s\n", str_yes_no(xe->info.has_flat_ccs));
	drm_printf(&p, "has_usm %s\n", str_yes_no(xe->info.has_usm));
	drm_printf(&p, "skip_guc_pc %s\n", str_yes_no(xe->info.skip_guc_pc));
	for_each_gt(gt, xe, id) {
		drm_printf(&p, "gt%d force wake %d\n", id,
			   xe_force_wake_ref(gt_to_fw(gt), XE_FW_GT));
		drm_printf(&p, "gt%d engine_mask 0x%llx\n", id,
			   gt->info.engine_mask);
	}

	xe_pm_runtime_put(xe);
	return 0;
}

static int sriov_info(struct seq_file *m, void *data)
{
	struct xe_device *xe = node_to_xe(m->private);
	struct drm_printer p = drm_seq_file_printer(m);

	xe_sriov_print_info(xe, &p);
	return 0;
}

static int workarounds(struct xe_device *xe, struct drm_printer *p)
{
	xe_pm_runtime_get(xe);
	xe_wa_device_dump(xe, p);
	xe_pm_runtime_put(xe);

	return 0;
}

static int workaround_info(struct seq_file *m, void *data)
{
	struct xe_device *xe = node_to_xe(m->private);
	struct drm_printer p = drm_seq_file_printer(m);

	workarounds(xe, &p);
	return 0;
}

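/*
 * Read-only seq_file nodes registered via drm_debugfs_create_files().
 * A new node is added by appending one entry here, e.g. (hypothetical
 * name):
 *
 *	{ .name = "my_info", .show = my_info, },
 *
 * where my_info() has the same signature as sriov_info() above.
 */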
static const struct drm_info_list debugfs_list[] = {
	{"info", info, 0},
	{ .name = "sriov_info", .show = sriov_info, },
	{ .name = "workarounds", .show = workaround_info, },
};

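/*
 * "forcewake_all": while this file is held open, every force-wake domain
 * of every GT is kept awake (along with a runtime PM reference), keeping
 * the GPU registers accessible. A rough usage sketch from a shell, where
 * the dri minor number may differ on a given system:
 *
 *	exec 3< /sys/kernel/debug/dri/0/forcewake_all	# grab force-wake
 *	... inspect registers / other debugfs nodes ...
 *	exec 3<&-					# release it
 *
 * If any domain fails to wake, the references taken so far are dropped
 * and the open fails with -ETIMEDOUT.
 */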
static int forcewake_open(struct inode *inode, struct file *file)
{
	struct xe_device *xe = inode->i_private;
	struct xe_gt *gt;
	u8 id, last_gt;
	unsigned int fw_ref;

	xe_pm_runtime_get(xe);
	for_each_gt(gt, xe, id) {
		last_gt = id;

		fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
		if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
			goto err_fw_get;
	}

	return 0;

err_fw_get:
	for_each_gt(gt, xe, id) {
		if (id < last_gt)
			xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
		else if (id == last_gt)
			xe_force_wake_put(gt_to_fw(gt), fw_ref);
		else
			break;
	}

	xe_pm_runtime_put(xe);
	return -ETIMEDOUT;
}

static int forcewake_release(struct inode *inode, struct file *file)
{
	struct xe_device *xe = inode->i_private;
	struct xe_gt *gt;
	u8 id;

	for_each_gt(gt, xe, id)
		xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	xe_pm_runtime_put(xe);

	return 0;
}

static const struct file_operations forcewake_all_fops = {
	.owner = THIS_MODULE,
	.open = forcewake_open,
	.release = forcewake_release,
};

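/*
 * "wedged_mode": policy for declaring the whole device wedged. Only the
 * values 0, 1 and 2 are accepted here; as used elsewhere in the driver,
 * 0 means never wedge, 1 wedges on critical errors (the default) and 2
 * wedges on any hang. Changing the mode re-pushes the GuC ADS scheduler
 * policy on every GT so that, in mode 2, the GuC stops performing engine
 * resets on its own.
 */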
static ssize_t wedged_mode_show(struct file *f, char __user *ubuf,
				size_t size, loff_t *pos)
{
	struct xe_device *xe = file_inode(f)->i_private;
	char buf[32];
	int len = 0;

	len = scnprintf(buf, sizeof(buf), "%d\n", xe->wedged.mode);

	return simple_read_from_buffer(ubuf, size, pos, buf, len);
}

static ssize_t wedged_mode_set(struct file *f, const char __user *ubuf,
			       size_t size, loff_t *pos)
{
	struct xe_device *xe = file_inode(f)->i_private;
	struct xe_gt *gt;
	u32 wedged_mode;
	ssize_t ret;
	u8 id;

	ret = kstrtouint_from_user(ubuf, size, 0, &wedged_mode);
	if (ret)
		return ret;

	if (wedged_mode > 2)
		return -EINVAL;

	if (xe->wedged.mode == wedged_mode)
		return size;

	xe->wedged.mode = wedged_mode;

	xe_pm_runtime_get(xe);
	for_each_gt(gt, xe, id) {
		ret = xe_guc_ads_scheduler_policy_toggle_reset(&gt->uc.guc.ads);
		if (ret) {
			xe_gt_err(gt, "Failed to update GuC ADS scheduler policy. GuC may still cause engine reset even with wedged_mode=2\n");
			xe_pm_runtime_put(xe);
			return -EIO;
		}
	}
	xe_pm_runtime_put(xe);

	return size;
}

static const struct file_operations wedged_mode_fops = {
	.owner = THIS_MODULE,
	.read = wedged_mode_show,
	.write = wedged_mode_set,
};

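/*
 * "atomic_svm_timeslice_ms": plain decimal read/write tunable stored in
 * xe->atomic_svm_timeslice_ms. The value is only cached here; the SVM /
 * GPU fault handling code elsewhere in the driver consumes it.
 */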
static ssize_t atomic_svm_timeslice_ms_show(struct file *f, char __user *ubuf,
					    size_t size, loff_t *pos)
{
	struct xe_device *xe = file_inode(f)->i_private;
	char buf[32];
	int len = 0;

	len = scnprintf(buf, sizeof(buf), "%d\n", xe->atomic_svm_timeslice_ms);

	return simple_read_from_buffer(ubuf, size, pos, buf, len);
}

static ssize_t atomic_svm_timeslice_ms_set(struct file *f,
					   const char __user *ubuf,
					   size_t size, loff_t *pos)
{
	struct xe_device *xe = file_inode(f)->i_private;
	u32 atomic_svm_timeslice_ms;
	ssize_t ret;

	ret = kstrtouint_from_user(ubuf, size, 0, &atomic_svm_timeslice_ms);
	if (ret)
		return ret;

	xe->atomic_svm_timeslice_ms = atomic_svm_timeslice_ms;

	return size;
}

static const struct file_operations atomic_svm_timeslice_ms_fops = {
	.owner = THIS_MODULE,
	.read = atomic_svm_timeslice_ms_show,
	.write = atomic_svm_timeslice_ms_set,
};

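/**
 * xe_debugfs_register - Populate the debugfs tree for an Xe device
 * @xe: the &xe_device to register
 *
 * Creates the device-level debugfs nodes under the DRM primary minor's
 * debugfs root (typically /sys/kernel/debug/dri/<minor>/): the seq_file
 * nodes from debugfs_list, the forcewake_all, wedged_mode and
 * atomic_svm_timeslice_ms files, per-manager TTM resource dumps for
 * VRAM, GTT and stolen memory, the fail_gt_reset fault-injection knob,
 * plus per-GT, PXP and (on PF devices) SR-IOV PF sub-trees.
 */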
void xe_debugfs_register(struct xe_device *xe)
{
	struct ttm_device *bdev = &xe->ttm;
	struct drm_minor *minor = xe->drm.primary;
	struct dentry *root = minor->debugfs_root;
	struct ttm_resource_manager *man;
	struct xe_gt *gt;
	u32 mem_type;
	u8 id;

	drm_debugfs_create_files(debugfs_list,
				 ARRAY_SIZE(debugfs_list),
				 root, minor);

	debugfs_create_file("forcewake_all", 0400, root, xe,
			    &forcewake_all_fops);

	debugfs_create_file("wedged_mode", 0600, root, xe,
			    &wedged_mode_fops);

	debugfs_create_file("atomic_svm_timeslice_ms", 0600, root, xe,
			    &atomic_svm_timeslice_ms_fops);

	for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
		man = ttm_manager_type(bdev, mem_type);

		if (man) {
			char name[16];

			snprintf(name, sizeof(name), "vram%d_mm", mem_type - XE_PL_VRAM0);
			ttm_resource_manager_create_debugfs(man, root, name);
		}
	}

	man = ttm_manager_type(bdev, XE_PL_TT);
	ttm_resource_manager_create_debugfs(man, root, "gtt_mm");

	man = ttm_manager_type(bdev, XE_PL_STOLEN);
	if (man)
		ttm_resource_manager_create_debugfs(man, root, "stolen_mm");

	for_each_gt(gt, xe, id)
		xe_gt_debugfs_register(gt);

	xe_pxp_debugfs_register(xe->pxp);

	fault_create_debugfs_attr("fail_gt_reset", root, &gt_reset_failure);

	if (IS_SRIOV_PF(xe))
		xe_sriov_pf_debugfs_register(xe, root);
}