// SPDX-License-Identifier: MIT
/*
 * Copyright © 2018 Intel Corporation
 */

#include <linux/crc32.h>

#include "gem/i915_gem_stolen.h"

#include "i915_memcpy.h"
#include "i915_selftest.h"
#include "intel_gpu_commands.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_atomic.h"
#include "selftests/igt_spinner.h"

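/*
 * Reset the GPU (or just a subset of its engines) while it is busy and
 * check whether the reset leaks into stolen memory: CRC every page of
 * the stolen region before and after the reset, and flag any page that
 * changed despite not belonging to an allocated stolen node.
 */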
static int
__igt_reset_stolen(struct intel_gt *gt,
		   intel_engine_mask_t mask,
		   const char *msg)
{
	struct i915_ggtt *ggtt = gt->ggtt;
	const struct resource *dsm = &gt->i915->dsm.stolen;
	resource_size_t num_pages, page;
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;
	struct igt_spinner spin;
	long max, count;
	void *tmp;
	u32 *crc;
	int err;

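	/* The GGTT error-capture slot is our window into stolen memory */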
	if (!drm_mm_node_allocated(&ggtt->error_capture))
		return 0;

	num_pages = resource_size(dsm) >> PAGE_SHIFT;
	if (!num_pages)
		return 0;

	crc = kmalloc_array(num_pages, sizeof(u32), GFP_KERNEL);
	if (!crc)
		return -ENOMEM;

	tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!tmp) {
		err = -ENOMEM;
		goto err_crc;
	}

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	err = igt_spinner_init(&spin, gt);
	if (err)
		goto err_lock;

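	/* Keep the engines under test busy so the reset has state to hit */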
	for_each_engine(engine, gt, id) {
		struct intel_context *ce;
		struct i915_request *rq;

		if (!(mask & engine->mask))
			continue;

		if (!intel_engine_can_store_dword(engine))
			continue;

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto err_spin;
		}
		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
		intel_context_put(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_spin;
		}
		i915_request_add(rq);
	}

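	/* Record a CRC for every page of stolen prior to the reset */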
	for (page = 0; page < num_pages; page++) {
		dma_addr_t dma = (dma_addr_t)dsm->start + (page << PAGE_SHIFT);
		void __iomem *s;
		void *in;

		ggtt->vm.insert_page(&ggtt->vm, dma,
				     ggtt->error_capture.start,
				     I915_CACHE_NONE, 0);
		mb();

		s = io_mapping_map_wc(&ggtt->iomap,
				      ggtt->error_capture.start,
				      PAGE_SIZE);

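		/* Fill pages outside any allocation with a known pattern */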
		if (!__drm_mm_interval_first(&gt->i915->mm.stolen,
					     page << PAGE_SHIFT,
					     ((page + 1) << PAGE_SHIFT) - 1))
			memset_io(s, STACK_MAGIC, PAGE_SIZE);

		in = (void __force *)s;
		if (i915_memcpy_from_wc(tmp, in, PAGE_SIZE))
			in = tmp;
		crc[page] = crc32_le(0, in, PAGE_SIZE);

		io_mapping_unmap(s);
	}
	mb();
	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);

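	/* Perform the reset under test: full device or individual engines */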
	if (mask == ALL_ENGINES) {
		intel_gt_reset(gt, mask, NULL);
	} else {
		for_each_engine(engine, gt, id) {
			if (mask & engine->mask)
				intel_engine_reset(engine, NULL);
		}
	}

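	/* Re-CRC each page; a changed, unallocated page was clobbered */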
	max = -1;
	count = 0;
	for (page = 0; page < num_pages; page++) {
		dma_addr_t dma = (dma_addr_t)dsm->start + (page << PAGE_SHIFT);
		void __iomem *s;
		void *in;
		u32 x;

		ggtt->vm.insert_page(&ggtt->vm, dma,
				     ggtt->error_capture.start,
				     I915_CACHE_NONE, 0);
		mb();

		s = io_mapping_map_wc(&ggtt->iomap,
				      ggtt->error_capture.start,
				      PAGE_SIZE);

		in = (void __force *)s;
		if (i915_memcpy_from_wc(tmp, in, PAGE_SIZE))
			in = tmp;
		x = crc32_le(0, in, PAGE_SIZE);

		if (x != crc[page] &&
		    !__drm_mm_interval_first(&gt->i915->mm.stolen,
					     page << PAGE_SHIFT,
					     ((page + 1) << PAGE_SHIFT) - 1)) {
			pr_debug("unused stolen page %pa modified by GPU reset\n",
				 &page);
			if (count++ == 0)
				igt_hexdump(in, PAGE_SIZE);
			max = page;
		}

		io_mapping_unmap(s);
	}
	mb();
	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);

	if (count > 0) {
		pr_info("%s reset clobbered %ld pages of stolen, last clobber at page %ld\n",
			msg, count, max);
	}
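	/*
	 * Clobbers below the I915_GEM_STOLEN_BIAS mark are tolerated as
	 * that region is kept out of general use; a clobber above it may
	 * hit memory the driver is actively using.
	 */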
	if (max >= I915_GEM_STOLEN_BIAS >> PAGE_SHIFT) {
		pr_err("%s reset clobbered unreserved area [above %x] of stolen; may cause severe faults\n",
		       msg, I915_GEM_STOLEN_BIAS);
		err = -EINVAL;
	}

err_spin:
	igt_spinner_fini(&spin);

err_lock:
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	kfree(tmp);
err_crc:
	kfree(crc);
	return err;
}

static int igt_reset_device_stolen(void *arg)
{
	return __igt_reset_stolen(arg, ALL_ENGINES, "device");
}

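/* Repeat the stolen check for each engine's individual reset */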
static int igt_reset_engines_stolen(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	if (!intel_has_reset_engine(gt))
		return 0;

	for_each_engine(engine, gt, id) {
		err = __igt_reset_stolen(gt, engine->mask, engine->name);
		if (err)
			return err;
	}

	return 0;
}

static int igt_global_reset(void *arg)
{
	struct intel_gt *gt = arg;
	unsigned int reset_count;
	intel_wakeref_t wakeref;
	int err = 0;

	/* Check that we can issue a global GPU reset */

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reset_count = i915_reset_count(&gt->i915->gpu_error);

	intel_gt_reset(gt, ALL_ENGINES, NULL);

	if (i915_reset_count(&gt->i915->gpu_error) == reset_count) {
		pr_err("No GPU reset recorded!\n");
		err = -EINVAL;
	}

	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	if (intel_gt_is_wedged(gt))
		err = -EIO;

	return err;
}

static int igt_wedged_reset(void *arg)
{
	struct intel_gt *gt = arg;
	intel_wakeref_t wakeref;

	/* Check that we can recover a wedged device with a GPU reset */

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	intel_gt_set_wedged(gt);

	GEM_BUG_ON(!intel_gt_is_wedged(gt));
	intel_gt_reset(gt, ALL_ENGINES, NULL);

	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	return intel_gt_is_wedged(gt) ? -EIO : 0;
}

static int igt_atomic_reset(void *arg)
{
	struct intel_gt *gt = arg;
	const typeof(*igt_atomic_phases) *p;
	int err = 0;

	/* Check that the resets are usable from atomic context */

	intel_gt_pm_get(gt);
	igt_global_reset_lock(gt);

	/* Flush any requests before we get started and check basics */
	if (!igt_force_reset(gt))
		goto unlock;

	for (p = igt_atomic_phases; p->name; p++) {
		intel_engine_mask_t awake;

		GEM_TRACE("__intel_gt_reset under %s\n", p->name);

		awake = reset_prepare(gt);
		p->critical_section_begin();

		err = __intel_gt_reset(gt, ALL_ENGINES);

		p->critical_section_end();
		reset_finish(gt, awake);

		if (err) {
			pr_err("__intel_gt_reset failed under %s\n", p->name);
			break;
		}
	}

	/* As we poke around the guts, do a full reset before continuing. */
	igt_force_reset(gt);

unlock:
	igt_global_reset_unlock(gt);
	intel_gt_pm_put(gt);

	return err;
}

static int igt_atomic_engine_reset(void *arg)
{
	struct intel_gt *gt = arg;
	const typeof(*igt_atomic_phases) *p;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/* Check that the resets are usable from atomic context */

	if (!intel_has_reset_engine(gt))
		return 0;

	if (intel_uc_uses_guc_submission(&gt->uc))
		return 0;

	intel_gt_pm_get(gt);
	igt_global_reset_lock(gt);

	/* Flush any requests before we get started and check basics */
	if (!igt_force_reset(gt))
		goto out_unlock;

	for_each_engine(engine, gt, id) {
		struct tasklet_struct *t = &engine->sched_engine->tasklet;

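		/* Keep the submission tasklet quiet while we poke the engine */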
		if (t->func)
			tasklet_disable(t);
		intel_engine_pm_get(engine);

		for (p = igt_atomic_phases; p->name; p++) {
			GEM_TRACE("intel_engine_reset(%s) under %s\n",
				  engine->name, p->name);
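			/*
			 * The softirq phase disables bottom halves in its own
			 * critical section; mask them manually for the rest.
			 */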
			if (strcmp(p->name, "softirq"))
				local_bh_disable();

			p->critical_section_begin();
			err = __intel_engine_reset_bh(engine, NULL);
			p->critical_section_end();

			if (strcmp(p->name, "softirq"))
				local_bh_enable();

			if (err) {
				pr_err("intel_engine_reset(%s) failed under %s\n",
				       engine->name, p->name);
				break;
			}
		}

		intel_engine_pm_put(engine);
		if (t->func) {
			tasklet_enable(t);
			tasklet_hi_schedule(t);
		}
		if (err)
			break;
	}

	/* As we poke around the guts, do a full reset before continuing. */
	igt_force_reset(gt);

out_unlock:
	igt_global_reset_unlock(gt);
	intel_gt_pm_put(gt);

	return err;
}

int intel_reset_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_global_reset), /* attempt to recover GPU first */
		SUBTEST(igt_reset_device_stolen),
		SUBTEST(igt_reset_engines_stolen),
		SUBTEST(igt_wedged_reset),
		SUBTEST(igt_atomic_reset),
		SUBTEST(igt_atomic_engine_reset),
	};
	struct intel_gt *gt = to_gt(i915);

	if (!intel_has_gpu_reset(gt))
		return 0;

	if (intel_gt_is_wedged(gt))
		return -EIO; /* we're long past hope of a successful reset */

	return intel_gt_live_subtests(tests, gt);
}