/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2012 Intel Corporation
 */

#include <linux/errno.h>
#include <linux/mutex.h>

#include <drm/drm_mm.h>
#include <drm/i915_drm.h>

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_region_lmem.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
#include "i915_pci.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS and so the user finds that his system has less memory
 * available than he put in. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (GRAPHICS_VER(i915) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&i915->mm.stolen_lock);

	return ret;
}

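/*
 * Note: generic stolen allocations are biased to start at
 * I915_GEM_STOLEN_BIAS rather than 0; the very bottom of stolen is
 * avoided as a precaution (see the definition of I915_GEM_STOLEN_BIAS
 * in i915_gem_stolen.h for the rationale).
 */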
int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(i915, node,
						    size, alignment,
						    I915_GEM_STOLEN_BIAS,
						    U64_MAX);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
				 struct drm_mm_node *node)
{
	mutex_lock(&i915->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&i915->mm.stolen_lock);
}

static bool valid_stolen_size(struct drm_i915_private *i915, struct resource *dsm)
{
	return (dsm->start != 0 || HAS_LMEMBAR_SMEM_STOLEN(i915)) && dsm->end > dsm->start;
}

static int adjust_stolen(struct drm_i915_private *i915,
			 struct resource *dsm)
{
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	if (!valid_stolen_size(i915, dsm))
		return -EINVAL;

	/*
	 * Make sure we don't clobber the GTT if it's within stolen memory.
	 *
	 * TODO: We have yet to encounter the case where the GTT wasn't at
	 * the end of stolen. With that assumption we could simplify this.
	 */
	if (GRAPHICS_VER(i915) <= 4 &&
	    !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
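		/*
		 * On gen4 the GGTT base in PGTBL_CTL is split: bits 31:12
		 * hold the low address bits, while bits 7:4 carry address
		 * bits 35:32 (hence the << 28 below).
		 */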
		if (GRAPHICS_VER(i915) == 4)
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		ggtt_res = DEFINE_RES_MEM(ggtt_start, ggtt_total_entries(ggtt) * 4);

		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			drm_dbg(&i915->drm,
				"GTT within stolen memory at %pR\n",
				&ggtt_res);
			drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n",
				dsm);
		}
	}

	if (!valid_stolen_size(i915, dsm))
		return -EINVAL;

	return 0;
}

static int request_smem_stolen(struct drm_i915_private *i915,
			       struct resource *dsm)
{
	struct resource *r;

	/*
	 * With stolen lmem, we don't need to request system memory for the
	 * address range since it's local to the gpu.
	 *
	 * Starting with MTL, on integrated devices the stolen memory is
	 * exposed via LMEMBAR and shall be considered similar to stolen lmem.
	 */
	if (HAS_LMEM(i915) || HAS_LMEMBAR_SMEM_STOLEN(i915))
		return 0;

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(i915->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt, but this time requesting the region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: some BIOSes wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0. Some BIOSes
		 * have the off-by-one at the other end as well.
		 */
		r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (!r && GRAPHICS_VER(i915) != 3) {
			drm_err(&i915->drm,
				"conflict detected with stolen region: %pR\n",
				dsm);

			return -EBUSY;
		}
	}

	return 0;
}

static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
{
	if (!drm_mm_initialized(&i915->mm.stolen))
		return;

	drm_mm_takedown(&i915->mm.stolen);
}

static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore,
					IS_GM45(i915) ?
					CTG_STOLEN_RESERVED :
					ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.stolen.end + 1;

	drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
		IS_GM45(i915) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	drm_WARN(&i915->drm, GRAPHICS_VER(i915) == 5,
		 "ILK stolen reserved found? 0x%08x\n",
		 reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	drm_WARN_ON(&i915->drm,
		    (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.stolen.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		fallthrough;
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}

static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.stolen.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}

static void icl_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}

	if (HAS_LMEMBAR_SMEM_STOLEN(i915))
		/* the base is initialized to stolen top so subtract size to get base */
		*base -= *size;
	else
		*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;
}

/*
 * Initialize i915->dsm.reserved to contain the reserved space within the Data
 * Stolen Memory. This is a range at the top of DSM that is reserved and not
 * to be used by the driver, so it must be excluded from the region passed to
 * the allocator later. In the spec this is also referred to as WOPCM.
 *
 * Our expectation is that the reserved space is at the top of the stolen
 * region, as has been the case on every platform so far, and *never* at the
 * bottom, so the calculation here can be simplified.
 */
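/*
 * A sketch of the expected layout (reserved/WOPCM always at the top):
 *
 *	dsm.stolen.start            dsm.reserved.start      stolen_top
 *	|                           |                       |
 *	v                           v                       v
 *	+---------------------------+-----------------------+
 *	|    usable by the driver   |   reserved (WOPCM)    |
 *	+---------------------------+-----------------------+
 */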
static int init_reserved_stolen(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_size;
	int ret = 0;

	stolen_top = i915->dsm.stolen.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	if (GRAPHICS_VER(i915) >= 11) {
		icl_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 8) {
		if (IS_LP(i915))
			chv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 7) {
		if (IS_VALLEYVIEW(i915))
			vlv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(i915, uncore,
						 &reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 6) {
		gen6_get_stolen_reserved(i915, uncore,
					 &reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 5 || IS_G4X(i915)) {
		g4x_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
	}

	/* No reserved stolen */
	if (reserved_base == stolen_top)
		goto bail_out;

	if (!reserved_base) {
		drm_err(&i915->drm,
			"inconsistent reservation %pa + %pa; ignoring\n",
			&reserved_base, &reserved_size);
		ret = -EINVAL;
		goto bail_out;
	}

	i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&i915->dsm.stolen, &i915->dsm.reserved)) {
		drm_err(&i915->drm,
			"Stolen reserved area %pR outside stolen memory %pR\n",
			&i915->dsm.reserved, &i915->dsm.stolen);
		ret = -EINVAL;
		goto bail_out;
	}

	return 0;

bail_out:
	i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, 0);

	return ret;
}

static int i915_gem_init_stolen(struct intel_memory_region *mem)
{
	struct drm_i915_private *i915 = mem->i915;

	mutex_init(&i915->mm.stolen_lock);

	if (intel_vgpu_active(i915)) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "iGVT-g active");
		return -ENOSPC;
	}

	if (i915_vtd_active(i915) && GRAPHICS_VER(i915) < 8) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "DMAR active");
		return -ENOSPC;
	}

	if (adjust_stolen(i915, &mem->region))
		return -ENOSPC;

	if (request_smem_stolen(i915, &mem->region))
		return -ENOSPC;

	i915->dsm.stolen = mem->region;

	if (init_reserved_stolen(i915))
		return -ENOSPC;

	/* Exclude the reserved region from driver use */
	mem->region.end = i915->dsm.reserved.start - 1;
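	/* The io window cannot be larger than the now-trimmed region */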
	mem->io_size = min(mem->io_size, resource_size(&mem->region));

	i915->dsm.usable_size = resource_size(&mem->region);

	drm_dbg(&i915->drm,
		"Memory reserved for graphics device: %lluK, usable: %lluK\n",
		(u64)resource_size(&i915->dsm.stolen) >> 10,
		(u64)i915->dsm.usable_size >> 10);

	if (i915->dsm.usable_size == 0)
		return -ENOSPC;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&i915->mm.stolen, 0, i915->dsm.usable_size);

	return 0;
}

static void dbg_poison(struct i915_ggtt *ggtt,
		       dma_addr_t addr, resource_size_t size,
		       u8 x)
{
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
	if (!drm_mm_node_allocated(&ggtt->error_capture))
		return;

	if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND)
		return; /* beware stop_machine() inversion */

	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	mutex_lock(&ggtt->error_mutex);
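	/*
	 * Poison the range one page at a time: bind each page of the
	 * stolen object into the reserved error-capture GGTT slot and
	 * scribble the pattern through a WC mapping of that slot.
	 */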
	while (size) {
		void __iomem *s;

		ggtt->vm.insert_page(&ggtt->vm, addr,
				     ggtt->error_capture.start,
				     I915_CACHE_NONE, 0);
		mb();

		s = io_mapping_map_wc(&ggtt->iomap,
				      ggtt->error_capture.start,
				      PAGE_SIZE);
		memset_io(s, x, PAGE_SIZE);
		io_mapping_unmap(s);

		addr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	mb();
	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
	mutex_unlock(&ggtt->error_mutex);
#endif
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm.stolen)));

	/*
	 * We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)i915->dsm.stolen.start + offset;
	sg_dma_len(sg) = size;

	return st;
}

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	dbg_poison(to_gt(i915)->ggtt,
		   sg_dma_address(pages->sgl),
		   sg_dma_len(pages->sgl),
		   POISON_INUSE);

	__i915_gem_object_set_pages(obj, pages);

	return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	/* Should only be called from i915_gem_object_release_stolen() */

	dbg_poison(to_gt(i915)->ggtt,
		   sg_dma_address(pages->sgl),
		   sg_dma_len(pages->sgl),
		   POISON_FREE);

	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);
	i915_gem_stolen_remove_node(i915, stolen);
	kfree(stolen);

	i915_gem_object_release_memory_region(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.name = "i915_gem_object_stolen",
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
					   struct drm_i915_gem_object *obj,
					   struct drm_mm_node *stolen)
{
	static struct lock_class_key lock_class;
	unsigned int cache_level;
	unsigned int flags;
	int err;

	/*
	 * Stolen objects are always physically contiguous since we just
	 * allocate one big block underneath using the drm_mm range allocator.
	 */
	flags = I915_BO_ALLOC_CONTIGUOUS;

	drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	if (WARN_ON(!i915_gem_object_trylock(obj, NULL)))
		return -EBUSY;

	i915_gem_object_init_memory_region(obj, mem);

	err = i915_gem_object_pin_pages(obj);
	if (err)
		i915_gem_object_release_memory_region(obj);
	i915_gem_object_unlock(obj);

	return err;
}

static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
					struct drm_i915_gem_object *obj,
					resource_size_t offset,
					resource_size_t size,
					resource_size_t page_size,
					unsigned int flags)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	if (size == 0)
		return -EINVAL;

	/*
	 * With discrete devices, where we lack a mappable aperture, there is
	 * no possible way to ever access this memory on the CPU side.
	 */
	if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !mem->io_size &&
	    !(flags & I915_BO_ALLOC_GPU_ONLY))
		return -ENOSPC;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return -ENOMEM;

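	/*
	 * A fixed offset means the caller asked for a specific preallocated
	 * range (e.g. an object inherited from the BIOS framebuffer), so
	 * reserve exactly that range instead of searching for free space.
	 */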
	if (offset != I915_BO_INVALID_OFFSET) {
		drm_dbg(&i915->drm,
			"creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
			&offset, &size);

		stolen->start = offset;
		stolen->size = size;
		mutex_lock(&i915->mm.stolen_lock);
		ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
		mutex_unlock(&i915->mm.stolen_lock);
	} else {
		ret = i915_gem_stolen_insert_node(i915, stolen, size,
						  mem->min_page_size);
	}
	if (ret)
		goto err_free;

	ret = __i915_gem_object_create_stolen(mem, obj, stolen);
	if (ret)
		goto err_remove;

	return 0;

err_remove:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return ret;
}

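/*
 * Example usage (hypothetical caller, for illustration only):
 *
 *	obj = i915_gem_object_create_stolen(i915, SZ_4K);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	...
 *	i915_gem_object_put(obj);
 */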
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915,
			      resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0);
}

static int init_stolen_smem(struct intel_memory_region *mem)
{
	int err;

	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	err = i915_gem_init_stolen(mem);
	if (err)
		drm_dbg(&mem->i915->drm, "Skip stolen region: failed to setup\n");

	return 0;
}

static int release_stolen_smem(struct intel_memory_region *mem)
{
	i915_gem_cleanup_stolen(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops i915_region_stolen_smem_ops = {
	.init = init_stolen_smem,
	.release = release_stolen_smem,
	.init_object = _i915_gem_object_stolen_init,
};

static int init_stolen_lmem(struct intel_memory_region *mem)
{
	struct drm_i915_private *i915 = mem->i915;
	int err;

	if (GEM_WARN_ON(resource_size(&mem->region) == 0))
		return 0;

	err = i915_gem_init_stolen(mem);
	if (err) {
		drm_dbg(&mem->i915->drm, "Skip stolen region: failed to setup\n");
		return 0;
	}

	if (mem->io_size &&
	    !io_mapping_init_wc(&mem->iomap, mem->io_start, mem->io_size))
		goto err_cleanup;

	drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n",
		&mem->io_start);
	drm_dbg(&i915->drm, "Stolen Local DSM base: %pa\n", &mem->region.start);

	return 0;

err_cleanup:
	i915_gem_cleanup_stolen(mem->i915);
	return err;
}

static int release_stolen_lmem(struct intel_memory_region *mem)
{
	if (mem->io_size)
		io_mapping_fini(&mem->iomap);
	i915_gem_cleanup_stolen(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = {
	.init = init_stolen_lmem,
	.release = release_stolen_lmem,
	.init_object = _i915_gem_object_stolen_init,
};

static int mtl_get_gms_size(struct intel_uncore *uncore)
{
	u16 ggc, gms;

	ggc = intel_uncore_read16(uncore, GGC);

	/* check GGMS, should be fixed 0x3 (8MB) */
	if ((ggc & GGMS_MASK) != GGMS_MASK)
		return -EIO;

	/* return valid GMS value, -EIO if invalid */
	gms = REG_FIELD_GET(GMS_MASK, ggc);
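	/*
	 * GMS encodes the DSM size in MB: values 0x0..0x4 are multiples of
	 * 32MB, while 0xf0..0xfe encode 4MB..60MB in 4MB steps.
	 */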
	switch (gms) {
	case 0x0 ... 0x04:
		return gms * 32;
	case 0xf0 ... 0xfe:
		return (gms - 0xf0 + 1) * 4;
	default:
		MISSING_CASE(gms);
		return -EIO;
	}
}

struct intel_memory_region *
i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
			   u16 instance)
{
	struct intel_uncore *uncore = &i915->uncore;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	resource_size_t dsm_size, dsm_base, lmem_size;
	struct intel_memory_region *mem;
	resource_size_t io_start, io_size;
	resource_size_t min_page_size;
	int ret;

	if (WARN_ON_ONCE(instance))
		return ERR_PTR(-ENODEV);

	if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR))
		return ERR_PTR(-ENXIO);

	if (HAS_LMEMBAR_SMEM_STOLEN(i915) || IS_DG1(i915)) {
		lmem_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
	} else {
		resource_size_t lmem_range;

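		/* The TILE0_ADDR_RANGE field gives the tile's local memory size in 1GB units */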
		lmem_range = intel_gt_mcr_read_any(&i915->gt0, XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
		lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT;
		lmem_size *= SZ_1G;
	}

	if (HAS_LMEMBAR_SMEM_STOLEN(i915)) {
		/*
		 * On MTL the DSM size is read from the GGC register.
		 * MTL also uses offsets relative to DSMBASE in its PTEs, so
		 * i915 sets dsm_base = 0 when setting up the stolen region.
		 */
		ret = mtl_get_gms_size(uncore);
		if (ret < 0) {
			drm_err(&i915->drm, "invalid MTL GGC register setting\n");
			return ERR_PTR(ret);
		}

		dsm_base = 0;
		dsm_size = (resource_size_t)(ret * SZ_1M);

		GEM_BUG_ON(pci_resource_len(pdev, GEN12_LMEM_BAR) != SZ_256M);
		GEM_BUG_ON((dsm_size + SZ_8M) > lmem_size);
	} else {
		/* Use the DSM base address instead for stolen memory */
		dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE) & GEN12_BDSM_MASK;
		if (WARN_ON(lmem_size < dsm_base))
			return ERR_PTR(-ENODEV);
		dsm_size = lmem_size - dsm_base;
	}

	io_size = dsm_size;
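	/*
	 * Pick the CPU-visible window: on MTL the stolen window in LMEMBAR
	 * starts 8MB in (past what appears to be the fixed 8MB GGMS chunk
	 * checked above); if the BAR cannot cover all of lmem, stolen gets
	 * no CPU access at all.
	 */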
	if (HAS_LMEMBAR_SMEM_STOLEN(i915)) {
		io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + SZ_8M;
	} else if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
		io_start = 0;
		io_size = 0;
	} else {
		io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base;
	}

	min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
					      I915_GTT_PAGE_SIZE_4K;

	mem = intel_memory_region_create(i915, dsm_base, dsm_size,
					 min_page_size,
					 io_start, io_size,
					 type, instance,
					 &i915_region_stolen_lmem_ops);
	if (IS_ERR(mem))
		return mem;

	intel_memory_region_set_name(mem, "stolen-local");

	mem->private = true;

	return mem;
}

struct intel_memory_region *
i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
			   u16 instance)
{
	struct intel_memory_region *mem;

	mem = intel_memory_region_create(i915,
					 intel_graphics_stolen_res.start,
					 resource_size(&intel_graphics_stolen_res),
					 PAGE_SIZE, 0, 0, type, instance,
					 &i915_region_stolen_smem_ops);
	if (IS_ERR(mem))
		return mem;

	intel_memory_region_set_name(mem, "stolen-system");

	mem->private = true;

	return mem;
}

bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_object_stolen_ops;
}