/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <drm/drm_cache.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

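/*
 * Install a freshly acquired sg_table as the object's backing store:
 * make the pages coherent with the GPU, reset the cached page iterators,
 * record which GTT page sizes the layout can support and, unless the
 * object manages its own shrink list, place it on the global
 * shrink/purge lists.
 */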
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                                 struct sg_table *pages)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        unsigned long supported = RUNTIME_INFO(i915)->page_sizes;
        bool shrinkable;
        int i;

        assert_object_held_shared(obj);

        if (i915_gem_object_is_volatile(obj))
                obj->mm.madv = I915_MADV_DONTNEED;

        /* Make the pages coherent with the GPU (flushing any swapin). */
        if (obj->cache_dirty) {
                WARN_ON_ONCE(IS_DGFX(i915));
                obj->write_domain = 0;
                if (i915_gem_object_has_struct_page(obj))
                        drm_clflush_sg(pages);
                obj->cache_dirty = false;
        }

        obj->mm.get_page.sg_pos = pages->sgl;
        obj->mm.get_page.sg_idx = 0;
        obj->mm.get_dma_page.sg_pos = pages->sgl;
        obj->mm.get_dma_page.sg_idx = 0;

        obj->mm.pages = pages;

        obj->mm.page_sizes.phys = i915_sg_dma_sizes(pages->sgl);
        GEM_BUG_ON(!obj->mm.page_sizes.phys);

        /*
         * Calculate the supported page-sizes which fit into the given
         * sg_page_sizes. This will give us the page-sizes which we may be able
         * to use opportunistically when later inserting into the GTT. For
         * example if phys=2G, then in theory we should be able to use 1G, 2M,
         * 64K or 4K pages, although in practice this will depend on a number of
         * other factors.
         */
        obj->mm.page_sizes.sg = 0;
        for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
                if (obj->mm.page_sizes.phys & ~0u << i)
                        obj->mm.page_sizes.sg |= BIT(i);
        }
        GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

        shrinkable = i915_gem_object_is_shrinkable(obj);

        if (i915_gem_object_is_tiled(obj) &&
            i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
                GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
                i915_gem_object_set_tiling_quirk(obj);
                GEM_BUG_ON(!list_empty(&obj->mm.link));
                atomic_inc(&obj->mm.shrink_pin);
                shrinkable = false;
        }

        if (shrinkable && !i915_gem_object_has_self_managed_shrink_list(obj)) {
                struct list_head *list;
                unsigned long flags;

                assert_object_held(obj);
                spin_lock_irqsave(&i915->mm.obj_lock, flags);

                i915->mm.shrink_count++;
                i915->mm.shrink_memory += obj->base.size;

                if (obj->mm.madv != I915_MADV_WILLNEED)
                        list = &i915->mm.purge_list;
                else
                        list = &i915->mm.shrink_list;
                list_add_tail(&obj->mm.link, list);

                atomic_set(&obj->mm.shrink_pin, 0);
                spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
        }
}

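/*
 * Lowest level of the get_pages stack: ask the backend to populate
 * obj->mm.pages. Objects already marked purgeable (DONTNEED) are
 * refused with -EFAULT.
 */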
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        int err;

        assert_object_held_shared(obj);

        if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
                drm_dbg(&i915->drm,
                        "Attempting to obtain a purgeable object\n");
                return -EFAULT;
        }

        err = obj->ops->get_pages(obj);
        GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

        return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
        int err;

        assert_object_held(obj);

        assert_object_held_shared(obj);

        if (unlikely(!i915_gem_object_has_pages(obj))) {
                GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

                err = ____i915_gem_object_get_pages(obj);
                if (err)
                        return err;

                smp_mb__before_atomic();
        }
        atomic_inc(&obj->mm.pages_pin_count);

        return 0;
}

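/*
 * Convenience wrapper that takes the object lock around
 * i915_gem_object_pin_pages(), backing off and retrying if the ww lock
 * acquisition hits -EDEADLK. On success the caller must balance with
 * i915_gem_object_unpin_pages().
 */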
int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
{
        struct i915_gem_ww_ctx ww;
        int err;

        i915_gem_ww_ctx_init(&ww, true);
retry:
        err = i915_gem_object_lock(obj, &ww);
        if (!err)
                err = i915_gem_object_pin_pages(obj);

        if (err == -EDEADLK) {
                err = i915_gem_ww_ctx_backoff(&ww);
                if (!err)
                        goto retry;
        }
        i915_gem_ww_ctx_fini(&ww);
        return err;
}

/* Immediately discard the backing storage */
int i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
        if (obj->ops->truncate)
                return obj->ops->truncate(obj);

        return 0;
}

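/*
 * Drop every scatterlist position cached in the page and dma-page radix
 * trees; they are rebuilt lazily by the next lookup in
 * __i915_gem_object_page_iter_get_sg().
 */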
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
        struct radix_tree_iter iter;
        void __rcu **slot;

        rcu_read_lock();
        radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
                radix_tree_delete(&obj->mm.get_page.radix, iter.index);
        radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
                radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
        rcu_read_unlock();
}

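/*
 * Only vmap'ed mappings need explicit teardown; a single-page WB mapping
 * is just page_address() of the page (see i915_gem_object_map_page())
 * and has nothing to undo.
 */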
static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
        if (is_vmalloc_addr(ptr))
                vunmap(ptr);
}

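/*
 * Invalidate any GPU TLB entries that may still reference these pages
 * before they are handed back to the system.
 */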
static void flush_tlb_invalidate(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct intel_gt *gt = to_gt(i915);

        if (!obj->mm.tlb)
                return;

        intel_gt_invalidate_tlb(gt, obj->mm.tlb);
        obj->mm.tlb = 0;
}

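/*
 * Detach the sg_table from the object, undoing __i915_gem_object_set_pages():
 * tear down any CPU mapping and the cached page iterators, take the object
 * off the shrink lists (unless it manages its own), and return the pages to
 * the caller, typically to be released through obj->ops->put_pages().
 */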
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
        struct sg_table *pages;

        assert_object_held_shared(obj);

        pages = fetch_and_zero(&obj->mm.pages);
        if (IS_ERR_OR_NULL(pages))
                return pages;

        if (i915_gem_object_is_volatile(obj))
                obj->mm.madv = I915_MADV_WILLNEED;

        if (!i915_gem_object_has_self_managed_shrink_list(obj))
                i915_gem_object_make_unshrinkable(obj);

        if (obj->mm.mapping) {
                unmap_object(obj, page_mask_bits(obj->mm.mapping));
                obj->mm.mapping = NULL;
        }

        __i915_gem_object_reset_page_iter(obj);
        obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

        flush_tlb_invalidate(obj);

        return pages;
}

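/*
 * Release the object's pages back to the backend, unless they are still
 * pinned (-EBUSY). Userspace mmaps are revoked first so that stale pages
 * cannot be faulted back in.
 */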
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
        struct sg_table *pages;

        if (i915_gem_object_has_pinned_pages(obj))
                return -EBUSY;

        /* May be called by shrinker from within get_pages() (on another bo) */
        assert_object_held_shared(obj);

        i915_gem_object_release_mmap_offset(obj);

        /*
         * ->put_pages might need to allocate memory for the bit17 swizzle
         * array, hence protect them from being reaped by removing them from gtt
         * lists early.
         */
        pages = __i915_gem_object_unset_pages(obj);

        /*
         * XXX Temporary hijinx to avoid updating all backends to handle
         * NULL pages. In the future, when we have more asynchronous
         * get_pages backends we should be better able to handle the
         * cancellation of the async task in a more uniform manner.
         */
        if (!IS_ERR_OR_NULL(pages))
                obj->ops->put_pages(obj, pages);

        return 0;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
                                      enum i915_map_type type)
{
        unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
        struct page *stack[32], **pages = stack, *page;
        struct sgt_iter iter;
        pgprot_t pgprot;
        void *vaddr;

        switch (type) {
        default:
                MISSING_CASE(type);
                fallthrough;    /* to use PAGE_KERNEL anyway */
        case I915_MAP_WB:
                /*
                 * On 32b, highmem uses a finite set of indirect PTE (i.e.
                 * vmap) to provide virtual mappings of the high pages.
                 * As these are finite, map_new_virtual() must wait for some
                 * other kmap() to finish when it runs out. If we map a large
                 * number of objects, there is no method for it to tell us
                 * to release the mappings, and we deadlock.
                 *
                 * However, if we make an explicit vmap of the page, that
                 * uses a larger vmalloc arena, and also has the ability
                 * to tell us to release unwanted mappings. Most importantly,
                 * it will fail and propagate an error instead of waiting
                 * forever.
                 *
                 * So if the page is beyond the 32b boundary, make an explicit
                 * vmap.
                 */
                if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
                        return page_address(sg_page(obj->mm.pages->sgl));
                pgprot = PAGE_KERNEL;
                break;
        case I915_MAP_WC:
                pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
                break;
        }

        if (n_pages > ARRAY_SIZE(stack)) {
                /* Too big for stack -- allocate temporary array instead */
                pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
                if (!pages)
                        return ERR_PTR(-ENOMEM);
        }

        i = 0;
        for_each_sgt_page(page, iter, obj->mm.pages)
                pages[i++] = page;
        vaddr = vmap(pages, n_pages, 0, pgprot);
        if (pages != stack)
                kvfree(pages);

        return vaddr ?: ERR_PTR(-ENOMEM);
}

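/*
 * Like i915_gem_object_map_page(), but for objects whose backing store
 * has no struct pages (e.g. device local memory): build the vmap from
 * PFNs computed from the memory region's iomap instead.
 */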
static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
                                     enum i915_map_type type)
{
        resource_size_t iomap = obj->mm.region->iomap.base -
                obj->mm.region->region.start;
        unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
        unsigned long stack[32], *pfns = stack, i;
        struct sgt_iter iter;
        dma_addr_t addr;
        void *vaddr;

        GEM_BUG_ON(type != I915_MAP_WC);

        if (n_pfn > ARRAY_SIZE(stack)) {
                /* Too big for stack -- allocate temporary array instead */
                pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
                if (!pfns)
                        return ERR_PTR(-ENOMEM);
        }

        i = 0;
        for_each_sgt_daddr(addr, iter, obj->mm.pages)
                pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
        vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
        if (pfns != stack)
                kvfree(pfns);

        return vaddr ?: ERR_PTR(-ENOMEM);
}

/* get, pin, and map the pages of the object into kernel space */
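/*
 * A sketch of a typical call sequence (error handling abbreviated),
 * assuming the caller already holds the object lock:
 *
 *      vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *      if (IS_ERR(vaddr))
 *              return PTR_ERR(vaddr);
 *      ...access the object's contents through vaddr...
 *      i915_gem_object_unpin_map(obj);
 */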
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
                              enum i915_map_type type)
{
        enum i915_map_type has_type;
        bool pinned;
        void *ptr;
        int err;

        if (!i915_gem_object_has_struct_page(obj) &&
            !i915_gem_object_has_iomem(obj))
                return ERR_PTR(-ENXIO);

        if (WARN_ON_ONCE(obj->flags & I915_BO_ALLOC_GPU_ONLY))
                return ERR_PTR(-EINVAL);

        assert_object_held(obj);

        pinned = !(type & I915_MAP_OVERRIDE);
        type &= ~I915_MAP_OVERRIDE;

        if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
                if (unlikely(!i915_gem_object_has_pages(obj))) {
                        GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

                        err = ____i915_gem_object_get_pages(obj);
                        if (err)
                                return ERR_PTR(err);

                        smp_mb__before_atomic();
                }
                atomic_inc(&obj->mm.pages_pin_count);
                pinned = false;
        }
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));

        /*
         * For discrete our CPU mappings need to be consistent in order to
         * function correctly on !x86. When mapping things through TTM, we use
         * the same rules to determine the caching type.
         *
         * The caching rules, starting from DG1:
         *
         *      - If the object can be placed in device local-memory, then the
         *        pages should be allocated and mapped as write-combined only.
         *
         *      - Everything else is always allocated and mapped as write-back,
         *        with the guarantee that everything is also coherent with the
         *        GPU.
         *
         * Internal users of lmem are already expected to get this right, so no
         * fudging needed there.
         */
        if (i915_gem_object_placement_possible(obj, INTEL_MEMORY_LOCAL)) {
                if (type != I915_MAP_WC && !obj->mm.n_placements) {
                        ptr = ERR_PTR(-ENODEV);
                        goto err_unpin;
                }

                type = I915_MAP_WC;
        } else if (IS_DGFX(to_i915(obj->base.dev))) {
                type = I915_MAP_WB;
        }

        ptr = page_unpack_bits(obj->mm.mapping, &has_type);
        if (ptr && has_type != type) {
                if (pinned) {
                        ptr = ERR_PTR(-EBUSY);
                        goto err_unpin;
                }

                unmap_object(obj, ptr);

                ptr = obj->mm.mapping = NULL;
        }

        if (!ptr) {
                err = i915_gem_object_wait_moving_fence(obj, true);
                if (err) {
                        ptr = ERR_PTR(err);
                        goto err_unpin;
                }

                if (GEM_WARN_ON(type == I915_MAP_WC && !pat_enabled()))
                        ptr = ERR_PTR(-ENODEV);
                else if (i915_gem_object_has_struct_page(obj))
                        ptr = i915_gem_object_map_page(obj, type);
                else
                        ptr = i915_gem_object_map_pfn(obj, type);
                if (IS_ERR(ptr))
                        goto err_unpin;

                obj->mm.mapping = page_pack_bits(ptr, type);
        }

        return ptr;

err_unpin:
        atomic_dec(&obj->mm.pages_pin_count);
        return ptr;
}

void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
                                       enum i915_map_type type)
{
        void *ret;

        i915_gem_object_lock(obj, NULL);
        ret = i915_gem_object_pin_map(obj, type);
        i915_gem_object_unlock(obj);

        return ret;
}

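/*
 * Pick a CPU mapping type for the object: lmem must always be mapped
 * write-combined; otherwise prefer write-back when the platform is
 * LLC-coherent or when the caller demands coherence.
 */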
enum i915_map_type i915_coherent_map_type(struct drm_i915_private *i915,
                                          struct drm_i915_gem_object *obj,
                                          bool always_coherent)
{
        if (i915_gem_object_is_lmem(obj))
                return I915_MAP_WC;
        if (HAS_LLC(i915) || always_coherent)
                return I915_MAP_WB;
        else
                return I915_MAP_WC;
}

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
                                 unsigned long offset,
                                 unsigned long size)
{
        enum i915_map_type has_type;
        void *ptr;

        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
        GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
                                     offset, size, obj->base.size));

        wmb(); /* let all previous writes be visible to coherent partners */
        obj->mm.dirty = true;

        if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
                return;

        ptr = page_unpack_bits(obj->mm.mapping, &has_type);
        if (has_type == I915_MAP_WC)
                return;

        drm_clflush_virt_range(ptr + offset, size);
        if (size == obj->base.size) {
                obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
                obj->cache_dirty = false;
        }
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
{
        GEM_BUG_ON(!obj->mm.mapping);

        /*
         * We allow removing the mapping from underneath pinned pages!
         *
         * Furthermore, since this is an unsafe operation reserved only
         * for construction time manipulation, we ignore locking prudence.
         */
        unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));

        i915_gem_object_unpin_map(obj);
}

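/*
 * Translate page index n into the scatterlist entry containing it,
 * returning that entry and, via *offset, the page's position within it.
 */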
struct scatterlist *
__i915_gem_object_page_iter_get_sg(struct drm_i915_gem_object *obj,
                                   struct i915_gem_object_page_iter *iter,
                                   pgoff_t n,
                                   unsigned int *offset)
{
        const bool dma = iter == &obj->mm.get_dma_page ||
                         iter == &obj->ttm.get_io_page;
        unsigned int idx, count;
        struct scatterlist *sg;

        might_sleep();
        GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
        if (!i915_gem_object_has_pinned_pages(obj))
                assert_object_held(obj);

        /* As we iterate forward through the sg, we record each entry in a
         * radixtree for quick repeated (backwards) lookups. If we have seen
         * this index previously, we will have an entry for it.
         *
         * Initial lookup is O(N), but this is amortized to O(1) for
         * sequential page access (where each new request is consecutive
         * to the previous one). Repeated lookups are O(lg(obj->base.size)),
         * i.e. O(1) with a large constant!
         */
        if (n < READ_ONCE(iter->sg_idx))
                goto lookup;

        mutex_lock(&iter->lock);

        /* We prefer to reuse the last sg so that repeated lookup of this
         * (or the subsequent) sg are fast - comparing against the last
         * sg is faster than going through the radixtree.
         */

        sg = iter->sg_pos;
        idx = iter->sg_idx;
        count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);

        while (idx + count <= n) {
                void *entry;
                unsigned long i;
                int ret;

                /* If we cannot allocate and insert this entry, or the
                 * individual pages from this range, cancel updating the
                 * sg_idx so that on this lookup we are forced to linearly
                 * scan onwards, but on future lookups we will try the
                 * insertion again (in which case we need to be careful of
                 * the error return reporting that we have already inserted
                 * this index).
                 */
                ret = radix_tree_insert(&iter->radix, idx, sg);
                if (ret && ret != -EEXIST)
                        goto scan;

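                /*
                 * Tail pages of a multi-page sg entry are recorded as
                 * value entries pointing back at the head index; the
                 * lookup path below decodes them with xa_to_value().
                 */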
                entry = xa_mk_value(idx);
                for (i = 1; i < count; i++) {
                        ret = radix_tree_insert(&iter->radix, idx + i, entry);
                        if (ret && ret != -EEXIST)
                                goto scan;
                }

                idx += count;
                sg = ____sg_next(sg);
                count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
        }

scan:
        iter->sg_pos = sg;
        iter->sg_idx = idx;

        mutex_unlock(&iter->lock);

        if (unlikely(n < idx)) /* insertion completed by another thread */
                goto lookup;

        /* In case we failed to insert the entry into the radixtree, we need
         * to look beyond the current sg.
         */
        while (idx + count <= n) {
                idx += count;
                sg = ____sg_next(sg);
                count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
        }

        *offset = n - idx;
        return sg;

lookup:
        rcu_read_lock();

        sg = radix_tree_lookup(&iter->radix, n);
        GEM_BUG_ON(!sg);

        /* If this index is in the middle of a multi-page sg entry,
         * the radix tree will contain a value entry that points
         * to the start of that range. We will return the pointer to
         * the base page and the offset of this page within the
         * sg entry's range.
         */
        *offset = 0;
        if (unlikely(xa_is_value(sg))) {
                unsigned long base = xa_to_value(sg);

                sg = radix_tree_lookup(&iter->radix, base);
                GEM_BUG_ON(!sg);

                *offset = n - base;
        }

        rcu_read_unlock();

        return sg;
}

struct page *
__i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n)
{
        struct scatterlist *sg;
        unsigned int offset;

        GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

        sg = i915_gem_object_get_sg(obj, n, &offset);
        return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
__i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, pgoff_t n)
{
        struct page *page;

        page = i915_gem_object_get_page(obj, n);
        if (!obj->mm.dirty)
                set_page_dirty(page);

        return page;
}

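/*
 * Return the DMA address of page n and, optionally via *len, how many
 * bytes remain in the same sg entry from that page onwards.
 */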
dma_addr_t
__i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
                                      pgoff_t n, unsigned int *len)
{
        struct scatterlist *sg;
        unsigned int offset;

        sg = i915_gem_object_get_sg_dma(obj, n, &offset);

        if (len)
                *len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

        return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
__i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, pgoff_t n)
{
        return i915_gem_object_get_dma_address_len(obj, n, NULL);
}