/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include "display/intel_frontbuffer.h"
#include "intel_memory_region.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ww.h"
#include "i915_vma_types.h"

enum intel_region_id;
static inline bool i915_gem_object_size_2big(u64 size)
{
	struct drm_i915_gem_object *obj;

	if (overflows_type(size, obj->base.size))
		return true;

	return false;
}

void i915_gem_init__objects(struct drm_i915_private *i915);

void i915_objects_module_exit(void);
int i915_objects_module_init(void);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key,
			  unsigned alloc_flags);

void __i915_gem_object_fini(struct drm_i915_gem_object *obj);

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, resource_size_t size);
struct drm_i915_gem_object *
__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
			      struct intel_memory_region **placements,
			      unsigned int n_placements);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;

void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);

int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
				const struct drm_i915_gem_pwrite *args);
int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
			       const struct drm_i915_gem_pread *args);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages);
void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
				    struct sg_table *pages);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @filp, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
{
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;

	return obj;
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	obj = i915_gem_object_get_rcu(obj);
	rcu_read_unlock();

	return obj;
}
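
/*
 * A minimal usage sketch, not part of the original header: a typical ioctl
 * handler resolves a userspace handle to a referenced object and drops that
 * reference when done. The helpers are the ones declared here; the
 * surrounding ioctl body and args->handle are hypothetical.
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_lookup(file, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	... operate on obj ...
 *
 *	i915_gem_object_put(obj);
 */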

__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);
	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

/*
 * If more than one potential simultaneous locker, assert held.
 */
static inline void assert_object_held_shared(const struct drm_i915_gem_object *obj)
{
	/*
	 * Note mm list lookup is protected by
	 * kref_get_unless_zero().
	 */
	if (IS_ENABLED(CONFIG_LOCKDEP) &&
	    kref_read(&obj->base.refcount) > 0)
		assert_object_held(obj);
}

static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
					 struct i915_gem_ww_ctx *ww,
					 bool intr)
{
	int ret;

	if (intr)
		ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
	else
		ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);

	if (!ret && ww) {
		i915_gem_object_get(obj);
		list_add_tail(&obj->obj_link, &ww->obj_list);
	}
	if (ret == -EALREADY)
		ret = 0;

	if (ret == -EDEADLK) {
		i915_gem_object_get(obj);
		ww->contended = obj;
	}

	return ret;
}

static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
				       struct i915_gem_ww_ctx *ww)
{
	return __i915_gem_object_lock(obj, ww, ww && ww->intr);
}

static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
						     struct i915_gem_ww_ctx *ww)
{
	WARN_ON(ww && !ww->intr);
	return __i915_gem_object_lock(obj, ww, true);
}

static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj,
					   struct i915_gem_ww_ctx *ww)
{
	if (!ww)
		return dma_resv_trylock(obj->base.resv);
	else
		return ww_mutex_trylock(&obj->base.resv->lock, &ww->ctx);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	if (obj->ops->adjust_lru)
		obj->ops->adjust_lru(obj);

	dma_resv_unlock(obj->base.resv);
}
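
/*
 * A minimal sketch of the usual ww backoff loop around these helpers, not
 * part of the original header; the loop body is hypothetical and the
 * i915_gem_ww_ctx helpers are declared in i915_gem_ww.h:
 *
 *	struct i915_gem_ww_ctx ww;
 *	int err;
 *
 *	i915_gem_ww_ctx_init(&ww, true);
 * retry:
 *	err = i915_gem_object_lock(obj, &ww);
 *	if (!err) {
 *		... obj is locked, do the work ...
 *	}
 *	if (err == -EDEADLK) {
 *		err = i915_gem_ww_ctx_backoff(&ww);
 *		if (!err)
 *			goto retry;
 *	}
 *	i915_gem_ww_ctx_fini(&ww);
 *
 * On -EDEADLK, __i915_gem_object_lock() has already recorded the contended
 * object in ww->contended, which the backoff relocks after dropping the
 * other locks held by this context.
 */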

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_ALLOC_VOLATILE;
}

static inline bool
i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj)
{
	return test_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj)
{
	set_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj)
{
	clear_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline bool
i915_gem_object_is_protected(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_PROTECTED;
}

static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
			 unsigned long flags)
{
	return obj->ops->flags & flags;
}

bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj);

bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}

static inline bool
i915_gem_object_has_self_managed_shrink_list(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST);
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}

static inline bool
i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->frontbuffer);
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}
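
/*
 * Worked example, not part of the original header: Y tiles are 32 rows
 * tall and X tiles 8 rows tall (see i915_gem_tile_height() above), and a
 * tile row spans the full stride. So a Y-tiled object with a 4096-byte
 * stride has a tile row size of 4096 * 32 = 131072 bytes (128 KiB), while
 * the same stride X-tiled gives 4096 * 8 = 32768 bytes (32 KiB).
 */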

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

/**
 * __i915_gem_object_page_iter_get_sg - helper to find the target scatterlist
 * pointer and the target page position using pgoff_t n input argument and
 * i915_gem_object_page_iter
 * @obj: i915 GEM buffer object
 * @iter: i915 GEM buffer object page iterator
 * @n: page offset
 * @offset: used to return the physical page offset within the returned
 *          scatterlist entry
 *
 * Context: Takes and releases the mutex lock of the i915_gem_object_page_iter.
 *          Takes and releases the RCU lock to search the radix_tree of
 *          i915_gem_object_page_iter.
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * Recommended to use wrapper macro: i915_gem_object_page_iter_get_sg()
 */
struct scatterlist *
__i915_gem_object_page_iter_get_sg(struct drm_i915_gem_object *obj,
				   struct i915_gem_object_page_iter *iter,
				   pgoff_t n,
				   unsigned int *offset);

/**
 * i915_gem_object_page_iter_get_sg - wrapper macro for
 * __i915_gem_object_page_iter_get_sg()
 * @obj: i915 GEM buffer object
 * @it: i915 GEM buffer object page iterator
 * @n: page offset
 * @offset: used to return the physical page offset within the returned
 *          scatterlist entry
 *
 * Context: Takes and releases the mutex lock of the i915_gem_object_page_iter.
 *          Takes and releases the RCU lock to search the radix_tree of
 *          i915_gem_object_page_iter.
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * In order to avoid truncation of the input parameter, it checks the page
 * offset n's type from the input parameter before calling
 * __i915_gem_object_page_iter_get_sg().
 */
#define i915_gem_object_page_iter_get_sg(obj, it, n, offset) ({	\
	static_assert(castable_to_type(n, pgoff_t));		\
	__i915_gem_object_page_iter_get_sg(obj, it, n, offset);	\
})

/**
 * __i915_gem_object_get_sg - helper to find the target scatterlist
 * pointer and the target page position using pgoff_t n input argument and
 * drm_i915_gem_object. It uses an internal shmem scatterlist lookup function.
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: used to return the physical page offset within the returned
 *          scatterlist entry
 *
 * It uses drm_i915_gem_object's internal shmem scatterlist lookup function as
 * i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg().
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * Recommended to use wrapper macro: i915_gem_object_get_sg()
 * See also __i915_gem_object_page_iter_get_sg()
 */
static inline struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj, pgoff_t n,
			 unsigned int *offset)
{
	return __i915_gem_object_page_iter_get_sg(obj, &obj->mm.get_page, n, offset);
}

/**
 * i915_gem_object_get_sg - wrapper macro for __i915_gem_object_get_sg()
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: used to return the physical page offset within the returned
 *          scatterlist entry
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * In order to avoid truncation of the input parameter, it checks the page
 * offset n's type from the input parameter before calling
 * __i915_gem_object_get_sg().
 * See also __i915_gem_object_page_iter_get_sg()
 */
#define i915_gem_object_get_sg(obj, n, offset) ({	\
	static_assert(castable_to_type(n, pgoff_t));	\
	__i915_gem_object_get_sg(obj, n, offset);	\
})

/**
 * __i915_gem_object_get_sg_dma - helper to find the target scatterlist
 * pointer and the target page position using pgoff_t n input argument and
 * drm_i915_gem_object. It uses an internal DMA mapped scatterlist lookup function.
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: used to return the physical page offset within the returned
 *          scatterlist entry
 *
 * It uses drm_i915_gem_object's internal DMA mapped scatterlist lookup function
 * as i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg().
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * Recommended to use wrapper macro: i915_gem_object_get_sg_dma()
 * See also __i915_gem_object_page_iter_get_sg()
 */
static inline struct scatterlist *
__i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj, pgoff_t n,
			     unsigned int *offset)
{
	return __i915_gem_object_page_iter_get_sg(obj, &obj->mm.get_dma_page, n, offset);
}

/**
 * i915_gem_object_get_sg_dma - wrapper macro for __i915_gem_object_get_sg_dma()
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: used to return the physical page offset within the returned
 *          scatterlist entry
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * In order to avoid truncation of the input parameter, it checks the page
 * offset n's type from the input parameter before calling
 * __i915_gem_object_get_sg_dma().
 * See also __i915_gem_object_page_iter_get_sg()
 */
#define i915_gem_object_get_sg_dma(obj, n, offset) ({	\
	static_assert(castable_to_type(n, pgoff_t));	\
	__i915_gem_object_get_sg_dma(obj, n, offset);	\
})

/**
 * __i915_gem_object_get_page - helper to find the target page with a page offset
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * It uses drm_i915_gem_object's internal shmem scatterlist lookup function as
 * i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg()
 * internally.
 *
 * Returns:
 * The target page pointer.
 *
 * Recommended to use wrapper macro: i915_gem_object_get_page()
 * See also __i915_gem_object_page_iter_get_sg()
 */
struct page *
__i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n);

/**
 * i915_gem_object_get_page - wrapper macro for __i915_gem_object_get_page
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * The target page pointer.
 *
 * In order to avoid truncation of the input parameter, it checks the page
 * offset n's type from the input parameter before calling
 * __i915_gem_object_get_page().
 * See also __i915_gem_object_page_iter_get_sg()
 */
#define i915_gem_object_get_page(obj, n) ({		\
	static_assert(castable_to_type(n, pgoff_t));	\
	__i915_gem_object_get_page(obj, n);		\
})
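
/*
 * A minimal usage sketch, not part of the original header: with the
 * object's pages pinned, look up a backing page and map it temporarily.
 * kmap_local_page()/kunmap_local() are the generic kernel helpers; the
 * access in the middle is hypothetical.
 *
 *	struct page *page;
 *	void *vaddr;
 *
 *	page = i915_gem_object_get_page(obj, n);
 *	vaddr = kmap_local_page(page);
 *	... read or write up to PAGE_SIZE bytes at vaddr ...
 *	kunmap_local(vaddr);
 */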

/**
 * __i915_gem_object_get_dirty_page - helper to find the target page with a page
 * offset
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * It works like i915_gem_object_get_page(), but it marks the returned page dirty.
 *
 * Returns:
 * The target page pointer.
 *
 * Recommended to use wrapper macro: i915_gem_object_get_dirty_page()
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_page()
 */
struct page *
__i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, pgoff_t n);

/**
 * i915_gem_object_get_dirty_page - wrapper macro for __i915_gem_object_get_dirty_page
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * The target page pointer.
 *
 * In order to avoid truncation of the input parameter, it checks the page
 * offset n's type from the input parameter before calling
 * __i915_gem_object_get_dirty_page().
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_page()
 */
#define i915_gem_object_get_dirty_page(obj, n) ({	\
	static_assert(castable_to_type(n, pgoff_t));	\
	__i915_gem_object_get_dirty_page(obj, n);	\
})

/**
 * __i915_gem_object_get_dma_address_len - helper to get the bus address of the
 * targeted DMA mapped scatterlist from i915 GEM buffer object and its length
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @len: used to return the length of the DMA mapped scatterlist's DMA bus
 *       address
 *
 * Returns:
 * Bus address of the targeted DMA mapped scatterlist
 *
 * Recommended to use wrapper macro: i915_gem_object_get_dma_address_len()
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_sg_dma()
 */
dma_addr_t
__i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj, pgoff_t n,
				      unsigned int *len);

/**
 * i915_gem_object_get_dma_address_len - wrapper macro for
 * __i915_gem_object_get_dma_address_len
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @len: used to return the length of the DMA mapped scatterlist's DMA bus
 *       address
 *
 * Returns:
 * Bus address of the targeted DMA mapped scatterlist
 *
 * In order to avoid truncation of the input parameter, it checks the page
 * offset n's type from the input parameter before calling
 * __i915_gem_object_get_dma_address_len().
 * See also __i915_gem_object_page_iter_get_sg() and
 * __i915_gem_object_get_sg_dma()
 */
#define i915_gem_object_get_dma_address_len(obj, n, len) ({	\
	static_assert(castable_to_type(n, pgoff_t));		\
	__i915_gem_object_get_dma_address_len(obj, n, len);	\
})

/**
 * __i915_gem_object_get_dma_address - helper to get the bus address of the
 * targeted DMA mapped scatterlist from i915 GEM buffer object
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * Bus address of the targeted DMA mapped scatterlist
 *
 * Recommended to use wrapper macro: i915_gem_object_get_dma_address()
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_sg_dma()
 */
dma_addr_t
__i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, pgoff_t n);

/**
 * i915_gem_object_get_dma_address - wrapper macro for
 * __i915_gem_object_get_dma_address
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * Bus address of the targeted DMA mapped scatterlist
 *
 * In order to avoid truncation of the input parameter, it checks the page
 * offset n's type from the input parameter before calling
 * __i915_gem_object_get_dma_address().
 * See also __i915_gem_object_page_iter_get_sg() and
 * __i915_gem_object_get_sg_dma()
 */
#define i915_gem_object_get_dma_address(obj, n) ({	\
	static_assert(castable_to_type(n, pgoff_t));	\
	__i915_gem_object_get_dma_address(obj, n);	\
})
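
/*
 * A minimal usage sketch, not part of the original header; the loop body
 * is hypothetical: walk the DMA address of every backing page while the
 * pages are pinned.
 *
 *	pgoff_t n;
 *
 *	for (n = 0; n < obj->base.size >> PAGE_SHIFT; n++) {
 *		dma_addr_t addr = i915_gem_object_get_dma_address(obj, n);
 *
 *		... program addr into hardware ...
 *	}
 */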

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	assert_object_held(obj);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}

int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj);
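
/*
 * A minimal usage sketch, not part of the original header; the access in
 * the middle is hypothetical: pin the backing pages around any direct use
 * of the backing store. i915_gem_object_pin_pages() asserts the object
 * lock is held; the _unlocked() variant takes it internally.
 *
 *	err = i915_gem_object_pin_pages_unlocked(obj);
 *	if (err)
 *		return err;
 *
 *	... access the backing store, e.g. via i915_gem_object_get_page() ...
 *
 *	i915_gem_object_unpin_pages(obj);
 */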

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
int i915_gem_object_truncate(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);

void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
						    enum i915_map_type type);

enum i915_map_type i915_coherent_map_type(struct drm_i915_private *i915,
					  struct drm_i915_gem_object *obj,
					  bool always_coherent);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
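
/*
 * A minimal usage sketch, not part of the original header; data and size
 * are hypothetical: map the whole object write-back, fill it from the CPU,
 * flush, and drop the mapping. I915_MAP_WB comes from enum i915_map_type
 * in i915_gem_object_types.h.
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, data, size);
 *	i915_gem_object_flush_map(obj);
 *	i915_gem_object_unpin_map(obj);
 */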

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
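
/*
 * A minimal sketch of the CPU access pattern, not part of the original
 * header; the per-page copy in the middle is hypothetical. prepare_write()
 * pins the pages and reports via the CLFLUSH_* bits whether the caller
 * must flush the CPU cache around its access; finish_access() drops the pin.
 *
 *	unsigned int needs_clflush;
 *	int err;
 *
 *	err = i915_gem_object_prepare_write(obj, &needs_clflush);
 *	if (err)
 *		return err;
 *
 *	... copy into each page, flushing the written range (e.g. with
 *	    drm_clflush_virt_range()) when needs_clflush & CLFLUSH_AFTER ...
 *
 *	i915_gem_object_finish_access(obj);
 */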

int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
				     struct dma_fence **fence);
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
				      bool intr);
bool i915_gem_object_has_unknown_state(struct drm_i915_gem_object *obj);

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     struct i915_gem_ww_ctx *ww,
				     u32 alignment,
				     const struct i915_gtt_view *view,
				     unsigned int flags);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (i915_gem_cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

void i915_gem_fence_wait_priority(struct dma_fence *fence,
				  const struct i915_sched_attr *attr);

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  const struct i915_sched_attr *attr);

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin);
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin);

static inline void
i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
				  enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_flush_frontbuffer(obj, origin);
}

static inline void
i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
				       enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_invalidate_frontbuffer(obj, origin);
}

int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size);
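
/*
 * A minimal usage sketch for i915_gem_object_read_from_page(), not part of
 * the original header; buf and offset are hypothetical, and the requested
 * region is expected not to cross a page boundary within the object:
 *
 *	u32 buf;
 *	int err;
 *
 *	err = i915_gem_object_read_from_page(obj, offset, &buf, sizeof(buf));
 *	if (err)
 *		return err;
 */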

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);

void __i915_gem_free_object_rcu(struct rcu_head *head);

void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj);

void __i915_gem_free_object(struct drm_i915_gem_object *obj);

bool i915_gem_object_evictable(struct drm_i915_gem_object *obj);

bool i915_gem_object_migratable(struct drm_i915_gem_object *obj);

int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    enum intel_region_id id);
int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			      struct i915_gem_ww_ctx *ww,
			      enum intel_region_id id,
			      unsigned int flags);

bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
				 enum intel_region_id id);

int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
				   unsigned int flags);

bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
					enum intel_memory_type type);

bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj);

int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
			 size_t size, struct intel_memory_region *mr,
			 struct address_space *mapping,
			 unsigned int max_segment);
void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
			 bool dirty, bool backup);
void __shmem_writeback(size_t size, struct address_space *mapping);

#ifdef CONFIG_MMU_NOTIFIER
static inline bool
i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
{
	return obj->userptr.notifier.mm;
}

int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj);
#else
static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) { return false; }

static inline int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }

#endif

#endif