/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef __I915_VMA_RESOURCE_H__
#define __I915_VMA_RESOURCE_H__

#include <linux/dma-fence.h>
#include <linux/refcount.h>

#include "i915_gem.h"
#include "i915_scatterlist.h"
#include "i915_sw_fence.h"
#include "intel_runtime_pm.h"

struct intel_memory_region;

struct i915_page_sizes {
	/**
	 * The sg mask of the pages sg_table, i.e. the mask of
	 * the lengths for each sg entry.
	 */
	unsigned int phys;

	/**
	 * The GTT page sizes we are allowed to use given the
	 * sg mask and the supported page sizes. This will
	 * express the smallest unit we can use for the whole
	 * object, as well as the larger sizes we may be able
	 * to use opportunistically.
	 */
	unsigned int sg;
};
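
/*
 * Example (illustrative values only, not taken from any specific platform):
 * for an object backed by one 64K chunk followed by one 4K chunk,
 *
 *	page_sizes.phys = SZ_64K | SZ_4K;
 *	page_sizes.sg   = SZ_4K;
 *
 * i.e. if 64K GTT pages cannot cover the whole object, @sg may be reduced
 * to just the smallest unit usable for the entire binding.
 */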

/**
 * struct i915_vma_resource - Snapshotted unbind information.
 * @unbind_fence: Fence to mark unbinding complete. Note that this fence
 * is not considered published until unbind is scheduled, and as such it
 * is illegal to access this fence before scheduled unbind other than
 * for refcounting.
 * @lock: The @unbind_fence lock.
 * @hold_count: Number of holders blocking the fence from finishing.
 * The vma itself is keeping a hold, which is released when unbind
 * is scheduled.
 * @work: Work struct for deferred unbind work.
 * @chain: Embedded struct i915_sw_fence used to await dependencies.
 * @rb: Rb node for the vm's pending unbind interval tree.
 * @__subtree_last: Interval tree private member.
 * @vm: Non-refcounted pointer to the vm. This is for internal use only and
 * this member is cleared after vm_resource unbind.
 * @wakeref: The wakeref taken when scheduling the unbind, see @needs_wakeref.
 * @mr: The memory region of the object pointed to by the vma.
 * @ops: Pointer to the backend i915_vma_ops.
 * @private: Bind backend private info.
 * @start: Offset into the address space of bind range start. Note that
 * this is after any padding that might have been allocated.
 * @node_size: Size of the allocated range manager node with padding
 * subtracted.
 * @vma_size: Bind size.
 * @guard: The size of guard area preceding and trailing the bind.
 * @page_sizes_gtt: Resulting page sizes from the bind operation.
 * @bound_flags: Flags indicating binding status.
 * @allocated: Backend private data. TODO: Should move into @private.
 * @immediate_unbind: Unbind can be done immediately and doesn't need to be
 * deferred to a work item awaiting unsignaled fences. This is a hack.
 * (dma_fence_work uses a fence flag for this, but this seems slightly
 * cleaner).
 * @needs_wakeref: Whether a wakeref is needed during unbind. Since we can't
 * take a wakeref in the dma-fence signalling critical path, it needs to be
 * taken when the unbind is scheduled.
 * @skip_pte_rewrite: During ggtt suspend and vm takedown, PTE rewriting
 * needs to be skipped for unbind.
 * @tlb: Pointer to obj->mm.tlb if the unbind is asynchronous, otherwise NULL.
 *
 * The lifetime of a struct i915_vma_resource extends from a binding request
 * until the possibly asynchronous unbind has completed.
 */
struct i915_vma_resource {
	struct dma_fence unbind_fence;
	/* See above for description of the lock. */
	spinlock_t lock;
	refcount_t hold_count;
	struct work_struct work;
	struct i915_sw_fence chain;
	struct rb_node rb;
	u64 __subtree_last;
	struct i915_address_space *vm;
	intel_wakeref_t wakeref;

	/**
	 * struct i915_vma_bindinfo - Information needed for async bind
	 * only but that can be dropped after the bind has taken place.
	 * Consider making this a separate argument to the bind_vma
	 * op, coalescing with other arguments like vm, stash, cache_level
	 * and flags.
	 * @pages: The pages sg-table.
	 * @page_sizes: Page sizes of the pages.
	 * @pages_rsgt: Refcounted sg-table when delayed object destruction
	 * is supported. May be NULL.
	 * @readonly: Whether the vma should be bound read-only.
	 * @lmem: Whether the vma points to lmem.
	 */
	struct i915_vma_bindinfo {
		struct sg_table *pages;
		struct i915_page_sizes page_sizes;
		struct i915_refct_sgt *pages_rsgt;
		bool readonly:1;
		bool lmem:1;
	} bi;

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	struct intel_memory_region *mr;
#endif
	const struct i915_vma_ops *ops;
	void *private;
	u64 start;
	u64 node_size;
	u64 vma_size;
	u32 guard;
	u32 page_sizes_gtt;

	u32 bound_flags;
	bool allocated:1;
	bool immediate_unbind:1;
	bool needs_wakeref:1;
	bool skip_pte_rewrite:1;

	u32 *tlb;
};

bool i915_vma_resource_hold(struct i915_vma_resource *vma_res,
			    bool *lockdep_cookie);

void i915_vma_resource_unhold(struct i915_vma_resource *vma_res,
			      bool lockdep_cookie);
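
/*
 * Illustrative hold/unhold pattern (sketch only, not a definitive recipe):
 * a caller that needs the snapshotted bind information to stay valid while
 * it inspects the resource can block the unbind fence from signaling,
 * provided the hold succeeds:
 *
 *	bool lockdep_cookie;
 *
 *	if (i915_vma_resource_hold(vma_res, &lockdep_cookie)) {
 *		(inspect vma_res->bi.pages and friends here)
 *		i915_vma_resource_unhold(vma_res, lockdep_cookie);
 *	}
 */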

struct i915_vma_resource *i915_vma_resource_alloc(void);

void i915_vma_resource_free(struct i915_vma_resource *vma_res);

struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res,
					   u32 *tlb);

void __i915_vma_resource_init(struct i915_vma_resource *vma_res);

/**
 * i915_vma_resource_get - Take a reference on a vma resource
 * @vma_res: The vma resource on which to take a reference.
 *
 * Return: The @vma_res pointer
 */
static inline struct i915_vma_resource
*i915_vma_resource_get(struct i915_vma_resource *vma_res)
{
	dma_fence_get(&vma_res->unbind_fence);
	return vma_res;
}

/**
 * i915_vma_resource_put - Release a reference to a struct i915_vma_resource
 * @vma_res: The resource
 */
static inline void i915_vma_resource_put(struct i915_vma_resource *vma_res)
{
	dma_fence_put(&vma_res->unbind_fence);
}
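
/*
 * Sketch (illustrative only, assuming the usual vma->resource pointer): the
 * refcount piggybacks on the embedded @unbind_fence, so taking a reference
 * keeps the structure itself alive across an unbind, but does not keep the
 * binding in place:
 *
 *	vma_res = i915_vma_resource_get(vma->resource);
 *	(snapshotted fields such as vma_res->start remain readable)
 *	i915_vma_resource_put(vma_res);
 */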

/**
 * i915_vma_resource_init - Initialize a vma resource.
 * @vma_res: The vma resource to initialize
 * @vm: Pointer to the vm.
 * @pages: The pages sg-table.
 * @page_sizes: Page sizes of the pages.
 * @pages_rsgt: Pointer to a struct i915_refct_sgt of an object with
 * delayed destruction.
 * @readonly: Whether the vma should be bound read-only.
 * @lmem: Whether the vma points to lmem.
 * @mr: The memory region of the object the vma points to.
 * @ops: The backend ops.
 * @private: Bind backend private info.
 * @start: Offset into the address space of bind range start after padding.
 * @node_size: Size of the allocated range manager node minus padding.
 * @size: Bind size.
 * @guard: The size of the guard area preceding and trailing the bind.
 *
 * Initializes a vma resource allocated using i915_vma_resource_alloc().
 * The reason for having separate allocation and initialization functions is
 * that initialization may need to be performed from under a lock where
 * allocation is not allowed.
 */
static inline void i915_vma_resource_init(struct i915_vma_resource *vma_res,
					  struct i915_address_space *vm,
					  struct sg_table *pages,
					  const struct i915_page_sizes *page_sizes,
					  struct i915_refct_sgt *pages_rsgt,
					  bool readonly,
					  bool lmem,
					  struct intel_memory_region *mr,
					  const struct i915_vma_ops *ops,
					  void *private,
					  u64 start,
					  u64 node_size,
					  u64 size,
					  u32 guard)
{
	__i915_vma_resource_init(vma_res);
	vma_res->vm = vm;
	vma_res->bi.pages = pages;
	vma_res->bi.page_sizes = *page_sizes;
	if (pages_rsgt)
		vma_res->bi.pages_rsgt = i915_refct_sgt_get(pages_rsgt);
	vma_res->bi.readonly = readonly;
	vma_res->bi.lmem = lmem;
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	vma_res->mr = mr;
#endif
	vma_res->ops = ops;
	vma_res->private = private;
	vma_res->start = start;
	vma_res->node_size = node_size;
	vma_res->vma_size = size;
	vma_res->guard = guard;
}
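
/*
 * Sketch of the intended alloc/init split (illustrative only): allocate the
 * resource while allocation is still allowed, and fill it in later from
 * within the locked or otherwise non-allocating section:
 *
 *	vma_res = i915_vma_resource_alloc();
 *	if (IS_ERR(vma_res))
 *		return PTR_ERR(vma_res);
 *
 *	(... enter the region where allocation is not allowed ...)
 *	i915_vma_resource_init(vma_res, vm, pages, &page_sizes, pages_rsgt,
 *			       readonly, lmem, mr, ops, private, start,
 *			       node_size, size, guard);
 */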

/*
 * i915_vma_resource_fini() undoes i915_vma_resource_init(), dropping the
 * backing store reference taken there and finalizing the dependency fence.
 * Only the vma's own hold may remain on the resource.
 */
static inline void i915_vma_resource_fini(struct i915_vma_resource *vma_res)
{
	GEM_BUG_ON(refcount_read(&vma_res->hold_count) != 1);
	if (vma_res->bi.pages_rsgt)
		i915_refct_sgt_put(vma_res->bi.pages_rsgt);
	i915_sw_fence_fini(&vma_res->chain);
}

int i915_vma_resource_bind_dep_sync(struct i915_address_space *vm,
				    u64 first,
				    u64 last,
				    bool intr);

int i915_vma_resource_bind_dep_await(struct i915_address_space *vm,
				     struct i915_sw_fence *sw_fence,
				     u64 first,
				     u64 last,
				     bool intr,
				     gfp_t gfp);

void i915_vma_resource_bind_dep_sync_all(struct i915_address_space *vm);

void i915_vma_resource_module_exit(void);

int i915_vma_resource_module_init(void);

#endif