/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#ifndef _TTM_BO_API_H_
#define _TTM_BO_API_H_

#include <drm/drm_gem.h>

#include <linux/kref.h>
#include <linux/list.h>

#include "ttm_device.h"

/* Default number of pre-faulted pages in the TTM fault handler */
#define TTM_BO_VM_NUM_PREFAULT 16

struct iosys_map;

struct ttm_global;
struct ttm_device;
struct ttm_placement;
struct ttm_place;
struct ttm_resource;
struct ttm_resource_manager;
struct ttm_tt;
/**
 * enum ttm_bo_type
 *
 * @ttm_bo_type_device:	These are 'normal' buffers that can
 * be mmapped by user space. Each of these bos occupies a slot in the
 * device address space that can be used for normal vm operations.
 *
 * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
 * but they cannot be accessed from user-space. For kernel-only use.
 *
 * @ttm_bo_type_sg: Buffer made from a dma-buf sg table shared with another
 * driver.
 */
enum ttm_bo_type {
	ttm_bo_type_device,
	ttm_bo_type_kernel,
	ttm_bo_type_sg
};

/**
 * struct ttm_buffer_object
 *
 * @base: drm_gem_object superclass data.
 * @bdev: Pointer to the buffer object device structure.
 * @type: The bo type.
 * @page_alignment: Page alignment.
 * @destroy: Destruction function. If NULL, kfree is used.
 * @kref: Reference count of this buffer object. When this refcount reaches
 * zero, the object is destroyed or put on the delayed delete list.
 * @resource: structure describing current placement.
 * @ttm: TTM structure holding system pages.
 * @deleted: True if the object is only a zombie and already deleted.
 * @bulk_move: The bulk move object.
 * @priority: Priority for LRU; BOs with lower priority are evicted first.
 * @pin_count: Pin count.
 *
 * Base class for TTM buffer objects that deals with data placement and CPU
 * mappings. GPU mappings are really up to the driver, but for simpler GPUs
 * the driver can usually use the placement offset directly as the GPU
 * virtual address. For drivers implementing multiple GPU memory manager
 * contexts, the driver should manage the address space in these contexts
 * separately and use these objects to get the correct placement and
 * caching for these GPU maps. This makes it possible to use these objects
 * for even quite elaborate memory management schemes. The destroy member
 * and the API visibility of this object make it possible to derive
 * driver-specific types.
 */
struct ttm_buffer_object {
	struct drm_gem_object base;

	/*
	 * Members constant at init.
	 */
	struct ttm_device *bdev;
	enum ttm_bo_type type;
	uint32_t page_alignment;
	void (*destroy) (struct ttm_buffer_object *);

	/*
	 * Members not needing protection.
	 */
	struct kref kref;

	/*
	 * Members protected by the bo::resv::reserved lock.
	 */
	struct ttm_resource *resource;
	struct ttm_tt *ttm;
	bool deleted;
	struct ttm_lru_bulk_move *bulk_move;
	unsigned priority;
	unsigned pin_count;

	/**
	 * @delayed_delete: Work item used when we can't delete the BO
	 * immediately
	 */
	struct work_struct delayed_delete;

	/**
	 * @sg: external source of pages and DMA addresses, protected by the
	 * reservation lock.
	 */
	struct sg_table *sg;
};

#define TTM_BO_MAP_IOMEM_MASK 0x80

/**
 * struct ttm_bo_kmap_obj
 *
 * @virtual: The current kernel virtual address.
 * @page: The page when kmap'ing a single page.
 * @bo_kmap_type: Type of bo_kmap.
 * @bo: The TTM BO.
 *
 * Object describing a kernel mapping. Since a TTM bo may be located
 * in various memory types with various caching policies, the
 * mapping can either be an ioremap, a vmap, a kmap or part of a
 * premapped region.
 */
struct ttm_bo_kmap_obj {
	void *virtual;
	struct page *page;
	enum {
		ttm_bo_map_iomap        = 1 | TTM_BO_MAP_IOMEM_MASK,
		ttm_bo_map_vmap         = 2,
		ttm_bo_map_kmap         = 3,
		ttm_bo_map_premapped    = 4 | TTM_BO_MAP_IOMEM_MASK,
	} bo_kmap_type;
	struct ttm_buffer_object *bo;
};

/**
 * struct ttm_operation_ctx
 *
 * @interruptible: Sleep interruptible if sleeping.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @gfp_retry_mayfail: Set __GFP_RETRY_MAYFAIL when allocating pages.
 * @allow_res_evict: Allow eviction of reserved BOs. Can be used when multiple
 * BOs share the same reservation object.
 * @resv: Reservation object to allow reserved evictions with.
 * @bytes_moved: Statistics on how many bytes have been moved.
 *
 * Context for TTM operations like changing buffer placement or general memory
 * allocation.
 */
struct ttm_operation_ctx {
	bool interruptible;
	bool no_wait_gpu;
	bool gfp_retry_mayfail;
	bool allow_res_evict;
	struct dma_resv *resv;
	uint64_t bytes_moved;
};
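
/*
 * Minimal usage sketch (illustrative, not part of this API): drivers
 * typically build a struct ttm_operation_ctx on the stack and pass it to
 * ttm_bo_validate() and friends. "driver_placement" below is a hypothetical
 * driver-defined placement.
 *
 *	struct ttm_operation_ctx ctx = {
 *		.interruptible = true,	// allow signals to interrupt waits
 *		.no_wait_gpu = false,	// OK to block on GPU activity
 *	};
 *	int ret = ttm_bo_validate(bo, &driver_placement, &ctx);
 */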

struct ttm_lru_walk;

/** struct ttm_lru_walk_ops - Operations for a LRU walk. */
struct ttm_lru_walk_ops {
	/**
	 * process_bo - Process this bo.
	 * @walk: struct ttm_lru_walk describing the walk.
	 * @bo: A locked and referenced buffer object.
	 *
	 * Return: Negative error code on error; a user-defined positive value
	 * (typically, but not always, the size of the processed bo) on
	 * success. 0 also indicates success, and -EBUSY means this bo was
	 * skipped. On success, the returned values are summed by the walk,
	 * and the walk exits when its target is met.
	 */
	s64 (*process_bo)(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo);
};

/**
 * struct ttm_lru_walk_arg - Common part for the variants of BO LRU walk.
 */
struct ttm_lru_walk_arg {
	/** @ctx: Pointer to the struct ttm_operation_ctx. */
	struct ttm_operation_ctx *ctx;
	/** @ticket: The struct ww_acquire_ctx if any. */
	struct ww_acquire_ctx *ticket;
	/** @trylock_only: Only use trylock for locking. */
	bool trylock_only;
};

/**
 * struct ttm_lru_walk - Structure describing a LRU walk.
 */
struct ttm_lru_walk {
	/** @ops: Pointer to the ops structure. */
	const struct ttm_lru_walk_ops *ops;
	/** @arg: Common bo LRU walk arguments. */
	struct ttm_lru_walk_arg arg;
};

s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
			   struct ttm_resource_manager *man, s64 target);
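
/*
 * Hedged sketch of wiring up a walk (the callback name and its accounting
 * are illustrative): each bo is handed to process_bo locked and referenced,
 * and the positive return values are summed until @target is reached.
 *
 *	static s64 driver_count_bo(struct ttm_lru_walk *walk,
 *				   struct ttm_buffer_object *bo)
 *	{
 *		return bo->base.size;	// count bytes toward the target
 *	}
 *
 *	static const struct ttm_lru_walk_ops driver_walk_ops = {
 *		.process_bo = driver_count_bo,
 *	};
 *
 *	struct ttm_lru_walk walk = {
 *		.ops = &driver_walk_ops,
 *		.arg = { .ctx = &ctx, .trylock_only = true },
 *	};
 *	s64 progress = ttm_lru_walk_for_evict(&walk, bdev, man, target);
 */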

/**
 * struct ttm_bo_shrink_flags - flags to govern the bo shrinking behaviour
 * @purge: Purge the content rather than backing it up.
 * @writeback: Attempt to immediately write content to swap space.
 * @allow_move: Allow moving to system before shrinking. This is typically
 * not desired for zombie or ghost objects (a zombie object being an object
 * with a zero gem object refcount).
 */
struct ttm_bo_shrink_flags {
	u32 purge : 1;
	u32 writeback : 1;
	u32 allow_move : 1;
};

long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
		   const struct ttm_bo_shrink_flags flags);

bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx);

bool ttm_bo_shrink_avoid_wait(void);
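
/*
 * A rough sketch of how the three helpers above might combine in a driver
 * shrinker, assuming @bo is already locked; the flag choices are
 * illustrative, not prescriptive.
 *
 *	struct ttm_operation_ctx ctx = {
 *		.no_wait_gpu = ttm_bo_shrink_avoid_wait(),
 *		.gfp_retry_mayfail = true,
 *	};
 *
 *	if (ttm_bo_shrink_suitable(bo, &ctx)) {
 *		struct ttm_bo_shrink_flags flags = { .writeback = true };
 *		long shrunk = ttm_bo_shrink(&ctx, bo, flags);
 *	}
 */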

/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Locks a buffer object for validation (or prevents other processes from
 * locking it for validation), while taking a number of measures to prevent
 * deadlocks.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again.
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EALREADY: @bo is already reserved using @ticket. This error code is only
 * returned if a @ticket is provided.
 */
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
				 bool interruptible, bool no_wait,
				 struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	if (no_wait) {
		bool success;

		if (WARN_ON(ticket))
			return -EBUSY;

		success = dma_resv_trylock(bo->base.resv);
		return success ? 0 : -EBUSY;
	}

	if (interruptible)
		ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
	else
		ret = dma_resv_lock(bo->base.resv, ticket);
	if (ret == -EINTR)
		return -ERESTARTSYS;
	return ret;
}

/**
 * ttm_bo_reserve_slowpath:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @ticket: Ticket used to acquire the ww_mutex.
 *
 * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 */
static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
					  bool interruptible,
					  struct ww_acquire_ctx *ticket)
{
	if (interruptible) {
		int ret = dma_resv_lock_slow_interruptible(bo->base.resv,
							   ticket);
		if (ret == -EINTR)
			ret = -ERESTARTSYS;
		return ret;
	}
	dma_resv_lock_slow(bo->base.resv, ticket);
	return 0;
}
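
/*
 * Illustrative sketch (error handling trimmed) of the classic wound/wait
 * backoff dance these two helpers enable when reserving several buffer
 * objects under one ticket:
 *
 *	struct ww_acquire_ctx ticket;
 *
 *	ww_acquire_init(&ticket, &reservation_ww_class);
 *	ret = ttm_bo_reserve(bo1, true, false, &ticket);
 *	ret = ttm_bo_reserve(bo2, true, false, &ticket);
 *	if (ret == -EDEADLK) {
 *		ttm_bo_unreserve(bo1);	// drop everything we hold
 *		ret = ttm_bo_reserve_slowpath(bo2, true, &ticket);
 *		// ... then re-reserve bo1 with the same ticket
 *	}
 *	ww_acquire_done(&ticket);
 */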

void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);

static inline void
ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
{
	spin_lock(&bo->bdev->lru_lock);
	ttm_bo_move_to_lru_tail(bo);
	spin_unlock(&bo->bdev->lru_lock);
}

static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
				     struct ttm_resource *new_mem)
{
	WARN_ON(bo->resource);
	bo->resource = new_mem;
}

/**
 * ttm_bo_move_null - assign memory for a buffer object.
 * @bo: The bo to assign the memory to
 * @new_mem: The memory to be assigned.
 *
 * Free the buffer object's current resource and assign @new_mem in its
 * place.
 */
static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
				    struct ttm_resource *new_mem)
{
	ttm_resource_free(bo, &bo->resource);
	ttm_bo_assign_mem(bo, new_mem);
}

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	ttm_bo_move_to_lru_tail_unlocked(bo);
	dma_resv_unlock(bo->base.resv);
}

/**
 * ttm_kmap_obj_virtual
 *
 * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
 * @is_iomem: Pointer to a boolean that on return is true if the
 * virtual map is io memory, false if normal memory.
 *
 * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
 * If *is_iomem is true on return, the virtual address points to an io memory
 * area that should strictly be accessed by iowriteXX() and similar functions.
 */
static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
					 bool *is_iomem)
{
	*is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK);
	return map->virtual;
}
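
/*
 * Minimal sketch, assuming @bo is already reserved: map the first page of
 * a bo, honour the io-memory distinction, then unmap.
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *virtual;
 *
 *	ret = ttm_bo_kmap(bo, 0, 1, &map);
 *	if (ret)
 *		return ret;
 *	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	if (is_iomem)
 *		iowrite32(value, (void __iomem *)virtual);
 *	else
 *		*(u32 *)virtual = value;
 *	ttm_bo_kunmap(&map);
 */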

int ttm_bo_wait_ctx(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx);
int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    struct ttm_operation_ctx *ctx);
void ttm_bo_put(struct ttm_buffer_object *bo);
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
			  struct ttm_lru_bulk_move *bulk);
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place);
int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, struct ttm_operation_ctx *ctx,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy)(struct ttm_buffer_object *));
int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, bool interruptible,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy)(struct ttm_buffer_object *));
int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
		unsigned long num_pages, struct ttm_bo_kmap_obj *map);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page);
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map);
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map);
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
		   struct ttm_resource_manager *man, gfp_t gfp_flags,
		   s64 target);
void ttm_bo_pin(struct ttm_buffer_object *bo);
void ttm_bo_unpin(struct ttm_buffer_object *bo);
int ttm_bo_evict_first(struct ttm_device *bdev,
		       struct ttm_resource_manager *man,
		       struct ttm_operation_ctx *ctx);
int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset,
		  void *buf, int len, int write);
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
			     struct vm_fault *vmf);
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
				    pgprot_t prot,
				    pgoff_t num_prefault);
vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf);
void ttm_bo_vm_open(struct vm_area_struct *vma);
void ttm_bo_vm_close(struct vm_area_struct *vma);
int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
		     void *buf, int len, int write);
vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot);
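
/*
 * The ttm_bo_vm_* helpers above slot into a driver's vm_operations_struct;
 * a typical (illustrative) hookup looks like:
 *
 *	static const struct vm_operations_struct driver_ttm_vm_ops = {
 *		.fault = ttm_bo_vm_fault,
 *		.open = ttm_bo_vm_open,
 *		.close = ttm_bo_vm_close,
 *		.access = ttm_bo_vm_access,
 *	};
 */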

int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_resource **mem,
		     struct ttm_operation_ctx *ctx);

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
/*
 * ttm_bo_util.c
 */
int ttm_mem_io_reserve(struct ttm_device *bdev,
		       struct ttm_resource *mem);
void ttm_mem_io_free(struct ttm_device *bdev,
		     struct ttm_resource *mem);
void ttm_move_memcpy(bool clear, u32 num_pages,
		     struct ttm_kmap_iter *dst_iter,
		     struct ttm_kmap_iter *src_iter);
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *new_mem);
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence, bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem);
void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
			      struct ttm_resource *new_mem);
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
		     pgprot_t tmp);
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
int ttm_bo_populate(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx);

/* Driver LRU walk helpers initially targeted for shrinking. */

/**
 * struct ttm_bo_lru_cursor - Iterator cursor for TTM LRU list looping
 */
struct ttm_bo_lru_cursor {
	/** @res_curs: Embedded struct ttm_resource_cursor. */
	struct ttm_resource_cursor res_curs;
	/**
	 * @bo: Buffer object pointer if a buffer object is refcounted,
	 * NULL otherwise.
	 */
	struct ttm_buffer_object *bo;
	/**
	 * @needs_unlock: Valid iff @bo != NULL. The bo resv needs
	 * unlock before the next iteration or after loop exit.
	 */
	bool needs_unlock;
	/** @arg: Pointer to common BO LRU walk arguments. */
	struct ttm_lru_walk_arg *arg;
};

void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs);

struct ttm_bo_lru_cursor *
ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs,
		       struct ttm_resource_manager *man,
		       struct ttm_lru_walk_arg *arg);

struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs);

struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs);

/*
 * Defines needed to use autocleanup (linux/cleanup.h) with struct ttm_bo_lru_cursor.
 */
DEFINE_CLASS(ttm_bo_lru_cursor, struct ttm_bo_lru_cursor *,
	     if (_T) { ttm_bo_lru_cursor_fini(_T); },
	     ttm_bo_lru_cursor_init(curs, man, arg),
	     struct ttm_bo_lru_cursor *curs, struct ttm_resource_manager *man,
	     struct ttm_lru_walk_arg *arg);
static inline void *
class_ttm_bo_lru_cursor_lock_ptr(class_ttm_bo_lru_cursor_t *_T)
{ return *_T; }
#define class_ttm_bo_lru_cursor_is_conditional false

/**
 * ttm_bo_lru_for_each_reserved_guarded() - Iterate over buffer objects owning
 * resources on LRU lists.
 * @_cursor: struct ttm_bo_lru_cursor to use for the iteration.
 * @_man: The resource manager whose LRU lists to iterate over.
 * @_arg: The struct ttm_lru_walk_arg to govern the LRU walk.
 * @_bo: The struct ttm_buffer_object pointer pointing to the buffer object
 * for the current iteration.
 *
 * Iterate over all resources of @_man and for each resource, attempt to
 * reference and lock (using the locking mode detailed in @_arg) the buffer
 * object it points to. If successful, assign @_bo to the address of the
 * buffer object and update @_cursor. The iteration is guarded in the
 * sense that @_cursor will be initialized before looping starts and cleaned
 * up at looping termination, even if terminated prematurely by, for
 * example, a return or break statement. Exiting the loop will also unlock
 * (if needed) and unreference @_bo.
 *
 * Return: If locking of a bo returns an error, then iteration is terminated
 * and @_bo is set to a corresponding error pointer. It's illegal to
 * dereference @_bo after loop exit.
 */
#define ttm_bo_lru_for_each_reserved_guarded(_cursor, _man, _arg, _bo)	\
	scoped_guard(ttm_bo_lru_cursor, _cursor, _man, _arg)		\
		for ((_bo) = ttm_bo_lru_cursor_first(_cursor);		\
		       !IS_ERR_OR_NULL(_bo);				\
		       (_bo) = ttm_bo_lru_cursor_next(_cursor))
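
/*
 * Usage sketch (names other than the macro and TTM helpers are
 * illustrative, e.g. the hypothetical driver_wants_to_stop()): each @_bo
 * arrives locked and referenced according to @_arg, and cleanup happens
 * automatically on any loop exit.
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = false };
 *	struct ttm_lru_walk_arg arg = { .ctx = &ctx, .trylock_only = true };
 *	struct ttm_bo_lru_cursor cursor;
 *	struct ttm_buffer_object *bo;
 *
 *	ttm_bo_lru_for_each_reserved_guarded(&cursor, man, &arg, bo) {
 *		// bo is locked and referenced here
 *		if (driver_wants_to_stop(bo))
 *			break;	// unlock and unref are handled on exit
 *	}
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 */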

#endif