Lines matching refs: ref (cross-reference listing of the i915 driver's i915_active.c; each hit shows the source line number, the matching code, and the enclosing function or context)

29 	struct i915_active *ref;  member
77 struct i915_active *ref = addr; in active_debug_hint() local
79 return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref; in active_debug_hint()
87 static void debug_active_init(struct i915_active *ref) in debug_active_init() argument
89 debug_object_init(ref, &active_debug_desc); in debug_active_init()
92 static void debug_active_activate(struct i915_active *ref) in debug_active_activate() argument
94 lockdep_assert_held(&ref->tree_lock); in debug_active_activate()
95 if (!atomic_read(&ref->count)) /* before the first inc */ in debug_active_activate()
96 debug_object_activate(ref, &active_debug_desc); in debug_active_activate()
99 static void debug_active_deactivate(struct i915_active *ref) in debug_active_deactivate() argument
101 lockdep_assert_held(&ref->tree_lock); in debug_active_deactivate()
102 if (!atomic_read(&ref->count)) /* after the last dec */ in debug_active_deactivate()
103 debug_object_deactivate(ref, &active_debug_desc); in debug_active_deactivate()
106 static void debug_active_fini(struct i915_active *ref) in debug_active_fini() argument
108 debug_object_free(ref, &active_debug_desc); in debug_active_fini()
111 static void debug_active_assert(struct i915_active *ref) in debug_active_assert() argument
113 debug_object_assert_init(ref, &active_debug_desc); in debug_active_assert()
118 static inline void debug_active_init(struct i915_active *ref) { } in debug_active_init() argument
119 static inline void debug_active_activate(struct i915_active *ref) { } in debug_active_activate() argument
120 static inline void debug_active_deactivate(struct i915_active *ref) { } in debug_active_deactivate() argument
121 static inline void debug_active_fini(struct i915_active *ref) { } in debug_active_fini() argument
122 static inline void debug_active_assert(struct i915_active *ref) { } in debug_active_assert() argument
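The five debug_active_* hooks follow the standard debugobjects split: real trackers when the driver's debug-objects support is configured in (lines 87-113), empty static inlines otherwise (lines 118-122), so production builds pay nothing. A minimal sketch of the same pattern, with an invented object type and a hypothetical config symbol:

        #include <linux/debugobjects.h>

        struct widget { int state; };

        #if IS_ENABLED(CONFIG_DEBUG_WIDGET)             /* hypothetical symbol */
        static void *widget_debug_hint(void *addr)
        {
                return addr;            /* cf. active_debug_hint() above */
        }

        static const struct debug_obj_descr widget_debug_desc = {
                .name = "widget",
                .debug_hint = widget_debug_hint,
        };

        static void debug_widget_init(struct widget *w)
        {
                debug_object_init(w, &widget_debug_desc);
        }

        static void debug_widget_fini(struct widget *w)
        {
                debug_object_free(w, &widget_debug_desc);
        }
        #else
        static inline void debug_widget_init(struct widget *w) { }
        static inline void debug_widget_fini(struct widget *w) { }
        #endif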
127 __active_retire(struct i915_active *ref) in __active_retire() argument
133 GEM_BUG_ON(i915_active_is_idle(ref)); in __active_retire()
136 if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags)) in __active_retire()
139 GEM_BUG_ON(rcu_access_pointer(ref->excl.fence)); in __active_retire()
140 debug_active_deactivate(ref); in __active_retire()
143 if (!ref->cache) in __active_retire()
144 ref->cache = fetch_node(ref->tree.rb_node); in __active_retire()
147 if (ref->cache) { in __active_retire()
149 rb_erase(&ref->cache->node, &ref->tree); in __active_retire()
150 root = ref->tree; in __active_retire()
153 rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node); in __active_retire()
154 rb_insert_color(&ref->cache->node, &ref->tree); in __active_retire()
155 GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node); in __active_retire()
158 ref->cache->timeline = 0; /* needs cmpxchg(u64) */ in __active_retire()
161 spin_unlock_irqrestore(&ref->tree_lock, flags); in __active_retire()
164 if (ref->retire) in __active_retire()
165 ref->retire(ref); in __active_retire()
168 wake_up_var(ref); in __active_retire()
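__active_retire() is the only place ref->count can reach zero. atomic_dec_and_lock_irqsave() (line 136) lets every non-final drop return without touching tree_lock; the final drop arrives with the lock held, keeps at most one rbtree node around as the ref->cache fast-path hint (lines 143-158), runs the optional ->retire() callback, and wakes anyone sleeping in __i915_active_wait(). A reduced model of that gate, with stand-in type and field names (the tree handling is omitted):

        #include <linux/atomic.h>
        #include <linux/spinlock.h>
        #include <linux/wait_bit.h>

        struct tracker {
                atomic_t count;
                spinlock_t lock;
                void (*retire)(struct tracker *t);
        };

        static void __tracker_retire(struct tracker *t)
        {
                unsigned long flags;

                /* All but the final drop bail out here, lock untouched. */
                if (!atomic_dec_and_lock_irqsave(&t->count, &t->lock, flags))
                        return;

                /* Last reference: teardown is serialized by the lock. */
                spin_unlock_irqrestore(&t->lock, flags);

                if (t->retire)
                        t->retire(t);

                wake_up_var(t);         /* pairs with wait_var_event(t, ...) */
        }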
180 struct i915_active *ref = container_of(wrk, typeof(*ref), work); in active_work() local
182 GEM_BUG_ON(!atomic_read(&ref->count)); in active_work()
183 if (atomic_add_unless(&ref->count, -1, 1)) in active_work()
186 __active_retire(ref); in active_work()
190 active_retire(struct i915_active *ref) in active_retire() argument
192 GEM_BUG_ON(!atomic_read(&ref->count)); in active_retire()
193 if (atomic_add_unless(&ref->count, -1, 1)) in active_retire()
196 if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) { in active_retire()
197 queue_work(system_unbound_wq, &ref->work); in active_retire()
201 __active_retire(ref); in active_retire()
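active_retire() splits the last drop by context: atomic_add_unless(&ref->count, -1, 1) consumes any reference that is not the final one, and when the ->retire() callback may sleep (I915_ACTIVE_RETIRE_SLEEPS) the final drop is bounced to system_unbound_wq, where active_work() re-checks the count and finishes via __active_retire(). A self-contained sketch of that deferral (type and flag names invented; the final-drop gate is stubbed):

        #include <linux/atomic.h>
        #include <linux/bits.h>
        #include <linux/workqueue.h>

        struct wtracker {
                atomic_t count;
                unsigned long flags;
        #define WTRACKER_RETIRE_SLEEPS BIT(0)   /* cf. I915_ACTIVE_RETIRE_SLEEPS */
                struct work_struct work;
        };

        static void __wtracker_retire(struct wtracker *t)
        {
                /* final-drop gate, as in the __active_retire() sketch above */
        }

        static void wtracker_work(struct work_struct *wrk)
        {
                struct wtracker *t = container_of(wrk, typeof(*t), work);

                /* Someone re-acquired in the meantime? Then just drop. */
                if (atomic_add_unless(&t->count, -1, 1))
                        return;

                __wtracker_retire(t);   /* worker context: sleeping is fine */
        }

        static void wtracker_put(struct wtracker *t)
        {
                /* Drop the reference unless it is the last one (count == 1). */
                if (atomic_add_unless(&t->count, -1, 1))
                        return;

                /* Final drop: retire inline, or defer when it may sleep. */
                if (t->flags & WTRACKER_RETIRE_SLEEPS)
                        queue_work(system_unbound_wq, &t->work);
                else
                        __wtracker_retire(t);
        }

INIT_WORK(&t->work, wtracker_work) would be wired up at init time, mirroring the INIT_WORK at line 360 below.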
223 active_retire(container_of(cb, struct active_node, base.cb)->ref); in node_retire()
233 static struct active_node *__active_lookup(struct i915_active *ref, u64 idx) in __active_lookup() argument
246 it = READ_ONCE(ref->cache); in __active_lookup()
271 GEM_BUG_ON(i915_active_is_idle(ref)); in __active_lookup()
273 it = fetch_node(ref->tree.rb_node); in __active_lookup()
280 WRITE_ONCE(ref->cache, it); in __active_lookup()
290 active_instance(struct i915_active *ref, u64 idx) in active_instance() argument
295 node = __active_lookup(ref, idx); in active_instance()
299 spin_lock_irq(&ref->tree_lock); in active_instance()
300 GEM_BUG_ON(i915_active_is_idle(ref)); in active_instance()
303 p = &ref->tree.rb_node; in active_instance()
326 node->ref = ref; in active_instance()
330 rb_insert_color(&node->node, &ref->tree); in active_instance()
333 WRITE_ONCE(ref->cache, node); in active_instance()
334 spin_unlock_irq(&ref->tree_lock); in active_instance()
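Lookup is designed to be lockless: __active_lookup() first tries the ref->cache hint, published with WRITE_ONCE() (lines 280 and 333) so concurrent readers see a consistent pointer, and only falls back to walking the rbtree; active_instance() takes tree_lock solely to insert a missing node. A compact model of the hinted lookup, with simplified types (the write side must still follow the driver's locking rules for a lockless walk like this to be safe):

        #include <linux/compiler.h>
        #include <linux/rbtree.h>
        #include <linux/types.h>

        struct tl_node {
                struct rb_node rb;
                u64 timeline;
        };

        struct tl_tree {
                struct rb_root root;
                struct tl_node *cache;  /* last match, read locklessly */
        };

        static struct tl_node *tl_lookup(struct tl_tree *t, u64 idx)
        {
                struct tl_node *n = READ_ONCE(t->cache);
                struct rb_node *rb;

                /* Fast path: repeated lookups tend to hit the same timeline. */
                if (n && n->timeline == idx)
                        return n;

                for (rb = READ_ONCE(t->root.rb_node); rb; ) {
                        n = rb_entry(rb, struct tl_node, rb);
                        if (n->timeline == idx) {
                                WRITE_ONCE(t->cache, n);        /* publish the hint */
                                return n;
                        }
                        rb = idx < n->timeline ? READ_ONCE(rb->rb_left)
                                               : READ_ONCE(rb->rb_right);
                }
                return NULL;
        }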
339 void __i915_active_init(struct i915_active *ref, in __i915_active_init() argument
340 int (*active)(struct i915_active *ref), in __i915_active_init() argument
341 void (*retire)(struct i915_active *ref), in __i915_active_init() argument
346 debug_active_init(ref); in __i915_active_init()
348 ref->flags = flags; in __i915_active_init()
349 ref->active = active; in __i915_active_init()
350 ref->retire = retire; in __i915_active_init()
352 spin_lock_init(&ref->tree_lock); in __i915_active_init()
353 ref->tree = RB_ROOT; in __i915_active_init()
354 ref->cache = NULL; in __i915_active_init()
356 init_llist_head(&ref->preallocated_barriers); in __i915_active_init()
357 atomic_set(&ref->count, 0); in __i915_active_init()
358 __mutex_init(&ref->mutex, "i915_active", mkey); in __i915_active_init()
359 __i915_active_fence_init(&ref->excl, NULL, excl_retire); in __i915_active_init()
360 INIT_WORK(&ref->work, active_work); in __i915_active_init()
362 lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0); in __i915_active_init()
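__i915_active_init() wires up the callbacks, the empty rbtree, the exclusive fence slot, and the retire worker; callers normally reach it through the i915_active_init() macro in i915_active.h, which supplies static lock_class_key instances so lockdep can tell each user's mutex and work apart. A hedged usage sketch (my_obj and both callbacks are invented; the macro is assumed to take the flags parameter seen at line 348):

        #include "i915_active.h"        /* i915_active_init() macro lives here */

        struct my_obj {
                struct i915_active active;      /* embedded tracker */
        };

        /* Runs once, when the tracker first goes busy. */
        static int my_obj_active(struct i915_active *ref)
        {
                return 0;       /* e.g. pin backing storage; 0 on success */
        }

        /* Runs once the last tracked fence has signalled. */
        static void my_obj_retire(struct i915_active *ref)
        {
                /* undo whatever my_obj_active() set up */
        }

        static void my_obj_setup(struct my_obj *obj)
        {
                i915_active_init(&obj->active, my_obj_active, my_obj_retire, 0);
        }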
366 static bool ____active_del_barrier(struct i915_active *ref, in ____active_del_barrier() argument
409 __active_del_barrier(struct i915_active *ref, struct active_node *node) in __active_del_barrier() argument
411 return ____active_del_barrier(ref, node, barrier_to_engine(node)); in __active_del_barrier()
415 replace_barrier(struct i915_active *ref, struct i915_active_fence *active) in replace_barrier() argument
425 __active_del_barrier(ref, node_from_active(active)); in replace_barrier()
429 int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence) in i915_active_ref() argument
435 err = i915_active_acquire(ref); in i915_active_ref()
439 active = active_instance(ref, idx); in i915_active_ref()
445 if (replace_barrier(ref, active)) { in i915_active_ref()
447 atomic_dec(&ref->count); in i915_active_ref()
450 __i915_active_acquire(ref); in i915_active_ref()
453 i915_active_release(ref); in i915_active_ref()
458 __i915_active_set_fence(struct i915_active *ref, in __i915_active_set_fence() argument
464 if (replace_barrier(ref, active)) { in __i915_active_set_fence()
474 __i915_active_acquire(ref); in __i915_active_set_fence()
481 __active_fence(struct i915_active *ref, u64 idx) in __active_fence() argument
485 it = __active_lookup(ref, idx); in __active_fence()
487 spin_lock_irq(&ref->tree_lock); in __active_fence()
488 it = __active_lookup(ref, idx); in __active_fence()
489 spin_unlock_irq(&ref->tree_lock); in __active_fence()
497 __i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence) in __i915_active_ref() argument
500 return __i915_active_set_fence(ref, __active_fence(ref, idx), fence); in __i915_active_ref()
504 i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f) in i915_active_set_exclusive() argument
507 return __i915_active_set_fence(ref, &ref->excl, f); in i915_active_set_exclusive()
510 bool i915_active_acquire_if_busy(struct i915_active *ref) in i915_active_acquire_if_busy() argument
512 debug_active_assert(ref); in i915_active_acquire_if_busy()
513 return atomic_add_unless(&ref->count, 1, 0); in i915_active_acquire_if_busy()
516 static void __i915_active_activate(struct i915_active *ref) in __i915_active_activate() argument
518 spin_lock_irq(&ref->tree_lock); /* __active_retire() */ in __i915_active_activate()
519 if (!atomic_fetch_inc(&ref->count)) in __i915_active_activate()
520 debug_active_activate(ref); in __i915_active_activate()
521 spin_unlock_irq(&ref->tree_lock); in __i915_active_activate()
524 int i915_active_acquire(struct i915_active *ref) in i915_active_acquire() argument
528 if (i915_active_acquire_if_busy(ref)) in i915_active_acquire()
531 if (!ref->active) { in i915_active_acquire()
532 __i915_active_activate(ref); in i915_active_acquire()
536 err = mutex_lock_interruptible(&ref->mutex); in i915_active_acquire()
540 if (likely(!i915_active_acquire_if_busy(ref))) { in i915_active_acquire()
541 err = ref->active(ref); in i915_active_acquire()
543 __i915_active_activate(ref); in i915_active_acquire()
546 mutex_unlock(&ref->mutex); in i915_active_acquire()
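Taken together, lines 510-546 form a two-phase acquire: i915_active_acquire_if_busy() is kref_get_unless_zero() in spirit (atomic_add_unless(&ref->count, 1, 0) takes a reference only if one already exists), and only the first user goes on to take ref->mutex, run the optional ->active() hook, and then publish the 0 -> 1 transition under tree_lock in __i915_active_activate(). Reassembled from the fragments above:

        int i915_active_acquire(struct i915_active *ref)
        {
                int err = 0;

                if (i915_active_acquire_if_busy(ref))
                        return 0;               /* already active, count bumped */

                if (!ref->active) {             /* no hook: just go active */
                        __i915_active_activate(ref);
                        return 0;
                }

                err = mutex_lock_interruptible(&ref->mutex);
                if (err)
                        return err;

                /* Re-check under the mutex: someone may have beaten us here. */
                if (likely(!i915_active_acquire_if_busy(ref))) {
                        err = ref->active(ref);
                        if (!err)
                                __i915_active_activate(ref);
                }

                mutex_unlock(&ref->mutex);
                return err;
        }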
551 int i915_active_acquire_for_context(struct i915_active *ref, u64 idx) in i915_active_acquire_for_context() argument
556 err = i915_active_acquire(ref); in i915_active_acquire_for_context()
560 active = active_instance(ref, idx); in i915_active_acquire_for_context()
562 i915_active_release(ref); in i915_active_acquire_for_context()
569 void i915_active_release(struct i915_active *ref) in i915_active_release() argument
571 debug_active_assert(ref); in i915_active_release()
572 active_retire(ref); in i915_active_release()
605 static int flush_lazy_signals(struct i915_active *ref) in flush_lazy_signals() argument
610 enable_signaling(&ref->excl); in flush_lazy_signals()
611 rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { in flush_lazy_signals()
622 int __i915_active_wait(struct i915_active *ref, int state) in __i915_active_wait() argument
627 if (i915_active_acquire_if_busy(ref)) { in __i915_active_wait()
630 err = flush_lazy_signals(ref); in __i915_active_wait()
631 i915_active_release(ref); in __i915_active_wait()
635 if (___wait_var_event(ref, i915_active_is_idle(ref), in __i915_active_wait()
644 flush_work(&ref->work); in __i915_active_wait()
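__i915_active_wait() pairs with the wake_up_var(ref) at the end of __active_retire(): it pins the ref if it is still busy, forces lazy fence signaling on via flush_lazy_signals(), sleeps on the variable address until i915_active_is_idle(), and finally flushes ref->work in case retirement was punted to the workqueue. The uninterruptible flavour of that wait looks roughly like this (the real helper also threads a task state through ___wait_var_event()):

        #include <linux/wait_bit.h>
        #include <linux/workqueue.h>
        /* assumes the declarations in i915_active.h */

        static void sketch_wait_idle(struct i915_active *ref)
        {
                /* Sleep until a wake_up_var(ref) finds the count at zero. */
                wait_var_event(ref, i915_active_is_idle(ref));

                /* A sleeping retire may still be running; let it finish. */
                flush_work(&ref->work);
        }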
672 struct i915_active *ref; member
680 if (i915_active_is_idle(wb->ref)) { in barrier_wake()
689 static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence) in __await_barrier() argument
697 GEM_BUG_ON(i915_active_is_idle(ref)); in __await_barrier()
706 wb->ref = ref; in __await_barrier()
708 add_wait_queue(__var_waitqueue(ref), &wb->base); in __await_barrier()
712 static int await_active(struct i915_active *ref, in await_active() argument
719 if (!i915_active_acquire_if_busy(ref)) in await_active()
723 rcu_access_pointer(ref->excl.fence)) { in await_active()
724 err = __await_active(&ref->excl, fn, arg); in await_active()
732 rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { in await_active()
740 err = flush_lazy_signals(ref); in await_active()
744 err = __await_barrier(ref, barrier); in await_active()
750 i915_active_release(ref); in await_active()
760 struct i915_active *ref, in i915_request_await_active() argument
763 return await_active(ref, flags, rq_await_fence, rq, &rq->submit); in i915_request_await_active()
773 struct i915_active *ref, in i915_sw_fence_await_active() argument
776 return await_active(ref, flags, sw_await_fence, fence, fence); in i915_sw_fence_await_active()
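Both awaiters funnel through await_active() with a callback: rq_await_fence for requests, sw_await_fence for i915_sw_fences. For each tracked slot the helper grabs a reference to the fence and hands it to the callback; a hedged sketch of that per-fence step (sketch_await_one is invented, the i915_active_fence_get()/dma_fence_put() pairing is the driver's own):

        #include <linux/dma-fence.h>
        /* assumes the declarations in i915_active.h */

        static int sketch_await_one(struct i915_active_fence *active,
                                    int (*fn)(void *arg, struct dma_fence *fence),
                                    void *arg)
        {
                /* i915_active_fence_get() takes a reference under RCU. */
                struct dma_fence *fence = i915_active_fence_get(active);
                int err = 0;

                if (fence) {
                        err = fn(arg, fence);
                        dma_fence_put(fence);
                }

                return err;
        }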
779 void i915_active_fini(struct i915_active *ref) in i915_active_fini() argument
781 debug_active_fini(ref); in i915_active_fini()
782 GEM_BUG_ON(atomic_read(&ref->count)); in i915_active_fini()
783 GEM_BUG_ON(work_pending(&ref->work)); in i915_active_fini()
784 mutex_destroy(&ref->mutex); in i915_active_fini()
786 if (ref->cache) in i915_active_fini()
787 kmem_cache_free(slab_cache, ref->cache); in i915_active_fini()
795 static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx) in reuse_idle_barrier() argument
799 if (RB_EMPTY_ROOT(&ref->tree)) in reuse_idle_barrier()
802 GEM_BUG_ON(i915_active_is_idle(ref)); in reuse_idle_barrier()
811 if (ref->cache && is_idle_barrier(ref->cache, idx)) { in reuse_idle_barrier()
812 p = &ref->cache->node; in reuse_idle_barrier()
817 p = ref->tree.rb_node; in reuse_idle_barrier()
862 ____active_del_barrier(ref, node, engine)) in reuse_idle_barrier()
869 spin_lock_irq(&ref->tree_lock); in reuse_idle_barrier()
870 rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */ in reuse_idle_barrier()
871 if (p == &ref->cache->node) in reuse_idle_barrier()
872 WRITE_ONCE(ref->cache, NULL); in reuse_idle_barrier()
873 spin_unlock_irq(&ref->tree_lock); in reuse_idle_barrier()
878 int i915_active_acquire_preallocate_barrier(struct i915_active *ref, in i915_active_acquire_preallocate_barrier() argument
885 GEM_BUG_ON(i915_active_is_idle(ref)); in i915_active_acquire_preallocate_barrier()
888 while (!llist_empty(&ref->preallocated_barriers)) in i915_active_acquire_preallocate_barrier()
904 node = reuse_idle_barrier(ref, idx); in i915_active_acquire_preallocate_barrier()
914 node->ref = ref; in i915_active_acquire_preallocate_barrier()
929 __i915_active_acquire(ref); in i915_active_acquire_preallocate_barrier()
941 GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers)); in i915_active_acquire_preallocate_barrier()
942 llist_add_batch(first, last, &ref->preallocated_barriers); in i915_active_acquire_preallocate_barrier()
952 atomic_dec(&ref->count); in i915_active_acquire_preallocate_barrier()
960 void i915_active_acquire_barrier(struct i915_active *ref) in i915_active_acquire_barrier() argument
965 GEM_BUG_ON(i915_active_is_idle(ref)); in i915_active_acquire_barrier()
973 llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) { in i915_active_acquire_barrier()
978 spin_lock_irqsave_nested(&ref->tree_lock, flags, in i915_active_acquire_barrier()
981 p = &ref->tree.rb_node; in i915_active_acquire_barrier()
994 rb_insert_color(&node->node, &ref->tree); in i915_active_acquire_barrier()
995 spin_unlock_irqrestore(&ref->tree_lock, flags); in i915_active_acquire_barrier()
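i915_active_acquire_barrier() drains the preallocated barrier llist and moves each node into ref->tree under tree_lock, taken with a nested lockdep class (line 978) because the caller may already hold related locks. Lines 981-994 bracket the standard rbtree descend-link-recolour idiom; written out with the simplified types from the lookup sketch above:

        #include <linux/rbtree.h>
        #include <linux/types.h>

        struct tl_node {
                struct rb_node rb;
                u64 timeline;
        };

        /* Caller holds the tree lock, as i915_active_acquire_barrier() does. */
        static void tl_insert(struct rb_root *root, struct tl_node *node)
        {
                struct rb_node **p = &root->rb_node, *parent = NULL;

                while (*p) {
                        struct tl_node *it = rb_entry(*p, struct tl_node, rb);

                        parent = *p;
                        if (node->timeline < it->timeline)
                                p = &parent->rb_left;
                        else
                                p = &parent->rb_right;
                }

                rb_link_node(&node->rb, parent, p);     /* attach as a red leaf */
                rb_insert_color(&node->rb, root);       /* rebalance */
        }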
1120 struct kref ref; member
1123 struct i915_active *i915_active_get(struct i915_active *ref) in i915_active_get() argument
1125 struct auto_active *aa = container_of(ref, typeof(*aa), base); in i915_active_get()
1127 kref_get(&aa->ref); in i915_active_get()
1131 static void auto_release(struct kref *ref) in auto_release() argument
1133 struct auto_active *aa = container_of(ref, typeof(*aa), ref); in auto_release()
1139 void i915_active_put(struct i915_active *ref) in i915_active_put() argument
1141 struct auto_active *aa = container_of(ref, typeof(*aa), base); in i915_active_put()
1143 kref_put(&aa->ref, auto_release); in i915_active_put()
1146 static int auto_active(struct i915_active *ref) in auto_active() argument
1148 i915_active_get(ref); in auto_active()
1152 static void auto_retire(struct i915_active *ref) in auto_retire() argument
1154 i915_active_put(ref); in auto_retire()
1165 kref_init(&aa->ref); in i915_active_create()
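The closing block is the auto_active wrapper behind i915_active_create(): a kref governs the allocation's lifetime, while auto_active()/auto_retire() take and drop that kref from the tracker's own hooks, so a heap-allocated i915_active cannot be freed while it still tracks fences. Reassembled from lines 1120-1165 (allocation flags assumed):

        #include <linux/kref.h>
        #include <linux/slab.h>

        struct auto_active {
                struct i915_active base;
                struct kref ref;        /* lifetime of the allocation */
        };

        static void auto_release(struct kref *ref)
        {
                struct auto_active *aa = container_of(ref, typeof(*aa), ref);

                i915_active_fini(&aa->base);
                kfree(aa);
        }

        static int auto_active(struct i915_active *ref)
        {
                i915_active_get(ref);   /* kref_get(&aa->ref) under the hood */
                return 0;
        }

        static void auto_retire(struct i915_active *ref)
        {
                i915_active_put(ref);   /* may free via auto_release() */
        }

        struct i915_active *i915_active_create(void)
        {
                struct auto_active *aa;

                aa = kmalloc(sizeof(*aa), GFP_KERNEL);  /* flags assumed */
                if (!aa)
                        return NULL;

                kref_init(&aa->ref);
                i915_active_init(&aa->base, auto_active, auto_retire, 0);

                return &aa->base;
        }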