Lines matching references to 'e' (the struct amdgpu_sync_entry pointer used throughout these functions):
135 struct amdgpu_sync_entry *e; in amdgpu_sync_add_later() local
137 hash_for_each_possible(sync->fences, e, node, f->context) { in amdgpu_sync_add_later()
138 if (dma_fence_is_signaled(e->fence)) { in amdgpu_sync_add_later()
139 dma_fence_put(e->fence); in amdgpu_sync_add_later()
140 e->fence = dma_fence_get(f); in amdgpu_sync_add_later()
144 if (likely(e->fence->context == f->context)) { in amdgpu_sync_add_later()
145 amdgpu_sync_keep_later(&e->fence, f); in amdgpu_sync_add_later()
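For context, lines 135-145 above are the core of amdgpu_sync_add_later(): walk the bucket for the new fence's context, replace an entry whose fence has already signaled, or keep the later of two fences from the same context. The sketch below reconstructs the surrounding function plus the supporting pieces the listing implies; the struct layouts, the hash table size, and the body of amdgpu_sync_keep_later() are assumptions, not copies of the driver source.

#include <linux/dma-fence.h>
#include <linux/hashtable.h>
#include <linux/slab.h>

/* Assumed container: a small hash table of fences keyed by fence context,
 * initialized elsewhere with hash_init(). */
struct amdgpu_sync {
        DECLARE_HASHTABLE(fences, 4);
};

/* Assumed entry layout, implied by the e->node and e->fence accesses above. */
struct amdgpu_sync_entry {
        struct hlist_node node;
        struct dma_fence *fence;
};

/* Slab cache for entries, assumed to be created at driver init. */
static struct kmem_cache *amdgpu_sync_slab;

/* Assumed helper: replace *keep unless it is already the later fence. */
static void amdgpu_sync_keep_later(struct dma_fence **keep,
                                   struct dma_fence *fence)
{
        if (*keep && dma_fence_is_later(*keep, fence))
                return;

        dma_fence_put(*keep);
        *keep = dma_fence_get(fence);
}

/* Try to merge f into an existing entry; true means no new entry is needed. */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
{
        struct amdgpu_sync_entry *e;

        hash_for_each_possible(sync->fences, e, node, f->context) {
                /* An already-signaled fence can simply be replaced. */
                if (dma_fence_is_signaled(e->fence)) {
                        dma_fence_put(e->fence);
                        e->fence = dma_fence_get(f);
                        return true;
                }

                /* Same context: only the later fence has to be remembered. */
                if (likely(e->fence->context == f->context)) {
                        amdgpu_sync_keep_later(&e->fence, f);
                        return true;
                }
        }
        return false;
}

Keying the table by f->context means at most one fence per context is tracked, which is what makes the replace-or-keep-later logic above sufficient.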
164 struct amdgpu_sync_entry *e; in amdgpu_sync_fence() local
172 e = kmem_cache_alloc(amdgpu_sync_slab, flags); in amdgpu_sync_fence()
173 if (!e) in amdgpu_sync_fence()
176 hash_add(sync->fences, &e->node, f->context); in amdgpu_sync_fence()
177 e->fence = dma_fence_get(f); in amdgpu_sync_fence()
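Lines 164-177 show the slow path of amdgpu_sync_fence(): when no existing entry can absorb the fence, a new entry is allocated from amdgpu_sync_slab and hashed under the fence context, taking its own reference. A hedged sketch building on the types above, assuming `flags` is a GFP mask supplied by the caller and that the fast path is simply amdgpu_sync_add_later():

int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
                      gfp_t flags)
{
        struct amdgpu_sync_entry *e;

        if (!f)
                return 0;

        /* Fast path: merge into an existing entry for this context. */
        if (amdgpu_sync_add_later(sync, f))
                return 0;

        /* Slow path: track the fence in a fresh entry of its own. */
        e = kmem_cache_alloc(amdgpu_sync_slab, flags);
        if (!e)
                return -ENOMEM;

        hash_add(sync->fences, &e->node, f->context);
        e->fence = dma_fence_get(f);
        return 0;
}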
300 static void amdgpu_sync_entry_free(struct amdgpu_sync_entry *e) in amdgpu_sync_entry_free() argument
302 hash_del(&e->node); in amdgpu_sync_entry_free()
303 dma_fence_put(e->fence); in amdgpu_sync_entry_free()
304 kmem_cache_free(amdgpu_sync_slab, e); in amdgpu_sync_entry_free()
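Lines 302-304 are essentially the whole of amdgpu_sync_entry_free(); the sketch only adds the signature and a comment:

/* Drop an entry: unlink it, release its fence reference, return it to the slab. */
static void amdgpu_sync_entry_free(struct amdgpu_sync_entry *e)
{
        hash_del(&e->node);
        dma_fence_put(e->fence);
        kmem_cache_free(amdgpu_sync_slab, e);
}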
319 struct amdgpu_sync_entry *e; in amdgpu_sync_peek_fence() local
323 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_peek_fence()
324 struct dma_fence *f = e->fence; in amdgpu_sync_peek_fence()
328 amdgpu_sync_entry_free(e); in amdgpu_sync_peek_fence()
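Lines 319-328 show amdgpu_sync_peek_fence() pruning signaled entries while it scans for the next fence that still has to complete. A simplified sketch; the in-tree version additionally takes a ring argument and special-cases scheduler fences from that ring, which is left out here:

struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        int i;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                struct dma_fence *f = e->fence;

                /* Signaled fences no longer need tracking. */
                if (dma_fence_is_signaled(f)) {
                        amdgpu_sync_entry_free(e);
                        continue;
                }

                /* First fence that is still pending. */
                return f;
        }
        return NULL;
}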
358 struct amdgpu_sync_entry *e; in amdgpu_sync_get_fence() local
363 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_get_fence()
365 f = e->fence; in amdgpu_sync_get_fence()
367 hash_del(&e->node); in amdgpu_sync_get_fence()
368 kmem_cache_free(amdgpu_sync_slab, e); in amdgpu_sync_get_fence()
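Lines 358-368 show amdgpu_sync_get_fence() dismantling entries by hand (hash_del plus kmem_cache_free at 367-368) rather than via amdgpu_sync_entry_free(), because the fence reference is handed to the caller instead of being dropped. A sketch of the surrounding loop:

struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        struct dma_fence *f;
        int i;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                f = e->fence;

                /* Unlink and free the entry, but keep its fence reference. */
                hash_del(&e->node);
                kmem_cache_free(amdgpu_sync_slab, e);

                /* Hand the first still-pending fence to the caller. */
                if (!dma_fence_is_signaled(f))
                        return f;

                /* Already signaled: drop the reference and keep looking. */
                dma_fence_put(f);
        }
        return NULL;
}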
389 struct amdgpu_sync_entry *e; in amdgpu_sync_clone() local
394 hash_for_each_safe(source->fences, i, tmp, e, node) { in amdgpu_sync_clone()
395 f = e->fence; in amdgpu_sync_clone()
401 amdgpu_sync_entry_free(e); in amdgpu_sync_clone()
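Lines 389-401: amdgpu_sync_clone() copies every still-pending fence into another sync object and prunes signaled entries from the source. A sketch, assuming the destination is a second struct amdgpu_sync and that GFP_KERNEL allocations are acceptable in this path:

int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        struct dma_fence *f;
        int i, r;

        hash_for_each_safe(source->fences, i, tmp, e, node) {
                f = e->fence;
                if (!dma_fence_is_signaled(f)) {
                        /* Still pending: the clone needs its own entry. */
                        r = amdgpu_sync_fence(clone, f, GFP_KERNEL);
                        if (r)
                                return r;
                } else {
                        /* Signaled: drop it from the source while we are here. */
                        amdgpu_sync_entry_free(e);
                }
        }
        return 0;
}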
436 struct amdgpu_sync_entry *e; in amdgpu_sync_push_to_job() local
441 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_push_to_job()
442 f = e->fence; in amdgpu_sync_push_to_job()
444 amdgpu_sync_entry_free(e); in amdgpu_sync_push_to_job()
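Lines 436-444: amdgpu_sync_push_to_job() drains the hash table into a scheduler job's dependency list. A sketch, assuming job->base is the drm_sched_job embedded in struct amdgpu_job and that drm_sched_job_add_dependency() (drm/gpu_scheduler.h) consumes the extra reference taken for it on both success and failure; on error the remaining entries stay behind for amdgpu_sync_free() to clean up:

int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        struct dma_fence *f;
        int i, r;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                f = e->fence;

                /* Signaled fences no longer gate the job. */
                if (dma_fence_is_signaled(f)) {
                        amdgpu_sync_entry_free(e);
                        continue;
                }

                /* Give the scheduler its own reference to the dependency. */
                dma_fence_get(f);
                r = drm_sched_job_add_dependency(&job->base, f);
                if (r)
                        return r;

                /* The job holds the fence now; the entry can go. */
                amdgpu_sync_entry_free(e);
        }
        return 0;
}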
460 struct amdgpu_sync_entry *e; in amdgpu_sync_wait() local
464 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_wait()
465 r = dma_fence_wait(e->fence, intr); in amdgpu_sync_wait()
469 amdgpu_sync_entry_free(e); in amdgpu_sync_wait()
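Lines 460-469: amdgpu_sync_wait() blocks on each tracked fence in turn (interruptibly if intr is set) and frees entries as their fences complete. A sketch:

int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        int i, r;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                r = dma_fence_wait(e->fence, intr);
                if (r)
                        return r;

                /* The fence has signaled; stop tracking it. */
                amdgpu_sync_entry_free(e);
        }
        return 0;
}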
484 struct amdgpu_sync_entry *e; in amdgpu_sync_free() local
488 hash_for_each_safe(sync->fences, i, tmp, e, node) in amdgpu_sync_free()
489 amdgpu_sync_entry_free(e); in amdgpu_sync_free()
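Lines 484-489: amdgpu_sync_free() simply releases whatever is still tracked. A sketch:

void amdgpu_sync_free(struct amdgpu_sync *sync)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        int i;

        hash_for_each_safe(sync->fences, i, tmp, e, node)
                amdgpu_sync_entry_free(e);
}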