| /linux/drivers/gpu/drm/i915/ |
| i915_deps.c |
    38 if (deps->fences != &deps->single) in i915_deps_reset_fences()
    39 kfree(deps->fences); in i915_deps_reset_fences()
    42 deps->fences = &deps->single; in i915_deps_reset_fences()
    52 deps->fences = NULL; in i915_deps_init()
    69 dma_fence_put(deps->fences[i]); in i915_deps_fini()
    71 if (deps->fences != &deps->single) in i915_deps_fini()
    72 kfree(deps->fences); in i915_deps_fini()
    89 memcpy(new_fences, deps->fences, in i915_deps_grow()
    91 swap(new_fences, deps->fences); in i915_deps_grow()
    134 struct dma_fence **fences = deps->fences; in i915_deps_sync() local
    [all …]
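
A pattern worth noting in the i915_deps matches: `deps->fences` starts out pointing at the embedded `single` slot and is only kmalloc'ed once the array has to grow, which is why both the reset and fini paths check `deps->fences != &deps->single` before kfree(). A minimal sketch of that inline-storage idiom, with hypothetical `my_deps_*` names standing in for the i915 code:

```c
#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/string.h>

struct my_deps {
	struct dma_fence **fences;    /* aims at &single until grown */
	struct dma_fence *single;
	unsigned int num_deps;
	unsigned int fences_size;     /* current capacity */
};

static void my_deps_init(struct my_deps *deps)
{
	deps->fences = &deps->single;
	deps->num_deps = 0;
	deps->fences_size = 1;
}

static int my_deps_add(struct my_deps *deps, struct dma_fence *fence)
{
	if (deps->num_deps == deps->fences_size) {
		unsigned int new_size = deps->fences_size << 1;
		struct dma_fence **new_fences =
			kmalloc_array(new_size, sizeof(*new_fences),
				      GFP_KERNEL);

		if (!new_fences)
			return -ENOMEM;

		memcpy(new_fences, deps->fences,
		       deps->num_deps * sizeof(*new_fences));
		/* Only free the old array if it was heap-allocated. */
		if (deps->fences != &deps->single)
			kfree(deps->fences);
		deps->fences = new_fences;
		deps->fences_size = new_size;
	}
	deps->fences[deps->num_deps++] = dma_fence_get(fence);
	return 0;
}
```

The embedded slot avoids a heap allocation in the common single-dependency case.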
|
| /linux/drivers/dma-buf/ |
| dma-resv.c |
    270 if (fences) in dma_resv_reset_max_fences()
    271 fences->max_fences = fences->num_fences; in dma_resv_reset_max_fences()
    371 if (cursor->fences) in dma_resv_iter_restart_unlocked()
    379 if (!cursor->fences) in dma_resv_iter_walk_unlocked()
    501 if (!cursor->fences || in dma_resv_iter_next()
    575 *fences = NULL; in dma_resv_get_fences()
    594 kfree(*fences); in dma_resv_get_fences()
    595 *fences = NULL; in dma_resv_get_fences()
    644 *fence = fences[0]; in dma_resv_get_singleton()
    645 kfree(fences); in dma_resv_get_singleton()
    [all …]
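
The dma_resv_get_fences() hits above (and the test_get_fences() selftest further down) imply its calling convention: on success the caller receives a kmalloc'ed snapshot array plus one reference per fence, and must drop both. A hedged sketch of a conforming caller; the function name is made up:

```c
#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>

static int wait_all_resv_fences(struct dma_resv *resv)
{
	struct dma_fence **fences;
	unsigned int i, num_fences;
	int ret;

	/* Snapshots all fences matching the usage into a new array. */
	ret = dma_resv_get_fences(resv, DMA_RESV_USAGE_READ,
				  &num_fences, &fences);
	if (ret)
		return ret;

	for (i = 0; i < num_fences; i++) {
		dma_fence_wait(fences[i], false);
		dma_fence_put(fences[i]);  /* drop our per-fence reference */
	}
	kfree(fences);                     /* and the snapshot array */
	return 0;
}
```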
|
| dma-fence-array.c |
    89 int error = array->fences[i]->error; in dma_fence_array_enable_signaling()
    120 dma_fence_put(array->fences[i]); in dma_fence_array_release()
    122 kfree(array->fences); in dma_fence_array_release()
    173 int num_fences, struct dma_fence **fences, in dma_fence_array_init() argument
    177 WARN_ON(!num_fences || !fences); in dma_fence_array_init()
    187 array->fences = fences; in dma_fence_array_init()
    227 struct dma_fence **fences, in dma_fence_array_create() argument
    237 dma_fence_array_init(array, num_fences, fences, in dma_fence_array_create()
    262 if (array->fences[i]->context != context) in dma_fence_match_context()
    284 return array->fences[0]; in dma_fence_array_first()
    [all …]
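
Lines 120-122 of dma_fence_array_release() pin down the ownership contract: a successfully created array adopts both the fence references and the `fences` pointer array, so the caller frees them only on the failure path (the amdgpu_ids.c and xe_sync.c entries below show the same cleanup). A sketch merging two fences under that contract; merge_two() is a hypothetical name:

```c
#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/slab.h>

static struct dma_fence *merge_two(struct dma_fence *a, struct dma_fence *b)
{
	struct dma_fence_array *array;
	struct dma_fence **fences;

	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return NULL;

	fences[0] = dma_fence_get(a);
	fences[1] = dma_fence_get(b);

	array = dma_fence_array_create(2, fences,
				       dma_fence_context_alloc(1), 1,
				       false /* signal when all signal */);
	if (!array) {
		/* Creation failed: the references and array are still ours. */
		dma_fence_put(fences[0]);
		dma_fence_put(fences[1]);
		kfree(fences);
		return NULL;
	}
	return &array->base;
}
```

Passing `false` for the final signal_on_any argument makes the array signal once all members have signaled.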
|
| dma-fence-unwrap.c |
    64 struct dma_fence **fences, in __dma_fence_unwrap_merge() argument
    76 dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) { in __dma_fence_unwrap_merge()
    106 fences[i] = dma_fence_unwrap_first(fences[i], &iter[i]); in __dma_fence_unwrap_merge()
    117 while (fences[i] && dma_fence_is_signaled(fences[i])) in __dma_fence_unwrap_merge()
    118 fences[i] = dma_fence_unwrap_next(&iter[i]); in __dma_fence_unwrap_merge()
    120 next = fences[i]; in __dma_fence_unwrap_merge()
    139 fences[i] = dma_fence_unwrap_next(&iter[i]); in __dma_fence_unwrap_merge()
    142 fences[sel] = dma_fence_unwrap_next(&iter[sel]); in __dma_fence_unwrap_merge()
    149 fences[sel] = dma_fence_unwrap_next(&iter[sel]); in __dma_fence_unwrap_merge()
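
__dma_fence_unwrap_merge() is built on an iterator that flattens nested containers: per the usage at line 76, dma_fence_unwrap_for_each() visits every leaf fence inside a dma_fence_array or dma_fence_chain. A small sketch with a hypothetical helper name:

```c
#include <linux/dma-fence.h>
#include <linux/dma-fence-unwrap.h>

static bool all_leaves_signaled(struct dma_fence *composite)
{
	struct dma_fence_unwrap iter;
	struct dma_fence *f;

	/* Walks leaf fences, looking through arrays and chains. */
	dma_fence_unwrap_for_each(f, &iter, composite) {
		if (!dma_fence_is_signaled(f))
			return false;
	}
	return true;
}
```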
|
| st-dma-fence-chain.c |
    102 struct dma_fence **fences; member
    124 fc->fences = kvmalloc_array(count, sizeof(*fc->fences), in fence_chains_init()
    126 if (!fc->fences) { in fence_chains_init()
    133 fc->fences[i] = mock_fence(); in fence_chains_init()
    134 if (!fc->fences[i]) { in fence_chains_init()
    140 fc->fences[i], in fence_chains_init()
    157 dma_fence_put(fc->fences[i]); in fence_chains_init()
    160 kvfree(fc->fences); in fence_chains_init()
    172 dma_fence_put(fc->fences[i]); in fence_chains_fini()
    174 kvfree(fc->fences); in fence_chains_fini()
    [all …]
|
| st-dma-fence-unwrap.c |
    49 struct dma_fence **fences; in mock_array() local
    53 fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL); in mock_array()
    54 if (!fences) in mock_array()
    59 fences[i] = va_arg(valist, typeof(*fences)); in mock_array()
    62 array = dma_fence_array_create(num_fences, fences, in mock_array()
    70 kfree(fences); in mock_array()
    75 dma_fence_put(va_arg(valist, typeof(*fences))); in mock_array()
|
| st-dma-resv.c |
    228 cursor.fences = (void*)~0; in test_for_each_unlocked()
    247 struct dma_fence *f, **fences = NULL; in test_get_fences() local
    274 r = dma_resv_get_fences(&resv, usage, &i, &fences); in test_get_fences()
    280 if (i != 1 || fences[0] != f) { in test_get_fences()
    288 dma_fence_put(fences[i]); in test_get_fences()
    289 kfree(fences); in test_get_fences()
|
| dma-fence.c |
    811 dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count, in dma_fence_test_signaled_any() argument
    817 struct dma_fence *fence = fences[i]; in dma_fence_test_signaled_any()
    848 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count, in dma_fence_wait_any_timeout() argument
    855 if (WARN_ON(!fences || !count || timeout < 0)) in dma_fence_wait_any_timeout()
    860 if (dma_fence_is_signaled(fences[i])) { in dma_fence_wait_any_timeout()
    876 struct dma_fence *fence = fences[i]; in dma_fence_wait_any_timeout()
    894 if (dma_fence_test_signaled_any(fences, count, idx)) in dma_fence_wait_any_timeout()
    907 dma_fence_remove_callback(fences[i], &cb[i].base); in dma_fence_wait_any_timeout()
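
From the checks at lines 855-860, dma_fence_wait_any_timeout() rejects a NULL array, zero count, or negative timeout, short-circuits if any fence is already signaled, and reports the winning index through *idx. A sketch of a caller; wait_for_first() is a made-up name:

```c
#include <linux/dma-fence.h>
#include <linux/jiffies.h>

static int wait_for_first(struct dma_fence **fences, uint32_t count)
{
	uint32_t idx;
	signed long t;

	t = dma_fence_wait_any_timeout(fences, count,
				       true /* interruptible */,
				       msecs_to_jiffies(100), &idx);
	if (t < 0)
		return t;        /* e.g. -ERESTARTSYS */
	if (t == 0)
		return -ETIME;   /* nothing signaled within 100 ms */

	pr_info("fence %u signaled first\n", idx);
	return 0;
}
```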
|
| st-dma-fence.c |
    446 struct dma_fence __rcu **fences; member
    477 rcu_assign_pointer(t->fences[t->id], f1); in thread_signal_callback()
    482 f2 = dma_fence_get_rcu_safe(&t->fences[!t->id]); in thread_signal_callback()
    514 rcu_assign_pointer(t->fences[t->id], NULL); in thread_signal_callback()
    538 t[i].fences = f; in race_signal_callback()
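
This selftest races rcu_assign_pointer() against dma_fence_get_rcu_safe(), which retries if the slot is replaced while the reference count is being taken. A stripped-down sketch of that reader/writer pairing; dropping the reference on a previously published fence is omitted for brevity:

```c
#include <linux/dma-fence.h>
#include <linux/rcupdate.h>

struct fence_slot {
	struct dma_fence __rcu *fence;
};

/* Writer side: publish a new fence for RCU readers. */
static void slot_publish(struct fence_slot *slot, struct dma_fence *f)
{
	rcu_assign_pointer(slot->fence, f);
}

/* Reader side: returns NULL or a fence with an elevated refcount. */
static struct dma_fence *slot_get(struct fence_slot *slot)
{
	return dma_fence_get_rcu_safe(&slot->fence);
}
```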
|
| /linux/drivers/gpu/host1x/ |
| intr.c |
    35 if (!list_empty(&sp->fences.list)) { in host1x_intr_update_hw_state()
    36 fence = list_first_entry(&sp->fences.list, struct host1x_syncpt_fence, list); in host1x_intr_update_hw_state()
    47 struct host1x_fence_list *fence_list = &fence->sp->fences; in host1x_intr_add_fence_locked()
    57 struct host1x_fence_list *fence_list = &fence->sp->fences; in host1x_intr_remove_fence()
    83 spin_lock(&sp->fences.lock); in host1x_intr_handle_interrupt()
    85 list_for_each_entry_safe(fence, tmp, &sp->fences.list, list) { in host1x_intr_handle_interrupt()
    98 spin_unlock(&sp->fences.lock); in host1x_intr_handle_interrupt()
    112 spin_lock_init(&syncpt->fences.lock); in host1x_intr_init()
    113 INIT_LIST_HEAD(&syncpt->fences.list); in host1x_intr_init()
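
Stripped of the host1x types, the interrupt path above is a classic pattern: walk a spinlock-protected fence list with the _safe iterator (entries are unlinked mid-walk) and stop at the first fence the hardware has not reached. A generic sketch; the sorted-by-threshold assumption and the wraparound-safe comparison are mine, not lifted from intr.c:

```c
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct hw_fence {
	struct list_head list;
	u32 threshold;
};

static void process_fences(spinlock_t *lock, struct list_head *head,
			   u32 current_value)
{
	struct hw_fence *fence, *tmp;

	spin_lock(lock);
	list_for_each_entry_safe(fence, tmp, head, list) {
		/* Wraparound-safe "threshold not yet reached" test. */
		if ((s32)(current_value - fence->threshold) < 0)
			break;
		list_del_init(&fence->list);
		/* ... signal the corresponding dma_fence here ... */
	}
	spin_unlock(lock);
}
```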
|
| debug.c |
    96 spin_lock_irqsave(&m->syncpt[i].fences.lock, irqflags); in show_syncpts()
    97 list_for_each(pos, &m->syncpt[i].fences.list) in show_syncpts()
    99 spin_unlock_irqrestore(&m->syncpt[i].fences.lock, irqflags); in show_syncpts()
|
| /linux/Documentation/driver-api/ |
| sync_file.rst |
    9 the fences(struct dma_fence) that are needed to synchronize between drivers or
    29 in-fences and out-fences
    33 the driver to userspace we call the fences it contains 'out-fences'. They are
    37 Out-fences are fences that the driver creates.
    40 userspace we call these fence(s) 'in-fences'. Receiving in-fences means that
    42 the in-fences.
    72 of the Sync File to the kernel. The kernel can then retrieve the fences
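
For the out-fence direction this document describes, the usual plumbing is sync_file_create() plus an fd handed back to userspace; in-fences come back in via sync_file_get_fence(). A sketch with error handling kept minimal; export_out_fence() is a hypothetical name:

```c
#include <linux/dma-fence.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/sync_file.h>

static int export_out_fence(struct dma_fence *fence)
{
	struct sync_file *sync_file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	sync_file = sync_file_create(fence);  /* takes its own reference */
	if (!sync_file) {
		put_unused_fd(fd);
		return -ENOMEM;
	}

	fd_install(fd, sync_file->file);
	return fd;  /* userspace receives this fd as the out-fence */
}
```

The matching in-fence lookup is `fence = sync_file_get_fence(fd);`, which returns a referenced fence or NULL.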
|
| dma-buf.rst |
    21 - dma-resv, which manages a set of dma-fences for a particular dma-buf
    169 :doc: DMA fences overview
    256 batch DMA fences for memory management instead of context preemption DMA
    257 fences which get reattached when the compute job is rescheduled.
    260 fences and controls when they fire. Mixing indefinite fences with normal
    272 dependent upon DMA fences. If the kernel also support indefinite fences in the
    283 userspace [label="userspace controlled fences"]
    298 fences in the kernel. This means:
    300 * No future fences, proxy fences or userspace fences imported as DMA fences,
    312 implications for DMA fences.
    [all …]
|
| /linux/drivers/gpu/drm/amd/amdgpu/ |
| amdgpu_sync.c |
    54 hash_init(sync->fences); in amdgpu_sync_create()
    137 hash_for_each_possible(sync->fences, e, node, f->context) { in amdgpu_sync_add_later()
    169 hash_add(sync->fences, &e->node, f->context); in amdgpu_sync_fence()
    317 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_peek_fence()
    357 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_get_fence()
    388 hash_for_each_safe(source->fences, i, tmp, e, node) { in amdgpu_sync_clone()
    416 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_push_to_job()
    439 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_wait()
    463 hash_for_each_safe(sync->fences, i, tmp, e, node) in amdgpu_sync_free()
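
The amdgpu_sync hits sketch a deduplicating container: a hashtable keyed by `dma_fence->context`, so a later fence from an already-tracked context replaces the stored one rather than being tracked twice. A simplified sketch under that reading; my_sync and sync_entry are hypothetical names:

```c
#include <linux/dma-fence.h>
#include <linux/hashtable.h>
#include <linux/slab.h>

struct sync_entry {
	struct hlist_node node;
	struct dma_fence *fence;
};

struct my_sync {
	DECLARE_HASHTABLE(fences, 4);   /* 16 buckets */
};

static int my_sync_add(struct my_sync *sync, struct dma_fence *f)
{
	struct sync_entry *e;

	/* A later fence from a known context replaces the stored one. */
	hash_for_each_possible(sync->fences, e, node, f->context) {
		if (e->fence->context == f->context) {
			if (dma_fence_is_later(f, e->fence)) {
				dma_fence_put(e->fence);
				e->fence = dma_fence_get(f);
			}
			return 0;
		}
	}

	e = kmalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;
	e->fence = dma_fence_get(f);
	hash_add(sync->fences, &e->node, f->context);
	return 0;
}
```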
|
| amdgpu_ids.c |
    204 struct dma_fence **fences; in amdgpu_vmid_grab_idle() local
    212 fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL); in amdgpu_vmid_grab_idle()
    213 if (!fences) in amdgpu_vmid_grab_idle()
    223 fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r); in amdgpu_vmid_grab_idle()
    224 if (!fences[i]) in amdgpu_vmid_grab_idle()
    238 dma_fence_get(fences[j]); in amdgpu_vmid_grab_idle()
    240 array = dma_fence_array_create(i, fences, fence_context, in amdgpu_vmid_grab_idle()
    244 dma_fence_put(fences[j]); in amdgpu_vmid_grab_idle()
    245 kfree(fences); in amdgpu_vmid_grab_idle()
    254 kfree(fences); in amdgpu_vmid_grab_idle()
|
| amdgpu_fence.c |
    184 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; in amdgpu_fence_emit()
    298 ptr = &drv->fences[last_seq]; in amdgpu_fence_process()
    349 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; in amdgpu_fence_wait_empty()
    426 fence = drv->fences[last_seq]; in amdgpu_fence_last_unsignaled_time_us()
    450 fence = drv->fences[seq]; in amdgpu_fence_update_start_timestamp()
    529 if (!ring->fence_drv.fences) in amdgpu_fence_driver_init_ring()
    663 dma_fence_put(ring->fence_drv.fences[j]); in amdgpu_fence_driver_sw_fini()
    664 kfree(ring->fence_drv.fences); in amdgpu_fence_driver_sw_fini()
    665 ring->fence_drv.fences = NULL; in amdgpu_fence_driver_sw_fini()
    712 ptr = &ring->fence_drv.fences[i]; in amdgpu_fence_driver_clear_job_fences()
    [all …]
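
`seq & ring->fence_drv.num_fences_mask` at lines 184 and 349 is the power-of-two ring idiom: an ever-increasing sequence number indexes a fixed array of fence slots, recycling the slot for seq N at seq N + size. A generic sketch; the fence_ring_* names are invented:

```c
#include <linux/dma-fence.h>
#include <linux/log2.h>
#include <linux/slab.h>

struct fence_ring {
	struct dma_fence **fences;
	u32 num_fences_mask;  /* size - 1, with size a power of two */
};

static int fence_ring_init(struct fence_ring *ring, u32 size)
{
	if (!is_power_of_2(size))
		return -EINVAL;

	ring->fences = kcalloc(size, sizeof(*ring->fences), GFP_KERNEL);
	if (!ring->fences)
		return -ENOMEM;

	ring->num_fences_mask = size - 1;
	return 0;
}

static void fence_ring_store(struct fence_ring *ring, u32 seq,
			     struct dma_fence *fence)
{
	struct dma_fence **ptr = &ring->fences[seq & ring->num_fences_mask];

	/* The slot's previous occupant must already be signaled. */
	dma_fence_put(*ptr);
	*ptr = dma_fence_get(fence);
}
```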
|
| /linux/drivers/gpu/drm/i915/selftests/ |
| i915_sw_fence.c |
    453 struct i915_sw_fence **fences; in test_chain() local
    457 fences = kmalloc_array(nfences, sizeof(*fences), GFP_KERNEL); in test_chain()
    458 if (!fences) in test_chain()
    462 fences[i] = alloc_fence(); in test_chain()
    463 if (!fences[i]) { in test_chain()
    471 fences[i - 1], in test_chain()
    478 i915_sw_fence_commit(fences[i]); in test_chain()
    484 if (i915_sw_fence_done(fences[i])) { in test_chain()
    490 i915_sw_fence_commit(fences[0]); in test_chain()
    500 free_fence(fences[i]); in test_chain()
    [all …]
|
| /linux/drivers/gpu/drm/ |
| drm_suballoc.c |
    225 struct dma_fence **fences, in drm_suballoc_next_hole() argument
    248 fences[i] = NULL; in drm_suballoc_next_hole()
    257 fences[i] = sa->fence; in drm_suballoc_next_hole()
    316 struct dma_fence *fences[DRM_SUBALLOC_MAX_QUEUES]; in drm_suballoc_new() local
    353 } while (drm_suballoc_next_hole(sa_manager, fences, tries)); in drm_suballoc_new()
    356 if (fences[i]) in drm_suballoc_new()
    357 fences[count++] = dma_fence_get(fences[i]); in drm_suballoc_new()
    363 t = dma_fence_wait_any_timeout(fences, count, intr, in drm_suballoc_new()
    367 dma_fence_put(fences[i]); in drm_suballoc_new()
|
| /linux/drivers/gpu/drm/xe/ |
| xe_sync.c |
    277 struct dma_fence **fences = NULL; in xe_sync_in_fence_get() local
    299 fences = kmalloc_array(num_in_fence + 1, sizeof(*fences), GFP_KERNEL); in xe_sync_in_fence_get()
    300 if (!fences) in xe_sync_in_fence_get()
    305 fences[current_fence++] = sync[i].fence; in xe_sync_in_fence_get()
    308 fences[current_fence++] = xe_exec_queue_last_fence_get(q, vm); in xe_sync_in_fence_get()
    309 cf = dma_fence_array_create(num_in_fence, fences, in xe_sync_in_fence_get()
    322 dma_fence_put(fences[--current_fence]); in xe_sync_in_fence_get()
    323 kfree(fences); in xe_sync_in_fence_get()
|
| /linux/Documentation/gpu/ |
| drm-vm-bind-async.rst |
    20 synchronization objects can be either generic, like dma-fences or
    31 understanding of dma-fences is required to digest this
    38 the GPU and CPU. Memory fences are sometimes referred to as
    39 user-fences, userspace-fences or gpu futexes and do not necessarily obey
    46 a certain mode that disallows completion dma-fences.
    72 IOCTL returns. A synchronous VM_BIND takes neither in-fences nor
    96 memory fences given as VM_BIND in-fences need to be awaited
    99 to depend on memory fences that don't have such a restriction.
    104 operation is not allowed and any in-fences need to be awaited
    108 above). Second, any dma-fences used as in-syncobjs for VM_BIND
    [all …]
|
| /linux/include/linux/ |
| dma-fence-array.h |
    44 struct dma_fence **fences; member
    84 int num_fences, struct dma_fence **fences,
    89 struct dma_fence **fences,
|
| dma-resv.h |
    178 struct dma_resv_list __rcu *fences; member
    210 struct dma_resv_list *fences; member
    476 unsigned int *num_fences, struct dma_fence ***fences);
|
| /linux/drivers/gpu/drm/i915/gem/ |
| i915_gem_execbuffer.c |
    2796 kvfree(fences); in __free_fence_array()
    2834 eb->fences = f; in add_timeline_fence_array()
    2963 eb->fences = f; in add_fence_array()
    3009 if (fences) in put_fence_array()
    3207 fences = kmalloc_array(eb->num_batches, sizeof(*fences), GFP_KERNEL); in eb_composite_fence_create()
    3208 if (!fences) in eb_composite_fence_create()
    3218 fences, in eb_composite_fence_create()
    3223 kfree(fences); in eb_composite_fence_create()
    3270 if (eb->fences) { in eb_fences_add()
    3387 eb.fences = NULL; in i915_gem_do_execbuffer()
    [all …]
|
| /linux/drivers/gpu/drm/virtio/ |
| virtgpu_fence.c |
    111 list_add_tail(&fence->node, &drv->fences); in virtio_gpu_fence_emit()
    136 list_for_each_entry_safe(curr, tmp, &drv->fences, node) { in virtio_gpu_fence_event_process()
    146 list_for_each_entry_safe(curr, tmp, &drv->fences, node) { in virtio_gpu_fence_event_process()
|
| /linux/drivers/gpu/drm/radeon/ |
| radeon_trace.h |
    36 __field(u32, fences)
    42 __entry->fences = radeon_fence_count_emitted(
    47 __entry->fences)
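
The three fragments above belong to a TRACE_EVENT definition: __field() reserves a slot in the trace record, TP_fast_assign() fills it, and TP_printk() formats it. A hypothetical event of the same shape; note that real definitions live in a dedicated trace header wrapped in the usual TRACE_SYSTEM/define_trace.h boilerplate, which is omitted here:

```c
#include <linux/tracepoint.h>

TRACE_EVENT(my_ring_status,
	TP_PROTO(unsigned int ring, u32 emitted),
	TP_ARGS(ring, emitted),

	TP_STRUCT__entry(
		__field(unsigned int, ring)
		__field(u32, fences)
	),

	TP_fast_assign(
		__entry->ring = ring;
		/* radeon fills this from radeon_fence_count_emitted() */
		__entry->fences = emitted;
	),

	TP_printk("ring=%u, emitted fences=%u",
		  __entry->ring, __entry->fences)
);
```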
|