| /linux/drivers/dma-buf/ |
| sync_file.c | 160 sync_file->fence = fences[0]; in sync_file_set_fence() 161 kfree(fences); in sync_file_set_fence() 182 return array->fences; in get_fences() 192 fences[*i] = fence; in add_fence() 228 fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL); in sync_file_merge() 229 if (!fences) in sync_file_merge() 273 nfences = krealloc_array(fences, i, sizeof(*fences), GFP_KERNEL); in sync_file_merge() 277 fences = nfences; in sync_file_merge() 288 dma_fence_put(fences[--i]); in sync_file_merge() 289 kfree(fences); in sync_file_merge() [all …]
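The sync_file_merge() hits above implement the kernel side of the SYNC_IOC_MERGE ioctl, which userspace uses to combine two sync_files into one that signals once all contained fences have signaled. A minimal userspace sketch, assuming fd1 and fd2 are valid sync_file fds previously handed out by a driver; the helper name is hypothetical:

```c
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/sync_file.h>

/* Merge two sync_file fds; returns the new fd or -1 on error. */
static int merge_sync_files(int fd1, int fd2)
{
	struct sync_merge_data data;

	/* flags and pad must be zero or the kernel rejects the request */
	memset(&data, 0, sizeof(data));
	strncpy(data.name, "merged", sizeof(data.name) - 1);
	data.fd2 = fd2;

	if (ioctl(fd1, SYNC_IOC_MERGE, &data) < 0) {
		perror("SYNC_IOC_MERGE");
		return -1;
	}

	return data.fence;	/* fd of the merged sync_file */
}
```

The merged sync_file holds its own references to the fences of both inputs, so the original fds can be closed independently.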
|
| st-dma-fence-chain.c | 100 struct dma_fence **fences; member 122 fc->fences = kvmalloc_array(count, sizeof(*fc->fences), in fence_chains_init() 124 if (!fc->fences) { in fence_chains_init() 131 fc->fences[i] = mock_fence(); in fence_chains_init() 132 if (!fc->fences[i]) { in fence_chains_init() 138 fc->fences[i], in fence_chains_init() 153 dma_fence_put(fc->fences[i]); in fence_chains_init() 156 kvfree(fc->fences); in fence_chains_init() 168 dma_fence_put(fc->fences[i]); in fence_chains_fini() 170 kvfree(fc->fences); in fence_chains_fini() [all …]
|
| dma-resv.c | 232 if (fences) in dma_resv_reset_shared_max() 233 fences->shared_max = fences->shared_count; in dma_resv_reset_shared_max() 338 cursor->fences = dma_resv_shared_list(cursor->obj); in dma_resv_iter_restart_unlocked() 339 if (cursor->fences) in dma_resv_iter_restart_unlocked() 340 cursor->shared_count = cursor->fences->shared_count; in dma_resv_iter_restart_unlocked() 342 cursor->fences = NULL; in dma_resv_iter_restart_unlocked() 369 } else if (!cursor->fences || in dma_resv_iter_walk_unlocked() 375 struct dma_resv_list *fences = cursor->fences; in dma_resv_iter_walk_unlocked() local 445 cursor->fences = dma_resv_shared_list(cursor->obj); in dma_resv_iter_first() 447 cursor->fences = NULL; in dma_resv_iter_first() [all …]
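The dma_resv_iter_* hits above belong to the reservation-object fence iterator. A sketch of the unlocked iteration pattern, assuming the pre-5.19 API shown here, where dma_resv_iter_begin() still takes a bool selecting whether shared fences are included; the helper name is hypothetical:

```c
#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

/* Check whether every fence attached to a reservation object has signaled. */
static bool resv_all_signaled(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	bool signaled = true;

	dma_resv_iter_begin(&cursor, resv, true /* include shared fences */);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		/* the cursor holds a reference while fence is in scope */
		if (!dma_fence_is_signaled(fence)) {
			signaled = false;
			break;
		}
	}
	dma_resv_iter_end(&cursor);	/* drops the last held reference */

	return signaled;
}
```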
|
| dma-fence-array.c | 87 if (dma_fence_add_callback(array->fences[i], &cb[i].cb, in dma_fence_array_enable_signaling() 89 int error = array->fences[i]->error; in dma_fence_array_enable_signaling() 116 dma_fence_put(array->fences[i]); in dma_fence_array_release() 118 kfree(array->fences); in dma_fence_array_release() 151 struct dma_fence **fences, in dma_fence_array_create() argument 171 array->fences = fences; in dma_fence_array_create() 197 if (array->fences[i]->context != context) in dma_fence_match_context()
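dma_fence_array collapses several fences into a single struct dma_fence. A sketch of building one; dma_fence_array_create() takes ownership of the passed-in fences array (the release hits above kfree() array->fences), so it must be a dedicated allocation. The helper name is hypothetical:

```c
#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/slab.h>

/* Combine n fences into one fence that signals once all of them signal. */
static struct dma_fence *combine_fences(struct dma_fence **src, int n)
{
	struct dma_fence_array *array;
	struct dma_fence **fences;
	int i;

	fences = kmalloc_array(n, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return NULL;

	for (i = 0; i < n; i++)
		fences[i] = dma_fence_get(src[i]);

	array = dma_fence_array_create(n, fences,
				       dma_fence_context_alloc(1), 1,
				       false /* signal when all signal */);
	if (!array) {
		/* on failure we still own the references and the array */
		for (i = 0; i < n; i++)
			dma_fence_put(fences[i]);
		kfree(fences);
		return NULL;
	}

	return &array->base;	/* release with dma_fence_put() when done */
}
```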
|
| dma-fence.c | 805 dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count, in dma_fence_test_signaled_any() argument 811 struct dma_fence *fence = fences[i]; in dma_fence_test_signaled_any() 842 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count, in dma_fence_wait_any_timeout() argument 849 if (WARN_ON(!fences || !count || timeout < 0)) in dma_fence_wait_any_timeout() 854 if (dma_fence_is_signaled(fences[i])) { in dma_fence_wait_any_timeout() 870 struct dma_fence *fence = fences[i]; in dma_fence_wait_any_timeout() 888 if (dma_fence_test_signaled_any(fences, count, idx)) in dma_fence_wait_any_timeout() 901 dma_fence_remove_callback(fences[i], &cb[i].base); in dma_fence_wait_any_timeout()
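dma_fence_wait_any_timeout() blocks until any one of the fences signals, returns the remaining timeout in jiffies (0 on timeout, negative on error), and reports which fence fired through the optional idx out-parameter. A minimal sketch with a one-second timeout; the wrapper name is hypothetical:

```c
#include <linux/dma-fence.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/printk.h>

/* Wait up to one second for whichever of the fences signals first. */
static int wait_for_first_fence(struct dma_fence **fences, uint32_t count)
{
	uint32_t idx;
	signed long t;

	t = dma_fence_wait_any_timeout(fences, count, true /* interruptible */,
				       msecs_to_jiffies(1000), &idx);
	if (t < 0)
		return t;		/* e.g. -ERESTARTSYS */
	if (t == 0)
		return -ETIMEDOUT;

	pr_debug("fence %u signaled first\n", idx);
	return 0;
}
```

The amdgpu_sa.c hits further down use the same call with a NULL idx to reclaim suballocator holes once any ring's fence retires.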
|
| st-dma-fence.c | 432 struct dma_fence __rcu **fences; member 461 rcu_assign_pointer(t->fences[t->id], f1); in thread_signal_callback() 466 f2 = dma_fence_get_rcu_safe(&t->fences[!t->id]); in thread_signal_callback() 498 rcu_assign_pointer(t->fences[t->id], NULL); in thread_signal_callback() 522 t[i].fences = f; in race_signal_callback()
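The selftest above races rcu_assign_pointer() against dma_fence_get_rcu_safe(), which grabs a reference even while the pointer is being replaced. A sketch of the lookup side of that pattern, assuming a hypothetical object that publishes its most recent fence via an RCU-protected pointer:

```c
#include <linux/dma-fence.h>
#include <linux/rcupdate.h>

/* hypothetical object publishing its most recent fence via RCU */
struct busy_obj {
	struct dma_fence __rcu *fence;
};

static struct dma_fence *busy_obj_get_fence(struct busy_obj *obj)
{
	struct dma_fence *fence;

	rcu_read_lock();
	/* retries internally if the pointer changes or the refcount
	 * drops to zero while we are looking at it */
	fence = dma_fence_get_rcu_safe(&obj->fence);
	rcu_read_unlock();

	return fence;	/* may be NULL; caller must dma_fence_put() it */
}
```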
|
| /linux/drivers/gpu/drm/amd/amdgpu/ |
| amdgpu_ids.c | 110 struct dma_fence *fence, **fences; in amdgpu_pasid_free_delayed() local 125 fence = fences[0]; in amdgpu_pasid_free_delayed() 126 kfree(fences); in amdgpu_pasid_free_delayed() 134 kfree(fences); in amdgpu_pasid_free_delayed() 203 struct dma_fence **fences; in amdgpu_vmid_grab_idle() local 211 if (!fences) in amdgpu_vmid_grab_idle() 222 if (!fences[i]) in amdgpu_vmid_grab_idle() 236 dma_fence_get(fences[j]); in amdgpu_vmid_grab_idle() 242 dma_fence_put(fences[j]); in amdgpu_vmid_grab_idle() 243 kfree(fences); in amdgpu_vmid_grab_idle() [all …]
|
| amdgpu_sa.c | 207 struct dma_fence **fences, in amdgpu_sa_bo_next_hole() argument 229 fences[i] = NULL; in amdgpu_sa_bo_next_hole() 238 fences[i] = sa_bo->fence; in amdgpu_sa_bo_next_hole() 279 struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS]; in amdgpu_sa_bo_new() local 314 } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries)); in amdgpu_sa_bo_new() 317 if (fences[i]) in amdgpu_sa_bo_new() 318 fences[count++] = dma_fence_get(fences[i]); in amdgpu_sa_bo_new() 322 t = dma_fence_wait_any_timeout(fences, count, false, in amdgpu_sa_bo_new() 326 dma_fence_put(fences[i]); in amdgpu_sa_bo_new()
|
| amdgpu_sync.c | 53 hash_init(sync->fences); in amdgpu_sync_create() 137 hash_for_each_possible(sync->fences, e, node, f->context) { in amdgpu_sync_add_later() 169 hash_add(sync->fences, &e->node, f->context); in amdgpu_sync_fence() 311 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_peek_fence() 352 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_get_fence() 383 hash_for_each_safe(source->fences, i, tmp, e, node) { in amdgpu_sync_clone() 408 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_wait() 434 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_free()
|
| amdgpu_fence.c | 180 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; in amdgpu_fence_emit() 292 ptr = &drv->fences[last_seq]; in amdgpu_fence_process() 343 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; in amdgpu_fence_wait_empty() 479 ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *), in amdgpu_fence_driver_init_ring() 481 if (!ring->fence_drv.fences) in amdgpu_fence_driver_init_ring() 581 dma_fence_put(ring->fence_drv.fences[j]); in amdgpu_fence_driver_sw_fini() 582 kfree(ring->fence_drv.fences); in amdgpu_fence_driver_sw_fini() 583 ring->fence_drv.fences = NULL; in amdgpu_fence_driver_sw_fini() 628 ptr = &ring->fence_drv.fences[i]; in amdgpu_fence_driver_clear_job_fences()
|
| amdgpu_jpeg.c | 78 unsigned int fences = 0; in amdgpu_jpeg_idle_work_handler() local 85 fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec); in amdgpu_jpeg_idle_work_handler() 88 if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt)) in amdgpu_jpeg_idle_work_handler()
|
| amdgpu_ctx.c | 177 entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs), in amdgpu_ctx_init_entity() 253 dma_fence_put(entity->fences[i]); in amdgpu_ctx_fini_entity() 547 other = centity->fences[idx]; in amdgpu_ctx_add_fence() 554 centity->fences[idx] = fence; in amdgpu_ctx_add_fence() 586 fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]); in amdgpu_ctx_get_fence() 648 other = dma_fence_get(centity->fences[idx]); in amdgpu_ctx_wait_prev_fence() 755 fence = dma_fence_get(centity->fences[i]); in amdgpu_ctx_fence_time()
|
| amdgpu_debugfs.c | 1398 struct dma_fence **fences) in amdgpu_ib_preempt_fences_swap() argument 1414 ptr = &drv->fences[last_seq]; in amdgpu_ib_preempt_fences_swap() 1422 fences[last_seq] = fence; in amdgpu_ib_preempt_fences_swap() 1434 fence = fences[i]; in amdgpu_ib_preempt_signal_fences() 1475 ptr = &drv->fences[preempt_seq]; in amdgpu_ib_preempt_mark_partial_job() 1499 struct dma_fence **fences = NULL; in amdgpu_debugfs_ib_preempt() local 1515 fences = kcalloc(length, sizeof(void *), GFP_KERNEL); in amdgpu_debugfs_ib_preempt() 1516 if (!fences) in amdgpu_debugfs_ib_preempt() 1545 amdgpu_ib_preempt_fences_swap(ring, fences); in amdgpu_debugfs_ib_preempt() 1556 amdgpu_ib_preempt_signal_fences(fences, length); in amdgpu_debugfs_ib_preempt() [all …]
|
| amdgpu_cs.c | 1528 struct drm_amdgpu_fence *fences) in amdgpu_cs_wait_all_fences() argument 1538 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]); in amdgpu_cs_wait_all_fences() 1573 struct drm_amdgpu_fence *fences) in amdgpu_cs_wait_any_fence() argument 1591 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]); in amdgpu_cs_wait_any_fence() 1641 struct drm_amdgpu_fence *fences; in amdgpu_cs_wait_fences_ioctl() local 1647 if (fences == NULL) in amdgpu_cs_wait_fences_ioctl() 1650 fences_user = u64_to_user_ptr(wait->in.fences); in amdgpu_cs_wait_fences_ioctl() 1651 if (copy_from_user(fences, fences_user, in amdgpu_cs_wait_fences_ioctl() 1658 r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences); in amdgpu_cs_wait_fences_ioctl() 1660 r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences); in amdgpu_cs_wait_fences_ioctl() [all …]
|
| amdgpu_vcn.c | 385 unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0}; in amdgpu_vcn_idle_work_handler() local 410 fences += fence[j]; in amdgpu_vcn_idle_work_handler() 413 if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) { in amdgpu_vcn_idle_work_handler() 450 unsigned int fences = 0; in amdgpu_vcn_ring_begin_use() local 454 fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]); in amdgpu_vcn_ring_begin_use() 456 if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt)) in amdgpu_vcn_ring_begin_use()
|
| amdgpu_sync.h | 45 DECLARE_HASHTABLE(fences, 4);
|
| vcn_v1_0.c | 1786 unsigned int fences = 0, i; in vcn_v1_0_idle_work_handler() local 1789 fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]); in vcn_v1_0_idle_work_handler() 1794 if (fences) in vcn_v1_0_idle_work_handler() 1807 fences += amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec); in vcn_v1_0_idle_work_handler() 1808 fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_dec); in vcn_v1_0_idle_work_handler() 1810 if (fences == 0) { in vcn_v1_0_idle_work_handler() 1851 unsigned int fences = 0, i; in vcn_v1_0_set_pg_for_begin_use() local 1854 fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]); in vcn_v1_0_set_pg_for_begin_use() 1856 if (fences) in vcn_v1_0_set_pg_for_begin_use()
|
| /linux/Documentation/driver-api/ |
| sync_file.rst | 9 the fences(struct dma_fence) that are needed to synchronize between drivers or 29 in-fences and out-fences 33 the driver to userspace we call the fences it contains 'out-fences'. They are 37 Out-fences are fences that the driver creates. 40 userspace we call these fence(s) 'in-fences'. Receiving in-fences means that 42 the in-fences. 72 of the Sync File to the kernel. The kernel can then retrieve the fences
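sync_file.rst describes the two directions drivers deal with: in-fences received from userspace as sync_file fds and out-fences handed back to userspace. A sketch of both sides using the documented helpers sync_file_get_fence() and sync_file_create(); the wrapper names are hypothetical:

```c
#include <linux/dma-fence.h>
#include <linux/sync_file.h>
#include <linux/file.h>
#include <linux/fcntl.h>

/* in-fence: turn a sync_file fd from userspace into a dma_fence to wait on */
static struct dma_fence *driver_get_in_fence(int fd)
{
	return sync_file_get_fence(fd);	/* NULL if fd is not a sync_file */
}

/* out-fence: wrap a driver-created fence in a sync_file, return an fd */
static int driver_create_out_fence_fd(struct dma_fence *fence)
{
	struct sync_file *sync_file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	/* sync_file_create() takes its own fence reference */
	sync_file = sync_file_create(fence);
	if (!sync_file) {
		put_unused_fd(fd);
		return -ENOMEM;
	}

	fd_install(fd, sync_file->file);
	return fd;
}
```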
|
| dma-buf.rst | 156 :doc: DMA fences overview 225 batch DMA fences for memory management instead of context preemption DMA 226 fences which get reattached when the compute job is rescheduled. 229 fences and controls when they fire. Mixing indefinite fences with normal 236 * Only userspace knows about all dependencies in indefinite fences and when 241 dependent upon DMA fences. If the kernel also support indefinite fences in the 252 userspace [label="userspace controlled fences"] 267 fences in the kernel. This means: 269 * No future fences, proxy fences or userspace fences imported as DMA fences, 281 implications for DMA fences. [all …]
|
| /linux/drivers/gpu/drm/i915/selftests/ |
| i915_sw_fence.c | 453 struct i915_sw_fence **fences; in test_chain() local 457 fences = kmalloc_array(nfences, sizeof(*fences), GFP_KERNEL); in test_chain() 458 if (!fences) in test_chain() 462 fences[i] = alloc_fence(); in test_chain() 463 if (!fences[i]) { in test_chain() 471 fences[i - 1], in test_chain() 478 i915_sw_fence_commit(fences[i]); in test_chain() 484 if (i915_sw_fence_done(fences[i])) { in test_chain() 490 i915_sw_fence_commit(fences[0]); in test_chain() 500 free_fence(fences[i]); in test_chain() [all …]
|
| /linux/drivers/gpu/drm/radeon/ |
| radeon_sa.c | 248 struct radeon_fence **fences, in radeon_sa_bo_next_hole() argument 278 fences[i] = sa_bo->fence; in radeon_sa_bo_next_hole() 317 struct radeon_fence *fences[RADEON_NUM_RINGS]; in radeon_sa_bo_new() local 336 fences[i] = NULL; in radeon_sa_bo_new() 350 } while (radeon_sa_bo_next_hole(sa_manager, fences, tries)); in radeon_sa_bo_new() 353 radeon_fence_ref(fences[i]); in radeon_sa_bo_new() 356 r = radeon_fence_wait_any(rdev, fences, false); in radeon_sa_bo_new() 358 radeon_fence_unref(&fences[i]); in radeon_sa_bo_new()
|
| radeon_trace.h | 36 __field(u32, fences) 42 __entry->fences = radeon_fence_count_emitted( 47 __entry->fences)
|
| /linux/drivers/gpu/drm/i915/gem/ |
| i915_gem_execbuffer.c | 2598 kvfree(fences); in __free_fence_array() 2636 eb->fences = f; in add_timeline_fence_array() 2760 eb->fences = f; in add_fence_array() 2804 if (fences) in put_fence_array() 3007 fences = kmalloc_array(eb->num_batches, sizeof(*fences), GFP_KERNEL); in eb_composite_fence_create() 3008 if (!fences) in eb_composite_fence_create() 3018 fences, in eb_composite_fence_create() 3023 kfree(fences); in eb_composite_fence_create() 3070 if (eb->fences) { in eb_fences_add() 3187 eb.fences = NULL; in i915_gem_do_execbuffer() [all …]
|
| /linux/include/linux/ |
| dma-fence-array.h | 43 struct dma_fence **fences; member 78 struct dma_fence **fences,
|
| /linux/drivers/gpu/drm/virtio/ |
| virtgpu_fence.c | 111 list_add_tail(&fence->node, &drv->fences); in virtio_gpu_fence_emit() 136 list_for_each_entry_safe(curr, tmp, &drv->fences, node) { in virtio_gpu_fence_event_process() 146 list_for_each_entry_safe(curr, tmp, &drv->fences, node) { in virtio_gpu_fence_event_process()
|