/kernel/linux/linux-5.10/drivers/dma-buf/
D | sync_file.c
    149  struct dma_fence **fences, int num_fences)  in sync_file_set_fence() argument
    160  sync_file->fence = fences[0];  in sync_file_set_fence()
    161  kfree(fences);  in sync_file_set_fence()
    163  array = dma_fence_array_create(num_fences, fences,  in sync_file_set_fence()
    182  return array->fences;  in get_fences()
    189  static void add_fence(struct dma_fence **fences,  in add_fence() argument
    192  fences[*i] = fence;  in add_fence()
    214  struct dma_fence **fences = NULL, **nfences, **a_fences, **b_fences;  in sync_file_merge() local
    228  fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);  in sync_file_merge()
    229  if (!fences)  in sync_file_merge()
    [all …]
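The hits above are the heart of sync_file's fence bookkeeping: a file holding one fence stores it directly, while several fences get wrapped in a dma_fence_array. A minimal sketch of that pattern, assuming the 5.10 dma-fence API; set_fences() is an illustrative name and error handling is abridged:

    #include <linux/dma-fence.h>
    #include <linux/dma-fence-array.h>
    #include <linux/slab.h>
    #include <linux/sync_file.h>

    static int set_fences(struct sync_file *sync_file,
                          struct dma_fence **fences, int num_fences)
    {
            struct dma_fence_array *array;

            if (num_fences == 1) {
                    /* one fence: keep it directly, the carrier array is freed */
                    sync_file->fence = fences[0];
                    kfree(fences);
                    return 0;
            }

            /* on success the array takes ownership of @fences */
            array = dma_fence_array_create(num_fences, fences,
                                           dma_fence_context_alloc(1), 1,
                                           false);
            if (!array)
                    return -ENOMEM;

            sync_file->fence = &array->base;
            return 0;
    }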
D | st-dma-fence-chain.c
    108  struct dma_fence **fences;  member
    130  fc->fences = kvmalloc_array(count, sizeof(*fc->fences),  in fence_chains_init()
    132  if (!fc->fences) {  in fence_chains_init()
    139  fc->fences[i] = mock_fence();  in fence_chains_init()
    140  if (!fc->fences[i]) {  in fence_chains_init()
    146  fc->fences[i],  in fence_chains_init()
    161  dma_fence_put(fc->fences[i]);  in fence_chains_init()
    164  kvfree(fc->fences);  in fence_chains_init()
    175  dma_fence_signal(fc->fences[i]);  in fence_chains_fini()
    176  dma_fence_put(fc->fences[i]);  in fence_chains_fini()
    [all …]
D | dma-fence-array.c
    87   if (dma_fence_add_callback(array->fences[i], &cb[i].cb,  in dma_fence_array_enable_signaling()
    89   int error = array->fences[i]->error;  in dma_fence_array_enable_signaling()
    120  dma_fence_put(array->fences[i]);  in dma_fence_array_release()
    122  kfree(array->fences);  in dma_fence_array_release()
    155  struct dma_fence **fences,  in dma_fence_array_create() argument
    175  array->fences = fences;  in dma_fence_array_create()
    201  if (array->fences[i]->context != context)  in dma_fence_match_context()
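The release path at lines 120-122 spells out the ownership contract: dma_fence_array_create() takes over both the @fences pointer and the fence references it holds, and drops them in dma_fence_array_release(). A sketch of a caller honouring that contract, assuming the 5.10 signature from include/linux/dma-fence-array.h; merge_two() is an illustrative name:

    static struct dma_fence *merge_two(struct dma_fence *a, struct dma_fence *b)
    {
            struct dma_fence_array *array;
            struct dma_fence **fences;

            fences = kcalloc(2, sizeof(*fences), GFP_KERNEL);
            if (!fences)
                    return NULL;

            fences[0] = dma_fence_get(a);   /* references move into the array */
            fences[1] = dma_fence_get(b);

            array = dma_fence_array_create(2, fences,
                                           dma_fence_context_alloc(1), 1,
                                           false /* signal when all signal */);
            if (!array) {
                    /* on failure ownership stays with the caller */
                    dma_fence_put(fences[0]);
                    dma_fence_put(fences[1]);
                    kfree(fences);
                    return NULL;
            }
            return &array->base;
    }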
D | dma-fence.c
    727  dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,  in dma_fence_test_signaled_any() argument
    733  struct dma_fence *fence = fences[i];  in dma_fence_test_signaled_any()
    764  dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,  in dma_fence_wait_any_timeout() argument
    771  if (WARN_ON(!fences || !count || timeout < 0))  in dma_fence_wait_any_timeout()
    776  if (dma_fence_is_signaled(fences[i])) {  in dma_fence_wait_any_timeout()
    792  struct dma_fence *fence = fences[i];  in dma_fence_wait_any_timeout()
    810  if (dma_fence_test_signaled_any(fences, count, idx))  in dma_fence_wait_any_timeout()
    823  dma_fence_remove_callback(fences[i], &cb[i].base);  in dma_fence_wait_any_timeout()
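dma_fence_wait_any_timeout() (line 764) is the consumer entry point here: it returns the remaining jiffies on success, 0 on timeout, or a negative error, and reports which fence signalled through its last argument. A hedged usage sketch; wait_any() is an illustrative wrapper:

    #include <linux/dma-fence.h>
    #include <linux/printk.h>
    #include <linux/sched.h>

    static int wait_any(struct dma_fence **fences, uint32_t count)
    {
            uint32_t first;
            signed long t;

            t = dma_fence_wait_any_timeout(fences, count,
                                           false,                /* uninterruptible */
                                           MAX_SCHEDULE_TIMEOUT, /* no timeout */
                                           &first);
            if (t < 0)
                    return t;               /* e.g. -EINVAL */
            if (t == 0)
                    return -ETIMEDOUT;      /* timed out */

            pr_debug("fence %u signalled first\n", first);
            return 0;
    }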
D | st-dma-fence.c
    432  struct dma_fence __rcu **fences;  member
    461  rcu_assign_pointer(t->fences[t->id], f1);  in thread_signal_callback()
    466  f2 = dma_fence_get_rcu_safe(&t->fences[!t->id]);  in thread_signal_callback()
    495  rcu_assign_pointer(t->fences[t->id], NULL);  in thread_signal_callback()
    519  t[i].fences = f;  in race_signal_callback()
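The selftest races exactly this pattern: writers publish a fence pointer with rcu_assign_pointer(), and readers take a reference with dma_fence_get_rcu_safe(), which re-reads the slot so a fence that is concurrently replaced or freed is never returned. A minimal sketch; slot, publish() and peek() are illustrative names:

    #include <linux/dma-fence.h>
    #include <linux/rcupdate.h>

    static struct dma_fence __rcu *slot;

    static void publish(struct dma_fence *f)
    {
            rcu_assign_pointer(slot, f);    /* pairs with the reader's dereference */
    }

    static struct dma_fence *peek(void)
    {
            struct dma_fence *f;

            rcu_read_lock();
            f = dma_fence_get_rcu_safe(&slot);      /* NULL if the slot is empty */
            rcu_read_unlock();
            return f;                               /* caller must dma_fence_put() */
    }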
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/amdgpu/
D | amdgpu_ids.c
    110  struct dma_fence *fence, **fences;  in amdgpu_pasid_free_delayed() local
    115  r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);  in amdgpu_pasid_free_delayed()
    125  fence = fences[0];  in amdgpu_pasid_free_delayed()
    126  kfree(fences);  in amdgpu_pasid_free_delayed()
    131  array = dma_fence_array_create(count, fences, context,  in amdgpu_pasid_free_delayed()
    134  kfree(fences);  in amdgpu_pasid_free_delayed()
    204  struct dma_fence **fences;  in amdgpu_vmid_grab_idle() local
    211  fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);  in amdgpu_vmid_grab_idle()
    212  if (!fences)  in amdgpu_vmid_grab_idle()
    222  fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);  in amdgpu_vmid_grab_idle()
    [all …]
D | amdgpu_sync.c
    51   hash_init(sync->fences);  in amdgpu_sync_create()
    135  hash_for_each_possible(sync->fences, e, node, f->context) {  in amdgpu_sync_add_later()
    167  hash_add(sync->fences, &e->node, f->context);  in amdgpu_sync_fence()
    293  hash_for_each_safe(sync->fences, i, tmp, e, node) {  in amdgpu_sync_peek_fence()
    334  hash_for_each_safe(sync->fences, i, tmp, e, node) {  in amdgpu_sync_get_fence()
    365  hash_for_each_safe(source->fences, i, tmp, e, node) {  in amdgpu_sync_clone()
    390  hash_for_each_safe(sync->fences, i, tmp, e, node) {  in amdgpu_sync_wait()
    416  hash_for_each_safe(sync->fences, i, tmp, e, node) {  in amdgpu_sync_free()
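All of these hits walk one container: a hashtable keyed by dma_fence context (declared in amdgpu_sync.h, below), so the sync object keeps at most one fence per context and replaces it only with a later one. A sketch of that dedup step, assuming 5.10's dma_fence_is_later(); the struct names are illustrative:

    #include <linux/dma-fence.h>
    #include <linux/hashtable.h>

    struct sync_entry {
            struct hlist_node node;
            struct dma_fence *fence;
    };

    struct sync_obj {
            DECLARE_HASHTABLE(fences, 4);   /* 2^4 buckets, keyed by context */
    };

    static bool sync_add_later(struct sync_obj *sync, struct dma_fence *f)
    {
            struct sync_entry *e;

            hash_for_each_possible(sync->fences, e, node, f->context) {
                    if (e->fence->context != f->context)
                            continue;
                    /* same context: only keep the later of the two fences */
                    if (dma_fence_is_later(f, e->fence)) {
                            dma_fence_put(e->fence);
                            e->fence = dma_fence_get(f);
                    }
                    return true;
            }
            /* not found: the caller allocates an entry and hash_add()s it */
            return false;
    }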
D | amdgpu_sa.c
    207  struct dma_fence **fences,  in amdgpu_sa_bo_next_hole() argument
    229  fences[i] = NULL;  in amdgpu_sa_bo_next_hole()
    238  fences[i] = sa_bo->fence;  in amdgpu_sa_bo_next_hole()
    279  struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];  in amdgpu_sa_bo_new() local
    314  } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));  in amdgpu_sa_bo_new()
    317  if (fences[i])  in amdgpu_sa_bo_new()
    318  fences[count++] = dma_fence_get(fences[i]);  in amdgpu_sa_bo_new()
    322  t = dma_fence_wait_any_timeout(fences, count, false,  in amdgpu_sa_bo_new()
    326  dma_fence_put(fences[i]);  in amdgpu_sa_bo_new()
D | amdgpu_dma_buf.c
    129  struct dma_fence **fences;  in __dma_resv_make_exclusive() local
    136  r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);  in __dma_resv_make_exclusive()
    143  dma_resv_add_excl_fence(obj, fences[0]);  in __dma_resv_make_exclusive()
    144  dma_fence_put(fences[0]);  in __dma_resv_make_exclusive()
    145  kfree(fences);  in __dma_resv_make_exclusive()
    149  array = dma_fence_array_create(count, fences,  in __dma_resv_make_exclusive()
    163  dma_fence_put(fences[count]);  in __dma_resv_make_exclusive()
    164  kfree(fences);  in __dma_resv_make_exclusive()
D | amdgpu_fence.c
    159  ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];  in amdgpu_fence_emit()
    271  ptr = &drv->fences[last_seq];  in amdgpu_fence_process()
    328  ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];  in amdgpu_fence_wait_empty()
    465  ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),  in amdgpu_fence_driver_init_ring()
    467  if (!ring->fence_drv.fences)  in amdgpu_fence_driver_init_ring()
    548  dma_fence_put(ring->fence_drv.fences[j]);  in amdgpu_fence_driver_fini()
    549  kfree(ring->fence_drv.fences);  in amdgpu_fence_driver_fini()
    550  ring->fence_drv.fences = NULL;  in amdgpu_fence_driver_fini()
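The recurring fences[seq & num_fences_mask] expression is a power-of-two ring of fence slots indexed by hardware sequence number; storing a new fence retires the slot's previous occupant, which must already have signalled. A sketch of the slot update, with illustrative struct and function names:

    #include <linux/dma-fence.h>

    struct fence_driver {
            struct dma_fence **fences;      /* kcalloc'd, power-of-two length */
            uint32_t num_fences_mask;       /* ring length - 1 */
    };

    static void store_fence(struct fence_driver *drv, uint32_t seq,
                            struct dma_fence *fence)
    {
            struct dma_fence **ptr = &drv->fences[seq & drv->num_fences_mask];

            /* the slot's previous occupant is a full ring older and done */
            if (*ptr)
                    dma_fence_put(*ptr);
            *ptr = dma_fence_get(fence);
    }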
D | amdgpu_jpeg.c
    80  unsigned int fences = 0;  in amdgpu_jpeg_idle_work_handler() local
    87  fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec);  in amdgpu_jpeg_idle_work_handler()
    90  if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt))  in amdgpu_jpeg_idle_work_handler()
D | amdgpu_ctx.c
    103  entity = kcalloc(1, offsetof(typeof(*entity), fences[amdgpu_sched_jobs]),  in amdgpu_ctx_init_entity()
    178  dma_fence_put(entity->fences[i]);  in amdgpu_ctx_fini_entity()
    445  other = centity->fences[idx];  in amdgpu_ctx_add_fence()
    452  centity->fences[idx] = fence;  in amdgpu_ctx_add_fence()
    484  fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);  in amdgpu_ctx_get_fence()
    546  other = dma_fence_get(centity->fences[idx]);  in amdgpu_ctx_wait_prev_fence()
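The kcalloc() at line 103 sizes the entity with offsetof() over a flexible array member (the fences[] declared in amdgpu_ctx.h, below), so the struct and its per-job fence ring come from a single allocation. A sketch of the idiom; ctx_entity and alloc_entity() are illustrative, with n standing in for amdgpu_sched_jobs:

    #include <linux/slab.h>
    #include <linux/stddef.h>

    struct ctx_entity {
            uint64_t sequence;
            struct dma_fence *fences[];     /* flexible array member */
    };

    static struct ctx_entity *alloc_entity(unsigned int n)
    {
            /* offsetof(..., fences[n]) == header size + n pointer slots */
            return kcalloc(1, offsetof(struct ctx_entity, fences[n]),
                           GFP_KERNEL);
    }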
D | amdgpu_debugfs.c
    1346  struct dma_fence **fences)  in amdgpu_ib_preempt_fences_swap() argument
    1362  ptr = &drv->fences[last_seq];  in amdgpu_ib_preempt_fences_swap()
    1370  fences[last_seq] = fence;  in amdgpu_ib_preempt_fences_swap()
    1375  static void amdgpu_ib_preempt_signal_fences(struct dma_fence **fences,  in amdgpu_ib_preempt_signal_fences() argument
    1382  fence = fences[i];  in amdgpu_ib_preempt_signal_fences()
    1423  ptr = &drv->fences[preempt_seq];  in amdgpu_ib_preempt_mark_partial_job()
    1447  struct dma_fence **fences = NULL;  in amdgpu_debugfs_ib_preempt() local
    1463  fences = kcalloc(length, sizeof(void *), GFP_KERNEL);  in amdgpu_debugfs_ib_preempt()
    1464  if (!fences)  in amdgpu_debugfs_ib_preempt()
    1493  amdgpu_ib_preempt_fences_swap(ring, fences);  in amdgpu_debugfs_ib_preempt()
    [all …]
D | amdgpu_cs.c
    1503  struct drm_amdgpu_fence *fences)  in amdgpu_cs_wait_all_fences() argument
    1513  fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);  in amdgpu_cs_wait_all_fences()
    1548  struct drm_amdgpu_fence *fences)  in amdgpu_cs_wait_any_fence() argument
    1566  fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);  in amdgpu_cs_wait_any_fence()
    1616  struct drm_amdgpu_fence *fences;  in amdgpu_cs_wait_fences_ioctl() local
    1620  fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),  in amdgpu_cs_wait_fences_ioctl()
    1622  if (fences == NULL)  in amdgpu_cs_wait_fences_ioctl()
    1625  fences_user = u64_to_user_ptr(wait->in.fences);  in amdgpu_cs_wait_fences_ioctl()
    1626  if (copy_from_user(fences, fences_user,  in amdgpu_cs_wait_fences_ioctl()
    1633  r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);  in amdgpu_cs_wait_fences_ioctl()
    [all …]
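The ioctl path at lines 1616-1626 is the standard way a fence array crosses the user/kernel boundary: the array pointer arrives as a __u64 and is bounced into kernel memory before any waiting happens. A sketch of just that copy-in step; copy_fences_from_user() is an illustrative helper:

    #include <linux/err.h>
    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/uaccess.h>
    #include <drm/amdgpu_drm.h>

    static struct drm_amdgpu_fence *
    copy_fences_from_user(u64 user_ptr, u32 fence_count)
    {
            struct drm_amdgpu_fence *fences;

            fences = kmalloc_array(fence_count, sizeof(*fences), GFP_KERNEL);
            if (!fences)
                    return ERR_PTR(-ENOMEM);

            if (copy_from_user(fences, u64_to_user_ptr(user_ptr),
                               sizeof(*fences) * fence_count)) {
                    kfree(fences);
                    return ERR_PTR(-EFAULT);
            }
            return fences;          /* caller kfree()s after waiting */
    }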
D | amdgpu_sync.h
    45  DECLARE_HASHTABLE(fences, 4);
D | amdgpu_vcn.c
    320  unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};  in amdgpu_vcn_idle_work_handler() local
    344  fences += fence[j];  in amdgpu_vcn_idle_work_handler()
    347  if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {  in amdgpu_vcn_idle_work_handler()
    373  unsigned int fences = 0;  in amdgpu_vcn_ring_begin_use() local
    377  fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);  in amdgpu_vcn_ring_begin_use()
    379  if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))  in amdgpu_vcn_ring_begin_use()
D | amdgpu_ctx.h
    37  struct dma_fence *fences[];  member
/kernel/linux/linux-5.10/Documentation/driver-api/
D | sync_file.rst
    9   the fences(struct dma_fence) that are needed to synchronize between drivers or
    29  in-fences and out-fences
    33  the driver to userspace we call the fences it contains 'out-fences'. They are
    37  Out-fences are fences that the driver creates.
    40  userspace we call these fence(s) 'in-fences'. Receiving in-fences means that
    42  the in-fences.
    72  of the Sync File to the kernel. The kernel can then retrieve the fences
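On the driver side, the flow this document describes reduces to two helpers exported by sync_file.c: wrap a fence in a sync_file and install an fd for an out-fence, and resolve a userspace fd back to a fence for an in-fence. A hedged sketch; export_out_fence() and import_in_fence() are illustrative names:

    #include <linux/fcntl.h>
    #include <linux/file.h>
    #include <linux/sync_file.h>

    static int export_out_fence(struct dma_fence *fence)
    {
            struct sync_file *sync_file = sync_file_create(fence);
            int fd;

            if (!sync_file)
                    return -ENOMEM;

            fd = get_unused_fd_flags(O_CLOEXEC);
            if (fd < 0) {
                    fput(sync_file->file);
                    return fd;
            }
            fd_install(fd, sync_file->file);
            return fd;                      /* handed to userspace */
    }

    static struct dma_fence *import_in_fence(int fd)
    {
            /* returns a new fence reference, or NULL on a bad fd */
            return sync_file_get_fence(fd);
    }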
D | dma-buf.rst
    139  :doc: DMA fences overview
    192  * Future fences, used in HWC1 to signal when a buffer isn't used by the display
    196  * Proxy fences, proposed to handle &drm_syncobj for which the fence has not yet
    199  * Userspace fences or gpu futexes, fine-grained locking within a command buffer
    205  batch DMA fences for memory management instead of context preemption DMA
    206  fences which get reattached when the compute job is rescheduled.
    209  fences and controls when they fire. Mixing indefinite fences with normal
    210  in-kernel DMA fences does not work, even when a fallback timeout is included to
    216  * Only userspace knows about all dependencies in indefinite fences and when
    220  for memory management needs, which means we must support indefinite fences being
    [all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/selftests/
D | i915_sw_fence.c
    453  struct i915_sw_fence **fences;  in test_chain() local
    457  fences = kmalloc_array(nfences, sizeof(*fences), GFP_KERNEL);  in test_chain()
    458  if (!fences)  in test_chain()
    462  fences[i] = alloc_fence();  in test_chain()
    463  if (!fences[i]) {  in test_chain()
    470  ret = i915_sw_fence_await_sw_fence_gfp(fences[i],  in test_chain()
    471  fences[i - 1],  in test_chain()
    478  i915_sw_fence_commit(fences[i]);  in test_chain()
    484  if (i915_sw_fence_done(fences[i])) {  in test_chain()
    490  i915_sw_fence_commit(fences[0]);  in test_chain()
    [all …]
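test_chain() links each software fence to its predecessor, so nothing completes until the head fence, which waits on nothing, is committed at line 490. A sketch of the chain construction using the same i915_sw_fence calls as the selftest; build_chain() is illustrative and fence allocation (the selftest's alloc_fence()) is elided:

    static int build_chain(struct i915_sw_fence **fences, unsigned long n)
    {
            unsigned long i;
            int ret;

            for (i = 1; i < n; i++) {
                    /* fence[i] may not signal before fence[i - 1] */
                    ret = i915_sw_fence_await_sw_fence_gfp(fences[i],
                                                           fences[i - 1],
                                                           GFP_KERNEL);
                    if (ret < 0)
                            return ret;
                    i915_sw_fence_commit(fences[i]);
            }

            /* committing the head, which awaits nothing, ripples down */
            i915_sw_fence_commit(fences[0]);
            return 0;
    }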
/kernel/linux/linux-5.10/drivers/gpu/drm/radeon/
D | radeon_sa.c
    248  struct radeon_fence **fences,  in radeon_sa_bo_next_hole() argument
    278  fences[i] = sa_bo->fence;  in radeon_sa_bo_next_hole()
    317  struct radeon_fence *fences[RADEON_NUM_RINGS];  in radeon_sa_bo_new() local
    336  fences[i] = NULL;  in radeon_sa_bo_new()
    350  } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));  in radeon_sa_bo_new()
    353  radeon_fence_ref(fences[i]);  in radeon_sa_bo_new()
    356  r = radeon_fence_wait_any(rdev, fences, false);  in radeon_sa_bo_new()
    358  radeon_fence_unref(&fences[i]);  in radeon_sa_bo_new()
D | radeon_trace.h
    36  __field(u32, fences)
    42  __entry->fences = radeon_fence_count_emitted(
    47  __entry->fences)
/kernel/linux/linux-5.10/include/linux/
D | dma-fence-array.h
    43  struct dma_fence **fences;  member
    78  struct dma_fence **fences,
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gem/
D | i915_gem_execbuffer.c
    302   struct eb_fence *fences;  member
    2670  __free_fence_array(struct eb_fence *fences, unsigned int n)  in __free_fence_array() argument
    2673  drm_syncobj_put(ptr_mask_bits(fences[n].syncobj, 2));  in __free_fence_array()
    2674  dma_fence_put(fences[n].dma_fence);  in __free_fence_array()
    2675  kfree(fences[n].chain_fence);  in __free_fence_array()
    2677  kvfree(fences);  in __free_fence_array()
    2709  f = krealloc(eb->fences,  in add_timeline_fence_array()
    2715  eb->fences = f;  in add_timeline_fence_array()
    2835  f = krealloc(eb->fences,  in add_fence_array()
    2841  eb->fences = f;  in add_fence_array()
    [all …]
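The krealloc() calls at lines 2709 and 2835 grow one flat array as each batch of fences is added from the execbuf ioctl. A sketch of that growth step; the eb_fence layout is a stand-in matching the fields visible in __free_fence_array(), and extend_fence_array() is an illustrative name:

    #include <linux/slab.h>
    #include <linux/string.h>

    struct eb_fence {                       /* illustrative stand-in */
            struct drm_syncobj *syncobj;
            struct dma_fence *dma_fence;
            struct dma_fence_chain *chain_fence;
    };

    static struct eb_fence *extend_fence_array(struct eb_fence *fences,
                                               unsigned long old_n,
                                               unsigned long add_n)
    {
            struct eb_fence *f;

            f = krealloc(fences, (old_n + add_n) * sizeof(*f),
                         __GFP_NOWARN | GFP_KERNEL);
            if (!f)
                    return NULL;    /* the original array is still valid */

            /* zero only the newly appended tail */
            memset(f + old_n, 0, add_n * sizeof(*f));
            return f;
    }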
/kernel/linux/linux-5.10/drivers/gpu/drm/virtio/
D | virtgpu_fence.c
    105  list_add_tail(&fence->node, &drv->fences);  in virtio_gpu_fence_emit()
    123  list_for_each_entry_safe(fence, tmp, &drv->fences, node) {  in virtio_gpu_fence_event_process()
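virtio-gpu keeps emitted fences on a plain list: emit appends, and the event-processing path walks it with list_for_each_entry_safe(), signalling and dropping everything the device has completed. A sketch of that processing step, with illustrative struct and field names and locking omitted:

    #include <linux/dma-fence.h>
    #include <linux/list.h>

    struct gpu_fence {
            struct dma_fence base;
            u64 seq;                /* sequence number the device acks */
            struct list_head node;
    };

    static void process_fences(struct list_head *fences, u64 last_seq)
    {
            struct gpu_fence *fence, *tmp;

            list_for_each_entry_safe(fence, tmp, fences, node) {
                    if (fence->seq > last_seq)
                            continue;       /* not yet completed */
                    dma_fence_signal(&fence->base);
                    list_del(&fence->node);
                    dma_fence_put(&fence->base);    /* drop the list's reference */
            }
    }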