Lines matching "full:exec" (all hits fall in the vc4 DRM driver, drivers/gpu/drm/vc4/vc4_gem.c)
147 struct vc4_exec_info *exec[2]; in vc4_save_hang_state() local
159 exec[0] = vc4_first_bin_job(vc4); in vc4_save_hang_state()
160 exec[1] = vc4_first_render_job(vc4); in vc4_save_hang_state()
161 if (!exec[0] && !exec[1]) { in vc4_save_hang_state()
169 if (!exec[i]) in vc4_save_hang_state()
173 list_for_each_entry(bo, &exec[i]->unref_list, unref_head) in vc4_save_hang_state()
175 state->bo_count += exec[i]->bo_count + unref_list_count; in vc4_save_hang_state()
188 if (!exec[i]) in vc4_save_hang_state()
191 for (j = 0; j < exec[i]->bo_count; j++) { in vc4_save_hang_state()
192 bo = to_vc4_bo(&exec[i]->bo[j]->base); in vc4_save_hang_state()
200 drm_gem_object_get(&exec[i]->bo[j]->base); in vc4_save_hang_state()
201 kernel_state->bo[k++] = &exec[i]->bo[j]->base; in vc4_save_hang_state()
204 list_for_each_entry(bo, &exec[i]->unref_list, unref_head) { in vc4_save_hang_state()
215 if (exec[0]) in vc4_save_hang_state()
216 state->start_bin = exec[0]->ct0ca; in vc4_save_hang_state()
217 if (exec[1]) in vc4_save_hang_state()
218 state->start_render = exec[1]->ct1ca; in vc4_save_hang_state()
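The matches above (source lines 147-218) are vc4_save_hang_state(): on a GPU hang, the dump code treats the binner and render jobs symmetrically through a two-element exec[] array, skips whichever slot is NULL, counts BOs from both bo[] and unref_list, and takes an extra GEM reference on each so the state outlives the jobs. A minimal sketch of that skip-the-empty-slot pattern, assuming a hypothetical count_job_bos() helper that is not part of the driver:

	struct vc4_exec_info *exec[2];
	unsigned int i, bo_count = 0;

	exec[0] = vc4_first_bin_job(vc4);
	exec[1] = vc4_first_render_job(vc4);
	if (!exec[0] && !exec[1])
		return;				/* nothing hung, nothing to dump */

	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;		/* only one of the two queues may hold a job */
		bo_count += count_job_bos(exec[i]);	/* hypothetical helper */
	}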
462 struct vc4_exec_info *exec; in vc4_submit_next_bin_job() local
465 exec = vc4_first_bin_job(vc4); in vc4_submit_next_bin_job()
466 if (!exec) in vc4_submit_next_bin_job()
474 if (exec->perfmon && vc4->active_perfmon != exec->perfmon) in vc4_submit_next_bin_job()
475 vc4_perfmon_start(vc4, exec->perfmon); in vc4_submit_next_bin_job()
480 if (exec->ct0ca != exec->ct0ea) { in vc4_submit_next_bin_job()
481 submit_cl(dev, 0, exec->ct0ca, exec->ct0ea); in vc4_submit_next_bin_job()
485 vc4_move_job_to_render(dev, exec); in vc4_submit_next_bin_job()
493 if (next && next->perfmon == exec->perfmon) in vc4_submit_next_bin_job()
502 struct vc4_exec_info *exec = vc4_first_render_job(vc4); in vc4_submit_next_render_job() local
504 if (!exec) in vc4_submit_next_render_job()
515 submit_cl(dev, 1, exec->ct1ca, exec->ct1ea); in vc4_submit_next_render_job()
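Source lines 462-515 come from vc4_submit_next_bin_job() and vc4_submit_next_render_job(). ct0ca/ct0ea are the current/end addresses of control-list thread 0 (the binner) and ct1ca/ct1ea of thread 1 (the renderer); a job whose bin control list is empty is moved straight onto the render queue, and line 493 shows that the following bin job is only kicked if it shares the finished job's perfmon. A sketch of the bin-side flow stitched together from the matched lines (the real function carries extra bookkeeping between them):

	exec = vc4_first_bin_job(vc4);
	if (!exec)
		return;

	/* Switch perf counters over before the job runs, if it asked for them. */
	if (exec->perfmon && vc4->active_perfmon != exec->perfmon)
		vc4_perfmon_start(vc4, exec->perfmon);

	if (exec->ct0ca != exec->ct0ea) {
		/* Non-empty binner CL: kick control-list thread 0. */
		submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
	} else {
		/* Empty bin CL (bin_cl_size == 0): skip straight to rendering. */
		vc4_move_job_to_render(dev, exec);
	}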
519 vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec) in vc4_move_job_to_render() argument
524 list_move_tail(&exec->head, &vc4->render_job_list); in vc4_move_job_to_render()
530 vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno) in vc4_update_bo_seqnos() argument
535 for (i = 0; i < exec->bo_count; i++) { in vc4_update_bo_seqnos()
536 bo = to_vc4_bo(&exec->bo[i]->base); in vc4_update_bo_seqnos()
539 reservation_object_add_shared_fence(bo->resv, exec->fence); in vc4_update_bo_seqnos()
542 list_for_each_entry(bo, &exec->unref_list, unref_head) { in vc4_update_bo_seqnos()
546 for (i = 0; i < exec->rcl_write_bo_count; i++) { in vc4_update_bo_seqnos()
547 bo = to_vc4_bo(&exec->rcl_write_bo[i]->base); in vc4_update_bo_seqnos()
550 reservation_object_add_excl_fence(bo->resv, exec->fence); in vc4_update_bo_seqnos()
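vc4_update_bo_seqnos() (source lines 530-550) stamps the job's fence onto every BO's reservation object: BOs the job may read get it as a shared fence, while the render-control-list write targets get it as the exclusive fence, so later CPU or GPU users order correctly against this job. The shape of the two loops, per the matches (the seqno store is an assumption based on the function's name):

	/* Readers: shared fence on each looked-up BO. */
	for (i = 0; i < exec->bo_count; i++) {
		bo = to_vc4_bo(&exec->bo[i]->base);
		bo->seqno = seqno;		/* assumed bookkeeping */
		reservation_object_add_shared_fence(bo->resv, exec->fence);
	}

	/* Writers: RCL write targets take the exclusive fence. */
	for (i = 0; i < exec->rcl_write_bo_count; i++) {
		bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
		reservation_object_add_excl_fence(bo->resv, exec->fence);
	}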
556 struct vc4_exec_info *exec, in vc4_unlock_bo_reservations() argument
561 for (i = 0; i < exec->bo_count; i++) { in vc4_unlock_bo_reservations()
562 struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base); in vc4_unlock_bo_reservations()
574 * (all of which are on exec->unref_list). They're entirely private
579 struct vc4_exec_info *exec, in vc4_lock_bo_reservations() argument
590 bo = to_vc4_bo(&exec->bo[contended_lock]->base); in vc4_lock_bo_reservations()
599 for (i = 0; i < exec->bo_count; i++) { in vc4_lock_bo_reservations()
603 bo = to_vc4_bo(&exec->bo[i]->base); in vc4_lock_bo_reservations()
610 bo = to_vc4_bo(&exec->bo[j]->base); in vc4_lock_bo_reservations()
615 bo = to_vc4_bo(&exec->bo[contended_lock]->base); in vc4_lock_bo_reservations()
635 for (i = 0; i < exec->bo_count; i++) { in vc4_lock_bo_reservations()
636 bo = to_vc4_bo(&exec->bo[i]->base); in vc4_lock_bo_reservations()
640 vc4_unlock_bo_reservations(dev, exec, acquire_ctx); in vc4_lock_bo_reservations()
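Source lines 556-640 are vc4_unlock_bo_reservations()/vc4_lock_bo_reservations(), which implement the standard ww_mutex deadlock-avoidance loop over the BOs' reservation locks: on -EDEADLK, unlock everything taken so far, re-take the contended lock with the slow path, and retry; lines 610 and 615 are the two halves of that unwind. A condensed sketch of the pattern (reservation locks were still bare ww_mutexes in this era of the driver):

	int contended_lock = -1;
	int i, ret;

retry:
	if (contended_lock != -1) {
		/* Slow path: sleep until the previously contended lock is free. */
		bo = to_vc4_bo(&exec->bo[contended_lock]->base);
		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
						       acquire_ctx);
		if (ret)
			return ret;
	}

	for (i = 0; i < exec->bo_count; i++) {
		int j;

		if (i == contended_lock)
			continue;
		bo = to_vc4_bo(&exec->bo[i]->base);
		ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
		if (!ret)
			continue;

		/* Back off: drop everything taken on this pass. */
		for (j = 0; j < i; j++) {
			bo = to_vc4_bo(&exec->bo[j]->base);
			ww_mutex_unlock(&bo->resv->lock);
		}
		if (contended_lock != -1 && contended_lock >= i) {
			bo = to_vc4_bo(&exec->bo[contended_lock]->base);
			ww_mutex_unlock(&bo->resv->lock);
		}
		if (ret == -EDEADLK) {
			contended_lock = i;
			goto retry;
		}
		return ret;
	}

	ww_acquire_done(acquire_ctx);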
658 vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec, in vc4_queue_submit() argument
676 exec->seqno = seqno; in vc4_queue_submit()
679 vc4->dma_fence_context, exec->seqno); in vc4_queue_submit()
680 fence->seqno = exec->seqno; in vc4_queue_submit()
681 exec->fence = &fence->base; in vc4_queue_submit()
684 drm_syncobj_replace_fence(out_sync, exec->fence); in vc4_queue_submit()
686 vc4_update_bo_seqnos(exec, seqno); in vc4_queue_submit()
688 vc4_unlock_bo_reservations(dev, exec, acquire_ctx); in vc4_queue_submit()
690 list_add_tail(&exec->head, &vc4->bin_job_list); in vc4_queue_submit()
698 if (vc4_first_bin_job(vc4) == exec && in vc4_queue_submit()
699 (!renderjob || renderjob->perfmon == exec->perfmon)) { in vc4_queue_submit()
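vc4_queue_submit() (source lines 658-699) publishes the job: it takes the next seqno, wraps it in a dma_fence on the driver's fence context, optionally installs that fence into the caller's syncobj, stamps the BOs, drops the reservation locks, and appends the job to bin_job_list, kicking the hardware only if the job landed at the head of the queue with a perfmon compatible with the current render job. The fence setup implied by lines 676-684 (vc4_fence_ops and the spinlock argument are assumptions about the surrounding context):

	exec->seqno = seqno;

	dma_fence_init(&fence->base, &vc4_fence_ops, &vc4->job_lock,
		       vc4->dma_fence_context, exec->seqno);
	fence->seqno = exec->seqno;
	exec->fence = &fence->base;

	if (out_sync)
		drm_syncobj_replace_fence(out_sync, exec->fence);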
710 * vc4_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
714 * @exec: V3D job being set up
723 struct vc4_exec_info *exec) in vc4_cl_lookup_bos() argument
725 struct drm_vc4_submit_cl *args = exec->args; in vc4_cl_lookup_bos()
730 exec->bo_count = args->bo_handle_count; in vc4_cl_lookup_bos()
732 if (!exec->bo_count) { in vc4_cl_lookup_bos()
740 exec->bo = kvmalloc_array(exec->bo_count, in vc4_cl_lookup_bos()
743 if (!exec->bo) { in vc4_cl_lookup_bos()
748 handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL); in vc4_cl_lookup_bos()
756 exec->bo_count * sizeof(uint32_t))) { in vc4_cl_lookup_bos()
763 for (i = 0; i < exec->bo_count; i++) { in vc4_cl_lookup_bos()
774 exec->bo[i] = (struct drm_gem_cma_object *)bo; in vc4_cl_lookup_bos()
781 for (i = 0; i < exec->bo_count; i++) { in vc4_cl_lookup_bos()
782 ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base)); in vc4_cl_lookup_bos()
796 * exec->bo to NULL so that vc4_complete_exec() skips the 'BO release' in vc4_cl_lookup_bos()
800 vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base)); in vc4_cl_lookup_bos()
804 for (i = 0; i < exec->bo_count && exec->bo[i]; i++) in vc4_cl_lookup_bos()
805 drm_gem_object_put_unlocked(&exec->bo[i]->base); in vc4_cl_lookup_bos()
809 kvfree(exec->bo); in vc4_cl_lookup_bos()
810 exec->bo = NULL; in vc4_cl_lookup_bos()
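vc4_cl_lookup_bos() (source lines 710-810) resolves the userspace handle array in three steps: allocate exec->bo[] and a scratch handle array with kvmalloc_array(), copy all handles in with a single copy_from_user(), then translate each handle to a GEM object and bump its usecnt. The unwind paths release only what was actually taken, and the comment at line 796 explains why exec->bo is set to NULL on failure: vc4_complete_exec() must not release the BOs a second time. A simplified sketch of the copy-in step (error labels condensed; u64_to_user_ptr() stands in for the driver's cast):

	exec->bo_count = args->bo_handle_count;

	exec->bo = kvmalloc_array(exec->bo_count, sizeof(void *),
				  GFP_KERNEL | __GFP_ZERO);
	if (!exec->bo)
		return -ENOMEM;

	handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto fail;
	}

	if (copy_from_user(handles, u64_to_user_ptr(args->bo_handles),
			   exec->bo_count * sizeof(uint32_t))) {
		ret = -EFAULT;
		goto fail;
	}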
815 vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec) in vc4_get_bcl() argument
817 struct drm_vc4_submit_cl *args = exec->args; in vc4_get_bcl()
836 DRM_DEBUG("overflow in exec arguments\n"); in vc4_get_bcl()
856 exec->shader_rec_u = temp + shader_rec_offset; in vc4_get_bcl()
857 exec->uniforms_u = temp + uniforms_offset; in vc4_get_bcl()
858 exec->shader_state = temp + exec_size; in vc4_get_bcl()
859 exec->shader_state_size = args->shader_rec_count; in vc4_get_bcl()
868 if (copy_from_user(exec->shader_rec_u, in vc4_get_bcl()
875 if (copy_from_user(exec->uniforms_u, in vc4_get_bcl()
888 exec->exec_bo = &bo->base; in vc4_get_bcl()
890 list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head, in vc4_get_bcl()
891 &exec->unref_list); in vc4_get_bcl()
893 exec->ct0ca = exec->exec_bo->paddr + bin_offset; in vc4_get_bcl()
895 exec->bin_u = bin; in vc4_get_bcl()
897 exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset; in vc4_get_bcl()
898 exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset; in vc4_get_bcl()
899 exec->shader_rec_size = args->shader_rec_size; in vc4_get_bcl()
901 exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset; in vc4_get_bcl()
902 exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset; in vc4_get_bcl()
903 exec->uniforms_size = args->uniforms_size; in vc4_get_bcl()
906 exec->exec_bo->vaddr + bin_offset, in vc4_get_bcl()
908 exec); in vc4_get_bcl()
912 ret = vc4_validate_shader_recs(dev, exec); in vc4_get_bcl()
920 ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true); in vc4_get_bcl()
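vc4_get_bcl() (source lines 815-920) stages the bin control list, shader records, and uniforms from userspace into one temporary buffer and then into a single exec BO, so the offset arithmetic must be checked for u32 wraparound before any copy_from_user(); line 836 is that bail-out. The layout and guard implied by the matches (the 16-byte rounding is an assumption consistent with the offsets used at lines 856-903):

	uint32_t bin_offset = 0;
	uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size, 16);
	uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
	uint32_t exec_size = uniforms_offset + args->uniforms_size;

	/* Each offset must be non-decreasing, or a 32-bit addition wrapped. */
	if (shader_rec_offset < args->bin_cl_size ||
	    uniforms_offset < shader_rec_offset ||
	    exec_size < uniforms_offset) {
		DRM_DEBUG("overflow in exec arguments\n");
		return -EINVAL;
	}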
928 vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) in vc4_complete_exec() argument
937 if (exec->fence) { in vc4_complete_exec()
938 dma_fence_signal(exec->fence); in vc4_complete_exec()
939 dma_fence_put(exec->fence); in vc4_complete_exec()
942 if (exec->bo) { in vc4_complete_exec()
943 for (i = 0; i < exec->bo_count; i++) { in vc4_complete_exec()
944 struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base); in vc4_complete_exec()
947 drm_gem_object_put_unlocked(&exec->bo[i]->base); in vc4_complete_exec()
949 kvfree(exec->bo); in vc4_complete_exec()
952 while (!list_empty(&exec->unref_list)) { in vc4_complete_exec()
953 struct vc4_bo *bo = list_first_entry(&exec->unref_list, in vc4_complete_exec()
961 vc4->bin_alloc_used &= ~exec->bin_slots; in vc4_complete_exec()
965 vc4_perfmon_put(exec->perfmon); in vc4_complete_exec()
974 kfree(exec); in vc4_complete_exec()
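vc4_complete_exec() (source lines 928-974) tears the job down in roughly the reverse order of setup: signal and drop the fence, release each looked-up BO's usecnt and GEM reference, drain unref_list (which also frees the exec BO added at lines 890-891), return the job's bin slots to the allocator, drop the perfmon reference, and finally kfree the exec itself. The unref_list drain follows the usual list-consuming idiom:

	while (!list_empty(&exec->unref_list)) {
		struct vc4_bo *bo = list_first_entry(&exec->unref_list,
						     struct vc4_bo, unref_head);
		list_del(&bo->unref_head);
		drm_gem_object_put_unlocked(&bo->base.base);
	}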
985 struct vc4_exec_info *exec = in vc4_job_handle_completed() local
988 list_del(&exec->head); in vc4_job_handle_completed()
991 vc4_complete_exec(vc4->dev, exec); in vc4_job_handle_completed()
1036 * jobs that had completed and unrefs their BOs and frees their exec
1122 struct vc4_exec_info *exec; in vc4_submit_cl_ioctl() local
1140 exec = kcalloc(1, sizeof(*exec), GFP_KERNEL); in vc4_submit_cl_ioctl()
1141 if (!exec) { in vc4_submit_cl_ioctl()
1142 DRM_ERROR("malloc failure on exec struct\n"); in vc4_submit_cl_ioctl()
1152 kfree(exec); in vc4_submit_cl_ioctl()
1158 exec->args = args; in vc4_submit_cl_ioctl()
1159 INIT_LIST_HEAD(&exec->unref_list); in vc4_submit_cl_ioctl()
1161 ret = vc4_cl_lookup_bos(dev, file_priv, exec); in vc4_submit_cl_ioctl()
1166 exec->perfmon = vc4_perfmon_find(vc4file, in vc4_submit_cl_ioctl()
1168 if (!exec->perfmon) { in vc4_submit_cl_ioctl()
1197 if (exec->args->bin_cl_size != 0) { in vc4_submit_cl_ioctl()
1198 ret = vc4_get_bcl(dev, exec); in vc4_submit_cl_ioctl()
1202 exec->ct0ca = 0; in vc4_submit_cl_ioctl()
1203 exec->ct0ea = 0; in vc4_submit_cl_ioctl()
1206 ret = vc4_get_rcl(dev, exec); in vc4_submit_cl_ioctl()
1210 ret = vc4_lock_bo_reservations(dev, exec, &acquire_ctx); in vc4_submit_cl_ioctl()
1231 exec->args = NULL; in vc4_submit_cl_ioctl()
1233 ret = vc4_queue_submit(dev, exec, &acquire_ctx, out_sync); in vc4_submit_cl_ioctl()
1235 /* The syncobj isn't part of the exec data and we need to free our in vc4_submit_cl_ioctl()
1250 vc4_complete_exec(vc4->dev, exec); in vc4_submit_cl_ioctl()
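In vc4_submit_cl_ioctl() (source lines 1122-1250), note that args points at the ioctl's own copy of struct drm_vc4_submit_cl, so exec->args is cleared at line 1231 before the job is queued; nothing that runs after vc4_queue_submit() may dereference it. The comment at line 1235 covers the other cleanup subtlety: the syncobj reference belongs to the ioctl, not the job, so it is dropped whether or not submission succeeded. Roughly:

	exec->args = NULL;	/* args is stack data; the queued job must not touch it */

	ret = vc4_queue_submit(dev, exec, &acquire_ctx, out_sync);

	/* Our syncobj reference is independent of the job; always drop it. */
	if (out_sync)
		drm_syncobj_put(out_sync);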
1284 /* Waiting for exec to finish would need to be done before in vc4_gem_destroy()