Lines Matching full:queue

67                       struct nvk_upload_queue *queue)  in nvk_upload_queue_init()  argument
71 memset(queue, 0, sizeof(*queue)); in nvk_upload_queue_init()
73 simple_mtx_init(&queue->mutex, mtx_plain); in nvk_upload_queue_init()
76 &queue->drm.ws_ctx); in nvk_upload_queue_init()
85 err = drmSyncobjCreate(dev->ws_dev->fd, 0, &queue->drm.syncobj); in nvk_upload_queue_init()
91 list_inithead(&queue->bos); in nvk_upload_queue_init()
96 simple_mtx_destroy(&queue->mutex); in nvk_upload_queue_init()
98 nouveau_ws_context_destroy(queue->drm.ws_ctx); in nvk_upload_queue_init()
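
The matches above are fragments of nvk_upload_queue_init(): zero the struct, set up the mutex, create a nouveau context and a DRM syncobj, and unwind in reverse order if a later step fails (the retired-BO list init on line 91 is the last step before success). A minimal sketch of that init-and-unwind shape, assuming hypothetical upload_ctx_create()/upload_ctx_destroy() helpers in place of the real nouveau_ws_context calls and plain int error codes instead of VkResult:

    /* Hypothetical types standing in for nvk_upload_queue / nvk_upload_bo;
     * the field names here are assumptions for illustration, not the real
     * structs. */
    #include <stdint.h>
    #include <string.h>
    #include <threads.h>
    #include <xf86drm.h>

    struct upload_bo {
       uint64_t gpu_addr;          /* GPU VA of the buffer */
       char *map;                  /* CPU mapping */
       uint64_t idle_time_point;   /* timeline point after which it is reusable */
    };

    struct upload_queue {
       mtx_t mutex;
       uint32_t syncobj;           /* DRM timeline syncobj */
       uint64_t last_time_point;
       void *ws_ctx;               /* submission context */
       struct upload_bo *bo;       /* current buffer, NULL if none */
       uint32_t bo_push_start;     /* already-flushed command bytes end here */
       uint32_t bo_push_end;       /* recorded command bytes end here */
       uint32_t bo_data_start;     /* staged data grows downward from here */
    };

    /* Assumed helpers, not a real API. */
    void *upload_ctx_create(int drm_fd);
    void upload_ctx_destroy(void *ctx);

    int upload_queue_init(int drm_fd, struct upload_queue *queue)
    {
       memset(queue, 0, sizeof(*queue));
       mtx_init(&queue->mutex, mtx_plain);

       queue->ws_ctx = upload_ctx_create(drm_fd);
       if (queue->ws_ctx == NULL)
          goto fail_mutex;

       if (drmSyncobjCreate(drm_fd, 0, &queue->syncobj) < 0)
          goto fail_ctx;

       /* The real code also initializes the retired-BO list here. */
       return 0;

    fail_ctx:
       upload_ctx_destroy(queue->ws_ctx);
    fail_mutex:
       mtx_destroy(&queue->mutex);
       return -1;
    }
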
105 struct nvk_upload_queue *queue) in nvk_upload_queue_finish() argument
107 list_for_each_entry_safe(struct nvk_upload_bo, bo, &queue->bos, link) in nvk_upload_queue_finish()
110 if (queue->bo != NULL) in nvk_upload_queue_finish()
111 nvk_upload_bo_destroy(dev, queue->bo); in nvk_upload_queue_finish()
113 drmSyncobjDestroy(dev->ws_dev->fd, queue->drm.syncobj); in nvk_upload_queue_finish()
114 nouveau_ws_context_destroy(queue->drm.ws_ctx); in nvk_upload_queue_finish()
115 simple_mtx_destroy(&queue->mutex); in nvk_upload_queue_finish()
120 struct nvk_upload_queue *queue, in nvk_upload_queue_flush_locked() argument
123 if (queue->bo == NULL || queue->bo_push_start == queue->bo_push_end) { in nvk_upload_queue_flush_locked()
125 *time_point_out = queue->last_time_point; in nvk_upload_queue_flush_locked()
129 uint64_t time_point = queue->last_time_point + 1; in nvk_upload_queue_flush_locked()
134 .va = queue->bo->bo->offset + queue->bo_push_start, in nvk_upload_queue_flush_locked()
135 .va_len = queue->bo_push_end - queue->bo_push_start, in nvk_upload_queue_flush_locked()
140 .handle = queue->drm.syncobj, in nvk_upload_queue_flush_locked()
145 .channel = queue->drm.ws_ctx->channel, in nvk_upload_queue_flush_locked()
161 queue->last_time_point = time_point; in nvk_upload_queue_flush_locked()
163 queue->bo->idle_time_point = time_point; in nvk_upload_queue_flush_locked()
164 queue->bo_push_start = queue->bo_push_end; in nvk_upload_queue_flush_locked()
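
Lines 120-164 are the core of the flush path: if nothing was recorded since the last flush, just report the existing time point; otherwise pick last_time_point + 1, submit the pushbuf range with a request to signal the timeline syncobj at that point, and advance the bookkeeping. A hedged sketch of that bookkeeping, reusing the hypothetical types above and a stand-in submit_copy_push() in place of the real nouveau exec submission:

    /* Assumed helper: run the pushbuf range [va, va+va_len) on the copy
     * context and signal `syncobj` at `signal_point` when it completes. */
    int submit_copy_push(struct upload_queue *q, uint64_t va, uint32_t va_len,
                         uint32_t syncobj, uint64_t signal_point);

    static int upload_queue_flush_locked(struct upload_queue *q,
                                         uint64_t *time_point_out)
    {
       /* Nothing recorded since the last flush. */
       if (q->bo == NULL || q->bo_push_start == q->bo_push_end) {
          if (time_point_out != NULL)
             *time_point_out = q->last_time_point;
          return 0;
       }

       const uint64_t time_point = q->last_time_point + 1;

       int err = submit_copy_push(q, q->bo->gpu_addr + q->bo_push_start,
                                  q->bo_push_end - q->bo_push_start,
                                  q->syncobj, time_point);
       if (err)
          return err;

       /* Everything up to bo_push_end now belongs to the GPU. */
       q->last_time_point = time_point;
       q->bo->idle_time_point = time_point;
       q->bo_push_start = q->bo_push_end;

       if (time_point_out != NULL)
          *time_point_out = time_point;
       return 0;
    }
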
174 struct nvk_upload_queue *queue, in nvk_upload_queue_flush() argument
179 simple_mtx_lock(&queue->mutex); in nvk_upload_queue_flush()
180 result = nvk_upload_queue_flush_locked(dev, queue, time_point_out); in nvk_upload_queue_flush()
181 simple_mtx_unlock(&queue->mutex); in nvk_upload_queue_flush()
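
The public nvk_upload_queue_flush() shown here is a thin wrapper: take the queue mutex, call the _locked variant, drop the mutex. The sync and upload entry points below follow the same shape. A sketch with the hypothetical types above:

    int upload_queue_flush(struct upload_queue *q, uint64_t *time_point_out)
    {
       mtx_lock(&q->mutex);
       int ret = upload_queue_flush_locked(q, time_point_out);
       mtx_unlock(&q->mutex);
       return ret;
    }
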
188 struct nvk_upload_queue *queue) in nvk_upload_queue_sync_locked() argument
192 result = nvk_upload_queue_flush_locked(dev, queue, NULL); in nvk_upload_queue_sync_locked()
196 if (queue->last_time_point == 0) in nvk_upload_queue_sync_locked()
199 int err = drmSyncobjTimelineWait(dev->ws_dev->fd, &queue->drm.syncobj, in nvk_upload_queue_sync_locked()
200 &queue->last_time_point, 1, INT64_MAX, in nvk_upload_queue_sync_locked()
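
The sync path flushes any pending commands and then blocks on the timeline syncobj until it reaches the last submitted point; a last_time_point of 0 means nothing was ever submitted, so there is nothing to wait for. A sketch, assuming the DRM file descriptor is passed in explicitly (the real code takes it from the device):

    static int upload_queue_sync_locked(int drm_fd, struct upload_queue *q)
    {
       int ret = upload_queue_flush_locked(q, NULL);
       if (ret)
          return ret;

       if (q->last_time_point == 0)
          return 0;

       /* INT64_MAX timeout: wait forever; flags 0, no first-signaled index. */
       return drmSyncobjTimelineWait(drm_fd, &q->syncobj, &q->last_time_point,
                                     1, INT64_MAX, 0, NULL);
    }
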
211 struct nvk_upload_queue *queue) in nvk_upload_queue_sync() argument
215 simple_mtx_lock(&queue->mutex); in nvk_upload_queue_sync()
216 result = nvk_upload_queue_sync_locked(dev, queue); in nvk_upload_queue_sync()
217 simple_mtx_unlock(&queue->mutex); in nvk_upload_queue_sync()
224 struct nvk_upload_queue *queue, in nvk_upload_queue_reserve() argument
230 assert(queue->bo_push_end <= queue->bo_data_start); in nvk_upload_queue_reserve()
232 if (queue->bo != NULL) { in nvk_upload_queue_reserve()
233 if (queue->bo_data_start - queue->bo_push_end >= min_bo_size) in nvk_upload_queue_reserve()
237 result = nvk_upload_queue_flush_locked(dev, queue, NULL); in nvk_upload_queue_reserve()
241 assert(queue->bo_push_start == queue->bo_push_end); in nvk_upload_queue_reserve()
242 list_addtail(&queue->bo->link, &queue->bos); in nvk_upload_queue_reserve()
243 queue->bo = NULL; in nvk_upload_queue_reserve()
246 assert(queue->bo == NULL); in nvk_upload_queue_reserve()
247 queue->bo_push_start = queue->bo_push_end = 0; in nvk_upload_queue_reserve()
248 queue->bo_data_start = NVK_UPLOAD_BO_SIZE; in nvk_upload_queue_reserve()
251 if (!list_is_empty(&queue->bos)) { in nvk_upload_queue_reserve()
253 int err = drmSyncobjQuery(dev->ws_dev->fd, &queue->drm.syncobj, in nvk_upload_queue_reserve()
261 list_first_entry(&queue->bos, struct nvk_upload_bo, link); in nvk_upload_queue_reserve()
264 queue->bo = bo; in nvk_upload_queue_reserve()
269 return nvk_upload_bo_create(dev, &queue->bo); in nvk_upload_queue_reserve()
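
Lines 224-269 are the reserve path: if the current BO still has at least min_size bytes free between bo_push_end and bo_data_start it is kept; otherwise the queue is flushed, the BO is retired onto queue->bos, the offsets are reset, and the oldest retired BO is recycled if the GPU has already finished with it, falling back to allocating a fresh BO. A hedged sketch of just that idle check, using drmSyncobjQuery as the matched lines do:

    #include <stdbool.h>

    static bool upload_bo_is_idle(int drm_fd, const struct upload_queue *q,
                                  const struct upload_bo *bo)
    {
       uint64_t syncobj_value = 0;
       uint32_t handle = q->syncobj;

       if (drmSyncobjQuery(drm_fd, &handle, &syncobj_value, 1) < 0)
          return false;   /* on error, treat the BO as busy and allocate anew */

       /* The BO was last submitted at idle_time_point; it is safe to reuse
        * once the timeline has advanced at least that far. */
       return bo->idle_time_point <= syncobj_value;
    }
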
274 struct nvk_upload_queue *queue, in nvk_upload_queue_upload_locked() argument
290 result = nvk_upload_queue_reserve(dev, queue, min_size); in nvk_upload_queue_upload_locked()
294 assert(queue->bo != NULL); in nvk_upload_queue_upload_locked()
295 assert(queue->bo_data_start > queue->bo_push_end); in nvk_upload_queue_upload_locked()
296 const uint32_t avail = queue->bo_data_start - queue->bo_push_end; in nvk_upload_queue_upload_locked()
301 const uint32_t data_bo_offset = queue->bo_data_start - data_size; in nvk_upload_queue_upload_locked()
302 assert(queue->bo_push_end + cmd_size <= data_bo_offset); in nvk_upload_queue_upload_locked()
303 const uint64_t data_addr = queue->bo->bo->offset + data_bo_offset; in nvk_upload_queue_upload_locked()
304 memcpy(queue->bo->map + data_bo_offset, src, data_size); in nvk_upload_queue_upload_locked()
305 queue->bo_data_start = data_bo_offset; in nvk_upload_queue_upload_locked()
308 nv_push_init(&p, queue->bo->map + queue->bo_push_end, cmd_size_dw); in nvk_upload_queue_upload_locked()
330 queue->bo_push_end += nv_push_dw_count(&p) * 4; in nvk_upload_queue_upload_locked()
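
The upload path stages everything in a single BO: source bytes are copied to the top (bo_data_start moves down) while the copy commands that reference them are appended at the bottom (bo_push_end moves up), and the assert on line 302 guarantees the two regions never cross. A sketch of packing one chunk, with a hypothetical emit_copy_cmds() standing in for the nv_push command building:

    #include <assert.h>

    /* Assumed helper: write GPU copy commands at `cmds` that copy `size`
     * bytes from src_addr to dst_addr, returning the number of command
     * bytes written. */
    uint32_t emit_copy_cmds(void *cmds, uint64_t dst_addr, uint64_t src_addr,
                            uint32_t size);

    static void upload_pack_chunk(struct upload_queue *q, uint64_t dst_addr,
                                  const void *src, uint32_t data_size,
                                  uint32_t cmd_size)
    {
       /* Stage the source bytes at the top of the upload BO. */
       const uint32_t data_offset = q->bo_data_start - data_size;
       assert(q->bo_push_end + cmd_size <= data_offset);
       memcpy(q->bo->map + data_offset, src, data_size);
       q->bo_data_start = data_offset;

       /* Append commands at the bottom that read from the staged copy. */
       const uint64_t data_addr = q->bo->gpu_addr + data_offset;
       q->bo_push_end += emit_copy_cmds(q->bo->map + q->bo_push_end,
                                        dst_addr, data_addr, data_size);
    }
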
342 struct nvk_upload_queue *queue, in nvk_upload_queue_upload() argument
348 simple_mtx_lock(&queue->mutex); in nvk_upload_queue_upload()
349 result = nvk_upload_queue_upload_locked(dev, queue, dst_addr, src, size); in nvk_upload_queue_upload()
350 simple_mtx_unlock(&queue->mutex); in nvk_upload_queue_upload()
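
Putting the pieces together, a caller would typically record an upload and then either flush (to obtain a time point to wait on elsewhere) or sync before using the destination. A hedged usage sketch with assumed wrapper signatures mirroring the locked/unlocked split above:

    /* Assumed public wrappers, following the flush wrapper sketched earlier. */
    int upload_queue_upload(struct upload_queue *q, uint64_t dst_addr,
                            const void *src, size_t size);
    int upload_queue_sync(int drm_fd, struct upload_queue *q);

    int upload_and_wait(int drm_fd, struct upload_queue *q,
                        uint64_t dst_gpu_addr, const void *data, size_t size)
    {
       int ret = upload_queue_upload(q, dst_gpu_addr, data, size);
       if (ret)
          return ret;

       /* Block until the copy has actually landed at dst_gpu_addr. */
       return upload_queue_sync(drm_fd, q);
    }
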