• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
2 
3 /*
4  * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the next
14  * paragraph) shall be included in all copies or substantial portions of the
15  * Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23  * SOFTWARE.
24  *
25  * Authors:
26  *    Rob Clark <robclark@freedesktop.org>
27  */
28 
29 #include <assert.h>
30 #include <inttypes.h>
31 
32 #include "xf86atomic.h"
33 #include "freedreno_ringbuffer.h"
34 #include "msm_priv.h"
35 
/* represents a single cmd buffer in the submit ioctl.  Each cmd buffer has
 * a backing bo, and a reloc table.
 */
struct msm_cmd {
	/* link in msm_ringbuffer::cmd_list: */
	struct list_head list;

	/* the logical ringbuffer this chunk of cmdstream belongs to: */
	struct fd_ringbuffer *ring;
	/* backing bo holding the actual cmdstream dwords: */
	struct fd_bo *ring_bo;

	/* reloc's table: */
	DECLARE_ARRAY(struct drm_msm_gem_submit_reloc, relocs);

	/* size in bytes; set once the cmd is finalized via get_cmd(): */
	uint32_t size;

	/* has cmd already been added to parent rb's submit.cmds table? */
	int is_appended_to_submit;
};
53 
struct msm_ringbuffer {
	struct fd_ringbuffer base;

	/* submit ioctl related tables:
	 * Note that bos and cmds are tracked by the parent ringbuffer, since
	 * that is global to the submit ioctl call.  The reloc's table is tracked
	 * per cmd-buffer.
	 */
	struct {
		/* bo's table: */
		DECLARE_ARRAY(struct drm_msm_gem_submit_bo, bos);

		/* cmd's table: */
		DECLARE_ARRAY(struct drm_msm_gem_submit_cmd, cmds);
	} submit;

	/* should have matching entries in submit.bos: */
	/* Note, only in parent ringbuffer */
	DECLARE_ARRAY(struct fd_bo *, bos);

	/* should have matching entries in submit.cmds: */
	DECLARE_ARRAY(struct msm_cmd *, cmds);

	/* List of physical cmdstream buffers (msm_cmd) associated with this
	 * logical fd_ringbuffer.
	 *
	 * Note that this is different from msm_ringbuffer::cmds (which
	 * shadows msm_ringbuffer::submit::cmds for tracking submit ioctl
	 * related stuff, and *only* is tracked in the parent ringbuffer.
	 * And only has "completed" cmd buffers (ie. we already know the
	 * size) added via get_cmd().
	 */
	struct list_head cmd_list;

	/* if set, the rb grows by appending new backing bo's (see
	 * msm_ringbuffer_grow()) instead of having a fixed size:
	 */
	int is_growable;
	/* # of msm_cmd's on cmd_list: */
	unsigned cmd_count;

	unsigned offset;    /* for sub-allocated stateobj rb's */

	/* used by bo2idx() to detect whether a bo's cached idx belongs to
	 * this ring (see msm_bo::current_ring_seqno):
	 */
	unsigned seqno;

	/* maps fd_bo to idx: */
	void *bo_table;

	/* maps msm_cmd to drm_msm_gem_submit_cmd in parent rb.  Each rb has a
	 * list of msm_cmd's which correspond to each chunk of cmdstream in
	 * a 'growable' rb.  For each of those we need to create one
	 * drm_msm_gem_submit_cmd in the parent rb which collects the state
	 * for the submit ioctl.  Because we can have multiple IB's to the same
	 * target rb (for example, or same stateobj emit multiple times), and
	 * because in theory we can have multiple different rb's that have a
	 * reference to a given target, we need a hashtable to track this per
	 * rb.
	 */
	void *cmd_table;
};
110 
/* down-cast from the generic fd_ringbuffer to the msm backend type; valid
 * because msm_ringbuffer embeds fd_ringbuffer as its first member:
 */
static inline struct msm_ringbuffer * to_msm_ringbuffer(struct fd_ringbuffer *x)
{
	return (struct msm_ringbuffer *)x;
}
115 
/* initial backing-bo size for "growable" ringbuffers: */
#define INIT_SIZE 0x1000

/* serializes bo2idx() bookkeeping (the per-bo cached idx/seqno and the
 * ring's bo_table), which can be reached from multiple threads:
 */
static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;
119 
/* return the last (ie. currently-being-written) cmd buffer of the ring.
 * The cmd_list is expected to be non-empty once the ring is constructed,
 * hence the assert.
 */
static struct msm_cmd *current_cmd(struct fd_ringbuffer *ring)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	assert(!LIST_IS_EMPTY(&msm_ring->cmd_list));
	return LIST_LAST_ENTRY(&msm_ring->cmd_list, struct msm_cmd, list);
}
126 
/* destroy a cmd buffer: drop the backing bo reference, unlink it from the
 * owning ring's cmd_list (so the cmd must already be on the list), and
 * free the reloc table:
 */
static void ring_cmd_del(struct msm_cmd *cmd)
{
	fd_bo_del(cmd->ring_bo);
	list_del(&cmd->list);
	to_msm_ringbuffer(cmd->ring)->cmd_count--;
	free(cmd->relocs);
	free(cmd);
}
135 
ring_cmd_new(struct fd_ringbuffer * ring,uint32_t size,enum fd_ringbuffer_flags flags)136 static struct msm_cmd * ring_cmd_new(struct fd_ringbuffer *ring, uint32_t size,
137 		enum fd_ringbuffer_flags flags)
138 {
139 	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
140 	struct msm_cmd *cmd = calloc(1, sizeof(*cmd));
141 
142 	if (!cmd)
143 		return NULL;
144 
145 	cmd->ring = ring;
146 
147 	/* TODO separate suballoc buffer for small non-streaming state, using
148 	 * smaller page-sized backing bo's.
149 	 */
150 	if (flags & FD_RINGBUFFER_STREAMING) {
151 		struct msm_pipe *msm_pipe = to_msm_pipe(ring->pipe);
152 		unsigned suballoc_offset = 0;
153 		struct fd_bo *suballoc_bo = NULL;
154 
155 		if (msm_pipe->suballoc_ring) {
156 			struct msm_ringbuffer *suballoc_ring = to_msm_ringbuffer(msm_pipe->suballoc_ring);
157 
158 			assert(msm_pipe->suballoc_ring->flags & FD_RINGBUFFER_OBJECT);
159 			assert(suballoc_ring->cmd_count == 1);
160 
161 			suballoc_bo = current_cmd(msm_pipe->suballoc_ring)->ring_bo;
162 
163 			suballoc_offset = fd_ringbuffer_size(msm_pipe->suballoc_ring) +
164 					suballoc_ring->offset;
165 
166 			suballoc_offset = ALIGN(suballoc_offset, 0x10);
167 
168 			if ((size + suballoc_offset) > suballoc_bo->size) {
169 				suballoc_bo = NULL;
170 			}
171 		}
172 
173 		if (!suballoc_bo) {
174 			cmd->ring_bo = fd_bo_new_ring(ring->pipe->dev, 0x8000, 0);
175 			msm_ring->offset = 0;
176 		} else {
177 			cmd->ring_bo = fd_bo_ref(suballoc_bo);
178 			msm_ring->offset = suballoc_offset;
179 		}
180 
181 		if (msm_pipe->suballoc_ring)
182 			fd_ringbuffer_del(msm_pipe->suballoc_ring);
183 
184 		msm_pipe->suballoc_ring = fd_ringbuffer_ref(ring);
185 	} else {
186 		cmd->ring_bo = fd_bo_new_ring(ring->pipe->dev, size, 0);
187 	}
188 	if (!cmd->ring_bo)
189 		goto fail;
190 
191 	list_addtail(&cmd->list, &msm_ring->cmd_list);
192 	msm_ring->cmd_count++;
193 
194 	return cmd;
195 
196 fail:
197 	ring_cmd_del(cmd);
198 	return NULL;
199 }
200 
/* unconditionally append bo to the submit's bo table (and the parallel
 * shadow table of fd_bo pointers), taking a reference on the bo.
 * Returns the new table idx.  Caller (bo2idx()) handles de-duplication.
 */
static uint32_t append_bo(struct fd_ringbuffer *ring, struct fd_bo *bo)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	uint32_t idx;

	/* the two tables grow in lock-step, so both APPEND()s yield the
	 * same idx:
	 */
	idx = APPEND(&msm_ring->submit, bos);
	idx = APPEND(msm_ring, bos);

	msm_ring->submit.bos[idx].flags = 0;
	msm_ring->submit.bos[idx].handle = bo->handle;
	msm_ring->submit.bos[idx].presumed = to_msm_bo(bo)->presumed;

	msm_ring->bos[idx] = fd_bo_ref(bo);

	return idx;
}
217 
/* add (if needed) bo to the submit's bo table, return its idx, and OR the
 * requested read/write access into the bo's submit flags:
 */
static uint32_t bo2idx(struct fd_ringbuffer *ring, struct fd_bo *bo, uint32_t flags)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	struct msm_bo *msm_bo = to_msm_bo(bo);
	uint32_t idx;
	pthread_mutex_lock(&idx_lock);
	if (msm_bo->current_ring_seqno == msm_ring->seqno) {
		/* fast-path: the bo caches the idx it was assigned the last
		 * time it was added, tagged with the ring's seqno:
		 */
		idx = msm_bo->idx;
	} else {
		void *val;

		/* hashtable is created lazily on first use: */
		if (!msm_ring->bo_table)
			msm_ring->bo_table = drmHashCreate();

		if (!drmHashLookup(msm_ring->bo_table, bo->handle, &val)) {
			/* found */
			idx = (uint32_t)(uintptr_t)val;
		} else {
			idx = append_bo(ring, bo);
			val = (void *)(uintptr_t)idx;
			drmHashInsert(msm_ring->bo_table, bo->handle, val);
		}
		/* refresh the fast-path cache (invalidated by flush_reset()
		 * zeroing current_ring_seqno):
		 */
		msm_bo->current_ring_seqno = msm_ring->seqno;
		msm_bo->idx = idx;
	}
	pthread_mutex_unlock(&idx_lock);
	/* access flags accumulate: a bo referenced by multiple relocs can be
	 * both read and written within the same submit:
	 */
	if (flags & FD_RELOC_READ)
		msm_ring->submit.bos[idx].flags |= MSM_SUBMIT_BO_READ;
	if (flags & FD_RELOC_WRITE)
		msm_ring->submit.bos[idx].flags |= MSM_SUBMIT_BO_WRITE;
	return idx;
}
251 
/* Ensure that submit has corresponding entry in cmds table for the
 * target cmdstream buffer:
 *
 * Returns TRUE if new cmd added (else FALSE if it was already in
 * the cmds table)
 */
static int get_cmd(struct fd_ringbuffer *ring, struct msm_cmd *target_cmd,
		uint32_t submit_offset, uint32_t size, uint32_t type)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	struct drm_msm_gem_submit_cmd *cmd;
	uint32_t i;
	void *val;

	/* hashtable is created lazily on first use: */
	if (!msm_ring->cmd_table)
		msm_ring->cmd_table = drmHashCreate();

	/* figure out if we already have a cmd buf.. short-circuit hash
	 * lookup if:
	 *  - target cmd has never been added to submit.cmds
	 *  - target cmd is not a streaming stateobj (which unlike longer
	 *    lived CSO stateobj, is not expected to be reused with multiple
	 *    submits)
	 */
	if (target_cmd->is_appended_to_submit &&
			!(target_cmd->ring->flags & FD_RINGBUFFER_STREAMING) &&
			!drmHashLookup(msm_ring->cmd_table, (unsigned long)target_cmd, &val)) {
		i = VOID2U64(val);
		cmd = &msm_ring->submit.cmds[i];

		/* a reused entry must describe the exact same chunk of
		 * cmdstream:
		 */
		assert(cmd->submit_offset == submit_offset);
		assert(cmd->size == size);
		assert(cmd->type == type);
		assert(msm_ring->submit.bos[cmd->submit_idx].handle ==
				target_cmd->ring_bo->handle);

		return FALSE;
	}

	/* create cmd buf if not: */
	i = APPEND(&msm_ring->submit, cmds);
	APPEND(msm_ring, cmds);
	msm_ring->cmds[i] = target_cmd;
	cmd = &msm_ring->submit.cmds[i];
	cmd->type = type;
	cmd->submit_idx = bo2idx(ring, target_cmd->ring_bo, FD_RELOC_READ);
	cmd->submit_offset = submit_offset;
	cmd->size = size;
	cmd->pad = 0;

	target_cmd->is_appended_to_submit = TRUE;

	/* streaming stateobjs are not expected to be reused, so don't
	 * bother tracking them in the hashtable:
	 */
	if (!(target_cmd->ring->flags & FD_RINGBUFFER_STREAMING)) {
		drmHashInsert(msm_ring->cmd_table, (unsigned long)target_cmd,
				U642VOID(i));
	}

	target_cmd->size = size;

	return TRUE;
}
313 
msm_ringbuffer_hostptr(struct fd_ringbuffer * ring)314 static void * msm_ringbuffer_hostptr(struct fd_ringbuffer *ring)
315 {
316 	struct msm_cmd *cmd = current_cmd(ring);
317 	uint8_t *base = fd_bo_map(cmd->ring_bo);
318 	return base + to_msm_ringbuffer(ring)->offset;
319 }
320 
/* delete all cmd buffers on the ring's cmd_list (safe-iteration variant
 * needed since ring_cmd_del() unlinks and frees each entry):
 */
static void delete_cmds(struct msm_ringbuffer *msm_ring)
{
	struct msm_cmd *cmd, *tmp;

	LIST_FOR_EACH_ENTRY_SAFE(cmd, tmp, &msm_ring->cmd_list, list) {
		ring_cmd_del(cmd);
	}
}
329 
/* reset all per-submit state after a flush (or on explicit reset/destroy):
 * drop bo references, release stateobj ring refs, empty the tables and
 * hashtables, and reclaim cmd buffers:
 */
static void flush_reset(struct fd_ringbuffer *ring)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	unsigned i;

	/* invalidate the bo2idx() fast-path cache and drop the reference
	 * taken in append_bo():
	 */
	for (i = 0; i < msm_ring->nr_bos; i++) {
		struct msm_bo *msm_bo = to_msm_bo(msm_ring->bos[i]);
		if (!msm_bo)
			continue;
		msm_bo->current_ring_seqno = 0;
		fd_bo_del(&msm_bo->base);
	}

	/* drop the stateobj refs taken when their cmds were added to the
	 * submit (see msm_ringbuffer_emit_reloc_ring()); cmds belonging to
	 * this ring itself are skipped:
	 */
	for (i = 0; i < msm_ring->nr_cmds; i++) {
		struct msm_cmd *msm_cmd = msm_ring->cmds[i];

		if (msm_cmd->ring == ring)
			continue;

		if (msm_cmd->ring->flags & FD_RINGBUFFER_OBJECT)
			fd_ringbuffer_del(msm_cmd->ring);
	}

	/* empty the tables without freeing the backing storage, so it can
	 * be reused by the next submit:
	 */
	msm_ring->submit.nr_cmds = 0;
	msm_ring->submit.nr_bos = 0;
	msm_ring->nr_cmds = 0;
	msm_ring->nr_bos = 0;

	if (msm_ring->bo_table) {
		drmHashDestroy(msm_ring->bo_table);
		msm_ring->bo_table = NULL;
	}

	if (msm_ring->cmd_table) {
		drmHashDestroy(msm_ring->cmd_table);
		msm_ring->cmd_table = NULL;
	}

	if (msm_ring->is_growable) {
		delete_cmds(msm_ring);
	} else {
		/* in old mode, just reset the # of relocs: */
		current_cmd(ring)->nr_relocs = 0;
	}
}
375 
finalize_current_cmd(struct fd_ringbuffer * ring,uint32_t * last_start)376 static void finalize_current_cmd(struct fd_ringbuffer *ring, uint32_t *last_start)
377 {
378 	uint32_t submit_offset, size, type;
379 	struct fd_ringbuffer *parent;
380 
381 	if (ring->parent) {
382 		parent = ring->parent;
383 		type = MSM_SUBMIT_CMD_IB_TARGET_BUF;
384 	} else {
385 		parent = ring;
386 		type = MSM_SUBMIT_CMD_BUF;
387 	}
388 
389 	submit_offset = offset_bytes(last_start, ring->start);
390 	size = offset_bytes(ring->cur, last_start);
391 
392 	get_cmd(parent, current_cmd(ring), submit_offset, size, type);
393 }
394 
/* dump the submit's bo table and cmd/reloc tables for debugging; called
 * when the submit ioctl fails:
 */
static void dump_submit(struct msm_ringbuffer *msm_ring)
{
	uint32_t i, j;

	for (i = 0; i < msm_ring->submit.nr_bos; i++) {
		struct drm_msm_gem_submit_bo *bo = &msm_ring->submit.bos[i];
		ERROR_MSG("  bos[%d]: handle=%u, flags=%x", i, bo->handle, bo->flags);
	}
	for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
		struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
		struct drm_msm_gem_submit_reloc *relocs = U642VOID(cmd->relocs);
		ERROR_MSG("  cmd[%d]: type=%u, submit_idx=%u, submit_offset=%u, size=%u",
				i, cmd->type, cmd->submit_idx, cmd->submit_offset, cmd->size);
		for (j = 0; j < cmd->nr_relocs; j++) {
			struct drm_msm_gem_submit_reloc *r = &relocs[j];
			ERROR_MSG("    reloc[%d]: submit_offset=%u, or=%08x, shift=%d, reloc_idx=%u"
					", reloc_offset=%"PRIu64, j, r->submit_offset, r->or, r->shift,
					r->reloc_idx, r->reloc_offset);
		}
	}
}
416 
/* translate a stateobj's private reloc table into a freshly allocated
 * table whose reloc_idx values index the *parent's* global bo table.
 * Caller (msm_ringbuffer_flush()) owns and must free the returned table.
 *
 * NOTE(review): the malloc() result is unchecked; with nr_relocs > 0 an
 * OOM here would crash — TODO confirm whether that is acceptable by this
 * codebase's conventions.
 */
static struct drm_msm_gem_submit_reloc *
handle_stateobj_relocs(struct fd_ringbuffer *parent, struct fd_ringbuffer *stateobj,
		struct drm_msm_gem_submit_reloc *orig_relocs, unsigned nr_relocs)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(stateobj);
	struct drm_msm_gem_submit_reloc *relocs = malloc(nr_relocs * sizeof(*relocs));
	unsigned i;

	for (i = 0; i < nr_relocs; i++) {
		unsigned idx = orig_relocs[i].reloc_idx;
		struct fd_bo *bo = msm_ring->bos[idx];
		unsigned flags = 0;

		/* recover the access flags recorded in the stateobj's private
		 * bo table, so bo2idx() can apply them in the parent's table:
		 */
		if (msm_ring->submit.bos[idx].flags & MSM_SUBMIT_BO_READ)
			flags |= FD_RELOC_READ;
		if (msm_ring->submit.bos[idx].flags & MSM_SUBMIT_BO_WRITE)
			flags |= FD_RELOC_WRITE;

		relocs[i] = orig_relocs[i];
		relocs[i].reloc_idx = bo2idx(parent, bo, flags);
	}

	/* stateobj rb's could have reloc's to other stateobj rb's which didn't
	 * get propagated to the parent rb at _emit_reloc_ring() time (because
	 * the parent wasn't known then), so fix that up now:
	 */
	for (i = 0; i < msm_ring->nr_cmds; i++) {
		struct msm_cmd *msm_cmd = msm_ring->cmds[i];
		struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];

		if (msm_ring->cmds[i]->ring == stateobj)
			continue;

		assert(msm_cmd->ring->flags & FD_RINGBUFFER_OBJECT);

		/* if newly added to the parent's submit, hold a ref on the
		 * target stateobj (dropped in flush_reset()):
		 */
		if (get_cmd(parent, msm_cmd, cmd->submit_offset, cmd->size, cmd->type)) {
			fd_ringbuffer_ref(msm_cmd->ring);
		}
	}

	return relocs;
}
459 
msm_ringbuffer_flush(struct fd_ringbuffer * ring,uint32_t * last_start,int in_fence_fd,int * out_fence_fd)460 static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start,
461 		int in_fence_fd, int *out_fence_fd)
462 {
463 	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
464 	struct msm_pipe *msm_pipe = to_msm_pipe(ring->pipe);
465 	struct drm_msm_gem_submit req = {
466 			.flags = msm_pipe->pipe,
467 			.queueid = msm_pipe->queue_id,
468 	};
469 	uint32_t i;
470 	int ret;
471 
472 	assert(!ring->parent);
473 
474 	if (in_fence_fd != -1) {
475 		req.flags |= MSM_SUBMIT_FENCE_FD_IN | MSM_SUBMIT_NO_IMPLICIT;
476 		req.fence_fd = in_fence_fd;
477 	}
478 
479 	if (out_fence_fd) {
480 		req.flags |= MSM_SUBMIT_FENCE_FD_OUT;
481 	}
482 
483 	finalize_current_cmd(ring, last_start);
484 
485 	/* for each of the cmd's fix up their reloc's: */
486 	for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
487 		struct msm_cmd *msm_cmd = msm_ring->cmds[i];
488 		struct drm_msm_gem_submit_reloc *relocs = msm_cmd->relocs;
489 		struct drm_msm_gem_submit_cmd *cmd;
490 		unsigned nr_relocs = msm_cmd->nr_relocs;
491 
492 		/* for reusable stateobjs, the reloc table has reloc_idx that
493 		 * points into it's own private bos table, rather than the global
494 		 * bos table used for the submit, so we need to add the stateobj's
495 		 * bos to the global table and construct new relocs table with
496 		 * corresponding reloc_idx
497 		 */
498 		if (msm_cmd->ring->flags & FD_RINGBUFFER_OBJECT) {
499 			relocs = handle_stateobj_relocs(ring, msm_cmd->ring,
500 					relocs, nr_relocs);
501 		}
502 
503 		cmd = &msm_ring->submit.cmds[i];
504 		cmd->relocs = VOID2U64(relocs);
505 		cmd->nr_relocs = nr_relocs;
506 	}
507 
508 	/* needs to be after get_cmd() as that could create bos/cmds table: */
509 	req.bos = VOID2U64(msm_ring->submit.bos),
510 	req.nr_bos = msm_ring->submit.nr_bos;
511 	req.cmds = VOID2U64(msm_ring->submit.cmds),
512 	req.nr_cmds = msm_ring->submit.nr_cmds;
513 
514 	DEBUG_MSG("nr_cmds=%u, nr_bos=%u", req.nr_cmds, req.nr_bos);
515 
516 	ret = drmCommandWriteRead(ring->pipe->dev->fd, DRM_MSM_GEM_SUBMIT,
517 			&req, sizeof(req));
518 	if (ret) {
519 		ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno));
520 		dump_submit(msm_ring);
521 	} else if (!ret) {
522 		/* update timestamp on all rings associated with submit: */
523 		for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
524 			struct msm_cmd *msm_cmd = msm_ring->cmds[i];
525 			msm_cmd->ring->last_timestamp = req.fence;
526 		}
527 
528 		if (out_fence_fd) {
529 			*out_fence_fd = req.fence_fd;
530 		}
531 	}
532 
533 	/* free dynamically constructed stateobj relocs tables: */
534 	for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
535 		struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
536 		struct msm_cmd *msm_cmd = msm_ring->cmds[i];
537 		if (msm_cmd->ring->flags & FD_RINGBUFFER_OBJECT) {
538 			free(U642VOID(cmd->relocs));
539 		}
540 	}
541 
542 	flush_reset(ring);
543 
544 	return ret;
545 }
546 
/* grow a "growable" ring: bake the current cmd buffer into the submit
 * and start writing into a freshly allocated one:
 */
static void msm_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t size)
{
	assert(to_msm_ringbuffer(ring)->is_growable);
	finalize_current_cmd(ring, ring->last_start);
	ring_cmd_new(ring, size, 0);
}
553 
/* discard any accumulated submit state without submitting: */
static void msm_ringbuffer_reset(struct fd_ringbuffer *ring)
{
	flush_reset(ring);
}
558 
/* record a relocation at the current write position: append an entry to
 * the cmd's reloc table (in the parent's bo-idx space) and emit the
 * presumed address into the cmdstream.  For gpu's with 64bit addresses
 * (gpu_id >= 500) a second dword/reloc is emitted for the upper 32 bits.
 */
static void msm_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
		const struct fd_reloc *r)
{
	struct fd_ringbuffer *parent = ring->parent ? ring->parent : ring;
	struct msm_bo *msm_bo = to_msm_bo(r->bo);
	struct drm_msm_gem_submit_reloc *reloc;
	struct msm_cmd *cmd = current_cmd(ring);
	uint32_t idx = APPEND(cmd, relocs);
	uint32_t addr;

	reloc = &cmd->relocs[idx];

	reloc->reloc_idx = bo2idx(parent, r->bo, r->flags);
	reloc->reloc_offset = r->offset;
	reloc->or = r->or;
	reloc->shift = r->shift;
	/* add the suballoc offset so the kernel patches the right dword in
	 * the (possibly shared) backing bo:
	 */
	reloc->submit_offset = offset_bytes(ring->cur, ring->start) +
			to_msm_ringbuffer(ring)->offset;

	/* emit the (lower dword of the) presumed address so the kernel can
	 * skip patching if it still holds:
	 */
	addr = msm_bo->presumed;
	if (reloc->shift < 0)
		addr >>= -reloc->shift;
	else
		addr <<= reloc->shift;
	(*ring->cur++) = addr | r->or;

	if (ring->pipe->gpu_id >= 500) {
		struct drm_msm_gem_submit_reloc *reloc_hi;

		/* NOTE: grab reloc_idx *before* APPEND() since that could
		 * realloc() meaning that 'reloc' ptr is no longer valid:
		 */
		uint32_t reloc_idx = reloc->reloc_idx;

		idx = APPEND(cmd, relocs);

		reloc_hi = &cmd->relocs[idx];

		reloc_hi->reloc_idx = reloc_idx;
		reloc_hi->reloc_offset = r->offset;
		reloc_hi->or = r->orhi;
		/* -32 selects the upper 32 bits of the 64bit address: */
		reloc_hi->shift = r->shift - 32;
		reloc_hi->submit_offset = offset_bytes(ring->cur, ring->start) +
				to_msm_ringbuffer(ring)->offset;

		addr = msm_bo->presumed >> 32;
		if (reloc_hi->shift < 0)
			addr >>= -reloc_hi->shift;
		else
			addr <<= reloc_hi->shift;
		(*ring->cur++) = addr | r->orhi;
	}
}
612 
/* emit a reloc pointing at the cmd_idx'th cmd buffer of a target ring
 * (ie. an IB into the target).  Ensures the target cmd is registered in
 * the parent's submit.cmds table.  Returns the size (in bytes) of the
 * referenced cmdstream chunk.
 */
static uint32_t msm_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
		struct fd_ringbuffer *target, uint32_t cmd_idx)
{
	struct msm_cmd *cmd = NULL;
	struct msm_ringbuffer *msm_target = to_msm_ringbuffer(target);
	uint32_t idx = 0;
	int added_cmd = FALSE;
	uint32_t size;
	uint32_t submit_offset = msm_target->offset;

	/* walk the target's cmd_list to the requested chunk: */
	LIST_FOR_EACH_ENTRY(cmd, &msm_target->cmd_list, list) {
		if (idx == cmd_idx)
			break;
		idx++;
	}

	assert(cmd && (idx == cmd_idx));

	if (idx < (msm_target->cmd_count - 1)) {
		/* All but the last cmd buffer is fully "baked" (ie. already has
		 * done get_cmd() to add it to the cmds table).  But in this case,
		 * the size we get is invalid (since it is calculated from the
		 * last cmd buffer):
		 */
		size = cmd->size;
	} else {
		struct fd_ringbuffer *parent = ring->parent ? ring->parent : ring;
		size = offset_bytes(target->cur, target->start);
		added_cmd = get_cmd(parent, cmd, submit_offset, size,
				MSM_SUBMIT_CMD_IB_TARGET_BUF);
	}

	/* emit the actual pointer to the target's backing bo: */
	msm_ringbuffer_emit_reloc(ring, &(struct fd_reloc){
		.bo = cmd->ring_bo,
		.flags = FD_RELOC_READ,
		.offset = submit_offset,
	});

	/* Unlike traditional ringbuffers which are deleted as a set (after
	 * being flushed), mesa can't really guarantee that a stateobj isn't
	 * destroyed after emitted but before flush, so we must hold a ref:
	 */
	if (added_cmd && (target->flags & FD_RINGBUFFER_OBJECT)) {
		fd_ringbuffer_ref(target);
	}

	return size;
}
661 
msm_ringbuffer_cmd_count(struct fd_ringbuffer * ring)662 static uint32_t msm_ringbuffer_cmd_count(struct fd_ringbuffer *ring)
663 {
664 	return to_msm_ringbuffer(ring)->cmd_count;
665 }
666 
/* tear down the ring: release all submit state and cmd buffers, then
 * free the tables' backing storage and the ring itself:
 */
static void msm_ringbuffer_destroy(struct fd_ringbuffer *ring)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);

	flush_reset(ring);
	delete_cmds(msm_ring);

	free(msm_ring->submit.cmds);
	free(msm_ring->submit.bos);
	free(msm_ring->bos);
	free(msm_ring->cmds);
	free(msm_ring);
}
680 
/* backend vtable hooked up in msm_ringbuffer_new(): */
static const struct fd_ringbuffer_funcs funcs = {
		.hostptr = msm_ringbuffer_hostptr,
		.flush = msm_ringbuffer_flush,
		.grow = msm_ringbuffer_grow,
		.reset = msm_ringbuffer_reset,
		.emit_reloc = msm_ringbuffer_emit_reloc,
		.emit_reloc_ring = msm_ringbuffer_emit_reloc_ring,
		.cmd_count = msm_ringbuffer_cmd_count,
		.destroy = msm_ringbuffer_destroy,
};
691 
msm_ringbuffer_new(struct fd_pipe * pipe,uint32_t size,enum fd_ringbuffer_flags flags)692 drm_private struct fd_ringbuffer * msm_ringbuffer_new(struct fd_pipe *pipe,
693 		uint32_t size, enum fd_ringbuffer_flags flags)
694 {
695 	struct msm_ringbuffer *msm_ring;
696 	struct fd_ringbuffer *ring;
697 
698 	msm_ring = calloc(1, sizeof(*msm_ring));
699 	if (!msm_ring) {
700 		ERROR_MSG("allocation failed");
701 		return NULL;
702 	}
703 
704 	if (size == 0) {
705 		assert(pipe->dev->version >= FD_VERSION_UNLIMITED_CMDS);
706 		size = INIT_SIZE;
707 		msm_ring->is_growable = TRUE;
708 	}
709 
710 	list_inithead(&msm_ring->cmd_list);
711 	msm_ring->seqno = ++to_msm_device(pipe->dev)->ring_cnt;
712 
713 	ring = &msm_ring->base;
714 	atomic_set(&ring->refcnt, 1);
715 
716 	ring->funcs = &funcs;
717 	ring->size = size;
718 	ring->pipe = pipe;   /* needed in ring_cmd_new() */
719 
720 	ring_cmd_new(ring, size, flags);
721 
722 	return ring;
723 }
724