• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2012-2018 Rob Clark <robclark@freedesktop.org>
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21  * SOFTWARE.
22  *
23  * Authors:
24  *    Rob Clark <robclark@freedesktop.org>
25  */
26 
27 #include <assert.h>
28 #include <inttypes.h>
29 
30 #include "util/hash_table.h"
31 #include "util/set.h"
32 #include "util/slab.h"
33 
34 #include "drm/freedreno_ringbuffer.h"
35 #include "msm_priv.h"
36 
37 /* The legacy implementation of submit/ringbuffer, which still does the
38  * traditional reloc and cmd tracking
39  */
40 
41 
42 #define INIT_SIZE 0x1000
43 
44 
/* Submit implementation: accumulates the bo table and associated rings
 * that will be flattened into a DRM_MSM_GEM_SUBMIT ioctl at flush time.
 */
struct msm_submit {
	struct fd_submit base;

	/* Parallel arrays, grown in lockstep by append_bo(): submit_bos is
	 * the kernel-visible table handed to the submit ioctl, bos holds our
	 * refcounted userspace handles for the same buffers (same index in
	 * both):
	 */
	DECLARE_ARRAY(struct drm_msm_gem_submit_bo, submit_bos);
	DECLARE_ARRAY(struct fd_bo *, bos);

	/* maps fd_bo to idx in bos table: */
	struct hash_table *bo_table;

	/* pool for struct msm_ringbuffer allocations owned by this submit: */
	struct slab_mempool ring_pool;

	/* hash-set of associated rings: */
	struct set *ring_set;

	/* the FD_RINGBUFFER_PRIMARY ring, set in msm_submit_new_ringbuffer(): */
	struct fd_ringbuffer *primary;

	/* Allow for sub-allocation of stateobj ring buffers (ie. sharing
	 * the same underlying bo)..
	 *
	 * We also rely on previous stateobj having been fully constructed
	 * so we can reclaim extra space at it's end.
	 */
	struct fd_ringbuffer *suballoc_ring;
};
FD_DEFINE_CAST(fd_submit, msm_submit);
70 
71 /* for FD_RINGBUFFER_GROWABLE rb's, tracks the 'finalized' cmdstream buffers
72  * and sizes.  Ie. a finalized buffer can have no more commands appended to
73  * it.
74  */
struct msm_cmd {
	/* bo backing this chunk of cmdstream (referenced, see cmd_new()): */
	struct fd_bo *ring_bo;
	/* size in bytes; 0 until finalized (see finalize_current_cmd()): */
	unsigned size;
	/* relocs emitted against this chunk (see msm_ringbuffer_emit_reloc()): */
	DECLARE_ARRAY(struct drm_msm_gem_submit_reloc, relocs);
};
80 
81 static struct msm_cmd *
cmd_new(struct fd_bo * ring_bo)82 cmd_new(struct fd_bo *ring_bo)
83 {
84 	struct msm_cmd *cmd = malloc(sizeof(*cmd));
85 	cmd->ring_bo = fd_bo_ref(ring_bo);
86 	cmd->size = 0;
87 	cmd->nr_relocs = cmd->max_relocs = 0;
88 	cmd->relocs = NULL;
89 	return cmd;
90 }
91 
92 static void
cmd_free(struct msm_cmd * cmd)93 cmd_free(struct msm_cmd *cmd)
94 {
95 	fd_bo_del(cmd->ring_bo);
96 	free(cmd->relocs);
97 	free(cmd);
98 }
99 
struct msm_ringbuffer {
	struct fd_ringbuffer base;

	/* for FD_RINGBUFFER_STREAMING rb's which are sub-allocated */
	unsigned offset;

	/* Which arm of the union is valid is determined by whether
	 * base.flags has _FD_RINGBUFFER_OBJECT set:
	 */
	union {
		/* for _FD_RINGBUFFER_OBJECT case: */
		struct {
			struct fd_pipe *pipe;
			/* bos referenced by this stateobj's relocs; indices are
			 * remapped to the submit's bo table at flush time (see
			 * handle_stateobj_relocs()):
			 */
			DECLARE_ARRAY(struct fd_bo *, reloc_bos);
			/* rings this stateobj references (targets of emit_reloc_ring): */
			struct set *ring_set;
		};
		/* for other cases: */
		struct {
			struct fd_submit *submit;
			/* finalized cmd chunks (for FD_RINGBUFFER_GROWABLE rings): */
			DECLARE_ARRAY(struct msm_cmd *, cmds);
		};
	} u;

	struct msm_cmd *cmd;          /* current cmd */
	struct fd_bo *ring_bo;
};
FD_DEFINE_CAST(fd_ringbuffer, msm_ringbuffer);

static void finalize_current_cmd(struct fd_ringbuffer *ring);
static struct fd_ringbuffer * msm_ringbuffer_init(
		struct msm_ringbuffer *msm_ring,
		uint32_t size, enum fd_ringbuffer_flags flags);
129 
/* add (if needed) bo to submit and return index: */
static uint32_t
append_bo(struct msm_submit *submit, struct fd_bo *bo)
{
	struct msm_bo *msm_bo = to_msm_bo(bo);
	uint32_t idx;

	/* NOTE: it is legal to use the same bo on different threads for
	 * different submits.  But it is not legal to use the same submit
	 * from given threads.
	 */
	idx = READ_ONCE(msm_bo->idx);

	/* The per-bo cached idx is only a hint (it may belong to a different
	 * submit, or be stale), so validate it against this submit's table
	 * before trusting it:
	 */
	if (unlikely((idx >= submit->nr_submit_bos) ||
			(submit->submit_bos[idx].handle != bo->handle))) {
		uint32_t hash = _mesa_hash_pointer(bo);
		struct hash_entry *entry;

		entry = _mesa_hash_table_search_pre_hashed(submit->bo_table, hash, bo);
		if (entry) {
			/* found */
			idx = (uint32_t)(uintptr_t)entry->data;
		} else {
			/* Grow both parallel tables; they stay in lockstep so the
			 * two APPEND()s are expected to yield the same index (the
			 * second assignment wins):
			 */
			idx = APPEND(submit, submit_bos);
			idx = APPEND(submit, bos);

			submit->submit_bos[idx].flags = bo->flags &
					(MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
			submit->submit_bos[idx].handle = bo->handle;
			submit->submit_bos[idx].presumed = 0;

			/* bos[] holds a reference, dropped in msm_submit_destroy(): */
			submit->bos[idx] = fd_bo_ref(bo);

			_mesa_hash_table_insert_pre_hashed(submit->bo_table, hash, bo,
					(void *)(uintptr_t)idx);
		}
		/* refresh the cached hint for the next lookup: */
		msm_bo->idx = idx;
	}

	return idx;
}
171 
172 static void
append_ring(struct set * set,struct fd_ringbuffer * ring)173 append_ring(struct set *set, struct fd_ringbuffer *ring)
174 {
175 	uint32_t hash = _mesa_hash_pointer(ring);
176 
177 	if (!_mesa_set_search_pre_hashed(set, hash, ring)) {
178 		fd_ringbuffer_ref(ring);
179 		_mesa_set_add_pre_hashed(set, hash, ring);
180 	}
181 }
182 
/* Assign ring_bo/offset for a FD_RINGBUFFER_STREAMING ring, sub-allocating
 * out of the tail of the previous streaming ring's bo when there is room,
 * otherwise allocating a fresh bo.
 */
static void
msm_submit_suballoc_ring_bo(struct fd_submit *submit,
		struct msm_ringbuffer *msm_ring, uint32_t size)
{
	struct msm_submit *msm_submit = to_msm_submit(submit);
	unsigned suballoc_offset = 0;
	struct fd_bo *suballoc_bo = NULL;

	if (msm_submit->suballoc_ring) {
		struct msm_ringbuffer *suballoc_ring =
				to_msm_ringbuffer(msm_submit->suballoc_ring);

		/* Carve from just past the end of the previous suballocated
		 * ring (relies on it being fully constructed by now):
		 */
		suballoc_bo = suballoc_ring->ring_bo;
		suballoc_offset = fd_ringbuffer_size(msm_submit->suballoc_ring) +
				suballoc_ring->offset;

		suballoc_offset = align(suballoc_offset, 0x10);

		/* not enough room left in the current bo: */
		if ((size + suballoc_offset) > suballoc_bo->size) {
			suballoc_bo = NULL;
		}
	}

	if (!suballoc_bo) {
		// TODO possibly larger size for streaming bo?
		msm_ring->ring_bo = fd_bo_new_ring(submit->pipe->dev, 0x8000);
		msm_ring->offset = 0;
	} else {
		msm_ring->ring_bo = fd_bo_ref(suballoc_bo);
		msm_ring->offset = suballoc_offset;
	}

	/* This ring becomes the new suballoc source; swap the reference
	 * (ref new before del old, in case they alias):
	 */
	struct fd_ringbuffer *old_suballoc_ring = msm_submit->suballoc_ring;

	msm_submit->suballoc_ring = fd_ringbuffer_ref(&msm_ring->base);

	if (old_suballoc_ring)
		fd_ringbuffer_del(old_suballoc_ring);
}
222 
/* fd_submit_funcs::new_ringbuffer: allocate a ringbuffer out of the
 * submit's slab pool.  STREAMING rings are sub-allocated; GROWABLE rings
 * start at INIT_SIZE and grow on demand.
 */
static struct fd_ringbuffer *
msm_submit_new_ringbuffer(struct fd_submit *submit, uint32_t size,
		enum fd_ringbuffer_flags flags)
{
	struct msm_submit *msm_submit = to_msm_submit(submit);
	struct msm_ringbuffer *msm_ring;

	msm_ring = slab_alloc_st(&msm_submit->ring_pool);

	msm_ring->u.submit = submit;

	/* NOTE: needs to be before _suballoc_ring_bo() since it could
	 * increment the refcnt of the current ring
	 */
	msm_ring->base.refcnt = 1;

	if (flags & FD_RINGBUFFER_STREAMING) {
		msm_submit_suballoc_ring_bo(submit, msm_ring, size);
	} else {
		if (flags & FD_RINGBUFFER_GROWABLE)
			size = INIT_SIZE;

		msm_ring->offset = 0;
		msm_ring->ring_bo = fd_bo_new_ring(submit->pipe->dev, size);
	}

	/* NOTE(review): _init() as written below never returns NULL, so this
	 * failure path (which would leak the slab allocation) looks dead —
	 * confirm before relying on it:
	 */
	if (!msm_ringbuffer_init(msm_ring, size, flags))
		return NULL;

	if (flags & FD_RINGBUFFER_PRIMARY) {
		debug_assert(!msm_submit->primary);
		msm_submit->primary = fd_ringbuffer_ref(&msm_ring->base);
	}

	return &msm_ring->base;
}
259 
260 static struct drm_msm_gem_submit_reloc *
handle_stateobj_relocs(struct msm_submit * submit,struct msm_ringbuffer * ring)261 handle_stateobj_relocs(struct msm_submit *submit, struct msm_ringbuffer *ring)
262 {
263 	struct msm_cmd *cmd = ring->cmd;
264 	struct drm_msm_gem_submit_reloc *relocs;
265 
266 	relocs = malloc(cmd->nr_relocs * sizeof(*relocs));
267 
268 	for (unsigned i = 0; i < cmd->nr_relocs; i++) {
269 		unsigned idx = cmd->relocs[i].reloc_idx;
270 		struct fd_bo *bo = ring->u.reloc_bos[idx];
271 
272 		relocs[i] = cmd->relocs[i];
273 		relocs[i].reloc_idx = append_bo(submit, bo);
274 	}
275 
276 	return relocs;
277 }
278 
/* fd_submit_funcs::flush: flatten all associated rings into a single
 * DRM_MSM_GEM_SUBMIT ioctl.  Two passes over ring_set: the first counts
 * cmds/stateobjs so the VLAs can be sized, the second fills in the cmd
 * table.
 */
static int
msm_submit_flush(struct fd_submit *submit, int in_fence_fd,
		int *out_fence_fd, uint32_t *out_fence)
{
	struct msm_submit *msm_submit = to_msm_submit(submit);
	struct msm_pipe *msm_pipe = to_msm_pipe(submit->pipe);
	struct drm_msm_gem_submit req = {
			.flags = msm_pipe->pipe,
			.queueid = msm_pipe->queue_id,
	};
	int ret;

	debug_assert(msm_submit->primary);

	finalize_current_cmd(msm_submit->primary);
	append_ring(msm_submit->ring_set, msm_submit->primary);

	/* First pass: count cmds (and stateobjs, which contribute one cmd
	 * each), finalizing any non-primary growable rings along the way:
	 */
	unsigned nr_cmds = 0;
	unsigned nr_objs = 0;

	set_foreach(msm_submit->ring_set, entry) {
		struct fd_ringbuffer *ring = (void *)entry->key;
		if (ring->flags & _FD_RINGBUFFER_OBJECT) {
			nr_cmds += 1;
			nr_objs += 1;
		} else {
			if (ring != msm_submit->primary)
				finalize_current_cmd(ring);
			nr_cmds += to_msm_ringbuffer(ring)->u.nr_cmds;
		}
	}

	void *obj_relocs[nr_objs];
	struct drm_msm_gem_submit_cmd cmds[nr_cmds];
	unsigned i = 0, o = 0;

	/* Second pass: fill in the cmd table: */
	set_foreach(msm_submit->ring_set, entry) {
		struct fd_ringbuffer *ring = (void *)entry->key;
		struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);

		debug_assert(i < nr_cmds);

		// TODO handle relocs:
		if (ring->flags & _FD_RINGBUFFER_OBJECT) {

			debug_assert(o < nr_objs);

			/* stateobj relocs must be remapped into this submit's bo
			 * table; the remapped copy is freed after the ioctl:
			 */
			void *relocs = handle_stateobj_relocs(msm_submit, msm_ring);
			obj_relocs[o++] = relocs;

			cmds[i].type = MSM_SUBMIT_CMD_IB_TARGET_BUF;
			cmds[i].submit_idx =
				append_bo(msm_submit, msm_ring->ring_bo);
			cmds[i].submit_offset = msm_ring->offset;
			cmds[i].size = offset_bytes(ring->cur, ring->start);
			cmds[i].pad = 0;
			cmds[i].nr_relocs = msm_ring->cmd->nr_relocs;
			cmds[i].relocs = VOID2U64(relocs);

			i++;
		} else {
			/* one submit cmd per finalized chunk of the ring: */
			for (unsigned j = 0; j < msm_ring->u.nr_cmds; j++) {
				if (ring->flags & FD_RINGBUFFER_PRIMARY) {
					cmds[i].type = MSM_SUBMIT_CMD_BUF;
				} else {
					cmds[i].type = MSM_SUBMIT_CMD_IB_TARGET_BUF;
				}
				cmds[i].submit_idx = append_bo(msm_submit,
						msm_ring->u.cmds[j]->ring_bo);
				cmds[i].submit_offset = msm_ring->offset;
				cmds[i].size = msm_ring->u.cmds[j]->size;
				cmds[i].pad = 0;
				cmds[i].nr_relocs = msm_ring->u.cmds[j]->nr_relocs;
				cmds[i].relocs = VOID2U64(msm_ring->u.cmds[j]->relocs);

				i++;
			}
		}
	}

	if (in_fence_fd != -1) {
		req.flags |= MSM_SUBMIT_FENCE_FD_IN | MSM_SUBMIT_NO_IMPLICIT;
		req.fence_fd = in_fence_fd;
	}

	if (out_fence_fd) {
		req.flags |= MSM_SUBMIT_FENCE_FD_OUT;
	}

	/* needs to be after get_cmd() as that could create bos/cmds table: */
	req.bos = VOID2U64(msm_submit->submit_bos),
	req.nr_bos = msm_submit->nr_submit_bos;
	req.cmds = VOID2U64(cmds),
	req.nr_cmds = nr_cmds;

	DEBUG_MSG("nr_cmds=%u, nr_bos=%u", req.nr_cmds, req.nr_bos);

	ret = drmCommandWriteRead(submit->pipe->dev->fd, DRM_MSM_GEM_SUBMIT,
			&req, sizeof(req));
	if (ret) {
		ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno));
		msm_dump_submit(&req);
	} else if (!ret) {
		/* NOTE: ret == 0 in this branch, so the `!ret` test is
		 * always true (plain `else` would be equivalent):
		 */
		if (out_fence)
			*out_fence = req.fence;

		if (out_fence_fd)
			*out_fence_fd = req.fence_fd;
	}

	/* free the remapped stateobj reloc copies made above: */
	for (unsigned o = 0; o < nr_objs; o++)
		free(obj_relocs[o]);

	return ret;
}
394 
395 static void
unref_rings(struct set_entry * entry)396 unref_rings(struct set_entry *entry)
397 {
398 	struct fd_ringbuffer *ring = (void *)entry->key;
399 	fd_ringbuffer_del(ring);
400 }
401 
/* fd_submit_funcs::destroy: drop every reference the submit holds and
 * free its tables and ring pool.
 */
static void
msm_submit_destroy(struct fd_submit *submit)
{
	struct msm_submit *msm_submit = to_msm_submit(submit);

	if (msm_submit->primary)
		fd_ringbuffer_del(msm_submit->primary);
	if (msm_submit->suballoc_ring)
		fd_ringbuffer_del(msm_submit->suballoc_ring);

	/* unref_rings drops the per-ring refs taken by append_ring(): */
	_mesa_hash_table_destroy(msm_submit->bo_table, NULL);
	_mesa_set_destroy(msm_submit->ring_set, unref_rings);

	// TODO it would be nice to have a way to debug_assert() if all
	// rb's haven't been free'd back to the slab, because that is
	// an indication that we are leaking bo's
	slab_destroy(&msm_submit->ring_pool);

	/* drop the bo refs taken in append_bo(): */
	for (unsigned i = 0; i < msm_submit->nr_bos; i++)
		fd_bo_del(msm_submit->bos[i]);

	free(msm_submit->submit_bos);
	free(msm_submit->bos);
	free(msm_submit);
}
427 
/* vtable wired up in msm_submit_new(): */
static const struct fd_submit_funcs submit_funcs = {
		.new_ringbuffer = msm_submit_new_ringbuffer,
		.flush = msm_submit_flush,
		.destroy = msm_submit_destroy,
};
433 
434 struct fd_submit *
msm_submit_new(struct fd_pipe * pipe)435 msm_submit_new(struct fd_pipe *pipe)
436 {
437 	struct msm_submit *msm_submit = calloc(1, sizeof(*msm_submit));
438 	struct fd_submit *submit;
439 
440 	msm_submit->bo_table = _mesa_hash_table_create(NULL,
441 			_mesa_hash_pointer, _mesa_key_pointer_equal);
442 	msm_submit->ring_set = _mesa_set_create(NULL,
443 			_mesa_hash_pointer, _mesa_key_pointer_equal);
444 	// TODO tune size:
445 	slab_create(&msm_submit->ring_pool, sizeof(struct msm_ringbuffer), 16);
446 
447 	submit = &msm_submit->base;
448 	submit->pipe = pipe;
449 	submit->funcs = &submit_funcs;
450 
451 	return submit;
452 }
453 
454 
/* Move the ring's current cmd (if any) onto its finalized cmds[] list,
 * recording the number of bytes actually emitted.  After this no more
 * commands may be appended to that chunk.
 */
static void
finalize_current_cmd(struct fd_ringbuffer *ring)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);

	/* stateobjs keep their single cmd; only submit-owned rings finalize: */
	debug_assert(!(ring->flags & _FD_RINGBUFFER_OBJECT));

	if (!msm_ring->cmd)
		return;

	debug_assert(msm_ring->cmd->ring_bo == msm_ring->ring_bo);

	unsigned idx = APPEND(&msm_ring->u, cmds);

	msm_ring->u.cmds[idx] = msm_ring->cmd;
	msm_ring->cmd = NULL;

	/* size of the chunk == bytes emitted so far into this bo: */
	msm_ring->u.cmds[idx]->size = offset_bytes(ring->cur, ring->start);
}
474 
/* fd_ringbuffer_funcs::grow: finalize the current chunk and switch the
 * ring to a fresh bo of the requested size (cur resets to start; the
 * finalized chunks are replayed from cmds[] at flush time).
 */
static void
msm_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t size)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	struct fd_pipe *pipe = msm_ring->u.submit->pipe;

	debug_assert(ring->flags & FD_RINGBUFFER_GROWABLE);

	finalize_current_cmd(ring);

	/* drop our ref to the old bo (the finalized cmd still holds one): */
	fd_bo_del(msm_ring->ring_bo);
	msm_ring->ring_bo = fd_bo_new_ring(pipe->dev, size);
	msm_ring->cmd = cmd_new(msm_ring->ring_bo);

	ring->start = fd_bo_map(msm_ring->ring_bo);
	ring->end = &(ring->start[size/4]);
	ring->cur = ring->start;
	ring->size = size;
}
494 
/* fd_ringbuffer_funcs::emit_reloc: record a reloc at the current write
 * position and advance cur past the dword(s) the kernel will patch.
 */
static void
msm_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
		const struct fd_reloc *reloc)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	struct fd_pipe *pipe;
	unsigned reloc_idx;

	if (ring->flags & _FD_RINGBUFFER_OBJECT) {
		/* stateobjs index into their private reloc_bos[] table: */
		unsigned idx = APPEND(&msm_ring->u, reloc_bos);

		msm_ring->u.reloc_bos[idx] = fd_bo_ref(reloc->bo);

		/* this gets fixed up at submit->flush() time, since this state-
		 * object rb can be used with many different submits
		 */
		reloc_idx = idx;

		pipe = msm_ring->u.pipe;
	} else {
		/* submit-owned rings index straight into the submit's bo table: */
		struct msm_submit *msm_submit =
				to_msm_submit(msm_ring->u.submit);

		reloc_idx = append_bo(msm_submit, reloc->bo);

		pipe = msm_ring->u.submit->pipe;
	}

	struct drm_msm_gem_submit_reloc *r;
	unsigned idx = APPEND(msm_ring->cmd, relocs);

	r = &msm_ring->cmd->relocs[idx];

	r->reloc_idx = reloc_idx;
	r->reloc_offset = reloc->offset;
	r->or = reloc->or;
	r->shift = reloc->shift;
	/* submit_offset is relative to the bo, so include any suballoc offset: */
	r->submit_offset = offset_bytes(ring->cur, ring->start) +
			msm_ring->offset;

	ring->cur++;

	/* gpu_id >= 500: emit a second reloc for the high dword of the
	 * 64-bit address (orhi / shift-32), in the next ring position:
	 */
	if (pipe->gpu_id >= 500) {
		idx = APPEND(msm_ring->cmd, relocs);
		r = &msm_ring->cmd->relocs[idx];

		r->reloc_idx = reloc_idx;
		r->reloc_offset = reloc->offset;
		r->or = reloc->orhi;
		r->shift = reloc->shift - 32;
		r->submit_offset = offset_bytes(ring->cur, ring->start) +
				msm_ring->offset;

		ring->cur++;
	}
}
551 
/* Recursively add a stateobj's referenced rings (and any stateobjs they
 * in turn reference) to the submit's ring_set, so they are included at
 * flush time.
 */
static void
append_stateobj_rings(struct msm_submit *submit, struct fd_ringbuffer *target)
{
	struct msm_ringbuffer *msm_target = to_msm_ringbuffer(target);

	debug_assert(target->flags & _FD_RINGBUFFER_OBJECT);

	set_foreach(msm_target->u.ring_set, entry) {
		struct fd_ringbuffer *ring = (void *)entry->key;

		append_ring(submit->ring_set, ring);

		/* recurse into nested stateobjs: */
		if (ring->flags & _FD_RINGBUFFER_OBJECT) {
			append_stateobj_rings(submit, ring);
		}
	}
}
569 
/* fd_ringbuffer_funcs::emit_reloc_ring: emit an IB reloc to chunk
 * `cmd_idx` of the target ring, and register the target (plus any
 * stateobj rings it references) with the appropriate ring set.
 * Returns the size in bytes of the referenced chunk (0 if empty).
 */
static uint32_t
msm_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
		struct fd_ringbuffer *target, uint32_t cmd_idx)
{
	struct msm_ringbuffer *msm_target = to_msm_ringbuffer(target);
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	struct fd_bo *bo;
	uint32_t size;

	/* finalized chunks come from cmds[]; the last (still-open) chunk is
	 * the target's current bo/write position:
	 */
	if ((target->flags & FD_RINGBUFFER_GROWABLE) &&
			(cmd_idx < msm_target->u.nr_cmds)) {
		bo   = msm_target->u.cmds[cmd_idx]->ring_bo;
		size = msm_target->u.cmds[cmd_idx]->size;
	} else {
		bo   = msm_target->ring_bo;
		size = offset_bytes(target->cur, target->start);
	}

	msm_ringbuffer_emit_reloc(ring, &(struct fd_reloc){
		.bo     = bo,
		.offset = msm_target->offset,
	});

	if (!size)
		return 0;

	/* referencing a stateobj from a submit-owned ring pulls in all the
	 * rings the stateobj references:
	 */
	if ((target->flags & _FD_RINGBUFFER_OBJECT) &&
			!(ring->flags & _FD_RINGBUFFER_OBJECT)) {
		struct msm_submit *msm_submit = to_msm_submit(msm_ring->u.submit);

		append_stateobj_rings(msm_submit, target);
	}

	/* track the target in this ring's (stateobj) or the submit's set: */
	if (ring->flags & _FD_RINGBUFFER_OBJECT) {
		append_ring(msm_ring->u.ring_set, target);
	} else {
		struct msm_submit *msm_submit = to_msm_submit(msm_ring->u.submit);
		append_ring(msm_submit->ring_set, target);
	}

	return size;
}
612 
613 static uint32_t
msm_ringbuffer_cmd_count(struct fd_ringbuffer * ring)614 msm_ringbuffer_cmd_count(struct fd_ringbuffer *ring)
615 {
616 	if (ring->flags & FD_RINGBUFFER_GROWABLE)
617 		return to_msm_ringbuffer(ring)->u.nr_cmds + 1;
618 	return 1;
619 }
620 
/* fd_ringbuffer_funcs::destroy: release per-ring resources.  Teardown
 * differs by union arm: stateobjs were malloc'd (msm_ringbuffer_new_object)
 * and own reloc_bos/ring_set; other rings came from the submit's slab
 * pool and own their cmds[] list.
 */
static void
msm_ringbuffer_destroy(struct fd_ringbuffer *ring)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);

	fd_bo_del(msm_ring->ring_bo);
	if (msm_ring->cmd)
		cmd_free(msm_ring->cmd);

	if (ring->flags & _FD_RINGBUFFER_OBJECT) {
		/* drop the refs taken in emit_reloc for stateobjs: */
		for (unsigned i = 0; i < msm_ring->u.nr_reloc_bos; i++) {
			fd_bo_del(msm_ring->u.reloc_bos[i]);
		}

		_mesa_set_destroy(msm_ring->u.ring_set, unref_rings);

		free(msm_ring->u.reloc_bos);
		free(msm_ring);
	} else {
		struct fd_submit *submit = msm_ring->u.submit;

		for (unsigned i = 0; i < msm_ring->u.nr_cmds; i++) {
			cmd_free(msm_ring->u.cmds[i]);
		}

		free(msm_ring->u.cmds);
		/* return the ring struct itself to the submit's slab pool: */
		slab_free_st(&to_msm_submit(submit)->ring_pool, msm_ring);
	}
}
650 
/* vtable wired up in msm_ringbuffer_init(): */
static const struct fd_ringbuffer_funcs ring_funcs = {
		.grow = msm_ringbuffer_grow,
		.emit_reloc = msm_ringbuffer_emit_reloc,
		.emit_reloc_ring = msm_ringbuffer_emit_reloc_ring,
		.cmd_count = msm_ringbuffer_cmd_count,
		.destroy = msm_ringbuffer_destroy,
};
658 
/* Common ringbuffer setup: map the (already-allocated) ring_bo, point
 * start/cur/end at the sub-range [offset, offset+size), and open the
 * first cmd chunk.  Callers must have set msm_ring->ring_bo and ->offset.
 * Never returns NULL as written.
 */
static inline struct fd_ringbuffer *
msm_ringbuffer_init(struct msm_ringbuffer *msm_ring, uint32_t size,
		enum fd_ringbuffer_flags flags)
{
	struct fd_ringbuffer *ring = &msm_ring->base;

	debug_assert(msm_ring->ring_bo);

	uint8_t *base = fd_bo_map(msm_ring->ring_bo);
	ring->start = (void *)(base + msm_ring->offset);
	ring->end = &(ring->start[size/4]);
	ring->cur = ring->start;

	ring->size = size;
	ring->flags = flags;

	ring->funcs = &ring_funcs;

	/* both union arms overlay here: clears cmds for submit-owned rings
	 * (stateobj callers re-initialize their own arm):
	 */
	msm_ring->u.cmds = NULL;
	msm_ring->u.nr_cmds = msm_ring->u.max_cmds = 0;

	msm_ring->cmd = cmd_new(msm_ring->ring_bo);

	return ring;
}
684 
685 struct fd_ringbuffer *
msm_ringbuffer_new_object(struct fd_pipe * pipe,uint32_t size)686 msm_ringbuffer_new_object(struct fd_pipe *pipe, uint32_t size)
687 {
688 	struct msm_ringbuffer *msm_ring = malloc(sizeof(*msm_ring));
689 
690 	msm_ring->u.pipe = pipe;
691 	msm_ring->offset = 0;
692 	msm_ring->ring_bo = fd_bo_new_ring(pipe->dev, size);
693 	msm_ring->base.refcnt = 1;
694 
695 	msm_ring->u.reloc_bos = NULL;
696 	msm_ring->u.nr_reloc_bos = msm_ring->u.max_reloc_bos = 0;
697 
698 	msm_ring->u.ring_set = _mesa_set_create(NULL,
699 			_mesa_hash_pointer, _mesa_key_pointer_equal);
700 
701 	return msm_ringbuffer_init(msm_ring, size, _FD_RINGBUFFER_OBJECT);
702 }
703