/*
 * Copyright (C) 2012-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include <assert.h>
#include <inttypes.h>

#include "util/hash_table.h"
#include "util/set.h"
#include "util/slab.h"

#include "drm/freedreno_ringbuffer.h"
#include "msm_priv.h"

/* The legacy implementation of submit/ringbuffer, which still does the
 * traditional reloc and cmd tracking
 */

#define INIT_SIZE 0x1000

struct msm_submit {
   struct fd_submit base;

   DECLARE_ARRAY(struct drm_msm_gem_submit_bo, submit_bos);
   DECLARE_ARRAY(struct fd_bo *, bos);

   /* maps fd_bo to idx in bos table: */
   struct hash_table *bo_table;

   struct slab_mempool ring_pool;

   /* hash-set of associated rings: */
   struct set *ring_set;

   /* Allow for sub-allocation of stateobj ring buffers (ie. sharing
    * the same underlying bo)..
    *
    * We also rely on the previous stateobj having been fully constructed
    * so we can reclaim extra space at its end.
    */
   struct fd_ringbuffer *suballoc_ring;
};
FD_DEFINE_CAST(fd_submit, msm_submit);

/* for FD_RINGBUFFER_GROWABLE rb's, tracks the 'finalized' cmdstream buffers
 * and sizes.  Ie. a finalized buffer can have no more commands appended to
 * it.
 */
struct msm_cmd {
   struct fd_bo *ring_bo;
   unsigned size;
   DECLARE_ARRAY(struct drm_msm_gem_submit_reloc, relocs);
};

static struct msm_cmd *
cmd_new(struct fd_bo *ring_bo)
{
   struct msm_cmd *cmd = malloc(sizeof(*cmd));
   cmd->ring_bo = fd_bo_ref(ring_bo);
   cmd->size = 0;
   cmd->nr_relocs = cmd->max_relocs = 0;
   cmd->relocs = NULL;
   return cmd;
}

static void
cmd_free(struct msm_cmd *cmd)
{
   fd_bo_del(cmd->ring_bo);
   free(cmd->relocs);
   free(cmd);
}

struct msm_ringbuffer {
   struct fd_ringbuffer base;

   /* for FD_RINGBUFFER_STREAMING rb's which are sub-allocated */
   unsigned offset;

   union {
      /* for _FD_RINGBUFFER_OBJECT case: */
      struct {
         struct fd_pipe *pipe;
         DECLARE_ARRAY(struct fd_bo *, reloc_bos);
         struct set *ring_set;
      };
      /* for other cases: */
      struct {
         struct fd_submit *submit;
         DECLARE_ARRAY(struct msm_cmd *, cmds);
      };
   } u;

   struct msm_cmd *cmd; /* current cmd */
   struct fd_bo *ring_bo;
};
FD_DEFINE_CAST(fd_ringbuffer, msm_ringbuffer);

static void finalize_current_cmd(struct fd_ringbuffer *ring);
static struct fd_ringbuffer *
msm_ringbuffer_init(struct msm_ringbuffer *msm_ring, uint32_t size,
                    enum fd_ringbuffer_flags flags);

/* add (if needed) bo to submit and return index: */
static uint32_t
append_bo(struct msm_submit *submit, struct fd_bo *bo)
{
   uint32_t idx;

   /* NOTE: it is legal to use the same bo on different threads for
    * different submits.  But it is not legal to use the same submit
    * from different threads.
    */
   idx = READ_ONCE(bo->idx);

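   /* The cached bo->idx may be stale (it could have last been written by
    * some other submit), so the bounds and handle checks below validate
    * it before trusting the fast path.
    */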
   if (unlikely((idx >= submit->nr_submit_bos) ||
                (submit->submit_bos[idx].handle != bo->handle))) {
      uint32_t hash = _mesa_hash_pointer(bo);
      struct hash_entry *entry;

      entry = _mesa_hash_table_search_pre_hashed(submit->bo_table, hash, bo);
      if (entry) {
         /* found */
         idx = (uint32_t)(uintptr_t)entry->data;
      } else {
         idx = APPEND(
            submit, submit_bos,
            (struct drm_msm_gem_submit_bo){
               .flags = bo->reloc_flags & (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE),
               .handle = bo->handle,
               .presumed = 0,
            });
         APPEND(submit, bos, fd_bo_ref(bo));

         _mesa_hash_table_insert_pre_hashed(submit->bo_table, hash, bo,
                                            (void *)(uintptr_t)idx);
      }
      bo->idx = idx;
   }

   return idx;
}

static void
append_ring(struct set *set, struct fd_ringbuffer *ring)
{
   uint32_t hash = _mesa_hash_pointer(ring);

   if (!_mesa_set_search_pre_hashed(set, hash, ring)) {
      fd_ringbuffer_ref(ring);
      _mesa_set_add_pre_hashed(set, hash, ring);
   }
}

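/* Pick the backing bo for a new STREAMING ring: sub-allocate from the
 * tail of the previous suballoc ring's bo when there is room after its
 * current write position, otherwise start a fresh bo.
 */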
static void
msm_submit_suballoc_ring_bo(struct fd_submit *submit,
                            struct msm_ringbuffer *msm_ring, uint32_t size)
{
   struct msm_submit *msm_submit = to_msm_submit(submit);
   unsigned suballoc_offset = 0;
   struct fd_bo *suballoc_bo = NULL;

   if (msm_submit->suballoc_ring) {
      struct msm_ringbuffer *suballoc_ring =
         to_msm_ringbuffer(msm_submit->suballoc_ring);

      suballoc_bo = suballoc_ring->ring_bo;
      suballoc_offset =
         fd_ringbuffer_size(msm_submit->suballoc_ring) + suballoc_ring->offset;

      suballoc_offset = align(suballoc_offset, 0x10);

      if ((size + suballoc_offset) > suballoc_bo->size) {
         suballoc_bo = NULL;
      }
   }

   if (!suballoc_bo) {
      // TODO possibly larger size for streaming bo?
      msm_ring->ring_bo = fd_bo_new_ring(submit->pipe->dev, 0x8000);
      msm_ring->offset = 0;
   } else {
      msm_ring->ring_bo = fd_bo_ref(suballoc_bo);
      msm_ring->offset = suballoc_offset;
   }

   struct fd_ringbuffer *old_suballoc_ring = msm_submit->suballoc_ring;

   msm_submit->suballoc_ring = fd_ringbuffer_ref(&msm_ring->base);

   if (old_suballoc_ring)
      fd_ringbuffer_del(old_suballoc_ring);
}

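/* Create a ring for the submit: STREAMING rings are sub-allocated,
 * GROWABLE rings start at INIT_SIZE and grow on demand, anything else
 * gets a dedicated bo of the requested size.
 */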
static struct fd_ringbuffer *
msm_submit_new_ringbuffer(struct fd_submit *submit, uint32_t size,
                          enum fd_ringbuffer_flags flags)
{
   struct msm_submit *msm_submit = to_msm_submit(submit);
   struct msm_ringbuffer *msm_ring;

   msm_ring = slab_alloc_st(&msm_submit->ring_pool);

   msm_ring->u.submit = submit;

   /* NOTE: needs to be before _suballoc_ring_bo() since it could
    * increment the refcnt of the current ring
    */
   msm_ring->base.refcnt = 1;

   if (flags & FD_RINGBUFFER_STREAMING) {
      msm_submit_suballoc_ring_bo(submit, msm_ring, size);
   } else {
      if (flags & FD_RINGBUFFER_GROWABLE)
         size = INIT_SIZE;

      msm_ring->offset = 0;
      msm_ring->ring_bo = fd_bo_new_ring(submit->pipe->dev, size);
   }

   if (!msm_ringbuffer_init(msm_ring, size, flags))
      return NULL;

   return &msm_ring->base;
}

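/* A stateobj ring records relocs against its own private reloc_bos table
 * (since it can be used with many different submits).  At flush time,
 * build a copy of its reloc list with each reloc_idx translated into
 * this submit's bo table.  The caller owns (and frees) the returned
 * array.
 */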
static struct drm_msm_gem_submit_reloc *
handle_stateobj_relocs(struct msm_submit *submit, struct msm_ringbuffer *ring)
{
   struct msm_cmd *cmd = ring->cmd;
   struct drm_msm_gem_submit_reloc *relocs;

   relocs = malloc(cmd->nr_relocs * sizeof(*relocs));

   for (unsigned i = 0; i < cmd->nr_relocs; i++) {
      unsigned idx = cmd->relocs[i].reloc_idx;
      struct fd_bo *bo = ring->u.reloc_bos[idx];

      relocs[i] = cmd->relocs[i];
      relocs[i].reloc_idx = append_bo(submit, bo);
   }

   return relocs;
}

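/* Flush the submit: finalize the primary ring, walk the set of
 * associated rings to build the kernel's cmds table (each stateobj
 * contributes a single IB with fixed-up relocs, other rings contribute
 * one IB per finalized cmd), then do the GEM_SUBMIT ioctl.
 */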
static struct fd_fence *
msm_submit_flush(struct fd_submit *submit, int in_fence_fd, bool use_fence_fd)
{
   struct msm_submit *msm_submit = to_msm_submit(submit);
   struct msm_pipe *msm_pipe = to_msm_pipe(submit->pipe);
   struct drm_msm_gem_submit req = {
      .flags = msm_pipe->pipe,
      .queueid = msm_pipe->queue_id,
   };
   int ret;

   finalize_current_cmd(submit->primary);
   append_ring(msm_submit->ring_set, submit->primary);

   unsigned nr_cmds = 0;
   unsigned nr_objs = 0;

   set_foreach (msm_submit->ring_set, entry) {
      struct fd_ringbuffer *ring = (void *)entry->key;
      if (ring->flags & _FD_RINGBUFFER_OBJECT) {
         nr_cmds += 1;
         nr_objs += 1;
      } else {
         if (ring != submit->primary)
            finalize_current_cmd(ring);
         nr_cmds += to_msm_ringbuffer(ring)->u.nr_cmds;
      }
   }

   void *obj_relocs[nr_objs];
   struct drm_msm_gem_submit_cmd cmds[nr_cmds];
   unsigned i = 0, o = 0;

   set_foreach (msm_submit->ring_set, entry) {
      struct fd_ringbuffer *ring = (void *)entry->key;
      struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);

      assert(i < nr_cmds);

      // TODO handle relocs:
      if (ring->flags & _FD_RINGBUFFER_OBJECT) {

         assert(o < nr_objs);

         void *relocs = handle_stateobj_relocs(msm_submit, msm_ring);
         obj_relocs[o++] = relocs;

         cmds[i].type = MSM_SUBMIT_CMD_IB_TARGET_BUF;
         cmds[i].submit_idx = append_bo(msm_submit, msm_ring->ring_bo);
         cmds[i].submit_offset = submit_offset(msm_ring->ring_bo, msm_ring->offset);
         cmds[i].size = offset_bytes(ring->cur, ring->start);
         cmds[i].pad = 0;
         cmds[i].nr_relocs = msm_ring->cmd->nr_relocs;
         cmds[i].relocs = VOID2U64(relocs);

         i++;
      } else {
         for (unsigned j = 0; j < msm_ring->u.nr_cmds; j++) {
            if (ring->flags & FD_RINGBUFFER_PRIMARY) {
               cmds[i].type = MSM_SUBMIT_CMD_BUF;
            } else {
               cmds[i].type = MSM_SUBMIT_CMD_IB_TARGET_BUF;
            }
            struct fd_bo *ring_bo = msm_ring->u.cmds[j]->ring_bo;
            cmds[i].submit_idx = append_bo(msm_submit, ring_bo);
            cmds[i].submit_offset = submit_offset(ring_bo, msm_ring->offset);
            cmds[i].size = msm_ring->u.cmds[j]->size;
            cmds[i].pad = 0;
            cmds[i].nr_relocs = msm_ring->u.cmds[j]->nr_relocs;
            cmds[i].relocs = VOID2U64(msm_ring->u.cmds[j]->relocs);

            i++;
         }
      }
   }

   struct fd_fence *out_fence = fd_fence_new(submit->pipe, use_fence_fd);

   simple_mtx_lock(&fence_lock);
   for (unsigned j = 0; j < msm_submit->nr_bos; j++) {
      fd_bo_add_fence(msm_submit->bos[j], out_fence);
   }
   simple_mtx_unlock(&fence_lock);

   if (in_fence_fd != -1) {
      req.flags |= MSM_SUBMIT_FENCE_FD_IN | MSM_SUBMIT_NO_IMPLICIT;
      req.fence_fd = in_fence_fd;
   }

   if (out_fence->use_fence_fd) {
      req.flags |= MSM_SUBMIT_FENCE_FD_OUT;
   }

   /* needs to be after get_cmd() as that could create bos/cmds table: */
   req.bos = VOID2U64(msm_submit->submit_bos),
   req.nr_bos = msm_submit->nr_submit_bos;
   req.cmds = VOID2U64(cmds), req.nr_cmds = nr_cmds;

   DEBUG_MSG("nr_cmds=%u, nr_bos=%u", req.nr_cmds, req.nr_bos);

   ret = drmCommandWriteRead(submit->pipe->dev->fd, DRM_MSM_GEM_SUBMIT, &req,
                             sizeof(req));
   if (ret) {
      ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno));
      fd_fence_del(out_fence);
      out_fence = NULL;
      msm_dump_submit(&req);
   } else if (!ret && out_fence) {
      out_fence->kfence = req.fence;
      out_fence->ufence = submit->fence;
      out_fence->fence_fd = req.fence_fd;
   }

   for (unsigned o = 0; o < nr_objs; o++)
      free(obj_relocs[o]);

   return out_fence;
}

static void
unref_rings(struct set_entry *entry)
{
   struct fd_ringbuffer *ring = (void *)entry->key;
   fd_ringbuffer_del(ring);
}

static void
msm_submit_destroy(struct fd_submit *submit)
{
   struct msm_submit *msm_submit = to_msm_submit(submit);

   if (msm_submit->suballoc_ring)
      fd_ringbuffer_del(msm_submit->suballoc_ring);

   _mesa_hash_table_destroy(msm_submit->bo_table, NULL);
   _mesa_set_destroy(msm_submit->ring_set, unref_rings);

   // TODO it would be nice to have a way to assert() if all
   // rb's haven't been free'd back to the slab, because that is
   // an indication that we are leaking bo's
   slab_destroy(&msm_submit->ring_pool);

   for (unsigned i = 0; i < msm_submit->nr_bos; i++)
      fd_bo_del(msm_submit->bos[i]);

   free(msm_submit->submit_bos);
   free(msm_submit->bos);
   free(msm_submit);
}

static const struct fd_submit_funcs submit_funcs = {
   .new_ringbuffer = msm_submit_new_ringbuffer,
   .flush = msm_submit_flush,
   .destroy = msm_submit_destroy,
};

struct fd_submit *
msm_submit_new(struct fd_pipe *pipe)
{
   struct msm_submit *msm_submit = calloc(1, sizeof(*msm_submit));
   struct fd_submit *submit;

   msm_submit->bo_table = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                                  _mesa_key_pointer_equal);
   msm_submit->ring_set =
      _mesa_set_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
   // TODO tune size:
   slab_create(&msm_submit->ring_pool, sizeof(struct msm_ringbuffer), 16);

   submit = &msm_submit->base;
   submit->funcs = &submit_funcs;

   return submit;
}

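/* Seal the current cmd, recording how many bytes were actually written,
 * and move it onto the ring's cmds table (msm_ringbuffer_grow() starts
 * the next one).
 */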
static void
finalize_current_cmd(struct fd_ringbuffer *ring)
{
   struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);

   assert(!(ring->flags & _FD_RINGBUFFER_OBJECT));

   if (!msm_ring->cmd)
      return;

   assert(msm_ring->cmd->ring_bo == msm_ring->ring_bo);

   msm_ring->cmd->size = offset_bytes(ring->cur, ring->start);
   APPEND(&msm_ring->u, cmds, msm_ring->cmd);
   msm_ring->cmd = NULL;
}

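/* Growable rings don't realloc/copy: the old bo is sealed into the cmds
 * table as a finished IB and a fresh bo becomes the current write
 * target.
 */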
static void
msm_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t size)
{
   struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
   struct fd_pipe *pipe = msm_ring->u.submit->pipe;

   assert(ring->flags & FD_RINGBUFFER_GROWABLE);

   finalize_current_cmd(ring);

   fd_bo_del(msm_ring->ring_bo);
   msm_ring->ring_bo = fd_bo_new_ring(pipe->dev, size);
   msm_ring->cmd = cmd_new(msm_ring->ring_bo);

   ring->start = fd_bo_map(msm_ring->ring_bo);
   ring->end = &(ring->start[size / 4]);
   ring->cur = ring->start;
   ring->size = size;
}

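/* Record a reloc for the dword at the current write position.  For
 * stateobjs the bo index is local to the ring's reloc_bos table (fixed
 * up at flush time); otherwise it indexes the submit's bo table.  On
 * 64bit gpus a second reloc is emitted for the high dword of the iova.
 */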
static void
msm_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
                          const struct fd_reloc *reloc)
{
   struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
   struct fd_pipe *pipe;
   unsigned reloc_idx;

   if (ring->flags & _FD_RINGBUFFER_OBJECT) {
      unsigned idx = APPEND(&msm_ring->u, reloc_bos, fd_bo_ref(reloc->bo));

      /* this gets fixed up at submit->flush() time, since this state-
       * object rb can be used with many different submits
       */
      reloc_idx = idx;

      pipe = msm_ring->u.pipe;
   } else {
      struct msm_submit *msm_submit = to_msm_submit(msm_ring->u.submit);

      reloc_idx = append_bo(msm_submit, reloc->bo);

      pipe = msm_ring->u.submit->pipe;
   }

   APPEND(msm_ring->cmd, relocs,
          (struct drm_msm_gem_submit_reloc){
             .reloc_idx = reloc_idx,
             .reloc_offset = reloc->offset,
             .or = reloc->orval,
             .shift = reloc->shift,
             .submit_offset =
                offset_bytes(ring->cur, ring->start) + msm_ring->offset,
          });

   ring->cur++;

   if (pipe->is_64bit) {
      APPEND(msm_ring->cmd, relocs,
             (struct drm_msm_gem_submit_reloc){
                .reloc_idx = reloc_idx,
                .reloc_offset = reloc->offset,
                .or = reloc->orval >> 32,
                .shift = reloc->shift - 32,
                .submit_offset =
                   offset_bytes(ring->cur, ring->start) + msm_ring->offset,
             });

      ring->cur++;
   }
}

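/* Recursively add to the submit's ring_set any rings referenced by a
 * stateobj (stateobjs can reference other stateobjs).
 */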
static void
append_stateobj_rings(struct msm_submit *submit, struct fd_ringbuffer *target)
{
   struct msm_ringbuffer *msm_target = to_msm_ringbuffer(target);

   assert(target->flags & _FD_RINGBUFFER_OBJECT);

   set_foreach (msm_target->u.ring_set, entry) {
      struct fd_ringbuffer *ring = (void *)entry->key;

      append_ring(submit->ring_set, ring);

      if (ring->flags & _FD_RINGBUFFER_OBJECT) {
         append_stateobj_rings(submit, ring);
      }
   }
}

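/* Emit a reloc pointing at another ring (ie. an IB), and return the size
 * in bytes of the target cmd.  Also makes sure the target ring, and any
 * rings it references, end up in the submit's ring_set.
 */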
static uint32_t
msm_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
                               struct fd_ringbuffer *target, uint32_t cmd_idx)
{
   struct msm_ringbuffer *msm_target = to_msm_ringbuffer(target);
   struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
   struct fd_bo *bo;
   uint32_t size;

   if ((target->flags & FD_RINGBUFFER_GROWABLE) &&
       (cmd_idx < msm_target->u.nr_cmds)) {
      bo = msm_target->u.cmds[cmd_idx]->ring_bo;
      size = msm_target->u.cmds[cmd_idx]->size;
   } else {
      bo = msm_target->ring_bo;
      size = offset_bytes(target->cur, target->start);
   }

   msm_ringbuffer_emit_reloc(ring, &(struct fd_reloc){
                                      .bo = bo,
                                      .iova = bo->iova + msm_target->offset,
                                      .offset = msm_target->offset,
                                   });

   if (!size)
      return 0;

   if ((target->flags & _FD_RINGBUFFER_OBJECT) &&
       !(ring->flags & _FD_RINGBUFFER_OBJECT)) {
      struct msm_submit *msm_submit = to_msm_submit(msm_ring->u.submit);

      append_stateobj_rings(msm_submit, target);
   }

   if (ring->flags & _FD_RINGBUFFER_OBJECT) {
      append_ring(msm_ring->u.ring_set, target);
   } else {
      struct msm_submit *msm_submit = to_msm_submit(msm_ring->u.submit);
      append_ring(msm_submit->ring_set, target);
   }

   return size;
}

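/* Number of IBs this ring contributes to the submit: for growable rings,
 * each finalized cmd plus the current (not yet finalized) one.
 */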
static uint32_t
msm_ringbuffer_cmd_count(struct fd_ringbuffer *ring)
{
   if (ring->flags & FD_RINGBUFFER_GROWABLE)
      return to_msm_ringbuffer(ring)->u.nr_cmds + 1;
   return 1;
}

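/* Check whether the submit should be flushed before more commands are
 * appended: kernels before FD_VERSION_UNLIMITED_CMDS limit cmdstream
 * size, and we leave headroom (MAX_ARRAY_SIZE/2) in the bo table.
 */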
static bool
msm_ringbuffer_check_size(struct fd_ringbuffer *ring)
{
   assert(!(ring->flags & _FD_RINGBUFFER_OBJECT));
   struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
   struct fd_submit *submit = msm_ring->u.submit;
   struct fd_pipe *pipe = submit->pipe;

   if ((fd_device_version(pipe->dev) < FD_VERSION_UNLIMITED_CMDS) &&
       ((ring->cur - ring->start) > (ring->size / 4 - 0x1000))) {
      return false;
   }

   if (to_msm_submit(submit)->nr_bos > MAX_ARRAY_SIZE / 2) {
      return false;
   }

   return true;
}

static void
msm_ringbuffer_destroy(struct fd_ringbuffer *ring)
{
   struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);

   fd_bo_del(msm_ring->ring_bo);
   if (msm_ring->cmd)
      cmd_free(msm_ring->cmd);

   if (ring->flags & _FD_RINGBUFFER_OBJECT) {
      for (unsigned i = 0; i < msm_ring->u.nr_reloc_bos; i++) {
         fd_bo_del(msm_ring->u.reloc_bos[i]);
      }

      _mesa_set_destroy(msm_ring->u.ring_set, unref_rings);

      free(msm_ring->u.reloc_bos);
      free(msm_ring);
   } else {
      struct fd_submit *submit = msm_ring->u.submit;

      for (unsigned i = 0; i < msm_ring->u.nr_cmds; i++) {
         cmd_free(msm_ring->u.cmds[i]);
      }

      free(msm_ring->u.cmds);
      slab_free_st(&to_msm_submit(submit)->ring_pool, msm_ring);
   }
}

static const struct fd_ringbuffer_funcs ring_funcs = {
   .grow = msm_ringbuffer_grow,
   .emit_reloc = msm_ringbuffer_emit_reloc,
   .emit_reloc_ring = msm_ringbuffer_emit_reloc_ring,
   .cmd_count = msm_ringbuffer_cmd_count,
   .check_size = msm_ringbuffer_check_size,
   .destroy = msm_ringbuffer_destroy,
};

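/* Common init for both submit-owned and stateobj rings: map the bo and
 * point the write pointer at this ring's slice of it.
 */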
static inline struct fd_ringbuffer *
msm_ringbuffer_init(struct msm_ringbuffer *msm_ring, uint32_t size,
                    enum fd_ringbuffer_flags flags)
{
   struct fd_ringbuffer *ring = &msm_ring->base;

   assert(msm_ring->ring_bo);

   uint8_t *base = fd_bo_map(msm_ring->ring_bo);
   ring->start = (void *)(base + msm_ring->offset);
   ring->end = &(ring->start[size / 4]);
   ring->cur = ring->start;

   ring->size = size;
   ring->flags = flags;

   ring->funcs = &ring_funcs;

   msm_ring->u.cmds = NULL;
   msm_ring->u.nr_cmds = msm_ring->u.max_cmds = 0;

   msm_ring->cmd = cmd_new(msm_ring->ring_bo);

   return ring;
}

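/* Stateobj rings are not tied to a particular submit: they get their own
 * bo and track referenced bos/rings themselves, so relocs can be fixed
 * up against whichever submit they are eventually flushed with.
 */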
struct fd_ringbuffer *
msm_ringbuffer_new_object(struct fd_pipe *pipe, uint32_t size)
{
   struct msm_ringbuffer *msm_ring = malloc(sizeof(*msm_ring));

   msm_ring->u.pipe = pipe;
   msm_ring->offset = 0;
   msm_ring->ring_bo = fd_bo_new_ring(pipe->dev, size);
   msm_ring->base.refcnt = 1;

   msm_ring->u.reloc_bos = NULL;
   msm_ring->u.nr_reloc_bos = msm_ring->u.max_reloc_bos = 0;

   msm_ring->u.ring_set =
      _mesa_set_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);

   return msm_ringbuffer_init(msm_ring, size, _FD_RINGBUFFER_OBJECT);
}