/*
 * Copyright 2018 Alyssa Rosenzweig
 * Copyright 2019 Collabora, Ltd.
 * SPDX-License-Identifier: MIT
 *
 */

#include "pool.h"
#include "agx_bo.h"
#include "agx_device.h"

/* Transient command stream pooling: command stream uploads are copied to
 * wherever the previous upload left off in the current backing BO. If there
 * isn't enough space, we allocate a new backing BO for the pool and copy
 * there instead. */
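
/* Illustrative sketch of typical pool usage (not part of the driver): a pool
 * is initialized once, serves any number of transient uploads, and is torn
 * down once the work it backs has retired. The device pointer, create flags,
 * and uploaded data are assumed to come from the caller's context:
 *
 *    struct agx_pool pool;
 *    agx_pool_init(&pool, dev, "Transient pool", create_flags, true);
 *
 *    uint64_t gpu_va = agx_pool_upload(&pool, &data, sizeof(data));
 *    ... emit gpu_va into the command stream ...
 *
 *    agx_pool_cleanup(&pool);
 */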

#define POOL_SLAB_SIZE (256 * 1024)

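/* Allocate a fresh backing BO and make it the current transient target. Any
 * previous backing BO stays in the pool's BO list until agx_pool_cleanup().
 */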
static struct agx_bo *
agx_pool_alloc_backing(struct agx_pool *pool, size_t bo_sz)
{
   struct agx_bo *bo =
      agx_bo_create(pool->dev, bo_sz, 0, pool->create_flags, pool->label);

   util_dynarray_append(&pool->bos, struct agx_bo *, bo);
   pool->transient_bo = bo;
   pool->transient_offset = 0;

   return bo;
}

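/* Initialize a pool that allocates BOs from dev with the given create flags
 * and debug label. If prealloc is set, one slab is allocated up front so the
 * first upload does not have to hit the BO allocator.
 */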
void
agx_pool_init(struct agx_pool *pool, struct agx_device *dev, const char *label,
              unsigned create_flags, bool prealloc)
{
   memset(pool, 0, sizeof(*pool));
   pool->dev = dev;
   pool->create_flags = create_flags;
   pool->label = label;
   util_dynarray_init(&pool->bos, NULL);

   if (prealloc)
      agx_pool_alloc_backing(pool, POOL_SLAB_SIZE);
}

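/* Release the pool's reference on every BO it allocated and free the
 * tracking array. The pool must be re-initialized before further use.
 */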
void
agx_pool_cleanup(struct agx_pool *pool)
{
   util_dynarray_foreach(&pool->bos, struct agx_bo *, bo) {
      agx_bo_unreference(pool->dev, *bo);
   }

   util_dynarray_fini(&pool->bos);
}

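/* Allocate sz bytes from the current transient BO at the given power-of-two
 * alignment, starting a new backing BO if the request does not fit. Returns
 * CPU and GPU pointers to the allocation; the backing BO is optionally
 * returned through out_bo.
 */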
struct agx_ptr
agx_pool_alloc_aligned_with_bo(struct agx_pool *pool, size_t sz,
                               unsigned alignment, struct agx_bo **out_bo)
{
   assert(alignment == util_next_power_of_two(alignment));

   /* Find or create a suitable BO */
   struct agx_bo *bo = pool->transient_bo;
   unsigned offset = ALIGN_POT(pool->transient_offset, alignment);

   /* If we don't fit, allocate a new backing */
   if (unlikely(bo == NULL || (offset + sz) >= POOL_SLAB_SIZE)) {
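      /* Allocate at least a full slab; oversized requests get a dedicated
       * BO, rounded up to a 16 KiB multiple. */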
      bo = agx_pool_alloc_backing(pool,
                                  ALIGN_POT(MAX2(POOL_SLAB_SIZE, sz), 16384));
      offset = 0;
   }

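   /* Advance the cursor past this allocation for the next request */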
   pool->transient_offset = offset + sz;

   struct agx_ptr ret = {
      .cpu = agx_bo_map(bo) + offset,
      .gpu = bo->va->addr + offset,
   };

   if (out_bo)
      *out_bo = bo;

   return ret;
}

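/* Upload data to the pool, aligned to its size rounded up to a power of two */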
uint64_t
agx_pool_upload(struct agx_pool *pool, const void *data, size_t sz)
{
   return agx_pool_upload_aligned(pool, data, sz, util_next_power_of_two(sz));
}

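/* Copy sz bytes of data into a fresh pool allocation with the given alignment
 * and return its GPU address; the backing BO is returned through bo.
 */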
uint64_t
agx_pool_upload_aligned_with_bo(struct agx_pool *pool, const void *data,
                                size_t sz, unsigned alignment,
                                struct agx_bo **bo)
{
   struct agx_ptr transfer =
      agx_pool_alloc_aligned_with_bo(pool, sz, alignment, bo);
   memcpy(transfer.cpu, data, sz);
   return transfer.gpu;
}