#include <inttypes.h>

#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/list.h"

#include "nouveau_winsys.h"
#include "nouveau_screen.h"
#include "nouveau_mm.h"

/* TODO: Higher orders can waste a lot of space for npot size buffers, should
 * add an extra cache for such buffer objects.
 *
 * HACK: Max order == 21 to accommodate TF2's 1.5 MiB, frequently reallocated
 * vertex buffer (VM flush (?) decreases performance dramatically).
 */

#define MM_MIN_ORDER 7 /* >= 6 to not violate ARB_map_buffer_alignment */
#define MM_MAX_ORDER 21

#define MM_NUM_BUCKETS (MM_MAX_ORDER - MM_MIN_ORDER + 1)

#define MM_MIN_SIZE (1 << MM_MIN_ORDER)
#define MM_MAX_SIZE (1 << MM_MAX_ORDER)

struct mm_bucket {
   struct list_head free;
   struct list_head used;
   struct list_head full;
   int num_free;
};

struct nouveau_mman {
   struct nouveau_device *dev;
   struct mm_bucket bucket[MM_NUM_BUCKETS];
   uint32_t domain;
   union nouveau_bo_config config;
   uint64_t allocated;
};

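/* A slab is a single bo carved into equally sized chunks of (1 << order)
 * bytes.  'bits' is a bitmap with one bit per chunk, set while the chunk is
 * free; 'count' is the total number of chunks and 'free' how many of them
 * are still unallocated.
 */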
struct mm_slab {
   struct list_head head;
   struct nouveau_bo *bo;
   struct nouveau_mman *cache;
   int order;
   int count;
   int free;
   uint32_t bits[0];
};

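/* Claim the first free chunk of the slab: clear its bit in the bitmap and
 * return its index, or -1 if the slab is already full.
 */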
static int
mm_slab_alloc(struct mm_slab *slab)
{
   int i, n, b;

   if (slab->free == 0)
      return -1;

   for (i = 0; i < (slab->count + 31) / 32; ++i) {
      b = ffs(slab->bits[i]) - 1;
      if (b >= 0) {
         n = i * 32 + b;
         assert(n < slab->count);
         slab->free--;
         slab->bits[i] &= ~(1 << b);
         return n;
      }
   }
   return -1;
}

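/* Return chunk i to the slab by setting its bit in the bitmap again. */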
static inline void
mm_slab_free(struct mm_slab *slab, int i)
{
   assert(i < slab->count);
   slab->bits[i / 32] |= 1 << (i % 32);
   slab->free++;
   assert(slab->free <= slab->count);
}

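/* Smallest order such that (1 << order) >= size, i.e. ceil(log2(size)):
 * e.g. mm_get_order(4096) == 12 and mm_get_order(4097) == 13.
 */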
static inline int
mm_get_order(uint32_t size)
{
   int s = __builtin_clz(size) ^ 31;

   if (size > (1 << s))
      s += 1;
   return s;
}

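/* Map a chunk order to its bucket; orders below MM_MIN_ORDER share the first
 * bucket, orders above MM_MAX_ORDER are not cached at all.
 */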
static struct mm_bucket *
mm_bucket_by_order(struct nouveau_mman *cache, int order)
{
   if (order > MM_MAX_ORDER)
      return NULL;
   return &cache->bucket[MAX2(order, MM_MIN_ORDER) - MM_MIN_ORDER];
}

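/* Convenience wrapper: pick the bucket for an allocation of 'size' bytes. */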
static struct mm_bucket *
mm_bucket_by_size(struct nouveau_mman *cache, unsigned size)
{
   return mm_bucket_by_order(cache, mm_get_order(size));
}

/* size of bo allocation for slab with chunks of (1 << chunk_order) bytes */
static inline uint32_t
mm_default_slab_size(unsigned chunk_order)
{
   static const int8_t slab_order[MM_MAX_ORDER - MM_MIN_ORDER + 1] =
   {
      12, 12, 13, 14, 14, 17, 17, 17, 17, 19, 19, 20, 21, 22, 22
   };

   assert(chunk_order <= MM_MAX_ORDER && chunk_order >= MM_MIN_ORDER);

   return 1 << slab_order[chunk_order - MM_MIN_ORDER];
}

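/* Allocate a new bo of mm_default_slab_size(chunk_order) bytes, set up its
 * chunk bitmap with every chunk marked free, and put the slab on the
 * bucket's free list.
 */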
static int
mm_slab_new(struct nouveau_mman *cache, struct mm_bucket *bucket, int chunk_order)
{
   struct mm_slab *slab;
   int words, ret;
   const uint32_t size = mm_default_slab_size(chunk_order);

   words = ((size >> chunk_order) + 31) / 32;
   assert(words);

   slab = MALLOC(sizeof(struct mm_slab) + words * 4);
   if (!slab)
      return PIPE_ERROR_OUT_OF_MEMORY;

   memset(&slab->bits[0], ~0, words * 4);

   slab->bo = NULL;

   ret = nouveau_bo_new(cache->dev, cache->domain, 0, size, &cache->config,
                        &slab->bo);
   if (ret) {
      FREE(slab);
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   list_inithead(&slab->head);

   slab->cache = cache;
   slab->order = chunk_order;
   slab->count = slab->free = size >> chunk_order;

   assert(bucket == mm_bucket_by_order(cache, chunk_order));
   list_add(&slab->head, &bucket->free);

   cache->allocated += size;

   if (nouveau_mesa_debug)
      debug_printf("MM: new slab, total memory = %"PRIu64" KiB\n",
                   cache->allocated / 1024);

   return PIPE_OK;
}

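/* Typical use of the cache (a minimal sketch; screen->mm_VRAM and the
 * surrounding caller code are assumptions, not taken from this file):
 *
 *    struct nouveau_bo *bo = NULL;
 *    uint32_t offset;
 *    struct nouveau_mm_allocation *mm =
 *       nouveau_mm_allocate(screen->mm_VRAM, 1024, &bo, &offset);
 *    if (!bo)
 *       return;
 *    ... use the range [offset, offset + 1024) of bo ...
 *    if (mm)
 *       nouveau_mm_free(mm);    (or defer via nouveau_mm_free_work)
 *    nouveau_bo_ref(NULL, &bo);
 */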
/* @return token to identify slab or NULL if we just allocated a new bo */
struct nouveau_mm_allocation *
nouveau_mm_allocate(struct nouveau_mman *cache,
                    uint32_t size, struct nouveau_bo **bo, uint32_t *offset)
{
   struct mm_bucket *bucket;
   struct mm_slab *slab;
   struct nouveau_mm_allocation *alloc;
   int ret;

   bucket = mm_bucket_by_size(cache, size);
   if (!bucket) {
      ret = nouveau_bo_new(cache->dev, cache->domain, 0, size, &cache->config,
                           bo);
      if (ret)
         debug_printf("bo_new(%x, %x): %i\n",
                      size, cache->config.nv50.memtype, ret);

      *offset = 0;
      return NULL;
   }

   if (!list_is_empty(&bucket->used)) {
      slab = LIST_ENTRY(struct mm_slab, bucket->used.next, head);
   } else {
      if (list_is_empty(&bucket->free)) {
         /* a new slab could not be created, so there is nothing to hand out */
         if (mm_slab_new(cache, bucket, MAX2(mm_get_order(size), MM_MIN_ORDER)))
            return NULL;
      }
      slab = LIST_ENTRY(struct mm_slab, bucket->free.next, head);

      list_del(&slab->head);
      list_add(&slab->head, &bucket->used);
   }

   *offset = mm_slab_alloc(slab) << slab->order;

   alloc = MALLOC_STRUCT(nouveau_mm_allocation);
   if (!alloc)
      return NULL;

   nouveau_bo_ref(slab->bo, bo);

   if (slab->free == 0) {
      list_del(&slab->head);
      list_add(&slab->head, &bucket->full);
   }

   alloc->offset = *offset;
   alloc->priv = (void *)slab;

   return alloc;
}

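/* Give the chunk described by 'alloc' back to its slab, and move the slab to
 * the bucket's free or used list if its state changed (completely free, or
 * no longer full).
 */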
void
nouveau_mm_free(struct nouveau_mm_allocation *alloc)
{
   struct mm_slab *slab = (struct mm_slab *)alloc->priv;
   struct mm_bucket *bucket = mm_bucket_by_order(slab->cache, slab->order);

   mm_slab_free(slab, alloc->offset >> slab->order);

   if (slab->free == slab->count) {
      list_del(&slab->head);
      list_addtail(&slab->head, &bucket->free);
   } else
   if (slab->free == 1) {
      list_del(&slab->head);
      list_addtail(&slab->head, &bucket->used);
   }

   FREE(alloc);
}

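/* void* wrapper around nouveau_mm_free, so the release can be scheduled as
 * deferred work (e.g. to run once a fence has signalled).
 */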
void
nouveau_mm_free_work(void *data)
{
   nouveau_mm_free(data);
}

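/* Create a suballocation cache for bos allocated from 'domain' with the
 * given placement/tiling config; all buckets start out empty.
 */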
struct nouveau_mman *
nouveau_mm_create(struct nouveau_device *dev, uint32_t domain,
                  union nouveau_bo_config *config)
{
   struct nouveau_mman *cache = MALLOC_STRUCT(nouveau_mman);
   int i;

   if (!cache)
      return NULL;

   cache->dev = dev;
   cache->domain = domain;
   cache->config = *config;
   cache->allocated = 0;

   for (i = 0; i < MM_NUM_BUCKETS; ++i) {
      list_inithead(&cache->bucket[i].free);
      list_inithead(&cache->bucket[i].used);
      list_inithead(&cache->bucket[i].full);
   }

   return cache;
}

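/* Release every slab on the given list: drop the bo reference and free the
 * slab bookkeeping itself.
 */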
static inline void
nouveau_mm_free_slabs(struct list_head *head)
{
   struct mm_slab *slab, *next;

   LIST_FOR_EACH_ENTRY_SAFE(slab, next, head, head) {
      list_del(&slab->head);
      nouveau_bo_ref(NULL, &slab->bo);
      FREE(slab);
   }
}

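/* Tear down the whole cache.  Slabs that still have chunks handed out are
 * freed as well, so this only warns instead of failing.
 */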
void
nouveau_mm_destroy(struct nouveau_mman *cache)
{
   int i;

   if (!cache)
      return;

   for (i = 0; i < MM_NUM_BUCKETS; ++i) {
      if (!list_is_empty(&cache->bucket[i].used) ||
          !list_is_empty(&cache->bucket[i].full))
         debug_printf("WARNING: destroying GPU memory cache "
                      "with some buffers still in use\n");

      nouveau_mm_free_slabs(&cache->bucket[i].free);
      nouveau_mm_free_slabs(&cache->bucket[i].used);
      nouveau_mm_free_slabs(&cache->bucket[i].full);
   }

   FREE(cache);
}