1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <stdlib.h>
25 #include <unistd.h>
26 #include <limits.h>
27 #include <assert.h>
28 #include <sys/mman.h>
29 
30 #include "anv_private.h"
31 
32 #include "common/gen_aux_map.h"
33 #include "util/anon_file.h"
34 
35 #ifdef HAVE_VALGRIND
36 #define VG_NOACCESS_READ(__ptr) ({                       \
37    VALGRIND_MAKE_MEM_DEFINED((__ptr), sizeof(*(__ptr))); \
38    __typeof(*(__ptr)) __val = *(__ptr);                  \
39    VALGRIND_MAKE_MEM_NOACCESS((__ptr), sizeof(*(__ptr)));\
40    __val;                                                \
41 })
42 #define VG_NOACCESS_WRITE(__ptr, __val) ({                  \
43    VALGRIND_MAKE_MEM_UNDEFINED((__ptr), sizeof(*(__ptr)));  \
44    *(__ptr) = (__val);                                      \
45    VALGRIND_MAKE_MEM_NOACCESS((__ptr), sizeof(*(__ptr)));   \
46 })
47 #else
48 #define VG_NOACCESS_READ(__ptr) (*(__ptr))
49 #define VG_NOACCESS_WRITE(__ptr, __val) (*(__ptr) = (__val))
50 #endif
51 
52 #ifndef MAP_POPULATE
53 #define MAP_POPULATE 0
54 #endif
55 
56 /* Design goals:
57  *
58  *  - Lock free (except when resizing underlying bos)
59  *
60  *  - Constant time allocation with typically only one atomic
61  *
62  *  - Multiple allocation sizes without fragmentation
63  *
64  *  - Can grow while keeping addresses and offset of contents stable
65  *
66  *  - All allocations within one bo so we can point one of the
67  *    STATE_BASE_ADDRESS pointers at it.
68  *
69  * The overall design is a two-level allocator: top level is a fixed size, big
70  * block (8k) allocator, which operates out of a bo.  Allocation is done by
71  * either pulling a block from the free list or growing the used range of the
72  * bo.  Growing the range may run out of space in the bo which we then need to
73  * grow.  Growing the bo is tricky in a multi-threaded, lockless environment:
74  * we need to keep all pointers and contents in the old map valid.  GEM bos in
75  * general can't grow, but we use a trick: we create a memfd and use ftruncate
76  * to grow it as necessary.  We mmap the new size and then create a gem bo for
77  * it using the new gem userptr ioctl.  Without heavy-handed locking around
78  * our allocation fast-path, there isn't really a way to munmap the old mmap,
79  * so we just keep it around until garbage collection time.  While the block
80  * allocator is lockless for normal operations, we block other threads trying
81  * to allocate while we're growing the map.  It shouldn't happen often, and
82  * growing is fast anyway.
83  *
84  * At the next level we can use various sub-allocators.  The state pool is a
85  * pool of smaller, fixed size objects, which operates much like the block
86  * pool.  It uses a free list for freeing objects, but when it runs out of
87  * space it just allocates a new block from the block pool.  This allocator is
88  * intended for longer lived state objects such as SURFACE_STATE and most
89  * other persistent state objects in the API.  We may need to track more info
90  * with these objects and a pointer back to the CPU object (e.g. VkImage).  In
91  * those cases we just allocate a slightly bigger object and put the extra
92  * state after the GPU state object.
93  *
94  * The state stream allocator works similarly to how the i965 DRI driver streams
95  * all its state.  Even with Vulkan, we need to emit transient state (whether
96  * surface state base or dynamic state base), and for that we can just get a
97  * block and fill it up.  These cases are local to a command buffer and the
98  * sub-allocator need not be thread safe.  The streaming allocator gets a new
99  * block when it runs out of space and chains them together so they can be
100  * easily freed.
101  */
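/* As an illustration of the "constant time allocation with typically only
 * one atomic" goal above (a sketch, not part of the driver): the allocators
 * below pack a next/end pair into a single 64-bit word (struct
 * anv_block_state) so that one atomic fetch-and-add both reserves space and
 * tells the caller whether it ran off the end.  On a little-endian CPU,
 * where 'next' is the low 32 bits, the fast path is essentially:
 *
 *    union block_state {
 *       struct { uint32_t next; uint32_t end; };
 *       uint64_t u64;
 *    };
 *
 *    static int64_t bump_alloc(union block_state *st, uint32_t size)
 *    {
 *       union block_state old;
 *       old.u64 = __sync_fetch_and_add(&st->u64, size);
 *       if (old.next + size <= old.end)
 *          return old.next;   (fast path: one atomic, no locks)
 *       return -1;            (slow path: caller must grow and retry)
 *    }
 *
 * The real implementations below add the grow-and-futex slow path on top of
 * this.
 */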
102 
103 /* UINT32_MAX can never be a valid entry index, so we use it to indicate
104  * that the free list is empty. */
105 #define EMPTY UINT32_MAX
106 
107 #define PAGE_SIZE 4096
108 
109 struct anv_mmap_cleanup {
110    void *map;
111    size_t size;
112 };
113 
114 static inline uint32_t
115 ilog2_round_up(uint32_t value)
116 {
117    assert(value != 0);
118    return 32 - __builtin_clz(value - 1);
119 }
120 
121 static inline uint32_t
122 round_to_power_of_two(uint32_t value)
123 {
124    return 1 << ilog2_round_up(value);
125 }
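/* A few reference values for the helpers above: ilog2_round_up(2) == 1,
 * ilog2_round_up(3) == 2, ilog2_round_up(4096) == 12, so
 * round_to_power_of_two(3) == 4, round_to_power_of_two(5) == 8 and
 * round_to_power_of_two(4096) == 4096.
 */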
126 
127 struct anv_state_table_cleanup {
128    void *map;
129    size_t size;
130 };
131 
132 #define ANV_STATE_TABLE_CLEANUP_INIT ((struct anv_state_table_cleanup){0})
133 #define ANV_STATE_ENTRY_SIZE (sizeof(struct anv_free_entry))
134 
135 static VkResult
136 anv_state_table_expand_range(struct anv_state_table *table, uint32_t size);
137 
138 VkResult
139 anv_state_table_init(struct anv_state_table *table,
140                     struct anv_device *device,
141                     uint32_t initial_entries)
142 {
143    VkResult result;
144 
145    table->device = device;
146 
147    /* Just make it 2GB up-front.  The Linux kernel won't actually back it
148     * with pages until we either map and fault on one of them or we use
149     * userptr and send a chunk of it off to the GPU.
150     */
151    table->fd = os_create_anonymous_file(BLOCK_POOL_MEMFD_SIZE, "state table");
152    if (table->fd == -1) {
153       result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
154       goto fail_fd;
155    }
156 
157    if (!u_vector_init(&table->cleanups,
158                       round_to_power_of_two(sizeof(struct anv_state_table_cleanup)),
159                       128)) {
160       result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
161       goto fail_fd;
162    }
163 
164    table->state.next = 0;
165    table->state.end = 0;
166    table->size = 0;
167 
168    uint32_t initial_size = initial_entries * ANV_STATE_ENTRY_SIZE;
169    result = anv_state_table_expand_range(table, initial_size);
170    if (result != VK_SUCCESS)
171       goto fail_cleanups;
172 
173    return VK_SUCCESS;
174 
175  fail_cleanups:
176    u_vector_finish(&table->cleanups);
177  fail_fd:
178    close(table->fd);
179 
180    return result;
181 }
182 
183 static VkResult
184 anv_state_table_expand_range(struct anv_state_table *table, uint32_t size)
185 {
186    void *map;
187    struct anv_state_table_cleanup *cleanup;
188 
189    /* Assert that we only ever grow the pool */
190    assert(size >= table->state.end);
191 
192    /* Make sure that we don't go outside the bounds of the memfd */
193    if (size > BLOCK_POOL_MEMFD_SIZE)
194       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
195 
196    cleanup = u_vector_add(&table->cleanups);
197    if (!cleanup)
198       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
199 
200    *cleanup = ANV_STATE_TABLE_CLEANUP_INIT;
201 
202    /* Just leak the old map until we destroy the pool.  We can't munmap it
203     * without races or imposing locking on the block allocation fast path.
204     * On the whole, the leaked maps add up to less than the size of the
205     * current map.  MAP_POPULATE seems like the right thing to do, but we
206     * should try to get some numbers.
207     */
208    map = mmap(NULL, size, PROT_READ | PROT_WRITE,
209               MAP_SHARED | MAP_POPULATE, table->fd, 0);
210    if (map == MAP_FAILED) {
211       return vk_errorf(table->device, table->device,
212                        VK_ERROR_OUT_OF_HOST_MEMORY, "mmap failed: %m");
213    }
214 
215    cleanup->map = map;
216    cleanup->size = size;
217 
218    table->map = map;
219    table->size = size;
220 
221    return VK_SUCCESS;
222 }
223 
224 static VkResult
225 anv_state_table_grow(struct anv_state_table *table)
226 {
227    VkResult result = VK_SUCCESS;
228 
229    uint32_t used = align_u32(table->state.next * ANV_STATE_ENTRY_SIZE,
230                              PAGE_SIZE);
231    uint32_t old_size = table->size;
232 
233    /* The block pool is always initialized to a nonzero size and this function
234     * is always called after initialization.
235     */
236    assert(old_size > 0);
237 
238    uint32_t required = MAX2(used, old_size);
239    if (used * 2 <= required) {
240       /* If we're in this case then this isn't the first allocation and we
241        * already have enough space to hold double what we have allocated.
242        * There's nothing for us to do.
243        */
244       goto done;
245    }
246 
247    uint32_t size = old_size * 2;
248    while (size < required)
249       size *= 2;
250 
251    assert(size > table->size);
252 
253    result = anv_state_table_expand_range(table, size);
254 
255  done:
256    return result;
257 }
258 
259 void
260 anv_state_table_finish(struct anv_state_table *table)
261 {
262    struct anv_state_table_cleanup *cleanup;
263 
264    u_vector_foreach(cleanup, &table->cleanups) {
265       if (cleanup->map)
266          munmap(cleanup->map, cleanup->size);
267    }
268 
269    u_vector_finish(&table->cleanups);
270 
271    close(table->fd);
272 }
273 
274 VkResult
275 anv_state_table_add(struct anv_state_table *table, uint32_t *idx,
276                     uint32_t count)
277 {
278    struct anv_block_state state, old, new;
279    VkResult result;
280 
281    assert(idx);
282 
283    while(1) {
284       state.u64 = __sync_fetch_and_add(&table->state.u64, count);
285       if (state.next + count <= state.end) {
286          assert(table->map);
287          struct anv_free_entry *entry = &table->map[state.next];
288          for (int i = 0; i < count; i++) {
289             entry[i].state.idx = state.next + i;
290          }
291          *idx = state.next;
292          return VK_SUCCESS;
293       } else if (state.next <= state.end) {
294          /* We allocated the first entries past the end of the table, so we
295           * have to grow it.  table->state.next acts as a mutex: threads that
296           * try to allocate now will get indices above the current limit and
297           * hit futex_wait below.
298           */
299          new.next = state.next + count;
300          do {
301             result = anv_state_table_grow(table);
302             if (result != VK_SUCCESS)
303                return result;
304             new.end = table->size / ANV_STATE_ENTRY_SIZE;
305          } while (new.end < new.next);
306 
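         /* Atomically publish the new next/end pair.  Threads that raced
          * past the old end are parked in futex_wait() below on
          * table->state.end, so wake them all up to retry against the
          * grown table.
          */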
307          old.u64 = __sync_lock_test_and_set(&table->state.u64, new.u64);
308          if (old.next != state.next)
309             futex_wake(&table->state.end, INT_MAX);
310       } else {
311          futex_wait(&table->state.end, state.end, NULL);
312          continue;
313       }
314    }
315 }
316 
317 void
318 anv_free_list_push(union anv_free_list *list,
319                    struct anv_state_table *table,
320                    uint32_t first, uint32_t count)
321 {
322    union anv_free_list current, old, new;
323    uint32_t last = first;
324 
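   /* Link the entries being freed into a chain through their 'next'
    * indices first; the chain only becomes visible to other threads once
    * the compare-and-swap below swings the list head to 'first'.
    */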
325    for (uint32_t i = 1; i < count; i++, last++)
326       table->map[last].next = last + 1;
327 
328    old = *list;
329    do {
330       current = old;
331       table->map[last].next = current.offset;
332       new.offset = first;
333       new.count = current.count + 1;
334       old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
335    } while (old.u64 != current.u64);
336 }
337 
338 struct anv_state *
339 anv_free_list_pop(union anv_free_list *list,
340                   struct anv_state_table *table)
341 {
342    union anv_free_list current, new, old;
343 
344    current.u64 = list->u64;
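   /* The 64-bit compare-and-swap covers both the head offset and a counter
    * that is bumped on every push and pop.  A concurrent pop/push pair that
    * reinstalls the same head index (the classic ABA hazard) therefore
    * still changes the packed value and sends us around the loop again.
    */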
345    while (current.offset != EMPTY) {
346       __sync_synchronize();
347       new.offset = table->map[current.offset].next;
348       new.count = current.count + 1;
349       old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
350       if (old.u64 == current.u64) {
351          struct anv_free_entry *entry = &table->map[current.offset];
352          return &entry->state;
353       }
354       current = old;
355    }
356 
357    return NULL;
358 }
359 
360 static VkResult
361 anv_block_pool_expand_range(struct anv_block_pool *pool,
362                             uint32_t center_bo_offset, uint32_t size);
363 
364 VkResult
365 anv_block_pool_init(struct anv_block_pool *pool,
366                     struct anv_device *device,
367                     uint64_t start_address,
368                     uint32_t initial_size)
369 {
370    VkResult result;
371 
372    pool->device = device;
373    pool->use_softpin = device->physical->use_softpin;
374    pool->nbos = 0;
375    pool->size = 0;
376    pool->center_bo_offset = 0;
377    pool->start_address = gen_canonical_address(start_address);
378    pool->map = NULL;
379 
380    if (pool->use_softpin) {
381       pool->bo = NULL;
382       pool->fd = -1;
383    } else {
384       /* Just make it 2GB up-front.  The Linux kernel won't actually back it
385        * with pages until we either map and fault on one of them or we use
386        * userptr and send a chunk of it off to the GPU.
387        */
388       pool->fd = os_create_anonymous_file(BLOCK_POOL_MEMFD_SIZE, "block pool");
389       if (pool->fd == -1)
390          return vk_error(VK_ERROR_INITIALIZATION_FAILED);
391 
392       pool->wrapper_bo = (struct anv_bo) {
393          .refcount = 1,
394          .offset = -1,
395          .is_wrapper = true,
396       };
397       pool->bo = &pool->wrapper_bo;
398    }
399 
400    if (!u_vector_init(&pool->mmap_cleanups,
401                       round_to_power_of_two(sizeof(struct anv_mmap_cleanup)),
402                       128)) {
403       result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
404       goto fail_fd;
405    }
406 
407    pool->state.next = 0;
408    pool->state.end = 0;
409    pool->back_state.next = 0;
410    pool->back_state.end = 0;
411 
412    result = anv_block_pool_expand_range(pool, 0, initial_size);
413    if (result != VK_SUCCESS)
414       goto fail_mmap_cleanups;
415 
416    /* Make the entire pool available in the front of the pool.  If back
417     * allocation needs to use this space, the "ends" will be re-arranged.
418     */
419    pool->state.end = pool->size;
420 
421    return VK_SUCCESS;
422 
423  fail_mmap_cleanups:
424    u_vector_finish(&pool->mmap_cleanups);
425  fail_fd:
426    if (pool->fd >= 0)
427       close(pool->fd);
428 
429    return result;
430 }
431 
432 void
433 anv_block_pool_finish(struct anv_block_pool *pool)
434 {
435    anv_block_pool_foreach_bo(bo, pool) {
436       if (bo->map)
437          anv_gem_munmap(pool->device, bo->map, bo->size);
438       anv_gem_close(pool->device, bo->gem_handle);
439    }
440 
441    struct anv_mmap_cleanup *cleanup;
442    u_vector_foreach(cleanup, &pool->mmap_cleanups)
443       munmap(cleanup->map, cleanup->size);
444    u_vector_finish(&pool->mmap_cleanups);
445 
446    if (pool->fd >= 0)
447       close(pool->fd);
448 }
449 
450 static VkResult
451 anv_block_pool_expand_range(struct anv_block_pool *pool,
452                             uint32_t center_bo_offset, uint32_t size)
453 {
454    /* Assert that we only ever grow the pool */
455    assert(center_bo_offset >= pool->back_state.end);
456    assert(size - center_bo_offset >= pool->state.end);
457 
458    /* Assert that we don't go outside the bounds of the memfd */
459    assert(center_bo_offset <= BLOCK_POOL_MEMFD_CENTER);
460    assert(pool->use_softpin ||
461           size - center_bo_offset <=
462           BLOCK_POOL_MEMFD_SIZE - BLOCK_POOL_MEMFD_CENTER);
463 
464    /* For state pool BOs we have to be a bit careful about where we place them
465     * in the GTT.  There are two documented workarounds for state base address
466     * placement : Wa32bitGeneralStateOffset and Wa32bitInstructionBaseOffset
467     * which state that those two base addresses do not support 48-bit
468     * addresses and need to be placed in the bottom 32-bit range.
469     * Unfortunately, this is not quite accurate.
470     *
471     * The real problem is that we always set the size of our state pools in
472     * STATE_BASE_ADDRESS to 0xfffff (the maximum) even though the BO is most
473     * likely significantly smaller.  We do this because we do not know at the
474     * time we emit STATE_BASE_ADDRESS whether or not we will need to expand
475     * the pool during command buffer building so we don't actually have a
476     * valid final size.  If the address + size, as seen by STATE_BASE_ADDRESS,
477     * overflows 48 bits, the GPU appears to treat all accesses to the buffer
478     * as being out of bounds and returns zero.  For dynamic state, this
479     * usually just leads to rendering corruptions, but shaders that are all
480     * zero hang the GPU immediately.
481     *
482     * The easiest solution is to do exactly what the bogus workarounds say to
483     * do: restrict these buffers to 32-bit addresses.  We could also pin the
484     * BO to some particular location of our choosing, but that's significantly
485     * more work than just not setting a flag.  So, we explicitly DO NOT set
486     * the EXEC_OBJECT_SUPPORTS_48B_ADDRESS flag and the kernel does all of the
487     * hard work for us.  When using softpin, we're in control and the fixed
488     * addresses we choose are fine for base addresses.
489     */
490    enum anv_bo_alloc_flags bo_alloc_flags = ANV_BO_ALLOC_CAPTURE;
491    if (!pool->use_softpin)
492       bo_alloc_flags |= ANV_BO_ALLOC_32BIT_ADDRESS;
493 
494    if (pool->use_softpin) {
495       uint32_t new_bo_size = size - pool->size;
496       struct anv_bo *new_bo;
497       assert(center_bo_offset == 0);
498       VkResult result = anv_device_alloc_bo(pool->device, new_bo_size,
499                                             bo_alloc_flags |
500                                             ANV_BO_ALLOC_FIXED_ADDRESS |
501                                             ANV_BO_ALLOC_MAPPED |
502                                             ANV_BO_ALLOC_SNOOPED,
503                                             pool->start_address + pool->size,
504                                             &new_bo);
505       if (result != VK_SUCCESS)
506          return result;
507 
508       pool->bos[pool->nbos++] = new_bo;
509 
510       /* This pointer will always point to the first BO in the list */
511       pool->bo = pool->bos[0];
512    } else {
513       /* Just leak the old map until we destroy the pool.  We can't munmap it
514        * without races or imposing locking on the block allocation fast path.
515        * On the whole, the leaked maps add up to less than the size of the
516        * current map.  MAP_POPULATE seems like the right thing to do, but we
517        * should try to get some numbers.
518        */
519       void *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
520                        MAP_SHARED | MAP_POPULATE, pool->fd,
521                        BLOCK_POOL_MEMFD_CENTER - center_bo_offset);
522       if (map == MAP_FAILED)
523          return vk_errorf(pool->device, pool->device,
524                           VK_ERROR_MEMORY_MAP_FAILED, "mmap failed: %m");
525 
526       struct anv_bo *new_bo;
527       VkResult result = anv_device_import_bo_from_host_ptr(pool->device,
528                                                            map, size,
529                                                            bo_alloc_flags,
530                                                            0 /* client_address */,
531                                                            &new_bo);
532       if (result != VK_SUCCESS) {
533          munmap(map, size);
534          return result;
535       }
536 
537       struct anv_mmap_cleanup *cleanup = u_vector_add(&pool->mmap_cleanups);
538       if (!cleanup) {
539          munmap(map, size);
540          anv_device_release_bo(pool->device, new_bo);
541          return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
542       }
543       cleanup->map = map;
544       cleanup->size = size;
545 
546       /* Now that we mapped the new memory, we can write the new
547        * center_bo_offset back into pool and update pool->map. */
548       pool->center_bo_offset = center_bo_offset;
549       pool->map = map + center_bo_offset;
550 
551       pool->bos[pool->nbos++] = new_bo;
552       pool->wrapper_bo.map = new_bo;
553    }
554 
555    assert(pool->nbos < ANV_MAX_BLOCK_POOL_BOS);
556    pool->size = size;
557 
558    return VK_SUCCESS;
559 }
560 
561 /** Returns current memory map of the block pool.
562  *
563  * The returned pointer points to the map for the memory at the specified
564  * offset. The offset parameter is relative to the "center" of the block pool
565  * rather than the start of the block pool BO map.
566  */
567 void*
568 anv_block_pool_map(struct anv_block_pool *pool, int32_t offset, uint32_t size)
569 {
570    if (pool->use_softpin) {
571       struct anv_bo *bo = NULL;
572       int32_t bo_offset = 0;
573       anv_block_pool_foreach_bo(iter_bo, pool) {
574          if (offset < bo_offset + iter_bo->size) {
575             bo = iter_bo;
576             break;
577          }
578          bo_offset += iter_bo->size;
579       }
580       assert(bo != NULL);
581       assert(offset >= bo_offset);
582       assert((offset - bo_offset) + size <= bo->size);
583 
584       return bo->map + (offset - bo_offset);
585    } else {
586       return pool->map + offset;
587    }
588 }
589 
590 /** Grows and re-centers the block pool.
591  *
592  * We grow the block pool in one or both directions in such a way that the
593  * following conditions are met:
594  *
595  *  1) The size of the entire pool is always a power of two.
596  *
597  *  2) The pool only ever grows, at either end.  Neither end can get
598  *     shortened.
599  *
600  *  3) At the end of the allocation, we have about twice as much space
601  *     allocated for each end as we have used.  This way the pool doesn't
602  *     grow too far in one direction or the other.
603  *
604  *  4) If the _alloc_back() has never been called, then the back portion of
605  *     the pool retains a size of zero.  (This makes it easier for users of
606  *     the block pool that only want a one-sided pool.)
607  *
608  *  5) We have enough space allocated for at least one more block in
609  *     whichever side `state` points to.
610  *
611  *  6) The center of the pool is always aligned to both the block_size of
612  *     the pool and a 4K CPU page.
613  */
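/* To make the arithmetic below concrete (the numbers are only illustrative):
 * starting from a 64 KiB pool centered at 16 KiB with back_used = 8 KiB and
 * front_used = 40 KiB, the front has used more than half of its space, so
 * the pool doubles to 128 KiB.  The new center is
 * 128 KiB * 8 KiB / 48 KiB ~= 21.3 KiB, aligned down to a page boundary at
 * 20 KiB, which keeps roughly the same used ratio on each side: 20 KiB for
 * the back and 108 KiB for the front.
 */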
614 static uint32_t
615 anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state,
616                     uint32_t contiguous_size)
617 {
618    VkResult result = VK_SUCCESS;
619 
620    pthread_mutex_lock(&pool->device->mutex);
621 
622    assert(state == &pool->state || state == &pool->back_state);
623 
624    /* Gather a little usage information on the pool.  Since we may have
625     * threads waiting in the queue to get some storage while we resize, it's
626     * actually possible that total_used will be larger than old_size.  In
627     * particular, block_pool_alloc() increments state->next prior to
628     * calling block_pool_grow, so this ensures that we get enough space for
629     * whichever side tries to grow the pool.
630     *
631     * We align to a page size because it makes it easier to do our
632     * calculations later in such a way that we stay page-aligned.
633     */
634    uint32_t back_used = align_u32(pool->back_state.next, PAGE_SIZE);
635    uint32_t front_used = align_u32(pool->state.next, PAGE_SIZE);
636    uint32_t total_used = front_used + back_used;
637 
638    assert(state == &pool->state || back_used > 0);
639 
640    uint32_t old_size = pool->size;
641 
642    /* The block pool is always initialized to a nonzero size and this function
643     * is always called after initialization.
644     */
645    assert(old_size > 0);
646 
647    const uint32_t old_back = pool->center_bo_offset;
648    const uint32_t old_front = old_size - pool->center_bo_offset;
649 
650    /* back_used and front_used may be smaller than the actual requirement
651     * because they are based on the next pointers, which are updated prior
652     * to calling this function.
653     */
654    uint32_t back_required = MAX2(back_used, old_back);
655    uint32_t front_required = MAX2(front_used, old_front);
656 
657    if (pool->use_softpin) {
658       /* With softpin, the pool is made up of a bunch of buffers with separate
659        * maps.  Make sure we have enough contiguous space that we can get a
660        * properly contiguous map for the next chunk.
661        */
662       assert(old_back == 0);
663       front_required = MAX2(front_required, old_front + contiguous_size);
664    }
665 
666    if (back_used * 2 <= back_required && front_used * 2 <= front_required) {
667       /* If we're in this case then this isn't the first allocation and we
668        * already have enough space on both sides to hold double what we
669        * have allocated.  There's nothing for us to do.
670        */
671       goto done;
672    }
673 
674    uint32_t size = old_size * 2;
675    while (size < back_required + front_required)
676       size *= 2;
677 
678    assert(size > pool->size);
679 
680    /* We compute a new center_bo_offset such that, when we double the size
681     * of the pool, we maintain the ratio of how much is used by each side.
682     * This way things should remain more-or-less balanced.
683     */
684    uint32_t center_bo_offset;
685    if (back_used == 0) {
686       /* If we're in this case then we have never called alloc_back().  In
687        * this case, we want to keep the offset at 0 to make things as simple
688        * as possible for users that don't care about back allocations.
689        */
690       center_bo_offset = 0;
691    } else {
692       /* Try to "center" the allocation based on how much is currently in
693        * use on each side of the center line.
694        */
695       center_bo_offset = ((uint64_t)size * back_used) / total_used;
696 
697       /* Align down to a multiple of the page size */
698       center_bo_offset &= ~(PAGE_SIZE - 1);
699 
700       assert(center_bo_offset >= back_used);
701 
702       /* Make sure we don't shrink the back end of the pool */
703       if (center_bo_offset < back_required)
704          center_bo_offset = back_required;
705 
706       /* Make sure that we don't shrink the front end of the pool */
707       if (size - center_bo_offset < front_required)
708          center_bo_offset = size - front_required;
709    }
710 
711    assert(center_bo_offset % PAGE_SIZE == 0);
712 
713    result = anv_block_pool_expand_range(pool, center_bo_offset, size);
714 
715 done:
716    pthread_mutex_unlock(&pool->device->mutex);
717 
718    if (result == VK_SUCCESS) {
719       /* Return the appropriate new size.  This function never actually
720        * updates state->next.  Instead, we let the caller do that because it
721        * needs to do so in order to maintain its concurrency model.
722        */
723       if (state == &pool->state) {
724          return pool->size - pool->center_bo_offset;
725       } else {
726          assert(pool->center_bo_offset > 0);
727          return pool->center_bo_offset;
728       }
729    } else {
730       return 0;
731    }
732 }
733 
734 static uint32_t
735 anv_block_pool_alloc_new(struct anv_block_pool *pool,
736                          struct anv_block_state *pool_state,
737                          uint32_t block_size, uint32_t *padding)
738 {
739    struct anv_block_state state, old, new;
740 
741    /* Most allocations won't generate any padding */
742    if (padding)
743       *padding = 0;
744 
745    while (1) {
746       state.u64 = __sync_fetch_and_add(&pool_state->u64, block_size);
747       if (state.next + block_size <= state.end) {
748          return state.next;
749       } else if (state.next <= state.end) {
750          if (pool->use_softpin && state.next < state.end) {
751             /* We need to grow the block pool, but still have some leftover
752              * space that can't be used by that particular allocation. So we
753              * add that as a "padding", and return it.
754              */
755             uint32_t leftover = state.end - state.next;
756 
757             /* If there is some leftover space in the pool, the caller must
758              * deal with it.
759              */
760             assert(leftover == 0 || padding);
761             if (padding)
762                *padding = leftover;
763             state.next += leftover;
764          }
765 
766          /* We allocated the first block outside the pool so we have to grow
767           * the pool.  pool_state->next acts as a mutex: threads that try to
768           * allocate now will get block indexes above the current limit and
769           * hit futex_wait below.
770           */
771          new.next = state.next + block_size;
772          do {
773             new.end = anv_block_pool_grow(pool, pool_state, block_size);
774          } while (new.end < new.next);
775 
776          old.u64 = __sync_lock_test_and_set(&pool_state->u64, new.u64);
777          if (old.next != state.next)
778             futex_wake(&pool_state->end, INT_MAX);
779          return state.next;
780       } else {
781          futex_wait(&pool_state->end, state.end, NULL);
782          continue;
783       }
784    }
785 }
786 
787 int32_t
788 anv_block_pool_alloc(struct anv_block_pool *pool,
789                      uint32_t block_size, uint32_t *padding)
790 {
791    uint32_t offset;
792 
793    offset = anv_block_pool_alloc_new(pool, &pool->state, block_size, padding);
794 
795    return offset;
796 }
797 
798 /* Allocates a block out of the back of the block pool.
799  *
800  * This allocates a block earlier than the "start" of the block pool.
801  * The offsets returned from this function will be negative but will still
802  * be correct relative to the block pool's map pointer.
803  *
804  * If you ever use anv_block_pool_alloc_back, then you will have to do
805  * gymnastics with the block pool's BO when doing relocations.
806  */
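/* For example, the very first anv_block_pool_alloc_back(pool, 8192) gets
 * offset 0 from anv_block_pool_alloc_new() and therefore returns -8192:
 * that block occupies map offsets [-8192, 0) relative to the pool's center.
 */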
807 int32_t
808 anv_block_pool_alloc_back(struct anv_block_pool *pool,
809                           uint32_t block_size)
810 {
811    int32_t offset = anv_block_pool_alloc_new(pool, &pool->back_state,
812                                              block_size, NULL);
813 
814    /* The offset we get out of anv_block_pool_alloc_new() is actually the
815     * number of bytes downwards from the middle to the end of the block.
816     * We need to turn it into a (negative) offset from the middle to the
817     * start of the block.
818     */
819    assert(offset >= 0);
820    return -(offset + block_size);
821 }
822 
823 VkResult
824 anv_state_pool_init(struct anv_state_pool *pool,
825                     struct anv_device *device,
826                     uint64_t base_address,
827                     int32_t start_offset,
828                     uint32_t block_size)
829 {
830    /* We don't want to ever see signed overflow */
831    assert(start_offset < INT32_MAX - (int32_t)BLOCK_POOL_MEMFD_SIZE);
832 
833    VkResult result = anv_block_pool_init(&pool->block_pool, device,
834                                          base_address + start_offset,
835                                          block_size * 16);
836    if (result != VK_SUCCESS)
837       return result;
838 
839    pool->start_offset = start_offset;
840 
841    result = anv_state_table_init(&pool->table, device, 64);
842    if (result != VK_SUCCESS) {
843       anv_block_pool_finish(&pool->block_pool);
844       return result;
845    }
846 
847    assert(util_is_power_of_two_or_zero(block_size));
848    pool->block_size = block_size;
849    pool->back_alloc_free_list = ANV_FREE_LIST_EMPTY;
850    for (unsigned i = 0; i < ANV_STATE_BUCKETS; i++) {
851       pool->buckets[i].free_list = ANV_FREE_LIST_EMPTY;
852       pool->buckets[i].block.next = 0;
853       pool->buckets[i].block.end = 0;
854    }
855    VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));
856 
857    return VK_SUCCESS;
858 }
859 
860 void
861 anv_state_pool_finish(struct anv_state_pool *pool)
862 {
863    VG(VALGRIND_DESTROY_MEMPOOL(pool));
864    anv_state_table_finish(&pool->table);
865    anv_block_pool_finish(&pool->block_pool);
866 }
867 
868 static uint32_t
869 anv_fixed_size_state_pool_alloc_new(struct anv_fixed_size_state_pool *pool,
870                                     struct anv_block_pool *block_pool,
871                                     uint32_t state_size,
872                                     uint32_t block_size,
873                                     uint32_t *padding)
874 {
875    struct anv_block_state block, old, new;
876    uint32_t offset;
877 
878    /* We don't always use anv_block_pool_alloc(), which would set *padding to
879     * zero for us. So if we have a pointer to padding, we must zero it out
880     * ourselves here, to make sure we always return some sensible value.
881     */
882    if (padding)
883       *padding = 0;
884 
885    /* If our state is large, we don't need any sub-allocation from a block.
886     * Instead, we just grab whole (potentially large) blocks.
887     */
888    if (state_size >= block_size)
889       return anv_block_pool_alloc(block_pool, state_size, padding);
890 
891  restart:
892    block.u64 = __sync_fetch_and_add(&pool->block.u64, state_size);
893 
894    if (block.next < block.end) {
895       return block.next;
896    } else if (block.next == block.end) {
897       offset = anv_block_pool_alloc(block_pool, block_size, padding);
898       new.next = offset + state_size;
899       new.end = offset + block_size;
900       old.u64 = __sync_lock_test_and_set(&pool->block.u64, new.u64);
901       if (old.next != block.next)
902          futex_wake(&pool->block.end, INT_MAX);
903       return offset;
904    } else {
905       futex_wait(&pool->block.end, block.end, NULL);
906       goto restart;
907    }
908 }
909 
910 static uint32_t
911 anv_state_pool_get_bucket(uint32_t size)
912 {
913    unsigned size_log2 = ilog2_round_up(size);
914    assert(size_log2 <= ANV_MAX_STATE_SIZE_LOG2);
915    if (size_log2 < ANV_MIN_STATE_SIZE_LOG2)
916       size_log2 = ANV_MIN_STATE_SIZE_LOG2;
917    return size_log2 - ANV_MIN_STATE_SIZE_LOG2;
918 }
919 
920 static uint32_t
921 anv_state_pool_get_bucket_size(uint32_t bucket)
922 {
923    uint32_t size_log2 = bucket + ANV_MIN_STATE_SIZE_LOG2;
924    return 1 << size_log2;
925 }
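/* Assuming the usual ANV_MIN_STATE_SIZE_LOG2 of 6 (64 bytes): requests of
 * up to 64 bytes land in bucket 0, 65-128 bytes in bucket 1, and so on up
 * to ANV_MAX_STATE_SIZE_LOG2.  anv_state_pool_get_bucket_size() is the
 * inverse mapping, returning 64, 128, ... for buckets 0, 1, ...
 */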
926 
927 /** Helper to push a chunk into the state table.
928  *
929  * It creates 'count' entries in the state table and updates their sizes,
930  * offsets and maps, also pushing them as "free" states.
931  */
932 static void
933 anv_state_pool_return_blocks(struct anv_state_pool *pool,
934                              uint32_t chunk_offset, uint32_t count,
935                              uint32_t block_size)
936 {
937    /* Disallow returning 0 chunks */
938    assert(count != 0);
939 
940    /* Make sure we always return chunks aligned to the block_size */
941    assert(chunk_offset % block_size == 0);
942 
943    uint32_t st_idx;
944    UNUSED VkResult result = anv_state_table_add(&pool->table, &st_idx, count);
945    assert(result == VK_SUCCESS);
946    for (int i = 0; i < count; i++) {
947       /* update states that were added back to the state table */
948       struct anv_state *state_i = anv_state_table_get(&pool->table,
949                                                       st_idx + i);
950       state_i->alloc_size = block_size;
951       state_i->offset = pool->start_offset + chunk_offset + block_size * i;
952       state_i->map = anv_block_pool_map(&pool->block_pool,
953                                         state_i->offset,
954                                         state_i->alloc_size);
955    }
956 
957    uint32_t block_bucket = anv_state_pool_get_bucket(block_size);
958    anv_free_list_push(&pool->buckets[block_bucket].free_list,
959                       &pool->table, st_idx, count);
960 }
961 
962 /** Returns a chunk of memory back to the state pool.
963  *
964  * Do a two-level split. If chunk_size is bigger than divisor
965  * (pool->block_size), we return as many divisor sized blocks as we can, from
966  * the end of the chunk.
967  *
968  * The remainder is then split into smaller blocks (starting at small_size if
969  * it is non-zero), with larger blocks always being taken from the end of the
970  * chunk.
971  */
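/* As an illustrative example (assuming the smallest bucket is 64 bytes):
 * returning a 10 KiB chunk to a pool whose block_size is 8 KiB with
 * small_size = 64 first gives back one 8 KiB block taken from the end of
 * the chunk, then splits the remaining 2 KiB at the front into 32 free
 * 64-byte blocks.
 */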
972 static void
973 anv_state_pool_return_chunk(struct anv_state_pool *pool,
974                             uint32_t chunk_offset, uint32_t chunk_size,
975                             uint32_t small_size)
976 {
977    uint32_t divisor = pool->block_size;
978    uint32_t nblocks = chunk_size / divisor;
979    uint32_t rest = chunk_size - nblocks * divisor;
980 
981    if (nblocks > 0) {
982       /* First return divisor aligned and sized chunks. We start returning
983        * larger blocks from the end of the chunk, since they should already be
984        * aligned to divisor. Also anv_state_pool_return_blocks() only accepts
985        * aligned chunks.
986        */
987       uint32_t offset = chunk_offset + rest;
988       anv_state_pool_return_blocks(pool, offset, nblocks, divisor);
989    }
990 
991    chunk_size = rest;
992    divisor /= 2;
993 
994    if (small_size > 0 && small_size < divisor)
995       divisor = small_size;
996 
997    uint32_t min_size = 1 << ANV_MIN_STATE_SIZE_LOG2;
998 
999    /* Just as before, return larger divisor aligned blocks from the end of the
1000     * chunk first.
1001     */
1002    while (chunk_size > 0 && divisor >= min_size) {
1003       nblocks = chunk_size / divisor;
1004       rest = chunk_size - nblocks * divisor;
1005       if (nblocks > 0) {
1006          anv_state_pool_return_blocks(pool, chunk_offset + rest,
1007                                       nblocks, divisor);
1008          chunk_size = rest;
1009       }
1010       divisor /= 2;
1011    }
1012 }
1013 
1014 static struct anv_state
1015 anv_state_pool_alloc_no_vg(struct anv_state_pool *pool,
1016                            uint32_t size, uint32_t align)
1017 {
1018    uint32_t bucket = anv_state_pool_get_bucket(MAX2(size, align));
1019 
1020    struct anv_state *state;
1021    uint32_t alloc_size = anv_state_pool_get_bucket_size(bucket);
1022    int32_t offset;
1023 
1024    /* Try free list first. */
1025    state = anv_free_list_pop(&pool->buckets[bucket].free_list,
1026                              &pool->table);
1027    if (state) {
1028       assert(state->offset >= pool->start_offset);
1029       goto done;
1030    }
1031 
1032    /* Try to grab a chunk from some larger bucket and split it up */
1033    for (unsigned b = bucket + 1; b < ANV_STATE_BUCKETS; b++) {
1034       state = anv_free_list_pop(&pool->buckets[b].free_list, &pool->table);
1035       if (state) {
1036          unsigned chunk_size = anv_state_pool_get_bucket_size(b);
1037          int32_t chunk_offset = state->offset;
1038 
1039          /* First let's update the state we got to its new size.  Offset and map
1040           * remain the same.
1041           */
1042          state->alloc_size = alloc_size;
1043 
1044          /* Now return the unused part of the chunk back to the pool as free
1045           * blocks
1046           *
1047           * There are a couple of options as to what we do with it:
1048           *
1049           *    1) We could fully split the chunk into state.alloc_size sized
1050           *       pieces.  However, this would mean that allocating a 16B
1051           *       state could potentially split a 2MB chunk into 128K smaller
1052           *       chunks.  This would lead to unnecessary fragmentation.
1053           *
1054           *    2) The classic "buddy allocator" method would have us split the
1055           *       chunk in half and return one half.  Then we would split the
1056           *       remaining half in half and return one half, and repeat as
1057           *       needed until we get down to the size we want.  However, if
1058           *       you are allocating a bunch of the same size state (which is
1059           *       the common case), this means that every other allocation has
1060           *       to go up a level and every fourth goes up two levels, etc.
1061           *       This is not nearly as efficient as it could be if we did a
1062           *       little more work up-front.
1063           *
1064           *    3) Split the difference between (1) and (2) by doing a
1065           *       two-level split.  If it's bigger than some fixed block_size,
1066           *       we split it into block_size sized chunks and return all but
1067           *       one of them.  Then we split what remains into
1068           *       state.alloc_size sized chunks and return them.
1069           *
1070           * We choose something close to option (3), which is implemented with
1071           * anv_state_pool_return_chunk(). That is done by returning the
1072           * remainder of the chunk, with alloc_size as a hint of the size that
1073           * we want the smaller chunk split into.
1074           */
1075          anv_state_pool_return_chunk(pool, chunk_offset + alloc_size,
1076                                      chunk_size - alloc_size, alloc_size);
1077          goto done;
1078       }
1079    }
1080 
1081    uint32_t padding;
1082    offset = anv_fixed_size_state_pool_alloc_new(&pool->buckets[bucket],
1083                                                 &pool->block_pool,
1084                                                 alloc_size,
1085                                                 pool->block_size,
1086                                                 &padding);
1087    /* Every time we allocate a new state, add it to the state table */
1088    uint32_t idx;
1089    UNUSED VkResult result = anv_state_table_add(&pool->table, &idx, 1);
1090    assert(result == VK_SUCCESS);
1091 
1092    state = anv_state_table_get(&pool->table, idx);
1093    state->offset = pool->start_offset + offset;
1094    state->alloc_size = alloc_size;
1095    state->map = anv_block_pool_map(&pool->block_pool, offset, alloc_size);
1096 
1097    if (padding > 0) {
1098       uint32_t return_offset = offset - padding;
1099       anv_state_pool_return_chunk(pool, return_offset, padding, 0);
1100    }
1101 
1102 done:
1103    return *state;
1104 }
1105 
1106 struct anv_state
1107 anv_state_pool_alloc(struct anv_state_pool *pool, uint32_t size, uint32_t align)
1108 {
1109    if (size == 0)
1110       return ANV_STATE_NULL;
1111 
1112    struct anv_state state = anv_state_pool_alloc_no_vg(pool, size, align);
1113    VG(VALGRIND_MEMPOOL_ALLOC(pool, state.map, size));
1114    return state;
1115 }
1116 
1117 struct anv_state
1118 anv_state_pool_alloc_back(struct anv_state_pool *pool)
1119 {
1120    struct anv_state *state;
1121    uint32_t alloc_size = pool->block_size;
1122 
1123    /* This function is only used with pools where start_offset == 0 */
1124    assert(pool->start_offset == 0);
1125 
1126    state = anv_free_list_pop(&pool->back_alloc_free_list, &pool->table);
1127    if (state) {
1128       assert(state->offset < pool->start_offset);
1129       goto done;
1130    }
1131 
1132    int32_t offset;
1133    offset = anv_block_pool_alloc_back(&pool->block_pool,
1134                                       pool->block_size);
1135    uint32_t idx;
1136    UNUSED VkResult result = anv_state_table_add(&pool->table, &idx, 1);
1137    assert(result == VK_SUCCESS);
1138 
1139    state = anv_state_table_get(&pool->table, idx);
1140    state->offset = pool->start_offset + offset;
1141    state->alloc_size = alloc_size;
1142    state->map = anv_block_pool_map(&pool->block_pool, offset, alloc_size);
1143 
1144 done:
1145    VG(VALGRIND_MEMPOOL_ALLOC(pool, state->map, state->alloc_size));
1146    return *state;
1147 }
1148 
1149 static void
1150 anv_state_pool_free_no_vg(struct anv_state_pool *pool, struct anv_state state)
1151 {
1152    assert(util_is_power_of_two_or_zero(state.alloc_size));
1153    unsigned bucket = anv_state_pool_get_bucket(state.alloc_size);
1154 
1155    if (state.offset < pool->start_offset) {
1156       assert(state.alloc_size == pool->block_size);
1157       anv_free_list_push(&pool->back_alloc_free_list,
1158                          &pool->table, state.idx, 1);
1159    } else {
1160       anv_free_list_push(&pool->buckets[bucket].free_list,
1161                          &pool->table, state.idx, 1);
1162    }
1163 }
1164 
1165 void
1166 anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state)
1167 {
1168    if (state.alloc_size == 0)
1169       return;
1170 
1171    VG(VALGRIND_MEMPOOL_FREE(pool, state.map));
1172    anv_state_pool_free_no_vg(pool, state);
1173 }
1174 
1175 struct anv_state_stream_block {
1176    struct anv_state block;
1177 
1178    /* The next block */
1179    struct anv_state_stream_block *next;
1180 
1181 #ifdef HAVE_VALGRIND
1182    /* A pointer to the first user-allocated thing in this block.  This is
1183     * what valgrind sees as the start of the block.
1184     */
1185    void *_vg_ptr;
1186 #endif
1187 };
1188 
1189 /* The state stream allocator is a one-shot, single-threaded allocator for
1190  * variable-sized blocks.  We use it for allocating dynamic state.
1191  */
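/* A typical usage sketch (the pool and sizes here are only illustrative):
 *
 *    struct anv_state_stream stream;
 *    anv_state_stream_init(&stream, &device->dynamic_state_pool, 16384);
 *    struct anv_state a = anv_state_stream_alloc(&stream, 64, 64);
 *    struct anv_state b = anv_state_stream_alloc(&stream, 1024, 32);
 *    ... emit state into a.map and b.map ...
 *    anv_state_stream_finish(&stream);
 *
 * anv_state_stream_finish() hands every block the stream grabbed back to
 * the underlying state pool in one go.
 */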
1192 void
1193 anv_state_stream_init(struct anv_state_stream *stream,
1194                       struct anv_state_pool *state_pool,
1195                       uint32_t block_size)
1196 {
1197    stream->state_pool = state_pool;
1198    stream->block_size = block_size;
1199 
1200    stream->block = ANV_STATE_NULL;
1201 
1202    /* Ensure that next + whatever > block_size.  This way the first call to
1203     * state_stream_alloc fetches a new block.
1204     */
1205    stream->next = block_size;
1206 
1207    util_dynarray_init(&stream->all_blocks, NULL);
1208 
1209    VG(VALGRIND_CREATE_MEMPOOL(stream, 0, false));
1210 }
1211 
1212 void
1213 anv_state_stream_finish(struct anv_state_stream *stream)
1214 {
1215    util_dynarray_foreach(&stream->all_blocks, struct anv_state, block) {
1216       VG(VALGRIND_MEMPOOL_FREE(stream, block->map));
1217       VG(VALGRIND_MAKE_MEM_NOACCESS(block->map, block->alloc_size));
1218       anv_state_pool_free_no_vg(stream->state_pool, *block);
1219    }
1220    util_dynarray_fini(&stream->all_blocks);
1221 
1222    VG(VALGRIND_DESTROY_MEMPOOL(stream));
1223 }
1224 
1225 struct anv_state
1226 anv_state_stream_alloc(struct anv_state_stream *stream,
1227                        uint32_t size, uint32_t alignment)
1228 {
1229    if (size == 0)
1230       return ANV_STATE_NULL;
1231 
1232    assert(alignment <= PAGE_SIZE);
1233 
1234    uint32_t offset = align_u32(stream->next, alignment);
1235    if (offset + size > stream->block.alloc_size) {
1236       uint32_t block_size = stream->block_size;
1237       if (block_size < size)
1238          block_size = round_to_power_of_two(size);
1239 
1240       stream->block = anv_state_pool_alloc_no_vg(stream->state_pool,
1241                                                  block_size, PAGE_SIZE);
1242       util_dynarray_append(&stream->all_blocks,
1243                            struct anv_state, stream->block);
1244       VG(VALGRIND_MAKE_MEM_NOACCESS(stream->block.map, block_size));
1245 
1246       /* Reset back to the start */
1247       stream->next = offset = 0;
1248       assert(offset + size <= stream->block.alloc_size);
1249    }
1250    const bool new_block = stream->next == 0;
1251 
1252    struct anv_state state = stream->block;
1253    state.offset += offset;
1254    state.alloc_size = size;
1255    state.map += offset;
1256 
1257    stream->next = offset + size;
1258 
1259    if (new_block) {
1260       assert(state.map == stream->block.map);
1261       VG(VALGRIND_MEMPOOL_ALLOC(stream, state.map, size));
1262    } else {
1263       /* This only updates the mempool.  The newly allocated chunk is still
1264        * marked as NOACCESS. */
1265       VG(VALGRIND_MEMPOOL_CHANGE(stream, stream->block.map, stream->block.map,
1266                                  stream->next));
1267       /* Mark the newly allocated chunk as undefined */
1268       VG(VALGRIND_MAKE_MEM_UNDEFINED(state.map, state.alloc_size));
1269    }
1270 
1271    return state;
1272 }
1273 
1274 void
1275 anv_state_reserved_pool_init(struct anv_state_reserved_pool *pool,
1276                              struct anv_state_pool *parent,
1277                              uint32_t count, uint32_t size, uint32_t alignment)
1278 {
1279    pool->pool = parent;
1280    pool->reserved_blocks = ANV_FREE_LIST_EMPTY;
1281    pool->count = count;
1282 
1283    for (unsigned i = 0; i < count; i++) {
1284       struct anv_state state = anv_state_pool_alloc(pool->pool, size, alignment);
1285       anv_free_list_push(&pool->reserved_blocks, &pool->pool->table, state.idx, 1);
1286    }
1287 }
1288 
1289 void
1290 anv_state_reserved_pool_finish(struct anv_state_reserved_pool *pool)
1291 {
1292    struct anv_state *state;
1293 
1294    while ((state = anv_free_list_pop(&pool->reserved_blocks, &pool->pool->table))) {
1295       anv_state_pool_free(pool->pool, *state);
1296       pool->count--;
1297    }
1298    assert(pool->count == 0);
1299 }
1300 
1301 struct anv_state
1302 anv_state_reserved_pool_alloc(struct anv_state_reserved_pool *pool)
1303 {
1304    return *anv_free_list_pop(&pool->reserved_blocks, &pool->pool->table);
1305 }
1306 
1307 void
1308 anv_state_reserved_pool_free(struct anv_state_reserved_pool *pool,
1309                              struct anv_state state)
1310 {
1311    anv_free_list_push(&pool->reserved_blocks, &pool->pool->table, state.idx, 1);
1312 }
1313 
1314 void
1315 anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device)
1316 {
1317    pool->device = device;
1318    for (unsigned i = 0; i < ARRAY_SIZE(pool->free_list); i++) {
1319       util_sparse_array_free_list_init(&pool->free_list[i],
1320                                        &device->bo_cache.bo_map, 0,
1321                                        offsetof(struct anv_bo, free_index));
1322    }
1323 
1324    VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));
1325 }
1326 
1327 void
1328 anv_bo_pool_finish(struct anv_bo_pool *pool)
1329 {
1330    for (unsigned i = 0; i < ARRAY_SIZE(pool->free_list); i++) {
1331       while (1) {
1332          struct anv_bo *bo =
1333             util_sparse_array_free_list_pop_elem(&pool->free_list[i]);
1334          if (bo == NULL)
1335             break;
1336 
1337          /* anv_device_release_bo is going to "free" it */
1338          VG(VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, 1));
1339          anv_device_release_bo(pool->device, bo);
1340       }
1341    }
1342 
1343    VG(VALGRIND_DESTROY_MEMPOOL(pool));
1344 }
1345 
1346 VkResult
1347 anv_bo_pool_alloc(struct anv_bo_pool *pool, uint32_t size,
1348                   struct anv_bo **bo_out)
1349 {
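   /* BOs are bucketed by power-of-two size starting at 4 KiB: a 4 KiB
    * request maps to bucket 0, an 8 KiB request to bucket 1, and e.g. a
    * 5000-byte request is rounded up to 8 KiB and also served from
    * bucket 1.
    */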
1350    const unsigned size_log2 = size < 4096 ? 12 : ilog2_round_up(size);
1351    const unsigned pow2_size = 1 << size_log2;
1352    const unsigned bucket = size_log2 - 12;
1353    assert(bucket < ARRAY_SIZE(pool->free_list));
1354 
1355    struct anv_bo *bo =
1356       util_sparse_array_free_list_pop_elem(&pool->free_list[bucket]);
1357    if (bo != NULL) {
1358       VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size));
1359       *bo_out = bo;
1360       return VK_SUCCESS;
1361    }
1362 
1363    VkResult result = anv_device_alloc_bo(pool->device,
1364                                          pow2_size,
1365                                          ANV_BO_ALLOC_MAPPED |
1366                                          ANV_BO_ALLOC_SNOOPED |
1367                                          ANV_BO_ALLOC_CAPTURE,
1368                                          0 /* explicit_address */,
1369                                          &bo);
1370    if (result != VK_SUCCESS)
1371       return result;
1372 
1373    /* We want it to look like it came from this pool */
1374    VG(VALGRIND_FREELIKE_BLOCK(bo->map, 0));
1375    VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size));
1376 
1377    *bo_out = bo;
1378 
1379    return VK_SUCCESS;
1380 }
1381 
1382 void
1383 anv_bo_pool_free(struct anv_bo_pool *pool, struct anv_bo *bo)
1384 {
1385    VG(VALGRIND_MEMPOOL_FREE(pool, bo->map));
1386 
1387    assert(util_is_power_of_two_or_zero(bo->size));
1388    const unsigned size_log2 = ilog2_round_up(bo->size);
1389    const unsigned bucket = size_log2 - 12;
1390    assert(bucket < ARRAY_SIZE(pool->free_list));
1391 
1392    assert(util_sparse_array_get(&pool->device->bo_cache.bo_map,
1393                                 bo->gem_handle) == bo);
1394    util_sparse_array_free_list_push(&pool->free_list[bucket],
1395                                     &bo->gem_handle, 1);
1396 }
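/* Typical usage (a sketch only; "pool" is assumed to be an anv_bo_pool owned
 * by the device, e.g. for batch buffers):
 *
 *    struct anv_bo *bo;
 *    VkResult result = anv_bo_pool_alloc(pool, 16 * 1024, &bo);
 *    if (result == VK_SUCCESS) {
 *       memcpy(bo->map, data, data_size);   // BOs from this pool are mapped
 *       anv_bo_pool_free(pool, bo);         // returns it to its size bucket
 *    }
 *
 * Freeing never hands the BO back to the kernel; that only happens in
 * anv_bo_pool_finish above.
 */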
1397 
1398 // Scratch pool
1399 
1400 void
1401 anv_scratch_pool_init(struct anv_device *device, struct anv_scratch_pool *pool)
1402 {
1403    memset(pool, 0, sizeof(*pool));
1404 }
1405 
1406 void
1407 anv_scratch_pool_finish(struct anv_device *device, struct anv_scratch_pool *pool)
1408 {
1409    for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1410       for (unsigned i = 0; i < 16; i++) {
1411          if (pool->bos[i][s] != NULL)
1412             anv_device_release_bo(device, pool->bos[i][s]);
1413       }
1414    }
1415 }
1416 
1417 struct anv_bo *
1418 anv_scratch_pool_alloc(struct anv_device *device, struct anv_scratch_pool *pool,
1419                        gl_shader_stage stage, unsigned per_thread_scratch)
1420 {
1421    if (per_thread_scratch == 0)
1422       return NULL;
1423 
1424    unsigned scratch_size_log2 = ffs(per_thread_scratch / 2048);
1425    assert(scratch_size_log2 < 16);
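   /* For illustration, assuming per_thread_scratch is a power of two of at
    * least 2 KiB: 2048 -> ffs(1) = 1, 4096 -> ffs(2) = 2, 65536 -> ffs(32) = 6,
    * so each scratch size gets its own slot in pool->bos per shader stage.
    */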
1426 
1427    struct anv_bo *bo = p_atomic_read(&pool->bos[scratch_size_log2][stage]);
1428 
1429    if (bo != NULL)
1430       return bo;
1431 
1432    const struct gen_device_info *devinfo = &device->info;
1433 
1434    unsigned subslices = MAX2(device->physical->subslice_total, 1);
1435 
1436    /* The documentation for 3DSTATE_PS "Scratch Space Base Pointer" says:
1437     *
1438     *    "Scratch Space per slice is computed based on 4 sub-slices.  SW
1439     *     must allocate scratch space enough so that each slice has 4
1440     *     slices allowed."
1441     *
1442     * According to the other driver team, this applies to compute shaders
1443     * as well.  This is not currently documented at all.
1444     *
1445     * This hack is no longer necessary on Gen11+.
1446     *
1447     * For Gen11+, scratch space allocation is based on the number of threads
1448     * in the base configuration.
1449     */
1450    if (devinfo->gen == 12)
1451       subslices = (devinfo->is_dg1 || devinfo->gt == 2 ? 6 : 2);
1452    else if (devinfo->gen == 11)
1453       subslices = 8;
1454    else if (devinfo->gen >= 9)
1455       subslices = 4 * devinfo->num_slices;
1456 
1457    unsigned scratch_ids_per_subslice;
1458    if (devinfo->gen >= 12) {
1459       /* Same as ICL below, but with 16 EUs. */
1460       scratch_ids_per_subslice = 16 * 8;
1461    } else if (devinfo->gen == 11) {
1462       /* The MEDIA_VFE_STATE docs say:
1463        *
1464        *    "Starting with this configuration, the Maximum Number of
1465        *     Threads must be set to (#EU * 8) for GPGPU dispatches.
1466        *
1467        *     Although there are only 7 threads per EU in the configuration,
1468        *     the FFTID is calculated as if there are 8 threads per EU,
1469        *     which in turn requires a larger amount of Scratch Space to be
1470        *     allocated by the driver."
1471        */
1472       scratch_ids_per_subslice = 8 * 8;
1473    } else if (devinfo->is_haswell) {
1474       /* WaCSScratchSize:hsw
1475        *
1476        * Haswell's scratch space address calculation appears to be sparse
1477        * rather than tightly packed. The Thread ID has bits indicating
1478        * which subslice, EU within a subslice, and thread within an EU it
1479        * is. There's a maximum of two slices and two subslices, so these
1480        * can be stored with a single bit. Even though there are only 10 EUs
1481        * per subslice, this is stored in 4 bits, so there's an effective
1482        * maximum value of 16 EUs. Similarly, although there are only 7
1483        * threads per EU, this is stored in a 3 bit number, giving an
1484        * effective maximum value of 8 threads per EU.
1485        *
1486        * This means that we need to use 16 * 8 instead of 10 * 7 for the
1487        * number of threads per subslice.
1488        */
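      /* In other words: 4 EU-index bits plus 3 thread-index bits address
       * 2^7 = 128 = 16 * 8 scratch slots per subslice, even though only
       * 10 * 7 = 70 real threads exist.
       */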
1489       scratch_ids_per_subslice = 16 * 8;
1490    } else if (devinfo->is_cherryview) {
1491       /* Cherryview devices have either 6 or 8 EUs per subslice, and each EU
1492        * has 7 threads. The 6 EU devices appear to calculate thread IDs as if
1493        * they had 8 EUs.
1494        */
1495       scratch_ids_per_subslice = 8 * 7;
1496    } else {
1497       scratch_ids_per_subslice = devinfo->max_cs_threads;
1498    }
1499 
1500    uint32_t max_threads[] = {
1501       [MESA_SHADER_VERTEX]           = devinfo->max_vs_threads,
1502       [MESA_SHADER_TESS_CTRL]        = devinfo->max_tcs_threads,
1503       [MESA_SHADER_TESS_EVAL]        = devinfo->max_tes_threads,
1504       [MESA_SHADER_GEOMETRY]         = devinfo->max_gs_threads,
1505       [MESA_SHADER_FRAGMENT]         = devinfo->max_wm_threads,
1506       [MESA_SHADER_COMPUTE]          = scratch_ids_per_subslice * subslices,
1507    };
1508 
1509    uint32_t size = per_thread_scratch * max_threads[stage];
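   /* Rough illustrative numbers (not taken from this code): on a one-slice
    * Gen9 part, subslices = 4 and scratch_ids_per_subslice is
    * devinfo->max_cs_threads (56 on many Gen9 SKUs), so a compute shader
    * with per_thread_scratch = 2048 would get a 2048 * 56 * 4 = 448 KiB BO.
    */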
1510 
1511    /* Even though the Scratch base pointers in 3DSTATE_*S are 64 bits, they
1512     * are still relative to the general state base address.  When we emit
1513     * STATE_BASE_ADDRESS, we set general state base address to 0 and the size
1514     * to the maximum (1 page under 4GB).  This allows us to just place the
1515     * scratch buffers anywhere we wish in the bottom 32 bits of address space
1516     * and just set the scratch base pointer in 3DSTATE_*S using a relocation.
1517     * However, in order to do so, we need to ensure that the kernel does not
1518     * place the scratch BO above the 32-bit boundary.
1519     *
1520     * NOTE: Technically, it can't go "anywhere" because the top page is off
1521     * limits.  However, when EXEC_OBJECT_SUPPORTS_48B_ADDRESS is set, the
1522     * kernel allocates space using
1523     *
1524     *    end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
1525     *
1526     * so nothing will ever touch the top page.
1527     */
1528    VkResult result = anv_device_alloc_bo(device, size,
1529                                          ANV_BO_ALLOC_32BIT_ADDRESS,
1530                                          0 /* explicit_address */,
1531                                          &bo);
1532    if (result != VK_SUCCESS)
1533       return NULL; /* TODO */
1534 
1535    struct anv_bo *current_bo =
1536       p_atomic_cmpxchg(&pool->bos[scratch_size_log2][stage], NULL, bo);
1537    if (current_bo) {
1538       anv_device_release_bo(device, bo);
1539       return current_bo;
1540    } else {
1541       return bo;
1542    }
1543 }
1544 
1545 VkResult
1546 anv_bo_cache_init(struct anv_bo_cache *cache)
1547 {
1548    util_sparse_array_init(&cache->bo_map, sizeof(struct anv_bo), 1024);
1549 
1550    if (pthread_mutex_init(&cache->mutex, NULL)) {
1551       util_sparse_array_finish(&cache->bo_map);
1552       return vk_errorf(NULL, NULL, VK_ERROR_OUT_OF_HOST_MEMORY,
1553                        "pthread_mutex_init failed: %m");
1554    }
1555 
1556    return VK_SUCCESS;
1557 }
1558 
1559 void
1560 anv_bo_cache_finish(struct anv_bo_cache *cache)
1561 {
1562    util_sparse_array_finish(&cache->bo_map);
1563    pthread_mutex_destroy(&cache->mutex);
1564 }
1565 
1566 #define ANV_BO_CACHE_SUPPORTED_FLAGS \
1567    (EXEC_OBJECT_WRITE | \
1568     EXEC_OBJECT_ASYNC | \
1569     EXEC_OBJECT_SUPPORTS_48B_ADDRESS | \
1570     EXEC_OBJECT_PINNED | \
1571     EXEC_OBJECT_CAPTURE)
1572 
1573 static uint32_t
1574 anv_bo_alloc_flags_to_bo_flags(struct anv_device *device,
1575                                enum anv_bo_alloc_flags alloc_flags)
1576 {
1577    struct anv_physical_device *pdevice = device->physical;
1578 
1579    uint64_t bo_flags = 0;
1580    if (!(alloc_flags & ANV_BO_ALLOC_32BIT_ADDRESS) &&
1581        pdevice->supports_48bit_addresses)
1582       bo_flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
1583 
1584    if ((alloc_flags & ANV_BO_ALLOC_CAPTURE) && pdevice->has_exec_capture)
1585       bo_flags |= EXEC_OBJECT_CAPTURE;
1586 
1587    if (alloc_flags & ANV_BO_ALLOC_IMPLICIT_WRITE) {
1588       assert(alloc_flags & ANV_BO_ALLOC_IMPLICIT_SYNC);
1589       bo_flags |= EXEC_OBJECT_WRITE;
1590    }
1591 
1592    if (!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_SYNC) && pdevice->has_exec_async)
1593       bo_flags |= EXEC_OBJECT_ASYNC;
1594 
1595    if (pdevice->use_softpin)
1596       bo_flags |= EXEC_OBJECT_PINNED;
1597 
1598    return bo_flags;
1599 }
1600 
1601 static uint32_t
1602 anv_device_get_bo_align(struct anv_device *device,
1603                         enum anv_bo_alloc_flags alloc_flags)
1604 {
1605    /* Gen12 CCS surface addresses need to be 64K aligned. */
1606    if (device->info.gen >= 12 && (alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS))
1607       return 64 * 1024;
1608 
1609    return 4096;
1610 }
1611 
1612 VkResult
1613 anv_device_alloc_bo(struct anv_device *device,
1614                     uint64_t size,
1615                     enum anv_bo_alloc_flags alloc_flags,
1616                     uint64_t explicit_address,
1617                     struct anv_bo **bo_out)
1618 {
1619    if (!device->physical->has_implicit_ccs)
1620       assert(!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS));
1621 
1622    const uint32_t bo_flags =
1623       anv_bo_alloc_flags_to_bo_flags(device, alloc_flags);
1624    assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
1625 
1626    /* The kernel is going to give us whole pages anyway */
1627    size = align_u64(size, 4096);
1628 
1629    const uint32_t align = anv_device_get_bo_align(device, alloc_flags);
1630 
1631    uint64_t ccs_size = 0;
1632    if (device->info.has_aux_map && (alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS)) {
1633       /* Align the size up to the next multiple of 64K so we don't have any
1634        * AUX-TT entries pointing from a 64K page to itself.
1635        */
1636       size = align_u64(size, 64 * 1024);
1637 
1638       /* See anv_bo::_ccs_size */
1639       ccs_size = align_u64(DIV_ROUND_UP(size, GEN_AUX_MAP_GEN12_CCS_SCALE), 4096);
1640    }
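   /* Worked example: if GEN_AUX_MAP_GEN12_CCS_SCALE is 256, a 1 MiB BO gets
    * ccs_size = align_u64(DIV_ROUND_UP(1 MiB, 256), 4096) = 4 KiB of CCS
    * appended after the main surface, so the GEM object created below is
    * size + ccs_size bytes long.
    */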
1641 
1642    uint32_t gem_handle = anv_gem_create(device, size + ccs_size);
1643    if (gem_handle == 0)
1644       return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
1645 
1646    struct anv_bo new_bo = {
1647       .gem_handle = gem_handle,
1648       .refcount = 1,
1649       .offset = -1,
1650       .size = size,
1651       ._ccs_size = ccs_size,
1652       .flags = bo_flags,
1653       .is_external = (alloc_flags & ANV_BO_ALLOC_EXTERNAL),
1654       .has_client_visible_address =
1655          (alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0,
1656       .has_implicit_ccs = ccs_size > 0,
1657    };
1658 
1659    if (alloc_flags & ANV_BO_ALLOC_MAPPED) {
1660       new_bo.map = anv_gem_mmap(device, new_bo.gem_handle, 0, size, 0);
1661       if (new_bo.map == MAP_FAILED) {
1662          anv_gem_close(device, new_bo.gem_handle);
1663          return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1664       }
1665    }
1666 
1667    if (alloc_flags & ANV_BO_ALLOC_SNOOPED) {
1668       assert(alloc_flags & ANV_BO_ALLOC_MAPPED);
1669       /* We don't want to change these defaults if it's going to be shared
1670        * with another process.
1671        */
1672       assert(!(alloc_flags & ANV_BO_ALLOC_EXTERNAL));
1673 
1674       /* Regular objects are created I915_CACHING_CACHED on LLC platforms and
1675        * I915_CACHING_NONE on non-LLC platforms.  For many internal state
1676        * objects, we'd rather take the snooping overhead than risk forgetting
1677        * a CLFLUSH somewhere.  Userptr objects are always created as
1678        * I915_CACHING_CACHED, which on non-LLC means snooped so there's no
1679        * need to do this there.
1680        */
1681       if (!device->info.has_llc) {
1682          anv_gem_set_caching(device, new_bo.gem_handle,
1683                              I915_CACHING_CACHED);
1684       }
1685    }
1686 
1687    if (alloc_flags & ANV_BO_ALLOC_FIXED_ADDRESS) {
1688       new_bo.has_fixed_address = true;
1689       new_bo.offset = explicit_address;
1690    } else if (new_bo.flags & EXEC_OBJECT_PINNED) {
1691       new_bo.offset = anv_vma_alloc(device, new_bo.size + new_bo._ccs_size,
1692                                     align, alloc_flags, explicit_address);
1693       if (new_bo.offset == 0) {
1694          if (new_bo.map)
1695             anv_gem_munmap(device, new_bo.map, size);
1696          anv_gem_close(device, new_bo.gem_handle);
1697          return vk_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
1698                           "failed to allocate virtual address for BO");
1699       }
1700    } else {
1701       assert(!new_bo.has_client_visible_address);
1702    }
1703 
1704    if (new_bo._ccs_size > 0) {
1705       assert(device->info.has_aux_map);
1706       gen_aux_map_add_mapping(device->aux_map_ctx,
1707                               gen_canonical_address(new_bo.offset),
1708                               gen_canonical_address(new_bo.offset + new_bo.size),
1709                               new_bo.size, 0 /* format_bits */);
1710    }
1711 
1712    assert(new_bo.gem_handle);
1713 
1714    /* If we just got this gem_handle from anv_gem_create then we know no one
1715     * else is touching this BO at the moment so we don't need to lock here.
1716     */
1717    struct anv_bo *bo = anv_device_lookup_bo(device, new_bo.gem_handle);
1718    *bo = new_bo;
1719 
1720    *bo_out = bo;
1721 
1722    return VK_SUCCESS;
1723 }
1724 
1725 VkResult
1726 anv_device_import_bo_from_host_ptr(struct anv_device *device,
1727                                    void *host_ptr, uint32_t size,
1728                                    enum anv_bo_alloc_flags alloc_flags,
1729                                    uint64_t client_address,
1730                                    struct anv_bo **bo_out)
1731 {
1732    assert(!(alloc_flags & (ANV_BO_ALLOC_MAPPED |
1733                            ANV_BO_ALLOC_SNOOPED |
1734                            ANV_BO_ALLOC_FIXED_ADDRESS)));
1735 
1736    /* We can't do implicit CCS with an aux table on shared memory */
1737    if (!device->physical->has_implicit_ccs || device->info.has_aux_map)
1738        assert(!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS));
1739 
1740    struct anv_bo_cache *cache = &device->bo_cache;
1741    const uint32_t bo_flags =
1742       anv_bo_alloc_flags_to_bo_flags(device, alloc_flags);
1743    assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
1744 
1745    uint32_t gem_handle = anv_gem_userptr(device, host_ptr, size);
1746    if (!gem_handle)
1747       return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
1748 
1749    pthread_mutex_lock(&cache->mutex);
1750 
1751    struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
1752    if (bo->refcount > 0) {
1753       /* VK_EXT_external_memory_host doesn't require handling importing the
1754        * same pointer twice at the same time, but we don't get in the way.  If
1755        * the kernel gives us the same gem_handle, only succeed if the flags match.
1756        */
1757       assert(bo->gem_handle == gem_handle);
1758       if (bo_flags != bo->flags) {
1759          pthread_mutex_unlock(&cache->mutex);
1760          return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
1761                           "same host pointer imported two different ways");
1762       }
1763 
1764       if (bo->has_client_visible_address !=
1765           ((alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0)) {
1766          pthread_mutex_unlock(&cache->mutex);
1767          return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
1768                           "The same BO was imported with and without buffer "
1769                           "device address");
1770       }
1771 
1772       if (client_address && client_address != gen_48b_address(bo->offset)) {
1773          pthread_mutex_unlock(&cache->mutex);
1774          return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
1775                           "The same BO was imported at two different "
1776                           "addresses");
1777       }
1778 
1779       __sync_fetch_and_add(&bo->refcount, 1);
1780    } else {
1781       struct anv_bo new_bo = {
1782          .gem_handle = gem_handle,
1783          .refcount = 1,
1784          .offset = -1,
1785          .size = size,
1786          .map = host_ptr,
1787          .flags = bo_flags,
1788          .is_external = true,
1789          .from_host_ptr = true,
1790          .has_client_visible_address =
1791             (alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0,
1792       };
1793 
1794       assert(client_address == gen_48b_address(client_address));
1795       if (new_bo.flags & EXEC_OBJECT_PINNED) {
1796          assert(new_bo._ccs_size == 0);
1797          new_bo.offset = anv_vma_alloc(device, new_bo.size,
1798                                        anv_device_get_bo_align(device,
1799                                                                alloc_flags),
1800                                        alloc_flags, client_address);
1801          if (new_bo.offset == 0) {
1802             anv_gem_close(device, new_bo.gem_handle);
1803             pthread_mutex_unlock(&cache->mutex);
1804             return vk_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
1805                              "failed to allocate virtual address for BO");
1806          }
1807       } else {
1808          assert(!new_bo.has_client_visible_address);
1809       }
1810 
1811       *bo = new_bo;
1812    }
1813 
1814    pthread_mutex_unlock(&cache->mutex);
1815    *bo_out = bo;
1816 
1817    return VK_SUCCESS;
1818 }
1819 
1820 VkResult
1821 anv_device_import_bo(struct anv_device *device,
1822                      int fd,
1823                      enum anv_bo_alloc_flags alloc_flags,
1824                      uint64_t client_address,
1825                      struct anv_bo **bo_out)
1826 {
1827    assert(!(alloc_flags & (ANV_BO_ALLOC_MAPPED |
1828                            ANV_BO_ALLOC_SNOOPED |
1829                            ANV_BO_ALLOC_FIXED_ADDRESS)));
1830 
1831    /* We can't do implicit CCS with an aux table on shared memory */
1832    if (!device->physical->has_implicit_ccs || device->info.has_aux_map)
1833        assert(!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS));
1834 
1835    struct anv_bo_cache *cache = &device->bo_cache;
1836    const uint32_t bo_flags =
1837       anv_bo_alloc_flags_to_bo_flags(device, alloc_flags);
1838    assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
1839 
1840    pthread_mutex_lock(&cache->mutex);
1841 
1842    uint32_t gem_handle = anv_gem_fd_to_handle(device, fd);
1843    if (!gem_handle) {
1844       pthread_mutex_unlock(&cache->mutex);
1845       return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
1846    }
1847 
1848    struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
1849    if (bo->refcount > 0) {
1850       /* We have to be careful how we combine flags so that it makes sense.
1851        * Really, though, if we get to this case and it actually matters, the
1852        * client has imported a BO twice in different ways and they get what
1853        * they have coming.
1854        */
1855       uint64_t new_flags = 0;
1856       new_flags |= (bo->flags | bo_flags) & EXEC_OBJECT_WRITE;
1857       new_flags |= (bo->flags & bo_flags) & EXEC_OBJECT_ASYNC;
1858       new_flags |= (bo->flags & bo_flags) & EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
1859       new_flags |= (bo->flags | bo_flags) & EXEC_OBJECT_PINNED;
1860       new_flags |= (bo->flags | bo_flags) & EXEC_OBJECT_CAPTURE;
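      /* For illustration: WRITE, PINNED, and CAPTURE survive if either
       * import requested them (OR above), while ASYNC and
       * SUPPORTS_48B_ADDRESS are only kept if both imports agreed (AND).
       */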
1861 
1862       /* It's theoretically possible for a BO to get imported such that it's
1863        * both pinned and not pinned.  The only way this can happen is if it
1864        * gets imported as both a semaphore and a memory object and that would
1865        * be an application error.  Just fail out in that case.
1866        */
1867       if ((bo->flags & EXEC_OBJECT_PINNED) !=
1868           (bo_flags & EXEC_OBJECT_PINNED)) {
1869          pthread_mutex_unlock(&cache->mutex);
1870          return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
1871                           "The same BO was imported two different ways");
1872       }
1873 
1874       /* It's also theoretically possible that someone could export a BO from
1875        * one heap and import it into another or to import the same BO into two
1876        * different heaps.  If this happens, we could potentially end up both
1877        * allowing and disallowing 48-bit addresses.  There's not much we can
1878        * do about it if we're pinning so we just throw an error and hope no
1879        * app is actually that stupid.
1880        */
1881       if ((new_flags & EXEC_OBJECT_PINNED) &&
1882           (bo->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) !=
1883           (bo_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)) {
1884          pthread_mutex_unlock(&cache->mutex);
1885          return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
1886                           "The same BO was imported on two different heaps");
1887       }
1888 
1889       if (bo->has_client_visible_address !=
1890           ((alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0)) {
1891          pthread_mutex_unlock(&cache->mutex);
1892          return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
1893                           "The same BO was imported with and without buffer "
1894                           "device address");
1895       }
1896 
1897       if (client_address && client_address != gen_48b_address(bo->offset)) {
1898          pthread_mutex_unlock(&cache->mutex);
1899          return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
1900                           "The same BO was imported at two different "
1901                           "addresses");
1902       }
1903 
1904       bo->flags = new_flags;
1905 
1906       __sync_fetch_and_add(&bo->refcount, 1);
1907    } else {
1908       off_t size = lseek(fd, 0, SEEK_END);
1909       if (size == (off_t)-1) {
1910          anv_gem_close(device, gem_handle);
1911          pthread_mutex_unlock(&cache->mutex);
1912          return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
1913       }
1914 
1915       struct anv_bo new_bo = {
1916          .gem_handle = gem_handle,
1917          .refcount = 1,
1918          .offset = -1,
1919          .size = size,
1920          .flags = bo_flags,
1921          .is_external = true,
1922          .has_client_visible_address =
1923             (alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0,
1924       };
1925 
1926       assert(client_address == gen_48b_address(client_address));
1927       if (new_bo.flags & EXEC_OBJECT_PINNED) {
1928          assert(new_bo._ccs_size == 0);
1929          new_bo.offset = anv_vma_alloc(device, new_bo.size,
1930                                        anv_device_get_bo_align(device,
1931                                                                alloc_flags),
1932                                        alloc_flags, client_address);
1933          if (new_bo.offset == 0) {
1934             anv_gem_close(device, new_bo.gem_handle);
1935             pthread_mutex_unlock(&cache->mutex);
1936             return vk_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
1937                              "failed to allocate virtual address for BO");
1938          }
1939       } else {
1940          assert(!new_bo.has_client_visible_address);
1941       }
1942 
1943       *bo = new_bo;
1944    }
1945 
1946    pthread_mutex_unlock(&cache->mutex);
1947    *bo_out = bo;
1948 
1949    return VK_SUCCESS;
1950 }
1951 
1952 VkResult
1953 anv_device_export_bo(struct anv_device *device,
1954                      struct anv_bo *bo, int *fd_out)
1955 {
1956    assert(anv_device_lookup_bo(device, bo->gem_handle) == bo);
1957 
1958    /* This BO must have been flagged external in order for us to be able
1959     * to export it.  This is done based on external options passed into
1960     * anv_AllocateMemory.
1961     */
1962    assert(bo->is_external);
1963 
1964    int fd = anv_gem_handle_to_fd(device, bo->gem_handle);
1965    if (fd < 0)
1966       return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
1967 
1968    *fd_out = fd;
1969 
1970    return VK_SUCCESS;
1971 }
1972 
1973 static bool
1974 atomic_dec_not_one(uint32_t *counter)
1975 {
1976    uint32_t old, val;
1977 
1978    val = *counter;
1979    while (1) {
1980       if (val == 1)
1981          return false;
1982 
1983       old = __sync_val_compare_and_swap(counter, val, val - 1);
1984       if (old == val)
1985          return true;
1986 
1987       val = old;
1988    }
1989 }
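/* The compare-and-swap loop above lets, e.g., a refcount of 3 drop to 2
 * without any lock, while a refcount of 1 is left untouched so that the
 * caller (anv_device_release_bo below) falls through to the slow path and
 * performs the final decrement under the cache mutex.
 */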
1990 
1991 void
1992 anv_device_release_bo(struct anv_device *device,
1993                       struct anv_bo *bo)
1994 {
1995    struct anv_bo_cache *cache = &device->bo_cache;
1996    assert(anv_device_lookup_bo(device, bo->gem_handle) == bo);
1997 
1998    /* Try to decrement the counter but don't go below one.  If this succeeds
1999     * then the refcount has been decremented and we are not the last
2000     * reference.
2001     */
2002    if (atomic_dec_not_one(&bo->refcount))
2003       return;
2004 
2005    pthread_mutex_lock(&cache->mutex);
2006 
2007    /* We are probably the last reference since our attempt to decrement above
2008     * failed.  However, we can't actually know until we are inside the mutex.
2009     * Otherwise, someone could import the BO between the decrement and our
2010     * taking the mutex.
2011     */
2012    if (unlikely(__sync_sub_and_fetch(&bo->refcount, 1) > 0)) {
2013       /* Turns out we're not the last reference.  Unlock and bail. */
2014       pthread_mutex_unlock(&cache->mutex);
2015       return;
2016    }
2017    assert(bo->refcount == 0);
2018 
2019    if (bo->map && !bo->from_host_ptr)
2020       anv_gem_munmap(device, bo->map, bo->size);
2021 
2022    if (bo->_ccs_size > 0) {
2023       assert(device->physical->has_implicit_ccs);
2024       assert(device->info.has_aux_map);
2025       assert(bo->has_implicit_ccs);
2026       gen_aux_map_unmap_range(device->aux_map_ctx,
2027                               gen_canonical_address(bo->offset),
2028                               bo->size);
2029    }
2030 
2031    if ((bo->flags & EXEC_OBJECT_PINNED) && !bo->has_fixed_address)
2032       anv_vma_free(device, bo->offset, bo->size + bo->_ccs_size);
2033 
2034    uint32_t gem_handle = bo->gem_handle;
2035 
2036    /* Memset the BO just in case.  The refcount being zero should be enough to
2037     * prevent someone from assuming the data is valid, but it's safer to
2038     * stomp it to zero anyway.  We explicitly do this *before* we close the
2039     * GEM handle to ensure that if anyone allocates something and gets the
2040     * same GEM handle, the memset has already happened and won't stomp all over
2041     * any data they may write in this BO.
2042     */
2043    memset(bo, 0, sizeof(*bo));
2044 
2045    anv_gem_close(device, gem_handle);
2046 
2047    /* Don't unlock until we've actually closed the BO.  The whole point of
2048     * the BO cache is to ensure that we correctly handle races with creating
2049     * and releasing GEM handles and we don't want to let someone import the BO
2050     * again between mutex unlock and closing the GEM handle.
2051     */
2052    pthread_mutex_unlock(&cache->mutex);
2053 }
2054