Lines Matching full:pool
84 * At the next level we can use various sub-allocators. The state pool is a
85 * pool of smaller, fixed-size objects, which operates much like the block
86 * pool. It uses a free list for freeing objects, but when it runs out of
87 * space it just allocates a new block from the block pool. This allocator is
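The sub-allocation scheme described in that comment can be pictured with a minimal, hypothetical sketch (the toy_* names are not the anv API, and plain malloc stands in for the underlying block pool): freed objects are recycled through a free list threaded through their own storage, and when the list is empty a fresh block is carved into fixed-size slices.

#include <stddef.h>
#include <stdlib.h>

/* Freed objects are chained through their own storage. */
struct toy_free_node { struct toy_free_node *next; };

struct toy_state_pool {
   struct toy_free_node *free_list;  /* recycled fixed-size objects */
   char   *block;                    /* current block being carved up */
   size_t  block_used;
   size_t  block_size;               /* e.g. 4096 */
   size_t  obj_size;                 /* fixed size, >= sizeof(struct toy_free_node) */
};

static void *toy_state_pool_alloc(struct toy_state_pool *p)
{
   if (p->free_list) {               /* fast path: pop the free list */
      struct toy_free_node *n = p->free_list;
      p->free_list = n->next;
      return n;
   }
   if (!p->block || p->block_used + p->obj_size > p->block_size) {
      /* Out of space: grab a new block (the real code asks the block pool). */
      p->block = malloc(p->block_size);
      if (!p->block)
         return NULL;
      p->block_used = 0;
   }
   void *obj = p->block + p->block_used;
   p->block_used += p->obj_size;
   return obj;
}

static void toy_state_pool_free(struct toy_state_pool *p, void *obj)
{
   struct toy_free_node *n = obj;    /* push back onto the free list */
   n->next = p->free_list;
   p->free_list = n;
}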
192 /* Assert that we only ever grow the pool */ in anv_state_table_expand_range()
205 /* Just leak the old map until we destroy the pool. We can't munmap it in anv_state_table_expand_range()
236 /* The block pool is always initialized to a nonzero size and this function in anv_state_table_grow()
297 /* We allocated the first block outside the pool so we have to grow in anv_state_table_add()
298 * the pool. pool_state->next acts as a mutex: threads who try to in anv_state_table_add()
364 anv_block_pool_expand_range(struct anv_block_pool *pool,
368 anv_block_pool_init(struct anv_block_pool *pool, in anv_block_pool_init() argument
376 pool->name = name; in anv_block_pool_init()
377 pool->device = device; in anv_block_pool_init()
378 pool->use_softpin = device->physical->use_softpin; in anv_block_pool_init()
379 pool->nbos = 0; in anv_block_pool_init()
380 pool->size = 0; in anv_block_pool_init()
381 pool->center_bo_offset = 0; in anv_block_pool_init()
382 pool->start_address = intel_canonical_address(start_address); in anv_block_pool_init()
383 pool->map = NULL; in anv_block_pool_init()
385 if (pool->use_softpin) { in anv_block_pool_init()
386 pool->bo = NULL; in anv_block_pool_init()
387 pool->fd = -1; in anv_block_pool_init()
393 pool->fd = os_create_anonymous_file(BLOCK_POOL_MEMFD_SIZE, "block pool"); in anv_block_pool_init()
394 if (pool->fd == -1) in anv_block_pool_init()
397 pool->wrapper_bo = (struct anv_bo) { in anv_block_pool_init()
402 pool->bo = &pool->wrapper_bo; in anv_block_pool_init()
405 if (!u_vector_init(&pool->mmap_cleanups, 8, in anv_block_pool_init()
411 pool->state.next = 0; in anv_block_pool_init()
412 pool->state.end = 0; in anv_block_pool_init()
413 pool->back_state.next = 0; in anv_block_pool_init()
414 pool->back_state.end = 0; in anv_block_pool_init()
416 result = anv_block_pool_expand_range(pool, 0, initial_size); in anv_block_pool_init()
420 /* Make the entire pool available in the front of the pool. If back in anv_block_pool_init()
423 pool->state.end = pool->size; in anv_block_pool_init()
428 u_vector_finish(&pool->mmap_cleanups); in anv_block_pool_init()
430 if (pool->fd >= 0) in anv_block_pool_init()
431 close(pool->fd); in anv_block_pool_init()
437 anv_block_pool_finish(struct anv_block_pool *pool) in anv_block_pool_finish() argument
439 anv_block_pool_foreach_bo(bo, pool) { in anv_block_pool_finish()
441 anv_gem_munmap(pool->device, bo->map, bo->size); in anv_block_pool_finish()
442 anv_gem_close(pool->device, bo->gem_handle); in anv_block_pool_finish()
446 u_vector_foreach(cleanup, &pool->mmap_cleanups) in anv_block_pool_finish()
448 u_vector_finish(&pool->mmap_cleanups); in anv_block_pool_finish()
450 if (pool->fd >= 0) in anv_block_pool_finish()
451 close(pool->fd); in anv_block_pool_finish()
455 anv_block_pool_expand_range(struct anv_block_pool *pool, in anv_block_pool_expand_range() argument
458 /* Assert that we only ever grow the pool */ in anv_block_pool_expand_range()
459 assert(center_bo_offset >= pool->back_state.end); in anv_block_pool_expand_range()
460 assert(size - center_bo_offset >= pool->state.end); in anv_block_pool_expand_range()
464 assert(pool->use_softpin || in anv_block_pool_expand_range()
468 /* For state pool BOs we have to be a bit careful about where we place them in anv_block_pool_expand_range()
479 * the pool during command buffer building so we don't actually have a in anv_block_pool_expand_range()
495 if (!pool->use_softpin) in anv_block_pool_expand_range()
498 if (pool->use_softpin) { in anv_block_pool_expand_range()
499 uint32_t new_bo_size = size - pool->size; in anv_block_pool_expand_range()
502 VkResult result = anv_device_alloc_bo(pool->device, in anv_block_pool_expand_range()
503 pool->name, in anv_block_pool_expand_range()
510 pool->start_address + pool->size, in anv_block_pool_expand_range()
515 pool->bos[pool->nbos++] = new_bo; in anv_block_pool_expand_range()
518 pool->bo = pool->bos[0]; in anv_block_pool_expand_range()
520 /* Just leak the old map until we destroy the pool. We can't munmap it in anv_block_pool_expand_range()
527 MAP_SHARED | MAP_POPULATE, pool->fd, in anv_block_pool_expand_range()
530 return vk_errorf(pool->device, VK_ERROR_MEMORY_MAP_FAILED, in anv_block_pool_expand_range()
534 VkResult result = anv_device_import_bo_from_host_ptr(pool->device, in anv_block_pool_expand_range()
544 struct anv_mmap_cleanup *cleanup = u_vector_add(&pool->mmap_cleanups); in anv_block_pool_expand_range()
547 anv_device_release_bo(pool->device, new_bo); in anv_block_pool_expand_range()
548 return vk_error(pool->device, VK_ERROR_OUT_OF_HOST_MEMORY); in anv_block_pool_expand_range()
554 * center_bo_offset back into pool and update pool->map. */ in anv_block_pool_expand_range()
555 pool->center_bo_offset = center_bo_offset; in anv_block_pool_expand_range()
556 pool->map = map + center_bo_offset; in anv_block_pool_expand_range()
558 pool->bos[pool->nbos++] = new_bo; in anv_block_pool_expand_range()
559 pool->wrapper_bo.map = new_bo; in anv_block_pool_expand_range()
562 assert(pool->nbos < ANV_MAX_BLOCK_POOL_BOS); in anv_block_pool_expand_range()
563 pool->size = size; in anv_block_pool_expand_range()
568 /** Returns current memory map of the block pool.
571 * offset. The offset parameter is relative to the "center" of the block pool
572 * rather than the start of the block pool BO map.
575 anv_block_pool_map(struct anv_block_pool *pool, int32_t offset, uint32_t size) in anv_block_pool_map() argument
577 if (pool->use_softpin) { in anv_block_pool_map()
580 anv_block_pool_foreach_bo(iter_bo, pool) { in anv_block_pool_map()
593 return pool->map + offset; in anv_block_pool_map()
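The doc comment above says the offset is relative to the "center" of the pool rather than the start of the BO map; the matched line pool->map = map + center_bo_offset shows why that works on the non-softpin path (the softpin path instead walks its list of BOs). A tiny, hypothetical illustration of the idea:

#include <stdint.h>

/* With a map pointer that already points at the pool's center, a signed
 * offset addresses both halves: front allocations use offsets >= 0, back
 * allocations (anv_block_pool_alloc_back below) use negative offsets, and
 * both resolve with the same addition as long as they stay inside
 * [-center_bo_offset, size - center_bo_offset). */
static inline void *toy_block_pool_map(char *center_map, int32_t offset)
{
   return center_map + offset;
}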
597 /** Grows and re-centers the block pool.
599 * We grow the block pool in one or both directions in such a way that the
602 * 1) The size of the entire pool is always a power of two.
604 * 2) The pool only grows on both ends. Neither end can get
608 * allocated for each end as we have used. This way the pool doesn't
612 * the pool retains a size of zero. (This makes it easier for users of
613 * the block pool that only want a one-sided pool.)
618 * 6) The center of the pool is always aligned to both the block_size of
619 * the pool and a 4K CPU page.
622 anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state, in anv_block_pool_grow() argument
627 pthread_mutex_lock(&pool->device->mutex); in anv_block_pool_grow()
629 assert(state == &pool->state || state == &pool->back_state); in anv_block_pool_grow()
631 /* Gather a little usage information on the pool. Since we may have in anv_block_pool_grow()
636 * whichever side tries to grow the pool. in anv_block_pool_grow()
641 uint32_t back_used = align_u32(pool->back_state.next, PAGE_SIZE); in anv_block_pool_grow()
642 uint32_t front_used = align_u32(pool->state.next, PAGE_SIZE); in anv_block_pool_grow()
645 assert(state == &pool->state || back_used > 0); in anv_block_pool_grow()
647 uint32_t old_size = pool->size; in anv_block_pool_grow()
649 /* The block pool is always initialized to a nonzero size and this function in anv_block_pool_grow()
654 const uint32_t old_back = pool->center_bo_offset; in anv_block_pool_grow()
655 const uint32_t old_front = old_size - pool->center_bo_offset; in anv_block_pool_grow()
664 if (pool->use_softpin) { in anv_block_pool_grow()
665 /* With softpin, the pool is made up of a bunch of buffers with separate in anv_block_pool_grow()
685 assert(size > pool->size); in anv_block_pool_grow()
688 * of the pool, we maintain the ratio of how much is used by each side. in anv_block_pool_grow()
709 /* Make sure we don't shrink the back end of the pool */ in anv_block_pool_grow()
713 /* Make sure that we don't shrink the front end of the pool */ in anv_block_pool_grow()
720 result = anv_block_pool_expand_range(pool, center_bo_offset, size); in anv_block_pool_grow()
723 pthread_mutex_unlock(&pool->device->mutex); in anv_block_pool_grow()
730 if (state == &pool->state) { in anv_block_pool_grow()
731 return pool->size - pool->center_bo_offset; in anv_block_pool_grow()
733 assert(pool->center_bo_offset > 0); in anv_block_pool_grow()
734 return pool->center_bo_offset; in anv_block_pool_grow()
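A hedged sketch of how a new size could be chosen so that it satisfies the invariants quoted in the comment above anv_block_pool_grow: the total stays a power of two (1), each end gets roughly twice as much room as it currently uses (3), and any split point is rounded to a 4K CPU page (part of 6; the real code also aligns the center to the pool's block size). The helper names are hypothetical.

#include <stdint.h>

#define TOY_PAGE_SIZE 4096u

/* Round v (v > 0) up to the next power of two. */
static uint32_t toy_next_pow2(uint32_t v)
{
   v--;
   v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16;
   return v + 1;
}

/* Round v up to a 4K CPU page, as the center offset must be. */
static uint32_t toy_align_page(uint32_t v)
{
   return (v + TOY_PAGE_SIZE - 1) & ~(TOY_PAGE_SIZE - 1);
}

/* Candidate new pool size: leave about as much free space on each end as
 * that end already uses, then round the total up to a power of two. */
static uint32_t toy_pick_pool_size(uint32_t back_used, uint32_t front_used)
{
   uint32_t want = 2 * (toy_align_page(back_used) + toy_align_page(front_used));
   return toy_next_pow2(want);
}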
742 anv_block_pool_alloc_new(struct anv_block_pool *pool, in anv_block_pool_alloc_new() argument
757 if (pool->use_softpin && state.next < state.end) { in anv_block_pool_alloc_new()
758 /* We need to grow the block pool, but still have some leftover in anv_block_pool_alloc_new()
764 /* If there is some leftover space in the pool, the caller must in anv_block_pool_alloc_new()
773 /* We allocated the first block outside the pool so we have to grow in anv_block_pool_alloc_new()
774 * the pool. pool_state->next acts as a mutex: threads who try to in anv_block_pool_alloc_new()
780 new.end = anv_block_pool_grow(pool, pool_state, block_size); in anv_block_pool_alloc_new()
795 anv_block_pool_alloc(struct anv_block_pool *pool, in anv_block_pool_alloc() argument
800 offset = anv_block_pool_alloc_new(pool, &pool->state, block_size, padding); in anv_block_pool_alloc()
805 /* Allocates a block out of the back of the block pool.
807 * This will allocate a block earlier than the "start" of the block pool. in anv_block_pool_alloc_back()
809 * be correct relative to the block pool's map pointer.
812 * gymnastics with the block pool's BO when doing relocations.
815 anv_block_pool_alloc_back(struct anv_block_pool *pool, in anv_block_pool_alloc_back() argument
818 int32_t offset = anv_block_pool_alloc_new(pool, &pool->back_state, in anv_block_pool_alloc_back()
831 anv_state_pool_init(struct anv_state_pool *pool, in anv_state_pool_init() argument
841 VkResult result = anv_block_pool_init(&pool->block_pool, device, name, in anv_state_pool_init()
847 pool->start_offset = start_offset; in anv_state_pool_init()
849 result = anv_state_table_init(&pool->table, device, 64); in anv_state_pool_init()
851 anv_block_pool_finish(&pool->block_pool); in anv_state_pool_init()
856 pool->block_size = block_size; in anv_state_pool_init()
857 pool->back_alloc_free_list = ANV_FREE_LIST_EMPTY; in anv_state_pool_init()
859 pool->buckets[i].free_list = ANV_FREE_LIST_EMPTY; in anv_state_pool_init()
860 pool->buckets[i].block.next = 0; in anv_state_pool_init()
861 pool->buckets[i].block.end = 0; in anv_state_pool_init()
863 VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false)); in anv_state_pool_init()
869 anv_state_pool_finish(struct anv_state_pool *pool) in anv_state_pool_finish() argument
871 VG(VALGRIND_DESTROY_MEMPOOL(pool)); in anv_state_pool_finish()
872 anv_state_table_finish(&pool->table); in anv_state_pool_finish()
873 anv_block_pool_finish(&pool->block_pool); in anv_state_pool_finish()
877 anv_fixed_size_state_pool_alloc_new(struct anv_fixed_size_state_pool *pool, in anv_fixed_size_state_pool_alloc_new() argument
900 block.u64 = __sync_fetch_and_add(&pool->block.u64, state_size); in anv_fixed_size_state_pool_alloc_new()
908 old.u64 = __sync_lock_test_and_set(&pool->block.u64, new.u64); in anv_fixed_size_state_pool_alloc_new()
910 futex_wake(&pool->block.end, INT_MAX); in anv_fixed_size_state_pool_alloc_new()
913 futex_wait(&pool->block.end, block.end, NULL); in anv_fixed_size_state_pool_alloc_new()
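The fragments above (__sync_fetch_and_add, __sync_lock_test_and_set, and futex_wait/futex_wake on a packed {next, end} pair) outline a lock-free allocation path. A hedged, self-contained sketch of that pattern follows; it assumes a little-endian layout so that adding to the 64-bit word bumps only the next field, and the toy_* names and grow_block callback are hypothetical, not the driver's code.

#include <stdint.h>
#include <limits.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

union toy_block_state {
   struct { uint32_t next, end; };   /* little-endian: next is the low word */
   uint64_t u64;
};

static void toy_futex_wait(uint32_t *addr, uint32_t expected)
{
   syscall(SYS_futex, addr, FUTEX_WAIT, expected, NULL, NULL, 0);
}

static void toy_futex_wake_all(uint32_t *addr)
{
   syscall(SYS_futex, addr, FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
}

/* grow_block() allocates a new block of block_size bytes from a lower-level
 * allocator and returns its base offset; size must not exceed block_size. */
static uint32_t toy_fixed_size_alloc(union toy_block_state *st, uint32_t size,
                                     uint32_t block_size,
                                     uint32_t (*grow_block)(uint32_t block_size))
{
   for (;;) {
      union toy_block_state cur;
      cur.u64 = __sync_fetch_and_add(&st->u64, size);  /* claim [next, next+size) */

      if (cur.next + size <= cur.end)
         return cur.next;                     /* fits in the current block */

      if (cur.next <= cur.end) {
         /* We are the one thread that ran off the end: everyone racing with
          * us now sees next > end and parks below, so next acts as a mutex.
          * Get a new block, publish it, and wake the waiters. */
         uint32_t base = grow_block(block_size);
         union toy_block_state fresh;
         fresh.next = base + size;            /* keep the first slot for ourselves */
         fresh.end  = base + block_size;
         __sync_lock_test_and_set(&st->u64, fresh.u64);
         toy_futex_wake_all(&st->end);
         return base;
      }

      /* Someone else is growing the block: wait for end to change, retry. */
      toy_futex_wait(&st->end, cur.end);
   }
}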
941 anv_state_pool_return_blocks(struct anv_state_pool *pool, in anv_state_pool_return_blocks() argument
952 UNUSED VkResult result = anv_state_table_add(&pool->table, &st_idx, count); in anv_state_pool_return_blocks()
956 struct anv_state *state_i = anv_state_table_get(&pool->table, in anv_state_pool_return_blocks()
959 state_i->offset = pool->start_offset + chunk_offset + block_size * i; in anv_state_pool_return_blocks()
960 state_i->map = anv_block_pool_map(&pool->block_pool, in anv_state_pool_return_blocks()
966 anv_free_list_push(&pool->buckets[block_bucket].free_list, in anv_state_pool_return_blocks()
967 &pool->table, st_idx, count); in anv_state_pool_return_blocks()
970 /** Returns a chunk of memory back to the state pool.
973 * (pool->block_size), we return as many divisor-sized blocks as we can, from in anv_state_pool_return_chunk()
981 anv_state_pool_return_chunk(struct anv_state_pool *pool, in anv_state_pool_return_chunk() argument
985 uint32_t divisor = pool->block_size; in anv_state_pool_return_chunk()
996 anv_state_pool_return_blocks(pool, offset, nblocks, divisor); in anv_state_pool_return_chunk()
1014 anv_state_pool_return_blocks(pool, chunk_offset + rest, in anv_state_pool_return_chunk()
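The comment above describes a two-level split when a chunk comes back to the state pool: as many block_size ("divisor") pieces as fit, plus a smaller remainder. A hedged sketch of just that arithmetic (the real code pushes each piece onto the free list of the matching bucket via anv_state_pool_return_blocks; here the split is only printed, and toy_return_chunk is a hypothetical name):

#include <stdint.h>
#include <stdio.h>

static void toy_return_chunk(uint32_t chunk_offset, uint32_t chunk_size,
                             uint32_t divisor)
{
   uint32_t nblocks = chunk_size / divisor;           /* full divisor-sized blocks */
   uint32_t rest    = chunk_size - nblocks * divisor; /* leftover smaller piece */

   if (nblocks > 0)
      printf("return %u block(s) of %u bytes starting at offset %u\n",
             nblocks, divisor, chunk_offset + rest);
   if (rest > 0)
      printf("return remainder of %u bytes at offset %u\n",
             rest, chunk_offset);
}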
1023 anv_state_pool_alloc_no_vg(struct anv_state_pool *pool, in anv_state_pool_alloc_no_vg() argument
1033 state = anv_free_list_pop(&pool->buckets[bucket].free_list, in anv_state_pool_alloc_no_vg()
1034 &pool->table); in anv_state_pool_alloc_no_vg()
1036 assert(state->offset >= pool->start_offset); in anv_state_pool_alloc_no_vg()
1042 state = anv_free_list_pop(&pool->buckets[b].free_list, &pool->table); in anv_state_pool_alloc_no_vg()
1052 /* Now return the unused part of the chunk back to the pool as free in anv_state_pool_alloc_no_vg()
1083 anv_state_pool_return_chunk(pool, chunk_offset + alloc_size, in anv_state_pool_alloc_no_vg()
1090 offset = anv_fixed_size_state_pool_alloc_new(&pool->buckets[bucket], in anv_state_pool_alloc_no_vg()
1091 &pool->block_pool, in anv_state_pool_alloc_no_vg()
1093 pool->block_size, in anv_state_pool_alloc_no_vg()
1095 /* Every time we allocate a new state, add it to the state pool */ in anv_state_pool_alloc_no_vg()
1097 UNUSED VkResult result = anv_state_table_add(&pool->table, &idx, 1); in anv_state_pool_alloc_no_vg()
1100 state = anv_state_table_get(&pool->table, idx); in anv_state_pool_alloc_no_vg()
1101 state->offset = pool->start_offset + offset; in anv_state_pool_alloc_no_vg()
1103 state->map = anv_block_pool_map(&pool->block_pool, offset, alloc_size); in anv_state_pool_alloc_no_vg()
1107 anv_state_pool_return_chunk(pool, return_offset, padding, 0); in anv_state_pool_alloc_no_vg()
1115 anv_state_pool_alloc(struct anv_state_pool *pool, uint32_t size, uint32_t align) in anv_state_pool_alloc() argument
1120 struct anv_state state = anv_state_pool_alloc_no_vg(pool, size, align); in anv_state_pool_alloc()
1121 VG(VALGRIND_MEMPOOL_ALLOC(pool, state.map, size)); in anv_state_pool_alloc()
1126 anv_state_pool_alloc_back(struct anv_state_pool *pool) in anv_state_pool_alloc_back() argument
1129 uint32_t alloc_size = pool->block_size; in anv_state_pool_alloc_back()
1132 assert(pool->start_offset == 0); in anv_state_pool_alloc_back()
1134 state = anv_free_list_pop(&pool->back_alloc_free_list, &pool->table); in anv_state_pool_alloc_back()
1136 assert(state->offset < pool->start_offset); in anv_state_pool_alloc_back()
1141 offset = anv_block_pool_alloc_back(&pool->block_pool, in anv_state_pool_alloc_back()
1142 pool->block_size); in anv_state_pool_alloc_back()
1144 UNUSED VkResult result = anv_state_table_add(&pool->table, &idx, 1); in anv_state_pool_alloc_back()
1147 state = anv_state_table_get(&pool->table, idx); in anv_state_pool_alloc_back()
1148 state->offset = pool->start_offset + offset; in anv_state_pool_alloc_back()
1150 state->map = anv_block_pool_map(&pool->block_pool, offset, alloc_size); in anv_state_pool_alloc_back()
1153 VG(VALGRIND_MEMPOOL_ALLOC(pool, state->map, state->alloc_size)); in anv_state_pool_alloc_back()
1158 anv_state_pool_free_no_vg(struct anv_state_pool *pool, struct anv_state state) in anv_state_pool_free_no_vg() argument
1163 if (state.offset < pool->start_offset) { in anv_state_pool_free_no_vg()
1164 assert(state.alloc_size == pool->block_size); in anv_state_pool_free_no_vg()
1165 anv_free_list_push(&pool->back_alloc_free_list, in anv_state_pool_free_no_vg()
1166 &pool->table, state.idx, 1); in anv_state_pool_free_no_vg()
1168 anv_free_list_push(&pool->buckets[bucket].free_list, in anv_state_pool_free_no_vg()
1169 &pool->table, state.idx, 1); in anv_state_pool_free_no_vg()
1174 anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state) in anv_state_pool_free() argument
1179 VG(VALGRIND_MEMPOOL_FREE(pool, state.map)); in anv_state_pool_free()
1180 anv_state_pool_free_no_vg(pool, state); in anv_state_pool_free()
1283 anv_state_reserved_pool_init(struct anv_state_reserved_pool *pool, in anv_state_reserved_pool_init() argument
1287 pool->pool = parent; in anv_state_reserved_pool_init()
1288 pool->reserved_blocks = ANV_FREE_LIST_EMPTY; in anv_state_reserved_pool_init()
1289 pool->count = count; in anv_state_reserved_pool_init()
1292 struct anv_state state = anv_state_pool_alloc(pool->pool, size, alignment); in anv_state_reserved_pool_init()
1293 anv_free_list_push(&pool->reserved_blocks, &pool->pool->table, state.idx, 1); in anv_state_reserved_pool_init()
1298 anv_state_reserved_pool_finish(struct anv_state_reserved_pool *pool) in anv_state_reserved_pool_finish() argument
1302 while ((state = anv_free_list_pop(&pool->reserved_blocks, &pool->pool->table))) { in anv_state_reserved_pool_finish()
1303 anv_state_pool_free(pool->pool, *state); in anv_state_reserved_pool_finish()
1304 pool->count--; in anv_state_reserved_pool_finish()
1306 assert(pool->count == 0); in anv_state_reserved_pool_finish()
1310 anv_state_reserved_pool_alloc(struct anv_state_reserved_pool *pool) in anv_state_reserved_pool_alloc() argument
1312 return *anv_free_list_pop(&pool->reserved_blocks, &pool->pool->table); in anv_state_reserved_pool_alloc()
1316 anv_state_reserved_pool_free(struct anv_state_reserved_pool *pool, in anv_state_reserved_pool_free() argument
1319 anv_free_list_push(&pool->reserved_blocks, &pool->pool->table, state.idx, 1); in anv_state_reserved_pool_free()
1323 anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device, in anv_bo_pool_init() argument
1326 pool->name = name; in anv_bo_pool_init()
1327 pool->device = device; in anv_bo_pool_init()
1328 for (unsigned i = 0; i < ARRAY_SIZE(pool->free_list); i++) { in anv_bo_pool_init()
1329 util_sparse_array_free_list_init(&pool->free_list[i], in anv_bo_pool_init()
1334 VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false)); in anv_bo_pool_init()
1338 anv_bo_pool_finish(struct anv_bo_pool *pool) in anv_bo_pool_finish() argument
1340 for (unsigned i = 0; i < ARRAY_SIZE(pool->free_list); i++) { in anv_bo_pool_finish()
1343 util_sparse_array_free_list_pop_elem(&pool->free_list[i]); in anv_bo_pool_finish()
1349 anv_device_release_bo(pool->device, bo); in anv_bo_pool_finish()
1353 VG(VALGRIND_DESTROY_MEMPOOL(pool)); in anv_bo_pool_finish()
1357 anv_bo_pool_alloc(struct anv_bo_pool *pool, uint32_t size, in anv_bo_pool_alloc() argument
1363 assert(bucket < ARRAY_SIZE(pool->free_list)); in anv_bo_pool_alloc()
1366 util_sparse_array_free_list_pop_elem(&pool->free_list[bucket]); in anv_bo_pool_alloc()
1368 VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size)); in anv_bo_pool_alloc()
1373 VkResult result = anv_device_alloc_bo(pool->device, in anv_bo_pool_alloc()
1374 pool->name, in anv_bo_pool_alloc()
1385 /* We want it to look like it came from this pool */ in anv_bo_pool_alloc()
1387 VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size)); in anv_bo_pool_alloc()
1395 anv_bo_pool_free(struct anv_bo_pool *pool, struct anv_bo *bo) in anv_bo_pool_free() argument
1397 VG(VALGRIND_MEMPOOL_FREE(pool, bo->map)); in anv_bo_pool_free()
1402 assert(bucket < ARRAY_SIZE(pool->free_list)); in anv_bo_pool_free()
1404 assert(util_sparse_array_get(&pool->device->bo_cache.bo_map, in anv_bo_pool_free()
1406 util_sparse_array_free_list_push(&pool->free_list[bucket], in anv_bo_pool_free()
1410 // Scratch pool
1413 anv_scratch_pool_init(struct anv_device *device, struct anv_scratch_pool *pool) in anv_scratch_pool_init() argument
1415 memset(pool, 0, sizeof(*pool)); in anv_scratch_pool_init()
1419 anv_scratch_pool_finish(struct anv_device *device, struct anv_scratch_pool *pool) in anv_scratch_pool_finish() argument
1421 for (unsigned s = 0; s < ARRAY_SIZE(pool->bos[0]); s++) { in anv_scratch_pool_finish()
1423 if (pool->bos[i][s] != NULL) in anv_scratch_pool_finish()
1424 anv_device_release_bo(device, pool->bos[i][s]); in anv_scratch_pool_finish()
1429 if (pool->surf_states[i].map != NULL) { in anv_scratch_pool_finish()
1431 pool->surf_states[i]); in anv_scratch_pool_finish()
1437 anv_scratch_pool_alloc(struct anv_device *device, struct anv_scratch_pool *pool, in anv_scratch_pool_alloc() argument
1446 assert(stage < ARRAY_SIZE(pool->bos)); in anv_scratch_pool_alloc()
1458 struct anv_bo *bo = p_atomic_read(&pool->bos[scratch_size_log2][stage]); in anv_scratch_pool_alloc()
1492 p_atomic_cmpxchg(&pool->bos[scratch_size_log2][stage], NULL, bo); in anv_scratch_pool_alloc()
1503 struct anv_scratch_pool *pool, in anv_scratch_pool_get_surf() argument
1512 uint32_t surf = p_atomic_read(&pool->surfs[scratch_size_log2]); in anv_scratch_pool_get_surf()
1517 anv_scratch_pool_alloc(device, pool, MESA_SHADER_COMPUTE, in anv_scratch_pool_get_surf()
1534 uint32_t current = p_atomic_cmpxchg(&pool->surfs[scratch_size_log2], in anv_scratch_pool_get_surf()
1540 pool->surf_states[scratch_size_log2] = state; in anv_scratch_pool_get_surf()
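The scratch pool lines above (p_atomic_read followed by p_atomic_cmpxchg against NULL) show lock-free lazy initialization of a per-(stage, size) object. A hedged, generic sketch of that pattern, using GCC atomic builtins in place of p_atomic_* and a hypothetical toy_bo type:

#include <stdlib.h>

struct toy_bo { int dummy; };

/* Read the slot; if empty, create a candidate and try to install it with a
 * compare-and-swap.  If another thread won the race, discard our candidate
 * and use the one it installed. */
static struct toy_bo *toy_get_or_create(struct toy_bo **slot)
{
   struct toy_bo *bo = __atomic_load_n(slot, __ATOMIC_ACQUIRE);
   if (bo)
      return bo;                              /* already initialized */

   struct toy_bo *fresh = calloc(1, sizeof(*fresh));
   if (!fresh)
      return NULL;

   struct toy_bo *expected = NULL;
   if (__atomic_compare_exchange_n(slot, &expected, fresh, false,
                                   __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE))
      return fresh;                           /* we won the race */

   free(fresh);                               /* someone beat us to it */
   return expected;                           /* their value, loaded by the CAS */
}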