Lines Matching full:pool
81 * At the next level we can use various sub-allocators. The state pool is a
82 * pool of smaller, fixed size objects, which operates much like the block
83 * pool. It uses a free list for freeing objects, but when it runs out of
84 * space it just allocates a new block from the block pool. This allocator is
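A minimal sketch of the sub-allocator pattern this comment describes, with hypothetical names (struct fixed_pool, block_pool_alloc and BLOCK_SIZE are illustrative, not the driver's types): pop a previously freed slot off the free list if one exists, otherwise bump-allocate out of the current block, and only when that block is exhausted fall back to the block pool for a fresh one.

#include <stdint.h>

#define BLOCK_SIZE 4096u                       /* illustrative block size */
extern char *block_pool_alloc(uint32_t size);  /* stands in for the block pool */

struct free_node { struct free_node *next; };

/* Hypothetical fixed-size sub-allocator layered on a block pool
 * (assumes slot_size >= sizeof(struct free_node)). */
struct fixed_pool {
   struct free_node *free_list;   /* previously freed slots */
   char             *block;       /* current block being carved up */
   uint32_t          next, end;   /* unused range within that block */
   uint32_t          slot_size;   /* the fixed object size */
};

static void *
fixed_pool_alloc(struct fixed_pool *p)
{
   if (p->free_list) {                         /* 1. reuse a freed object */
      struct free_node *n = p->free_list;
      p->free_list = n->next;
      return n;
   }
   if (p->next + p->slot_size > p->end) {      /* 2. block exhausted: take a   */
      p->block = block_pool_alloc(BLOCK_SIZE); /*    fresh one from the block  */
      p->next  = 0;                            /*    pool                      */
      p->end   = BLOCK_SIZE;
   }
   void *obj = p->block + p->next;             /* 3. bump-allocate one slot */
   p->next  += p->slot_size;
   return obj;
}

static void
fixed_pool_free(struct fixed_pool *p, void *obj)
{
   struct free_node *n = obj;                  /* freed slots become list nodes */
   n->next      = p->free_list;
   p->free_list = n;
}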
240 anv_block_pool_expand_range(struct anv_block_pool *pool,
244 anv_block_pool_init(struct anv_block_pool *pool, in anv_block_pool_init() argument
251 pool->device = device; in anv_block_pool_init()
252 pool->bo_flags = bo_flags; in anv_block_pool_init()
253 anv_bo_init(&pool->bo, 0, 0); in anv_block_pool_init()
255 pool->fd = memfd_create("block pool", MFD_CLOEXEC); in anv_block_pool_init()
256 if (pool->fd == -1) in anv_block_pool_init()
263 if (ftruncate(pool->fd, BLOCK_POOL_MEMFD_SIZE) == -1) { in anv_block_pool_init()
268 if (!u_vector_init(&pool->mmap_cleanups, in anv_block_pool_init()
275 pool->state.next = 0; in anv_block_pool_init()
276 pool->state.end = 0; in anv_block_pool_init()
277 pool->back_state.next = 0; in anv_block_pool_init()
278 pool->back_state.end = 0; in anv_block_pool_init()
280 result = anv_block_pool_expand_range(pool, 0, initial_size); in anv_block_pool_init()
287 u_vector_finish(&pool->mmap_cleanups); in anv_block_pool_init()
289 close(pool->fd); in anv_block_pool_init()
295 anv_block_pool_finish(struct anv_block_pool *pool) in anv_block_pool_finish() argument
299 u_vector_foreach(cleanup, &pool->mmap_cleanups) { in anv_block_pool_finish()
303 anv_gem_close(pool->device, cleanup->gem_handle); in anv_block_pool_finish()
306 u_vector_finish(&pool->mmap_cleanups); in anv_block_pool_finish()
308 close(pool->fd); in anv_block_pool_finish()
314 anv_block_pool_expand_range(struct anv_block_pool *pool, in anv_block_pool_expand_range() argument
321 /* Assert that we only ever grow the pool */ in anv_block_pool_expand_range()
322 assert(center_bo_offset >= pool->back_state.end); in anv_block_pool_expand_range()
323 assert(size - center_bo_offset >= pool->state.end); in anv_block_pool_expand_range()
330 cleanup = u_vector_add(&pool->mmap_cleanups); in anv_block_pool_expand_range()
336 /* Just leak the old map until we destroy the pool. We can't munmap it in anv_block_pool_expand_range()
343 MAP_SHARED | MAP_POPULATE, pool->fd, in anv_block_pool_expand_range()
346 return vk_errorf(pool->device->instance, pool->device, in anv_block_pool_expand_range()
349 gem_handle = anv_gem_userptr(pool->device, map, size); in anv_block_pool_expand_range()
352 return vk_errorf(pool->device->instance, pool->device, in anv_block_pool_expand_range()
367 if (!pool->device->info.has_llc) { in anv_block_pool_expand_range()
368 anv_gem_set_caching(pool->device, gem_handle, I915_CACHING_NONE); in anv_block_pool_expand_range()
369 anv_gem_set_domain(pool->device, gem_handle, in anv_block_pool_expand_range()
375  * values back into the pool. */ in anv_block_pool_expand_range()
376 pool->map = map + center_bo_offset; in anv_block_pool_expand_range()
377 pool->center_bo_offset = center_bo_offset; in anv_block_pool_expand_range()
379 /* For block pool BOs we have to be a bit careful about where we place them in anv_block_pool_expand_range()
390 * the pool during command buffer building so we don't actually have a in anv_block_pool_expand_range()
404 anv_bo_init(&pool->bo, gem_handle, size); in anv_block_pool_expand_range()
405 pool->bo.flags = pool->bo_flags; in anv_block_pool_expand_range()
406 pool->bo.map = map; in anv_block_pool_expand_range()
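The mapping arithmetic in expand_range is the heart of the double-ended design: the memfd is mapped so that the middle of the file lands center_bo_offset bytes into the new mapping, and pool->map is then pointed at that middle, so pool offset 0 sits in the center, front allocations use positive offsets and back allocations negative ones. The sketch below assumes a 2 GiB memfd with its center at 1 GiB and that the mmap file offset is chosen relative to that center; only lines 376-377 (pool->map = map + center_bo_offset) are visible above, so the constants and the exact offset expression are assumptions.

#include <stdint.h>
#include <sys/mman.h>

#define MEMFD_SIZE   ((uint64_t)1 << 31)   /* illustrative; see BLOCK_POOL_MEMFD_SIZE */
#define MEMFD_CENTER (MEMFD_SIZE / 2)

/* Sketch of the double-ended mapping: map `size` bytes of the memfd so the
 * file's center ends up `center_bo_offset` bytes into the mapping, then use
 * that spot as pool offset 0.  Valid pool offsets are then
 * [-center_bo_offset, size - center_bo_offset).
 */
static void *
map_centered(int fd, uint64_t size, uint32_t center_bo_offset)
{
   void *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
                    MAP_SHARED | MAP_POPULATE, fd,
                    (off_t)(MEMFD_CENTER - center_bo_offset));
   if (map == MAP_FAILED)
      return NULL;

   return (char *)map + center_bo_offset;   /* CPU address of pool offset 0 */
}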
411 /** Grows and re-centers the block pool.
413 * We grow the block pool in one or both directions in such a way that the
416 * 1) The size of the entire pool is always a power of two.
418 * 2) The pool only grows on both ends. Neither end can get
422 * allocated for each end as we have used. This way the pool doesn't
426 * the pool retains a size of zero. (This makes it easier for users of
427 * the block pool that only want a one-sided pool.)
432 * 6) The center of the pool is always aligned to both the block_size of
433 * the pool and a 4K CPU page.
436 anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state) in anv_block_pool_grow() argument
440 pthread_mutex_lock(&pool->device->mutex); in anv_block_pool_grow()
442 assert(state == &pool->state || state == &pool->back_state); in anv_block_pool_grow()
444 /* Gather a little usage information on the pool. Since we may have in anv_block_pool_grow()
449  * whichever side tries to grow the pool. in anv_block_pool_grow()
454 uint32_t back_used = align_u32(pool->back_state.next, PAGE_SIZE); in anv_block_pool_grow()
455 uint32_t front_used = align_u32(pool->state.next, PAGE_SIZE); in anv_block_pool_grow()
458 assert(state == &pool->state || back_used > 0); in anv_block_pool_grow()
460 uint32_t old_size = pool->bo.size; in anv_block_pool_grow()
462 /* The block pool is always initialized to a nonzero size and this function in anv_block_pool_grow()
471 uint32_t back_required = MAX2(back_used, pool->center_bo_offset); in anv_block_pool_grow()
472 uint32_t front_required = MAX2(front_used, old_size - pool->center_bo_offset); in anv_block_pool_grow()
486 assert(size > pool->bo.size); in anv_block_pool_grow()
489 * of the pool, we maintain the ratio of how much is used by each side. in anv_block_pool_grow()
510 /* Make sure we don't shrink the back end of the pool */ in anv_block_pool_grow()
514 /* Make sure that we don't shrink the front end of the pool */ in anv_block_pool_grow()
521 result = anv_block_pool_expand_range(pool, center_bo_offset, size); in anv_block_pool_grow()
523 pool->bo.flags = pool->bo_flags; in anv_block_pool_grow()
526 pthread_mutex_unlock(&pool->device->mutex); in anv_block_pool_grow()
533 if (state == &pool->state) { in anv_block_pool_grow()
534 return pool->bo.size - pool->center_bo_offset; in anv_block_pool_grow()
536 assert(pool->center_bo_offset > 0); in anv_block_pool_grow()
537 return pool->center_bo_offset; in anv_block_pool_grow()
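A sketch of the resize computation implied by the invariants listed at lines 411-433 and the usage-ratio comment at line 489. The shape is: at least double the size (keeping it a power of two), split the new space in proportion to current usage, clamp so neither end shrinks, and keep the center page-aligned. The exact rounding is an assumption, and alignment of the center to block_size (also part of invariant 6) is omitted for brevity.

#include <stdint.h>

#define PAGE_SIZE 4096u   /* assumed CPU page size */

/* back_req/front_req are the page-aligned minimums each end must keep
 * (current usage, or whatever was previously mapped).  Assumes
 * back_used + front_used > 0, which holds because grow is only called
 * when an allocation ran off one end.
 */
static void
compute_grow(uint32_t old_size,
             uint32_t back_used, uint32_t front_used,
             uint32_t back_req,  uint32_t front_req,
             uint32_t *new_size, uint32_t *new_center)
{
   uint32_t size = old_size * 2;                 /* invariant 1: power of two, */
   while (size < back_req + front_req)           /* and big enough for both    */
      size *= 2;                                 /* ends                       */

   uint64_t total_used = (uint64_t)back_used + front_used;
   uint32_t center = (uint32_t)(((uint64_t)size * back_used) / total_used);
   center &= ~(PAGE_SIZE - 1);                   /* invariant 6: page-aligned center */

   if (center < back_req)                        /* invariant 2: never shrink */
      center = back_req;                         /* the back end...           */
   if (size - center < front_req)
      center = size - front_req;                 /* ...or the front end */

   *new_size   = size;
   *new_center = center;
}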
545 anv_block_pool_alloc_new(struct anv_block_pool *pool, in anv_block_pool_alloc_new() argument
554 assert(pool->map); in anv_block_pool_alloc_new()
557 /* We allocated the first block outside the pool so we have to grow in anv_block_pool_alloc_new()
558  * the pool. pool_state->next acts as a mutex: threads who try to in anv_block_pool_alloc_new()
564 new.end = anv_block_pool_grow(pool, pool_state); in anv_block_pool_alloc_new()
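The "pool_state->next acts as a mutex" idea is worth spelling out: {next, end} live in one 64-bit word, every thread does a single atomic fetch-and-add on it, and the one thread whose allocation is the first to cross end performs the grow while later threads sleep until end is republished. The outline below uses the same GCC __sync builtins as the surrounding code, but grow_pool(), wake_waiters() and wait_for_change() are stand-ins (for anv_block_pool_grow() and the driver's futex wrappers), not the driver's exact control flow.

#include <stdint.h>

union pool_state {
   struct { uint32_t next, end; };    /* packed so both update in one atomic op */
   uint64_t u64;
};

extern uint32_t grow_pool(union pool_state *ps);                  /* stand-in for anv_block_pool_grow() */
extern void wake_waiters(uint32_t *addr);                         /* stand-in for futex_wake()          */
extern void wait_for_change(uint32_t *addr, uint32_t old_value);  /* stand-in for futex_wait()          */

static int32_t
alloc_or_grow(union pool_state *ps, uint32_t block_size)
{
   while (1) {
      union pool_state st;
      st.u64 = __sync_fetch_and_add(&ps->u64, block_size);

      if (st.next + block_size <= st.end) {
         return st.next;                 /* fast path: the block fit */
      } else if (st.next <= st.end) {
         /* This thread was the first to cross `end`, so it owns the grow. */
         union pool_state nw;
         nw.next = st.next + block_size;
         nw.end  = grow_pool(ps);
         __sync_lock_test_and_set(&ps->u64, nw.u64);
         wake_waiters(&ps->end);         /* release everyone parked below */
         return st.next;
      } else {
         /* Someone else is growing the pool; sleep until `end` changes. */
         wait_for_change(&ps->end, st.end);
      }
   }
}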
579 anv_block_pool_alloc(struct anv_block_pool *pool, in anv_block_pool_alloc() argument
582 return anv_block_pool_alloc_new(pool, &pool->state, block_size); in anv_block_pool_alloc()
585 /* Allocates a block out of the back of the block pool.
587  * This will allocate a block earlier than the "start" of the block pool. in anv_block_pool_alloc_back()
589 * be correct relative to the block pool's map pointer.
592 * gymnastics with the block pool's BO when doing relocations.
595 anv_block_pool_alloc_back(struct anv_block_pool *pool, in anv_block_pool_alloc_back() argument
598 int32_t offset = anv_block_pool_alloc_new(pool, &pool->back_state, in anv_block_pool_alloc_back()
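Because pool->map points at the center of the BO, the offsets returned here are negative, yet they resolve with the same pointer arithmetic as front allocations. A hypothetical usage sketch (signatures inferred from the lines above; the cast to char * only makes the arithmetic explicit):

/* Hypothetical caller using both ends of the same block pool. */
static void
use_both_ends(struct anv_block_pool *pool, uint32_t block_size)
{
   int32_t back  = anv_block_pool_alloc_back(pool, block_size); /* e.g. -block_size */
   int32_t front = anv_block_pool_alloc(pool, block_size);      /* e.g. 0 */

   void *back_cpu  = (char *)pool->map + back;   /* map is the BO's center */
   void *front_cpu = (char *)pool->map + front;
   (void)back_cpu; (void)front_cpu;
}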
611 anv_state_pool_init(struct anv_state_pool *pool, in anv_state_pool_init() argument
616 VkResult result = anv_block_pool_init(&pool->block_pool, device, in anv_state_pool_init()
623 pool->block_size = block_size; in anv_state_pool_init()
624 pool->back_alloc_free_list = ANV_FREE_LIST_EMPTY; in anv_state_pool_init()
626 pool->buckets[i].free_list = ANV_FREE_LIST_EMPTY; in anv_state_pool_init()
627 pool->buckets[i].block.next = 0; in anv_state_pool_init()
628 pool->buckets[i].block.end = 0; in anv_state_pool_init()
630 VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false)); in anv_state_pool_init()
636 anv_state_pool_finish(struct anv_state_pool *pool) in anv_state_pool_finish() argument
638 VG(VALGRIND_DESTROY_MEMPOOL(pool)); in anv_state_pool_finish()
639 anv_block_pool_finish(&pool->block_pool); in anv_state_pool_finish()
643 anv_fixed_size_state_pool_alloc_new(struct anv_fixed_size_state_pool *pool, in anv_fixed_size_state_pool_alloc_new() argument
658 block.u64 = __sync_fetch_and_add(&pool->block.u64, state_size); in anv_fixed_size_state_pool_alloc_new()
666 old.u64 = __sync_lock_test_and_set(&pool->block.u64, new.u64); in anv_fixed_size_state_pool_alloc_new()
668 futex_wake(&pool->block.end, INT_MAX); in anv_fixed_size_state_pool_alloc_new()
671 futex_wait(&pool->block.end, block.end, NULL); in anv_fixed_size_state_pool_alloc_new()
694 anv_state_pool_alloc_no_vg(struct anv_state_pool *pool, in anv_state_pool_alloc_no_vg() argument
703 if (anv_free_list_pop(&pool->buckets[bucket].free_list, in anv_state_pool_alloc_no_vg()
704 &pool->block_pool.map, &state.offset)) { in anv_state_pool_alloc_no_vg()
712 if (anv_free_list_pop(&pool->buckets[b].free_list, in anv_state_pool_alloc_no_vg()
713 &pool->block_pool.map, &chunk_offset)) { in anv_state_pool_alloc_no_vg()
742 if (chunk_size > pool->block_size && in anv_state_pool_alloc_no_vg()
743 state.alloc_size < pool->block_size) { in anv_state_pool_alloc_no_vg()
744 assert(chunk_size % pool->block_size == 0); in anv_state_pool_alloc_no_vg()
751 anv_state_pool_get_bucket(pool->block_size); in anv_state_pool_alloc_no_vg()
752 anv_free_list_push(&pool->buckets[block_bucket].free_list, in anv_state_pool_alloc_no_vg()
753 pool->block_pool.map, in anv_state_pool_alloc_no_vg()
754 chunk_offset + pool->block_size, in anv_state_pool_alloc_no_vg()
755 pool->block_size, in anv_state_pool_alloc_no_vg()
756 (chunk_size / pool->block_size) - 1); in anv_state_pool_alloc_no_vg()
757 chunk_size = pool->block_size; in anv_state_pool_alloc_no_vg()
761 anv_free_list_push(&pool->buckets[bucket].free_list, in anv_state_pool_alloc_no_vg()
762 pool->block_pool.map, in anv_state_pool_alloc_no_vg()
772 state.offset = anv_fixed_size_state_pool_alloc_new(&pool->buckets[bucket], in anv_state_pool_alloc_no_vg()
773 &pool->block_pool, in anv_state_pool_alloc_no_vg()
775 pool->block_size); in anv_state_pool_alloc_no_vg()
778 state.map = pool->block_pool.map + state.offset; in anv_state_pool_alloc_no_vg()
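A hedged sketch of the size-class bucketing used above (the real anv_state_pool_get_bucket may differ; the 64-byte minimum is an assumption), followed by a worked example of the chunk split at lines 742-757:

#include <stdint.h>

#define MIN_STATE_SIZE_LOG2 6u   /* assumed smallest state: 64 bytes */

/* Illustrative size-class helper: bucket i serves states of
 * (1 << (MIN_STATE_SIZE_LOG2 + i)) bytes.
 */
static uint32_t
state_size_to_bucket(uint32_t size)
{
   uint32_t size_log2 = (size <= 1) ? 0 : 32 - __builtin_clz(size - 1);
   if (size_log2 < MIN_STATE_SIZE_LOG2)
      size_log2 = MIN_STATE_SIZE_LOG2;
   return size_log2 - MIN_STATE_SIZE_LOG2;
}

/* Worked example of the split at lines 742-757, with block_size = 4096 and a
 * freed 16 KiB chunk found while allocating a 64-byte state:
 *   - (16384 / 4096) - 1 = 3 whole 4 KiB blocks go back on the 4 KiB bucket,
 *   - the kept 4 KiB block yields 4096 / 64 = 64 states,
 *   - one state is returned, the remaining 63 go on the 64-byte bucket.
 */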
783 anv_state_pool_alloc(struct anv_state_pool *pool, uint32_t size, uint32_t align) in anv_state_pool_alloc() argument
788 struct anv_state state = anv_state_pool_alloc_no_vg(pool, size, align); in anv_state_pool_alloc()
789 VG(VALGRIND_MEMPOOL_ALLOC(pool, state.map, size)); in anv_state_pool_alloc()
794 anv_state_pool_alloc_back(struct anv_state_pool *pool) in anv_state_pool_alloc_back() argument
797 state.alloc_size = pool->block_size; in anv_state_pool_alloc_back()
799 if (anv_free_list_pop(&pool->back_alloc_free_list, in anv_state_pool_alloc_back()
800 &pool->block_pool.map, &state.offset)) { in anv_state_pool_alloc_back()
805 state.offset = anv_block_pool_alloc_back(&pool->block_pool, in anv_state_pool_alloc_back()
806 pool->block_size); in anv_state_pool_alloc_back()
809 state.map = pool->block_pool.map + state.offset; in anv_state_pool_alloc_back()
810 VG(VALGRIND_MEMPOOL_ALLOC(pool, state.map, state.alloc_size)); in anv_state_pool_alloc_back()
815 anv_state_pool_free_no_vg(struct anv_state_pool *pool, struct anv_state state) in anv_state_pool_free_no_vg() argument
821 assert(state.alloc_size == pool->block_size); in anv_state_pool_free_no_vg()
822 anv_free_list_push(&pool->back_alloc_free_list, in anv_state_pool_free_no_vg()
823 pool->block_pool.map, state.offset, in anv_state_pool_free_no_vg()
826 anv_free_list_push(&pool->buckets[bucket].free_list, in anv_state_pool_free_no_vg()
827 pool->block_pool.map, state.offset, in anv_state_pool_free_no_vg()
833 anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state) in anv_state_pool_free() argument
838 VG(VALGRIND_MEMPOOL_FREE(pool, state.map)); in anv_state_pool_free()
839 anv_state_pool_free_no_vg(pool, state); in anv_state_pool_free()
960 anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device, in anv_bo_pool_init() argument
963 pool->device = device; in anv_bo_pool_init()
964 pool->bo_flags = bo_flags; in anv_bo_pool_init()
965 memset(pool->free_list, 0, sizeof(pool->free_list)); in anv_bo_pool_init()
967 VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false)); in anv_bo_pool_init()
971 anv_bo_pool_finish(struct anv_bo_pool *pool) in anv_bo_pool_finish() argument
973 for (unsigned i = 0; i < ARRAY_SIZE(pool->free_list); i++) { in anv_bo_pool_finish()
974 struct bo_pool_bo_link *link = PFL_PTR(pool->free_list[i]); in anv_bo_pool_finish()
979 anv_gem_close(pool->device, link_copy.bo.gem_handle); in anv_bo_pool_finish()
984 VG(VALGRIND_DESTROY_MEMPOOL(pool)); in anv_bo_pool_finish()
988 anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo, uint32_t size) in anv_bo_pool_alloc() argument
995 assert(bucket < ARRAY_SIZE(pool->free_list)); in anv_bo_pool_alloc()
998 if (anv_ptr_free_list_pop(&pool->free_list[bucket], &next_free_void)) { in anv_bo_pool_alloc()
1005 VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size)); in anv_bo_pool_alloc()
1012 result = anv_bo_init_new(&new_bo, pool->device, pow2_size); in anv_bo_pool_alloc()
1016 new_bo.flags = pool->bo_flags; in anv_bo_pool_alloc()
1020 new_bo.map = anv_gem_mmap(pool->device, new_bo.gem_handle, 0, pow2_size, 0); in anv_bo_pool_alloc()
1022 anv_gem_close(pool->device, new_bo.gem_handle); in anv_bo_pool_alloc()
1028 VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size)); in anv_bo_pool_alloc()
1034 anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo_in) in anv_bo_pool_free() argument
1039 VG(VALGRIND_MEMPOOL_FREE(pool, bo.map)); in anv_bo_pool_free()
1047 assert(bucket < ARRAY_SIZE(pool->free_list)); in anv_bo_pool_free()
1049 anv_ptr_free_list_push(&pool->free_list[bucket], link); in anv_bo_pool_free()
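The BO pool keeps one free list per power-of-two size class: a request is rounded up to the next power of two and served from (or returned to) that bucket. A sketch of the bucket math, assuming a 4 KiB minimum BO size (the real floor may differ):

#include <stdint.h>

/* Illustrative: round the request up to a power of two and map it to a
 * free-list index, with bucket 0 holding 4 KiB BOs (assumed minimum).
 */
static uint32_t
bo_pool_bucket(uint32_t size, uint32_t *pow2_size)
{
   uint32_t size_log2 = (size <= 1) ? 0 : 32 - __builtin_clz(size - 1);
   if (size_log2 < 12)
      size_log2 = 12;
   *pow2_size = 1u << size_log2;   /* size actually allocated and mmapped */
   return size_log2 - 12;          /* index into pool->free_list[] */
}

A freed BO is pushed back onto the free list for its class (lines 1047-1049 above), ready to be popped by the next allocation of a similar size.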
1052 // Scratch pool
1055 anv_scratch_pool_init(struct anv_device *device, struct anv_scratch_pool *pool) in anv_scratch_pool_init() argument
1057 memset(pool, 0, sizeof(*pool)); in anv_scratch_pool_init()
1061 anv_scratch_pool_finish(struct anv_device *device, struct anv_scratch_pool *pool) in anv_scratch_pool_finish() argument
1065 struct anv_scratch_bo *bo = &pool->bos[i][s]; in anv_scratch_pool_finish()
1073 anv_scratch_pool_alloc(struct anv_device *device, struct anv_scratch_pool *pool, in anv_scratch_pool_alloc() argument
1082 struct anv_scratch_bo *bo = &pool->bos[scratch_size_log2][stage]; in anv_scratch_pool_alloc()
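The scratch pool is a small, lazily filled 2-D cache: one slot per shader stage and per scratch-size class, looked up as bos[scratch_size_log2][stage] above. A hedged sketch of the size-class index only; the 2 KiB granularity is an assumption about how per-thread scratch sizes are encoded.

#include <strings.h>   /* ffs() */

/* Illustrative: per-thread scratch sizes are powers of two starting at
 * 2 KiB, so the class index is just the bit position of size / 2048.
 */
static unsigned
scratch_size_class(unsigned per_thread_scratch)
{
   if (per_thread_scratch == 0)
      return 0;                          /* no scratch needed */
   return ffs(per_thread_scratch / 2048);
}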