Lines matching refs: pool
Cross-reference matches for the identifier pool in Mesa's src/intel/vulkan/anv_allocator.c, grouped below by the enclosing function; in every function listed, pool is a parameter. Numbers are line numbers in that file.
at file scope (forward declaration of the definition at line 451):
   361  anv_block_pool_expand_range(struct anv_block_pool *pool,
in anv_block_pool_init():
   365  anv_block_pool_init(struct anv_block_pool *pool,
   372     pool->device = device;
   373     pool->use_softpin = device->physical->use_softpin;
   374     pool->nbos = 0;
   375     pool->size = 0;
   376     pool->center_bo_offset = 0;
   377     pool->start_address = gen_canonical_address(start_address);
   378     pool->map = NULL;
   380     if (pool->use_softpin) {
   381        pool->bo = NULL;
   382        pool->fd = -1;
   388        pool->fd = os_create_anonymous_file(BLOCK_POOL_MEMFD_SIZE, "block pool");
   389        if (pool->fd == -1)
   392        pool->wrapper_bo = (struct anv_bo) {
   397        pool->bo = &pool->wrapper_bo;
   400     if (!u_vector_init(&pool->mmap_cleanups,
   407     pool->state.next = 0;
   408     pool->state.end = 0;
   409     pool->back_state.next = 0;
   410     pool->back_state.end = 0;
   412     result = anv_block_pool_expand_range(pool, 0, initial_size);
   419     pool->state.end = pool->size;
   424     u_vector_finish(&pool->mmap_cleanups);
   426     if (pool->fd >= 0)
   427        close(pool->fd);
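Note the two setups above: with softpin the pool starts with no BO at all (lines 381-382) and real BOs are appended as it grows, while the legacy path backs the whole pool with one anonymous file (line 388) that can be re-mmapped as the pool grows. A minimal sketch of such a backing file, assuming Linux and calling memfd_create directly instead of Mesa's os_create_anonymous_file wrapper (create_pool_backing is an invented name):

    #define _GNU_SOURCE
    #include <stdint.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Hypothetical stand-in for os_create_anonymous_file(): create an
     * anonymous, growable file to back the block pool. */
    static int
    create_pool_backing(uint64_t max_size)
    {
       int fd = memfd_create("block pool", 0);
       if (fd == -1)
          return -1;

       /* Reserve the full range up front; pages are allocated lazily,
        * so a large size costs nothing until it is actually touched. */
       if (ftruncate(fd, max_size) == -1) {
          close(fd);
          return -1;
       }
       return fd;
    }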
in anv_block_pool_finish():
   433  anv_block_pool_finish(struct anv_block_pool *pool)
   435     anv_block_pool_foreach_bo(bo, pool) {
   437        anv_gem_munmap(pool->device, bo->map, bo->size);
   438        anv_gem_close(pool->device, bo->gem_handle);
   442     u_vector_foreach(cleanup, &pool->mmap_cleanups)
   444     u_vector_finish(&pool->mmap_cleanups);
   446     if (pool->fd >= 0)
   447        close(pool->fd);
in anv_block_pool_expand_range():
   451  anv_block_pool_expand_range(struct anv_block_pool *pool,
   455     assert(center_bo_offset >= pool->back_state.end);
   456     assert(size - center_bo_offset >= pool->state.end);
   460     assert(pool->use_softpin ||
   491     if (!pool->use_softpin)
   494     if (pool->use_softpin) {
   495        uint32_t new_bo_size = size - pool->size;
   498        VkResult result = anv_device_alloc_bo(pool->device, new_bo_size,
   503                                              pool->start_address + pool->size,
   508        pool->bos[pool->nbos++] = new_bo;
   511        pool->bo = pool->bos[0];
   520                      MAP_SHARED | MAP_POPULATE, pool->fd,
   523           return vk_errorf(pool->device, pool->device,
   527        VkResult result = anv_device_import_bo_from_host_ptr(pool->device,
   537        struct anv_mmap_cleanup *cleanup = u_vector_add(&pool->mmap_cleanups);
   540           anv_device_release_bo(pool->device, new_bo);
   548        pool->center_bo_offset = center_bo_offset;
   549        pool->map = map + center_bo_offset;
   551        pool->bos[pool->nbos++] = new_bo;
   552        pool->wrapper_bo.map = new_bo;
   555     assert(pool->nbos < ANV_MAX_BLOCK_POOL_BOS);
   556     pool->size = size;
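With softpin, expand_range never moves existing memory: each growth appends one BO pinned at the first GPU address past the current range (lines 495-508), so addresses already handed out stay valid. A self-contained sketch of that bookkeeping, with device_alloc_bo_at standing in for anv_device_alloc_bo with an explicit fixed address:

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_BOS 64

    struct bo { uint64_t size; };

    /* Stand-in: allocate a BO pinned at GPU address `addr`, NULL on
     * failure (the real call is anv_device_alloc_bo with a fixed
     * address and appropriate flags). */
    extern struct bo *device_alloc_bo_at(uint64_t size, uint64_t addr);

    struct pool {
       uint64_t   start_address;   /* GPU address of pool offset 0 */
       uint64_t   size;            /* bytes currently backed */
       struct bo *bos[MAX_BOS];
       uint32_t   nbos;
    };

    /* Grow by appending one BO right after the current range, so no
     * existing BO is ever remapped or copied. */
    static bool
    pool_append_bo(struct pool *pool, uint64_t new_size)
    {
       uint64_t new_bo_size = new_size - pool->size;
       struct bo *new_bo =
          device_alloc_bo_at(new_bo_size, pool->start_address + pool->size);
       if (new_bo == NULL)
          return false;

       pool->bos[pool->nbos++] = new_bo;
       pool->size = new_size;
       return true;
    }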
in anv_block_pool_map():
   568  anv_block_pool_map(struct anv_block_pool *pool, int32_t offset, uint32_t size)
   570     if (pool->use_softpin) {
   573        anv_block_pool_foreach_bo(iter_bo, pool) {
   586     return pool->map + offset;
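Block-pool offsets are signed: the front grows up from 0 and the back grows down through negative offsets, which is why pool->map points at the center in the legacy path. A simplified model of the two translation cases above (struct names and fields abbreviated, bounds checks mostly elided):

    #include <stdint.h>
    #include <stddef.h>

    struct bo { void *map; uint32_t size; };

    struct pool {
       int        use_softpin;
       struct bo *bos[64];   /* softpin: chain of pinned BOs */
       uint32_t   nbos;
       void      *map;       /* legacy: one mapping, points at the center */
    };

    static void *
    pool_map(struct pool *pool, int32_t offset)
    {
       if (pool->use_softpin) {
          /* Softpin pools only allocate forward, so offsets are >= 0;
           * walk the BO chain until the offset lands inside one BO. */
          uint32_t rem = (uint32_t)offset;
          for (uint32_t i = 0; i < pool->nbos; i++) {
             struct bo *bo = pool->bos[i];
             if (rem < bo->size)
                return (char *)bo->map + rem;
             rem -= bo->size;
          }
          return NULL;   /* offset out of range */
       }
       /* Legacy path: pool->map already points at the center, so
        * negative (back-of-pool) offsets simply index below it. */
       return (char *)pool->map + offset;
    }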
in anv_block_pool_grow():
   615  anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state,
   620     pthread_mutex_lock(&pool->device->mutex);
   622     assert(state == &pool->state || state == &pool->back_state);
   634     uint32_t back_used = align_u32(pool->back_state.next, PAGE_SIZE);
   635     uint32_t front_used = align_u32(pool->state.next, PAGE_SIZE);
   638     assert(state == &pool->state || back_used > 0);
   640     uint32_t old_size = pool->size;
   647     const uint32_t old_back = pool->center_bo_offset;
   648     const uint32_t old_front = old_size - pool->center_bo_offset;
   657     if (pool->use_softpin) {
   678     assert(size > pool->size);
   713     result = anv_block_pool_expand_range(pool, center_bo_offset, size);
   716     pthread_mutex_unlock(&pool->device->mutex);
   723     if (state == &pool->state) {
   724        return pool->size - pool->center_bo_offset;
   726        assert(pool->center_bo_offset > 0);
   727        return pool->center_bo_offset;
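The grow path sizes the new pool so both halves keep their current contents while the side that ran out roughly doubles, then recenters via center_bo_offset. An illustrative sketch of just that sizing decision; the real function additionally handles softpin (front-only growth), memfd limits, and allocation padding:

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096
    #define MAX2(a, b) ((a) > (b) ? (a) : (b))

    /* Illustrative layout for a center-growing pool: double whichever
     * side overflowed, keep the other side at its current size. */
    static void
    compute_new_layout(uint32_t old_back, uint32_t old_front, bool grow_back,
                       uint32_t *new_size, uint32_t *center_bo_offset)
    {
       uint32_t back  = grow_back ? MAX2(old_back * 2, PAGE_SIZE) : old_back;
       uint32_t front = grow_back ? old_front : MAX2(old_front * 2, PAGE_SIZE);

       *center_bo_offset = back;    /* back allocations live below the center */
       *new_size = back + front;
    }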
in anv_block_pool_alloc_new():
   735  anv_block_pool_alloc_new(struct anv_block_pool *pool,
   750     if (pool->use_softpin && state.next < state.end) {
   773     new.end = anv_block_pool_grow(pool, pool_state, block_size);

in anv_block_pool_alloc():
   788  anv_block_pool_alloc(struct anv_block_pool *pool,
   793     offset = anv_block_pool_alloc_new(pool, &pool->state, block_size, padding);

in anv_block_pool_alloc_back():
   808  anv_block_pool_alloc_back(struct anv_block_pool *pool,
   811     int32_t offset = anv_block_pool_alloc_new(pool, &pool->back_state,
in anv_state_pool_init():
   824  anv_state_pool_init(struct anv_state_pool *pool,
   833     VkResult result = anv_block_pool_init(&pool->block_pool, device,
   839     pool->start_offset = start_offset;
   841     result = anv_state_table_init(&pool->table, device, 64);
   843        anv_block_pool_finish(&pool->block_pool);
   848     pool->block_size = block_size;
   849     pool->back_alloc_free_list = ANV_FREE_LIST_EMPTY;
   851        pool->buckets[i].free_list = ANV_FREE_LIST_EMPTY;
   852        pool->buckets[i].block.next = 0;
   853        pool->buckets[i].block.end = 0;
   855     VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));
in anv_state_pool_finish():
   861  anv_state_pool_finish(struct anv_state_pool *pool)
   863     VG(VALGRIND_DESTROY_MEMPOOL(pool));
   864     anv_state_table_finish(&pool->table);
   865     anv_block_pool_finish(&pool->block_pool);
in anv_fixed_size_state_pool_alloc_new():
   869  anv_fixed_size_state_pool_alloc_new(struct anv_fixed_size_state_pool *pool,
   892     block.u64 = __sync_fetch_and_add(&pool->block.u64, state_size);
   900        old.u64 = __sync_lock_test_and_set(&pool->block.u64, new.u64);
   902           futex_wake(&pool->block.end, INT_MAX);
   905        futex_wait(&pool->block.end, block.end, NULL);
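This is the allocator's lock-free core: next and end are packed into one 64-bit word, so a single __sync_fetch_and_add both claims space and snapshots the block bounds. Exactly one overflowing thread (the one whose claimed offset equals the old end) allocates a new block, republishes the word with an atomic exchange, and wakes the rest, who futex-wait on end. A condensed sketch, assuming Mesa-style futex_wait/futex_wake wrappers and an invented grow_backing_block helper; state sizes are assumed to divide the block size, so a partial fit cannot happen:

    #include <limits.h>
    #include <stdint.h>

    /* Assumed helpers, mirroring Mesa's futex wrappers and the
     * block-pool allocation done on the grow path. */
    extern void futex_wake(uint32_t *addr, int count);
    extern void futex_wait(uint32_t *addr, int32_t value, void *timeout);
    extern uint32_t grow_backing_block(uint32_t block_size);

    #define BLOCK_SIZE 8192   /* size of each carved block (illustrative) */

    union block_state {
       struct {
          uint32_t next;   /* first free byte; the low half on little-
                              endian, so the fetch-add below bumps it
                              without touching `end` */
          uint32_t end;    /* one past the last usable byte */
       };
       uint64_t u64;       /* both fields handled as one atomic word */
    };

    static uint32_t
    fixed_size_alloc(union block_state *state, uint32_t size)
    {
       while (1) {
          /* Claim `size` bytes and snapshot the old bounds atomically. */
          union block_state old;
          old.u64 = __sync_fetch_and_add(&state->u64, size);

          if (old.next + size <= old.end)
             return old.next;                  /* fits the current block */

          if (old.next == old.end) {
             /* First thread past the end: grow, then republish the whole
              * word, discarding everyone else's stale bumps. */
             uint32_t offset = grow_backing_block(BLOCK_SIZE);
             union block_state fresh = { .next = offset + size,
                                         .end  = offset + BLOCK_SIZE };
             __sync_lock_test_and_set(&state->u64, fresh.u64);
             futex_wake(&state->end, INT_MAX); /* unblock the losers */
             return offset;
          }

          /* Another thread is growing: sleep until `end` changes, retry. */
          futex_wait(&state->end, old.end, NULL);
       }
    }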
in anv_state_pool_return_blocks():
   933  anv_state_pool_return_blocks(struct anv_state_pool *pool,
   944     UNUSED VkResult result = anv_state_table_add(&pool->table, &st_idx, count);
   948        struct anv_state *state_i = anv_state_table_get(&pool->table,
   951        state_i->offset = pool->start_offset + chunk_offset + block_size * i;
   952        state_i->map = anv_block_pool_map(&pool->block_pool,
   958     anv_free_list_push(&pool->buckets[block_bucket].free_list,
   959                        &pool->table, st_idx, count);

in anv_state_pool_return_chunk():
   973  anv_state_pool_return_chunk(struct anv_state_pool *pool,
   977     uint32_t divisor = pool->block_size;
   988     anv_state_pool_return_blocks(pool, offset, nblocks, divisor);
  1006        anv_state_pool_return_blocks(pool, chunk_offset + rest,
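Returning a chunk means slicing it into bucket-sized pieces: whole block_size blocks are peeled off the (aligned) end first, then the remainder is returned as successively smaller power-of-two states. A sketch that mirrors that shape, with return_blocks as a stand-in for anv_state_pool_return_blocks and an assumed 64-byte minimum state:

    #include <stdint.h>

    /* Stand-in for anv_state_pool_return_blocks(): push `count` states
     * of `block_size` bytes starting at `offset` onto their bucket. */
    extern void return_blocks(uint32_t offset, uint32_t count,
                              uint32_t block_size);

    #define MIN_STATE_SIZE 64   /* smallest bucketed state (assumed) */

    static void
    return_chunk(uint32_t chunk_offset, uint32_t chunk_size,
                 uint32_t block_size)
    {
       uint32_t divisor = block_size;
       uint32_t nblocks = chunk_size / divisor;
       uint32_t rest = chunk_size - nblocks * divisor;

       /* Whole blocks come off the end of the chunk, where the
        * divisor-aligned offsets are; e.g. chunk_size = 10240 with
        * block_size = 4096 gives nblocks = 2 and rest = 2048. */
       if (nblocks > 0)
          return_blocks(chunk_offset + rest, nblocks, divisor);

       /* Return the remainder as ever smaller power-of-two states. */
       chunk_size = rest;
       while (chunk_size > 0 && (divisor /= 2) >= MIN_STATE_SIZE) {
          nblocks = chunk_size / divisor;
          rest = chunk_size - nblocks * divisor;
          if (nblocks > 0)
             return_blocks(chunk_offset + rest, nblocks, divisor);
          chunk_size = rest;
       }
    }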
in anv_state_pool_alloc_no_vg():
  1015  anv_state_pool_alloc_no_vg(struct anv_state_pool *pool,
  1025     state = anv_free_list_pop(&pool->buckets[bucket].free_list,
  1026                               &pool->table);
  1028        assert(state->offset >= pool->start_offset);
  1034        state = anv_free_list_pop(&pool->buckets[b].free_list, &pool->table);
  1075           anv_state_pool_return_chunk(pool, chunk_offset + alloc_size,
  1082     offset = anv_fixed_size_state_pool_alloc_new(&pool->buckets[bucket],
  1083                                                  &pool->block_pool,
  1085                                                  pool->block_size,
  1089     UNUSED VkResult result = anv_state_table_add(&pool->table, &idx, 1);
  1092     state = anv_state_table_get(&pool->table, idx);
  1093     state->offset = pool->start_offset + offset;
  1095     state->map = anv_block_pool_map(&pool->block_pool, offset, alloc_size);
  1099        anv_state_pool_return_chunk(pool, return_offset, padding, 0);

in anv_state_pool_alloc():
  1107  anv_state_pool_alloc(struct anv_state_pool *pool, uint32_t size, uint32_t align)
  1112     struct anv_state state = anv_state_pool_alloc_no_vg(pool, size, align);
  1113     VG(VALGRIND_MEMPOOL_ALLOC(pool, state.map, size));
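States live in power-of-two buckets, so the bucket index used above is just the ceiling log2 of the rounded-up size minus the log2 of the smallest state. A sketch, assuming a 64-byte minimum (the constant name is invented):

    #include <stdint.h>

    #define MIN_STATE_SIZE_LOG2 6   /* 64-byte minimum state (assumed) */

    /* Map an allocation size/alignment to its power-of-two bucket. */
    static uint32_t
    state_pool_bucket(uint32_t size, uint32_t align)
    {
       uint32_t rounded = size < align ? align : size;  /* honor alignment */
       if (rounded <= (1u << MIN_STATE_SIZE_LOG2))
          return 0;                                     /* smallest bucket */
       /* Ceiling log2: 65..128 -> 7, 129..256 -> 8, and so on. */
       uint32_t log2 = 32 - (uint32_t)__builtin_clz(rounded - 1);
       return log2 - MIN_STATE_SIZE_LOG2;
    }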
in anv_state_pool_alloc_back():
  1118  anv_state_pool_alloc_back(struct anv_state_pool *pool)
  1121     uint32_t alloc_size = pool->block_size;
  1124     assert(pool->start_offset == 0);
  1126     state = anv_free_list_pop(&pool->back_alloc_free_list, &pool->table);
  1128        assert(state->offset < pool->start_offset);
  1133     offset = anv_block_pool_alloc_back(&pool->block_pool,
  1134                                        pool->block_size);
  1136     UNUSED VkResult result = anv_state_table_add(&pool->table, &idx, 1);
  1139     state = anv_state_table_get(&pool->table, idx);
  1140     state->offset = pool->start_offset + offset;
  1142     state->map = anv_block_pool_map(&pool->block_pool, offset, alloc_size);
  1145     VG(VALGRIND_MEMPOOL_ALLOC(pool, state->map, state->alloc_size));
in anv_state_pool_free_no_vg():
  1150  anv_state_pool_free_no_vg(struct anv_state_pool *pool, struct anv_state state)
  1155     if (state.offset < pool->start_offset) {
  1156        assert(state.alloc_size == pool->block_size);
  1157        anv_free_list_push(&pool->back_alloc_free_list,
  1158                           &pool->table, state.idx, 1);
  1160        anv_free_list_push(&pool->buckets[bucket].free_list,
  1161                           &pool->table, state.idx, 1);

in anv_state_pool_free():
  1166  anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state)
  1171     VG(VALGRIND_MEMPOOL_FREE(pool, state.map));
  1172     anv_state_pool_free_no_vg(pool, state);
in anv_state_reserved_pool_init():
  1275  anv_state_reserved_pool_init(struct anv_state_reserved_pool *pool,
  1279     pool->pool = parent;
  1280     pool->reserved_blocks = ANV_FREE_LIST_EMPTY;
  1281     pool->count = count;
  1284        struct anv_state state = anv_state_pool_alloc(pool->pool, size, alignment);
  1285        anv_free_list_push(&pool->reserved_blocks, &pool->pool->table, state.idx, 1);

in anv_state_reserved_pool_finish():
  1290  anv_state_reserved_pool_finish(struct anv_state_reserved_pool *pool)
  1294     while ((state = anv_free_list_pop(&pool->reserved_blocks, &pool->pool->table))) {
  1295        anv_state_pool_free(pool->pool, *state);
  1296        pool->count--;
  1298     assert(pool->count == 0);

in anv_state_reserved_pool_alloc():
  1302  anv_state_reserved_pool_alloc(struct anv_state_reserved_pool *pool)
  1304     return *anv_free_list_pop(&pool->reserved_blocks, &pool->pool->table);

in anv_state_reserved_pool_free():
  1308  anv_state_reserved_pool_free(struct anv_state_reserved_pool *pool,
  1311     anv_free_list_push(&pool->reserved_blocks, &pool->pool->table, state.idx, 1);
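The reserved pool is a fixed budget of states pulled from a parent state pool once at init time; afterwards alloc and free only touch the pool's own free list, so they can neither fail nor contend with the parent. A single-threaded toy model of the idea (the real version reuses anv's lock-free free list and state table; malloc stands in for the parent pool):

    #include <assert.h>
    #include <stddef.h>
    #include <stdlib.h>

    struct reserved_pool {
       void    *free_list;   /* each free state's first word links to the next */
       unsigned count;
    };

    static void
    reserved_pool_init(struct reserved_pool *pool, size_t size, unsigned count)
    {
       pool->free_list = NULL;
       pool->count = count;
       for (unsigned i = 0; i < count; i++) {
          /* Grab the whole budget from the "parent" up front. */
          void *state = malloc(size < sizeof(void *) ? sizeof(void *) : size);
          *(void **)state = pool->free_list;
          pool->free_list = state;
       }
    }

    static void *
    reserved_pool_alloc(struct reserved_pool *pool)
    {
       void *state = pool->free_list;
       assert(state != NULL);          /* budget exceeded: a usage bug */
       pool->free_list = *(void **)state;
       return state;
    }

    static void
    reserved_pool_free(struct reserved_pool *pool, void *state)
    {
       *(void **)state = pool->free_list;
       pool->free_list = state;
    }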
in anv_bo_pool_init():
  1315  anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device)
  1317     pool->device = device;
  1318     for (unsigned i = 0; i < ARRAY_SIZE(pool->free_list); i++) {
  1319        util_sparse_array_free_list_init(&pool->free_list[i],
  1324     VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));

in anv_bo_pool_finish():
  1328  anv_bo_pool_finish(struct anv_bo_pool *pool)
  1330     for (unsigned i = 0; i < ARRAY_SIZE(pool->free_list); i++) {
  1333           util_sparse_array_free_list_pop_elem(&pool->free_list[i]);
  1339        anv_device_release_bo(pool->device, bo);
  1343     VG(VALGRIND_DESTROY_MEMPOOL(pool));

in anv_bo_pool_alloc():
  1347  anv_bo_pool_alloc(struct anv_bo_pool *pool, uint32_t size,
  1353     assert(bucket < ARRAY_SIZE(pool->free_list));
  1356        util_sparse_array_free_list_pop_elem(&pool->free_list[bucket]);
  1358        VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size));
  1363     VkResult result = anv_device_alloc_bo(pool->device,
  1375     VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size));

in anv_bo_pool_free():
  1383  anv_bo_pool_free(struct anv_bo_pool *pool, struct anv_bo *bo)
  1385     VG(VALGRIND_MEMPOOL_FREE(pool, bo->map));
  1390     assert(bucket < ARRAY_SIZE(pool->free_list));
  1392     assert(util_sparse_array_get(&pool->device->bo_cache.bo_map,
  1394     util_sparse_array_free_list_push(&pool->free_list[bucket],
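The BO pool recycles whole buffer objects by power-of-two size bucket: freeing parks a still-mapped BO on its bucket's list, and the next allocation of that bucket reuses it without any kernel call. A single-threaded toy model (the real pool uses lock-free sparse-array free lists and anv_device_alloc_bo; the 4 KiB minimum is an assumption):

    #include <stdint.h>
    #include <stdlib.h>

    #define NUM_BUCKETS 16
    #define MIN_BO_SIZE_LOG2 12          /* 4 KiB minimum (assumed) */

    struct bo { struct bo *next; uint32_t size; void *map; };

    struct bo_pool { struct bo *free_list[NUM_BUCKETS]; };

    static unsigned
    size_to_bucket(uint32_t size)        /* ceiling log2, floored at 4 KiB */
    {
       unsigned log2 = (size <= (1u << MIN_BO_SIZE_LOG2))
                     ? MIN_BO_SIZE_LOG2
                     : 32 - __builtin_clz(size - 1);
       return log2 - MIN_BO_SIZE_LOG2;
    }

    /* Alloc: recycle from the bucket's list, else create a BO of the
     * full bucket size so any later request in this bucket fits it. */
    static struct bo *
    bo_pool_alloc(struct bo_pool *pool, uint32_t size)
    {
       unsigned bucket = size_to_bucket(size);

       struct bo *bo = pool->free_list[bucket];
       if (bo != NULL) {
          pool->free_list[bucket] = bo->next;
          return bo;                     /* recycled: still mapped */
       }

       bo = calloc(1, sizeof(*bo));      /* stands in for anv_device_alloc_bo */
       bo->size = 1u << (bucket + MIN_BO_SIZE_LOG2);
       bo->map = malloc(bo->size);
       return bo;
    }

    static void
    bo_pool_free(struct bo_pool *pool, struct bo *bo)
    {
       unsigned bucket = size_to_bucket(bo->size);
       bo->next = pool->free_list[bucket];
       pool->free_list[bucket] = bo;     /* parked for reuse, never unmapped */
    }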
in anv_scratch_pool_init():
  1401  anv_scratch_pool_init(struct anv_device *device, struct anv_scratch_pool *pool)
  1403     memset(pool, 0, sizeof(*pool));

in anv_scratch_pool_finish():
  1407  anv_scratch_pool_finish(struct anv_device *device, struct anv_scratch_pool *pool)
  1411           if (pool->bos[i][s] != NULL)
  1412              anv_device_release_bo(device, pool->bos[i][s]);

in anv_scratch_pool_alloc():
  1418  anv_scratch_pool_alloc(struct anv_device *device, struct anv_scratch_pool *pool,
  1427     struct anv_bo *bo = p_atomic_read(&pool->bos[scratch_size_log2][stage]);
  1536     p_atomic_cmpxchg(&pool->bos[scratch_size_log2][stage], NULL, bo);
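Scratch BOs are created lazily and published with one compare-and-swap: threads may race to build the same slot, but only one cmpxchg wins and the loser throws its BO away and adopts the winner's. A self-contained sketch of the pattern with C11 atomics (malloc/free stand in for anv_device_alloc_bo/anv_device_release_bo):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct bo { size_t size; };

    /* Racy lazy init: every thread may build a candidate BO, but only
     * one compare-exchange publishes it; the slot never changes again. */
    static struct bo *
    get_scratch_bo(_Atomic(struct bo *) *slot, size_t size)
    {
       struct bo *bo = atomic_load(slot);
       if (bo != NULL)
          return bo;                     /* fast path: already published */

       struct bo *fresh = malloc(sizeof(*fresh));
       fresh->size = size;

       struct bo *expected = NULL;
       if (atomic_compare_exchange_strong(slot, &expected, fresh))
          return fresh;                  /* we won the race */

       free(fresh);                      /* lost: use the winner's BO */
       return expected;
    }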