Lines Matching refs:pool
247 anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state);
250 anv_block_pool_init(struct anv_block_pool *pool, in anv_block_pool_init() argument
257 pool->device = device; in anv_block_pool_init()
258 anv_bo_init(&pool->bo, 0, 0); in anv_block_pool_init()
259 pool->block_size = block_size; in anv_block_pool_init()
260 pool->free_list = ANV_FREE_LIST_EMPTY; in anv_block_pool_init()
261 pool->back_free_list = ANV_FREE_LIST_EMPTY; in anv_block_pool_init()
263 pool->fd = memfd_create("block pool", MFD_CLOEXEC); in anv_block_pool_init()
264 if (pool->fd == -1) in anv_block_pool_init()
271 if (ftruncate(pool->fd, BLOCK_POOL_MEMFD_SIZE) == -1) { in anv_block_pool_init()
276 if (!u_vector_init(&pool->mmap_cleanups, in anv_block_pool_init()
283 pool->state.next = 0; in anv_block_pool_init()
284 pool->state.end = 0; in anv_block_pool_init()
285 pool->back_state.next = 0; in anv_block_pool_init()
286 pool->back_state.end = 0; in anv_block_pool_init()
289 pool->state.end = anv_block_pool_grow(pool, &pool->state); in anv_block_pool_init()
294 close(pool->fd); in anv_block_pool_init()
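The init lines above back the whole pool with an anonymous memfd rather than plain anonymous memory, so the mapping can later be resized and remapped at both ends (the grow path wraps windows of this fd into GEM userptr BOs). A minimal standalone sketch of that setup; the 256 MiB cap and the error handling here are placeholders for the sketch, not the driver's values:

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <unistd.h>
    #include <stdio.h>

    int main(void)
    {
       /* memfd gives an anonymous, ftruncate-resizable file; MFD_CLOEXEC
        * keeps the fd from leaking into child processes. */
       int fd = memfd_create("block pool", MFD_CLOEXEC);
       if (fd == -1) {
          perror("memfd_create");
          return 1;
       }

       /* Reserve the full logical size up front; pages are only
        * materialized when touched, so this is cheap. */
       if (ftruncate(fd, 256ull * 1024 * 1024) == -1) {
          perror("ftruncate");
          close(fd);
          return 1;
       }

       /* The driver would now mmap windows of this fd as the pool grows. */
       close(fd);
       return 0;
    }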
300 anv_block_pool_finish(struct anv_block_pool *pool) in anv_block_pool_finish() argument
304 u_vector_foreach(cleanup, &pool->mmap_cleanups) { in anv_block_pool_finish()
308 anv_gem_close(pool->device, cleanup->gem_handle); in anv_block_pool_finish()
311 u_vector_finish(&pool->mmap_cleanups); in anv_block_pool_finish()
313 close(pool->fd); in anv_block_pool_finish()
343 anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state) in anv_block_pool_grow() argument
350 pthread_mutex_lock(&pool->device->mutex); in anv_block_pool_grow()
352 assert(state == &pool->state || state == &pool->back_state); in anv_block_pool_grow()
364 uint32_t back_used = align_u32(pool->back_state.next, PAGE_SIZE); in anv_block_pool_grow()
365 uint32_t front_used = align_u32(pool->state.next, PAGE_SIZE); in anv_block_pool_grow()
368 assert(state == &pool->state || back_used > 0); in anv_block_pool_grow()
370 size_t old_size = pool->bo.size; in anv_block_pool_grow()
373 back_used * 2 <= pool->center_bo_offset && in anv_block_pool_grow()
374 front_used * 2 <= (old_size - pool->center_bo_offset)) { in anv_block_pool_grow()
384 size = MAX2(32 * pool->block_size, PAGE_SIZE); in anv_block_pool_grow()
413 uint32_t granularity = MAX2(pool->block_size, PAGE_SIZE); in anv_block_pool_grow()
420 if (center_bo_offset < pool->back_state.end) in anv_block_pool_grow()
421 center_bo_offset = pool->back_state.end; in anv_block_pool_grow()
424 if (size - center_bo_offset < pool->state.end) in anv_block_pool_grow()
425 center_bo_offset = size - pool->state.end; in anv_block_pool_grow()
428 assert(center_bo_offset % pool->block_size == 0); in anv_block_pool_grow()
432 assert(center_bo_offset >= pool->back_state.end); in anv_block_pool_grow()
433 assert(size - center_bo_offset >= pool->state.end); in anv_block_pool_grow()
435 cleanup = u_vector_add(&pool->mmap_cleanups); in anv_block_pool_grow()
447 MAP_SHARED | MAP_POPULATE, pool->fd, in anv_block_pool_grow()
455 gem_handle = anv_gem_userptr(pool->device, map, size); in anv_block_pool_grow()
467 if (!pool->device->info.has_llc) { in anv_block_pool_grow()
468 anv_gem_set_caching(pool->device, gem_handle, I915_CACHING_NONE); in anv_block_pool_grow()
469 anv_gem_set_domain(pool->device, gem_handle, in anv_block_pool_grow()
476 pool->map = map + center_bo_offset; in anv_block_pool_grow()
477 pool->center_bo_offset = center_bo_offset; in anv_block_pool_grow()
478 anv_bo_init(&pool->bo, gem_handle, size); in anv_block_pool_grow()
479 pool->bo.map = map; in anv_block_pool_grow()
482 pthread_mutex_unlock(&pool->device->mutex); in anv_block_pool_grow()
488 if (state == &pool->state) { in anv_block_pool_grow()
489 return pool->bo.size - pool->center_bo_offset; in anv_block_pool_grow()
491 assert(pool->center_bo_offset > 0); in anv_block_pool_grow()
492 return pool->center_bo_offset; in anv_block_pool_grow()
496 pthread_mutex_unlock(&pool->device->mutex); in anv_block_pool_grow()
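The grow path above runs under the device mutex, and the two-sided layout does the interesting work: the BO is addressed from a movable center, with the front state allocating upward and the back state downward. If both halves are still at most half used, the remap is skipped entirely; otherwise the memfd is mapped at a new size with MAP_SHARED | MAP_POPULATE and handed to the kernel as a userptr BO, with the center re-chosen in proportion to usage. A hedged, self-contained sketch of that center computation; back_end/front_end stand in for pool->back_state.end and pool->state.end, and block-aligned, power-of-two sizes are assumed:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u
    #define MAX2(a, b) ((a) > (b) ? (a) : (b))

    static uint32_t
    choose_center_bo_offset(uint32_t size, uint32_t block_size,
                            uint32_t back_used, uint32_t front_used,
                            uint32_t back_end, uint32_t front_end)
    {
       /* Split the new BO in proportion to how much each side has used. */
       uint32_t total_used = back_used + front_used;
       uint32_t center = total_used == 0 ? 0 :
          (uint32_t)(((uint64_t)size * back_used) / total_used);

       /* Round down to both block and page granularity so each half
        * stays mappable and block-aligned. */
       uint32_t granularity = MAX2(block_size, PAGE_SIZE);
       center &= ~(granularity - 1);

       /* Never cut off ranges either side has already handed out. */
       if (center < back_end)
          center = back_end;
       if (size - center < front_end)
          center = size - front_end;

       assert(center % block_size == 0);
       assert(center >= back_end && size - center >= front_end);
       return center;
    }

    int main(void)
    {
       /* Front side has used 3x what the back has: the split follows. */
       printf("%u\n", choose_center_bo_offset(1 << 20, 4096,
                                              8192, 24576, 8192, 24576));
       return 0;
    }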
502 anv_block_pool_alloc_new(struct anv_block_pool *pool, in anv_block_pool_alloc_new() argument
508 state.u64 = __sync_fetch_and_add(&pool_state->u64, pool->block_size); in anv_block_pool_alloc_new()
510 assert(pool->map); in anv_block_pool_alloc_new()
517 new.next = state.next + pool->block_size; in anv_block_pool_alloc_new()
518 new.end = anv_block_pool_grow(pool, pool_state); in anv_block_pool_alloc_new()
519 assert(new.end >= new.next && new.end % pool->block_size == 0); in anv_block_pool_alloc_new()
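alloc_new is lock-free on the fast path: next and end live in one 64-bit word, so a single __sync_fetch_and_add both claims a block and reveals whether the claim fell past the end of the pool. A self-contained sketch of that fast path; the driver's grow-and-wake slow path is only summarized in a comment:

    #include <stdint.h>
    #include <stdio.h>

    union block_state {
       struct {
          uint32_t next;   /* offset of the next unclaimed block */
          uint32_t end;    /* one past the last usable offset    */
       };
       uint64_t u64;
    };

    static int32_t
    bump_alloc(union block_state *state, uint32_t block_size)
    {
       union block_state old;
       old.u64 = __sync_fetch_and_add(&state->u64, block_size);

       if (old.next < old.end)
          return (int32_t)old.next;   /* fast path: block was in range */

       /* Driver slow path: the thread that lands exactly on 'end' grows
        * the pool and futex-wakes everyone; threads that overshoot
        * futex-wait on 'end' and retry. Elided here. */
       return -1;
    }

    int main(void)
    {
       union block_state s = { { 0, 16384 } };
       int32_t a = bump_alloc(&s, 4096);
       int32_t b = bump_alloc(&s, 4096);
       printf("%d %d\n", a, b);   /* 0 4096 */
       return 0;
    }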
532 anv_block_pool_alloc(struct anv_block_pool *pool) in anv_block_pool_alloc() argument
537 if (anv_free_list_pop(&pool->free_list, &pool->map, &offset)) { in anv_block_pool_alloc()
539 assert(pool->map); in anv_block_pool_alloc()
543 return anv_block_pool_alloc_new(pool, &pool->state); in anv_block_pool_alloc()
556 anv_block_pool_alloc_back(struct anv_block_pool *pool) in anv_block_pool_alloc_back() argument
561 if (anv_free_list_pop(&pool->back_free_list, &pool->map, &offset)) { in anv_block_pool_alloc_back()
563 assert(pool->map); in anv_block_pool_alloc_back()
567 offset = anv_block_pool_alloc_new(pool, &pool->back_state); in anv_block_pool_alloc_back()
575 return -(offset + pool->block_size); in anv_block_pool_alloc_back()
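alloc_back returns negative offsets on purpose: pool->map points at the center of the BO, so one pointer-arithmetic convention serves both halves. The extra block_size in the return value makes the offset point at the start of the block rather than its end. A small sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* 'next' is how many back-side bytes were already handed out; the
     * block being returned occupies [-(next+block_size), -next) relative
     * to the pool's center mapping. */
    static int32_t back_offset(uint32_t next, uint32_t block_size)
    {
       return -(int32_t)(next + block_size);
    }

    int main(void)
    {
       printf("%d\n", back_offset(0, 4096));    /* -4096: first back block  */
       printf("%d\n", back_offset(4096, 4096)); /* -8192: second back block */
       return 0;
    }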
579 anv_block_pool_free(struct anv_block_pool *pool, int32_t offset) in anv_block_pool_free() argument
582 anv_free_list_push(&pool->back_free_list, pool->map, offset); in anv_block_pool_free()
584 anv_free_list_push(&pool->free_list, pool->map, offset); in anv_block_pool_free()
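Because the sign of an offset already says which half a block came from, anv_block_pool_free is just a sign test plus a push onto the matching lock-free free list. The list head packs a generation counter next to the offset so compare-and-swap can detect ABA reuse. A hedged, self-contained sketch of that push; the sentinel value and field names are modeled on the pattern above, not copied from the header:

    #include <stdint.h>

    union free_list {
       struct {
          int32_t  offset;  /* head block, or an impossible sentinel */
          uint32_t count;   /* generation counter to defeat ABA      */
       };
       uint64_t u64;
    };

    #define FREE_LIST_EMPTY 1  /* unaligned, so never a real block */

    /* Each free block stores the offset of the next free block in its
     * first four bytes; 'map' is the pool's CPU mapping at the center. */
    static void
    free_list_push(union free_list *list, char *map, int32_t offset)
    {
       union free_list current, new;
       int32_t *next_ptr = (int32_t *)(map + offset);
       uint64_t old = list->u64;

       do {
          current.u64 = old;
          *next_ptr = current.offset;       /* link to the old head */
          new.offset = offset;
          new.count = current.count + 1;
          old = __sync_val_compare_and_swap(&list->u64,
                                            current.u64, new.u64);
       } while (old != current.u64);
    }

    int main(void)
    {
       static char map[8192];
       union free_list list = { { FREE_LIST_EMPTY, 0 } };
       free_list_push(&list, map, 4096);
       free_list_push(&list, map, 0);
       return (int)list.offset;   /* 0: last-pushed block is the head */
    }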
589 anv_fixed_size_state_pool_init(struct anv_fixed_size_state_pool *pool, in anv_fixed_size_state_pool_init() argument
595 pool->state_size = state_size; in anv_fixed_size_state_pool_init()
596 pool->free_list = ANV_FREE_LIST_EMPTY; in anv_fixed_size_state_pool_init()
597 pool->block.next = 0; in anv_fixed_size_state_pool_init()
598 pool->block.end = 0; in anv_fixed_size_state_pool_init()
602 anv_fixed_size_state_pool_alloc(struct anv_fixed_size_state_pool *pool, in anv_fixed_size_state_pool_alloc() argument
609 if (anv_free_list_pop(&pool->free_list, &block_pool->map, &offset)) { in anv_fixed_size_state_pool_alloc()
617 block.u64 = __sync_fetch_and_add(&pool->block.u64, pool->state_size); in anv_fixed_size_state_pool_alloc()
623 new.next = offset + pool->state_size; in anv_fixed_size_state_pool_alloc()
625 old.u64 = __sync_lock_test_and_set(&pool->block.u64, new.u64); in anv_fixed_size_state_pool_alloc()
627 futex_wake(&pool->block.end, INT_MAX); in anv_fixed_size_state_pool_alloc()
630 futex_wait(&pool->block.end, block.end); in anv_fixed_size_state_pool_alloc()
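The alloc path above parks racing threads on a futex while one thread grows the backing block: the winner of the __sync_lock_test_and_set exchange publishes the new {next, end} and wakes all waiters with INT_MAX; losers wait on block.end with the value they observed and then retry the whole allocation. Mesa wraps the raw syscall roughly like this; a minimal Linux-only sketch of those wrappers:

    #define _GNU_SOURCE
    #include <stdint.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/futex.h>

    static long futex_wake(uint32_t *addr, int count)
    {
       return syscall(SYS_futex, addr, FUTEX_WAKE, count, NULL, NULL, 0);
    }

    static long futex_wait(uint32_t *addr, uint32_t expected)
    {
       /* The kernel only sleeps if *addr still equals 'expected', so a
        * wake that lands between our read and the syscall is not lost:
        * the wait returns immediately instead. */
       return syscall(SYS_futex, addr, FUTEX_WAIT, expected, NULL, NULL, 0);
    }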
636 anv_fixed_size_state_pool_free(struct anv_fixed_size_state_pool *pool, in anv_fixed_size_state_pool_free() argument
640 anv_free_list_push(&pool->free_list, block_pool->map, offset); in anv_fixed_size_state_pool_free()
644 anv_state_pool_init(struct anv_state_pool *pool, in anv_state_pool_init() argument
647 pool->block_pool = block_pool; in anv_state_pool_init()
650 anv_fixed_size_state_pool_init(&pool->buckets[i], size); in anv_state_pool_init()
652 VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false)); in anv_state_pool_init()
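The VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false)) call registers the state pool as a custom allocator with Valgrind, so memcheck can track each suballocated state as if it were a heap block even though it lives inside one big mapping. A sketch of the full annotation lifecycle, mirroring the alloc/free lines further down; the client-request macros come from valgrind/valgrind.h and compile to no-ops outside Valgrind:

    #include <stdbool.h>
    #include <stddef.h>
    #include <valgrind/valgrind.h>

    static void
    mempool_annotations(void *pool, void *state_map, size_t size)
    {
       /* rzB = 0: no redzones; is_zeroed = false: contents undefined. */
       VALGRIND_CREATE_MEMPOOL(pool, 0, false);

       VALGRIND_MEMPOOL_ALLOC(pool, state_map, size);  /* on each alloc */
       VALGRIND_MEMPOOL_FREE(pool, state_map);         /* on each free  */

       VALGRIND_DESTROY_MEMPOOL(pool);
    }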
656 anv_state_pool_finish(struct anv_state_pool *pool) in anv_state_pool_finish() argument
658 VG(VALGRIND_DESTROY_MEMPOOL(pool)); in anv_state_pool_finish()
662 anv_state_pool_alloc(struct anv_state_pool *pool, size_t size, size_t align) in anv_state_pool_alloc() argument
672 state.offset = anv_fixed_size_state_pool_alloc(&pool->buckets[bucket], in anv_state_pool_alloc()
673 pool->block_pool); in anv_state_pool_alloc()
674 state.map = pool->block_pool->map + state.offset; in anv_state_pool_alloc()
675 VG(VALGRIND_MEMPOOL_ALLOC(pool, state.map, size)); in anv_state_pool_alloc()
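anv_state_pool_alloc routes each request to one of a small array of fixed-size pools, one per power-of-two size class. A sketch of the implied bucket math; the 64-byte minimum class here is an assumption, not the driver's constant:

    #include <stddef.h>
    #include <stdio.h>

    #define MIN_SIZE_LOG2 6   /* assumed: smallest class is 64 bytes */

    static unsigned size_to_bucket(size_t size)
    {
       unsigned log2 = MIN_SIZE_LOG2;
       while (((size_t)1 << log2) < size)
          log2++;               /* round up to the next power of two */
       return log2 - MIN_SIZE_LOG2;
    }

    int main(void)
    {
       printf("%u %u %u\n", size_to_bucket(64),   /* 0 */
                            size_to_bucket(65),   /* 1 */
                            size_to_bucket(256)); /* 2 */
       return 0;
    }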
680 anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state) in anv_state_pool_free() argument
688 VG(VALGRIND_MEMPOOL_FREE(pool, state.map)); in anv_state_pool_free()
689 anv_fixed_size_state_pool_free(&pool->buckets[bucket], in anv_state_pool_free()
690 pool->block_pool, state.offset); in anv_state_pool_free()
805 anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device) in anv_bo_pool_init() argument
807 pool->device = device; in anv_bo_pool_init()
808 memset(pool->free_list, 0, sizeof(pool->free_list)); in anv_bo_pool_init()
810 VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false)); in anv_bo_pool_init()
814 anv_bo_pool_finish(struct anv_bo_pool *pool) in anv_bo_pool_finish() argument
816 for (unsigned i = 0; i < ARRAY_SIZE(pool->free_list); i++) { in anv_bo_pool_finish()
817 struct bo_pool_bo_link *link = PFL_PTR(pool->free_list[i]); in anv_bo_pool_finish()
822 anv_gem_close(pool->device, link_copy.bo.gem_handle); in anv_bo_pool_finish()
827 VG(VALGRIND_DESTROY_MEMPOOL(pool)); in anv_bo_pool_finish()
831 anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo, uint32_t size) in anv_bo_pool_alloc() argument
838 assert(bucket < ARRAY_SIZE(pool->free_list)); in anv_bo_pool_alloc()
841 if (anv_ptr_free_list_pop(&pool->free_list[bucket], &next_free_void)) { in anv_bo_pool_alloc()
848 VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size)); in anv_bo_pool_alloc()
855 result = anv_bo_init_new(&new_bo, pool->device, pow2_size); in anv_bo_pool_alloc()
861 new_bo.map = anv_gem_mmap(pool->device, new_bo.gem_handle, 0, pow2_size, 0); in anv_bo_pool_alloc()
863 anv_gem_close(pool->device, new_bo.gem_handle); in anv_bo_pool_alloc()
869 VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size)); in anv_bo_pool_alloc()
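The BO pool is a power-of-two size-class cache over GEM buffers: a request is rounded up to a power of two, the exponent picks the free list, and a miss allocates and mmaps a fresh BO. A sketch of the rounding and bucket math; the 4 KiB minimum class is stated here as an assumption:

    #include <stdint.h>
    #include <stdio.h>

    #define MIN_BO_SIZE 4096u  /* assumed smallest class */

    static uint32_t round_to_pow2(uint32_t size)
    {
       if (size <= MIN_BO_SIZE)
          return MIN_BO_SIZE;
       return 1u << (32 - __builtin_clz(size - 1));
    }

    static unsigned bucket_for(uint32_t pow2_size)
    {
       /* Exponent, rebased so the smallest class lands in bucket 0. */
       return __builtin_ctz(pow2_size) - __builtin_ctz(MIN_BO_SIZE);
    }

    int main(void)
    {
       uint32_t s = round_to_pow2(5000);                   /* 8192     */
       printf("size %u -> bucket %u\n", s, bucket_for(s)); /* bucket 1 */
       return 0;
    }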
875 anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo_in) in anv_bo_pool_free() argument
880 VG(VALGRIND_MEMPOOL_FREE(pool, bo.map)); in anv_bo_pool_free()
888 assert(bucket < ARRAY_SIZE(pool->free_list)); in anv_bo_pool_free()
890 anv_ptr_free_list_push(&pool->free_list[bucket], link); in anv_bo_pool_free()
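anv_bo_pool_free needs no node allocation: the freed BO's own CPU mapping holds the free-list link (the real bo_pool_bo_link also embeds a copy of the anv_bo so a later pop can rebuild it, as the link_copy use above shows). A single-threaded sketch of that intrusive-link idea; the driver's actual push/pop goes through the CAS-based anv_ptr_free_list helpers:

    #include <stdlib.h>
    #include <stdio.h>

    /* Minimal stand-in; the driver's link also embeds the BO struct. */
    struct bo_link { struct bo_link *next; };

    int main(void)
    {
       void *bo_map = malloc(4096);       /* stands in for the GEM mmap */
       struct bo_link *free_head = NULL;

       /* free: the BO's own mapping doubles as the list node */
       struct bo_link *link = bo_map;
       link->next = free_head;
       free_head = link;

       /* alloc: pop returns the same memory as the reused BO */
       struct bo_link *hit = free_head;
       free_head = hit->next;
       printf("reused mapping at %p\n", (void *)hit);

       free(bo_map);
       return 0;
    }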
896 anv_scratch_pool_init(struct anv_device *device, struct anv_scratch_pool *pool) in anv_scratch_pool_init() argument
898 memset(pool, 0, sizeof(*pool)); in anv_scratch_pool_init()
902 anv_scratch_pool_finish(struct anv_device *device, struct anv_scratch_pool *pool) in anv_scratch_pool_finish() argument
906 struct anv_scratch_bo *bo = &pool->bos[i][s]; in anv_scratch_pool_finish()
914 anv_scratch_pool_alloc(struct anv_device *device, struct anv_scratch_pool *pool, in anv_scratch_pool_alloc() argument
923 struct anv_scratch_bo *bo = &pool->bos[scratch_size_log2][stage]; in anv_scratch_pool_alloc()
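The scratch pool rounds out the file: a lazy two-dimensional cache with one BO per (scratch size class, shader stage) slot, filled on first use and torn down only in anv_scratch_pool_finish. A schematic of the indexing; the bounds and stage list are assumptions for the sketch:

    #define MAX_SCRATCH_SIZE_LOG2 16                /* assumed bound */
    enum stage { VS, HS, DS, GS, PS, CS, NUM_STAGES };

    struct scratch_bo {
       int exists;          /* set once the BO has been allocated */
       /* ... the BO handle itself would live here ... */
    };

    struct scratch_pool {
       struct scratch_bo bos[MAX_SCRATCH_SIZE_LOG2][NUM_STAGES];
    };

    static struct scratch_bo *
    scratch_lookup(struct scratch_pool *pool, unsigned size_log2,
                   enum stage s)
    {
       /* First use of a slot allocates the BO; later lookups reuse it.
        * The driver makes the fill-in race-safe; that part is elided. */
       return &pool->bos[size_log2][s];
    }

    int main(void)
    {
       static struct scratch_pool pool;  /* zeroed, like the memset in init */
       return scratch_lookup(&pool, 10, PS)->exists;  /* 0: not yet made */
    }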