Lines matching refs:pool in drivers/infiniband/core/fmr_pool.c
95 void (*flush_function)(struct ib_fmr_pool *pool,
114 static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool, in ib_fmr_cache_lookup() argument
122 if (!pool->cache_bucket) in ib_fmr_cache_lookup()
125 bucket = pool->cache_bucket + ib_fmr_hash(*page_list); in ib_fmr_cache_lookup()
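
The matches at lines 114-125 show that the per-pool cache is optional (a NULL cache_bucket disables it) and that a bucket is selected by hashing the first page address. Below is a plausible reconstruction of the whole lookup from those matches; the key-comparison fields (cache_node, io_virtual_address, page_list_len) do not appear in the listing and are assumptions:

    static struct ib_pool_fmr *cache_lookup_sketch(struct ib_fmr_pool *pool,
                                                   u64 *page_list,
                                                   int page_list_len,
                                                   u64 io_virtual_address)
    {
        struct hlist_head *bucket;
        struct ib_pool_fmr *fmr;

        if (!pool->cache_bucket)    /* caching disabled for this pool */
            return NULL;

        /* The first page address selects the bucket; the full key is
         * then compared entry by entry. */
        bucket = pool->cache_bucket + ib_fmr_hash(*page_list);

        hlist_for_each_entry(fmr, bucket, cache_node)
            if (io_virtual_address == fmr->io_virtual_address &&
                page_list_len == fmr->page_list_len &&
                !memcmp(page_list, fmr->page_list,
                        page_list_len * sizeof *page_list))
                return fmr;

        return NULL;
    }

A cache hit lets ib_fmr_pool_map_phys() (lines 416-475 below) reuse an existing mapping without touching the hardware at all.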
137 static void ib_fmr_batch_release(struct ib_fmr_pool *pool) in ib_fmr_batch_release() argument
144 spin_lock_irq(&pool->pool_lock); in ib_fmr_batch_release()
146 list_for_each_entry(fmr, &pool->dirty_list, list) { in ib_fmr_batch_release()
159 list_splice_init(&pool->dirty_list, &unmap_list); in ib_fmr_batch_release()
160 pool->dirty_len = 0; in ib_fmr_batch_release()
162 spin_unlock_irq(&pool->pool_lock); in ib_fmr_batch_release()
172 spin_lock_irq(&pool->pool_lock); in ib_fmr_batch_release()
173 list_splice(&unmap_list, &pool->free_list); in ib_fmr_batch_release()
174 spin_unlock_irq(&pool->pool_lock); in ib_fmr_batch_release()
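
Lines 144-174 outline a splice-out/work/splice-back pattern: the dirty list is detached under pool_lock, the expensive verbs call runs with the lock dropped, and the entries return to the free list afterwards. A hedged reconstruction, where the cache eviction and error handling come from the elided lines and are assumptions:

    static void batch_release_sketch(struct ib_fmr_pool *pool)
    {
        struct ib_pool_fmr *fmr;
        LIST_HEAD(unmap_list);
        LIST_HEAD(fmr_list);

        spin_lock_irq(&pool->pool_lock);

        /* Drop dirty entries from the lookup cache and collect the
         * underlying struct ib_fmr objects for one verbs call. */
        list_for_each_entry(fmr, &pool->dirty_list, list) {
            hlist_del_init(&fmr->cache_node);
            fmr->remap_count = 0;
            list_add_tail(&fmr->fmr->list, &fmr_list);
        }

        list_splice_init(&pool->dirty_list, &unmap_list);
        pool->dirty_len = 0;

        spin_unlock_irq(&pool->pool_lock);

        if (list_empty(&unmap_list))
            return;

        /* The expensive invalidate runs outside the spinlock. */
        if (ib_unmap_fmr(&fmr_list))
            pr_warn("ib_unmap_fmr returned an error\n");

        spin_lock_irq(&pool->pool_lock);
        list_splice(&unmap_list, &pool->free_list);
        spin_unlock_irq(&pool->pool_lock);
    }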
179 struct ib_fmr_pool *pool = pool_ptr; in ib_fmr_cleanup_thread() local
182 if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) { in ib_fmr_cleanup_thread()
183 ib_fmr_batch_release(pool); in ib_fmr_cleanup_thread()
185 atomic_inc(&pool->flush_ser); in ib_fmr_cleanup_thread()
186 wake_up_interruptible(&pool->force_wait); in ib_fmr_cleanup_thread()
188 if (pool->flush_function) in ib_fmr_cleanup_thread()
189 pool->flush_function(pool, pool->flush_arg); in ib_fmr_cleanup_thread()
193 if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 && in ib_fmr_cleanup_thread()
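
The cleanup thread at lines 179-193 pairs two counters: callers advance req_ser to request a flush, and the thread advances flush_ser after completing one, waking any waiter on force_wait. A sketch of the loop as the matches suggest it fits together; the scheduling boilerplate around schedule() is an assumption:

    static int cleanup_thread_sketch(void *pool_ptr)
    {
        struct ib_fmr_pool *pool = pool_ptr;

        do {
            /* Signed difference: correct even if the counters wrap. */
            if (atomic_read(&pool->flush_ser) -
                atomic_read(&pool->req_ser) < 0) {
                ib_fmr_batch_release(pool);
                atomic_inc(&pool->flush_ser);
                wake_up_interruptible(&pool->force_wait);
                if (pool->flush_function)
                    pool->flush_function(pool, pool->flush_arg);
            }

            /* Sleep only while no flush request is pending. */
            set_current_state(TASK_INTERRUPTIBLE);
            if (atomic_read(&pool->flush_ser) -
                atomic_read(&pool->req_ser) >= 0 &&
                !kthread_should_stop())
                schedule();
            __set_current_state(TASK_RUNNING);
        } while (!kthread_should_stop());

        return 0;
    }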
214 struct ib_fmr_pool *pool; in ib_create_fmr_pool() local
234 pool = kmalloc(sizeof *pool, GFP_KERNEL); in ib_create_fmr_pool()
235 if (!pool) in ib_create_fmr_pool()
238 pool->cache_bucket = NULL; in ib_create_fmr_pool()
239 pool->flush_function = params->flush_function; in ib_create_fmr_pool()
240 pool->flush_arg = params->flush_arg; in ib_create_fmr_pool()
242 INIT_LIST_HEAD(&pool->free_list); in ib_create_fmr_pool()
243 INIT_LIST_HEAD(&pool->dirty_list); in ib_create_fmr_pool()
246 pool->cache_bucket = in ib_create_fmr_pool()
247 kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket, in ib_create_fmr_pool()
249 if (!pool->cache_bucket) { in ib_create_fmr_pool()
256 INIT_HLIST_HEAD(pool->cache_bucket + i); in ib_create_fmr_pool()
259 pool->pool_size = 0; in ib_create_fmr_pool()
260 pool->max_pages = params->max_pages_per_fmr; in ib_create_fmr_pool()
261 pool->max_remaps = max_remaps; in ib_create_fmr_pool()
262 pool->dirty_watermark = params->dirty_watermark; in ib_create_fmr_pool()
263 pool->dirty_len = 0; in ib_create_fmr_pool()
264 spin_lock_init(&pool->pool_lock); in ib_create_fmr_pool()
265 atomic_set(&pool->req_ser, 0); in ib_create_fmr_pool()
266 atomic_set(&pool->flush_ser, 0); in ib_create_fmr_pool()
267 init_waitqueue_head(&pool->force_wait); in ib_create_fmr_pool()
269 pool->thread = kthread_run(ib_fmr_cleanup_thread, in ib_create_fmr_pool()
270 pool, in ib_create_fmr_pool()
273 if (IS_ERR(pool->thread)) { in ib_create_fmr_pool()
275 ret = PTR_ERR(pool->thread); in ib_create_fmr_pool()
283 .max_maps = pool->max_remaps, in ib_create_fmr_pool()
288 if (pool->cache_bucket) in ib_create_fmr_pool()
296 fmr->pool = pool; in ib_create_fmr_pool()
309 list_add_tail(&fmr->list, &pool->free_list); in ib_create_fmr_pool()
310 ++pool->pool_size; in ib_create_fmr_pool()
314 return pool; in ib_create_fmr_pool()
317 kfree(pool->cache_bucket); in ib_create_fmr_pool()
318 kfree(pool); in ib_create_fmr_pool()
323 ib_destroy_fmr_pool(pool); in ib_create_fmr_pool()
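
A minimal consumer sketch of pool creation, assuming only a valid struct ib_pd and <rdma/ib_fmr_pool.h>; the numeric parameters are illustrative, not recommendations:

    #include <rdma/ib_fmr_pool.h>

    /* Optional callback; runs in the cleanup thread after each batch. */
    static void example_flush(struct ib_fmr_pool *pool, void *arg)
    {
    }

    static struct ib_fmr_pool *example_create(struct ib_pd *pd)
    {
        struct ib_fmr_pool_param params = {
            .max_pages_per_fmr = 64,
            .page_shift        = PAGE_SHIFT,
            .access            = IB_ACCESS_LOCAL_WRITE |
                                 IB_ACCESS_REMOTE_READ |
                                 IB_ACCESS_REMOTE_WRITE,
            .pool_size         = 1024,            /* FMRs preallocated   */
            .dirty_watermark   = 32,              /* batch flush trigger */
            .flush_function    = example_flush,   /* may be NULL         */
            .flush_arg         = NULL,
            .cache             = 1,  /* allocate cache_bucket (line 246) */
        };

        return ib_create_fmr_pool(pd, &params);   /* ERR_PTR on failure */
    }

Note that ib_create_fmr_pool() reports failure with an ERR_PTR value, not NULL, so callers should test with IS_ERR().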
335 void ib_destroy_fmr_pool(struct ib_fmr_pool *pool) in ib_destroy_fmr_pool() argument
342 kthread_stop(pool->thread); in ib_destroy_fmr_pool()
343 ib_fmr_batch_release(pool); in ib_destroy_fmr_pool()
346 list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) { in ib_destroy_fmr_pool()
358 if (i < pool->pool_size) in ib_destroy_fmr_pool()
360 pool->pool_size - i); in ib_destroy_fmr_pool()
362 kfree(pool->cache_bucket); in ib_destroy_fmr_pool()
363 kfree(pool); in ib_destroy_fmr_pool()
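
Teardown at lines 335-363 is strictly ordered: stop the kthread first so no flush races the frees, batch-release anything still dirty, then destroy each pooled FMR, warning when fewer entries were freed than pool_size (a mapping leaked by the consumer). A simplified sketch; the ib_dealloc_fmr()/kfree() loop body comes from the elided lines and is an assumption:

    static void destroy_sketch(struct ib_fmr_pool *pool)
    {
        struct ib_pool_fmr *fmr, *tmp;
        int i = 0;

        kthread_stop(pool->thread);     /* no more background flushes */
        ib_fmr_batch_release(pool);     /* drain the dirty list */

        list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
            ib_dealloc_fmr(fmr->fmr);   /* release the verbs object */
            list_del(&fmr->list);
            kfree(fmr);
            ++i;
        }

        if (i < pool->pool_size)
            pr_warn("pool still has %d regions registered\n",
                    pool->pool_size - i);

        kfree(pool->cache_bucket);
        kfree(pool);
    }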
373 int ib_flush_fmr_pool(struct ib_fmr_pool *pool) in ib_flush_fmr_pool() argument
384 spin_lock_irq(&pool->pool_lock); in ib_flush_fmr_pool()
385 list_for_each_entry_safe(fmr, next, &pool->free_list, list) { in ib_flush_fmr_pool()
387 list_move(&fmr->list, &pool->dirty_list); in ib_flush_fmr_pool()
389 spin_unlock_irq(&pool->pool_lock); in ib_flush_fmr_pool()
391 serial = atomic_inc_return(&pool->req_ser); in ib_flush_fmr_pool()
392 wake_up_process(pool->thread); in ib_flush_fmr_pool()
394 if (wait_event_interruptible(pool->force_wait, in ib_flush_fmr_pool()
395 atomic_read(&pool->flush_ser) - serial >= 0)) in ib_flush_fmr_pool()
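
ib_flush_fmr_pool() implements the requester side of the serial handshake: queue the reusable free entries for unmapping, publish a new request number, kick the thread, and sleep until flush_ser catches up. The signed difference keeps the comparison safe across counter wraparound. Reassembled from the matches; the remap_count test (elided line 386) and the -EINTR mapping are assumptions:

    static int flush_sketch(struct ib_fmr_pool *pool)
    {
        struct ib_pool_fmr *fmr, *next;
        int serial;

        /* Only entries that were actually mapped since the last
         * flush need a real invalidate. */
        spin_lock_irq(&pool->pool_lock);
        list_for_each_entry_safe(fmr, next, &pool->free_list, list)
            if (fmr->remap_count > 0)
                list_move(&fmr->list, &pool->dirty_list);
        spin_unlock_irq(&pool->pool_lock);

        serial = atomic_inc_return(&pool->req_ser);
        wake_up_process(pool->thread);

        if (wait_event_interruptible(pool->force_wait,
                                     atomic_read(&pool->flush_ser) -
                                     serial >= 0))
            return -EINTR;

        return 0;
    }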
416 struct ib_fmr_pool *pool = pool_handle; in ib_fmr_pool_map_phys() local
421 if (list_len < 1 || list_len > pool->max_pages) in ib_fmr_pool_map_phys()
424 spin_lock_irqsave(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
425 fmr = ib_fmr_cache_lookup(pool, in ib_fmr_pool_map_phys()
436 spin_unlock_irqrestore(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
441 if (list_empty(&pool->free_list)) { in ib_fmr_pool_map_phys()
442 spin_unlock_irqrestore(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
446 fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list); in ib_fmr_pool_map_phys()
449 spin_unlock_irqrestore(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
455 spin_lock_irqsave(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
456 list_add(&fmr->list, &pool->free_list); in ib_fmr_pool_map_phys()
457 spin_unlock_irqrestore(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
467 if (pool->cache_bucket) { in ib_fmr_pool_map_phys()
472 spin_lock_irqsave(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
474 pool->cache_bucket + ib_fmr_hash(fmr->page_list[0])); in ib_fmr_pool_map_phys()
475 spin_unlock_irqrestore(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
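
A caller-side sketch for ib_fmr_pool_map_phys(); dma_pages, npages, and iova are hypothetical variables standing in for a DMA-mapped page array:

    #include <rdma/ib_fmr_pool.h>

    /* dma_pages: array of npages DMA addresses; iova: the I/O virtual
     * address the HCA should expose for this mapping. */
    static int map_example(struct ib_fmr_pool *pool, u64 *dma_pages,
                           int npages, u64 iova, u32 *rkey)
    {
        struct ib_pool_fmr *pfmr;

        pfmr = ib_fmr_pool_map_phys(pool, dma_pages, npages, iova);
        if (IS_ERR(pfmr))
            return PTR_ERR(pfmr);

        *rkey = pfmr->fmr->rkey;    /* use in RDMA work requests */
        return 0;
    }

Per lines 424-475, a cache hit returns an already-mapped entry under the spinlock; only a miss consumes a free-list entry and issues a real (re)map, and a failed map puts the entry straight back on the free list.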
491 struct ib_fmr_pool *pool; in ib_fmr_pool_unmap() local
494 pool = fmr->pool; in ib_fmr_pool_unmap()
496 spin_lock_irqsave(&pool->pool_lock, flags); in ib_fmr_pool_unmap()
500 if (fmr->remap_count < pool->max_remaps) { in ib_fmr_pool_unmap()
501 list_add_tail(&fmr->list, &pool->free_list); in ib_fmr_pool_unmap()
503 list_add_tail(&fmr->list, &pool->dirty_list); in ib_fmr_pool_unmap()
504 if (++pool->dirty_len >= pool->dirty_watermark) { in ib_fmr_pool_unmap()
505 atomic_inc(&pool->req_ser); in ib_fmr_pool_unmap()
506 wake_up_process(pool->thread); in ib_fmr_pool_unmap()
517 spin_unlock_irqrestore(&pool->pool_lock, flags); in ib_fmr_pool_unmap()
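
Unmapping is deliberately cheap for the caller: per lines 500-506 the entry is recycled onto free_list until it reaches max_remaps, and only then queued on dirty_list for a real invalidate, with the cleanup thread kicked once dirty_len crosses the watermark. The caller side is a single call, continuing the map sketch above:

    /* Release the mapping taken in map_example(); the entry normally
     * stays cached in the pool, ready for immediate reuse. */
    ib_fmr_pool_unmap(pfmr);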