Lines matching refs:pool in drivers/infiniband/core/fmr_pool.c
95 void (*flush_function)(struct ib_fmr_pool *pool,
115 static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool, in ib_fmr_cache_lookup() argument
123 if (!pool->cache_bucket) in ib_fmr_cache_lookup()
126 bucket = pool->cache_bucket + ib_fmr_hash(*page_list); in ib_fmr_cache_lookup()
138 static void ib_fmr_batch_release(struct ib_fmr_pool *pool) in ib_fmr_batch_release() argument
145 spin_lock_irq(&pool->pool_lock); in ib_fmr_batch_release()
147 list_for_each_entry(fmr, &pool->dirty_list, list) { in ib_fmr_batch_release()
160 list_splice_init(&pool->dirty_list, &unmap_list); in ib_fmr_batch_release()
161 pool->dirty_len = 0; in ib_fmr_batch_release()
163 spin_unlock_irq(&pool->pool_lock); in ib_fmr_batch_release()
173 spin_lock_irq(&pool->pool_lock); in ib_fmr_batch_release()
174 list_splice(&unmap_list, &pool->free_list); in ib_fmr_batch_release()
175 spin_unlock_irq(&pool->pool_lock); in ib_fmr_batch_release()
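The ib_fmr_batch_release() hits above show the splice-under-lock pattern: the whole dirty_list is stolen onto a private list while pool_lock is held, the lock is dropped for the blocking unmap, and the entries are then returned to free_list. A minimal sketch of that pattern, using hypothetical my_pool/my_entry/my_unmap_all() names rather than the FMR pool internals:

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_entry {
	struct list_head list;
};

struct my_pool {
	spinlock_t       pool_lock;
	struct list_head dirty_list;
	struct list_head free_list;
	int              dirty_len;
};

/* Stand-in for ib_unmap_fmr(), which may sleep. */
static void my_unmap_all(struct list_head *entries)
{
}

static void my_batch_release(struct my_pool *pool)
{
	LIST_HEAD(unmap_list);

	spin_lock_irq(&pool->pool_lock);
	list_splice_init(&pool->dirty_list, &unmap_list); /* steal all dirty entries */
	pool->dirty_len = 0;
	spin_unlock_irq(&pool->pool_lock);

	my_unmap_all(&unmap_list);                        /* may sleep; lock is dropped */

	spin_lock_irq(&pool->pool_lock);
	list_splice(&unmap_list, &pool->free_list);       /* recycle onto the free list */
	spin_unlock_irq(&pool->pool_lock);
}

Splicing first keeps the irq-disabled critical section short; the potentially slow unmap runs with the lock released.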
180 struct ib_fmr_pool *pool = container_of(work, struct ib_fmr_pool, work); in ib_fmr_cleanup_func() local
182 ib_fmr_batch_release(pool); in ib_fmr_cleanup_func()
183 atomic_inc(&pool->flush_ser); in ib_fmr_cleanup_func()
184 wake_up_interruptible(&pool->force_wait); in ib_fmr_cleanup_func()
186 if (pool->flush_function) in ib_fmr_cleanup_func()
187 pool->flush_function(pool, pool->flush_arg); in ib_fmr_cleanup_func()
189 if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) in ib_fmr_cleanup_func()
190 kthread_queue_work(pool->worker, &pool->work); in ib_fmr_cleanup_func()
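The ib_fmr_cleanup_func() hits show the serial-number handshake between flush requesters and the kthread worker: req_ser counts requested flushes, flush_ser counts completed passes, and signed subtraction keeps the comparison wrap-safe. A hedged sketch of both sides of that handshake, with illustrative my_* names (field setup mirrors the ib_create_fmr_pool() hits further down):

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/wait.h>

struct my_flusher {
	atomic_t               req_ser;    /* flush passes requested */
	atomic_t               flush_ser;  /* flush passes completed */
	wait_queue_head_t      force_wait;
	struct kthread_worker *worker;     /* from kthread_create_worker() */
	struct kthread_work    work;       /* from kthread_init_work()     */
};

/* Worker side: do one pass per invocation, requeue while behind. */
static void my_cleanup_func(struct kthread_work *work)
{
	struct my_flusher *f = container_of(work, struct my_flusher, work);

	/* ... one batch of cleanup work here ... */

	atomic_inc(&f->flush_ser);
	wake_up_interruptible(&f->force_wait);

	if (atomic_read(&f->flush_ser) - atomic_read(&f->req_ser) < 0)
		kthread_queue_work(f->worker, &f->work);
}

/* Requester side: ask for one more pass and wait until it completes. */
static int my_force_flush(struct my_flusher *f)
{
	int serial = atomic_inc_return(&f->req_ser);

	kthread_queue_work(f->worker, &f->work);

	if (wait_event_interruptible(f->force_wait,
				     atomic_read(&f->flush_ser) - serial >= 0))
		return -EINTR;

	return 0;
}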
205 struct ib_fmr_pool *pool; in ib_create_fmr_pool() local
225 pool = kmalloc(sizeof *pool, GFP_KERNEL); in ib_create_fmr_pool()
226 if (!pool) in ib_create_fmr_pool()
229 pool->cache_bucket = NULL; in ib_create_fmr_pool()
230 pool->flush_function = params->flush_function; in ib_create_fmr_pool()
231 pool->flush_arg = params->flush_arg; in ib_create_fmr_pool()
233 INIT_LIST_HEAD(&pool->free_list); in ib_create_fmr_pool()
234 INIT_LIST_HEAD(&pool->dirty_list); in ib_create_fmr_pool()
237 pool->cache_bucket = in ib_create_fmr_pool()
238 kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket, in ib_create_fmr_pool()
240 if (!pool->cache_bucket) { in ib_create_fmr_pool()
246 INIT_HLIST_HEAD(pool->cache_bucket + i); in ib_create_fmr_pool()
249 pool->pool_size = 0; in ib_create_fmr_pool()
250 pool->max_pages = params->max_pages_per_fmr; in ib_create_fmr_pool()
251 pool->max_remaps = max_remaps; in ib_create_fmr_pool()
252 pool->dirty_watermark = params->dirty_watermark; in ib_create_fmr_pool()
253 pool->dirty_len = 0; in ib_create_fmr_pool()
254 spin_lock_init(&pool->pool_lock); in ib_create_fmr_pool()
255 atomic_set(&pool->req_ser, 0); in ib_create_fmr_pool()
256 atomic_set(&pool->flush_ser, 0); in ib_create_fmr_pool()
257 init_waitqueue_head(&pool->force_wait); in ib_create_fmr_pool()
259 pool->worker = kthread_create_worker(0, "ib_fmr(%s)", device->name); in ib_create_fmr_pool()
260 if (IS_ERR(pool->worker)) { in ib_create_fmr_pool()
262 ret = PTR_ERR(pool->worker); in ib_create_fmr_pool()
265 kthread_init_work(&pool->work, ib_fmr_cleanup_func); in ib_create_fmr_pool()
271 .max_maps = pool->max_remaps, in ib_create_fmr_pool()
276 if (pool->cache_bucket) in ib_create_fmr_pool()
284 fmr->pool = pool; in ib_create_fmr_pool()
297 list_add_tail(&fmr->list, &pool->free_list); in ib_create_fmr_pool()
298 ++pool->pool_size; in ib_create_fmr_pool()
302 return pool; in ib_create_fmr_pool()
305 kfree(pool->cache_bucket); in ib_create_fmr_pool()
306 kfree(pool); in ib_create_fmr_pool()
311 ib_destroy_fmr_pool(pool); in ib_create_fmr_pool()
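The ib_create_fmr_pool() hits cover pool construction. A hedged consumer-side sketch of creating a pool on an existing protection domain, assuming the ib_fmr_pool_param layout from <rdma/ib_fmr_pool.h>; the sizes and the my_create_pool()/my_flush_cb() names are illustrative:

#include <linux/err.h>
#include <linux/printk.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_fmr_pool.h>

/* Hypothetical flush callback; runs from the pool's cleanup worker. */
static void my_flush_cb(struct ib_fmr_pool *pool, void *arg)
{
	/* e.g. complete() a completion a waiter is blocked on */
}

static struct ib_fmr_pool *my_create_pool(struct ib_pd *pd)
{
	struct ib_fmr_pool_param params = {
		.max_pages_per_fmr = 64,          /* assumed max transfer size */
		.page_shift        = PAGE_SHIFT,
		.access            = IB_ACCESS_LOCAL_WRITE |
				     IB_ACCESS_REMOTE_READ |
				     IB_ACCESS_REMOTE_WRITE,
		.pool_size         = 1024,        /* FMRs preallocated         */
		.dirty_watermark   = 32,          /* batch size before a flush */
		.flush_function    = my_flush_cb, /* optional, may be NULL     */
		.flush_arg         = NULL,
		.cache             = 1,           /* enable the mapping cache  */
	};
	struct ib_fmr_pool *pool;

	pool = ib_create_fmr_pool(pd, &params);
	if (IS_ERR(pool))
		pr_warn("FMR pool creation failed: %ld\n", PTR_ERR(pool));

	return pool;
}

With .cache enabled, the pool allocates the hash buckets seen in the lines above (237–246), so repeated mappings of the same page list can be reused.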
323 void ib_destroy_fmr_pool(struct ib_fmr_pool *pool) in ib_destroy_fmr_pool() argument
330 kthread_destroy_worker(pool->worker); in ib_destroy_fmr_pool()
331 ib_fmr_batch_release(pool); in ib_destroy_fmr_pool()
334 list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) { in ib_destroy_fmr_pool()
346 if (i < pool->pool_size) in ib_destroy_fmr_pool()
348 pool->pool_size - i); in ib_destroy_fmr_pool()
350 kfree(pool->cache_bucket); in ib_destroy_fmr_pool()
351 kfree(pool); in ib_destroy_fmr_pool()
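ib_destroy_fmr_pool() is the matching teardown; the pool_size - i warning above fires if FMRs handed out by ib_fmr_pool_map_phys() were never returned. A hedged teardown sketch (my_teardown() is an illustrative name):

#include <rdma/ib_fmr_pool.h>

/*
 * Every FMR obtained from ib_fmr_pool_map_phys() must already have been
 * released with ib_fmr_pool_unmap(); the pool then unmaps and frees all
 * of its FMRs and stops the cleanup worker.
 */
static void my_teardown(struct ib_fmr_pool *pool)
{
	ib_destroy_fmr_pool(pool);
}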
361 int ib_flush_fmr_pool(struct ib_fmr_pool *pool) in ib_flush_fmr_pool() argument
372 spin_lock_irq(&pool->pool_lock); in ib_flush_fmr_pool()
373 list_for_each_entry_safe(fmr, next, &pool->free_list, list) { in ib_flush_fmr_pool()
375 list_move(&fmr->list, &pool->dirty_list); in ib_flush_fmr_pool()
377 spin_unlock_irq(&pool->pool_lock); in ib_flush_fmr_pool()
379 serial = atomic_inc_return(&pool->req_ser); in ib_flush_fmr_pool()
380 kthread_queue_work(pool->worker, &pool->work); in ib_flush_fmr_pool()
382 if (wait_event_interruptible(pool->force_wait, in ib_flush_fmr_pool()
383 atomic_read(&pool->flush_ser) - serial >= 0)) in ib_flush_fmr_pool()
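ib_flush_fmr_pool() moves every unreferenced FMR onto the dirty list, queues the cleanup worker, and sleeps until flush_ser catches up with the requested serial. A hedged usage sketch for a caller that must not reuse memory until all dirty mappings are gone (my_reclaim_buffers() is illustrative):

#include <rdma/ib_fmr_pool.h>

static int my_reclaim_buffers(struct ib_fmr_pool *pool)
{
	int ret;

	/* Queues the cleanup worker and sleeps until it has run. */
	ret = ib_flush_fmr_pool(pool);
	if (ret)
		return ret;	/* -EINTR if the wait was interrupted */

	/* Memory previously exposed through the pool is now safe to reuse. */
	return 0;
}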
404 struct ib_fmr_pool *pool = pool_handle; in ib_fmr_pool_map_phys() local
409 if (list_len < 1 || list_len > pool->max_pages) in ib_fmr_pool_map_phys()
412 spin_lock_irqsave(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
413 fmr = ib_fmr_cache_lookup(pool, in ib_fmr_pool_map_phys()
424 spin_unlock_irqrestore(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
429 if (list_empty(&pool->free_list)) { in ib_fmr_pool_map_phys()
430 spin_unlock_irqrestore(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
434 fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list); in ib_fmr_pool_map_phys()
437 spin_unlock_irqrestore(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
443 spin_lock_irqsave(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
444 list_add(&fmr->list, &pool->free_list); in ib_fmr_pool_map_phys()
445 spin_unlock_irqrestore(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
455 if (pool->cache_bucket) { in ib_fmr_pool_map_phys()
460 spin_lock_irqsave(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
462 pool->cache_bucket + ib_fmr_hash(fmr->page_list[0])); in ib_fmr_pool_map_phys()
463 spin_unlock_irqrestore(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
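The ib_fmr_pool_map_phys() hits show the map path: a cache hit reuses an already-mapped FMR, otherwise one is pulled off free_list and mapped, and it is put back on free_list if the hardware map fails. A hedged consumer-side sketch (my_register() and its parameters are illustrative):

#include <linux/err.h>
#include <linux/types.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_fmr_pool.h>

/*
 * Map a list of page-aligned DMA addresses through the pool and hand
 * back the keys to advertise to the remote side.
 */
static struct ib_pool_fmr *my_register(struct ib_fmr_pool *pool,
				       u64 *pages, int npages, u64 iova,
				       u32 *lkey, u32 *rkey)
{
	struct ib_pool_fmr *fmr;

	fmr = ib_fmr_pool_map_phys(pool, pages, npages, iova);
	if (IS_ERR(fmr))
		return fmr;	/* -EINVAL, -EAGAIN (pool empty), or map error */

	*lkey = fmr->fmr->lkey;
	*rkey = fmr->fmr->rkey;
	return fmr;
}

When caching is enabled, a second call with the same page list and I/O virtual address finds the entry via ib_fmr_cache_lookup() and only takes a reference, which is what the cache_bucket insertion at line 462 above makes possible.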
479 struct ib_fmr_pool *pool; in ib_fmr_pool_unmap() local
482 pool = fmr->pool; in ib_fmr_pool_unmap()
484 spin_lock_irqsave(&pool->pool_lock, flags); in ib_fmr_pool_unmap()
488 if (fmr->remap_count < pool->max_remaps) { in ib_fmr_pool_unmap()
489 list_add_tail(&fmr->list, &pool->free_list); in ib_fmr_pool_unmap()
491 list_add_tail(&fmr->list, &pool->dirty_list); in ib_fmr_pool_unmap()
492 if (++pool->dirty_len >= pool->dirty_watermark) { in ib_fmr_pool_unmap()
493 atomic_inc(&pool->req_ser); in ib_fmr_pool_unmap()
494 kthread_queue_work(pool->worker, &pool->work); in ib_fmr_pool_unmap()
505 spin_unlock_irqrestore(&pool->pool_lock, flags); in ib_fmr_pool_unmap()
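The ib_fmr_pool_unmap() hits show the release side: an FMR with remaps left goes straight back on free_list, otherwise it is queued on dirty_list, and the worker is kicked once dirty_watermark entries have accumulated. A hedged release sketch pairing with my_register() above (my_unregister() is illustrative):

#include <linux/printk.h>
#include <rdma/ib_fmr_pool.h>

/*
 * Once the remote operation using the rkey has completed, drop the
 * mapping; the pool recycles the FMR or batches it for unmapping.
 */
static void my_unregister(struct ib_pool_fmr *fmr)
{
	int ret;

	ret = ib_fmr_pool_unmap(fmr);
	if (ret)
		pr_warn("ib_fmr_pool_unmap failed: %d\n", ret);
}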