Lines Matching full:ent

76 	if (order < cache->ent[0].order)  in order2idx()
79 return order - cache->ent[0].order; in order2idx()
122 struct mlx5_cache_ent *ent = &cache->ent[c]; in reg_mr_callback() local
128 spin_lock_irqsave(&ent->lock, flags); in reg_mr_callback()
129 ent->pending--; in reg_mr_callback()
130 spin_unlock_irqrestore(&ent->lock, flags); in reg_mr_callback()
147 spin_lock_irqsave(&ent->lock, flags); in reg_mr_callback()
148 list_add_tail(&mr->list, &ent->head); in reg_mr_callback()
149 ent->cur++; in reg_mr_callback()
150 ent->size++; in reg_mr_callback()
151 spin_unlock_irqrestore(&ent->lock, flags); in reg_mr_callback()
160 if (!completion_done(&ent->compl)) in reg_mr_callback()
161 complete(&ent->compl); in reg_mr_callback()
167 struct mlx5_cache_ent *ent = &cache->ent[c]; in add_keys() local
181 if (ent->pending >= MAX_PENDING_REG_MR) { in add_keys()
191 mr->order = ent->order; in add_keys()
197 MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3); in add_keys()
199 (ent->access_mode >> 2) & 0x7); in add_keys()
202 MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt); in add_keys()
203 MLX5_SET(mkc, mkc, log_page_size, ent->page); in add_keys()
205 spin_lock_irq(&ent->lock); in add_keys()
206 ent->pending++; in add_keys()
207 spin_unlock_irq(&ent->lock); in add_keys()
213 spin_lock_irq(&ent->lock); in add_keys()
214 ent->pending--; in add_keys()
215 spin_unlock_irq(&ent->lock); in add_keys()
229 struct mlx5_cache_ent *ent = &cache->ent[c]; in remove_keys() local
236 spin_lock_irq(&ent->lock); in remove_keys()
237 if (list_empty(&ent->head)) { in remove_keys()
238 spin_unlock_irq(&ent->lock); in remove_keys()
241 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in remove_keys()
243 ent->cur--; in remove_keys()
244 ent->size--; in remove_keys()
245 spin_unlock_irq(&ent->lock); in remove_keys()
262 struct mlx5_cache_ent *ent = filp->private_data; in size_write() local
263 struct mlx5_ib_dev *dev = ent->dev; in size_write()
273 c = order2idx(dev, ent->order); in size_write()
278 if (var < ent->limit) in size_write()
281 if (var > ent->size) { in size_write()
283 err = add_keys(dev, c, var - ent->size); in size_write()
289 } else if (var < ent->size) { in size_write()
290 remove_keys(dev, c, ent->size - var); in size_write()
299 struct mlx5_cache_ent *ent = filp->private_data; in size_read() local
303 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size); in size_read()
320 struct mlx5_cache_ent *ent = filp->private_data; in limit_write() local
321 struct mlx5_ib_dev *dev = ent->dev; in limit_write()
331 c = order2idx(dev, ent->order); in limit_write()
336 if (var > ent->size) in limit_write()
339 ent->limit = var; in limit_write()
341 if (ent->cur < ent->limit) { in limit_write()
342 err = add_keys(dev, c, 2 * ent->limit - ent->cur); in limit_write()
353 struct mlx5_cache_ent *ent = filp->private_data; in limit_read() local
357 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit); in limit_read()
376 if (cache->ent[i].cur < cache->ent[i].limit) in someone_adding()
383 static void __cache_work_func(struct mlx5_cache_ent *ent) in __cache_work_func() argument
385 struct mlx5_ib_dev *dev = ent->dev; in __cache_work_func()
387 int i = order2idx(dev, ent->order); in __cache_work_func()
393 ent = &dev->cache.ent[i]; in __cache_work_func()
394 if (ent->cur < 2 * ent->limit && !dev->fill_delay) { in __cache_work_func()
396 if (ent->cur < 2 * ent->limit) { in __cache_work_func()
400 queue_delayed_work(cache->wq, &ent->dwork, in __cache_work_func()
405 queue_delayed_work(cache->wq, &ent->dwork, in __cache_work_func()
408 queue_work(cache->wq, &ent->work); in __cache_work_func()
411 } else if (ent->cur > 2 * ent->limit) { in __cache_work_func()
427 if (ent->cur > ent->limit) in __cache_work_func()
428 queue_work(cache->wq, &ent->work); in __cache_work_func()
430 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); in __cache_work_func()
437 struct mlx5_cache_ent *ent; in delayed_cache_work_func() local
439 ent = container_of(work, struct mlx5_cache_ent, dwork.work); in delayed_cache_work_func()
440 __cache_work_func(ent); in delayed_cache_work_func()
445 struct mlx5_cache_ent *ent; in cache_work_func() local
447 ent = container_of(work, struct mlx5_cache_ent, work); in cache_work_func()
448 __cache_work_func(ent); in cache_work_func()
454 struct mlx5_cache_ent *ent; in mlx5_mr_cache_alloc() local
463 ent = &cache->ent[entry]; in mlx5_mr_cache_alloc()
465 spin_lock_irq(&ent->lock); in mlx5_mr_cache_alloc()
466 if (list_empty(&ent->head)) { in mlx5_mr_cache_alloc()
467 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_alloc()
473 wait_for_completion(&ent->compl); in mlx5_mr_cache_alloc()
475 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, in mlx5_mr_cache_alloc()
478 ent->cur--; in mlx5_mr_cache_alloc()
479 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_alloc()
480 if (ent->cur < ent->limit) in mlx5_mr_cache_alloc()
481 queue_work(cache->wq, &ent->work); in mlx5_mr_cache_alloc()
491 struct mlx5_cache_ent *ent; in alloc_cached_mr() local
504 ent = &cache->ent[i]; in alloc_cached_mr()
506 mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i); in alloc_cached_mr()
508 spin_lock_irq(&ent->lock); in alloc_cached_mr()
509 if (!list_empty(&ent->head)) { in alloc_cached_mr()
510 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, in alloc_cached_mr()
513 ent->cur--; in alloc_cached_mr()
514 spin_unlock_irq(&ent->lock); in alloc_cached_mr()
515 if (ent->cur < ent->limit) in alloc_cached_mr()
516 queue_work(cache->wq, &ent->work); in alloc_cached_mr()
519 spin_unlock_irq(&ent->lock); in alloc_cached_mr()
521 queue_work(cache->wq, &ent->work); in alloc_cached_mr()
525 cache->ent[c].miss++; in alloc_cached_mr()
533 struct mlx5_cache_ent *ent; in mlx5_mr_cache_free() local
546 ent = &cache->ent[c]; in mlx5_mr_cache_free()
547 if (ent->cur < ent->limit) in mlx5_mr_cache_free()
548 queue_work(cache->wq, &ent->work); in mlx5_mr_cache_free()
552 ent = &cache->ent[c]; in mlx5_mr_cache_free()
553 spin_lock_irq(&ent->lock); in mlx5_mr_cache_free()
554 list_add_tail(&mr->list, &ent->head); in mlx5_mr_cache_free()
555 ent->cur++; in mlx5_mr_cache_free()
556 if (ent->cur > 2 * ent->limit) in mlx5_mr_cache_free()
558 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_free()
561 queue_work(cache->wq, &ent->work); in mlx5_mr_cache_free()
567 struct mlx5_cache_ent *ent = &cache->ent[c]; in clean_keys() local
572 cancel_delayed_work(&ent->dwork); in clean_keys()
574 spin_lock_irq(&ent->lock); in clean_keys()
575 if (list_empty(&ent->head)) { in clean_keys()
576 spin_unlock_irq(&ent->lock); in clean_keys()
579 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in clean_keys()
581 ent->cur--; in clean_keys()
582 ent->size--; in clean_keys()
583 spin_unlock_irq(&ent->lock); in clean_keys()
609 struct mlx5_cache_ent *ent; in mlx5_mr_cache_debugfs_init() local
620 ent = &cache->ent[i]; in mlx5_mr_cache_debugfs_init()
621 sprintf(ent->name, "%d", ent->order); in mlx5_mr_cache_debugfs_init()
622 ent->dir = debugfs_create_dir(ent->name, cache->root); in mlx5_mr_cache_debugfs_init()
623 if (!ent->dir) in mlx5_mr_cache_debugfs_init()
626 ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent, in mlx5_mr_cache_debugfs_init()
628 if (!ent->fsize) in mlx5_mr_cache_debugfs_init()
631 ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent, in mlx5_mr_cache_debugfs_init()
633 if (!ent->flimit) in mlx5_mr_cache_debugfs_init()
636 ent->fcur = debugfs_create_u32("cur", 0400, ent->dir, in mlx5_mr_cache_debugfs_init()
637 &ent->cur); in mlx5_mr_cache_debugfs_init()
638 if (!ent->fcur) in mlx5_mr_cache_debugfs_init()
641 ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir, in mlx5_mr_cache_debugfs_init()
642 &ent->miss); in mlx5_mr_cache_debugfs_init()
643 if (!ent->fmiss) in mlx5_mr_cache_debugfs_init()
664 struct mlx5_cache_ent *ent; in mlx5_mr_cache_init() local
677 ent = &cache->ent[i]; in mlx5_mr_cache_init()
678 INIT_LIST_HEAD(&ent->head); in mlx5_mr_cache_init()
679 spin_lock_init(&ent->lock); in mlx5_mr_cache_init()
680 ent->order = i + 2; in mlx5_mr_cache_init()
681 ent->dev = dev; in mlx5_mr_cache_init()
682 ent->limit = 0; in mlx5_mr_cache_init()
684 init_completion(&ent->compl); in mlx5_mr_cache_init()
685 INIT_WORK(&ent->work, cache_work_func); in mlx5_mr_cache_init()
686 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); in mlx5_mr_cache_init()
689 mlx5_odp_init_mr_cache_entry(ent); in mlx5_mr_cache_init()
693 if (ent->order > mr_cache_max_order(dev)) in mlx5_mr_cache_init()
696 ent->page = PAGE_SHIFT; in mlx5_mr_cache_init()
697 ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) / in mlx5_mr_cache_init()
699 ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT; in mlx5_mr_cache_init()
703 ent->limit = dev->mdev->profile->mr_cache[i].limit; in mlx5_mr_cache_init()
705 ent->limit = 0; in mlx5_mr_cache_init()
706 queue_work(cache->wq, &ent->work); in mlx5_mr_cache_init()
724 struct mlx5_cache_ent *ent; in wait_for_async_commands() local
730 ent = &cache->ent[i]; in wait_for_async_commands()
732 if (!ent->pending) in wait_for_async_commands()
738 ent = &cache->ent[i]; in wait_for_async_commands()
739 total += ent->pending; in wait_for_async_commands()
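
Taken together, the matches trace the lifecycle of a per-order cache entry: add_keys()/reg_mr_callback() fill the free list (size++, cur++), alloc_cached_mr()/mlx5_mr_cache_alloc() pop from it (cur--, miss++ when the list is empty) and queue the refill worker once cur drops below limit, mlx5_mr_cache_free() pushes MRs back, and __cache_work_func() shrinks the entry once cur exceeds 2 * limit. The following is a minimal user-space sketch of that pattern, not the kernel code: the pending/completion bookkeeping, the delayed shrink work, and the workqueue are omitted, a pthread mutex stands in for ent->lock, and the names used here (struct cache_ent, refill(), cache_alloc(), cache_free()) are invented for the illustration.

/*
 * Minimal user-space sketch (an illustration, not the kernel code) of the
 * per-order cache-entry pattern visible in the matches above: a locked
 * free list plus cur/size/limit/miss counters, pop on alloc, push on free,
 * and a refill whenever cur falls below limit.
 */
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct mr {                     /* stand-in for struct mlx5_ib_mr */
	struct mr *next;
	int order;
};

struct cache_ent {              /* stand-in for struct mlx5_cache_ent */
	pthread_mutex_t lock;   /* the kernel uses a spinlock (ent->lock) */
	struct mr *head;        /* free list (ent->head) */
	unsigned int cur;       /* MRs currently cached (ent->cur) */
	unsigned int size;      /* MRs ever created for this entry (ent->size) */
	unsigned int limit;     /* low-water mark that triggers refill (ent->limit) */
	unsigned int miss;      /* allocations that found the list empty (ent->miss) */
	int order;              /* size class of the entry (ent->order) */
};

/* Refill analog of add_keys() + the cache work func: top up to 2 * limit. */
static void refill(struct cache_ent *ent)
{
	pthread_mutex_lock(&ent->lock);
	while (ent->cur < 2 * ent->limit) {
		struct mr *mr = calloc(1, sizeof(*mr));
		if (!mr)
			break;
		mr->order = ent->order;
		mr->next = ent->head;
		ent->head = mr;
		ent->cur++;
		ent->size++;
	}
	pthread_mutex_unlock(&ent->lock);
}

/* Fast-path analog of alloc_cached_mr(): pop one MR, count misses. */
static struct mr *cache_alloc(struct cache_ent *ent)
{
	struct mr *mr = NULL;
	int need_refill;

	pthread_mutex_lock(&ent->lock);
	if (ent->head) {
		mr = ent->head;
		ent->head = mr->next;
		ent->cur--;
	} else {
		ent->miss++;
	}
	need_refill = ent->cur < ent->limit;
	pthread_mutex_unlock(&ent->lock);

	if (need_refill)
		refill(ent);    /* the kernel queues ent->work instead */
	return mr;
}

/* Analog of mlx5_mr_cache_free(): return the MR to the entry's list. */
static void cache_free(struct cache_ent *ent, struct mr *mr)
{
	pthread_mutex_lock(&ent->lock);
	mr->next = ent->head;
	ent->head = mr;
	ent->cur++;
	pthread_mutex_unlock(&ent->lock);
}

int main(void)
{
	struct cache_ent ent = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.limit = 4,
		.order = 2,
	};

	refill(&ent);
	struct mr *mr = cache_alloc(&ent);
	printf("got order-%d MR, cur=%u size=%u miss=%u\n",
	       mr ? mr->order : -1, ent.cur, ent.size, ent.miss);
	if (mr)
		cache_free(&ent, mr);
	return 0;
}

One difference worth noting: the sketch refills inline, whereas the matches show the kernel queueing ent->work (or ent->dwork with a delay) so that MR creation, which goes through add_keys() and asynchronous firmware completion in reg_mr_callback(), never runs in the allocation path.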