Lines matching refs: ent (struct mlx5_cache_ent, the per-entry state of the mlx5 MR cache)

112 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
138 struct mlx5_cache_ent *ent = mr->cache_ent; in create_mkey_callback() local
144 spin_lock_irqsave(&ent->lock, flags); in create_mkey_callback()
145 ent->pending--; in create_mkey_callback()
147 spin_unlock_irqrestore(&ent->lock, flags); in create_mkey_callback()
158 spin_lock_irqsave(&ent->lock, flags); in create_mkey_callback()
159 list_add_tail(&mr->list, &ent->head); in create_mkey_callback()
160 ent->available_mrs++; in create_mkey_callback()
161 ent->total_mrs++; in create_mkey_callback()
163 queue_adjust_cache_locked(ent); in create_mkey_callback()
164 ent->pending--; in create_mkey_callback()
165 spin_unlock_irqrestore(&ent->lock, flags); in create_mkey_callback()
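The lines above are the completion side of asynchronous mkey creation: create_mkey_callback() takes ent->lock, drops ent->pending, and on success links the new MR onto ent->head, bumps available_mrs and total_mrs, and re-runs queue_adjust_cache_locked(). Below is a minimal user-space model of that bookkeeping; the struct and function names are illustrative, not the kernel's, and locking is only noted in comments.

    #include <stdio.h>

    /* Illustrative model of a cache entry's counters; not the kernel struct. */
    struct ent_model {
        unsigned int pending;       /* async creations still in flight        */
        unsigned int available_mrs; /* MRs sitting on ent->head               */
        unsigned int total_mrs;     /* MRs owned by the entry, cached or not  */
    };

    /* Completion of one async mkey creation; status == 0 means success.
     * In the kernel this bookkeeping runs under ent->lock (irqsave). */
    static void mkey_done(struct ent_model *e, int status)
    {
        e->pending--;
        if (status == 0) {
            e->available_mrs++;     /* the MR was list_add_tail()'d */
            e->total_mrs++;
            /* queue_adjust_cache_locked() would be re-run here */
        }
    }

    int main(void)
    {
        struct ent_model e = { .pending = 2 };

        mkey_done(&e, 0);           /* success: counters grow    */
        mkey_done(&e, -1);          /* failure: only pending--   */
        printf("pending=%u available=%u total=%u\n",
               e.pending, e.available_mrs, e.total_mrs);
        return 0;
    }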
168 static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc) in alloc_cache_mr() argument
175 mr->order = ent->order; in alloc_cache_mr()
176 mr->cache_ent = ent; in alloc_cache_mr()
177 mr->dev = ent->dev; in alloc_cache_mr()
179 set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd); in alloc_cache_mr()
182 MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3); in alloc_cache_mr()
183 MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7); in alloc_cache_mr()
185 MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt); in alloc_cache_mr()
186 MLX5_SET(mkc, mkc, log_page_size, ent->page); in alloc_cache_mr()
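alloc_cache_mr() stamps the mkey context from the entry's parameters; notably the entry's access mode is split across two mkc fields, the low two bits into access_mode_1_0 and the next three bits into access_mode_4_2, exactly as lines 182-183 show. A small sketch of that bit split (the helper name and the example mode value are illustrative):

    #include <stdio.h>

    /* Split a 5-bit access mode the way the mkc layout expects:
     * bits [1:0] go to access_mode_1_0, bits [4:2] to access_mode_4_2. */
    static void split_access_mode(unsigned int mode,
                                  unsigned int *lo2, unsigned int *hi3)
    {
        *lo2 = mode & 0x3;
        *hi3 = (mode >> 2) & 0x7;
    }

    int main(void)
    {
        unsigned int lo, hi;

        split_access_mode(0x1 /* e.g. an MTT-style mode */, &lo, &hi);
        printf("access_mode_1_0=%u access_mode_4_2=%u\n", lo, hi);
        return 0;
    }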
191 static int add_keys(struct mlx5_cache_ent *ent, unsigned int num) in add_keys() argument
206 mr = alloc_cache_mr(ent, mkc); in add_keys()
211 spin_lock_irq(&ent->lock); in add_keys()
212 if (ent->pending >= MAX_PENDING_REG_MR) { in add_keys()
214 spin_unlock_irq(&ent->lock); in add_keys()
218 ent->pending++; in add_keys()
219 spin_unlock_irq(&ent->lock); in add_keys()
220 err = mlx5_ib_create_mkey_cb(ent->dev, &mr->mmkey, in add_keys()
221 &ent->dev->async_ctx, in, inlen, in add_keys()
225 spin_lock_irq(&ent->lock); in add_keys()
226 ent->pending--; in add_keys()
227 spin_unlock_irq(&ent->lock); in add_keys()
228 mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err); in add_keys()
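add_keys() issues up to num asynchronous mkey creations but never allows more than MAX_PENDING_REG_MR to be outstanding per entry: pending is checked and incremented under ent->lock before each mlx5_ib_create_mkey_cb() call and rolled back if the submit fails. A user-space model of that bounded-issue loop, assuming an illustrative cap value and a stand-in for the async command:

    #include <errno.h>

    #define MAX_PENDING_REG_MR 8    /* illustrative cap, not taken from the source */

    struct ent_model { unsigned int pending; };

    /* Stand-in for the asynchronous firmware command; always "succeeds". */
    static int issue_async_create(struct ent_model *e) { (void)e; return 0; }

    /* Try to start 'num' creations; stop once the pending cap is reached.
     * The kernel takes ent->lock around the check/increment and the rollback. */
    static int add_keys_model(struct ent_model *e, unsigned int num)
    {
        for (unsigned int i = 0; i < num; i++) {
            if (e->pending >= MAX_PENDING_REG_MR)
                return -EAGAIN;     /* caller retries later */
            e->pending++;
            if (issue_async_create(e)) {
                e->pending--;       /* roll back on submit failure */
                return -EIO;
            }
        }
        return 0;
    }

    int main(void)
    {
        struct ent_model e = { .pending = 6 };

        return add_keys_model(&e, 4) == -EAGAIN ? 0 : 1;
    }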
239 static struct mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent) in create_cache_mr() argument
252 mr = alloc_cache_mr(ent, mkc); in create_cache_mr()
258 err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey, in, inlen); in create_cache_mr()
263 WRITE_ONCE(ent->dev->cache.last_add, jiffies); in create_cache_mr()
264 spin_lock_irq(&ent->lock); in create_cache_mr()
265 ent->total_mrs++; in create_cache_mr()
266 spin_unlock_irq(&ent->lock); in create_cache_mr()
276 static void remove_cache_mr_locked(struct mlx5_cache_ent *ent) in remove_cache_mr_locked() argument
280 lockdep_assert_held(&ent->lock); in remove_cache_mr_locked()
281 if (list_empty(&ent->head)) in remove_cache_mr_locked()
283 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in remove_cache_mr_locked()
285 ent->available_mrs--; in remove_cache_mr_locked()
286 ent->total_mrs--; in remove_cache_mr_locked()
287 spin_unlock_irq(&ent->lock); in remove_cache_mr_locked()
288 mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey); in remove_cache_mr_locked()
290 spin_lock_irq(&ent->lock); in remove_cache_mr_locked()
293 static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target, in resize_available_mrs() argument
298 lockdep_assert_held(&ent->lock); in resize_available_mrs()
302 target = ent->limit * 2; in resize_available_mrs()
303 if (target == ent->available_mrs + ent->pending) in resize_available_mrs()
305 if (target > ent->available_mrs + ent->pending) { in resize_available_mrs()
306 u32 todo = target - (ent->available_mrs + ent->pending); in resize_available_mrs()
308 spin_unlock_irq(&ent->lock); in resize_available_mrs()
309 err = add_keys(ent, todo); in resize_available_mrs()
312 spin_lock_irq(&ent->lock); in resize_available_mrs()
319 remove_cache_mr_locked(ent); in resize_available_mrs()
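resize_available_mrs() converges available_mrs + pending toward a target, recomputing the target as 2 * ent->limit on each pass when the limit_fill flag is set, adding keys in a batch when below the target and removing one cached MR at a time when above it (the kernel drops ent->lock around add_keys()). A simplified model of that convergence loop, with locking elided and helper names invented for the sketch:

    struct ent_model {
        unsigned int available_mrs, pending, limit;
    };

    /* Pretend every async submit succeeds; mirrors add_keys() at a distance. */
    static int add_keys_model(struct ent_model *e, unsigned int n)
    {
        e->pending += n;
        return 0;
    }

    /* Destroy one cached mkey; mirrors remove_cache_mr_locked(). */
    static void remove_one_model(struct ent_model *e)
    {
        e->available_mrs--;
    }

    static int resize_model(struct ent_model *e, unsigned int target, int limit_fill)
    {
        for (;;) {
            if (limit_fill)
                target = e->limit * 2;
            if (target == e->available_mrs + e->pending)
                return 0;
            if (target > e->available_mrs + e->pending) {
                unsigned int todo = target - (e->available_mrs + e->pending);
                int err = add_keys_model(e, todo);  /* kernel drops ent->lock here */

                if (err)
                    return err;
            } else {
                remove_one_model(e);
            }
        }
    }

    int main(void)
    {
        struct ent_model e = { .available_mrs = 1, .limit = 4 };

        return resize_model(&e, 0, 1);  /* limit_fill: converge on 2 * limit */
    }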
327 struct mlx5_cache_ent *ent = filp->private_data; in size_write() local
340 spin_lock_irq(&ent->lock); in size_write()
341 if (target < ent->total_mrs - ent->available_mrs) { in size_write()
345 target = target - (ent->total_mrs - ent->available_mrs); in size_write()
346 if (target < ent->limit || target > ent->limit*2) { in size_write()
350 err = resize_available_mrs(ent, target, false); in size_write()
353 spin_unlock_irq(&ent->lock); in size_write()
358 spin_unlock_irq(&ent->lock); in size_write()
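The debugfs "size" write sets the entry's total MR count. The checks visible above mean the requested total may not drop below the number of MRs currently handed out (total_mrs - available_mrs), and the resulting cached count must land inside [limit, 2*limit] before resize_available_mrs() runs. A sketch of that validation; the function name and error codes are chosen for illustration:

    #include <errno.h>

    struct ent_model {
        unsigned int total_mrs, available_mrs, limit;
    };

    /* Validate a requested total size and turn it into a target for the
     * cached (available) MRs, mirroring the checks visible in size_write(). */
    static int size_to_available_target(const struct ent_model *e,
                                        unsigned int requested_total,
                                        unsigned int *target_available)
    {
        unsigned int in_use = e->total_mrs - e->available_mrs;

        if (requested_total < in_use)
            return -EINVAL;         /* cannot drop MRs that are handed out */

        *target_available = requested_total - in_use;
        if (*target_available < e->limit || *target_available > e->limit * 2)
            return -EINVAL;         /* must stay between the water marks */
        return 0;
    }

    int main(void)
    {
        struct ent_model e = { .total_mrs = 10, .available_mrs = 6, .limit = 4 };
        unsigned int target;

        return size_to_available_target(&e, 12, &target);  /* 12 - 4 in use = 8 */
    }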
365 struct mlx5_cache_ent *ent = filp->private_data; in size_read() local
369 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->total_mrs); in size_read()
386 struct mlx5_cache_ent *ent = filp->private_data; in limit_write() local
398 spin_lock_irq(&ent->lock); in limit_write()
399 ent->limit = var; in limit_write()
400 err = resize_available_mrs(ent, 0, true); in limit_write()
401 spin_unlock_irq(&ent->lock); in limit_write()
410 struct mlx5_cache_ent *ent = filp->private_data; in limit_read() local
414 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit); in limit_read()
433 struct mlx5_cache_ent *ent = &cache->ent[i]; in someone_adding() local
436 spin_lock_irq(&ent->lock); in someone_adding()
437 ret = ent->available_mrs < ent->limit; in someone_adding()
438 spin_unlock_irq(&ent->lock); in someone_adding()
450 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent) in queue_adjust_cache_locked() argument
452 lockdep_assert_held(&ent->lock); in queue_adjust_cache_locked()
454 if (ent->disabled || READ_ONCE(ent->dev->fill_delay)) in queue_adjust_cache_locked()
456 if (ent->available_mrs < ent->limit) { in queue_adjust_cache_locked()
457 ent->fill_to_high_water = true; in queue_adjust_cache_locked()
458 queue_work(ent->dev->cache.wq, &ent->work); in queue_adjust_cache_locked()
459 } else if (ent->fill_to_high_water && in queue_adjust_cache_locked()
460 ent->available_mrs + ent->pending < 2 * ent->limit) { in queue_adjust_cache_locked()
465 queue_work(ent->dev->cache.wq, &ent->work); in queue_adjust_cache_locked()
466 } else if (ent->available_mrs == 2 * ent->limit) { in queue_adjust_cache_locked()
467 ent->fill_to_high_water = false; in queue_adjust_cache_locked()
468 } else if (ent->available_mrs > 2 * ent->limit) { in queue_adjust_cache_locked()
470 ent->fill_to_high_water = false; in queue_adjust_cache_locked()
471 if (ent->pending) in queue_adjust_cache_locked()
472 queue_delayed_work(ent->dev->cache.wq, &ent->dwork, in queue_adjust_cache_locked()
475 queue_work(ent->dev->cache.wq, &ent->work); in queue_adjust_cache_locked()
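queue_adjust_cache_locked() is the entry's water-mark policy: refill when below limit, keep filling toward 2*limit once a fill has started, stop exactly at 2*limit, and shrink above it, using delayed work while async creations are still pending; nothing is queued while the entry is disabled or the device-wide fill_delay is set. A compact user-space model of the decision that returns an action instead of queueing work (the enum and the folding of fill_delay into the per-entry struct are modelling shortcuts):

    #include <stdbool.h>

    enum adjust_action { ADJ_NONE, ADJ_QUEUE_NOW, ADJ_QUEUE_DELAYED };

    struct ent_model {
        bool disabled, fill_delay, fill_to_high_water;
        unsigned int available_mrs, pending, limit;
    };

    static enum adjust_action queue_adjust_model(struct ent_model *e)
    {
        if (e->disabled || e->fill_delay)
            return ADJ_NONE;
        if (e->available_mrs < e->limit) {
            e->fill_to_high_water = true;        /* start refilling */
            return ADJ_QUEUE_NOW;
        }
        if (e->fill_to_high_water &&
            e->available_mrs + e->pending < 2 * e->limit)
            return ADJ_QUEUE_NOW;                /* keep filling toward 2*limit */
        if (e->available_mrs == 2 * e->limit) {
            e->fill_to_high_water = false;       /* high-water mark reached */
            return ADJ_NONE;
        }
        if (e->available_mrs > 2 * e->limit) {
            e->fill_to_high_water = false;
            /* shrink; delay it while async creations are still in flight */
            return e->pending ? ADJ_QUEUE_DELAYED : ADJ_QUEUE_NOW;
        }
        return ADJ_NONE;
    }

    int main(void)
    {
        struct ent_model e = { .available_mrs = 1, .limit = 4 };

        return queue_adjust_model(&e) == ADJ_QUEUE_NOW ? 0 : 1;
    }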
479 static void __cache_work_func(struct mlx5_cache_ent *ent) in __cache_work_func() argument
481 struct mlx5_ib_dev *dev = ent->dev; in __cache_work_func()
485 spin_lock_irq(&ent->lock); in __cache_work_func()
486 if (ent->disabled) in __cache_work_func()
489 if (ent->fill_to_high_water && in __cache_work_func()
490 ent->available_mrs + ent->pending < 2 * ent->limit && in __cache_work_func()
492 spin_unlock_irq(&ent->lock); in __cache_work_func()
493 err = add_keys(ent, 1); in __cache_work_func()
494 spin_lock_irq(&ent->lock); in __cache_work_func()
495 if (ent->disabled) in __cache_work_func()
507 ent->order, err); in __cache_work_func()
508 queue_delayed_work(cache->wq, &ent->dwork, in __cache_work_func()
512 } else if (ent->available_mrs > 2 * ent->limit) { in __cache_work_func()
527 spin_unlock_irq(&ent->lock); in __cache_work_func()
531 spin_lock_irq(&ent->lock); in __cache_work_func()
532 if (ent->disabled) in __cache_work_func()
535 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); in __cache_work_func()
538 remove_cache_mr_locked(ent); in __cache_work_func()
539 queue_adjust_cache_locked(ent); in __cache_work_func()
542 spin_unlock_irq(&ent->lock); in __cache_work_func()
547 struct mlx5_cache_ent *ent; in delayed_cache_work_func() local
549 ent = container_of(work, struct mlx5_cache_ent, dwork.work); in delayed_cache_work_func()
550 __cache_work_func(ent); in delayed_cache_work_func()
555 struct mlx5_cache_ent *ent; in cache_work_func() local
557 ent = container_of(work, struct mlx5_cache_ent, work); in cache_work_func()
558 __cache_work_func(ent); in cache_work_func()
566 struct mlx5_cache_ent *ent; in mlx5_mr_cache_alloc() local
570 entry >= ARRAY_SIZE(cache->ent))) in mlx5_mr_cache_alloc()
577 ent = &cache->ent[entry]; in mlx5_mr_cache_alloc()
578 spin_lock_irq(&ent->lock); in mlx5_mr_cache_alloc()
579 if (list_empty(&ent->head)) { in mlx5_mr_cache_alloc()
580 queue_adjust_cache_locked(ent); in mlx5_mr_cache_alloc()
581 ent->miss++; in mlx5_mr_cache_alloc()
582 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_alloc()
583 mr = create_cache_mr(ent); in mlx5_mr_cache_alloc()
587 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in mlx5_mr_cache_alloc()
589 ent->available_mrs--; in mlx5_mr_cache_alloc()
590 queue_adjust_cache_locked(ent); in mlx5_mr_cache_alloc()
591 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_alloc()
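mlx5_mr_cache_alloc() shows the hit/miss split: on a hit it pops the first MR off ent->head and decrements available_mrs; on a miss it counts ent->miss, drops the lock, and falls back to a synchronous create_cache_mr(); in both cases queue_adjust_cache_locked() is run so the entry refills. A condensed model of that flow with the MR list reduced to a counter and the helpers replaced by stand-ins:

    #include <stdbool.h>

    struct ent_model {
        unsigned int available_mrs, miss;
    };

    /* Stand-in for create_cache_mr(): synchronous, not counted as cached. */
    static int create_sync_mr(void) { return 1; }

    /* Returns 1 if an MR was obtained, 0 otherwise. */
    static int cache_alloc_model(struct ent_model *e, bool *was_hit)
    {
        if (e->available_mrs == 0) {    /* list_empty(&ent->head) */
            e->miss++;
            *was_hit = false;
            /* kernel: queue_adjust, unlock, then create_cache_mr(ent) */
            return create_sync_mr();
        }
        e->available_mrs--;             /* take the first MR off the list */
        *was_hit = true;
        /* kernel: queue_adjust_cache_locked(ent) before unlocking */
        return 1;
    }

    int main(void)
    {
        struct ent_model e = { .available_mrs = 0 };
        bool hit;

        return cache_alloc_model(&e, &hit) ? 0 : 1;   /* miss path still delivers */
    }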
602 struct mlx5_cache_ent *ent = req_ent; in get_cache_mr() local
605 for (; ent != &dev->cache.ent[MR_CACHE_LAST_STD_ENTRY + 1]; ent++) { in get_cache_mr()
606 mlx5_ib_dbg(dev, "order %u, cache index %zu\n", ent->order, in get_cache_mr()
607 ent - dev->cache.ent); in get_cache_mr()
609 spin_lock_irq(&ent->lock); in get_cache_mr()
610 if (!list_empty(&ent->head)) { in get_cache_mr()
611 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, in get_cache_mr()
614 ent->available_mrs--; in get_cache_mr()
615 queue_adjust_cache_locked(ent); in get_cache_mr()
616 spin_unlock_irq(&ent->lock); in get_cache_mr()
619 queue_adjust_cache_locked(ent); in get_cache_mr()
620 spin_unlock_irq(&ent->lock); in get_cache_mr()
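get_cache_mr() starts at the requested entry and walks upward through the standard entries (up to MR_CACHE_LAST_STD_ENTRY), taking the first MR it finds; entries it visits empty-handed still get queue_adjust_cache_locked() so they refill for next time. A sketch of that scan over an array of modelled entries; the array length and helper names are illustrative:

    #define N_STD_ENTRIES 16    /* illustrative; stands in for the last standard entry */

    struct ent_model { unsigned int available_mrs; };

    /* Scan from 'start' upward; return the index that supplied an MR, or -1. */
    static int get_cache_mr_model(struct ent_model ents[N_STD_ENTRIES], int start)
    {
        for (int i = start; i < N_STD_ENTRIES; i++) {
            if (ents[i].available_mrs) {
                ents[i].available_mrs--;   /* take the first MR on the list */
                /* kernel: queue_adjust_cache_locked(ent) before returning */
                return i;
            }
            /* empty entry: the kernel still nudges it so it refills */
        }
        return -1;                          /* nothing cached in any larger entry */
    }

    int main(void)
    {
        struct ent_model ents[N_STD_ENTRIES] = { [3] = { .available_mrs = 2 } };

        return get_cache_mr_model(ents, 1) == 3 ? 0 : 1;
    }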
631 struct mlx5_cache_ent *ent = mr->cache_ent; in detach_mr_from_cache() local
634 spin_lock_irq(&ent->lock); in detach_mr_from_cache()
635 ent->total_mrs--; in detach_mr_from_cache()
636 spin_unlock_irq(&ent->lock); in detach_mr_from_cache()
641 struct mlx5_cache_ent *ent = mr->cache_ent; in mlx5_mr_cache_free() local
643 if (!ent) in mlx5_mr_cache_free()
653 spin_lock_irq(&ent->lock); in mlx5_mr_cache_free()
654 list_add_tail(&mr->list, &ent->head); in mlx5_mr_cache_free()
655 ent->available_mrs++; in mlx5_mr_cache_free()
656 queue_adjust_cache_locked(ent); in mlx5_mr_cache_free()
657 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_free()
663 struct mlx5_cache_ent *ent = &cache->ent[c]; in clean_keys() local
668 cancel_delayed_work(&ent->dwork); in clean_keys()
670 spin_lock_irq(&ent->lock); in clean_keys()
671 if (list_empty(&ent->head)) { in clean_keys()
672 spin_unlock_irq(&ent->lock); in clean_keys()
675 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in clean_keys()
677 ent->available_mrs--; in clean_keys()
678 ent->total_mrs--; in clean_keys()
679 spin_unlock_irq(&ent->lock); in clean_keys()
701 struct mlx5_cache_ent *ent; in mlx5_mr_cache_debugfs_init() local
711 ent = &cache->ent[i]; in mlx5_mr_cache_debugfs_init()
712 sprintf(ent->name, "%d", ent->order); in mlx5_mr_cache_debugfs_init()
713 dir = debugfs_create_dir(ent->name, cache->root); in mlx5_mr_cache_debugfs_init()
714 debugfs_create_file("size", 0600, dir, ent, &size_fops); in mlx5_mr_cache_debugfs_init()
715 debugfs_create_file("limit", 0600, dir, ent, &limit_fops); in mlx5_mr_cache_debugfs_init()
716 debugfs_create_u32("cur", 0400, dir, &ent->available_mrs); in mlx5_mr_cache_debugfs_init()
717 debugfs_create_u32("miss", 0600, dir, &ent->miss); in mlx5_mr_cache_debugfs_init()
731 struct mlx5_cache_ent *ent; in mlx5_mr_cache_init() local
744 ent = &cache->ent[i]; in mlx5_mr_cache_init()
745 INIT_LIST_HEAD(&ent->head); in mlx5_mr_cache_init()
746 spin_lock_init(&ent->lock); in mlx5_mr_cache_init()
747 ent->order = i + 2; in mlx5_mr_cache_init()
748 ent->dev = dev; in mlx5_mr_cache_init()
749 ent->limit = 0; in mlx5_mr_cache_init()
751 INIT_WORK(&ent->work, cache_work_func); in mlx5_mr_cache_init()
752 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); in mlx5_mr_cache_init()
755 mlx5_odp_init_mr_cache_entry(ent); in mlx5_mr_cache_init()
759 if (ent->order > mr_cache_max_order(dev)) in mlx5_mr_cache_init()
762 ent->page = PAGE_SHIFT; in mlx5_mr_cache_init()
763 ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) / in mlx5_mr_cache_init()
765 ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT; in mlx5_mr_cache_init()
769 ent->limit = dev->mdev->profile->mr_cache[i].limit; in mlx5_mr_cache_init()
771 ent->limit = 0; in mlx5_mr_cache_init()
772 spin_lock_irq(&ent->lock); in mlx5_mr_cache_init()
773 queue_adjust_cache_locked(ent); in mlx5_mr_cache_init()
774 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_init()
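mlx5_mr_cache_init() derives each standard entry's geometry from its index: ent->order = i + 2, so entry i caches MRs spanning 2^(i+2) pages of PAGE_SHIFT size, and ent->xlt is computed from (1 << order) * sizeof(struct mlx5_mtt) divided by a constant that is truncated in the listing above. The sketch below reproduces that arithmetic with an assumed 8-byte MTT and an assumed 16-byte divisor, purely for illustration:

    #include <stdio.h>

    #define MODEL_PAGE_SHIFT 12   /* assume 4 KiB pages for the example      */
    #define MODEL_MTT_SIZE    8   /* assumed sizeof(struct mlx5_mtt)         */
    #define MODEL_DIVISOR    16   /* assumed value of the truncated divisor  */

    int main(void)
    {
        for (unsigned int i = 0; i < 4; i++) {
            unsigned int order = i + 2;                  /* ent->order = i + 2 */
            unsigned long pages = 1UL << order;
            unsigned long xlt = pages * MODEL_MTT_SIZE / MODEL_DIVISOR;

            printf("entry %u: order=%u pages=%lu bytes=%lu xlt=%lu\n",
                   i, order, pages, pages << MODEL_PAGE_SHIFT, xlt);
        }
        return 0;
    }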
790 struct mlx5_cache_ent *ent = &dev->cache.ent[i]; in mlx5_mr_cache_cleanup() local
792 spin_lock_irq(&ent->lock); in mlx5_mr_cache_cleanup()
793 ent->disabled = true; in mlx5_mr_cache_cleanup()
794 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_cleanup()
795 cancel_work_sync(&ent->work); in mlx5_mr_cache_cleanup()
796 cancel_delayed_work_sync(&ent->dwork); in mlx5_mr_cache_cleanup()
974 if (order < cache->ent[0].order) in mr_cache_ent_from_order()
975 return &cache->ent[0]; in mr_cache_ent_from_order()
976 order = order - cache->ent[0].order; in mr_cache_ent_from_order()
979 return &cache->ent[order]; in mr_cache_ent_from_order()
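mr_cache_ent_from_order() maps a requested order to a cache entry: orders smaller than the first entry's order are clamped to entry 0, otherwise the index is the offset from that base order (the lines between 976 and 979 are not shown here, so any upper-bound handling is left out of the sketch). A minimal model of the mapping:

    struct ent_model { unsigned int order; };

    /* Map an MR order to an index into the cache's entry array;
     * ents[0].order is the smallest order the cache handles. */
    static unsigned int ent_index_from_order(const struct ent_model *ents,
                                             unsigned int order)
    {
        if (order < ents[0].order)
            return 0;                   /* clamp small requests to entry 0 */
        return order - ents[0].order;   /* offset from the base order */
    }

    int main(void)
    {
        const struct ent_model ents[] = { { .order = 2 }, { .order = 3 } };

        return ent_index_from_order(ents, 3) == 1 ? 0 : 1;
    }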
988 struct mlx5_cache_ent *ent = mr_cache_ent_from_order(dev, order); in alloc_mr_from_cache() local
991 if (!ent) in alloc_mr_from_cache()
998 mr = get_cache_mr(ent); in alloc_mr_from_cache()
1000 mr = create_cache_mr(ent); in alloc_mr_from_cache()