Lines Matching refs:ent
125 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
143 struct mlx5_cache_ent *ent = mr->cache_ent; in create_mkey_callback() local
144 struct mlx5_ib_dev *dev = ent->dev; in create_mkey_callback()
150 spin_lock_irqsave(&ent->lock, flags); in create_mkey_callback()
151 ent->pending--; in create_mkey_callback()
153 spin_unlock_irqrestore(&ent->lock, flags); in create_mkey_callback()
165 spin_lock_irqsave(&ent->lock, flags); in create_mkey_callback()
166 list_add_tail(&mr->list, &ent->head); in create_mkey_callback()
167 ent->available_mrs++; in create_mkey_callback()
168 ent->total_mrs++; in create_mkey_callback()
170 queue_adjust_cache_locked(ent); in create_mkey_callback()
171 ent->pending--; in create_mkey_callback()
172 spin_unlock_irqrestore(&ent->lock, flags); in create_mkey_callback()
175 static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc) in alloc_cache_mr() argument
182 mr->cache_ent = ent; in alloc_cache_mr()
184 set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd); in alloc_cache_mr()
187 MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3); in alloc_cache_mr()
188 MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7); in alloc_cache_mr()
190 MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt); in alloc_cache_mr()
191 MLX5_SET(mkc, mkc, log_page_size, ent->page); in alloc_cache_mr()
196 static int add_keys(struct mlx5_cache_ent *ent, unsigned int num) in add_keys() argument
211 mr = alloc_cache_mr(ent, mkc); in add_keys()
216 spin_lock_irq(&ent->lock); in add_keys()
217 if (ent->pending >= MAX_PENDING_REG_MR) { in add_keys()
219 spin_unlock_irq(&ent->lock); in add_keys()
223 ent->pending++; in add_keys()
224 spin_unlock_irq(&ent->lock); in add_keys()
225 err = mlx5_ib_create_mkey_cb(ent->dev, &mr->mmkey, in add_keys()
226 &ent->dev->async_ctx, in, inlen, in add_keys()
230 spin_lock_irq(&ent->lock); in add_keys()
231 ent->pending--; in add_keys()
232 spin_unlock_irq(&ent->lock); in add_keys()
233 mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err); in add_keys()
244 static struct mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent) in create_cache_mr() argument
257 mr = alloc_cache_mr(ent, mkc); in create_cache_mr()
263 err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey, in, inlen); in create_cache_mr()
268 WRITE_ONCE(ent->dev->cache.last_add, jiffies); in create_cache_mr()
269 spin_lock_irq(&ent->lock); in create_cache_mr()
270 ent->total_mrs++; in create_cache_mr()
271 spin_unlock_irq(&ent->lock); in create_cache_mr()
281 static void remove_cache_mr_locked(struct mlx5_cache_ent *ent) in remove_cache_mr_locked() argument
285 lockdep_assert_held(&ent->lock); in remove_cache_mr_locked()
286 if (list_empty(&ent->head)) in remove_cache_mr_locked()
288 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in remove_cache_mr_locked()
290 ent->available_mrs--; in remove_cache_mr_locked()
291 ent->total_mrs--; in remove_cache_mr_locked()
292 spin_unlock_irq(&ent->lock); in remove_cache_mr_locked()
293 mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey); in remove_cache_mr_locked()
295 spin_lock_irq(&ent->lock); in remove_cache_mr_locked()
298 static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target, in resize_available_mrs() argument
303 lockdep_assert_held(&ent->lock); in resize_available_mrs()
307 target = ent->limit * 2; in resize_available_mrs()
308 if (target == ent->available_mrs + ent->pending) in resize_available_mrs()
310 if (target > ent->available_mrs + ent->pending) { in resize_available_mrs()
311 u32 todo = target - (ent->available_mrs + ent->pending); in resize_available_mrs()
313 spin_unlock_irq(&ent->lock); in resize_available_mrs()
314 err = add_keys(ent, todo); in resize_available_mrs()
317 spin_lock_irq(&ent->lock); in resize_available_mrs()
324 remove_cache_mr_locked(ent); in resize_available_mrs()
332 struct mlx5_cache_ent *ent = filp->private_data; in size_write() local
345 spin_lock_irq(&ent->lock); in size_write()
346 if (target < ent->total_mrs - ent->available_mrs) { in size_write()
350 target = target - (ent->total_mrs - ent->available_mrs); in size_write()
351 if (target < ent->limit || target > ent->limit*2) { in size_write()
355 err = resize_available_mrs(ent, target, false); in size_write()
358 spin_unlock_irq(&ent->lock); in size_write()
363 spin_unlock_irq(&ent->lock); in size_write()
370 struct mlx5_cache_ent *ent = filp->private_data; in size_read() local
374 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->total_mrs); in size_read()
391 struct mlx5_cache_ent *ent = filp->private_data; in limit_write() local
403 spin_lock_irq(&ent->lock); in limit_write()
404 ent->limit = var; in limit_write()
405 err = resize_available_mrs(ent, 0, true); in limit_write()
406 spin_unlock_irq(&ent->lock); in limit_write()
415 struct mlx5_cache_ent *ent = filp->private_data; in limit_read() local
419 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit); in limit_read()
438 struct mlx5_cache_ent *ent = &cache->ent[i]; in someone_adding() local
441 spin_lock_irq(&ent->lock); in someone_adding()
442 ret = ent->available_mrs < ent->limit; in someone_adding()
443 spin_unlock_irq(&ent->lock); in someone_adding()
455 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent) in queue_adjust_cache_locked() argument
457 lockdep_assert_held(&ent->lock); in queue_adjust_cache_locked()
459 if (ent->disabled || READ_ONCE(ent->dev->fill_delay)) in queue_adjust_cache_locked()
461 if (ent->available_mrs < ent->limit) { in queue_adjust_cache_locked()
462 ent->fill_to_high_water = true; in queue_adjust_cache_locked()
463 queue_work(ent->dev->cache.wq, &ent->work); in queue_adjust_cache_locked()
464 } else if (ent->fill_to_high_water && in queue_adjust_cache_locked()
465 ent->available_mrs + ent->pending < 2 * ent->limit) { in queue_adjust_cache_locked()
470 queue_work(ent->dev->cache.wq, &ent->work); in queue_adjust_cache_locked()
471 } else if (ent->available_mrs == 2 * ent->limit) { in queue_adjust_cache_locked()
472 ent->fill_to_high_water = false; in queue_adjust_cache_locked()
473 } else if (ent->available_mrs > 2 * ent->limit) { in queue_adjust_cache_locked()
475 ent->fill_to_high_water = false; in queue_adjust_cache_locked()
476 if (ent->pending) in queue_adjust_cache_locked()
477 queue_delayed_work(ent->dev->cache.wq, &ent->dwork, in queue_adjust_cache_locked()
480 queue_work(ent->dev->cache.wq, &ent->work); in queue_adjust_cache_locked()
484 static void __cache_work_func(struct mlx5_cache_ent *ent) in __cache_work_func() argument
486 struct mlx5_ib_dev *dev = ent->dev; in __cache_work_func()
490 spin_lock_irq(&ent->lock); in __cache_work_func()
491 if (ent->disabled) in __cache_work_func()
494 if (ent->fill_to_high_water && in __cache_work_func()
495 ent->available_mrs + ent->pending < 2 * ent->limit && in __cache_work_func()
497 spin_unlock_irq(&ent->lock); in __cache_work_func()
498 err = add_keys(ent, 1); in __cache_work_func()
499 spin_lock_irq(&ent->lock); in __cache_work_func()
500 if (ent->disabled) in __cache_work_func()
512 ent->order, err); in __cache_work_func()
513 queue_delayed_work(cache->wq, &ent->dwork, in __cache_work_func()
517 } else if (ent->available_mrs > 2 * ent->limit) { in __cache_work_func()
532 spin_unlock_irq(&ent->lock); in __cache_work_func()
536 spin_lock_irq(&ent->lock); in __cache_work_func()
537 if (ent->disabled) in __cache_work_func()
540 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); in __cache_work_func()
543 remove_cache_mr_locked(ent); in __cache_work_func()
544 queue_adjust_cache_locked(ent); in __cache_work_func()
547 spin_unlock_irq(&ent->lock); in __cache_work_func()
552 struct mlx5_cache_ent *ent; in delayed_cache_work_func() local
554 ent = container_of(work, struct mlx5_cache_ent, dwork.work); in delayed_cache_work_func()
555 __cache_work_func(ent); in delayed_cache_work_func()
560 struct mlx5_cache_ent *ent; in cache_work_func() local
562 ent = container_of(work, struct mlx5_cache_ent, work); in cache_work_func()
563 __cache_work_func(ent); in cache_work_func()
571 struct mlx5_cache_ent *ent; in mlx5_mr_cache_alloc() local
575 entry >= ARRAY_SIZE(cache->ent))) in mlx5_mr_cache_alloc()
582 ent = &cache->ent[entry]; in mlx5_mr_cache_alloc()
583 spin_lock_irq(&ent->lock); in mlx5_mr_cache_alloc()
584 if (list_empty(&ent->head)) { in mlx5_mr_cache_alloc()
585 queue_adjust_cache_locked(ent); in mlx5_mr_cache_alloc()
586 ent->miss++; in mlx5_mr_cache_alloc()
587 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_alloc()
588 mr = create_cache_mr(ent); in mlx5_mr_cache_alloc()
592 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in mlx5_mr_cache_alloc()
594 ent->available_mrs--; in mlx5_mr_cache_alloc()
595 queue_adjust_cache_locked(ent); in mlx5_mr_cache_alloc()
596 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_alloc()
609 struct mlx5_cache_ent *ent = req_ent; in get_cache_mr() local
612 for (; ent != &dev->cache.ent[MR_CACHE_LAST_STD_ENTRY + 1]; ent++) { in get_cache_mr()
613 mlx5_ib_dbg(dev, "order %u, cache index %zu\n", ent->order, in get_cache_mr()
614 ent - dev->cache.ent); in get_cache_mr()
616 spin_lock_irq(&ent->lock); in get_cache_mr()
617 if (!list_empty(&ent->head)) { in get_cache_mr()
618 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, in get_cache_mr()
621 ent->available_mrs--; in get_cache_mr()
622 queue_adjust_cache_locked(ent); in get_cache_mr()
623 spin_unlock_irq(&ent->lock); in get_cache_mr()
627 queue_adjust_cache_locked(ent); in get_cache_mr()
628 spin_unlock_irq(&ent->lock); in get_cache_mr()
636 struct mlx5_cache_ent *ent = mr->cache_ent; in mlx5_mr_cache_free() local
639 spin_lock_irq(&ent->lock); in mlx5_mr_cache_free()
640 list_add_tail(&mr->list, &ent->head); in mlx5_mr_cache_free()
641 ent->available_mrs++; in mlx5_mr_cache_free()
642 queue_adjust_cache_locked(ent); in mlx5_mr_cache_free()
643 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_free()
649 struct mlx5_cache_ent *ent = &cache->ent[c]; in clean_keys() local
654 cancel_delayed_work(&ent->dwork); in clean_keys()
656 spin_lock_irq(&ent->lock); in clean_keys()
657 if (list_empty(&ent->head)) { in clean_keys()
658 spin_unlock_irq(&ent->lock); in clean_keys()
661 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in clean_keys()
663 ent->available_mrs--; in clean_keys()
664 ent->total_mrs--; in clean_keys()
665 spin_unlock_irq(&ent->lock); in clean_keys()
687 struct mlx5_cache_ent *ent; in mlx5_mr_cache_debugfs_init() local
697 ent = &cache->ent[i]; in mlx5_mr_cache_debugfs_init()
698 sprintf(ent->name, "%d", ent->order); in mlx5_mr_cache_debugfs_init()
699 dir = debugfs_create_dir(ent->name, cache->root); in mlx5_mr_cache_debugfs_init()
700 debugfs_create_file("size", 0600, dir, ent, &size_fops); in mlx5_mr_cache_debugfs_init()
701 debugfs_create_file("limit", 0600, dir, ent, &limit_fops); in mlx5_mr_cache_debugfs_init()
702 debugfs_create_u32("cur", 0400, dir, &ent->available_mrs); in mlx5_mr_cache_debugfs_init()
703 debugfs_create_u32("miss", 0600, dir, &ent->miss); in mlx5_mr_cache_debugfs_init()
717 struct mlx5_cache_ent *ent; in mlx5_mr_cache_init() local
730 ent = &cache->ent[i]; in mlx5_mr_cache_init()
731 INIT_LIST_HEAD(&ent->head); in mlx5_mr_cache_init()
732 spin_lock_init(&ent->lock); in mlx5_mr_cache_init()
733 ent->order = i + 2; in mlx5_mr_cache_init()
734 ent->dev = dev; in mlx5_mr_cache_init()
735 ent->limit = 0; in mlx5_mr_cache_init()
737 INIT_WORK(&ent->work, cache_work_func); in mlx5_mr_cache_init()
738 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); in mlx5_mr_cache_init()
741 mlx5_odp_init_mr_cache_entry(ent); in mlx5_mr_cache_init()
745 if (ent->order > mr_cache_max_order(dev)) in mlx5_mr_cache_init()
748 ent->page = PAGE_SHIFT; in mlx5_mr_cache_init()
749 ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) / in mlx5_mr_cache_init()
751 ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT; in mlx5_mr_cache_init()
755 ent->limit = dev->mdev->profile.mr_cache[i].limit; in mlx5_mr_cache_init()
757 ent->limit = 0; in mlx5_mr_cache_init()
758 spin_lock_irq(&ent->lock); in mlx5_mr_cache_init()
759 queue_adjust_cache_locked(ent); in mlx5_mr_cache_init()
760 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_init()
776 struct mlx5_cache_ent *ent = &dev->cache.ent[i]; in mlx5_mr_cache_cleanup() local
778 spin_lock_irq(&ent->lock); in mlx5_mr_cache_cleanup()
779 ent->disabled = true; in mlx5_mr_cache_cleanup()
780 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_cleanup()
781 cancel_work_sync(&ent->work); in mlx5_mr_cache_cleanup()
782 cancel_delayed_work_sync(&ent->dwork); in mlx5_mr_cache_cleanup()
910 if (order < cache->ent[0].order) in mr_cache_ent_from_order()
911 return &cache->ent[0]; in mr_cache_ent_from_order()
912 order = order - cache->ent[0].order; in mr_cache_ent_from_order()
915 return &cache->ent[order]; in mr_cache_ent_from_order()
944 struct mlx5_cache_ent *ent; in alloc_cacheable_mr() local
955 ent = mr_cache_ent_from_order( in alloc_cacheable_mr()
961 if (!ent || ent->limit == 0 || in alloc_cacheable_mr()
969 mr = get_cache_mr(ent); in alloc_cacheable_mr()
971 mr = create_cache_mr(ent); in alloc_cacheable_mr()
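
The queue_adjust_cache_locked() fragments above (source lines 455-480) show the cache entry's watermark behaviour: when available_mrs drops below ent->limit the worker is queued to refill toward 2 * ent->limit, and when the entry overshoots that high watermark a shrink pass is queued, delayed if creations are still pending. The following is a minimal user-space C sketch of that branch structure, for illustration only; struct cache_ent_model, kick_worker() and kick_worker_delayed() are invented stand-ins rather than mlx5 driver API, and details such as dev->fill_delay, ent->lock and the lockdep assertion are omitted.

/*
 * User-space model of the low/high watermark logic visible in the
 * queue_adjust_cache_locked() fragments above. Types and helpers are
 * simplified stand-ins, not the kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

struct cache_ent_model {
	unsigned int available_mrs;   /* MRs sitting on the free list    */
	unsigned int pending;         /* async mkey creations in flight  */
	unsigned int limit;           /* low watermark                   */
	bool fill_to_high_water;
	bool disabled;
};

static void kick_worker(const char *why)         { printf("queue work: %s\n", why); }
static void kick_worker_delayed(const char *why) { printf("queue delayed work: %s\n", why); }

/* Mirrors the branch structure of queue_adjust_cache_locked(). */
static void adjust(struct cache_ent_model *ent)
{
	/* The kernel also backs off while dev->fill_delay is set. */
	if (ent->disabled)
		return;
	if (ent->available_mrs < ent->limit) {
		/* Below the low watermark: start filling toward 2 * limit. */
		ent->fill_to_high_water = true;
		kick_worker("refill");
	} else if (ent->fill_to_high_water &&
		   ent->available_mrs + ent->pending < 2 * ent->limit) {
		/* Still climbing toward the high watermark. */
		kick_worker("keep filling");
	} else if (ent->available_mrs == 2 * ent->limit) {
		ent->fill_to_high_water = false;
	} else if (ent->available_mrs > 2 * ent->limit) {
		/* Above the high watermark: shrink, but back off while
		 * asynchronous creations are still pending. */
		ent->fill_to_high_water = false;
		if (ent->pending)
			kick_worker_delayed("shrink later");
		else
			kick_worker("shrink now");
	}
}

int main(void)
{
	struct cache_ent_model ent = { .available_mrs = 3, .limit = 8 };

	adjust(&ent);           /* 3 < 8   -> refill     */
	ent.available_mrs = 20;
	adjust(&ent);           /* 20 > 16 -> shrink now */
	return 0;
}

The delayed requeue in the shrink branch mirrors the listing's queue_delayed_work() call when ent->pending is non-zero, which appears intended to let in-flight mkey creations settle before cached MRs are destroyed.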