Lines Matching full:ent

111 	struct mlx5_ib_dev *dev = async_create->ent->dev;  in mlx5_ib_create_mkey_cb()
124 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
146 static int push_mkey_locked(struct mlx5_cache_ent *ent, bool limit_pendings, in push_mkey_locked() argument
149 XA_STATE(xas, &ent->mkeys, 0); in push_mkey_locked()
153 (ent->reserved - ent->stored) > MAX_PENDING_REG_MR) in push_mkey_locked()
162 xas_set(&xas, ent->reserved); in push_mkey_locked()
165 if (to_store && ent->stored == ent->reserved) in push_mkey_locked()
170 ent->reserved++; in push_mkey_locked()
172 if (ent->stored != ent->reserved) in push_mkey_locked()
173 __xa_store(&ent->mkeys, in push_mkey_locked()
174 ent->stored, in push_mkey_locked()
177 ent->stored++; in push_mkey_locked()
178 queue_adjust_cache_locked(ent); in push_mkey_locked()
179 WRITE_ONCE(ent->dev->cache.last_add, in push_mkey_locked()
184 xa_unlock_irq(&ent->mkeys); in push_mkey_locked()
192 xa_lock_irq(&ent->mkeys); in push_mkey_locked()
194 xa_lock_irq(&ent->mkeys); in push_mkey_locked()
202 static int push_mkey(struct mlx5_cache_ent *ent, bool limit_pendings, in push_mkey() argument
207 xa_lock_irq(&ent->mkeys); in push_mkey()
208 ret = push_mkey_locked(ent, limit_pendings, to_store); in push_mkey()
209 xa_unlock_irq(&ent->mkeys); in push_mkey()
213 static void undo_push_reserve_mkey(struct mlx5_cache_ent *ent) in undo_push_reserve_mkey() argument
217 ent->reserved--; in undo_push_reserve_mkey()
218 old = __xa_erase(&ent->mkeys, ent->reserved); in undo_push_reserve_mkey()
222 static void push_to_reserved(struct mlx5_cache_ent *ent, u32 mkey) in push_to_reserved() argument
226 old = __xa_store(&ent->mkeys, ent->stored, xa_mk_value(mkey), 0); in push_to_reserved()
228 ent->stored++; in push_to_reserved()
231 static u32 pop_stored_mkey(struct mlx5_cache_ent *ent) in pop_stored_mkey() argument
235 ent->stored--; in pop_stored_mkey()
236 ent->reserved--; in pop_stored_mkey()
238 if (ent->stored == ent->reserved) { in pop_stored_mkey()
239 xa_mkey = __xa_erase(&ent->mkeys, ent->stored); in pop_stored_mkey()
244 xa_mkey = __xa_store(&ent->mkeys, ent->stored, XA_ZERO_ENTRY, in pop_stored_mkey()
247 old = __xa_erase(&ent->mkeys, ent->reserved); in pop_stored_mkey()
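The push/pop fragments above (push_mkey_locked, push_to_reserved, pop_stored_mkey) all revolve around two per-entry counters: stored, the number of ready mkeys sitting in the xarray, and reserved, which additionally counts slots set aside for asynchronous creations that have not completed yet. Below is a minimal userspace sketch of that accounting, offered only as an illustration: it replaces the kernel xarray and its locking with a plain fixed-size array, it skips the re-packing the real code does when an mkey is popped while creations are still outstanding, and every name in it (struct cache_model, model_reserve, model_push_reserved, model_pop_stored) is invented for the sketch, not taken from the driver.

	/* Simplified, hypothetical model of the stored/reserved scheme. */
	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MODEL_CAP 64

	struct cache_model {
		uint32_t mkeys[MODEL_CAP];	/* slots [0, stored) hold ready mkeys       */
		unsigned int stored;		/* mkeys that can be handed out right now   */
		unsigned int reserved;		/* stored + slots awaiting an async create  */
	};

	/* Reserve a slot for an mkey whose creation was just posted (cf. push_mkey_locked). */
	static int model_reserve(struct cache_model *c)
	{
		if (c->reserved >= MODEL_CAP)
			return -1;
		c->reserved++;
		return 0;
	}

	/* Async creation completed: fill the next free slot (cf. push_to_reserved). */
	static void model_push_reserved(struct cache_model *c, uint32_t mkey)
	{
		assert(c->stored < c->reserved);
		c->mkeys[c->stored++] = mkey;
	}

	/* Hand out the most recently stored mkey (cf. pop_stored_mkey). */
	static uint32_t model_pop_stored(struct cache_model *c)
	{
		assert(c->stored > 0);
		c->stored--;
		c->reserved--;
		return c->mkeys[c->stored];
	}

	int main(void)
	{
		struct cache_model c = { .stored = 0, .reserved = 0 };

		model_reserve(&c);		/* creation posted         */
		model_push_reserved(&c, 0x42);	/* completion callback ran */
		printf("popped mkey 0x%x\n", model_pop_stored(&c));
		return 0;
	}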
256 struct mlx5_cache_ent *ent = mkey_out->ent; in create_mkey_callback() local
257 struct mlx5_ib_dev *dev = ent->dev; in create_mkey_callback()
263 xa_lock_irqsave(&ent->mkeys, flags); in create_mkey_callback()
264 undo_push_reserve_mkey(ent); in create_mkey_callback()
266 xa_unlock_irqrestore(&ent->mkeys, flags); in create_mkey_callback()
275 xa_lock_irqsave(&ent->mkeys, flags); in create_mkey_callback()
276 push_to_reserved(ent, mkey_out->mkey); in create_mkey_callback()
278 queue_adjust_cache_locked(ent); in create_mkey_callback()
279 xa_unlock_irqrestore(&ent->mkeys, flags); in create_mkey_callback()
302 static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc) in set_cache_mkc() argument
304 set_mkc_access_pd_addr_fields(mkc, ent->rb_key.access_flags, 0, in set_cache_mkc()
305 ent->dev->umrc.pd); in set_cache_mkc()
308 MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3); in set_cache_mkc()
310 (ent->rb_key.access_mode >> 2) & 0x7); in set_cache_mkc()
313 get_mkc_octo_size(ent->rb_key.access_mode, in set_cache_mkc()
314 ent->rb_key.ndescs)); in set_cache_mkc()
319 static int add_keys(struct mlx5_cache_ent *ent, unsigned int num) in add_keys() argument
333 set_cache_mkc(ent, mkc); in add_keys()
334 async_create->ent = ent; in add_keys()
336 err = push_mkey(ent, true, NULL); in add_keys()
342 mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err); in add_keys()
350 xa_lock_irq(&ent->mkeys); in add_keys()
351 undo_push_reserve_mkey(ent); in add_keys()
352 xa_unlock_irq(&ent->mkeys); in add_keys()
359 static int create_cache_mkey(struct mlx5_cache_ent *ent, u32 *mkey) in create_cache_mkey() argument
370 set_cache_mkc(ent, mkc); in create_cache_mkey()
372 err = mlx5_core_create_mkey(ent->dev->mdev, mkey, in, inlen); in create_cache_mkey()
376 WRITE_ONCE(ent->dev->cache.last_add, jiffies); in create_cache_mkey()
382 static void remove_cache_mr_locked(struct mlx5_cache_ent *ent) in remove_cache_mr_locked() argument
386 lockdep_assert_held(&ent->mkeys.xa_lock); in remove_cache_mr_locked()
387 if (!ent->stored) in remove_cache_mr_locked()
389 mkey = pop_stored_mkey(ent); in remove_cache_mr_locked()
390 xa_unlock_irq(&ent->mkeys); in remove_cache_mr_locked()
391 mlx5_core_destroy_mkey(ent->dev->mdev, mkey); in remove_cache_mr_locked()
392 xa_lock_irq(&ent->mkeys); in remove_cache_mr_locked()
395 static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target, in resize_available_mrs() argument
397 __acquires(&ent->mkeys) __releases(&ent->mkeys) in resize_available_mrs()
401 lockdep_assert_held(&ent->mkeys.xa_lock); in resize_available_mrs()
405 target = ent->limit * 2; in resize_available_mrs()
406 if (target == ent->reserved) in resize_available_mrs()
408 if (target > ent->reserved) { in resize_available_mrs()
409 u32 todo = target - ent->reserved; in resize_available_mrs()
411 xa_unlock_irq(&ent->mkeys); in resize_available_mrs()
412 err = add_keys(ent, todo); in resize_available_mrs()
415 xa_lock_irq(&ent->mkeys); in resize_available_mrs()
422 remove_cache_mr_locked(ent); in resize_available_mrs()
430 struct mlx5_cache_ent *ent = filp->private_data; in size_write() local
443 xa_lock_irq(&ent->mkeys); in size_write()
444 if (target < ent->in_use) { in size_write()
448 target = target - ent->in_use; in size_write()
449 if (target < ent->limit || target > ent->limit*2) { in size_write()
453 err = resize_available_mrs(ent, target, false); in size_write()
456 xa_unlock_irq(&ent->mkeys); in size_write()
461 xa_unlock_irq(&ent->mkeys); in size_write()
468 struct mlx5_cache_ent *ent = filp->private_data; in size_read() local
472 err = snprintf(lbuf, sizeof(lbuf), "%ld\n", ent->stored + ent->in_use); in size_read()
489 struct mlx5_cache_ent *ent = filp->private_data; in limit_write() local
501 xa_lock_irq(&ent->mkeys); in limit_write()
502 ent->limit = var; in limit_write()
503 err = resize_available_mrs(ent, 0, true); in limit_write()
504 xa_unlock_irq(&ent->mkeys); in limit_write()
513 struct mlx5_cache_ent *ent = filp->private_data; in limit_read() local
517 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit); in limit_read()
533 struct mlx5_cache_ent *ent; in someone_adding() local
539 ent = rb_entry(node, struct mlx5_cache_ent, node); in someone_adding()
540 xa_lock_irq(&ent->mkeys); in someone_adding()
541 ret = ent->stored < ent->limit; in someone_adding()
542 xa_unlock_irq(&ent->mkeys); in someone_adding()
557 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent) in queue_adjust_cache_locked() argument
559 lockdep_assert_held(&ent->mkeys.xa_lock); in queue_adjust_cache_locked()
561 if (ent->disabled || READ_ONCE(ent->dev->fill_delay) || ent->is_tmp) in queue_adjust_cache_locked()
563 if (ent->stored < ent->limit) { in queue_adjust_cache_locked()
564 ent->fill_to_high_water = true; in queue_adjust_cache_locked()
565 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); in queue_adjust_cache_locked()
566 } else if (ent->fill_to_high_water && in queue_adjust_cache_locked()
567 ent->reserved < 2 * ent->limit) { in queue_adjust_cache_locked()
572 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); in queue_adjust_cache_locked()
573 } else if (ent->stored == 2 * ent->limit) { in queue_adjust_cache_locked()
574 ent->fill_to_high_water = false; in queue_adjust_cache_locked()
575 } else if (ent->stored > 2 * ent->limit) { in queue_adjust_cache_locked()
577 ent->fill_to_high_water = false; in queue_adjust_cache_locked()
578 if (ent->stored != ent->reserved) in queue_adjust_cache_locked()
579 queue_delayed_work(ent->dev->cache.wq, &ent->dwork, in queue_adjust_cache_locked()
582 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); in queue_adjust_cache_locked()
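The queue_adjust_cache_locked() hits above suggest a water-mark policy built around ent->limit: drop below limit and the entry starts filling toward 2 * limit; hit 2 * limit and filling stops; exceed it and the entry is shrunk. The decision function below is a hedged reconstruction from those fragments alone, with invented names (enum cache_action, cache_adjust); the real driver additionally checks ent->disabled, fill_delay and is_tmp, and delays shrinking while asynchronous creations are still pending.

	#include <stdio.h>

	enum cache_action {
		CACHE_NONE,	/* nothing to do                        */
		CACHE_FILL,	/* queue work to create more mkeys      */
		CACHE_SHRINK,	/* queue work to destroy surplus mkeys  */
	};

	static enum cache_action cache_adjust(unsigned int stored, unsigned int reserved,
					      unsigned int limit, int *fill_to_high_water)
	{
		if (stored < limit) {
			/* Below the low water mark: start filling up to 2 * limit. */
			*fill_to_high_water = 1;
			return CACHE_FILL;
		}
		if (*fill_to_high_water && reserved < 2 * limit)
			return CACHE_FILL;		/* keep filling toward the high mark */
		if (stored == 2 * limit) {
			*fill_to_high_water = 0;	/* reached the high mark */
			return CACHE_NONE;
		}
		if (stored > 2 * limit) {
			*fill_to_high_water = 0;
			return CACHE_SHRINK;		/* too many cached mkeys */
		}
		return CACHE_NONE;
	}

	int main(void)
	{
		int fill = 0;

		/* stored=3, reserved=3, limit=8: below the low mark, so fill. */
		printf("action = %d\n", cache_adjust(3, 3, 8, &fill));
		return 0;
	}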
586 static void __cache_work_func(struct mlx5_cache_ent *ent) in __cache_work_func() argument
588 struct mlx5_ib_dev *dev = ent->dev; in __cache_work_func()
592 xa_lock_irq(&ent->mkeys); in __cache_work_func()
593 if (ent->disabled) in __cache_work_func()
596 if (ent->fill_to_high_water && ent->reserved < 2 * ent->limit && in __cache_work_func()
598 xa_unlock_irq(&ent->mkeys); in __cache_work_func()
599 err = add_keys(ent, 1); in __cache_work_func()
600 xa_lock_irq(&ent->mkeys); in __cache_work_func()
601 if (ent->disabled) in __cache_work_func()
614 queue_delayed_work(cache->wq, &ent->dwork, in __cache_work_func()
618 } else if (ent->stored > 2 * ent->limit) { in __cache_work_func()
633 xa_unlock_irq(&ent->mkeys); in __cache_work_func()
637 xa_lock_irq(&ent->mkeys); in __cache_work_func()
638 if (ent->disabled) in __cache_work_func()
641 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); in __cache_work_func()
644 remove_cache_mr_locked(ent); in __cache_work_func()
645 queue_adjust_cache_locked(ent); in __cache_work_func()
648 xa_unlock_irq(&ent->mkeys); in __cache_work_func()
653 struct mlx5_cache_ent *ent; in delayed_cache_work_func() local
655 ent = container_of(work, struct mlx5_cache_ent, dwork.work); in delayed_cache_work_func()
656 __cache_work_func(ent); in delayed_cache_work_func()
685 struct mlx5_cache_ent *ent) in mlx5_cache_ent_insert() argument
695 cmp = cache_ent_key_cmp(cur->rb_key, ent->rb_key); in mlx5_cache_ent_insert()
707 rb_link_node(&ent->node, parent, new); in mlx5_cache_ent_insert()
708 rb_insert_color(&ent->node, &cache->rb_root); in mlx5_cache_ent_insert()
722 * Find the smallest ent with order >= requested_order. in mkey_cache_ent_from_rb_key()
746 struct mlx5_cache_ent *ent, in _mlx5_mr_cache_alloc() argument
756 xa_lock_irq(&ent->mkeys); in _mlx5_mr_cache_alloc()
757 ent->in_use++; in _mlx5_mr_cache_alloc()
759 if (!ent->stored) { in _mlx5_mr_cache_alloc()
760 queue_adjust_cache_locked(ent); in _mlx5_mr_cache_alloc()
761 ent->miss++; in _mlx5_mr_cache_alloc()
762 xa_unlock_irq(&ent->mkeys); in _mlx5_mr_cache_alloc()
763 err = create_cache_mkey(ent, &mr->mmkey.key); in _mlx5_mr_cache_alloc()
765 xa_lock_irq(&ent->mkeys); in _mlx5_mr_cache_alloc()
766 ent->in_use--; in _mlx5_mr_cache_alloc()
767 xa_unlock_irq(&ent->mkeys); in _mlx5_mr_cache_alloc()
772 mr->mmkey.key = pop_stored_mkey(ent); in _mlx5_mr_cache_alloc()
773 queue_adjust_cache_locked(ent); in _mlx5_mr_cache_alloc()
774 xa_unlock_irq(&ent->mkeys); in _mlx5_mr_cache_alloc()
776 mr->mmkey.cache_ent = ent; in _mlx5_mr_cache_alloc()
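The _mlx5_mr_cache_alloc() hits above outline the allocation path: pop a stored mkey when one is available, otherwise count a miss and fall back to creating an mkey synchronously, undoing the in_use bump on failure. The snippet below is a compact userspace sketch of that flow; the struct and function names (ent_stats, model_alloc, hw_create_mkey) are hypothetical stand-ins, and the real function also re-queues the background fill work and does all of this under the xarray lock.

	#include <stdio.h>

	struct ent_stats {
		unsigned int stored;	/* ready mkeys in the cache entry  */
		unsigned int in_use;	/* mkeys currently handed out      */
		unsigned int miss;	/* allocations that found nothing  */
	};

	/* Hypothetical stand-in for a synchronous create (cf. create_cache_mkey). */
	static int hw_create_mkey(unsigned int *mkey)
	{
		*mkey = 0x1234;
		return 0;
	}

	/* Fast path: pop a stored mkey; slow path: create one synchronously. */
	static int model_alloc(struct ent_stats *e, unsigned int *mkey)
	{
		e->in_use++;
		if (e->stored) {
			e->stored--;
			*mkey = 0xcafe;		/* would be pop_stored_mkey() */
			return 0;
		}
		e->miss++;
		if (hw_create_mkey(mkey)) {
			e->in_use--;		/* undo on failure, as in the fragments */
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		struct ent_stats e = { .stored = 0 };
		unsigned int mkey;

		if (!model_alloc(&e, &mkey))
			printf("got mkey 0x%x, misses=%u\n", mkey, e.miss);
		return 0;
	}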
815 struct mlx5_cache_ent *ent = mkey_cache_ent_from_rb_key(dev, rb_key); in mlx5_mr_cache_alloc() local
817 if (!ent) in mlx5_mr_cache_alloc()
820 return _mlx5_mr_cache_alloc(dev, ent, access_flags); in mlx5_mr_cache_alloc()
823 static void clean_keys(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent) in clean_keys() argument
827 cancel_delayed_work(&ent->dwork); in clean_keys()
828 xa_lock_irq(&ent->mkeys); in clean_keys()
829 while (ent->stored) { in clean_keys()
830 mkey = pop_stored_mkey(ent); in clean_keys()
831 xa_unlock_irq(&ent->mkeys); in clean_keys()
833 xa_lock_irq(&ent->mkeys); in clean_keys()
835 xa_unlock_irq(&ent->mkeys); in clean_keys()
848 struct mlx5_cache_ent *ent) in mlx5_mkey_cache_debugfs_add_ent() argument
850 int order = order_base_2(ent->rb_key.ndescs); in mlx5_mkey_cache_debugfs_add_ent()
856 if (ent->rb_key.access_mode == MLX5_MKC_ACCESS_MODE_KSM) in mlx5_mkey_cache_debugfs_add_ent()
859 sprintf(ent->name, "%d", order); in mlx5_mkey_cache_debugfs_add_ent()
860 dir = debugfs_create_dir(ent->name, dev->cache.fs_root); in mlx5_mkey_cache_debugfs_add_ent()
861 debugfs_create_file("size", 0600, dir, ent, &size_fops); in mlx5_mkey_cache_debugfs_add_ent()
862 debugfs_create_file("limit", 0600, dir, ent, &limit_fops); in mlx5_mkey_cache_debugfs_add_ent()
863 debugfs_create_ulong("cur", 0400, dir, &ent->stored); in mlx5_mkey_cache_debugfs_add_ent()
864 debugfs_create_u32("miss", 0600, dir, &ent->miss); in mlx5_mkey_cache_debugfs_add_ent()
890 struct mlx5_cache_ent *ent; in mlx5r_cache_create_ent_locked() local
894 ent = kzalloc(sizeof(*ent), GFP_KERNEL); in mlx5r_cache_create_ent_locked()
895 if (!ent) in mlx5r_cache_create_ent_locked()
898 xa_init_flags(&ent->mkeys, XA_FLAGS_LOCK_IRQ); in mlx5r_cache_create_ent_locked()
899 ent->rb_key = rb_key; in mlx5r_cache_create_ent_locked()
900 ent->dev = dev; in mlx5r_cache_create_ent_locked()
901 ent->is_tmp = !persistent_entry; in mlx5r_cache_create_ent_locked()
903 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); in mlx5r_cache_create_ent_locked()
905 ret = mlx5_cache_ent_insert(&dev->cache, ent); in mlx5r_cache_create_ent_locked()
907 kfree(ent); in mlx5r_cache_create_ent_locked()
920 ent->limit = dev->mdev->profile.mr_cache[order].limit; in mlx5r_cache_create_ent_locked()
922 ent->limit = 0; in mlx5r_cache_create_ent_locked()
924 mlx5_mkey_cache_debugfs_add_ent(dev, ent); in mlx5r_cache_create_ent_locked()
926 mod_delayed_work(ent->dev->cache.wq, in mlx5r_cache_create_ent_locked()
927 &ent->dev->cache.remove_ent_dwork, in mlx5r_cache_create_ent_locked()
931 return ent; in mlx5r_cache_create_ent_locked()
937 struct mlx5_cache_ent *ent; in remove_ent_work_func() local
945 ent = rb_entry(cur, struct mlx5_cache_ent, node); in remove_ent_work_func()
949 xa_lock_irq(&ent->mkeys); in remove_ent_work_func()
950 if (!ent->is_tmp) { in remove_ent_work_func()
951 xa_unlock_irq(&ent->mkeys); in remove_ent_work_func()
955 xa_unlock_irq(&ent->mkeys); in remove_ent_work_func()
957 clean_keys(ent->dev, ent); in remove_ent_work_func()
970 struct mlx5_cache_ent *ent; in mlx5_mkey_cache_init() local
991 ent = mlx5r_cache_create_ent_locked(dev, rb_key, true); in mlx5_mkey_cache_init()
992 if (IS_ERR(ent)) { in mlx5_mkey_cache_init()
993 ret = PTR_ERR(ent); in mlx5_mkey_cache_init()
1004 ent = rb_entry(node, struct mlx5_cache_ent, node); in mlx5_mkey_cache_init()
1005 xa_lock_irq(&ent->mkeys); in mlx5_mkey_cache_init()
1006 queue_adjust_cache_locked(ent); in mlx5_mkey_cache_init()
1007 xa_unlock_irq(&ent->mkeys); in mlx5_mkey_cache_init()
1022 struct mlx5_cache_ent *ent; in mlx5_mkey_cache_cleanup() local
1031 ent = rb_entry(node, struct mlx5_cache_ent, node); in mlx5_mkey_cache_cleanup()
1032 xa_lock_irq(&ent->mkeys); in mlx5_mkey_cache_cleanup()
1033 ent->disabled = true; in mlx5_mkey_cache_cleanup()
1034 xa_unlock_irq(&ent->mkeys); in mlx5_mkey_cache_cleanup()
1035 cancel_delayed_work(&ent->dwork); in mlx5_mkey_cache_cleanup()
1052 ent = rb_entry(node, struct mlx5_cache_ent, node); in mlx5_mkey_cache_cleanup()
1054 clean_keys(dev, ent); in mlx5_mkey_cache_cleanup()
1055 rb_erase(&ent->node, root); in mlx5_mkey_cache_cleanup()
1056 kfree(ent); in mlx5_mkey_cache_cleanup()
1159 struct mlx5_cache_ent *ent; in alloc_cacheable_mr() local
1174 ent = mkey_cache_ent_from_rb_key(dev, rb_key); in alloc_cacheable_mr()
1179 if (!ent) { in alloc_cacheable_mr()
1189 mr = _mlx5_mr_cache_alloc(dev, ent, access_flags); in alloc_cacheable_mr()
1824 struct mlx5_cache_ent *ent; in cache_ent_find_and_store() local
1834 ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key); in cache_ent_find_and_store()
1835 if (ent) { in cache_ent_find_and_store()
1836 if (ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) { in cache_ent_find_and_store()
1837 if (ent->disabled) { in cache_ent_find_and_store()
1841 mr->mmkey.cache_ent = ent; in cache_ent_find_and_store()
1848 ent = mlx5r_cache_create_ent_locked(dev, mr->mmkey.rb_key, false); in cache_ent_find_and_store()
1850 if (IS_ERR(ent)) in cache_ent_find_and_store()
1851 return PTR_ERR(ent); in cache_ent_find_and_store()
1853 mr->mmkey.cache_ent = ent; in cache_ent_find_and_store()