Lines Matching +full:sig +full:- +full:dir

2  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
15 * - Redistributions of source code must retain the above
19 * - Redistributions in binary form must reproduce the above
40 #include <linux/dma-buf.h>
41 #include <linux/dma-resv.h>
63 struct mlx5_ib_dev *dev = to_mdev(pd->device); in set_mkc_access_pd_addr_fields()
72 if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write)) in set_mkc_access_pd_addr_fields()
75 if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) || in set_mkc_access_pd_addr_fields()
76 (MLX5_CAP_GEN(dev->mdev, in set_mkc_access_pd_addr_fields()
78 pcie_relaxed_ordering_enabled(dev->mdev->pdev))) in set_mkc_access_pd_addr_fields()
82 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); in set_mkc_access_pd_addr_fields()
89 u8 key = atomic_inc_return(&dev->mkey_var); in assign_mkey_variant()
102 assign_mkey_variant(dev, &mkey->key, in); in mlx5_ib_create_mkey()
103 ret = mlx5_core_create_mkey(dev->mdev, &mkey->key, in, inlen); in mlx5_ib_create_mkey()
105 init_waitqueue_head(&mkey->wait); in mlx5_ib_create_mkey()
112 struct mlx5_ib_dev *dev = async_create->ent->dev; in mlx5_ib_create_mkey_cb()
116 MLX5_SET(create_mkey_in, async_create->in, opcode, in mlx5_ib_create_mkey_cb()
118 assign_mkey_variant(dev, &async_create->mkey, async_create->in); in mlx5_ib_create_mkey_cb()
119 return mlx5_cmd_exec_cb(&dev->async_ctx, async_create->in, inlen, in mlx5_ib_create_mkey_cb()
120 async_create->out, outlen, create_mkey_callback, in mlx5_ib_create_mkey_cb()
121 &async_create->cb_work); in mlx5_ib_create_mkey_cb()
129 WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))); in destroy_mkey()
131 return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key); in destroy_mkey()
136 if (status == -ENXIO) /* core driver is not available */ in create_mkey_warn()
140 if (status != -EREMOTEIO) /* driver specific failure */ in create_mkey_warn()
144 mlx5_cmd_out_err(dev->mdev, MLX5_CMD_OP_CREATE_MKEY, 0, out); in create_mkey_warn()
150 XA_STATE(xas, &ent->mkeys, 0); in push_mkey_locked()
154 (ent->reserved - ent->stored) > MAX_PENDING_REG_MR) in push_mkey_locked()
155 return -EAGAIN; in push_mkey_locked()
163 xas_set(&xas, ent->reserved); in push_mkey_locked()
166 if (to_store && ent->stored == ent->reserved) in push_mkey_locked()
171 ent->reserved++; in push_mkey_locked()
173 if (ent->stored != ent->reserved) in push_mkey_locked()
174 __xa_store(&ent->mkeys, in push_mkey_locked()
175 ent->stored, in push_mkey_locked()
178 ent->stored++; in push_mkey_locked()
180 WRITE_ONCE(ent->dev->cache.last_add, in push_mkey_locked()
185 xa_unlock_irq(&ent->mkeys); in push_mkey_locked()
193 xa_lock_irq(&ent->mkeys); in push_mkey_locked()
195 xa_lock_irq(&ent->mkeys); in push_mkey_locked()
199 return -EINVAL; in push_mkey_locked()
208 xa_lock_irq(&ent->mkeys); in push_mkey()
210 xa_unlock_irq(&ent->mkeys); in push_mkey()
218 ent->reserved--; in undo_push_reserve_mkey()
219 old = __xa_erase(&ent->mkeys, ent->reserved); in undo_push_reserve_mkey()
227 old = __xa_store(&ent->mkeys, ent->stored, xa_mk_value(mkey), 0); in push_to_reserved()
229 ent->stored++; in push_to_reserved()
236 ent->stored--; in pop_stored_mkey()
237 ent->reserved--; in pop_stored_mkey()
239 if (ent->stored == ent->reserved) { in pop_stored_mkey()
240 xa_mkey = __xa_erase(&ent->mkeys, ent->stored); in pop_stored_mkey()
245 xa_mkey = __xa_store(&ent->mkeys, ent->stored, XA_ZERO_ENTRY, in pop_stored_mkey()
248 old = __xa_erase(&ent->mkeys, ent->reserved); in pop_stored_mkey()
257 struct mlx5_cache_ent *ent = mkey_out->ent; in create_mkey_callback()
258 struct mlx5_ib_dev *dev = ent->dev; in create_mkey_callback()
262 create_mkey_warn(dev, status, mkey_out->out); in create_mkey_callback()
264 xa_lock_irqsave(&ent->mkeys, flags); in create_mkey_callback()
266 WRITE_ONCE(dev->fill_delay, 1); in create_mkey_callback()
267 xa_unlock_irqrestore(&ent->mkeys, flags); in create_mkey_callback()
268 mod_timer(&dev->delay_timer, jiffies + HZ); in create_mkey_callback()
272 mkey_out->mkey |= mlx5_idx_to_mkey( in create_mkey_callback()
273 MLX5_GET(create_mkey_out, mkey_out->out, mkey_index)); in create_mkey_callback()
274 WRITE_ONCE(dev->cache.last_add, jiffies); in create_mkey_callback()
276 xa_lock_irqsave(&ent->mkeys, flags); in create_mkey_callback()
277 push_to_reserved(ent, mkey_out->mkey); in create_mkey_callback()
280 xa_unlock_irqrestore(&ent->mkeys, flags); in create_mkey_callback()
305 set_mkc_access_pd_addr_fields(mkc, ent->rb_key.access_flags, 0, in set_cache_mkc()
306 ent->dev->umrc.pd); in set_cache_mkc()
309 MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3); in set_cache_mkc()
311 (ent->rb_key.access_mode >> 2) & 0x7); in set_cache_mkc()
312 MLX5_SET(mkc, mkc, ma_translation_mode, !!ent->rb_key.ats); in set_cache_mkc()
315 get_mkc_octo_size(ent->rb_key.access_mode, in set_cache_mkc()
316 ent->rb_key.ndescs)); in set_cache_mkc()
332 return -ENOMEM; in add_keys()
333 mkc = MLX5_ADDR_OF(create_mkey_in, async_create->in, in add_keys()
336 async_create->ent = ent; in add_keys()
344 mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err); in add_keys()
352 xa_lock_irq(&ent->mkeys); in add_keys()
354 xa_unlock_irq(&ent->mkeys); in add_keys()
370 return -ENOMEM; in create_cache_mkey()
374 err = mlx5_core_create_mkey(ent->dev->mdev, mkey, in, inlen); in create_cache_mkey()
378 WRITE_ONCE(ent->dev->cache.last_add, jiffies); in create_cache_mkey()
388 lockdep_assert_held(&ent->mkeys.xa_lock); in remove_cache_mr_locked()
389 if (!ent->stored) in remove_cache_mr_locked()
392 xa_unlock_irq(&ent->mkeys); in remove_cache_mr_locked()
393 mlx5_core_destroy_mkey(ent->dev->mdev, mkey); in remove_cache_mr_locked()
394 xa_lock_irq(&ent->mkeys); in remove_cache_mr_locked()
399 __acquires(&ent->mkeys) __releases(&ent->mkeys) in resize_available_mrs()
403 lockdep_assert_held(&ent->mkeys.xa_lock); in resize_available_mrs()
407 target = ent->limit * 2; in resize_available_mrs()
408 if (target == ent->reserved) in resize_available_mrs()
410 if (target > ent->reserved) { in resize_available_mrs()
411 u32 todo = target - ent->reserved; in resize_available_mrs()
413 xa_unlock_irq(&ent->mkeys); in resize_available_mrs()
415 if (err == -EAGAIN) in resize_available_mrs()
417 xa_lock_irq(&ent->mkeys); in resize_available_mrs()
419 if (err != -EAGAIN) in resize_available_mrs()
432 struct mlx5_cache_ent *ent = filp->private_data; in size_write()
445 xa_lock_irq(&ent->mkeys); in size_write()
446 if (target < ent->in_use) { in size_write()
447 err = -EINVAL; in size_write()
450 target = target - ent->in_use; in size_write()
451 if (target < ent->limit || target > ent->limit*2) { in size_write()
452 err = -EINVAL; in size_write()
458 xa_unlock_irq(&ent->mkeys); in size_write()
463 xa_unlock_irq(&ent->mkeys); in size_write()
470 struct mlx5_cache_ent *ent = filp->private_data; in size_read()
474 err = snprintf(lbuf, sizeof(lbuf), "%ld\n", ent->stored + ent->in_use); in size_read()
491 struct mlx5_cache_ent *ent = filp->private_data; in limit_write()
503 xa_lock_irq(&ent->mkeys); in limit_write()
504 ent->limit = var; in limit_write()
506 xa_unlock_irq(&ent->mkeys); in limit_write()
515 struct mlx5_cache_ent *ent = filp->private_data; in limit_read()
519 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit); in limit_read()
539 mutex_lock(&cache->rb_lock); in someone_adding()
540 for (node = rb_first(&cache->rb_root); node; node = rb_next(node)) { in someone_adding()
542 xa_lock_irq(&ent->mkeys); in someone_adding()
543 ret = ent->stored < ent->limit; in someone_adding()
544 xa_unlock_irq(&ent->mkeys); in someone_adding()
546 mutex_unlock(&cache->rb_lock); in someone_adding()
550 mutex_unlock(&cache->rb_lock); in someone_adding()
561 lockdep_assert_held(&ent->mkeys.xa_lock); in queue_adjust_cache_locked()
563 if (ent->disabled || READ_ONCE(ent->dev->fill_delay) || ent->is_tmp) in queue_adjust_cache_locked()
565 if (ent->stored < ent->limit) { in queue_adjust_cache_locked()
566 ent->fill_to_high_water = true; in queue_adjust_cache_locked()
567 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); in queue_adjust_cache_locked()
568 } else if (ent->fill_to_high_water && in queue_adjust_cache_locked()
569 ent->reserved < 2 * ent->limit) { in queue_adjust_cache_locked()
574 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); in queue_adjust_cache_locked()
575 } else if (ent->stored == 2 * ent->limit) { in queue_adjust_cache_locked()
576 ent->fill_to_high_water = false; in queue_adjust_cache_locked()
577 } else if (ent->stored > 2 * ent->limit) { in queue_adjust_cache_locked()
579 ent->fill_to_high_water = false; in queue_adjust_cache_locked()
580 if (ent->stored != ent->reserved) in queue_adjust_cache_locked()
581 queue_delayed_work(ent->dev->cache.wq, &ent->dwork, in queue_adjust_cache_locked()
584 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); in queue_adjust_cache_locked()
590 struct mlx5_ib_dev *dev = ent->dev; in __cache_work_func()
591 struct mlx5_mkey_cache *cache = &dev->cache; in __cache_work_func()
594 xa_lock_irq(&ent->mkeys); in __cache_work_func()
595 if (ent->disabled) in __cache_work_func()
598 if (ent->fill_to_high_water && ent->reserved < 2 * ent->limit && in __cache_work_func()
599 !READ_ONCE(dev->fill_delay)) { in __cache_work_func()
600 xa_unlock_irq(&ent->mkeys); in __cache_work_func()
602 xa_lock_irq(&ent->mkeys); in __cache_work_func()
603 if (ent->disabled) in __cache_work_func()
611 if (err != -EAGAIN) { in __cache_work_func()
616 queue_delayed_work(cache->wq, &ent->dwork, in __cache_work_func()
620 } else if (ent->stored > 2 * ent->limit) { in __cache_work_func()
635 xa_unlock_irq(&ent->mkeys); in __cache_work_func()
638 READ_ONCE(cache->last_add) + 300 * HZ); in __cache_work_func()
639 xa_lock_irq(&ent->mkeys); in __cache_work_func()
640 if (ent->disabled) in __cache_work_func()
643 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); in __cache_work_func()
650 xa_unlock_irq(&ent->mkeys); in __cache_work_func()
666 res = key1.ats - key2.ats; in cache_ent_key_cmp()
670 res = key1.access_mode - key2.access_mode; in cache_ent_key_cmp()
674 res = key1.access_flags - key2.access_flags; in cache_ent_key_cmp()
683 return key1.ndescs - key2.ndescs; in cache_ent_key_cmp()
689 struct rb_node **new = &cache->rb_root.rb_node, *parent = NULL; in mlx5_cache_ent_insert()
697 cmp = cache_ent_key_cmp(cur->rb_key, ent->rb_key); in mlx5_cache_ent_insert()
699 new = &((*new)->rb_left); in mlx5_cache_ent_insert()
701 new = &((*new)->rb_right); in mlx5_cache_ent_insert()
703 return -EEXIST; in mlx5_cache_ent_insert()
707 rb_link_node(&ent->node, parent, new); in mlx5_cache_ent_insert()
708 rb_insert_color(&ent->node, &cache->rb_root); in mlx5_cache_ent_insert()
717 struct rb_node *node = dev->cache.rb_root.rb_node; in mkey_cache_ent_from_rb_key()
727 cmp = cache_ent_key_cmp(cur->rb_key, rb_key); in mkey_cache_ent_from_rb_key()
730 node = node->rb_left; in mkey_cache_ent_from_rb_key()
733 node = node->rb_right; in mkey_cache_ent_from_rb_key()
746 smallest->rb_key.access_mode == rb_key.access_mode && in mkey_cache_ent_from_rb_key()
747 smallest->rb_key.access_flags == rb_key.access_flags && in mkey_cache_ent_from_rb_key()
748 smallest->rb_key.ats == rb_key.ats && in mkey_cache_ent_from_rb_key()
749 smallest->rb_key.ndescs <= ndescs_limit) ? in mkey_cache_ent_from_rb_key()
763 return ERR_PTR(-ENOMEM); in _mlx5_mr_cache_alloc()
765 xa_lock_irq(&ent->mkeys); in _mlx5_mr_cache_alloc()
766 ent->in_use++; in _mlx5_mr_cache_alloc()
768 if (!ent->stored) { in _mlx5_mr_cache_alloc()
770 ent->miss++; in _mlx5_mr_cache_alloc()
771 xa_unlock_irq(&ent->mkeys); in _mlx5_mr_cache_alloc()
772 err = create_cache_mkey(ent, &mr->mmkey.key); in _mlx5_mr_cache_alloc()
774 xa_lock_irq(&ent->mkeys); in _mlx5_mr_cache_alloc()
775 ent->in_use--; in _mlx5_mr_cache_alloc()
776 xa_unlock_irq(&ent->mkeys); in _mlx5_mr_cache_alloc()
781 mr->mmkey.key = pop_stored_mkey(ent); in _mlx5_mr_cache_alloc()
783 xa_unlock_irq(&ent->mkeys); in _mlx5_mr_cache_alloc()
785 mr->mmkey.cache_ent = ent; in _mlx5_mr_cache_alloc()
786 mr->mmkey.type = MLX5_MKEY_MR; in _mlx5_mr_cache_alloc()
787 init_waitqueue_head(&mr->mmkey.wait); in _mlx5_mr_cache_alloc()
797 MLX5_CAP_GEN(dev->mdev, atomic) && in get_unchangeable_access_flags()
798 MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)) in get_unchangeable_access_flags()
802 MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) && in get_unchangeable_access_flags()
803 !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr)) in get_unchangeable_access_flags()
807 (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) || in get_unchangeable_access_flags()
808 MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_pci_enabled)) && in get_unchangeable_access_flags()
809 !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr)) in get_unchangeable_access_flags()
827 return ERR_PTR(-EOPNOTSUPP); in mlx5_mr_cache_alloc()
836 cancel_delayed_work(&ent->dwork); in clean_keys()
837 xa_lock_irq(&ent->mkeys); in clean_keys()
838 while (ent->stored) { in clean_keys()
840 xa_unlock_irq(&ent->mkeys); in clean_keys()
841 mlx5_core_destroy_mkey(dev->mdev, mkey); in clean_keys()
842 xa_lock_irq(&ent->mkeys); in clean_keys()
844 xa_unlock_irq(&ent->mkeys); in clean_keys()
849 if (!mlx5_debugfs_root || dev->is_rep) in mlx5_mkey_cache_debugfs_cleanup()
852 debugfs_remove_recursive(dev->cache.fs_root); in mlx5_mkey_cache_debugfs_cleanup()
853 dev->cache.fs_root = NULL; in mlx5_mkey_cache_debugfs_cleanup()
859 int order = order_base_2(ent->rb_key.ndescs); in mlx5_mkey_cache_debugfs_add_ent()
860 struct dentry *dir; in mlx5_mkey_cache_debugfs_add_ent() local
862 if (!mlx5_debugfs_root || dev->is_rep) in mlx5_mkey_cache_debugfs_add_ent()
865 if (ent->rb_key.access_mode == MLX5_MKC_ACCESS_MODE_KSM) in mlx5_mkey_cache_debugfs_add_ent()
868 sprintf(ent->name, "%d", order); in mlx5_mkey_cache_debugfs_add_ent()
869 dir = debugfs_create_dir(ent->name, dev->cache.fs_root); in mlx5_mkey_cache_debugfs_add_ent()
870 debugfs_create_file("size", 0600, dir, ent, &size_fops); in mlx5_mkey_cache_debugfs_add_ent()
871 debugfs_create_file("limit", 0600, dir, ent, &limit_fops); in mlx5_mkey_cache_debugfs_add_ent()
872 debugfs_create_ulong("cur", 0400, dir, &ent->stored); in mlx5_mkey_cache_debugfs_add_ent()
873 debugfs_create_u32("miss", 0600, dir, &ent->miss); in mlx5_mkey_cache_debugfs_add_ent()
878 struct dentry *dbg_root = mlx5_debugfs_get_dev_root(dev->mdev); in mlx5_mkey_cache_debugfs_init()
879 struct mlx5_mkey_cache *cache = &dev->cache; in mlx5_mkey_cache_debugfs_init()
881 if (!mlx5_debugfs_root || dev->is_rep) in mlx5_mkey_cache_debugfs_init()
884 cache->fs_root = debugfs_create_dir("mr_cache", dbg_root); in mlx5_mkey_cache_debugfs_init()
891 WRITE_ONCE(dev->fill_delay, 0); in delay_time_func()
905 return ERR_PTR(-ENOMEM); in mlx5r_cache_create_ent_locked()
907 xa_init_flags(&ent->mkeys, XA_FLAGS_LOCK_IRQ); in mlx5r_cache_create_ent_locked()
908 ent->rb_key = rb_key; in mlx5r_cache_create_ent_locked()
909 ent->dev = dev; in mlx5r_cache_create_ent_locked()
910 ent->is_tmp = !persistent_entry; in mlx5r_cache_create_ent_locked()
912 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); in mlx5r_cache_create_ent_locked()
914 ret = mlx5_cache_ent_insert(&dev->cache, ent); in mlx5r_cache_create_ent_locked()
924 order = order_base_2(rb_key.ndescs) - 2; in mlx5r_cache_create_ent_locked()
926 if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) && in mlx5r_cache_create_ent_locked()
927 !dev->is_rep && mlx5_core_is_pf(dev->mdev) && in mlx5r_cache_create_ent_locked()
929 ent->limit = dev->mdev->profile.mr_cache[order].limit; in mlx5r_cache_create_ent_locked()
931 ent->limit = 0; in mlx5r_cache_create_ent_locked()
935 mod_delayed_work(ent->dev->cache.wq, in mlx5r_cache_create_ent_locked()
936 &ent->dev->cache.remove_ent_dwork, in mlx5r_cache_create_ent_locked()
951 mutex_lock(&cache->rb_lock); in remove_ent_work_func()
952 cur = rb_last(&cache->rb_root); in remove_ent_work_func()
956 mutex_unlock(&cache->rb_lock); in remove_ent_work_func()
958 xa_lock_irq(&ent->mkeys); in remove_ent_work_func()
959 if (!ent->is_tmp) { in remove_ent_work_func()
960 xa_unlock_irq(&ent->mkeys); in remove_ent_work_func()
961 mutex_lock(&cache->rb_lock); in remove_ent_work_func()
964 xa_unlock_irq(&ent->mkeys); in remove_ent_work_func()
966 clean_keys(ent->dev, ent); in remove_ent_work_func()
967 mutex_lock(&cache->rb_lock); in remove_ent_work_func()
969 mutex_unlock(&cache->rb_lock); in remove_ent_work_func()
974 struct mlx5_mkey_cache *cache = &dev->cache; in mlx5_mkey_cache_init()
975 struct rb_root *root = &dev->cache.rb_root; in mlx5_mkey_cache_init()
984 mutex_init(&dev->slow_path_mutex); in mlx5_mkey_cache_init()
985 mutex_init(&dev->cache.rb_lock); in mlx5_mkey_cache_init()
986 dev->cache.rb_root = RB_ROOT; in mlx5_mkey_cache_init()
987 INIT_DELAYED_WORK(&dev->cache.remove_ent_dwork, remove_ent_work_func); in mlx5_mkey_cache_init()
988 cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM); in mlx5_mkey_cache_init()
989 if (!cache->wq) { in mlx5_mkey_cache_init()
991 return -ENOMEM; in mlx5_mkey_cache_init()
994 mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx); in mlx5_mkey_cache_init()
995 timer_setup(&dev->delay_timer, delay_time_func, 0); in mlx5_mkey_cache_init()
997 mutex_lock(&cache->rb_lock); in mlx5_mkey_cache_init()
1011 mutex_unlock(&cache->rb_lock); in mlx5_mkey_cache_init()
1014 xa_lock_irq(&ent->mkeys); in mlx5_mkey_cache_init()
1016 xa_unlock_irq(&ent->mkeys); in mlx5_mkey_cache_init()
1022 mutex_unlock(&cache->rb_lock); in mlx5_mkey_cache_init()
1030 struct rb_root *root = &dev->cache.rb_root; in mlx5_mkey_cache_cleanup()
1034 if (!dev->cache.wq) in mlx5_mkey_cache_cleanup()
1037 mutex_lock(&dev->cache.rb_lock); in mlx5_mkey_cache_cleanup()
1038 cancel_delayed_work(&dev->cache.remove_ent_dwork); in mlx5_mkey_cache_cleanup()
1041 xa_lock_irq(&ent->mkeys); in mlx5_mkey_cache_cleanup()
1042 ent->disabled = true; in mlx5_mkey_cache_cleanup()
1043 xa_unlock_irq(&ent->mkeys); in mlx5_mkey_cache_cleanup()
1044 cancel_delayed_work(&ent->dwork); in mlx5_mkey_cache_cleanup()
1046 mutex_unlock(&dev->cache.rb_lock); in mlx5_mkey_cache_cleanup()
1052 flush_workqueue(dev->cache.wq); in mlx5_mkey_cache_cleanup()
1055 mlx5_cmd_cleanup_async_ctx(&dev->async_ctx); in mlx5_mkey_cache_cleanup()
1058 mutex_lock(&dev->cache.rb_lock); in mlx5_mkey_cache_cleanup()
1064 rb_erase(&ent->node, root); in mlx5_mkey_cache_cleanup()
1067 mutex_unlock(&dev->cache.rb_lock); in mlx5_mkey_cache_cleanup()
1069 destroy_workqueue(dev->cache.wq); in mlx5_mkey_cache_cleanup()
1070 del_timer_sync(&dev->delay_timer); in mlx5_mkey_cache_cleanup()
1075 struct mlx5_ib_dev *dev = to_mdev(pd->device); in mlx5_ib_get_dma_mr()
1084 return ERR_PTR(-ENOMEM); in mlx5_ib_get_dma_mr()
1088 err = -ENOMEM; in mlx5_ib_get_dma_mr()
1099 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in mlx5_ib_get_dma_mr()
1104 mr->mmkey.type = MLX5_MKEY_MR; in mlx5_ib_get_dma_mr()
1105 mr->ibmr.lkey = mr->mmkey.key; in mlx5_ib_get_dma_mr()
1106 mr->ibmr.rkey = mr->mmkey.key; in mlx5_ib_get_dma_mr()
1107 mr->umem = NULL; in mlx5_ib_get_dma_mr()
1109 return &mr->ibmr; in mlx5_ib_get_dma_mr()
1126 offset = addr & (page_size - 1); in get_octo_len()
1133 if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) in mkey_cache_max_order()
1141 mr->ibmr.lkey = mr->mmkey.key; in set_mr_fields()
1142 mr->ibmr.rkey = mr->mmkey.key; in set_mr_fields()
1143 mr->ibmr.length = length; in set_mr_fields()
1144 mr->ibmr.device = &dev->ib_dev; in set_mr_fields()
1145 mr->ibmr.iova = iova; in set_mr_fields()
1146 mr->access_flags = access_flags; in set_mr_fields()
1156 umem->iova = iova; in mlx5_umem_dmabuf_default_pgsz()
1167 struct mlx5_ib_dev *dev = to_mdev(pd->device); in alloc_cacheable_mr()
1172 if (umem->is_dmabuf) in alloc_cacheable_mr()
1178 return ERR_PTR(-EINVAL); in alloc_cacheable_mr()
1189 mutex_lock(&dev->slow_path_mutex); in alloc_cacheable_mr()
1191 mutex_unlock(&dev->slow_path_mutex); in alloc_cacheable_mr()
1194 mr->mmkey.rb_key = rb_key; in alloc_cacheable_mr()
1202 mr->ibmr.pd = pd; in alloc_cacheable_mr()
1203 mr->umem = umem; in alloc_cacheable_mr()
1204 mr->page_shift = order_base_2(page_size); in alloc_cacheable_mr()
1205 set_mr_fields(dev, mr, umem->length, access_flags, iova); in alloc_cacheable_mr()
1218 struct mlx5_ib_dev *dev = to_mdev(pd->device); in reg_create()
1225 bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg)); in reg_create()
1228 return ERR_PTR(-EINVAL); in reg_create()
1231 return ERR_PTR(-ENOMEM); in reg_create()
1233 mr->ibmr.pd = pd; in reg_create()
1234 mr->access_flags = access_flags; in reg_create()
1235 mr->page_shift = order_base_2(page_size); in reg_create()
1243 err = -ENOMEM; in reg_create()
1249 err = -EINVAL; in reg_create()
1252 mlx5_ib_populate_pas(umem, 1UL << mr->page_shift, pas, in reg_create()
1263 populate ? pd : dev->umrc.pd); in reg_create()
1268 MLX5_SET64(mkc, mkc, len, umem->length); in reg_create()
1271 get_octo_len(iova, umem->length, mr->page_shift)); in reg_create()
1272 MLX5_SET(mkc, mkc, log_page_size, mr->page_shift); in reg_create()
1277 get_octo_len(iova, umem->length, mr->page_shift)); in reg_create()
1280 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in reg_create()
1285 mr->mmkey.type = MLX5_MKEY_MR; in reg_create()
1286 mr->mmkey.ndescs = get_octo_len(iova, umem->length, mr->page_shift); in reg_create()
1287 mr->umem = umem; in reg_create()
1288 set_mr_fields(dev, mr, umem->length, access_flags, iova); in reg_create()
1291 mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key); in reg_create()
1305 struct mlx5_ib_dev *dev = to_mdev(pd->device); in mlx5_ib_get_dm_mr()
1314 return ERR_PTR(-ENOMEM); in mlx5_ib_get_dm_mr()
1318 err = -ENOMEM; in mlx5_ib_get_dm_mr()
1329 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in mlx5_ib_get_dm_mr()
1337 return &mr->ibmr; in mlx5_ib_get_dm_mr()
1358 return -EOPNOTSUPP; in mlx5_ib_advise_mr()
1369 struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev; in mlx5_ib_reg_dm_mr()
1370 u64 start_addr = mdm->dev_addr + attr->offset; in mlx5_ib_reg_dm_mr()
1373 switch (mdm->type) { in mlx5_ib_reg_dm_mr()
1375 if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS) in mlx5_ib_reg_dm_mr()
1376 return ERR_PTR(-EINVAL); in mlx5_ib_reg_dm_mr()
1379 start_addr -= pci_resource_start(dev->pdev, 0); in mlx5_ib_reg_dm_mr()
1384 if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS) in mlx5_ib_reg_dm_mr()
1385 return ERR_PTR(-EINVAL); in mlx5_ib_reg_dm_mr()
1390 return ERR_PTR(-EINVAL); in mlx5_ib_reg_dm_mr()
1393 return mlx5_ib_get_dm_mr(pd, start_addr, attr->length, in mlx5_ib_reg_dm_mr()
1394 attr->access_flags, mode); in mlx5_ib_reg_dm_mr()
1400 struct mlx5_ib_dev *dev = to_mdev(pd->device); in create_real_mr()
1405 xlt_with_umr = mlx5r_umr_can_load_pas(dev, umem->length); in create_real_mr()
1412 mutex_lock(&dev->slow_path_mutex); in create_real_mr()
1414 mutex_unlock(&dev->slow_path_mutex); in create_real_mr()
1421 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); in create_real_mr()
1423 atomic_add(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages); in create_real_mr()
1433 mlx5_ib_dereg_mr(&mr->ibmr, NULL); in create_real_mr()
1437 return &mr->ibmr; in create_real_mr()
1444 struct mlx5_ib_dev *dev = to_mdev(pd->device); in create_user_odp_mr()
1450 return ERR_PTR(-EOPNOTSUPP); in create_user_odp_mr()
1452 err = mlx5r_odp_create_eq(dev, &dev->odp_pf_eq); in create_user_odp_mr()
1457 return ERR_PTR(-EINVAL); in create_user_odp_mr()
1458 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT)) in create_user_odp_mr()
1459 return ERR_PTR(-EINVAL); in create_user_odp_mr()
1464 return &mr->ibmr; in create_user_odp_mr()
1469 return ERR_PTR(-EINVAL); in create_user_odp_mr()
1471 odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags, in create_user_odp_mr()
1476 mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags); in create_user_odp_mr()
1478 ib_umem_release(&odp->umem); in create_user_odp_mr()
1481 xa_init(&mr->implicit_children); in create_user_odp_mr()
1483 odp->private = mr; in create_user_odp_mr()
1484 err = mlx5r_store_odp_mkey(dev, &mr->mmkey); in create_user_odp_mr()
1491 return &mr->ibmr; in create_user_odp_mr()
1494 mlx5_ib_dereg_mr(&mr->ibmr, NULL); in create_user_odp_mr()
1502 struct mlx5_ib_dev *dev = to_mdev(pd->device); in mlx5_ib_reg_user_mr()
1506 return ERR_PTR(-EOPNOTSUPP); in mlx5_ib_reg_user_mr()
1514 umem = ib_umem_get(&dev->ib_dev, start, length, access_flags); in mlx5_ib_reg_user_mr()
1522 struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv; in mlx5_ib_dmabuf_invalidate_cb()
1523 struct mlx5_ib_mr *mr = umem_dmabuf->private; in mlx5_ib_dmabuf_invalidate_cb()
1525 dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv); in mlx5_ib_dmabuf_invalidate_cb()
1527 if (!umem_dmabuf->sgt) in mlx5_ib_dmabuf_invalidate_cb()
1544 struct mlx5_ib_dev *dev = to_mdev(pd->device); in mlx5_ib_reg_user_mr_dmabuf()
1551 return ERR_PTR(-EOPNOTSUPP); in mlx5_ib_reg_user_mr_dmabuf()
1559 return ERR_PTR(-EINVAL); in mlx5_ib_reg_user_mr_dmabuf()
1561 umem_dmabuf = ib_umem_dmabuf_get(&dev->ib_dev, offset, length, fd, in mlx5_ib_reg_user_mr_dmabuf()
1570 mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr, in mlx5_ib_reg_user_mr_dmabuf()
1573 ib_umem_release(&umem_dmabuf->umem); in mlx5_ib_reg_user_mr_dmabuf()
1577 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); in mlx5_ib_reg_user_mr_dmabuf()
1579 atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages); in mlx5_ib_reg_user_mr_dmabuf()
1580 umem_dmabuf->private = mr; in mlx5_ib_reg_user_mr_dmabuf()
1581 err = mlx5r_store_odp_mkey(dev, &mr->mmkey); in mlx5_ib_reg_user_mr_dmabuf()
1588 return &mr->ibmr; in mlx5_ib_reg_user_mr_dmabuf()
1591 mlx5_ib_dereg_mr(&mr->ibmr, NULL); in mlx5_ib_reg_user_mr_dmabuf()
1618 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); in can_use_umr_rereg_pas()
1621 if (!mr->mmkey.cache_ent) in can_use_umr_rereg_pas()
1623 if (!mlx5r_umr_can_load_pas(dev, new_umem->length)) in can_use_umr_rereg_pas()
1630 return (mr->mmkey.cache_ent->rb_key.ndescs) >= in can_use_umr_rereg_pas()
1638 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); in umr_rereg_pas()
1640 struct ib_umem *old_umem = mr->umem; in umr_rereg_pas()
1653 mr->ibmr.pd = pd; in umr_rereg_pas()
1657 mr->access_flags = access_flags; in umr_rereg_pas()
1661 mr->ibmr.iova = iova; in umr_rereg_pas()
1662 mr->ibmr.length = new_umem->length; in umr_rereg_pas()
1663 mr->page_shift = order_base_2(page_size); in umr_rereg_pas()
1664 mr->umem = new_umem; in umr_rereg_pas()
1671 mr->umem = old_umem; in umr_rereg_pas()
1675 atomic_sub(ib_umem_num_pages(old_umem), &dev->mdev->priv.reg_pages); in umr_rereg_pas()
1677 atomic_add(ib_umem_num_pages(new_umem), &dev->mdev->priv.reg_pages); in umr_rereg_pas()
1686 struct mlx5_ib_dev *dev = to_mdev(ib_mr->device); in mlx5_ib_rereg_user_mr()
1691 return ERR_PTR(-EOPNOTSUPP); in mlx5_ib_rereg_user_mr()
1699 return ERR_PTR(-EOPNOTSUPP); in mlx5_ib_rereg_user_mr()
1702 new_access_flags = mr->access_flags; in mlx5_ib_rereg_user_mr()
1704 new_pd = ib_mr->pd; in mlx5_ib_rereg_user_mr()
1710 if (can_use_umr_rereg_access(dev, mr->access_flags, in mlx5_ib_rereg_user_mr()
1718 /* DM or ODP MR's don't have a normal umem so we can't re-use it */ in mlx5_ib_rereg_user_mr()
1719 if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr)) in mlx5_ib_rereg_user_mr()
1729 umem = mr->umem; in mlx5_ib_rereg_user_mr()
1730 mr->umem = NULL; in mlx5_ib_rereg_user_mr()
1731 atomic_sub(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages); in mlx5_ib_rereg_user_mr()
1733 return create_real_mr(new_pd, umem, mr->ibmr.iova, in mlx5_ib_rereg_user_mr()
1738 * DM doesn't have a PAS list so we can't re-use it, odp/dmabuf does in mlx5_ib_rereg_user_mr()
1741 if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr)) in mlx5_ib_rereg_user_mr()
1745 can_use_umr_rereg_access(dev, mr->access_flags, new_access_flags)) { in mlx5_ib_rereg_user_mr()
1749 new_umem = ib_umem_get(&dev->ib_dev, start, length, in mlx5_ib_rereg_user_mr()
1784 struct device *ddev = &dev->mdev->pdev->dev; in mlx5_alloc_priv_descs()
1789 add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0); in mlx5_alloc_priv_descs()
1793 add_size = min_t(int, end - size, add_size); in mlx5_alloc_priv_descs()
1796 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL); in mlx5_alloc_priv_descs()
1797 if (!mr->descs_alloc) in mlx5_alloc_priv_descs()
1798 return -ENOMEM; in mlx5_alloc_priv_descs()
1800 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN); in mlx5_alloc_priv_descs()
1802 mr->desc_map = dma_map_single(ddev, mr->descs, size, DMA_TO_DEVICE); in mlx5_alloc_priv_descs()
1803 if (dma_mapping_error(ddev, mr->desc_map)) { in mlx5_alloc_priv_descs()
1804 ret = -ENOMEM; in mlx5_alloc_priv_descs()
1810 kfree(mr->descs_alloc); in mlx5_alloc_priv_descs()
1818 if (!mr->umem && mr->descs) { in mlx5_free_priv_descs()
1819 struct ib_device *device = mr->ibmr.device; in mlx5_free_priv_descs()
1820 int size = mr->max_descs * mr->desc_size; in mlx5_free_priv_descs()
1823 dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size, in mlx5_free_priv_descs()
1825 kfree(mr->descs_alloc); in mlx5_free_priv_descs()
1826 mr->descs = NULL; in mlx5_free_priv_descs()
1833 struct mlx5_mkey_cache *cache = &dev->cache; in cache_ent_find_and_store()
1837 if (mr->mmkey.cache_ent) { in cache_ent_find_and_store()
1838 xa_lock_irq(&mr->mmkey.cache_ent->mkeys); in cache_ent_find_and_store()
1839 mr->mmkey.cache_ent->in_use--; in cache_ent_find_and_store()
1843 mutex_lock(&cache->rb_lock); in cache_ent_find_and_store()
1844 ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key); in cache_ent_find_and_store()
1846 if (ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) { in cache_ent_find_and_store()
1847 if (ent->disabled) { in cache_ent_find_and_store()
1848 mutex_unlock(&cache->rb_lock); in cache_ent_find_and_store()
1849 return -EOPNOTSUPP; in cache_ent_find_and_store()
1851 mr->mmkey.cache_ent = ent; in cache_ent_find_and_store()
1852 xa_lock_irq(&mr->mmkey.cache_ent->mkeys); in cache_ent_find_and_store()
1853 mutex_unlock(&cache->rb_lock); in cache_ent_find_and_store()
1858 ent = mlx5r_cache_create_ent_locked(dev, mr->mmkey.rb_key, false); in cache_ent_find_and_store()
1859 mutex_unlock(&cache->rb_lock); in cache_ent_find_and_store()
1863 mr->mmkey.cache_ent = ent; in cache_ent_find_and_store()
1864 xa_lock_irq(&mr->mmkey.cache_ent->mkeys); in cache_ent_find_and_store()
1867 ret = push_mkey_locked(mr->mmkey.cache_ent, false, in cache_ent_find_and_store()
1868 xa_mk_value(mr->mmkey.key)); in cache_ent_find_and_store()
1869 xa_unlock_irq(&mr->mmkey.cache_ent->mkeys); in cache_ent_find_and_store()
1876 struct mlx5_ib_dev *dev = to_mdev(ibmr->device); in mlx5_ib_dereg_mr()
1885 refcount_read(&mr->mmkey.usecount) != 0 && in mlx5_ib_dereg_mr()
1886 xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))) in mlx5_ib_dereg_mr()
1887 mlx5r_deref_wait_odp_mkey(&mr->mmkey); in mlx5_ib_dereg_mr()
1889 if (ibmr->type == IB_MR_TYPE_INTEGRITY) { in mlx5_ib_dereg_mr()
1890 xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), in mlx5_ib_dereg_mr()
1891 mr->sig, NULL, GFP_KERNEL); in mlx5_ib_dereg_mr()
1893 if (mr->mtt_mr) { in mlx5_ib_dereg_mr()
1894 rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL); in mlx5_ib_dereg_mr()
1897 mr->mtt_mr = NULL; in mlx5_ib_dereg_mr()
1899 if (mr->klm_mr) { in mlx5_ib_dereg_mr()
1900 rc = mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL); in mlx5_ib_dereg_mr()
1903 mr->klm_mr = NULL; in mlx5_ib_dereg_mr()
1906 if (mlx5_core_destroy_psv(dev->mdev, in mlx5_ib_dereg_mr()
1907 mr->sig->psv_memory.psv_idx)) in mlx5_ib_dereg_mr()
1909 mr->sig->psv_memory.psv_idx); in mlx5_ib_dereg_mr()
1910 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx)) in mlx5_ib_dereg_mr()
1912 mr->sig->psv_wire.psv_idx); in mlx5_ib_dereg_mr()
1913 kfree(mr->sig); in mlx5_ib_dereg_mr()
1914 mr->sig = NULL; in mlx5_ib_dereg_mr()
1918 if (mr->umem && mlx5r_umr_can_load_pas(dev, mr->umem->length)) in mlx5_ib_dereg_mr()
1921 mr->mmkey.cache_ent = NULL; in mlx5_ib_dereg_mr()
1923 if (!mr->mmkey.cache_ent) { in mlx5_ib_dereg_mr()
1924 rc = destroy_mkey(to_mdev(mr->ibmr.device), mr); in mlx5_ib_dereg_mr()
1929 if (mr->umem) { in mlx5_ib_dereg_mr()
1933 atomic_sub(ib_umem_num_pages(mr->umem), in mlx5_ib_dereg_mr()
1934 &dev->mdev->priv.reg_pages); in mlx5_ib_dereg_mr()
1935 ib_umem_release(mr->umem); in mlx5_ib_dereg_mr()
1940 if (!mr->mmkey.cache_ent) in mlx5_ib_dereg_mr()
1968 struct mlx5_ib_dev *dev = to_mdev(pd->device); in _mlx5_alloc_mkey_descs()
1971 mr->access_mode = access_mode; in _mlx5_alloc_mkey_descs()
1972 mr->desc_size = desc_size; in _mlx5_alloc_mkey_descs()
1973 mr->max_descs = ndescs; in _mlx5_alloc_mkey_descs()
1975 err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size); in _mlx5_alloc_mkey_descs()
1981 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in _mlx5_alloc_mkey_descs()
1985 mr->mmkey.type = MLX5_MKEY_MR; in _mlx5_alloc_mkey_descs()
1986 mr->ibmr.lkey = mr->mmkey.key; in _mlx5_alloc_mkey_descs()
1987 mr->ibmr.rkey = mr->mmkey.key; in _mlx5_alloc_mkey_descs()
2009 return ERR_PTR(-ENOMEM); in mlx5_ib_alloc_pi_mr()
2011 mr->ibmr.pd = pd; in mlx5_ib_alloc_pi_mr()
2012 mr->ibmr.device = pd->device; in mlx5_ib_alloc_pi_mr()
2016 err = -ENOMEM; in mlx5_ib_alloc_pi_mr()
2028 mr->umem = NULL; in mlx5_ib_alloc_pi_mr()
2059 struct mlx5_ib_dev *dev = to_mdev(pd->device); in mlx5_alloc_integrity_descs()
2064 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL); in mlx5_alloc_integrity_descs()
2065 if (!mr->sig) in mlx5_alloc_integrity_descs()
2066 return -ENOMEM; in mlx5_alloc_integrity_descs()
2069 err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index); in mlx5_alloc_integrity_descs()
2073 mr->sig->psv_memory.psv_idx = psv_index[0]; in mlx5_alloc_integrity_descs()
2074 mr->sig->psv_wire.psv_idx = psv_index[1]; in mlx5_alloc_integrity_descs()
2076 mr->sig->sig_status_checked = true; in mlx5_alloc_integrity_descs()
2077 mr->sig->sig_err_exists = false; in mlx5_alloc_integrity_descs()
2079 ++mr->sig->sigerr_count; in mlx5_alloc_integrity_descs()
2080 mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg, in mlx5_alloc_integrity_descs()
2083 if (IS_ERR(mr->klm_mr)) { in mlx5_alloc_integrity_descs()
2084 err = PTR_ERR(mr->klm_mr); in mlx5_alloc_integrity_descs()
2087 mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg, in mlx5_alloc_integrity_descs()
2090 if (IS_ERR(mr->mtt_mr)) { in mlx5_alloc_integrity_descs()
2091 err = PTR_ERR(mr->mtt_mr); in mlx5_alloc_integrity_descs()
2105 err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), in mlx5_alloc_integrity_descs()
2106 mr->sig, GFP_KERNEL)); in mlx5_alloc_integrity_descs()
2115 mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL); in mlx5_alloc_integrity_descs()
2116 mr->mtt_mr = NULL; in mlx5_alloc_integrity_descs()
2118 mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL); in mlx5_alloc_integrity_descs()
2119 mr->klm_mr = NULL; in mlx5_alloc_integrity_descs()
2121 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx)) in mlx5_alloc_integrity_descs()
2123 mr->sig->psv_memory.psv_idx); in mlx5_alloc_integrity_descs()
2124 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx)) in mlx5_alloc_integrity_descs()
2126 mr->sig->psv_wire.psv_idx); in mlx5_alloc_integrity_descs()
2128 kfree(mr->sig); in mlx5_alloc_integrity_descs()
2137 struct mlx5_ib_dev *dev = to_mdev(pd->device); in __mlx5_ib_alloc_mr()
2146 return ERR_PTR(-ENOMEM); in __mlx5_ib_alloc_mr()
2150 err = -ENOMEM; in __mlx5_ib_alloc_mr()
2154 mr->ibmr.device = pd->device; in __mlx5_ib_alloc_mr()
2155 mr->umem = NULL; in __mlx5_ib_alloc_mr()
2170 err = -EINVAL; in __mlx5_ib_alloc_mr()
2178 return &mr->ibmr; in __mlx5_ib_alloc_mr()
2202 struct mlx5_ib_dev *dev = to_mdev(ibmw->device); in mlx5_ib_alloc_mw()
2215 err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req))); in mlx5_ib_alloc_mw()
2220 return -EOPNOTSUPP; in mlx5_ib_alloc_mw()
2222 if (udata->inlen > sizeof(req) && in mlx5_ib_alloc_mw()
2224 udata->inlen - sizeof(req))) in mlx5_ib_alloc_mw()
2225 return -EOPNOTSUPP; in mlx5_ib_alloc_mw()
2231 return -ENOMEM; in mlx5_ib_alloc_mw()
2237 MLX5_SET(mkc, mkc, pd, to_mpd(ibmw->pd)->pdn); in mlx5_ib_alloc_mw()
2241 MLX5_SET(mkc, mkc, en_rinval, !!((ibmw->type == IB_MW_TYPE_2))); in mlx5_ib_alloc_mw()
2244 err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen); in mlx5_ib_alloc_mw()
2248 mw->mmkey.type = MLX5_MKEY_MW; in mlx5_ib_alloc_mw()
2249 ibmw->rkey = mw->mmkey.key; in mlx5_ib_alloc_mw()
2250 mw->mmkey.ndescs = ndescs; in mlx5_ib_alloc_mw()
2253 min(offsetofend(typeof(resp), response_length), udata->outlen); in mlx5_ib_alloc_mw()
2261 err = mlx5r_store_odp_mkey(dev, &mw->mmkey); in mlx5_ib_alloc_mw()
2270 mlx5_core_destroy_mkey(dev->mdev, mw->mmkey.key); in mlx5_ib_alloc_mw()
2278 struct mlx5_ib_dev *dev = to_mdev(mw->device); in mlx5_ib_dealloc_mw()
2282 xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key))) in mlx5_ib_dealloc_mw()
2287 mlx5r_deref_wait_odp_mkey(&mmw->mmkey); in mlx5_ib_dealloc_mw()
2289 return mlx5_core_destroy_mkey(dev->mdev, mmw->mmkey.key); in mlx5_ib_dealloc_mw()
2300 ret = -EINVAL; in mlx5_ib_check_mr_status()
2304 mr_status->fail_status = 0; in mlx5_ib_check_mr_status()
2306 if (!mmr->sig) { in mlx5_ib_check_mr_status()
2307 ret = -EINVAL; in mlx5_ib_check_mr_status()
2308 pr_err("signature status check requested on a non-signature enabled MR\n"); in mlx5_ib_check_mr_status()
2312 mmr->sig->sig_status_checked = true; in mlx5_ib_check_mr_status()
2313 if (!mmr->sig->sig_err_exists) in mlx5_ib_check_mr_status()
2316 if (ibmr->lkey == mmr->sig->err_item.key) in mlx5_ib_check_mr_status()
2317 memcpy(&mr_status->sig_err, &mmr->sig->err_item, in mlx5_ib_check_mr_status()
2318 sizeof(mr_status->sig_err)); in mlx5_ib_check_mr_status()
2320 mr_status->sig_err.err_type = IB_SIG_BAD_GUARD; in mlx5_ib_check_mr_status()
2321 mr_status->sig_err.sig_err_offset = 0; in mlx5_ib_check_mr_status()
2322 mr_status->sig_err.key = mmr->sig->err_item.key; in mlx5_ib_check_mr_status()
2325 mmr->sig->sig_err_exists = false; in mlx5_ib_check_mr_status()
2326 mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS; in mlx5_ib_check_mr_status()
2343 mr->meta_length = 0; in mlx5_ib_map_pa_mr_sg_pi()
2346 mr->mmkey.ndescs = 1; in mlx5_ib_map_pa_mr_sg_pi()
2349 mr->data_length = sg_dma_len(data_sg) - sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2350 mr->data_iova = sg_dma_address(data_sg) + sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2353 mr->meta_ndescs = 1; in mlx5_ib_map_pa_mr_sg_pi()
2358 mr->meta_length = sg_dma_len(meta_sg) - sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2359 mr->pi_iova = sg_dma_address(meta_sg) + sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2361 ibmr->length = mr->data_length + mr->meta_length; in mlx5_ib_map_pa_mr_sg_pi()
2377 struct mlx5_klm *klms = mr->descs; in mlx5_ib_sg_to_klms()
2379 u32 lkey = mr->ibmr.pd->local_dma_lkey; in mlx5_ib_sg_to_klms()
2382 mr->ibmr.iova = sg_dma_address(sg) + sg_offset; in mlx5_ib_sg_to_klms()
2383 mr->ibmr.length = 0; in mlx5_ib_sg_to_klms()
2386 if (unlikely(i >= mr->max_descs)) in mlx5_ib_sg_to_klms()
2389 klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset); in mlx5_ib_sg_to_klms()
2391 mr->ibmr.length += sg_dma_len(sg) - sg_offset; in mlx5_ib_sg_to_klms()
2399 mr->mmkey.ndescs = i; in mlx5_ib_sg_to_klms()
2400 mr->data_length = mr->ibmr.length; in mlx5_ib_sg_to_klms()
2406 if (unlikely(i + j >= mr->max_descs)) in mlx5_ib_sg_to_klms()
2410 klms[i + j].bcount = cpu_to_be32(sg_dma_len(sg) - in mlx5_ib_sg_to_klms()
2413 mr->ibmr.length += sg_dma_len(sg) - sg_offset; in mlx5_ib_sg_to_klms()
2420 mr->meta_ndescs = j; in mlx5_ib_sg_to_klms()
2421 mr->meta_length = mr->ibmr.length - mr->data_length; in mlx5_ib_sg_to_klms()
2432 if (unlikely(mr->mmkey.ndescs == mr->max_descs)) in mlx5_set_page()
2433 return -ENOMEM; in mlx5_set_page()
2435 descs = mr->descs; in mlx5_set_page()
2436 descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); in mlx5_set_page()
2446 if (unlikely(mr->mmkey.ndescs + mr->meta_ndescs == mr->max_descs)) in mlx5_set_page_pi()
2447 return -ENOMEM; in mlx5_set_page_pi()
2449 descs = mr->descs; in mlx5_set_page_pi()
2450 descs[mr->mmkey.ndescs + mr->meta_ndescs++] = in mlx5_set_page_pi()
2463 struct mlx5_ib_mr *pi_mr = mr->mtt_mr; in mlx5_ib_map_mtt_mr_sg_pi()
2466 pi_mr->mmkey.ndescs = 0; in mlx5_ib_map_mtt_mr_sg_pi()
2467 pi_mr->meta_ndescs = 0; in mlx5_ib_map_mtt_mr_sg_pi()
2468 pi_mr->meta_length = 0; in mlx5_ib_map_mtt_mr_sg_pi()
2470 ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map, in mlx5_ib_map_mtt_mr_sg_pi()
2471 pi_mr->desc_size * pi_mr->max_descs, in mlx5_ib_map_mtt_mr_sg_pi()
2474 pi_mr->ibmr.page_size = ibmr->page_size; in mlx5_ib_map_mtt_mr_sg_pi()
2475 n = ib_sg_to_pages(&pi_mr->ibmr, data_sg, data_sg_nents, data_sg_offset, in mlx5_ib_map_mtt_mr_sg_pi()
2480 pi_mr->data_iova = pi_mr->ibmr.iova; in mlx5_ib_map_mtt_mr_sg_pi()
2481 pi_mr->data_length = pi_mr->ibmr.length; in mlx5_ib_map_mtt_mr_sg_pi()
2482 pi_mr->ibmr.length = pi_mr->data_length; in mlx5_ib_map_mtt_mr_sg_pi()
2483 ibmr->length = pi_mr->data_length; in mlx5_ib_map_mtt_mr_sg_pi()
2486 u64 page_mask = ~((u64)ibmr->page_size - 1); in mlx5_ib_map_mtt_mr_sg_pi()
2487 u64 iova = pi_mr->data_iova; in mlx5_ib_map_mtt_mr_sg_pi()
2489 n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents, in mlx5_ib_map_mtt_mr_sg_pi()
2492 pi_mr->meta_length = pi_mr->ibmr.length; in mlx5_ib_map_mtt_mr_sg_pi()
2499 pi_mr->pi_iova = (iova & page_mask) + in mlx5_ib_map_mtt_mr_sg_pi()
2500 pi_mr->mmkey.ndescs * ibmr->page_size + in mlx5_ib_map_mtt_mr_sg_pi()
2501 (pi_mr->ibmr.iova & ~page_mask); in mlx5_ib_map_mtt_mr_sg_pi()
2505 * the metadata (the sig MR will verify that the HW will access in mlx5_ib_map_mtt_mr_sg_pi()
2509 pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova; in mlx5_ib_map_mtt_mr_sg_pi()
2510 pi_mr->ibmr.iova = iova; in mlx5_ib_map_mtt_mr_sg_pi()
2511 ibmr->length += pi_mr->meta_length; in mlx5_ib_map_mtt_mr_sg_pi()
2514 ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map, in mlx5_ib_map_mtt_mr_sg_pi()
2515 pi_mr->desc_size * pi_mr->max_descs, in mlx5_ib_map_mtt_mr_sg_pi()
2528 struct mlx5_ib_mr *pi_mr = mr->klm_mr; in mlx5_ib_map_klm_mr_sg_pi()
2531 pi_mr->mmkey.ndescs = 0; in mlx5_ib_map_klm_mr_sg_pi()
2532 pi_mr->meta_ndescs = 0; in mlx5_ib_map_klm_mr_sg_pi()
2533 pi_mr->meta_length = 0; in mlx5_ib_map_klm_mr_sg_pi()
2535 ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map, in mlx5_ib_map_klm_mr_sg_pi()
2536 pi_mr->desc_size * pi_mr->max_descs, in mlx5_ib_map_klm_mr_sg_pi()
2542 ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map, in mlx5_ib_map_klm_mr_sg_pi()
2543 pi_mr->desc_size * pi_mr->max_descs, in mlx5_ib_map_klm_mr_sg_pi()
2546 /* This is zero-based memory region */ in mlx5_ib_map_klm_mr_sg_pi()
2547 pi_mr->data_iova = 0; in mlx5_ib_map_klm_mr_sg_pi()
2548 pi_mr->ibmr.iova = 0; in mlx5_ib_map_klm_mr_sg_pi()
2549 pi_mr->pi_iova = pi_mr->data_length; in mlx5_ib_map_klm_mr_sg_pi()
2550 ibmr->length = pi_mr->ibmr.length; in mlx5_ib_map_klm_mr_sg_pi()
2564 WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY); in mlx5_ib_map_mr_sg_pi()
2566 mr->mmkey.ndescs = 0; in mlx5_ib_map_mr_sg_pi()
2567 mr->data_length = 0; in mlx5_ib_map_mr_sg_pi()
2568 mr->data_iova = 0; in mlx5_ib_map_mr_sg_pi()
2569 mr->meta_ndescs = 0; in mlx5_ib_map_mr_sg_pi()
2570 mr->pi_iova = 0; in mlx5_ib_map_mr_sg_pi()
2590 pi_mr = mr->mtt_mr; in mlx5_ib_map_mr_sg_pi()
2597 pi_mr = mr->klm_mr; in mlx5_ib_map_mr_sg_pi()
2602 return -ENOMEM; in mlx5_ib_map_mr_sg_pi()
2605 /* This is zero-based memory region */ in mlx5_ib_map_mr_sg_pi()
2606 ibmr->iova = 0; in mlx5_ib_map_mr_sg_pi()
2607 mr->pi_mr = pi_mr; in mlx5_ib_map_mr_sg_pi()
2609 ibmr->sig_attrs->meta_length = pi_mr->meta_length; in mlx5_ib_map_mr_sg_pi()
2611 ibmr->sig_attrs->meta_length = mr->meta_length; in mlx5_ib_map_mr_sg_pi()
2622 mr->mmkey.ndescs = 0; in mlx5_ib_map_mr_sg()
2624 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map, in mlx5_ib_map_mr_sg()
2625 mr->desc_size * mr->max_descs, in mlx5_ib_map_mr_sg()
2628 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS) in mlx5_ib_map_mr_sg()
2635 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map, in mlx5_ib_map_mr_sg()
2636 mr->desc_size * mr->max_descs, in mlx5_ib_map_mr_sg()
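
For context on the "dir" matches around source lines 860-873: each cache entry gets its own debugfs directory under mr_cache, populated with "size", "limit", "cur" and "miss" attributes. The following is a minimal, self-contained sketch of that layout using the same debugfs helpers; the module and the demo_* names are hypothetical simplifications (the real driver also wires "size" and "limit" through custom file_operations), not the driver's code.

/*
 * Hedged sketch: a per-entry debugfs directory in the style of
 * mlx5_mkey_cache_debugfs_add_ent(). All demo_* names are made up.
 */
#include <linux/debugfs.h>
#include <linux/module.h>

struct demo_cache_ent {
	char name[16];
	unsigned long stored;	/* exposed read-only, like "cur" */
	u32 miss;		/* exposed read-write, like "miss" */
};

static struct dentry *demo_root;
static struct demo_cache_ent demo_ent = { .name = "4" };

static int __init demo_init(void)
{
	struct dentry *dir;

	demo_root = debugfs_create_dir("demo_mr_cache", NULL);
	dir = debugfs_create_dir(demo_ent.name, demo_root);
	debugfs_create_ulong("cur", 0400, dir, &demo_ent.stored);
	debugfs_create_u32("miss", 0600, dir, &demo_ent.miss);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_root);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");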
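
On the "sig" side, the matches around source lines 2300-2326 are the mlx5 backend of ib_check_mr_status(). A ULP that registered an IB_MR_TYPE_INTEGRITY MR consumes that path roughly as below; sig_mr stands for an already-used integrity MR and the handling is deliberately minimal, so read it as a hedged usage sketch rather than a reference implementation.

/* Hedged usage sketch: querying signature status on an integrity MR. */
#include <linux/printk.h>
#include <rdma/ib_verbs.h>

static void demo_check_sig_status(struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		pr_err("ib_check_mr_status() failed: %d\n", ret);
		return;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS)
		pr_err("signature error: type %d, offset %llu, key 0x%x\n",
		       mr_status.sig_err.err_type,
		       mr_status.sig_err.sig_err_offset,
		       mr_status.sig_err.key);
}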