Lines Matching +full:sig +full:- +full:dir

2  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
15 * - Redistributions of source code must retain the above
19 * - Redistributions in binary form must reproduce the above
40 #include <linux/dma-buf.h>
41 #include <linux/dma-resv.h>
62 struct mlx5_ib_dev *dev = to_mdev(pd->device); in set_mkc_access_pd_addr_fields()
71 if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write)) in set_mkc_access_pd_addr_fields()
74 if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) || in set_mkc_access_pd_addr_fields()
75 (MLX5_CAP_GEN(dev->mdev, in set_mkc_access_pd_addr_fields()
77 pcie_relaxed_ordering_enabled(dev->mdev->pdev))) in set_mkc_access_pd_addr_fields()
81 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); in set_mkc_access_pd_addr_fields()
88 u8 key = atomic_inc_return(&dev->mkey_var); in assign_mkey_variant()
101 assign_mkey_variant(dev, &mkey->key, in); in mlx5_ib_create_mkey()
102 ret = mlx5_core_create_mkey(dev->mdev, &mkey->key, in, inlen); in mlx5_ib_create_mkey()
104 init_waitqueue_head(&mkey->wait); in mlx5_ib_create_mkey()
111 struct mlx5_ib_dev *dev = async_create->ent->dev; in mlx5_ib_create_mkey_cb()
115 MLX5_SET(create_mkey_in, async_create->in, opcode, in mlx5_ib_create_mkey_cb()
117 assign_mkey_variant(dev, &async_create->mkey, async_create->in); in mlx5_ib_create_mkey_cb()
118 return mlx5_cmd_exec_cb(&dev->async_ctx, async_create->in, inlen, in mlx5_ib_create_mkey_cb()
119 async_create->out, outlen, create_mkey_callback, in mlx5_ib_create_mkey_cb()
120 &async_create->cb_work); in mlx5_ib_create_mkey_cb()
128 WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))); in destroy_mkey()
130 return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key); in destroy_mkey()
135 if (status == -ENXIO) /* core driver is not available */ in create_mkey_warn()
139 if (status != -EREMOTEIO) /* driver specific failure */ in create_mkey_warn()
143 mlx5_cmd_out_err(dev->mdev, MLX5_CMD_OP_CREATE_MKEY, 0, out); in create_mkey_warn()
149 XA_STATE(xas, &ent->mkeys, 0); in push_mkey_locked()
153 (ent->reserved - ent->stored) > MAX_PENDING_REG_MR) in push_mkey_locked()
154 return -EAGAIN; in push_mkey_locked()
162 xas_set(&xas, ent->reserved); in push_mkey_locked()
165 if (to_store && ent->stored == ent->reserved) in push_mkey_locked()
170 ent->reserved++; in push_mkey_locked()
172 if (ent->stored != ent->reserved) in push_mkey_locked()
173 __xa_store(&ent->mkeys, in push_mkey_locked()
174 ent->stored, in push_mkey_locked()
177 ent->stored++; in push_mkey_locked()
179 WRITE_ONCE(ent->dev->cache.last_add, in push_mkey_locked()
184 xa_unlock_irq(&ent->mkeys); in push_mkey_locked()
192 xa_lock_irq(&ent->mkeys); in push_mkey_locked()
194 xa_lock_irq(&ent->mkeys); in push_mkey_locked()
198 return -EINVAL; in push_mkey_locked()
207 xa_lock_irq(&ent->mkeys); in push_mkey()
209 xa_unlock_irq(&ent->mkeys); in push_mkey()
217 ent->reserved--; in undo_push_reserve_mkey()
218 old = __xa_erase(&ent->mkeys, ent->reserved); in undo_push_reserve_mkey()
226 old = __xa_store(&ent->mkeys, ent->stored, xa_mk_value(mkey), 0); in push_to_reserved()
228 ent->stored++; in push_to_reserved()
235 ent->stored--; in pop_stored_mkey()
236 ent->reserved--; in pop_stored_mkey()
238 if (ent->stored == ent->reserved) { in pop_stored_mkey()
239 xa_mkey = __xa_erase(&ent->mkeys, ent->stored); in pop_stored_mkey()
244 xa_mkey = __xa_store(&ent->mkeys, ent->stored, XA_ZERO_ENTRY, in pop_stored_mkey()
247 old = __xa_erase(&ent->mkeys, ent->reserved); in pop_stored_mkey()
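The push/pop fragments above maintain a small per-entry stack of cached mkeys with two counters, stored and reserved. Below is a minimal userspace sketch of that bookkeeping, assuming a plain fixed-size array in place of the kernel xarray; the names mkey_stack, reserve_slot, push_stored and pop_stored, and the STACK_CAP/MAX_PENDING constants, are hypothetical and only illustrate the pattern seen in push_mkey_locked(), push_to_reserved() and pop_stored_mkey().

#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define STACK_CAP   128  /* hypothetical capacity */
#define MAX_PENDING 8    /* stand-in for MAX_PENDING_REG_MR */

struct mkey_stack {
	uint32_t slots[STACK_CAP];
	unsigned long stored;   /* mkeys ready to hand out: slots[0..stored) */
	unsigned long reserved; /* stored plus creations still in flight */
};

/* Claim a slot up front, before an asynchronous mkey creation completes. */
static int reserve_slot(struct mkey_stack *s)
{
	/* Mirrors the pending-creation throttle seen in push_mkey_locked(). */
	if (s->reserved - s->stored > MAX_PENDING || s->reserved >= STACK_CAP)
		return -EAGAIN;
	s->reserved++;
	return 0;
}

/* Completion path: the freshly created mkey fills the lowest free slot. */
static void push_stored(struct mkey_stack *s, uint32_t mkey)
{
	assert(s->stored < s->reserved);
	s->slots[s->stored++] = mkey;
}

/*
 * Allocation path: pop the most recently stored mkey. Both counters drop,
 * since handing out an mkey also releases its reservation; the kernel code
 * additionally shuffles an XA_ZERO_ENTRY placeholder so a still-pending
 * reservation keeps a claimed xarray index.
 */
static uint32_t pop_stored(struct mkey_stack *s)
{
	assert(s->stored > 0);
	s->stored--;
	s->reserved--;
	return s->slots[s->stored];
}

int main(void)
{
	struct mkey_stack s = { .stored = 0, .reserved = 0 };

	if (!reserve_slot(&s))
		push_stored(&s, 0x2042);
	printf("popped mkey 0x%x, stored=%lu reserved=%lu\n",
	       pop_stored(&s), s.stored, s.reserved);
	return 0;
}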
256 struct mlx5_cache_ent *ent = mkey_out->ent; in create_mkey_callback()
257 struct mlx5_ib_dev *dev = ent->dev; in create_mkey_callback()
261 create_mkey_warn(dev, status, mkey_out->out); in create_mkey_callback()
263 xa_lock_irqsave(&ent->mkeys, flags); in create_mkey_callback()
265 WRITE_ONCE(dev->fill_delay, 1); in create_mkey_callback()
266 xa_unlock_irqrestore(&ent->mkeys, flags); in create_mkey_callback()
267 mod_timer(&dev->delay_timer, jiffies + HZ); in create_mkey_callback()
271 mkey_out->mkey |= mlx5_idx_to_mkey( in create_mkey_callback()
272 MLX5_GET(create_mkey_out, mkey_out->out, mkey_index)); in create_mkey_callback()
273 WRITE_ONCE(dev->cache.last_add, jiffies); in create_mkey_callback()
275 xa_lock_irqsave(&ent->mkeys, flags); in create_mkey_callback()
276 push_to_reserved(ent, mkey_out->mkey); in create_mkey_callback()
279 xa_unlock_irqrestore(&ent->mkeys, flags); in create_mkey_callback()
304 set_mkc_access_pd_addr_fields(mkc, ent->rb_key.access_flags, 0, in set_cache_mkc()
305 ent->dev->umrc.pd); in set_cache_mkc()
308 MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3); in set_cache_mkc()
310 (ent->rb_key.access_mode >> 2) & 0x7); in set_cache_mkc()
313 get_mkc_octo_size(ent->rb_key.access_mode, in set_cache_mkc()
314 ent->rb_key.ndescs)); in set_cache_mkc()
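The two MLX5_SET() calls above split the 5-bit mkey access mode across the mkc access_mode_1_0 and access_mode_4_2 fields. A small sketch of that split and its inverse, using hypothetical struct and helper names:

#include <assert.h>
#include <stdio.h>

struct mkc_mode_fields {
	unsigned int access_mode_1_0 : 2; /* low two bits of the mode */
	unsigned int access_mode_4_2 : 3; /* remaining high bits */
};

static void split_access_mode(struct mkc_mode_fields *f, unsigned int mode)
{
	f->access_mode_1_0 = mode & 0x3;
	f->access_mode_4_2 = (mode >> 2) & 0x7;
}

static unsigned int join_access_mode(const struct mkc_mode_fields *f)
{
	return (f->access_mode_4_2 << 2) | f->access_mode_1_0;
}

int main(void)
{
	struct mkc_mode_fields f;

	split_access_mode(&f, 0x5); /* any 5-bit access mode value */
	assert(join_access_mode(&f) == 0x5);
	printf("access mode round-trips: 0x%x\n", join_access_mode(&f));
	return 0;
}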
330 return -ENOMEM; in add_keys()
331 mkc = MLX5_ADDR_OF(create_mkey_in, async_create->in, in add_keys()
334 async_create->ent = ent; in add_keys()
342 mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err); in add_keys()
350 xa_lock_irq(&ent->mkeys); in add_keys()
352 xa_unlock_irq(&ent->mkeys); in add_keys()
368 return -ENOMEM; in create_cache_mkey()
372 err = mlx5_core_create_mkey(ent->dev->mdev, mkey, in, inlen); in create_cache_mkey()
376 WRITE_ONCE(ent->dev->cache.last_add, jiffies); in create_cache_mkey()
386 lockdep_assert_held(&ent->mkeys.xa_lock); in remove_cache_mr_locked()
387 if (!ent->stored) in remove_cache_mr_locked()
390 xa_unlock_irq(&ent->mkeys); in remove_cache_mr_locked()
391 mlx5_core_destroy_mkey(ent->dev->mdev, mkey); in remove_cache_mr_locked()
392 xa_lock_irq(&ent->mkeys); in remove_cache_mr_locked()
397 __acquires(&ent->mkeys) __releases(&ent->mkeys) in resize_available_mrs()
401 lockdep_assert_held(&ent->mkeys.xa_lock); in resize_available_mrs()
405 target = ent->limit * 2; in resize_available_mrs()
406 if (target == ent->reserved) in resize_available_mrs()
408 if (target > ent->reserved) { in resize_available_mrs()
409 u32 todo = target - ent->reserved; in resize_available_mrs()
411 xa_unlock_irq(&ent->mkeys); in resize_available_mrs()
413 if (err == -EAGAIN) in resize_available_mrs()
415 xa_lock_irq(&ent->mkeys); in resize_available_mrs()
417 if (err != -EAGAIN) in resize_available_mrs()
430 struct mlx5_cache_ent *ent = filp->private_data; in size_write()
443 xa_lock_irq(&ent->mkeys); in size_write()
444 if (target < ent->in_use) { in size_write()
445 err = -EINVAL; in size_write()
448 target = target - ent->in_use; in size_write()
449 if (target < ent->limit || target > ent->limit*2) { in size_write()
450 err = -EINVAL; in size_write()
456 xa_unlock_irq(&ent->mkeys); in size_write()
461 xa_unlock_irq(&ent->mkeys); in size_write()
468 struct mlx5_cache_ent *ent = filp->private_data; in size_read()
472 err = snprintf(lbuf, sizeof(lbuf), "%ld\n", ent->stored + ent->in_use); in size_read()
489 struct mlx5_cache_ent *ent = filp->private_data; in limit_write()
501 xa_lock_irq(&ent->mkeys); in limit_write()
502 ent->limit = var; in limit_write()
504 xa_unlock_irq(&ent->mkeys); in limit_write()
513 struct mlx5_cache_ent *ent = filp->private_data; in limit_read()
517 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit); in limit_read()
537 mutex_lock(&cache->rb_lock); in someone_adding()
538 for (node = rb_first(&cache->rb_root); node; node = rb_next(node)) { in someone_adding()
540 xa_lock_irq(&ent->mkeys); in someone_adding()
541 ret = ent->stored < ent->limit; in someone_adding()
542 xa_unlock_irq(&ent->mkeys); in someone_adding()
544 mutex_unlock(&cache->rb_lock); in someone_adding()
548 mutex_unlock(&cache->rb_lock); in someone_adding()
559 lockdep_assert_held(&ent->mkeys.xa_lock); in queue_adjust_cache_locked()
561 if (ent->disabled || READ_ONCE(ent->dev->fill_delay) || ent->is_tmp) in queue_adjust_cache_locked()
563 if (ent->stored < ent->limit) { in queue_adjust_cache_locked()
564 ent->fill_to_high_water = true; in queue_adjust_cache_locked()
565 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); in queue_adjust_cache_locked()
566 } else if (ent->fill_to_high_water && in queue_adjust_cache_locked()
567 ent->reserved < 2 * ent->limit) { in queue_adjust_cache_locked()
572 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); in queue_adjust_cache_locked()
573 } else if (ent->stored == 2 * ent->limit) { in queue_adjust_cache_locked()
574 ent->fill_to_high_water = false; in queue_adjust_cache_locked()
575 } else if (ent->stored > 2 * ent->limit) { in queue_adjust_cache_locked()
577 ent->fill_to_high_water = false; in queue_adjust_cache_locked()
578 if (ent->stored != ent->reserved) in queue_adjust_cache_locked()
579 queue_delayed_work(ent->dev->cache.wq, &ent->dwork, in queue_adjust_cache_locked()
582 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); in queue_adjust_cache_locked()
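queue_adjust_cache_locked() above implements a low/high water-mark hysteresis: once an entry drops below limit it keeps refilling until it reaches 2 * limit, and once it overshoots 2 * limit it is shrunk, lazily while creations are still outstanding. A reduced decision-function sketch, with hypothetical enum and struct names:

#include <stdbool.h>
#include <stdio.h>

enum cache_action {
	CACHE_IDLE,        /* between the water marks, nothing to do */
	CACHE_FILL_NOW,    /* schedule the fill work immediately */
	CACHE_SHRINK_NOW,  /* shrink right away */
	CACHE_SHRINK_LATER /* shrink after a delay */
};

struct ent_state {
	unsigned long stored;   /* mkeys ready for allocation */
	unsigned long reserved; /* stored plus creations in flight */
	unsigned long limit;    /* low water; 2 * limit is high water */
	bool fill_to_high_water;
};

static enum cache_action adjust_cache(struct ent_state *e)
{
	if (e->stored < e->limit) {
		/* Dropped below low water: keep filling up to 2 * limit. */
		e->fill_to_high_water = true;
		return CACHE_FILL_NOW;
	}
	if (e->fill_to_high_water && e->reserved < 2 * e->limit)
		return CACHE_FILL_NOW;
	if (e->stored == 2 * e->limit) {
		e->fill_to_high_water = false;
		return CACHE_IDLE;
	}
	if (e->stored > 2 * e->limit) {
		e->fill_to_high_water = false;
		/* Defer the shrink while creations are still outstanding. */
		return e->stored != e->reserved ? CACHE_SHRINK_LATER
						: CACHE_SHRINK_NOW;
	}
	return CACHE_IDLE;
}

int main(void)
{
	struct ent_state e = { .stored = 3, .reserved = 3, .limit = 8,
			       .fill_to_high_water = false };

	printf("action=%d\n", adjust_cache(&e)); /* prints CACHE_FILL_NOW (1) */
	return 0;
}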
588 struct mlx5_ib_dev *dev = ent->dev; in __cache_work_func()
589 struct mlx5_mkey_cache *cache = &dev->cache; in __cache_work_func()
592 xa_lock_irq(&ent->mkeys); in __cache_work_func()
593 if (ent->disabled) in __cache_work_func()
596 if (ent->fill_to_high_water && ent->reserved < 2 * ent->limit && in __cache_work_func()
597 !READ_ONCE(dev->fill_delay)) { in __cache_work_func()
598 xa_unlock_irq(&ent->mkeys); in __cache_work_func()
600 xa_lock_irq(&ent->mkeys); in __cache_work_func()
601 if (ent->disabled) in __cache_work_func()
609 if (err != -EAGAIN) { in __cache_work_func()
614 queue_delayed_work(cache->wq, &ent->dwork, in __cache_work_func()
618 } else if (ent->stored > 2 * ent->limit) { in __cache_work_func()
633 xa_unlock_irq(&ent->mkeys); in __cache_work_func()
636 READ_ONCE(cache->last_add) + 300 * HZ); in __cache_work_func()
637 xa_lock_irq(&ent->mkeys); in __cache_work_func()
638 if (ent->disabled) in __cache_work_func()
641 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); in __cache_work_func()
648 xa_unlock_irq(&ent->mkeys); in __cache_work_func()
664 res = key1.ats - key2.ats; in cache_ent_key_cmp()
668 res = key1.access_mode - key2.access_mode; in cache_ent_key_cmp()
672 res = key1.access_flags - key2.access_flags; in cache_ent_key_cmp()
681 return key1.ndescs - key2.ndescs; in cache_ent_key_cmp()
687 struct rb_node **new = &cache->rb_root.rb_node, *parent = NULL; in mlx5_cache_ent_insert()
695 cmp = cache_ent_key_cmp(cur->rb_key, ent->rb_key); in mlx5_cache_ent_insert()
697 new = &((*new)->rb_left); in mlx5_cache_ent_insert()
699 new = &((*new)->rb_right); in mlx5_cache_ent_insert()
701 mutex_unlock(&cache->rb_lock); in mlx5_cache_ent_insert()
702 return -EEXIST; in mlx5_cache_ent_insert()
707 rb_link_node(&ent->node, parent, new); in mlx5_cache_ent_insert()
708 rb_insert_color(&ent->node, &cache->rb_root); in mlx5_cache_ent_insert()
717 struct rb_node *node = dev->cache.rb_root.rb_node; in mkey_cache_ent_from_rb_key()
726 cmp = cache_ent_key_cmp(cur->rb_key, rb_key); in mkey_cache_ent_from_rb_key()
729 node = node->rb_left; in mkey_cache_ent_from_rb_key()
732 node = node->rb_right; in mkey_cache_ent_from_rb_key()
738 smallest->rb_key.access_mode == rb_key.access_mode && in mkey_cache_ent_from_rb_key()
739 smallest->rb_key.access_flags == rb_key.access_flags && in mkey_cache_ent_from_rb_key()
740 smallest->rb_key.ats == rb_key.ats) ? in mkey_cache_ent_from_rb_key()
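cache_ent_key_cmp() and mkey_cache_ent_from_rb_key() above order cache entries lexicographically by ats, access_mode, access_flags and finally ndescs, so a lookup that misses on size can still fall back to the closest larger entry with identical access properties. A standalone sketch of that ordering; the struct and function names here are hypothetical:

#include <stdio.h>
#include <stdlib.h>

struct cache_rb_key {
	unsigned int ats;
	unsigned int access_mode;
	unsigned int access_flags;
	unsigned int ndescs;
};

static int key_cmp(const void *a, const void *b)
{
	const struct cache_rb_key *k1 = a, *k2 = b;
	int res;

	res = (int)k1->ats - (int)k2->ats;
	if (res)
		return res;
	res = (int)k1->access_mode - (int)k2->access_mode;
	if (res)
		return res;
	res = (int)k1->access_flags - (int)k2->access_flags;
	if (res)
		return res;
	/* Size is the least significant component of the ordering. */
	return (int)k1->ndescs - (int)k2->ndescs;
}

int main(void)
{
	struct cache_rb_key keys[] = {
		{ 0, 1, 0x4, 256 },
		{ 0, 1, 0x4, 8 },
		{ 0, 1, 0x2, 64 },
	};

	qsort(keys, 3, sizeof(keys[0]), key_cmp);
	for (int i = 0; i < 3; i++)
		printf("flags=0x%x ndescs=%u\n",
		       keys[i].access_flags, keys[i].ndescs);
	return 0;
}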
754 return ERR_PTR(-ENOMEM); in _mlx5_mr_cache_alloc()
756 xa_lock_irq(&ent->mkeys); in _mlx5_mr_cache_alloc()
757 ent->in_use++; in _mlx5_mr_cache_alloc()
759 if (!ent->stored) { in _mlx5_mr_cache_alloc()
761 ent->miss++; in _mlx5_mr_cache_alloc()
762 xa_unlock_irq(&ent->mkeys); in _mlx5_mr_cache_alloc()
763 err = create_cache_mkey(ent, &mr->mmkey.key); in _mlx5_mr_cache_alloc()
765 xa_lock_irq(&ent->mkeys); in _mlx5_mr_cache_alloc()
766 ent->in_use--; in _mlx5_mr_cache_alloc()
767 xa_unlock_irq(&ent->mkeys); in _mlx5_mr_cache_alloc()
772 mr->mmkey.key = pop_stored_mkey(ent); in _mlx5_mr_cache_alloc()
774 xa_unlock_irq(&ent->mkeys); in _mlx5_mr_cache_alloc()
776 mr->mmkey.cache_ent = ent; in _mlx5_mr_cache_alloc()
777 mr->mmkey.type = MLX5_MKEY_MR; in _mlx5_mr_cache_alloc()
778 init_waitqueue_head(&mr->mmkey.wait); in _mlx5_mr_cache_alloc()
788 MLX5_CAP_GEN(dev->mdev, atomic) && in get_unchangeable_access_flags()
789 MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)) in get_unchangeable_access_flags()
793 MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) && in get_unchangeable_access_flags()
794 !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr)) in get_unchangeable_access_flags()
798 (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) || in get_unchangeable_access_flags()
799 MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_pci_enabled)) && in get_unchangeable_access_flags()
800 !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr)) in get_unchangeable_access_flags()
818 return ERR_PTR(-EOPNOTSUPP); in mlx5_mr_cache_alloc()
827 cancel_delayed_work(&ent->dwork); in clean_keys()
828 xa_lock_irq(&ent->mkeys); in clean_keys()
829 while (ent->stored) { in clean_keys()
831 xa_unlock_irq(&ent->mkeys); in clean_keys()
832 mlx5_core_destroy_mkey(dev->mdev, mkey); in clean_keys()
833 xa_lock_irq(&ent->mkeys); in clean_keys()
835 xa_unlock_irq(&ent->mkeys); in clean_keys()
840 if (!mlx5_debugfs_root || dev->is_rep) in mlx5_mkey_cache_debugfs_cleanup()
843 debugfs_remove_recursive(dev->cache.fs_root); in mlx5_mkey_cache_debugfs_cleanup()
844 dev->cache.fs_root = NULL; in mlx5_mkey_cache_debugfs_cleanup()
850 int order = order_base_2(ent->rb_key.ndescs); in mlx5_mkey_cache_debugfs_add_ent()
851 struct dentry *dir; in mlx5_mkey_cache_debugfs_add_ent() local
853 if (!mlx5_debugfs_root || dev->is_rep) in mlx5_mkey_cache_debugfs_add_ent()
856 if (ent->rb_key.access_mode == MLX5_MKC_ACCESS_MODE_KSM) in mlx5_mkey_cache_debugfs_add_ent()
859 sprintf(ent->name, "%d", order); in mlx5_mkey_cache_debugfs_add_ent()
860 dir = debugfs_create_dir(ent->name, dev->cache.fs_root); in mlx5_mkey_cache_debugfs_add_ent()
861 debugfs_create_file("size", 0600, dir, ent, &size_fops); in mlx5_mkey_cache_debugfs_add_ent()
862 debugfs_create_file("limit", 0600, dir, ent, &limit_fops); in mlx5_mkey_cache_debugfs_add_ent()
863 debugfs_create_ulong("cur", 0400, dir, &ent->stored); in mlx5_mkey_cache_debugfs_add_ent()
864 debugfs_create_u32("miss", 0600, dir, &ent->miss); in mlx5_mkey_cache_debugfs_add_ent()
869 struct dentry *dbg_root = mlx5_debugfs_get_dev_root(dev->mdev); in mlx5_mkey_cache_debugfs_init()
870 struct mlx5_mkey_cache *cache = &dev->cache; in mlx5_mkey_cache_debugfs_init()
872 if (!mlx5_debugfs_root || dev->is_rep) in mlx5_mkey_cache_debugfs_init()
875 cache->fs_root = debugfs_create_dir("mr_cache", dbg_root); in mlx5_mkey_cache_debugfs_init()
882 WRITE_ONCE(dev->fill_delay, 0); in delay_time_func()
896 return ERR_PTR(-ENOMEM); in mlx5r_cache_create_ent_locked()
898 xa_init_flags(&ent->mkeys, XA_FLAGS_LOCK_IRQ); in mlx5r_cache_create_ent_locked()
899 ent->rb_key = rb_key; in mlx5r_cache_create_ent_locked()
900 ent->dev = dev; in mlx5r_cache_create_ent_locked()
901 ent->is_tmp = !persistent_entry; in mlx5r_cache_create_ent_locked()
903 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); in mlx5r_cache_create_ent_locked()
905 ret = mlx5_cache_ent_insert(&dev->cache, ent); in mlx5r_cache_create_ent_locked()
915 order = order_base_2(rb_key.ndescs) - 2; in mlx5r_cache_create_ent_locked()
917 if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) && in mlx5r_cache_create_ent_locked()
918 !dev->is_rep && mlx5_core_is_pf(dev->mdev) && in mlx5r_cache_create_ent_locked()
920 ent->limit = dev->mdev->profile.mr_cache[order].limit; in mlx5r_cache_create_ent_locked()
922 ent->limit = 0; in mlx5r_cache_create_ent_locked()
926 mod_delayed_work(ent->dev->cache.wq, in mlx5r_cache_create_ent_locked()
927 &ent->dev->cache.remove_ent_dwork, in mlx5r_cache_create_ent_locked()
942 mutex_lock(&cache->rb_lock); in remove_ent_work_func()
943 cur = rb_last(&cache->rb_root); in remove_ent_work_func()
947 mutex_unlock(&cache->rb_lock); in remove_ent_work_func()
949 xa_lock_irq(&ent->mkeys); in remove_ent_work_func()
950 if (!ent->is_tmp) { in remove_ent_work_func()
951 xa_unlock_irq(&ent->mkeys); in remove_ent_work_func()
952 mutex_lock(&cache->rb_lock); in remove_ent_work_func()
955 xa_unlock_irq(&ent->mkeys); in remove_ent_work_func()
957 clean_keys(ent->dev, ent); in remove_ent_work_func()
958 mutex_lock(&cache->rb_lock); in remove_ent_work_func()
960 mutex_unlock(&cache->rb_lock); in remove_ent_work_func()
965 struct mlx5_mkey_cache *cache = &dev->cache; in mlx5_mkey_cache_init()
966 struct rb_root *root = &dev->cache.rb_root; in mlx5_mkey_cache_init()
975 mutex_init(&dev->slow_path_mutex); in mlx5_mkey_cache_init()
976 mutex_init(&dev->cache.rb_lock); in mlx5_mkey_cache_init()
977 dev->cache.rb_root = RB_ROOT; in mlx5_mkey_cache_init()
978 INIT_DELAYED_WORK(&dev->cache.remove_ent_dwork, remove_ent_work_func); in mlx5_mkey_cache_init()
979 cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM); in mlx5_mkey_cache_init()
980 if (!cache->wq) { in mlx5_mkey_cache_init()
982 return -ENOMEM; in mlx5_mkey_cache_init()
985 mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx); in mlx5_mkey_cache_init()
986 timer_setup(&dev->delay_timer, delay_time_func, 0); in mlx5_mkey_cache_init()
988 mutex_lock(&cache->rb_lock); in mlx5_mkey_cache_init()
1002 mutex_unlock(&cache->rb_lock); in mlx5_mkey_cache_init()
1005 xa_lock_irq(&ent->mkeys); in mlx5_mkey_cache_init()
1007 xa_unlock_irq(&ent->mkeys); in mlx5_mkey_cache_init()
1013 mutex_unlock(&cache->rb_lock); in mlx5_mkey_cache_init()
1021 struct rb_root *root = &dev->cache.rb_root; in mlx5_mkey_cache_cleanup()
1025 if (!dev->cache.wq) in mlx5_mkey_cache_cleanup()
1028 mutex_lock(&dev->cache.rb_lock); in mlx5_mkey_cache_cleanup()
1029 cancel_delayed_work(&dev->cache.remove_ent_dwork); in mlx5_mkey_cache_cleanup()
1032 xa_lock_irq(&ent->mkeys); in mlx5_mkey_cache_cleanup()
1033 ent->disabled = true; in mlx5_mkey_cache_cleanup()
1034 xa_unlock_irq(&ent->mkeys); in mlx5_mkey_cache_cleanup()
1035 cancel_delayed_work(&ent->dwork); in mlx5_mkey_cache_cleanup()
1037 mutex_unlock(&dev->cache.rb_lock); in mlx5_mkey_cache_cleanup()
1043 flush_workqueue(dev->cache.wq); in mlx5_mkey_cache_cleanup()
1046 mlx5_cmd_cleanup_async_ctx(&dev->async_ctx); in mlx5_mkey_cache_cleanup()
1049 mutex_lock(&dev->cache.rb_lock); in mlx5_mkey_cache_cleanup()
1055 rb_erase(&ent->node, root); in mlx5_mkey_cache_cleanup()
1058 mutex_unlock(&dev->cache.rb_lock); in mlx5_mkey_cache_cleanup()
1060 destroy_workqueue(dev->cache.wq); in mlx5_mkey_cache_cleanup()
1061 del_timer_sync(&dev->delay_timer); in mlx5_mkey_cache_cleanup()
1066 struct mlx5_ib_dev *dev = to_mdev(pd->device); in mlx5_ib_get_dma_mr()
1075 return ERR_PTR(-ENOMEM); in mlx5_ib_get_dma_mr()
1079 err = -ENOMEM; in mlx5_ib_get_dma_mr()
1090 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in mlx5_ib_get_dma_mr()
1095 mr->mmkey.type = MLX5_MKEY_MR; in mlx5_ib_get_dma_mr()
1096 mr->ibmr.lkey = mr->mmkey.key; in mlx5_ib_get_dma_mr()
1097 mr->ibmr.rkey = mr->mmkey.key; in mlx5_ib_get_dma_mr()
1098 mr->umem = NULL; in mlx5_ib_get_dma_mr()
1100 return &mr->ibmr; in mlx5_ib_get_dma_mr()
1117 offset = addr & (page_size - 1); in get_octo_len()
1124 if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) in mkey_cache_max_order()
1132 mr->ibmr.lkey = mr->mmkey.key; in set_mr_fields()
1133 mr->ibmr.rkey = mr->mmkey.key; in set_mr_fields()
1134 mr->ibmr.length = length; in set_mr_fields()
1135 mr->ibmr.device = &dev->ib_dev; in set_mr_fields()
1136 mr->ibmr.iova = iova; in set_mr_fields()
1137 mr->access_flags = access_flags; in set_mr_fields()
1147 umem->iova = iova; in mlx5_umem_dmabuf_default_pgsz()
1158 struct mlx5_ib_dev *dev = to_mdev(pd->device); in alloc_cacheable_mr()
1163 if (umem->is_dmabuf) in alloc_cacheable_mr()
1169 return ERR_PTR(-EINVAL); in alloc_cacheable_mr()
1180 mutex_lock(&dev->slow_path_mutex); in alloc_cacheable_mr()
1182 mutex_unlock(&dev->slow_path_mutex); in alloc_cacheable_mr()
1185 mr->mmkey.rb_key = rb_key; in alloc_cacheable_mr()
1193 mr->ibmr.pd = pd; in alloc_cacheable_mr()
1194 mr->umem = umem; in alloc_cacheable_mr()
1195 mr->page_shift = order_base_2(page_size); in alloc_cacheable_mr()
1196 set_mr_fields(dev, mr, umem->length, access_flags, iova); in alloc_cacheable_mr()
1209 struct mlx5_ib_dev *dev = to_mdev(pd->device); in reg_create()
1216 bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg)); in reg_create()
1219 return ERR_PTR(-EINVAL); in reg_create()
1222 return ERR_PTR(-ENOMEM); in reg_create()
1224 mr->ibmr.pd = pd; in reg_create()
1225 mr->access_flags = access_flags; in reg_create()
1226 mr->page_shift = order_base_2(page_size); in reg_create()
1234 err = -ENOMEM; in reg_create()
1240 err = -EINVAL; in reg_create()
1243 mlx5_ib_populate_pas(umem, 1UL << mr->page_shift, pas, in reg_create()
1254 populate ? pd : dev->umrc.pd); in reg_create()
1259 MLX5_SET64(mkc, mkc, len, umem->length); in reg_create()
1262 get_octo_len(iova, umem->length, mr->page_shift)); in reg_create()
1263 MLX5_SET(mkc, mkc, log_page_size, mr->page_shift); in reg_create()
1268 get_octo_len(iova, umem->length, mr->page_shift)); in reg_create()
1271 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in reg_create()
1276 mr->mmkey.type = MLX5_MKEY_MR; in reg_create()
1277 mr->mmkey.ndescs = get_octo_len(iova, umem->length, mr->page_shift); in reg_create()
1278 mr->umem = umem; in reg_create()
1279 set_mr_fields(dev, mr, umem->length, access_flags, iova); in reg_create()
1282 mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key); in reg_create()
1296 struct mlx5_ib_dev *dev = to_mdev(pd->device); in mlx5_ib_get_dm_mr()
1305 return ERR_PTR(-ENOMEM); in mlx5_ib_get_dm_mr()
1309 err = -ENOMEM; in mlx5_ib_get_dm_mr()
1320 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in mlx5_ib_get_dm_mr()
1328 return &mr->ibmr; in mlx5_ib_get_dm_mr()
1349 return -EOPNOTSUPP; in mlx5_ib_advise_mr()
1360 struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev; in mlx5_ib_reg_dm_mr()
1361 u64 start_addr = mdm->dev_addr + attr->offset; in mlx5_ib_reg_dm_mr()
1364 switch (mdm->type) { in mlx5_ib_reg_dm_mr()
1366 if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS) in mlx5_ib_reg_dm_mr()
1367 return ERR_PTR(-EINVAL); in mlx5_ib_reg_dm_mr()
1370 start_addr -= pci_resource_start(dev->pdev, 0); in mlx5_ib_reg_dm_mr()
1375 if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS) in mlx5_ib_reg_dm_mr()
1376 return ERR_PTR(-EINVAL); in mlx5_ib_reg_dm_mr()
1381 return ERR_PTR(-EINVAL); in mlx5_ib_reg_dm_mr()
1384 return mlx5_ib_get_dm_mr(pd, start_addr, attr->length, in mlx5_ib_reg_dm_mr()
1385 attr->access_flags, mode); in mlx5_ib_reg_dm_mr()
1391 struct mlx5_ib_dev *dev = to_mdev(pd->device); in create_real_mr()
1396 xlt_with_umr = mlx5r_umr_can_load_pas(dev, umem->length); in create_real_mr()
1403 mutex_lock(&dev->slow_path_mutex); in create_real_mr()
1405 mutex_unlock(&dev->slow_path_mutex); in create_real_mr()
1412 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); in create_real_mr()
1414 atomic_add(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages); in create_real_mr()
1424 mlx5_ib_dereg_mr(&mr->ibmr, NULL); in create_real_mr()
1428 return &mr->ibmr; in create_real_mr()
1435 struct mlx5_ib_dev *dev = to_mdev(pd->device); in create_user_odp_mr()
1441 return ERR_PTR(-EOPNOTSUPP); in create_user_odp_mr()
1443 err = mlx5r_odp_create_eq(dev, &dev->odp_pf_eq); in create_user_odp_mr()
1448 return ERR_PTR(-EINVAL); in create_user_odp_mr()
1449 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT)) in create_user_odp_mr()
1450 return ERR_PTR(-EINVAL); in create_user_odp_mr()
1455 return &mr->ibmr; in create_user_odp_mr()
1460 return ERR_PTR(-EINVAL); in create_user_odp_mr()
1462 odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags, in create_user_odp_mr()
1467 mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags); in create_user_odp_mr()
1469 ib_umem_release(&odp->umem); in create_user_odp_mr()
1472 xa_init(&mr->implicit_children); in create_user_odp_mr()
1474 odp->private = mr; in create_user_odp_mr()
1475 err = mlx5r_store_odp_mkey(dev, &mr->mmkey); in create_user_odp_mr()
1482 return &mr->ibmr; in create_user_odp_mr()
1485 mlx5_ib_dereg_mr(&mr->ibmr, NULL); in create_user_odp_mr()
1493 struct mlx5_ib_dev *dev = to_mdev(pd->device); in mlx5_ib_reg_user_mr()
1497 return ERR_PTR(-EOPNOTSUPP); in mlx5_ib_reg_user_mr()
1505 umem = ib_umem_get(&dev->ib_dev, start, length, access_flags); in mlx5_ib_reg_user_mr()
1513 struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv; in mlx5_ib_dmabuf_invalidate_cb()
1514 struct mlx5_ib_mr *mr = umem_dmabuf->private; in mlx5_ib_dmabuf_invalidate_cb()
1516 dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv); in mlx5_ib_dmabuf_invalidate_cb()
1518 if (!umem_dmabuf->sgt) in mlx5_ib_dmabuf_invalidate_cb()
1535 struct mlx5_ib_dev *dev = to_mdev(pd->device); in mlx5_ib_reg_user_mr_dmabuf()
1542 return ERR_PTR(-EOPNOTSUPP); in mlx5_ib_reg_user_mr_dmabuf()
1550 return ERR_PTR(-EINVAL); in mlx5_ib_reg_user_mr_dmabuf()
1552 umem_dmabuf = ib_umem_dmabuf_get(&dev->ib_dev, offset, length, fd, in mlx5_ib_reg_user_mr_dmabuf()
1561 mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr, in mlx5_ib_reg_user_mr_dmabuf()
1564 ib_umem_release(&umem_dmabuf->umem); in mlx5_ib_reg_user_mr_dmabuf()
1568 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); in mlx5_ib_reg_user_mr_dmabuf()
1570 atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages); in mlx5_ib_reg_user_mr_dmabuf()
1571 umem_dmabuf->private = mr; in mlx5_ib_reg_user_mr_dmabuf()
1572 err = mlx5r_store_odp_mkey(dev, &mr->mmkey); in mlx5_ib_reg_user_mr_dmabuf()
1579 return &mr->ibmr; in mlx5_ib_reg_user_mr_dmabuf()
1582 mlx5_ib_dereg_mr(&mr->ibmr, NULL); in mlx5_ib_reg_user_mr_dmabuf()
1608 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); in can_use_umr_rereg_pas()
1611 if (!mr->mmkey.cache_ent) in can_use_umr_rereg_pas()
1613 if (!mlx5r_umr_can_load_pas(dev, new_umem->length)) in can_use_umr_rereg_pas()
1620 return (mr->mmkey.cache_ent->rb_key.ndescs) >= in can_use_umr_rereg_pas()
1628 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); in umr_rereg_pas()
1630 struct ib_umem *old_umem = mr->umem; in umr_rereg_pas()
1643 mr->ibmr.pd = pd; in umr_rereg_pas()
1647 mr->access_flags = access_flags; in umr_rereg_pas()
1651 mr->ibmr.iova = iova; in umr_rereg_pas()
1652 mr->ibmr.length = new_umem->length; in umr_rereg_pas()
1653 mr->page_shift = order_base_2(page_size); in umr_rereg_pas()
1654 mr->umem = new_umem; in umr_rereg_pas()
1661 mr->umem = old_umem; in umr_rereg_pas()
1665 atomic_sub(ib_umem_num_pages(old_umem), &dev->mdev->priv.reg_pages); in umr_rereg_pas()
1667 atomic_add(ib_umem_num_pages(new_umem), &dev->mdev->priv.reg_pages); in umr_rereg_pas()
1676 struct mlx5_ib_dev *dev = to_mdev(ib_mr->device); in mlx5_ib_rereg_user_mr()
1681 return ERR_PTR(-EOPNOTSUPP); in mlx5_ib_rereg_user_mr()
1689 return ERR_PTR(-EOPNOTSUPP); in mlx5_ib_rereg_user_mr()
1692 new_access_flags = mr->access_flags; in mlx5_ib_rereg_user_mr()
1694 new_pd = ib_mr->pd; in mlx5_ib_rereg_user_mr()
1700 if (can_use_umr_rereg_access(dev, mr->access_flags, in mlx5_ib_rereg_user_mr()
1708 /* DM or ODP MR's don't have a normal umem so we can't re-use it */ in mlx5_ib_rereg_user_mr()
1709 if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr)) in mlx5_ib_rereg_user_mr()
1719 umem = mr->umem; in mlx5_ib_rereg_user_mr()
1720 mr->umem = NULL; in mlx5_ib_rereg_user_mr()
1721 atomic_sub(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages); in mlx5_ib_rereg_user_mr()
1723 return create_real_mr(new_pd, umem, mr->ibmr.iova, in mlx5_ib_rereg_user_mr()
1728 * DM doesn't have a PAS list so we can't re-use it, odp/dmabuf does in mlx5_ib_rereg_user_mr()
1731 if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr)) in mlx5_ib_rereg_user_mr()
1735 can_use_umr_rereg_access(dev, mr->access_flags, new_access_flags)) { in mlx5_ib_rereg_user_mr()
1739 new_umem = ib_umem_get(&dev->ib_dev, start, length, in mlx5_ib_rereg_user_mr()
1774 struct device *ddev = &dev->mdev->pdev->dev; in mlx5_alloc_priv_descs()
1779 add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0); in mlx5_alloc_priv_descs()
1783 add_size = min_t(int, end - size, add_size); in mlx5_alloc_priv_descs()
1786 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL); in mlx5_alloc_priv_descs()
1787 if (!mr->descs_alloc) in mlx5_alloc_priv_descs()
1788 return -ENOMEM; in mlx5_alloc_priv_descs()
1790 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN); in mlx5_alloc_priv_descs()
1792 mr->desc_map = dma_map_single(ddev, mr->descs, size, DMA_TO_DEVICE); in mlx5_alloc_priv_descs()
1793 if (dma_mapping_error(ddev, mr->desc_map)) { in mlx5_alloc_priv_descs()
1794 ret = -ENOMEM; in mlx5_alloc_priv_descs()
1800 kfree(mr->descs_alloc); in mlx5_alloc_priv_descs()
1808 if (!mr->umem && mr->descs) { in mlx5_free_priv_descs()
1809 struct ib_device *device = mr->ibmr.device; in mlx5_free_priv_descs()
1810 int size = mr->max_descs * mr->desc_size; in mlx5_free_priv_descs()
1813 dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size, in mlx5_free_priv_descs()
1815 kfree(mr->descs_alloc); in mlx5_free_priv_descs()
1816 mr->descs = NULL; in mlx5_free_priv_descs()
1823 struct mlx5_mkey_cache *cache = &dev->cache; in cache_ent_find_and_store()
1827 if (mr->mmkey.cache_ent) { in cache_ent_find_and_store()
1828 xa_lock_irq(&mr->mmkey.cache_ent->mkeys); in cache_ent_find_and_store()
1829 mr->mmkey.cache_ent->in_use--; in cache_ent_find_and_store()
1833 mutex_lock(&cache->rb_lock); in cache_ent_find_and_store()
1834 ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key); in cache_ent_find_and_store()
1836 if (ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) { in cache_ent_find_and_store()
1837 if (ent->disabled) { in cache_ent_find_and_store()
1838 mutex_unlock(&cache->rb_lock); in cache_ent_find_and_store()
1839 return -EOPNOTSUPP; in cache_ent_find_and_store()
1841 mr->mmkey.cache_ent = ent; in cache_ent_find_and_store()
1842 xa_lock_irq(&mr->mmkey.cache_ent->mkeys); in cache_ent_find_and_store()
1843 mutex_unlock(&cache->rb_lock); in cache_ent_find_and_store()
1848 ent = mlx5r_cache_create_ent_locked(dev, mr->mmkey.rb_key, false); in cache_ent_find_and_store()
1849 mutex_unlock(&cache->rb_lock); in cache_ent_find_and_store()
1853 mr->mmkey.cache_ent = ent; in cache_ent_find_and_store()
1854 xa_lock_irq(&mr->mmkey.cache_ent->mkeys); in cache_ent_find_and_store()
1857 ret = push_mkey_locked(mr->mmkey.cache_ent, false, in cache_ent_find_and_store()
1858 xa_mk_value(mr->mmkey.key)); in cache_ent_find_and_store()
1859 xa_unlock_irq(&mr->mmkey.cache_ent->mkeys); in cache_ent_find_and_store()
1866 struct mlx5_ib_dev *dev = to_mdev(ibmr->device); in mlx5_ib_dereg_mr()
1875 refcount_read(&mr->mmkey.usecount) != 0 && in mlx5_ib_dereg_mr()
1876 xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))) in mlx5_ib_dereg_mr()
1877 mlx5r_deref_wait_odp_mkey(&mr->mmkey); in mlx5_ib_dereg_mr()
1879 if (ibmr->type == IB_MR_TYPE_INTEGRITY) { in mlx5_ib_dereg_mr()
1880 xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), in mlx5_ib_dereg_mr()
1881 mr->sig, NULL, GFP_KERNEL); in mlx5_ib_dereg_mr()
1883 if (mr->mtt_mr) { in mlx5_ib_dereg_mr()
1884 rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL); in mlx5_ib_dereg_mr()
1887 mr->mtt_mr = NULL; in mlx5_ib_dereg_mr()
1889 if (mr->klm_mr) { in mlx5_ib_dereg_mr()
1890 rc = mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL); in mlx5_ib_dereg_mr()
1893 mr->klm_mr = NULL; in mlx5_ib_dereg_mr()
1896 if (mlx5_core_destroy_psv(dev->mdev, in mlx5_ib_dereg_mr()
1897 mr->sig->psv_memory.psv_idx)) in mlx5_ib_dereg_mr()
1899 mr->sig->psv_memory.psv_idx); in mlx5_ib_dereg_mr()
1900 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx)) in mlx5_ib_dereg_mr()
1902 mr->sig->psv_wire.psv_idx); in mlx5_ib_dereg_mr()
1903 kfree(mr->sig); in mlx5_ib_dereg_mr()
1904 mr->sig = NULL; in mlx5_ib_dereg_mr()
1908 if (mr->umem && mlx5r_umr_can_load_pas(dev, mr->umem->length)) in mlx5_ib_dereg_mr()
1911 mr->mmkey.cache_ent = NULL; in mlx5_ib_dereg_mr()
1913 if (!mr->mmkey.cache_ent) { in mlx5_ib_dereg_mr()
1914 rc = destroy_mkey(to_mdev(mr->ibmr.device), mr); in mlx5_ib_dereg_mr()
1919 if (mr->umem) { in mlx5_ib_dereg_mr()
1923 atomic_sub(ib_umem_num_pages(mr->umem), in mlx5_ib_dereg_mr()
1924 &dev->mdev->priv.reg_pages); in mlx5_ib_dereg_mr()
1925 ib_umem_release(mr->umem); in mlx5_ib_dereg_mr()
1930 if (!mr->mmkey.cache_ent) in mlx5_ib_dereg_mr()
1958 struct mlx5_ib_dev *dev = to_mdev(pd->device); in _mlx5_alloc_mkey_descs()
1961 mr->access_mode = access_mode; in _mlx5_alloc_mkey_descs()
1962 mr->desc_size = desc_size; in _mlx5_alloc_mkey_descs()
1963 mr->max_descs = ndescs; in _mlx5_alloc_mkey_descs()
1965 err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size); in _mlx5_alloc_mkey_descs()
1971 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in _mlx5_alloc_mkey_descs()
1975 mr->mmkey.type = MLX5_MKEY_MR; in _mlx5_alloc_mkey_descs()
1976 mr->ibmr.lkey = mr->mmkey.key; in _mlx5_alloc_mkey_descs()
1977 mr->ibmr.rkey = mr->mmkey.key; in _mlx5_alloc_mkey_descs()
1999 return ERR_PTR(-ENOMEM); in mlx5_ib_alloc_pi_mr()
2001 mr->ibmr.pd = pd; in mlx5_ib_alloc_pi_mr()
2002 mr->ibmr.device = pd->device; in mlx5_ib_alloc_pi_mr()
2006 err = -ENOMEM; in mlx5_ib_alloc_pi_mr()
2018 mr->umem = NULL; in mlx5_ib_alloc_pi_mr()
2049 struct mlx5_ib_dev *dev = to_mdev(pd->device); in mlx5_alloc_integrity_descs()
2054 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL); in mlx5_alloc_integrity_descs()
2055 if (!mr->sig) in mlx5_alloc_integrity_descs()
2056 return -ENOMEM; in mlx5_alloc_integrity_descs()
2059 err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index); in mlx5_alloc_integrity_descs()
2063 mr->sig->psv_memory.psv_idx = psv_index[0]; in mlx5_alloc_integrity_descs()
2064 mr->sig->psv_wire.psv_idx = psv_index[1]; in mlx5_alloc_integrity_descs()
2066 mr->sig->sig_status_checked = true; in mlx5_alloc_integrity_descs()
2067 mr->sig->sig_err_exists = false; in mlx5_alloc_integrity_descs()
2069 ++mr->sig->sigerr_count; in mlx5_alloc_integrity_descs()
2070 mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg, in mlx5_alloc_integrity_descs()
2073 if (IS_ERR(mr->klm_mr)) { in mlx5_alloc_integrity_descs()
2074 err = PTR_ERR(mr->klm_mr); in mlx5_alloc_integrity_descs()
2077 mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg, in mlx5_alloc_integrity_descs()
2080 if (IS_ERR(mr->mtt_mr)) { in mlx5_alloc_integrity_descs()
2081 err = PTR_ERR(mr->mtt_mr); in mlx5_alloc_integrity_descs()
2095 err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), in mlx5_alloc_integrity_descs()
2096 mr->sig, GFP_KERNEL)); in mlx5_alloc_integrity_descs()
2105 mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL); in mlx5_alloc_integrity_descs()
2106 mr->mtt_mr = NULL; in mlx5_alloc_integrity_descs()
2108 mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL); in mlx5_alloc_integrity_descs()
2109 mr->klm_mr = NULL; in mlx5_alloc_integrity_descs()
2111 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx)) in mlx5_alloc_integrity_descs()
2113 mr->sig->psv_memory.psv_idx); in mlx5_alloc_integrity_descs()
2114 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx)) in mlx5_alloc_integrity_descs()
2116 mr->sig->psv_wire.psv_idx); in mlx5_alloc_integrity_descs()
2118 kfree(mr->sig); in mlx5_alloc_integrity_descs()
2127 struct mlx5_ib_dev *dev = to_mdev(pd->device); in __mlx5_ib_alloc_mr()
2136 return ERR_PTR(-ENOMEM); in __mlx5_ib_alloc_mr()
2140 err = -ENOMEM; in __mlx5_ib_alloc_mr()
2144 mr->ibmr.device = pd->device; in __mlx5_ib_alloc_mr()
2145 mr->umem = NULL; in __mlx5_ib_alloc_mr()
2160 err = -EINVAL; in __mlx5_ib_alloc_mr()
2168 return &mr->ibmr; in __mlx5_ib_alloc_mr()
2192 struct mlx5_ib_dev *dev = to_mdev(ibmw->device); in mlx5_ib_alloc_mw()
2205 err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req))); in mlx5_ib_alloc_mw()
2210 return -EOPNOTSUPP; in mlx5_ib_alloc_mw()
2212 if (udata->inlen > sizeof(req) && in mlx5_ib_alloc_mw()
2214 udata->inlen - sizeof(req))) in mlx5_ib_alloc_mw()
2215 return -EOPNOTSUPP; in mlx5_ib_alloc_mw()
2221 return -ENOMEM; in mlx5_ib_alloc_mw()
2227 MLX5_SET(mkc, mkc, pd, to_mpd(ibmw->pd)->pdn); in mlx5_ib_alloc_mw()
2231 MLX5_SET(mkc, mkc, en_rinval, !!((ibmw->type == IB_MW_TYPE_2))); in mlx5_ib_alloc_mw()
2234 err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen); in mlx5_ib_alloc_mw()
2238 mw->mmkey.type = MLX5_MKEY_MW; in mlx5_ib_alloc_mw()
2239 ibmw->rkey = mw->mmkey.key; in mlx5_ib_alloc_mw()
2240 mw->mmkey.ndescs = ndescs; in mlx5_ib_alloc_mw()
2243 min(offsetofend(typeof(resp), response_length), udata->outlen); in mlx5_ib_alloc_mw()
2251 err = mlx5r_store_odp_mkey(dev, &mw->mmkey); in mlx5_ib_alloc_mw()
2260 mlx5_core_destroy_mkey(dev->mdev, mw->mmkey.key); in mlx5_ib_alloc_mw()
2268 struct mlx5_ib_dev *dev = to_mdev(mw->device); in mlx5_ib_dealloc_mw()
2272 xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key))) in mlx5_ib_dealloc_mw()
2277 mlx5r_deref_wait_odp_mkey(&mmw->mmkey); in mlx5_ib_dealloc_mw()
2279 return mlx5_core_destroy_mkey(dev->mdev, mmw->mmkey.key); in mlx5_ib_dealloc_mw()
2290 ret = -EINVAL; in mlx5_ib_check_mr_status()
2294 mr_status->fail_status = 0; in mlx5_ib_check_mr_status()
2296 if (!mmr->sig) { in mlx5_ib_check_mr_status()
2297 ret = -EINVAL; in mlx5_ib_check_mr_status()
2298 pr_err("signature status check requested on a non-signature enabled MR\n"); in mlx5_ib_check_mr_status()
2302 mmr->sig->sig_status_checked = true; in mlx5_ib_check_mr_status()
2303 if (!mmr->sig->sig_err_exists) in mlx5_ib_check_mr_status()
2306 if (ibmr->lkey == mmr->sig->err_item.key) in mlx5_ib_check_mr_status()
2307 memcpy(&mr_status->sig_err, &mmr->sig->err_item, in mlx5_ib_check_mr_status()
2308 sizeof(mr_status->sig_err)); in mlx5_ib_check_mr_status()
2310 mr_status->sig_err.err_type = IB_SIG_BAD_GUARD; in mlx5_ib_check_mr_status()
2311 mr_status->sig_err.sig_err_offset = 0; in mlx5_ib_check_mr_status()
2312 mr_status->sig_err.key = mmr->sig->err_item.key; in mlx5_ib_check_mr_status()
2315 mmr->sig->sig_err_exists = false; in mlx5_ib_check_mr_status()
2316 mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS; in mlx5_ib_check_mr_status()
2333 mr->meta_length = 0; in mlx5_ib_map_pa_mr_sg_pi()
2336 mr->mmkey.ndescs = 1; in mlx5_ib_map_pa_mr_sg_pi()
2339 mr->data_length = sg_dma_len(data_sg) - sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2340 mr->data_iova = sg_dma_address(data_sg) + sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2343 mr->meta_ndescs = 1; in mlx5_ib_map_pa_mr_sg_pi()
2348 mr->meta_length = sg_dma_len(meta_sg) - sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2349 mr->pi_iova = sg_dma_address(meta_sg) + sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2351 ibmr->length = mr->data_length + mr->meta_length; in mlx5_ib_map_pa_mr_sg_pi()
2367 struct mlx5_klm *klms = mr->descs; in mlx5_ib_sg_to_klms()
2369 u32 lkey = mr->ibmr.pd->local_dma_lkey; in mlx5_ib_sg_to_klms()
2372 mr->ibmr.iova = sg_dma_address(sg) + sg_offset; in mlx5_ib_sg_to_klms()
2373 mr->ibmr.length = 0; in mlx5_ib_sg_to_klms()
2376 if (unlikely(i >= mr->max_descs)) in mlx5_ib_sg_to_klms()
2379 klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset); in mlx5_ib_sg_to_klms()
2381 mr->ibmr.length += sg_dma_len(sg) - sg_offset; in mlx5_ib_sg_to_klms()
2389 mr->mmkey.ndescs = i; in mlx5_ib_sg_to_klms()
2390 mr->data_length = mr->ibmr.length; in mlx5_ib_sg_to_klms()
2396 if (unlikely(i + j >= mr->max_descs)) in mlx5_ib_sg_to_klms()
2400 klms[i + j].bcount = cpu_to_be32(sg_dma_len(sg) - in mlx5_ib_sg_to_klms()
2403 mr->ibmr.length += sg_dma_len(sg) - sg_offset; in mlx5_ib_sg_to_klms()
2410 mr->meta_ndescs = j; in mlx5_ib_sg_to_klms()
2411 mr->meta_length = mr->ibmr.length - mr->data_length; in mlx5_ib_sg_to_klms()
2422 if (unlikely(mr->mmkey.ndescs == mr->max_descs)) in mlx5_set_page()
2423 return -ENOMEM; in mlx5_set_page()
2425 descs = mr->descs; in mlx5_set_page()
2426 descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); in mlx5_set_page()
2436 if (unlikely(mr->mmkey.ndescs + mr->meta_ndescs == mr->max_descs)) in mlx5_set_page_pi()
2437 return -ENOMEM; in mlx5_set_page_pi()
2439 descs = mr->descs; in mlx5_set_page_pi()
2440 descs[mr->mmkey.ndescs + mr->meta_ndescs++] = in mlx5_set_page_pi()
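mlx5_set_page() and mlx5_set_page_pi() above are the per-page callbacks used with ib_sg_to_pages()/ib_map_mr_sg(): each call appends one big-endian descriptor of the form addr | MLX5_EN_RD | MLX5_EN_WR until max_descs is reached. A userspace sketch of that bounded append; the struct, helper name and flag values are hypothetical:

#define _DEFAULT_SOURCE
#include <endian.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define DESC_EN_RD (1ULL << 2) /* placeholder flag bits, not the real values */
#define DESC_EN_WR (1ULL << 1)
#define MAX_DESCS  16

struct frmr_state {
	uint64_t descs[MAX_DESCS]; /* big-endian page descriptors */
	unsigned int ndescs;
};

/* Append one page address; refuse with -ENOMEM once the list is full. */
static int set_page(struct frmr_state *st, uint64_t addr)
{
	if (st->ndescs == MAX_DESCS)
		return -ENOMEM;
	st->descs[st->ndescs++] = htobe64(addr | DESC_EN_RD | DESC_EN_WR);
	return 0;
}

int main(void)
{
	struct frmr_state st = { .ndescs = 0 };

	set_page(&st, 0x100000);
	set_page(&st, 0x101000);
	printf("mapped %u pages\n", st.ndescs);
	return 0;
}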
2453 struct mlx5_ib_mr *pi_mr = mr->mtt_mr; in mlx5_ib_map_mtt_mr_sg_pi()
2456 pi_mr->mmkey.ndescs = 0; in mlx5_ib_map_mtt_mr_sg_pi()
2457 pi_mr->meta_ndescs = 0; in mlx5_ib_map_mtt_mr_sg_pi()
2458 pi_mr->meta_length = 0; in mlx5_ib_map_mtt_mr_sg_pi()
2460 ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map, in mlx5_ib_map_mtt_mr_sg_pi()
2461 pi_mr->desc_size * pi_mr->max_descs, in mlx5_ib_map_mtt_mr_sg_pi()
2464 pi_mr->ibmr.page_size = ibmr->page_size; in mlx5_ib_map_mtt_mr_sg_pi()
2465 n = ib_sg_to_pages(&pi_mr->ibmr, data_sg, data_sg_nents, data_sg_offset, in mlx5_ib_map_mtt_mr_sg_pi()
2470 pi_mr->data_iova = pi_mr->ibmr.iova; in mlx5_ib_map_mtt_mr_sg_pi()
2471 pi_mr->data_length = pi_mr->ibmr.length; in mlx5_ib_map_mtt_mr_sg_pi()
2472 pi_mr->ibmr.length = pi_mr->data_length; in mlx5_ib_map_mtt_mr_sg_pi()
2473 ibmr->length = pi_mr->data_length; in mlx5_ib_map_mtt_mr_sg_pi()
2476 u64 page_mask = ~((u64)ibmr->page_size - 1); in mlx5_ib_map_mtt_mr_sg_pi()
2477 u64 iova = pi_mr->data_iova; in mlx5_ib_map_mtt_mr_sg_pi()
2479 n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents, in mlx5_ib_map_mtt_mr_sg_pi()
2482 pi_mr->meta_length = pi_mr->ibmr.length; in mlx5_ib_map_mtt_mr_sg_pi()
2489 pi_mr->pi_iova = (iova & page_mask) + in mlx5_ib_map_mtt_mr_sg_pi()
2490 pi_mr->mmkey.ndescs * ibmr->page_size + in mlx5_ib_map_mtt_mr_sg_pi()
2491 (pi_mr->ibmr.iova & ~page_mask); in mlx5_ib_map_mtt_mr_sg_pi()
2495 * the metadata (the sig MR will verify that the HW will access in mlx5_ib_map_mtt_mr_sg_pi()
2499 pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova; in mlx5_ib_map_mtt_mr_sg_pi()
2500 pi_mr->ibmr.iova = iova; in mlx5_ib_map_mtt_mr_sg_pi()
2501 ibmr->length += pi_mr->meta_length; in mlx5_ib_map_mtt_mr_sg_pi()
2504 ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map, in mlx5_ib_map_mtt_mr_sg_pi()
2505 pi_mr->desc_size * pi_mr->max_descs, in mlx5_ib_map_mtt_mr_sg_pi()
2518 struct mlx5_ib_mr *pi_mr = mr->klm_mr; in mlx5_ib_map_klm_mr_sg_pi()
2521 pi_mr->mmkey.ndescs = 0; in mlx5_ib_map_klm_mr_sg_pi()
2522 pi_mr->meta_ndescs = 0; in mlx5_ib_map_klm_mr_sg_pi()
2523 pi_mr->meta_length = 0; in mlx5_ib_map_klm_mr_sg_pi()
2525 ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map, in mlx5_ib_map_klm_mr_sg_pi()
2526 pi_mr->desc_size * pi_mr->max_descs, in mlx5_ib_map_klm_mr_sg_pi()
2532 ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map, in mlx5_ib_map_klm_mr_sg_pi()
2533 pi_mr->desc_size * pi_mr->max_descs, in mlx5_ib_map_klm_mr_sg_pi()
2536 /* This is zero-based memory region */ in mlx5_ib_map_klm_mr_sg_pi()
2537 pi_mr->data_iova = 0; in mlx5_ib_map_klm_mr_sg_pi()
2538 pi_mr->ibmr.iova = 0; in mlx5_ib_map_klm_mr_sg_pi()
2539 pi_mr->pi_iova = pi_mr->data_length; in mlx5_ib_map_klm_mr_sg_pi()
2540 ibmr->length = pi_mr->ibmr.length; in mlx5_ib_map_klm_mr_sg_pi()
2554 WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY); in mlx5_ib_map_mr_sg_pi()
2556 mr->mmkey.ndescs = 0; in mlx5_ib_map_mr_sg_pi()
2557 mr->data_length = 0; in mlx5_ib_map_mr_sg_pi()
2558 mr->data_iova = 0; in mlx5_ib_map_mr_sg_pi()
2559 mr->meta_ndescs = 0; in mlx5_ib_map_mr_sg_pi()
2560 mr->pi_iova = 0; in mlx5_ib_map_mr_sg_pi()
2580 pi_mr = mr->mtt_mr; in mlx5_ib_map_mr_sg_pi()
2587 pi_mr = mr->klm_mr; in mlx5_ib_map_mr_sg_pi()
2592 return -ENOMEM; in mlx5_ib_map_mr_sg_pi()
2595 /* This is zero-based memory region */ in mlx5_ib_map_mr_sg_pi()
2596 ibmr->iova = 0; in mlx5_ib_map_mr_sg_pi()
2597 mr->pi_mr = pi_mr; in mlx5_ib_map_mr_sg_pi()
2599 ibmr->sig_attrs->meta_length = pi_mr->meta_length; in mlx5_ib_map_mr_sg_pi()
2601 ibmr->sig_attrs->meta_length = mr->meta_length; in mlx5_ib_map_mr_sg_pi()
2612 mr->mmkey.ndescs = 0; in mlx5_ib_map_mr_sg()
2614 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map, in mlx5_ib_map_mr_sg()
2615 mr->desc_size * mr->max_descs, in mlx5_ib_map_mr_sg()
2618 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS) in mlx5_ib_map_mr_sg()
2625 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map, in mlx5_ib_map_mr_sg()
2626 mr->desc_size * mr->max_descs, in mlx5_ib_map_mr_sg()