Lines Matching +full:sig +full:- +full:dir

2  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
14 * - Redistributions of source code must retain the above
18 * - Redistributions in binary form must reproduce the above
56 struct mlx5_ib_dev *dev = to_mdev(pd->device); in set_mkc_access_pd_addr_fields()
64 if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write)) in set_mkc_access_pd_addr_fields()
67 if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read)) in set_mkc_access_pd_addr_fields()
71 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); in set_mkc_access_pd_addr_fields()
80 u8 key = atomic_inc_return(&dev->mkey_var); in assign_mkey_variant()
85 mkey->key = key; in assign_mkey_variant()
93 return mlx5_core_create_mkey(dev->mdev, mkey, in, inlen); in mlx5_ib_create_mkey()
116 return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled); in umr_can_use_indirect_mkey()
121 WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))); in destroy_mkey()
123 return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); in destroy_mkey()
129 return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >= in mlx5_ib_pas_fits_in_mr()
130 length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1)); in mlx5_ib_pas_fits_in_mr()
137 struct mlx5_ib_dev *dev = mr->dev; in create_mkey_callback()
138 struct mlx5_cache_ent *ent = mr->cache_ent; in create_mkey_callback()
144 spin_lock_irqsave(&ent->lock, flags); in create_mkey_callback()
145 ent->pending--; in create_mkey_callback()
146 WRITE_ONCE(dev->fill_delay, 1); in create_mkey_callback()
147 spin_unlock_irqrestore(&ent->lock, flags); in create_mkey_callback()
148 mod_timer(&dev->delay_timer, jiffies + HZ); in create_mkey_callback()
152 mr->mmkey.type = MLX5_MKEY_MR; in create_mkey_callback()
153 mr->mmkey.key |= mlx5_idx_to_mkey( in create_mkey_callback()
154 MLX5_GET(create_mkey_out, mr->out, mkey_index)); in create_mkey_callback()
156 WRITE_ONCE(dev->cache.last_add, jiffies); in create_mkey_callback()
158 spin_lock_irqsave(&ent->lock, flags); in create_mkey_callback()
159 list_add_tail(&mr->list, &ent->head); in create_mkey_callback()
160 ent->available_mrs++; in create_mkey_callback()
161 ent->total_mrs++; in create_mkey_callback()
164 ent->pending--; in create_mkey_callback()
165 spin_unlock_irqrestore(&ent->lock, flags); in create_mkey_callback()
175 mr->order = ent->order; in alloc_cache_mr()
176 mr->cache_ent = ent; in alloc_cache_mr()
177 mr->dev = ent->dev; in alloc_cache_mr()
179 set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd); in alloc_cache_mr()
182 MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3); in alloc_cache_mr()
183 MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7); in alloc_cache_mr()
185 MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt); in alloc_cache_mr()
186 MLX5_SET(mkc, mkc, log_page_size, ent->page); in alloc_cache_mr()
202 return -ENOMEM; in add_keys()
208 err = -ENOMEM; in add_keys()
211 spin_lock_irq(&ent->lock); in add_keys()
212 if (ent->pending >= MAX_PENDING_REG_MR) { in add_keys()
213 err = -EAGAIN; in add_keys()
214 spin_unlock_irq(&ent->lock); in add_keys()
218 ent->pending++; in add_keys()
219 spin_unlock_irq(&ent->lock); in add_keys()
220 err = mlx5_ib_create_mkey_cb(ent->dev, &mr->mmkey, in add_keys()
221 &ent->dev->async_ctx, in, inlen, in add_keys()
222 mr->out, sizeof(mr->out), in add_keys()
223 &mr->cb_work); in add_keys()
225 spin_lock_irq(&ent->lock); in add_keys()
226 ent->pending--; in add_keys()
227 spin_unlock_irq(&ent->lock); in add_keys()
228 mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err); in add_keys()
249 return ERR_PTR(-ENOMEM); in create_cache_mr()
254 err = -ENOMEM; in create_cache_mr()
258 err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey, in, inlen); in create_cache_mr()
262 mr->mmkey.type = MLX5_MKEY_MR; in create_cache_mr()
263 WRITE_ONCE(ent->dev->cache.last_add, jiffies); in create_cache_mr()
264 spin_lock_irq(&ent->lock); in create_cache_mr()
265 ent->total_mrs++; in create_cache_mr()
266 spin_unlock_irq(&ent->lock); in create_cache_mr()
280 lockdep_assert_held(&ent->lock); in remove_cache_mr_locked()
281 if (list_empty(&ent->head)) in remove_cache_mr_locked()
283 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in remove_cache_mr_locked()
284 list_del(&mr->list); in remove_cache_mr_locked()
285 ent->available_mrs--; in remove_cache_mr_locked()
286 ent->total_mrs--; in remove_cache_mr_locked()
287 spin_unlock_irq(&ent->lock); in remove_cache_mr_locked()
288 mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey); in remove_cache_mr_locked()
290 spin_lock_irq(&ent->lock); in remove_cache_mr_locked()
298 lockdep_assert_held(&ent->lock); in resize_available_mrs()
302 target = ent->limit * 2; in resize_available_mrs()
303 if (target == ent->available_mrs + ent->pending) in resize_available_mrs()
305 if (target > ent->available_mrs + ent->pending) { in resize_available_mrs()
306 u32 todo = target - (ent->available_mrs + ent->pending); in resize_available_mrs()
308 spin_unlock_irq(&ent->lock); in resize_available_mrs()
310 if (err == -EAGAIN) in resize_available_mrs()
312 spin_lock_irq(&ent->lock); in resize_available_mrs()
314 if (err != -EAGAIN) in resize_available_mrs()
327 struct mlx5_cache_ent *ent = filp->private_data; in size_write()
340 spin_lock_irq(&ent->lock); in size_write()
341 if (target < ent->total_mrs - ent->available_mrs) { in size_write()
342 err = -EINVAL; in size_write()
345 target = target - (ent->total_mrs - ent->available_mrs); in size_write()
346 if (target < ent->limit || target > ent->limit*2) { in size_write()
347 err = -EINVAL; in size_write()
353 spin_unlock_irq(&ent->lock); in size_write()
358 spin_unlock_irq(&ent->lock); in size_write()
365 struct mlx5_cache_ent *ent = filp->private_data; in size_read()
369 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->total_mrs); in size_read()
386 struct mlx5_cache_ent *ent = filp->private_data; in limit_write()
398 spin_lock_irq(&ent->lock); in limit_write()
399 ent->limit = var; in limit_write()
401 spin_unlock_irq(&ent->lock); in limit_write()
410 struct mlx5_cache_ent *ent = filp->private_data; in limit_read()
414 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit); in limit_read()
433 struct mlx5_cache_ent *ent = &cache->ent[i]; in someone_adding()
436 spin_lock_irq(&ent->lock); in someone_adding()
437 ret = ent->available_mrs < ent->limit; in someone_adding()
438 spin_unlock_irq(&ent->lock); in someone_adding()
452 lockdep_assert_held(&ent->lock); in queue_adjust_cache_locked()
454 if (ent->disabled || READ_ONCE(ent->dev->fill_delay)) in queue_adjust_cache_locked()
456 if (ent->available_mrs < ent->limit) { in queue_adjust_cache_locked()
457 ent->fill_to_high_water = true; in queue_adjust_cache_locked()
458 queue_work(ent->dev->cache.wq, &ent->work); in queue_adjust_cache_locked()
459 } else if (ent->fill_to_high_water && in queue_adjust_cache_locked()
460 ent->available_mrs + ent->pending < 2 * ent->limit) { in queue_adjust_cache_locked()
465 queue_work(ent->dev->cache.wq, &ent->work); in queue_adjust_cache_locked()
466 } else if (ent->available_mrs == 2 * ent->limit) { in queue_adjust_cache_locked()
467 ent->fill_to_high_water = false; in queue_adjust_cache_locked()
468 } else if (ent->available_mrs > 2 * ent->limit) { in queue_adjust_cache_locked()
470 ent->fill_to_high_water = false; in queue_adjust_cache_locked()
471 if (ent->pending) in queue_adjust_cache_locked()
472 queue_delayed_work(ent->dev->cache.wq, &ent->dwork, in queue_adjust_cache_locked()
475 queue_work(ent->dev->cache.wq, &ent->work); in queue_adjust_cache_locked()
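The queue_adjust_cache_locked() matches above encode the cache's watermark policy: refilling starts once available MRs drop below the entry's limit, continues until available plus pending reaches twice the limit, and a shrink is queued when the pool overshoots that high-water mark. A compressed restatement of that decision, using a simplified stand-in struct rather than the driver's real mlx5_cache_ent, might look like this:

#include <linux/types.h>

struct demo_ent {
	unsigned int available, pending, limit;
	bool fill_to_high_water;
};

enum demo_action { DEMO_NONE, DEMO_FILL, DEMO_SHRINK };

/* Hypothetical sketch of the fill/shrink decision seen in the matches above;
 * the real code also distinguishes immediate vs. delayed shrink work. */
static enum demo_action demo_adjust(struct demo_ent *e)
{
	if (e->available < e->limit) {
		e->fill_to_high_water = true;	/* start refilling */
		return DEMO_FILL;
	}
	if (e->fill_to_high_water && e->available + e->pending < 2 * e->limit)
		return DEMO_FILL;		/* keep topping up toward 2 * limit */
	if (e->available >= 2 * e->limit) {
		e->fill_to_high_water = false;	/* high-water mark reached */
		return e->available > 2 * e->limit ? DEMO_SHRINK : DEMO_NONE;
	}
	return DEMO_NONE;
}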
481 struct mlx5_ib_dev *dev = ent->dev; in __cache_work_func()
482 struct mlx5_mr_cache *cache = &dev->cache; in __cache_work_func()
485 spin_lock_irq(&ent->lock); in __cache_work_func()
486 if (ent->disabled) in __cache_work_func()
489 if (ent->fill_to_high_water && in __cache_work_func()
490 ent->available_mrs + ent->pending < 2 * ent->limit && in __cache_work_func()
491 !READ_ONCE(dev->fill_delay)) { in __cache_work_func()
492 spin_unlock_irq(&ent->lock); in __cache_work_func()
494 spin_lock_irq(&ent->lock); in __cache_work_func()
495 if (ent->disabled) in __cache_work_func()
503 if (err != -EAGAIN) { in __cache_work_func()
507 ent->order, err); in __cache_work_func()
508 queue_delayed_work(cache->wq, &ent->dwork, in __cache_work_func()
512 } else if (ent->available_mrs > 2 * ent->limit) { in __cache_work_func()
527 spin_unlock_irq(&ent->lock); in __cache_work_func()
530 READ_ONCE(cache->last_add) + 300 * HZ); in __cache_work_func()
531 spin_lock_irq(&ent->lock); in __cache_work_func()
532 if (ent->disabled) in __cache_work_func()
535 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); in __cache_work_func()
542 spin_unlock_irq(&ent->lock); in __cache_work_func()
565 struct mlx5_mr_cache *cache = &dev->cache; in mlx5_mr_cache_alloc()
570 entry >= ARRAY_SIZE(cache->ent))) in mlx5_mr_cache_alloc()
571 return ERR_PTR(-EINVAL); in mlx5_mr_cache_alloc()
575 return ERR_PTR(-EOPNOTSUPP); in mlx5_mr_cache_alloc()
577 ent = &cache->ent[entry]; in mlx5_mr_cache_alloc()
578 spin_lock_irq(&ent->lock); in mlx5_mr_cache_alloc()
579 if (list_empty(&ent->head)) { in mlx5_mr_cache_alloc()
581 ent->miss++; in mlx5_mr_cache_alloc()
582 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_alloc()
587 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in mlx5_mr_cache_alloc()
588 list_del(&mr->list); in mlx5_mr_cache_alloc()
589 ent->available_mrs--; in mlx5_mr_cache_alloc()
591 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_alloc()
593 mr->access_flags = access_flags; in mlx5_mr_cache_alloc()
600 struct mlx5_ib_dev *dev = req_ent->dev; in get_cache_mr()
605 for (; ent != &dev->cache.ent[MR_CACHE_LAST_STD_ENTRY + 1]; ent++) { in get_cache_mr()
606 mlx5_ib_dbg(dev, "order %u, cache index %zu\n", ent->order, in get_cache_mr()
607 ent - dev->cache.ent); in get_cache_mr()
609 spin_lock_irq(&ent->lock); in get_cache_mr()
610 if (!list_empty(&ent->head)) { in get_cache_mr()
611 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, in get_cache_mr()
613 list_del(&mr->list); in get_cache_mr()
614 ent->available_mrs--; in get_cache_mr()
616 spin_unlock_irq(&ent->lock); in get_cache_mr()
620 spin_unlock_irq(&ent->lock); in get_cache_mr()
624 req_ent->miss++; in get_cache_mr()
631 struct mlx5_cache_ent *ent = mr->cache_ent; in detach_mr_from_cache()
633 mr->cache_ent = NULL; in detach_mr_from_cache()
634 spin_lock_irq(&ent->lock); in detach_mr_from_cache()
635 ent->total_mrs--; in detach_mr_from_cache()
636 spin_unlock_irq(&ent->lock); in detach_mr_from_cache()
641 struct mlx5_cache_ent *ent = mr->cache_ent; in mlx5_mr_cache_free()
653 spin_lock_irq(&ent->lock); in mlx5_mr_cache_free()
654 list_add_tail(&mr->list, &ent->head); in mlx5_mr_cache_free()
655 ent->available_mrs++; in mlx5_mr_cache_free()
657 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_free()
662 struct mlx5_mr_cache *cache = &dev->cache; in clean_keys()
663 struct mlx5_cache_ent *ent = &cache->ent[c]; in clean_keys()
668 cancel_delayed_work(&ent->dwork); in clean_keys()
670 spin_lock_irq(&ent->lock); in clean_keys()
671 if (list_empty(&ent->head)) { in clean_keys()
672 spin_unlock_irq(&ent->lock); in clean_keys()
675 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in clean_keys()
676 list_move(&mr->list, &del_list); in clean_keys()
677 ent->available_mrs--; in clean_keys()
678 ent->total_mrs--; in clean_keys()
679 spin_unlock_irq(&ent->lock); in clean_keys()
680 mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); in clean_keys()
684 list_del(&mr->list); in clean_keys()
691 if (!mlx5_debugfs_root || dev->is_rep) in mlx5_mr_cache_debugfs_cleanup()
694 debugfs_remove_recursive(dev->cache.root); in mlx5_mr_cache_debugfs_cleanup()
695 dev->cache.root = NULL; in mlx5_mr_cache_debugfs_cleanup()
700 struct mlx5_mr_cache *cache = &dev->cache; in mlx5_mr_cache_debugfs_init()
702 struct dentry *dir; in mlx5_mr_cache_debugfs_init() local
705 if (!mlx5_debugfs_root || dev->is_rep) in mlx5_mr_cache_debugfs_init()
708 cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root); in mlx5_mr_cache_debugfs_init()
711 ent = &cache->ent[i]; in mlx5_mr_cache_debugfs_init()
712 sprintf(ent->name, "%d", ent->order); in mlx5_mr_cache_debugfs_init()
713 dir = debugfs_create_dir(ent->name, cache->root); in mlx5_mr_cache_debugfs_init()
714 debugfs_create_file("size", 0600, dir, ent, &size_fops); in mlx5_mr_cache_debugfs_init()
715 debugfs_create_file("limit", 0600, dir, ent, &limit_fops); in mlx5_mr_cache_debugfs_init()
716 debugfs_create_u32("cur", 0400, dir, &ent->available_mrs); in mlx5_mr_cache_debugfs_init()
717 debugfs_create_u32("miss", 0600, dir, &ent->miss); in mlx5_mr_cache_debugfs_init()
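The mlx5_mr_cache_debugfs_init() matches show the layout of the cache statistics: one debugfs directory per cache entry, named after the entry's order, holding size, limit, cur, and miss nodes. A minimal stand-alone illustration of that pattern with the stock debugfs helpers follows; the entry struct is a simplified stand-in, not the driver's, and the size/limit files (which need custom file_operations) are left out:

#include <linux/debugfs.h>
#include <linux/kernel.h>

struct demo_cache_ent {
	char name[8];
	u32 order;
	u32 available_mrs;
	u32 miss;
};

/* One directory per entry under a common root, mirroring the matches above. */
static void demo_cache_debugfs_init(struct dentry *root,
				    struct demo_cache_ent *ent)
{
	struct dentry *dir;

	snprintf(ent->name, sizeof(ent->name), "%u", ent->order);
	dir = debugfs_create_dir(ent->name, root);
	debugfs_create_u32("cur", 0400, dir, &ent->available_mrs);
	debugfs_create_u32("miss", 0600, dir, &ent->miss);
}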
725 WRITE_ONCE(dev->fill_delay, 0); in delay_time_func()
730 struct mlx5_mr_cache *cache = &dev->cache; in mlx5_mr_cache_init()
734 mutex_init(&dev->slow_path_mutex); in mlx5_mr_cache_init()
735 cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM); in mlx5_mr_cache_init()
736 if (!cache->wq) { in mlx5_mr_cache_init()
738 return -ENOMEM; in mlx5_mr_cache_init()
741 mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx); in mlx5_mr_cache_init()
742 timer_setup(&dev->delay_timer, delay_time_func, 0); in mlx5_mr_cache_init()
744 ent = &cache->ent[i]; in mlx5_mr_cache_init()
745 INIT_LIST_HEAD(&ent->head); in mlx5_mr_cache_init()
746 spin_lock_init(&ent->lock); in mlx5_mr_cache_init()
747 ent->order = i + 2; in mlx5_mr_cache_init()
748 ent->dev = dev; in mlx5_mr_cache_init()
749 ent->limit = 0; in mlx5_mr_cache_init()
751 INIT_WORK(&ent->work, cache_work_func); in mlx5_mr_cache_init()
752 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); in mlx5_mr_cache_init()
759 if (ent->order > mr_cache_max_order(dev)) in mlx5_mr_cache_init()
762 ent->page = PAGE_SHIFT; in mlx5_mr_cache_init()
763 ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) / in mlx5_mr_cache_init()
765 ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT; in mlx5_mr_cache_init()
766 if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) && in mlx5_mr_cache_init()
767 !dev->is_rep && mlx5_core_is_pf(dev->mdev) && in mlx5_mr_cache_init()
769 ent->limit = dev->mdev->profile->mr_cache[i].limit; in mlx5_mr_cache_init()
771 ent->limit = 0; in mlx5_mr_cache_init()
772 spin_lock_irq(&ent->lock); in mlx5_mr_cache_init()
774 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_init()
786 if (!dev->cache.wq) in mlx5_mr_cache_cleanup()
790 struct mlx5_cache_ent *ent = &dev->cache.ent[i]; in mlx5_mr_cache_cleanup()
792 spin_lock_irq(&ent->lock); in mlx5_mr_cache_cleanup()
793 ent->disabled = true; in mlx5_mr_cache_cleanup()
794 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_cleanup()
795 cancel_work_sync(&ent->work); in mlx5_mr_cache_cleanup()
796 cancel_delayed_work_sync(&ent->dwork); in mlx5_mr_cache_cleanup()
800 mlx5_cmd_cleanup_async_ctx(&dev->async_ctx); in mlx5_mr_cache_cleanup()
805 destroy_workqueue(dev->cache.wq); in mlx5_mr_cache_cleanup()
806 del_timer_sync(&dev->delay_timer); in mlx5_mr_cache_cleanup()
813 struct mlx5_ib_dev *dev = to_mdev(pd->device); in mlx5_ib_get_dma_mr()
822 return ERR_PTR(-ENOMEM); in mlx5_ib_get_dma_mr()
826 err = -ENOMEM; in mlx5_ib_get_dma_mr()
836 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in mlx5_ib_get_dma_mr()
841 mr->mmkey.type = MLX5_MKEY_MR; in mlx5_ib_get_dma_mr()
842 mr->ibmr.lkey = mr->mmkey.key; in mlx5_ib_get_dma_mr()
843 mr->ibmr.rkey = mr->mmkey.key; in mlx5_ib_get_dma_mr()
844 mr->umem = NULL; in mlx5_ib_get_dma_mr()
846 return &mr->ibmr; in mlx5_ib_get_dma_mr()
863 offset = addr & (page_size - 1); in get_octo_len()
870 if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) in mr_cache_max_order()
886 odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags, in mr_umem_get()
894 u = &odp->umem; in mr_umem_get()
896 *page_shift = odp->page_shift; in mr_umem_get()
898 *npages = *ncont << (*page_shift - PAGE_SHIFT); in mr_umem_get()
902 u = ib_umem_get(&dev->ib_dev, start, length, access_flags); in mr_umem_get()
915 return -EINVAL; in mr_umem_get()
929 container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe); in mlx5_ib_umr_done()
931 context->status = wc->status; in mlx5_ib_umr_done()
932 complete(&context->done); in mlx5_ib_umr_done()
937 context->cqe.done = mlx5_ib_umr_done; in mlx5_ib_init_umr_context()
938 context->status = -1; in mlx5_ib_init_umr_context()
939 init_completion(&context->done); in mlx5_ib_init_umr_context()
945 struct umr_common *umrc = &dev->umrc; in mlx5_ib_post_send_wait()
951 umrwr->wr.wr_cqe = &umr_context.cqe; in mlx5_ib_post_send_wait()
953 down(&umrc->sem); in mlx5_ib_post_send_wait()
954 err = ib_post_send(umrc->qp, &umrwr->wr, &bad); in mlx5_ib_post_send_wait()
962 err = -EFAULT; in mlx5_ib_post_send_wait()
965 up(&umrc->sem); in mlx5_ib_post_send_wait()
972 struct mlx5_mr_cache *cache = &dev->cache; in mr_cache_ent_from_order()
974 if (order < cache->ent[0].order) in mr_cache_ent_from_order()
975 return &cache->ent[0]; in mr_cache_ent_from_order()
976 order = order - cache->ent[0].order; in mr_cache_ent_from_order()
979 return &cache->ent[order]; in mr_cache_ent_from_order()
987 struct mlx5_ib_dev *dev = to_mdev(pd->device); in alloc_mr_from_cache()
992 return ERR_PTR(-E2BIG); in alloc_mr_from_cache()
996 return ERR_PTR(-EOPNOTSUPP); in alloc_mr_from_cache()
1005 mr->ibmr.pd = pd; in alloc_mr_from_cache()
1006 mr->umem = umem; in alloc_mr_from_cache()
1007 mr->access_flags = access_flags; in alloc_mr_from_cache()
1008 mr->desc_size = sizeof(struct mlx5_mtt); in alloc_mr_from_cache()
1009 mr->mmkey.iova = virt_addr; in alloc_mr_from_cache()
1010 mr->mmkey.size = len; in alloc_mr_from_cache()
1011 mr->mmkey.pd = to_mpd(pd)->pdn; in alloc_mr_from_cache()
1016 #define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
1023 struct mlx5_ib_dev *dev = mr->dev; in mlx5_ib_update_xlt()
1024 struct device *ddev = dev->ib_dev.dev.parent; in mlx5_ib_update_xlt()
1035 const int page_mask = page_align - 1; in mlx5_ib_update_xlt()
1045 return -EPERM; in mlx5_ib_update_xlt()
1082 err = -ENOMEM; in mlx5_ib_update_xlt()
1086 if (mr->umem->is_odp) { in mlx5_ib_update_xlt()
1088 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); in mlx5_ib_update_xlt()
1089 size_t max_pages = ib_umem_odp_num_pages(odp) - idx; in mlx5_ib_update_xlt()
1096 sg.lkey = dev->umrc.pd->local_dma_lkey; in mlx5_ib_update_xlt()
1106 wr.pd = mr->ibmr.pd; in mlx5_ib_update_xlt()
1107 wr.mkey = mr->mmkey.key; in mlx5_ib_update_xlt()
1108 wr.length = mr->mmkey.size; in mlx5_ib_update_xlt()
1109 wr.virt_addr = mr->mmkey.iova; in mlx5_ib_update_xlt()
1110 wr.access_flags = mr->access_flags; in mlx5_ib_update_xlt()
1116 npages = min_t(int, pages_iter, pages_to_map - pages_mapped); in mlx5_ib_update_xlt()
1119 if (mr->umem->is_odp) { in mlx5_ib_update_xlt()
1122 __mlx5_ib_populate_pas(dev, mr->umem, page_shift, idx, in mlx5_ib_update_xlt()
1128 memset(xlt + size_to_map, 0, size - size_to_map); in mlx5_ib_update_xlt()
1175 struct mlx5_ib_dev *dev = to_mdev(pd->device); in reg_create()
1182 bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg)); in reg_create()
1186 return ERR_PTR(-ENOMEM); in reg_create()
1188 mr->ibmr.pd = pd; in reg_create()
1189 mr->access_flags = access_flags; in reg_create()
1196 err = -ENOMEM; in reg_create()
1202 err = -EINVAL; in reg_create()
1215 populate ? pd : dev->umrc.pd); in reg_create()
1230 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in reg_create()
1235 mr->mmkey.type = MLX5_MKEY_MR; in reg_create()
1236 mr->desc_size = sizeof(struct mlx5_mtt); in reg_create()
1237 mr->dev = dev; in reg_create()
1240 mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key); in reg_create()
1257 mr->ibmr.lkey = mr->mmkey.key; in set_mr_fields()
1258 mr->ibmr.rkey = mr->mmkey.key; in set_mr_fields()
1259 mr->ibmr.length = length; in set_mr_fields()
1260 mr->access_flags = access_flags; in set_mr_fields()
1266 struct mlx5_ib_dev *dev = to_mdev(pd->device); in mlx5_ib_get_dm_mr()
1275 return ERR_PTR(-ENOMEM); in mlx5_ib_get_dm_mr()
1279 err = -ENOMEM; in mlx5_ib_get_dm_mr()
1290 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in mlx5_ib_get_dm_mr()
1298 return &mr->ibmr; in mlx5_ib_get_dm_mr()
1319 return -EOPNOTSUPP; in mlx5_ib_advise_mr()
1330 struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev; in mlx5_ib_reg_dm_mr()
1331 u64 start_addr = mdm->dev_addr + attr->offset; in mlx5_ib_reg_dm_mr()
1334 switch (mdm->type) { in mlx5_ib_reg_dm_mr()
1336 if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS) in mlx5_ib_reg_dm_mr()
1337 return ERR_PTR(-EINVAL); in mlx5_ib_reg_dm_mr()
1340 start_addr -= pci_resource_start(dev->pdev, 0); in mlx5_ib_reg_dm_mr()
1344 if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS) in mlx5_ib_reg_dm_mr()
1345 return ERR_PTR(-EINVAL); in mlx5_ib_reg_dm_mr()
1350 return ERR_PTR(-EINVAL); in mlx5_ib_reg_dm_mr()
1353 return mlx5_ib_get_dm_mr(pd, start_addr, attr->length, in mlx5_ib_reg_dm_mr()
1354 attr->access_flags, mode); in mlx5_ib_reg_dm_mr()
1361 struct mlx5_ib_dev *dev = to_mdev(pd->device); in mlx5_ib_reg_user_mr()
1372 return ERR_PTR(-EOPNOTSUPP); in mlx5_ib_reg_user_mr()
1380 return ERR_PTR(-EINVAL); in mlx5_ib_reg_user_mr()
1385 return ERR_PTR(-EINVAL); in mlx5_ib_reg_user_mr()
1387 !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT)) in mlx5_ib_reg_user_mr()
1388 return ERR_PTR(-EINVAL); in mlx5_ib_reg_user_mr()
1393 return &mr->ibmr; in mlx5_ib_reg_user_mr()
1410 mutex_lock(&dev->slow_path_mutex); in mlx5_ib_reg_user_mr()
1413 mutex_unlock(&dev->slow_path_mutex); in mlx5_ib_reg_user_mr()
1421 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); in mlx5_ib_reg_user_mr()
1423 mr->umem = umem; in mlx5_ib_reg_user_mr()
1424 mr->npages = npages; in mlx5_ib_reg_user_mr()
1425 atomic_add(mr->npages, &dev->mdev->priv.reg_pages); in mlx5_ib_reg_user_mr()
1445 to_ib_umem_odp(mr->umem)->private = mr; in mlx5_ib_reg_user_mr()
1446 init_waitqueue_head(&mr->q_deferred_work); in mlx5_ib_reg_user_mr()
1447 atomic_set(&mr->num_deferred_work, 0); in mlx5_ib_reg_user_mr()
1448 err = xa_err(xa_store(&dev->odp_mkeys, in mlx5_ib_reg_user_mr()
1449 mlx5_base_mkey(mr->mmkey.key), &mr->mmkey, in mlx5_ib_reg_user_mr()
1463 return &mr->ibmr; in mlx5_ib_reg_user_mr()
1470 * mlx5_mr_cache_invalidate - Fence all DMA on the MR
1481 if (mr->dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) in mlx5_mr_cache_invalidate()
1487 umrwr.pd = mr->dev->umrc.pd; in mlx5_mr_cache_invalidate()
1488 umrwr.mkey = mr->mmkey.key; in mlx5_mr_cache_invalidate()
1491 return mlx5_ib_post_send_wait(mr->dev, &umrwr); in mlx5_mr_cache_invalidate()
1497 struct mlx5_ib_dev *dev = to_mdev(pd->device); in rereg_umr()
1504 umrwr.mkey = mr->mmkey.key; in rereg_umr()
1521 struct mlx5_ib_dev *dev = to_mdev(ib_mr->device); in mlx5_ib_rereg_user_mr()
1523 struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd; in mlx5_ib_rereg_user_mr()
1526 mr->access_flags; in mlx5_ib_rereg_user_mr()
1538 if (!mr->umem) in mlx5_ib_rereg_user_mr()
1539 return -EINVAL; in mlx5_ib_rereg_user_mr()
1542 return -EOPNOTSUPP; in mlx5_ib_rereg_user_mr()
1548 addr = mr->umem->address; in mlx5_ib_rereg_user_mr()
1549 len = mr->umem->length; in mlx5_ib_rereg_user_mr()
1558 atomic_sub(mr->npages, &dev->mdev->priv.reg_pages); in mlx5_ib_rereg_user_mr()
1559 mr->npages = 0; in mlx5_ib_rereg_user_mr()
1560 ib_umem_release(mr->umem); in mlx5_ib_rereg_user_mr()
1561 mr->umem = NULL; in mlx5_ib_rereg_user_mr()
1563 err = mr_umem_get(dev, addr, len, access_flags, &mr->umem, in mlx5_ib_rereg_user_mr()
1567 mr->npages = ncont; in mlx5_ib_rereg_user_mr()
1568 atomic_add(mr->npages, &dev->mdev->priv.reg_pages); in mlx5_ib_rereg_user_mr()
1571 if (!mlx5_ib_can_reconfig_with_umr(dev, mr->access_flags, in mlx5_ib_rereg_user_mr()
1577 * UMR can't be used - MKey needs to be replaced. in mlx5_ib_rereg_user_mr()
1579 if (mr->cache_ent) in mlx5_ib_rereg_user_mr()
1585 mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont, in mlx5_ib_rereg_user_mr()
1597 mr->ibmr.pd = pd; in mlx5_ib_rereg_user_mr()
1598 mr->access_flags = access_flags; in mlx5_ib_rereg_user_mr()
1599 mr->mmkey.iova = addr; in mlx5_ib_rereg_user_mr()
1600 mr->mmkey.size = len; in mlx5_ib_rereg_user_mr()
1601 mr->mmkey.pd = to_mpd(pd)->pdn; in mlx5_ib_rereg_user_mr()
1624 ib_umem_release(mr->umem); in mlx5_ib_rereg_user_mr()
1625 mr->umem = NULL; in mlx5_ib_rereg_user_mr()
1641 add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0); in mlx5_alloc_priv_descs()
1643 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL); in mlx5_alloc_priv_descs()
1644 if (!mr->descs_alloc) in mlx5_alloc_priv_descs()
1645 return -ENOMEM; in mlx5_alloc_priv_descs()
1647 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN); in mlx5_alloc_priv_descs()
1649 mr->desc_map = dma_map_single(device->dev.parent, mr->descs, in mlx5_alloc_priv_descs()
1651 if (dma_mapping_error(device->dev.parent, mr->desc_map)) { in mlx5_alloc_priv_descs()
1652 ret = -ENOMEM; in mlx5_alloc_priv_descs()
1658 kfree(mr->descs_alloc); in mlx5_alloc_priv_descs()
1666 if (mr->descs) { in mlx5_free_priv_descs()
1667 struct ib_device *device = mr->ibmr.device; in mlx5_free_priv_descs()
1668 int size = mr->max_descs * mr->desc_size; in mlx5_free_priv_descs()
1670 dma_unmap_single(device->dev.parent, mr->desc_map, in mlx5_free_priv_descs()
1672 kfree(mr->descs_alloc); in mlx5_free_priv_descs()
1673 mr->descs = NULL; in mlx5_free_priv_descs()
1679 if (mr->sig) { in clean_mr()
1680 if (mlx5_core_destroy_psv(dev->mdev, in clean_mr()
1681 mr->sig->psv_memory.psv_idx)) in clean_mr()
1683 mr->sig->psv_memory.psv_idx); in clean_mr()
1684 if (mlx5_core_destroy_psv(dev->mdev, in clean_mr()
1685 mr->sig->psv_wire.psv_idx)) in clean_mr()
1687 mr->sig->psv_wire.psv_idx); in clean_mr()
1688 xa_erase(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key)); in clean_mr()
1689 kfree(mr->sig); in clean_mr()
1690 mr->sig = NULL; in clean_mr()
1693 if (!mr->cache_ent) { in clean_mr()
1701 int npages = mr->npages; in dereg_mr()
1702 struct ib_umem *umem = mr->umem; in dereg_mr()
1710 if (mr->cache_ent) in dereg_mr()
1716 atomic_sub(npages, &dev->mdev->priv.reg_pages); in dereg_mr()
1724 if (ibmr->type == IB_MR_TYPE_INTEGRITY) { in mlx5_ib_dereg_mr()
1725 dereg_mr(to_mdev(mmr->mtt_mr->ibmr.device), mmr->mtt_mr); in mlx5_ib_dereg_mr()
1726 dereg_mr(to_mdev(mmr->klm_mr->ibmr.device), mmr->klm_mr); in mlx5_ib_dereg_mr()
1729 if (is_odp_mr(mmr) && to_ib_umem_odp(mmr->umem)->is_implicit_odp) { in mlx5_ib_dereg_mr()
1734 dereg_mr(to_mdev(ibmr->device), mmr); in mlx5_ib_dereg_mr()
1760 struct mlx5_ib_dev *dev = to_mdev(pd->device); in _mlx5_alloc_mkey_descs()
1763 mr->access_mode = access_mode; in _mlx5_alloc_mkey_descs()
1764 mr->desc_size = desc_size; in _mlx5_alloc_mkey_descs()
1765 mr->max_descs = ndescs; in _mlx5_alloc_mkey_descs()
1767 err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size); in _mlx5_alloc_mkey_descs()
1773 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in _mlx5_alloc_mkey_descs()
1777 mr->mmkey.type = MLX5_MKEY_MR; in _mlx5_alloc_mkey_descs()
1778 mr->ibmr.lkey = mr->mmkey.key; in _mlx5_alloc_mkey_descs()
1779 mr->ibmr.rkey = mr->mmkey.key; in _mlx5_alloc_mkey_descs()
1801 return ERR_PTR(-ENOMEM); in mlx5_ib_alloc_pi_mr()
1803 mr->ibmr.pd = pd; in mlx5_ib_alloc_pi_mr()
1804 mr->ibmr.device = pd->device; in mlx5_ib_alloc_pi_mr()
1808 err = -ENOMEM; in mlx5_ib_alloc_pi_mr()
1820 mr->umem = NULL; in mlx5_ib_alloc_pi_mr()
1851 struct mlx5_ib_dev *dev = to_mdev(pd->device); in mlx5_alloc_integrity_descs()
1856 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL); in mlx5_alloc_integrity_descs()
1857 if (!mr->sig) in mlx5_alloc_integrity_descs()
1858 return -ENOMEM; in mlx5_alloc_integrity_descs()
1861 err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index); in mlx5_alloc_integrity_descs()
1865 mr->sig->psv_memory.psv_idx = psv_index[0]; in mlx5_alloc_integrity_descs()
1866 mr->sig->psv_wire.psv_idx = psv_index[1]; in mlx5_alloc_integrity_descs()
1868 mr->sig->sig_status_checked = true; in mlx5_alloc_integrity_descs()
1869 mr->sig->sig_err_exists = false; in mlx5_alloc_integrity_descs()
1871 ++mr->sig->sigerr_count; in mlx5_alloc_integrity_descs()
1872 mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg, in mlx5_alloc_integrity_descs()
1875 if (IS_ERR(mr->klm_mr)) { in mlx5_alloc_integrity_descs()
1876 err = PTR_ERR(mr->klm_mr); in mlx5_alloc_integrity_descs()
1879 mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg, in mlx5_alloc_integrity_descs()
1882 if (IS_ERR(mr->mtt_mr)) { in mlx5_alloc_integrity_descs()
1883 err = PTR_ERR(mr->mtt_mr); in mlx5_alloc_integrity_descs()
1897 err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), in mlx5_alloc_integrity_descs()
1898 mr->sig, GFP_KERNEL)); in mlx5_alloc_integrity_descs()
1907 dereg_mr(to_mdev(mr->mtt_mr->ibmr.device), mr->mtt_mr); in mlx5_alloc_integrity_descs()
1908 mr->mtt_mr = NULL; in mlx5_alloc_integrity_descs()
1910 dereg_mr(to_mdev(mr->klm_mr->ibmr.device), mr->klm_mr); in mlx5_alloc_integrity_descs()
1911 mr->klm_mr = NULL; in mlx5_alloc_integrity_descs()
1913 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx)) in mlx5_alloc_integrity_descs()
1915 mr->sig->psv_memory.psv_idx); in mlx5_alloc_integrity_descs()
1916 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx)) in mlx5_alloc_integrity_descs()
1918 mr->sig->psv_wire.psv_idx); in mlx5_alloc_integrity_descs()
1920 kfree(mr->sig); in mlx5_alloc_integrity_descs()
1929 struct mlx5_ib_dev *dev = to_mdev(pd->device); in __mlx5_ib_alloc_mr()
1938 return ERR_PTR(-ENOMEM); in __mlx5_ib_alloc_mr()
1942 err = -ENOMEM; in __mlx5_ib_alloc_mr()
1946 mr->ibmr.device = pd->device; in __mlx5_ib_alloc_mr()
1947 mr->umem = NULL; in __mlx5_ib_alloc_mr()
1962 err = -EINVAL; in __mlx5_ib_alloc_mr()
1970 return &mr->ibmr; in __mlx5_ib_alloc_mr()
1994 struct mlx5_ib_dev *dev = to_mdev(ibmw->device); in mlx5_ib_alloc_mw()
2007 err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req))); in mlx5_ib_alloc_mw()
2012 return -EOPNOTSUPP; in mlx5_ib_alloc_mw()
2014 if (udata->inlen > sizeof(req) && in mlx5_ib_alloc_mw()
2016 udata->inlen - sizeof(req))) in mlx5_ib_alloc_mw()
2017 return -EOPNOTSUPP; in mlx5_ib_alloc_mw()
2023 err = -ENOMEM; in mlx5_ib_alloc_mw()
2031 MLX5_SET(mkc, mkc, pd, to_mpd(ibmw->pd)->pdn); in mlx5_ib_alloc_mw()
2035 MLX5_SET(mkc, mkc, en_rinval, !!((ibmw->type == IB_MW_TYPE_2))); in mlx5_ib_alloc_mw()
2038 err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen); in mlx5_ib_alloc_mw()
2042 mw->mmkey.type = MLX5_MKEY_MW; in mlx5_ib_alloc_mw()
2043 ibmw->rkey = mw->mmkey.key; in mlx5_ib_alloc_mw()
2044 mw->ndescs = ndescs; in mlx5_ib_alloc_mw()
2047 min(offsetofend(typeof(resp), response_length), udata->outlen); in mlx5_ib_alloc_mw()
2055 err = xa_err(xa_store(&dev->odp_mkeys, in mlx5_ib_alloc_mw()
2056 mlx5_base_mkey(mw->mmkey.key), &mw->mmkey, in mlx5_ib_alloc_mw()
2066 mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey); in mlx5_ib_alloc_mw()
2074 struct mlx5_ib_dev *dev = to_mdev(mw->device); in mlx5_ib_dealloc_mw()
2078 xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key)); in mlx5_ib_dealloc_mw()
2083 synchronize_srcu(&dev->odp_srcu); in mlx5_ib_dealloc_mw()
2086 return mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey); in mlx5_ib_dealloc_mw()
2097 ret = -EINVAL; in mlx5_ib_check_mr_status()
2101 mr_status->fail_status = 0; in mlx5_ib_check_mr_status()
2103 if (!mmr->sig) { in mlx5_ib_check_mr_status()
2104 ret = -EINVAL; in mlx5_ib_check_mr_status()
2105 pr_err("signature status check requested on a non-signature enabled MR\n"); in mlx5_ib_check_mr_status()
2109 mmr->sig->sig_status_checked = true; in mlx5_ib_check_mr_status()
2110 if (!mmr->sig->sig_err_exists) in mlx5_ib_check_mr_status()
2113 if (ibmr->lkey == mmr->sig->err_item.key) in mlx5_ib_check_mr_status()
2114 memcpy(&mr_status->sig_err, &mmr->sig->err_item, in mlx5_ib_check_mr_status()
2115 sizeof(mr_status->sig_err)); in mlx5_ib_check_mr_status()
2117 mr_status->sig_err.err_type = IB_SIG_BAD_GUARD; in mlx5_ib_check_mr_status()
2118 mr_status->sig_err.sig_err_offset = 0; in mlx5_ib_check_mr_status()
2119 mr_status->sig_err.key = mmr->sig->err_item.key; in mlx5_ib_check_mr_status()
2122 mmr->sig->sig_err_exists = false; in mlx5_ib_check_mr_status()
2123 mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS; in mlx5_ib_check_mr_status()
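mlx5_ib_check_mr_status(), whose matches end here, is the driver side of the generic ib_check_mr_status() verb: it latches sig_err_exists, copies the error descriptor out, and sets IB_MR_CHECK_SIG_STATUS in fail_status. A hedged sketch of how an upper-layer consumer would read that result through the core API (error handling trimmed):

#include <linux/printk.h>
#include <rdma/ib_verbs.h>

/* Query the signature/integrity status of a sig-enabled MR after I/O. */
static void demo_check_sig_status(struct ib_mr *sig_mr)
{
	struct ib_mr_status st;

	if (ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &st))
		return;

	if (st.fail_status & IB_MR_CHECK_SIG_STATUS)
		pr_err("sig error: type %d, offset %llu, key 0x%x\n",
		       st.sig_err.err_type,
		       (unsigned long long)st.sig_err.sig_err_offset,
		       st.sig_err.key);
}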
2140 mr->meta_length = 0; in mlx5_ib_map_pa_mr_sg_pi()
2143 mr->ndescs = 1; in mlx5_ib_map_pa_mr_sg_pi()
2146 mr->data_length = sg_dma_len(data_sg) - sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2147 mr->data_iova = sg_dma_address(data_sg) + sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2150 mr->meta_ndescs = 1; in mlx5_ib_map_pa_mr_sg_pi()
2155 mr->meta_length = sg_dma_len(meta_sg) - sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2156 mr->pi_iova = sg_dma_address(meta_sg) + sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2158 ibmr->length = mr->data_length + mr->meta_length; in mlx5_ib_map_pa_mr_sg_pi()
2174 struct mlx5_klm *klms = mr->descs; in mlx5_ib_sg_to_klms()
2176 u32 lkey = mr->ibmr.pd->local_dma_lkey; in mlx5_ib_sg_to_klms()
2179 mr->ibmr.iova = sg_dma_address(sg) + sg_offset; in mlx5_ib_sg_to_klms()
2180 mr->ibmr.length = 0; in mlx5_ib_sg_to_klms()
2183 if (unlikely(i >= mr->max_descs)) in mlx5_ib_sg_to_klms()
2186 klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset); in mlx5_ib_sg_to_klms()
2188 mr->ibmr.length += sg_dma_len(sg) - sg_offset; in mlx5_ib_sg_to_klms()
2196 mr->ndescs = i; in mlx5_ib_sg_to_klms()
2197 mr->data_length = mr->ibmr.length; in mlx5_ib_sg_to_klms()
2203 if (unlikely(i + j >= mr->max_descs)) in mlx5_ib_sg_to_klms()
2207 klms[i + j].bcount = cpu_to_be32(sg_dma_len(sg) - in mlx5_ib_sg_to_klms()
2210 mr->ibmr.length += sg_dma_len(sg) - sg_offset; in mlx5_ib_sg_to_klms()
2217 mr->meta_ndescs = j; in mlx5_ib_sg_to_klms()
2218 mr->meta_length = mr->ibmr.length - mr->data_length; in mlx5_ib_sg_to_klms()
2229 if (unlikely(mr->ndescs == mr->max_descs)) in mlx5_set_page()
2230 return -ENOMEM; in mlx5_set_page()
2232 descs = mr->descs; in mlx5_set_page()
2233 descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); in mlx5_set_page()
2243 if (unlikely(mr->ndescs + mr->meta_ndescs == mr->max_descs)) in mlx5_set_page_pi()
2244 return -ENOMEM; in mlx5_set_page_pi()
2246 descs = mr->descs; in mlx5_set_page_pi()
2247 descs[mr->ndescs + mr->meta_ndescs++] = in mlx5_set_page_pi()
2260 struct mlx5_ib_mr *pi_mr = mr->mtt_mr; in mlx5_ib_map_mtt_mr_sg_pi()
2263 pi_mr->ndescs = 0; in mlx5_ib_map_mtt_mr_sg_pi()
2264 pi_mr->meta_ndescs = 0; in mlx5_ib_map_mtt_mr_sg_pi()
2265 pi_mr->meta_length = 0; in mlx5_ib_map_mtt_mr_sg_pi()
2267 ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map, in mlx5_ib_map_mtt_mr_sg_pi()
2268 pi_mr->desc_size * pi_mr->max_descs, in mlx5_ib_map_mtt_mr_sg_pi()
2271 pi_mr->ibmr.page_size = ibmr->page_size; in mlx5_ib_map_mtt_mr_sg_pi()
2272 n = ib_sg_to_pages(&pi_mr->ibmr, data_sg, data_sg_nents, data_sg_offset, in mlx5_ib_map_mtt_mr_sg_pi()
2277 pi_mr->data_iova = pi_mr->ibmr.iova; in mlx5_ib_map_mtt_mr_sg_pi()
2278 pi_mr->data_length = pi_mr->ibmr.length; in mlx5_ib_map_mtt_mr_sg_pi()
2279 pi_mr->ibmr.length = pi_mr->data_length; in mlx5_ib_map_mtt_mr_sg_pi()
2280 ibmr->length = pi_mr->data_length; in mlx5_ib_map_mtt_mr_sg_pi()
2283 u64 page_mask = ~((u64)ibmr->page_size - 1); in mlx5_ib_map_mtt_mr_sg_pi()
2284 u64 iova = pi_mr->data_iova; in mlx5_ib_map_mtt_mr_sg_pi()
2286 n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents, in mlx5_ib_map_mtt_mr_sg_pi()
2289 pi_mr->meta_length = pi_mr->ibmr.length; in mlx5_ib_map_mtt_mr_sg_pi()
2296 pi_mr->pi_iova = (iova & page_mask) + in mlx5_ib_map_mtt_mr_sg_pi()
2297 pi_mr->ndescs * ibmr->page_size + in mlx5_ib_map_mtt_mr_sg_pi()
2298 (pi_mr->ibmr.iova & ~page_mask); in mlx5_ib_map_mtt_mr_sg_pi()
2302 * the metadata (the sig MR will verify that the HW will access in mlx5_ib_map_mtt_mr_sg_pi()
2306 pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova; in mlx5_ib_map_mtt_mr_sg_pi()
2307 pi_mr->ibmr.iova = iova; in mlx5_ib_map_mtt_mr_sg_pi()
2308 ibmr->length += pi_mr->meta_length; in mlx5_ib_map_mtt_mr_sg_pi()
2311 ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map, in mlx5_ib_map_mtt_mr_sg_pi()
2312 pi_mr->desc_size * pi_mr->max_descs, in mlx5_ib_map_mtt_mr_sg_pi()
2325 struct mlx5_ib_mr *pi_mr = mr->klm_mr; in mlx5_ib_map_klm_mr_sg_pi()
2328 pi_mr->ndescs = 0; in mlx5_ib_map_klm_mr_sg_pi()
2329 pi_mr->meta_ndescs = 0; in mlx5_ib_map_klm_mr_sg_pi()
2330 pi_mr->meta_length = 0; in mlx5_ib_map_klm_mr_sg_pi()
2332 ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map, in mlx5_ib_map_klm_mr_sg_pi()
2333 pi_mr->desc_size * pi_mr->max_descs, in mlx5_ib_map_klm_mr_sg_pi()
2339 ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map, in mlx5_ib_map_klm_mr_sg_pi()
2340 pi_mr->desc_size * pi_mr->max_descs, in mlx5_ib_map_klm_mr_sg_pi()
2343 /* This is zero-based memory region */ in mlx5_ib_map_klm_mr_sg_pi()
2344 pi_mr->data_iova = 0; in mlx5_ib_map_klm_mr_sg_pi()
2345 pi_mr->ibmr.iova = 0; in mlx5_ib_map_klm_mr_sg_pi()
2346 pi_mr->pi_iova = pi_mr->data_length; in mlx5_ib_map_klm_mr_sg_pi()
2347 ibmr->length = pi_mr->ibmr.length; in mlx5_ib_map_klm_mr_sg_pi()
2361 WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY); in mlx5_ib_map_mr_sg_pi()
2363 mr->ndescs = 0; in mlx5_ib_map_mr_sg_pi()
2364 mr->data_length = 0; in mlx5_ib_map_mr_sg_pi()
2365 mr->data_iova = 0; in mlx5_ib_map_mr_sg_pi()
2366 mr->meta_ndescs = 0; in mlx5_ib_map_mr_sg_pi()
2367 mr->pi_iova = 0; in mlx5_ib_map_mr_sg_pi()
2387 pi_mr = mr->mtt_mr; in mlx5_ib_map_mr_sg_pi()
2394 pi_mr = mr->klm_mr; in mlx5_ib_map_mr_sg_pi()
2399 return -ENOMEM; in mlx5_ib_map_mr_sg_pi()
2402 /* This is zero-based memory region */ in mlx5_ib_map_mr_sg_pi()
2403 ibmr->iova = 0; in mlx5_ib_map_mr_sg_pi()
2404 mr->pi_mr = pi_mr; in mlx5_ib_map_mr_sg_pi()
2406 ibmr->sig_attrs->meta_length = pi_mr->meta_length; in mlx5_ib_map_mr_sg_pi()
2408 ibmr->sig_attrs->meta_length = mr->meta_length; in mlx5_ib_map_mr_sg_pi()
2419 mr->ndescs = 0; in mlx5_ib_map_mr_sg()
2421 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map, in mlx5_ib_map_mr_sg()
2422 mr->desc_size * mr->max_descs, in mlx5_ib_map_mr_sg()
2425 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS) in mlx5_ib_map_mr_sg()
2432 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map, in mlx5_ib_map_mr_sg()
2433 mr->desc_size * mr->max_descs, in mlx5_ib_map_mr_sg()