Lines Matching full:mr

126 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)  in destroy_mkey()  argument
128 WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))); in destroy_mkey()
130 return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key); in destroy_mkey()
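
The three hits above cover essentially all of destroy_mkey(): a sanity check that the mkey has already been dropped from the ODP lookup xarray, then the firmware destroy call. A minimal reconstruction of that flow, with only the braces and blank lines assumed:

    static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
    {
            /* The mkey must no longer be reachable through the ODP xarray. */
            WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));

            /* Ask firmware to tear down the memory key itself. */
            return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
    }
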
138 mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status); in create_mkey_warn()
358 /* Synchronously create a MR in the cache */
749 struct mlx5_ib_mr *mr; in _mlx5_mr_cache_alloc() local
752 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in _mlx5_mr_cache_alloc()
753 if (!mr) in _mlx5_mr_cache_alloc()
763 err = create_cache_mkey(ent, &mr->mmkey.key); in _mlx5_mr_cache_alloc()
768 kfree(mr); in _mlx5_mr_cache_alloc()
772 mr->mmkey.key = pop_stored_mkey(ent); in _mlx5_mr_cache_alloc()
776 mr->mmkey.cache_ent = ent; in _mlx5_mr_cache_alloc()
777 mr->mmkey.type = MLX5_MKEY_MR; in _mlx5_mr_cache_alloc()
778 init_waitqueue_head(&mr->mmkey.wait); in _mlx5_mr_cache_alloc()
779 return mr; in _mlx5_mr_cache_alloc()
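
The _mlx5_mr_cache_alloc() hits show the two ways a freshly allocated struct mlx5_ib_mr gets its mkey: either a cache mkey is created synchronously, or one already stored in the cache entry is popped. A sketch of both paths and the common tail; the branch condition and the exact error returns are not in the hits and are marked as assumptions:

    mr = kzalloc(sizeof(*mr), GFP_KERNEL);
    if (!mr)
            return ERR_PTR(-ENOMEM);                /* assumed error path */

    /* Path A - the entry has no stored mkeys: create one synchronously. */
    err = create_cache_mkey(ent, &mr->mmkey.key);
    if (err) {
            kfree(mr);
            return ERR_PTR(err);                    /* assumed error path */
    }

    /* Path B - reuse an mkey already stored in the cache entry. */
    mr->mmkey.key = pop_stored_mkey(ent);

    /* Common tail: tie the mkey to its cache entry and initialise it. */
    mr->mmkey.cache_ent = ent;
    mr->mmkey.type = MLX5_MKEY_MR;
    init_waitqueue_head(&mr->mmkey.wait);
    return mr;
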
1068 struct mlx5_ib_mr *mr; in mlx5_ib_get_dma_mr() local
1073 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx5_ib_get_dma_mr()
1074 if (!mr) in mlx5_ib_get_dma_mr()
1090 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in mlx5_ib_get_dma_mr()
1095 mr->mmkey.type = MLX5_MKEY_MR; in mlx5_ib_get_dma_mr()
1096 mr->ibmr.lkey = mr->mmkey.key; in mlx5_ib_get_dma_mr()
1097 mr->ibmr.rkey = mr->mmkey.key; in mlx5_ib_get_dma_mr()
1098 mr->umem = NULL; in mlx5_ib_get_dma_mr()
1100 return &mr->ibmr; in mlx5_ib_get_dma_mr()
1106 kfree(mr); in mlx5_ib_get_dma_mr()
1129 static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, in set_mr_fields() argument
1132 mr->ibmr.lkey = mr->mmkey.key; in set_mr_fields()
1133 mr->ibmr.rkey = mr->mmkey.key; in set_mr_fields()
1134 mr->ibmr.length = length; in set_mr_fields()
1135 mr->ibmr.device = &dev->ib_dev; in set_mr_fields()
1136 mr->ibmr.iova = iova; in set_mr_fields()
1137 mr->access_flags = access_flags; in set_mr_fields()
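
Lines 1132-1137 are the complete body of set_mr_fields(): it mirrors the hardware mkey into the generic ib_mr and records the access flags. The trailing parameters (length, access_flags, iova) are not visible in the hits and are inferred from the call sites elsewhere in this listing:

    static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
                              u64 length, int access_flags, u64 iova)
    {
            /* lkey and rkey are both taken from the underlying mkey. */
            mr->ibmr.lkey = mr->mmkey.key;
            mr->ibmr.rkey = mr->mmkey.key;
            mr->ibmr.length = length;
            mr->ibmr.device = &dev->ib_dev;
            mr->ibmr.iova = iova;
            mr->access_flags = access_flags;
    }
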
1160 struct mlx5_ib_mr *mr; in alloc_cacheable_mr() local
1176 * If the MR can't come from the cache then synchronously create an uncached in alloc_cacheable_mr()
1181 mr = reg_create(pd, umem, iova, access_flags, page_size, false); in alloc_cacheable_mr()
1183 if (IS_ERR(mr)) in alloc_cacheable_mr()
1184 return mr; in alloc_cacheable_mr()
1185 mr->mmkey.rb_key = rb_key; in alloc_cacheable_mr()
1186 return mr; in alloc_cacheable_mr()
1189 mr = _mlx5_mr_cache_alloc(dev, ent, access_flags); in alloc_cacheable_mr()
1190 if (IS_ERR(mr)) in alloc_cacheable_mr()
1191 return mr; in alloc_cacheable_mr()
1193 mr->ibmr.pd = pd; in alloc_cacheable_mr()
1194 mr->umem = umem; in alloc_cacheable_mr()
1195 mr->page_shift = order_base_2(page_size); in alloc_cacheable_mr()
1196 set_mr_fields(dev, mr, umem->length, access_flags, iova); in alloc_cacheable_mr()
1198 return mr; in alloc_cacheable_mr()
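
The alloc_cacheable_mr() hits show its two outcomes: with no usable cache entry the MR is created synchronously and uncached via reg_create(), otherwise an mkey is taken from the matching cache entry and only the per-MR fields are filled in. A sketch of the two paths; how ent and rb_key are obtained, and the condition that selects between the paths, are not in the hits:

    /* No usable cache entry: fall back to a synchronous, uncached mkey. */
    mr = reg_create(pd, umem, iova, access_flags, page_size, false);
    if (IS_ERR(mr))
            return mr;
    mr->mmkey.rb_key = rb_key;
    return mr;

    /* Cache hit: take an mkey from the entry and fill in the per-MR fields. */
    mr = _mlx5_mr_cache_alloc(dev, ent, access_flags);
    if (IS_ERR(mr))
            return mr;
    mr->ibmr.pd = pd;
    mr->umem = umem;
    mr->page_shift = order_base_2(page_size);
    set_mr_fields(dev, mr, umem->length, access_flags, iova);
    return mr;
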
1210 struct mlx5_ib_mr *mr; in reg_create() local
1220 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in reg_create()
1221 if (!mr) in reg_create()
1224 mr->ibmr.pd = pd; in reg_create()
1225 mr->access_flags = access_flags; in reg_create()
1226 mr->page_shift = order_base_2(page_size); in reg_create()
1243 mlx5_ib_populate_pas(umem, 1UL << mr->page_shift, pas, in reg_create()
1262 get_octo_len(iova, umem->length, mr->page_shift)); in reg_create()
1263 MLX5_SET(mkc, mkc, log_page_size, mr->page_shift); in reg_create()
1268 get_octo_len(iova, umem->length, mr->page_shift)); in reg_create()
1271 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in reg_create()
1276 mr->mmkey.type = MLX5_MKEY_MR; in reg_create()
1277 mr->mmkey.ndescs = get_octo_len(iova, umem->length, mr->page_shift); in reg_create()
1278 mr->umem = umem; in reg_create()
1279 set_mr_fields(dev, mr, umem->length, access_flags, iova); in reg_create()
1282 mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key); in reg_create()
1284 return mr; in reg_create()
1289 kfree(mr); in reg_create()
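
reg_create() is the non-cached registration path: allocate the MR, fill the page-address array and the mkey context, then create the mkey in firmware. A condensed sketch of the visible steps; allocation of the command mailbox, the other MLX5_SET() fields, and the full error unwind are not shown in the hits:

    mr = kzalloc(sizeof(*mr), GFP_KERNEL);
    if (!mr)
            return ERR_PTR(-ENOMEM);                /* assumed error path */
    mr->ibmr.pd = pd;
    mr->access_flags = access_flags;
    mr->page_shift = order_base_2(page_size);

    /* Write the page addresses and the page-size field of the mkey context. */
    mlx5_ib_populate_pas(umem, 1UL << mr->page_shift, pas, 0 /* last arg not shown */);
    MLX5_SET(mkc, mkc, log_page_size, mr->page_shift);

    err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
    if (err) {
            kfree(mr);                              /* error unwind simplified */
            return ERR_PTR(err);
    }

    mr->mmkey.type = MLX5_MKEY_MR;
    mr->mmkey.ndescs = get_octo_len(iova, umem->length, mr->page_shift);
    mr->umem = umem;
    set_mr_fields(dev, mr, umem->length, access_flags, iova);
    return mr;
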
1298 struct mlx5_ib_mr *mr; in mlx5_ib_get_dm_mr() local
1303 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx5_ib_get_dm_mr()
1304 if (!mr) in mlx5_ib_get_dm_mr()
1320 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in mlx5_ib_get_dm_mr()
1326 set_mr_fields(dev, mr, length, acc, start_addr); in mlx5_ib_get_dm_mr()
1328 return &mr->ibmr; in mlx5_ib_get_dm_mr()
1334 kfree(mr); in mlx5_ib_get_dm_mr()
1392 struct mlx5_ib_mr *mr = NULL; in create_real_mr() local
1398 mr = alloc_cacheable_mr(pd, umem, iova, access_flags); in create_real_mr()
1404 mr = reg_create(pd, umem, iova, access_flags, page_size, true); in create_real_mr()
1407 if (IS_ERR(mr)) { in create_real_mr()
1409 return ERR_CAST(mr); in create_real_mr()
1412 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); in create_real_mr()
1418 * If the MR was created with reg_create then it will be in create_real_mr()
1422 err = mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ENABLE); in create_real_mr()
1424 mlx5_ib_dereg_mr(&mr->ibmr, NULL); in create_real_mr()
1428 return &mr->ibmr; in create_real_mr()
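
create_real_mr() ties the two registration paths together: try the cache via alloc_cacheable_mr(), otherwise build the mkey with reg_create(), then push the page list and enable the MR in a single UMR operation. A sketch of the tail only; the guard around the UMR step (the comment at 1418 is truncated in this listing) and the error return value are assumptions:

    /* Program the translation table and enable the MR in one UMR operation. */
    err = mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ENABLE);
    if (err) {
            mlx5_ib_dereg_mr(&mr->ibmr, NULL);
            return ERR_PTR(err);                    /* assumed */
    }
    return &mr->ibmr;
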
1437 struct mlx5_ib_mr *mr; in create_user_odp_mr() local
1452 mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags); in create_user_odp_mr()
1453 if (IS_ERR(mr)) in create_user_odp_mr()
1454 return ERR_CAST(mr); in create_user_odp_mr()
1455 return &mr->ibmr; in create_user_odp_mr()
1467 mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags); in create_user_odp_mr()
1468 if (IS_ERR(mr)) { in create_user_odp_mr()
1470 return ERR_CAST(mr); in create_user_odp_mr()
1472 xa_init(&mr->implicit_children); in create_user_odp_mr()
1474 odp->private = mr; in create_user_odp_mr()
1475 err = mlx5r_store_odp_mkey(dev, &mr->mmkey); in create_user_odp_mr()
1479 err = mlx5_ib_init_odp_mr(mr); in create_user_odp_mr()
1482 return &mr->ibmr; in create_user_odp_mr()
1485 mlx5_ib_dereg_mr(&mr->ibmr, NULL); in create_user_odp_mr()
1514 struct mlx5_ib_mr *mr = umem_dmabuf->private; in mlx5_ib_dmabuf_invalidate_cb() local
1521 mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP); in mlx5_ib_dmabuf_invalidate_cb()
1536 struct mlx5_ib_mr *mr = NULL; in mlx5_ib_reg_user_mr_dmabuf() local
1561 mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr, in mlx5_ib_reg_user_mr_dmabuf()
1563 if (IS_ERR(mr)) { in mlx5_ib_reg_user_mr_dmabuf()
1565 return ERR_CAST(mr); in mlx5_ib_reg_user_mr_dmabuf()
1568 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); in mlx5_ib_reg_user_mr_dmabuf()
1570 atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages); in mlx5_ib_reg_user_mr_dmabuf()
1571 umem_dmabuf->private = mr; in mlx5_ib_reg_user_mr_dmabuf()
1572 err = mlx5r_store_odp_mkey(dev, &mr->mmkey); in mlx5_ib_reg_user_mr_dmabuf()
1576 err = mlx5_ib_init_dmabuf_mr(mr); in mlx5_ib_reg_user_mr_dmabuf()
1579 return &mr->ibmr; in mlx5_ib_reg_user_mr_dmabuf()
1582 mlx5_ib_dereg_mr(&mr->ibmr, NULL); in mlx5_ib_reg_user_mr_dmabuf()
1603 static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr, in can_use_umr_rereg_pas() argument
1608 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); in can_use_umr_rereg_pas()
1611 if (!mr->mmkey.cache_ent) in can_use_umr_rereg_pas()
1620 return (mr->mmkey.cache_ent->rb_key.ndescs) >= in can_use_umr_rereg_pas()
1624 static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd, in umr_rereg_pas() argument
1628 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); in umr_rereg_pas()
1630 struct ib_umem *old_umem = mr->umem; in umr_rereg_pas()
1634 * To keep everything simple the MR is revoked before we start to mess in umr_rereg_pas()
1636 * MR. in umr_rereg_pas()
1638 err = mlx5r_umr_revoke_mr(mr); in umr_rereg_pas()
1643 mr->ibmr.pd = pd; in umr_rereg_pas()
1647 mr->access_flags = access_flags; in umr_rereg_pas()
1651 mr->ibmr.iova = iova; in umr_rereg_pas()
1652 mr->ibmr.length = new_umem->length; in umr_rereg_pas()
1653 mr->page_shift = order_base_2(page_size); in umr_rereg_pas()
1654 mr->umem = new_umem; in umr_rereg_pas()
1655 err = mlx5r_umr_update_mr_pas(mr, upd_flags); in umr_rereg_pas()
1658 * The MR is revoked at this point so there is no issue to free in umr_rereg_pas()
1661 mr->umem = old_umem; in umr_rereg_pas()
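
umr_rereg_pas() shows the revoke-then-rewrite pattern for changing an MR's backing pages: the mkey is revoked first so the device cannot use it while its fields are rewritten, and if re-programming the page list fails the old umem pointer is simply restored, since the mkey is still revoked. A sketch assembled from the hits; the error returns and the release of new_umem on failure are assumptions:

    struct ib_umem *old_umem = mr->umem;
    int err;

    /* Revoke first so the mkey cannot be used while it is being modified. */
    err = mlx5r_umr_revoke_mr(mr);
    if (err)
            return err;                             /* assumed */

    mr->ibmr.pd = pd;
    mr->access_flags = access_flags;
    mr->ibmr.iova = iova;
    mr->ibmr.length = new_umem->length;
    mr->page_shift = order_base_2(page_size);
    mr->umem = new_umem;

    err = mlx5r_umr_update_mr_pas(mr, upd_flags);
    if (err) {
            /* The mkey is still revoked, so dropping new_umem is safe. */
            mr->umem = old_umem;
            /* release of new_umem assumed here */
    }
    return err;                                     /* assumed */
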
1677 struct mlx5_ib_mr *mr = to_mmr(ib_mr); in mlx5_ib_rereg_user_mr() local
1692 new_access_flags = mr->access_flags; in mlx5_ib_rereg_user_mr()
1700 if (can_use_umr_rereg_access(dev, mr->access_flags, in mlx5_ib_rereg_user_mr()
1702 err = mlx5r_umr_rereg_pd_access(mr, new_pd, in mlx5_ib_rereg_user_mr()
1708 /* DM or ODP MR's don't have a normal umem so we can't re-use it */ in mlx5_ib_rereg_user_mr()
1709 if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr)) in mlx5_ib_rereg_user_mr()
1713 * Only one active MR can refer to a umem at one time, revoke in mlx5_ib_rereg_user_mr()
1714 * the old MR before assigning the umem to the new one. in mlx5_ib_rereg_user_mr()
1716 err = mlx5r_umr_revoke_mr(mr); in mlx5_ib_rereg_user_mr()
1719 umem = mr->umem; in mlx5_ib_rereg_user_mr()
1720 mr->umem = NULL; in mlx5_ib_rereg_user_mr()
1723 return create_real_mr(new_pd, umem, mr->ibmr.iova, in mlx5_ib_rereg_user_mr()
1731 if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr)) in mlx5_ib_rereg_user_mr()
1735 can_use_umr_rereg_access(dev, mr->access_flags, new_access_flags)) { in mlx5_ib_rereg_user_mr()
1745 if (can_use_umr_rereg_pas(mr, new_umem, new_access_flags, iova, in mlx5_ib_rereg_user_mr()
1747 err = umr_rereg_pas(mr, new_pd, new_access_flags, flags, in mlx5_ib_rereg_user_mr()
1759 * Everything else has no state we can preserve, just create a new MR in mlx5_ib_rereg_user_mr()
1769 struct mlx5_ib_mr *mr, in mlx5_alloc_priv_descs() argument
1786 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL); in mlx5_alloc_priv_descs()
1787 if (!mr->descs_alloc) in mlx5_alloc_priv_descs()
1790 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN); in mlx5_alloc_priv_descs()
1792 mr->desc_map = dma_map_single(ddev, mr->descs, size, DMA_TO_DEVICE); in mlx5_alloc_priv_descs()
1793 if (dma_mapping_error(ddev, mr->desc_map)) { in mlx5_alloc_priv_descs()
1800 kfree(mr->descs_alloc); in mlx5_alloc_priv_descs()
1806 mlx5_free_priv_descs(struct mlx5_ib_mr *mr) in mlx5_free_priv_descs() argument
1808 if (!mr->umem && mr->descs) { in mlx5_free_priv_descs()
1809 struct ib_device *device = mr->ibmr.device; in mlx5_free_priv_descs()
1810 int size = mr->max_descs * mr->desc_size; in mlx5_free_priv_descs()
1813 dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size, in mlx5_free_priv_descs()
1815 kfree(mr->descs_alloc); in mlx5_free_priv_descs()
1816 mr->descs = NULL; in mlx5_free_priv_descs()
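
The mlx5_free_priv_descs() hits are nearly the whole function: a private descriptor buffer exists only for MRs without a umem, and freeing it means undoing the DMA mapping made in mlx5_alloc_priv_descs() and releasing the alignment-padded allocation. A reconstruction; the to_mdev() conversion and the DMA direction are assumptions consistent with the mapping call at line 1792:

    static void mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
    {
            /* Only MRs that own a private descriptor array (no umem) free one. */
            if (!mr->umem && mr->descs) {
                    struct ib_device *device = mr->ibmr.device;
                    int size = mr->max_descs * mr->desc_size;
                    struct mlx5_ib_dev *dev = to_mdev(device);      /* assumed */

                    dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
                                     DMA_TO_DEVICE);                /* direction assumed */
                    kfree(mr->descs_alloc);
                    mr->descs = NULL;
            }
    }
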
1821 struct mlx5_ib_mr *mr) in cache_ent_find_and_store() argument
1827 if (mr->mmkey.cache_ent) { in cache_ent_find_and_store()
1828 xa_lock_irq(&mr->mmkey.cache_ent->mkeys); in cache_ent_find_and_store()
1829 mr->mmkey.cache_ent->in_use--; in cache_ent_find_and_store()
1834 ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key); in cache_ent_find_and_store()
1836 if (ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) { in cache_ent_find_and_store()
1841 mr->mmkey.cache_ent = ent; in cache_ent_find_and_store()
1842 xa_lock_irq(&mr->mmkey.cache_ent->mkeys); in cache_ent_find_and_store()
1848 ent = mlx5r_cache_create_ent_locked(dev, mr->mmkey.rb_key, false); in cache_ent_find_and_store()
1853 mr->mmkey.cache_ent = ent; in cache_ent_find_and_store()
1854 xa_lock_irq(&mr->mmkey.cache_ent->mkeys); in cache_ent_find_and_store()
1857 ret = push_mkey_locked(mr->mmkey.cache_ent, false, in cache_ent_find_and_store()
1858 xa_mk_value(mr->mmkey.key)); in cache_ent_find_and_store()
1859 xa_unlock_irq(&mr->mmkey.cache_ent->mkeys); in cache_ent_find_and_store()
1865 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_dereg_mr() local
1870 * Any async use of the mr must hold the refcount, once the refcount in mlx5_ib_dereg_mr()
1875 refcount_read(&mr->mmkey.usecount) != 0 && in mlx5_ib_dereg_mr()
1876 xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))) in mlx5_ib_dereg_mr()
1877 mlx5r_deref_wait_odp_mkey(&mr->mmkey); in mlx5_ib_dereg_mr()
1880 xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), in mlx5_ib_dereg_mr()
1881 mr->sig, NULL, GFP_KERNEL); in mlx5_ib_dereg_mr()
1883 if (mr->mtt_mr) { in mlx5_ib_dereg_mr()
1884 rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL); in mlx5_ib_dereg_mr()
1887 mr->mtt_mr = NULL; in mlx5_ib_dereg_mr()
1889 if (mr->klm_mr) { in mlx5_ib_dereg_mr()
1890 rc = mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL); in mlx5_ib_dereg_mr()
1893 mr->klm_mr = NULL; in mlx5_ib_dereg_mr()
1897 mr->sig->psv_memory.psv_idx)) in mlx5_ib_dereg_mr()
1899 mr->sig->psv_memory.psv_idx); in mlx5_ib_dereg_mr()
1900 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx)) in mlx5_ib_dereg_mr()
1902 mr->sig->psv_wire.psv_idx); in mlx5_ib_dereg_mr()
1903 kfree(mr->sig); in mlx5_ib_dereg_mr()
1904 mr->sig = NULL; in mlx5_ib_dereg_mr()
1908 if (mr->umem && mlx5r_umr_can_load_pas(dev, mr->umem->length)) in mlx5_ib_dereg_mr()
1909 if (mlx5r_umr_revoke_mr(mr) || in mlx5_ib_dereg_mr()
1910 cache_ent_find_and_store(dev, mr)) in mlx5_ib_dereg_mr()
1911 mr->mmkey.cache_ent = NULL; in mlx5_ib_dereg_mr()
1913 if (!mr->mmkey.cache_ent) { in mlx5_ib_dereg_mr()
1914 rc = destroy_mkey(to_mdev(mr->ibmr.device), mr); in mlx5_ib_dereg_mr()
1919 if (mr->umem) { in mlx5_ib_dereg_mr()
1920 bool is_odp = is_odp_mr(mr); in mlx5_ib_dereg_mr()
1923 atomic_sub(ib_umem_num_pages(mr->umem), in mlx5_ib_dereg_mr()
1925 ib_umem_release(mr->umem); in mlx5_ib_dereg_mr()
1927 mlx5_ib_free_odp_mr(mr); in mlx5_ib_dereg_mr()
1930 if (!mr->mmkey.cache_ent) in mlx5_ib_dereg_mr()
1931 mlx5_free_priv_descs(mr); in mlx5_ib_dereg_mr()
1933 kfree(mr); in mlx5_ib_dereg_mr()
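
mlx5_ib_dereg_mr() ends by deciding whether the mkey goes back to the cache or is destroyed, and then releases the umem and the MR itself. The hits from line 1908 onward show that shape; the guards marked below and the final return are assumptions:

    /* Try to park a UMR-capable mkey back in the cache; on any failure it is
     * detached from the cache and destroyed instead. */
    if (mr->umem && mlx5r_umr_can_load_pas(dev, mr->umem->length))
            if (mlx5r_umr_revoke_mr(mr) ||
                cache_ent_find_and_store(dev, mr))
                    mr->mmkey.cache_ent = NULL;

    if (!mr->mmkey.cache_ent) {
            rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
            if (rc)
                    return rc;                      /* assumed */
    }

    if (mr->umem) {
            bool is_odp = is_odp_mr(mr);

            if (!is_odp)                            /* guard assumed */
                    atomic_sub(ib_umem_num_pages(mr->umem),
                               &dev->mdev->priv.reg_pages);
            ib_umem_release(mr->umem);
            if (is_odp)                             /* guard assumed */
                    mlx5_ib_free_odp_mr(mr);
    }

    if (!mr->mmkey.cache_ent)
            mlx5_free_priv_descs(mr);
    kfree(mr);
    return 0;                                       /* assumed */
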
1954 static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, in _mlx5_alloc_mkey_descs() argument
1961 mr->access_mode = access_mode; in _mlx5_alloc_mkey_descs()
1962 mr->desc_size = desc_size; in _mlx5_alloc_mkey_descs()
1963 mr->max_descs = ndescs; in _mlx5_alloc_mkey_descs()
1965 err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size); in _mlx5_alloc_mkey_descs()
1971 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in _mlx5_alloc_mkey_descs()
1975 mr->mmkey.type = MLX5_MKEY_MR; in _mlx5_alloc_mkey_descs()
1976 mr->ibmr.lkey = mr->mmkey.key; in _mlx5_alloc_mkey_descs()
1977 mr->ibmr.rkey = mr->mmkey.key; in _mlx5_alloc_mkey_descs()
1982 mlx5_free_priv_descs(mr); in _mlx5_alloc_mkey_descs()
1993 struct mlx5_ib_mr *mr; in mlx5_ib_alloc_pi_mr() local
1997 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx5_ib_alloc_pi_mr()
1998 if (!mr) in mlx5_ib_alloc_pi_mr()
2001 mr->ibmr.pd = pd; in mlx5_ib_alloc_pi_mr()
2002 mr->ibmr.device = pd->device; in mlx5_ib_alloc_pi_mr()
2013 err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift, in mlx5_ib_alloc_pi_mr()
2018 mr->umem = NULL; in mlx5_ib_alloc_pi_mr()
2021 return mr; in mlx5_ib_alloc_pi_mr()
2026 kfree(mr); in mlx5_ib_alloc_pi_mr()
2030 static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, in mlx5_alloc_mem_reg_descs() argument
2033 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt), in mlx5_alloc_mem_reg_descs()
2038 static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, in mlx5_alloc_sg_gaps_descs() argument
2041 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm), in mlx5_alloc_sg_gaps_descs()
2045 static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, in mlx5_alloc_integrity_descs() argument
2054 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL); in mlx5_alloc_integrity_descs()
2055 if (!mr->sig) in mlx5_alloc_integrity_descs()
2063 mr->sig->psv_memory.psv_idx = psv_index[0]; in mlx5_alloc_integrity_descs()
2064 mr->sig->psv_wire.psv_idx = psv_index[1]; in mlx5_alloc_integrity_descs()
2066 mr->sig->sig_status_checked = true; in mlx5_alloc_integrity_descs()
2067 mr->sig->sig_err_exists = false; in mlx5_alloc_integrity_descs()
2069 ++mr->sig->sigerr_count; in mlx5_alloc_integrity_descs()
2070 mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg, in mlx5_alloc_integrity_descs()
2073 if (IS_ERR(mr->klm_mr)) { in mlx5_alloc_integrity_descs()
2074 err = PTR_ERR(mr->klm_mr); in mlx5_alloc_integrity_descs()
2077 mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg, in mlx5_alloc_integrity_descs()
2080 if (IS_ERR(mr->mtt_mr)) { in mlx5_alloc_integrity_descs()
2081 err = PTR_ERR(mr->mtt_mr); in mlx5_alloc_integrity_descs()
2090 err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0, in mlx5_alloc_integrity_descs()
2095 err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), in mlx5_alloc_integrity_descs()
2096 mr->sig, GFP_KERNEL)); in mlx5_alloc_integrity_descs()
2102 destroy_mkey(dev, mr); in mlx5_alloc_integrity_descs()
2103 mlx5_free_priv_descs(mr); in mlx5_alloc_integrity_descs()
2105 mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL); in mlx5_alloc_integrity_descs()
2106 mr->mtt_mr = NULL; in mlx5_alloc_integrity_descs()
2108 mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL); in mlx5_alloc_integrity_descs()
2109 mr->klm_mr = NULL; in mlx5_alloc_integrity_descs()
2111 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx)) in mlx5_alloc_integrity_descs()
2113 mr->sig->psv_memory.psv_idx); in mlx5_alloc_integrity_descs()
2114 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx)) in mlx5_alloc_integrity_descs()
2116 mr->sig->psv_wire.psv_idx); in mlx5_alloc_integrity_descs()
2118 kfree(mr->sig); in mlx5_alloc_integrity_descs()
2130 struct mlx5_ib_mr *mr; in __mlx5_ib_alloc_mr() local
2134 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in __mlx5_ib_alloc_mr()
2135 if (!mr) in __mlx5_ib_alloc_mr()
2144 mr->ibmr.device = pd->device; in __mlx5_ib_alloc_mr()
2145 mr->umem = NULL; in __mlx5_ib_alloc_mr()
2149 err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen); in __mlx5_ib_alloc_mr()
2152 err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen); in __mlx5_ib_alloc_mr()
2155 err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg, in __mlx5_ib_alloc_mr()
2159 mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type); in __mlx5_ib_alloc_mr()
2168 return &mr->ibmr; in __mlx5_ib_alloc_mr()
2173 kfree(mr); in __mlx5_ib_alloc_mr()
2275 * if the user bound an ODP MR to this MW. in mlx5_ib_dealloc_mw()
2298 pr_err("signature status check requested on a non-signature enabled MR\n"); in mlx5_ib_check_mr_status()
2329 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_pa_mr_sg_pi() local
2333 mr->meta_length = 0; in mlx5_ib_map_pa_mr_sg_pi()
2336 mr->mmkey.ndescs = 1; in mlx5_ib_map_pa_mr_sg_pi()
2339 mr->data_length = sg_dma_len(data_sg) - sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2340 mr->data_iova = sg_dma_address(data_sg) + sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2343 mr->meta_ndescs = 1; in mlx5_ib_map_pa_mr_sg_pi()
2348 mr->meta_length = sg_dma_len(meta_sg) - sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2349 mr->pi_iova = sg_dma_address(meta_sg) + sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2351 ibmr->length = mr->data_length + mr->meta_length; in mlx5_ib_map_pa_mr_sg_pi()
2358 mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, in mlx5_ib_sg_to_klms() argument
2367 struct mlx5_klm *klms = mr->descs; in mlx5_ib_sg_to_klms()
2369 u32 lkey = mr->ibmr.pd->local_dma_lkey; in mlx5_ib_sg_to_klms()
2372 mr->ibmr.iova = sg_dma_address(sg) + sg_offset; in mlx5_ib_sg_to_klms()
2373 mr->ibmr.length = 0; in mlx5_ib_sg_to_klms()
2376 if (unlikely(i >= mr->max_descs)) in mlx5_ib_sg_to_klms()
2381 mr->ibmr.length += sg_dma_len(sg) - sg_offset; in mlx5_ib_sg_to_klms()
2389 mr->mmkey.ndescs = i; in mlx5_ib_sg_to_klms()
2390 mr->data_length = mr->ibmr.length; in mlx5_ib_sg_to_klms()
2396 if (unlikely(i + j >= mr->max_descs)) in mlx5_ib_sg_to_klms()
2403 mr->ibmr.length += sg_dma_len(sg) - sg_offset; in mlx5_ib_sg_to_klms()
2410 mr->meta_ndescs = j; in mlx5_ib_sg_to_klms()
2411 mr->meta_length = mr->ibmr.length - mr->data_length; in mlx5_ib_sg_to_klms()
2419 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_set_page() local
2422 if (unlikely(mr->mmkey.ndescs == mr->max_descs)) in mlx5_set_page()
2425 descs = mr->descs; in mlx5_set_page()
2426 descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); in mlx5_set_page()
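
mlx5_set_page() has the shape of the per-page callback used when mapping a scatterlist onto the MR: each DMA address is appended to the private descriptor array with the read/write enable bits set, as long as there is room. A reconstruction; only the return values are assumed:

    static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
    {
            struct mlx5_ib_mr *mr = to_mmr(ibmr);
            __be64 *descs;

            /* No room left in the private descriptor array. */
            if (unlikely(mr->mmkey.ndescs == mr->max_descs))
                    return -ENOMEM;                 /* assumed */

            descs = mr->descs;
            descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

            return 0;
    }
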
2433 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_set_page_pi() local
2436 if (unlikely(mr->mmkey.ndescs + mr->meta_ndescs == mr->max_descs)) in mlx5_set_page_pi()
2439 descs = mr->descs; in mlx5_set_page_pi()
2440 descs[mr->mmkey.ndescs + mr->meta_ndescs++] = in mlx5_set_page_pi()
2452 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_mtt_mr_sg_pi() local
2453 struct mlx5_ib_mr *pi_mr = mr->mtt_mr; in mlx5_ib_map_mtt_mr_sg_pi()
2493 * In order to use one MTT MR for data and metadata, we register in mlx5_ib_map_mtt_mr_sg_pi()
2495 * the metadata (the sig MR will verify that the HW will access in mlx5_ib_map_mtt_mr_sg_pi()
2517 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_klm_mr_sg_pi() local
2518 struct mlx5_ib_mr *pi_mr = mr->klm_mr; in mlx5_ib_map_klm_mr_sg_pi()
2550 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_mr_sg_pi() local
2556 mr->mmkey.ndescs = 0; in mlx5_ib_map_mr_sg_pi()
2557 mr->data_length = 0; in mlx5_ib_map_mr_sg_pi()
2558 mr->data_iova = 0; in mlx5_ib_map_mr_sg_pi()
2559 mr->meta_ndescs = 0; in mlx5_ib_map_mr_sg_pi()
2560 mr->pi_iova = 0; in mlx5_ib_map_mr_sg_pi()
2580 pi_mr = mr->mtt_mr; in mlx5_ib_map_mr_sg_pi()
2587 pi_mr = mr->klm_mr; in mlx5_ib_map_mr_sg_pi()
2597 mr->pi_mr = pi_mr; in mlx5_ib_map_mr_sg_pi()
2601 ibmr->sig_attrs->meta_length = mr->meta_length; in mlx5_ib_map_mr_sg_pi()
2609 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_mr_sg() local
2612 mr->mmkey.ndescs = 0; in mlx5_ib_map_mr_sg()
2614 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map, in mlx5_ib_map_mr_sg()
2615 mr->desc_size * mr->max_descs, in mlx5_ib_map_mr_sg()
2618 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS) in mlx5_ib_map_mr_sg()
2619 n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0, in mlx5_ib_map_mr_sg()
2625 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map, in mlx5_ib_map_mr_sg()
2626 mr->desc_size * mr->max_descs, in mlx5_ib_map_mr_sg()