Lines Matching full:mr

109 static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
110 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
119 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) in destroy_mkey() argument
121 WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))); in destroy_mkey()
123 return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); in destroy_mkey()
126 static inline bool mlx5_ib_pas_fits_in_mr(struct mlx5_ib_mr *mr, u64 start, in mlx5_ib_pas_fits_in_mr() argument
129 return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >= in mlx5_ib_pas_fits_in_mr()
135 struct mlx5_ib_mr *mr = in create_mkey_callback() local
137 struct mlx5_ib_dev *dev = mr->dev; in create_mkey_callback()
138 struct mlx5_cache_ent *ent = mr->cache_ent; in create_mkey_callback()
142 mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status); in create_mkey_callback()
143 kfree(mr); in create_mkey_callback()
152 mr->mmkey.type = MLX5_MKEY_MR; in create_mkey_callback()
153 mr->mmkey.key |= mlx5_idx_to_mkey( in create_mkey_callback()
154 MLX5_GET(create_mkey_out, mr->out, mkey_index)); in create_mkey_callback()
159 list_add_tail(&mr->list, &ent->head); in create_mkey_callback()
170 struct mlx5_ib_mr *mr; in alloc_cache_mr() local
172 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in alloc_cache_mr()
173 if (!mr) in alloc_cache_mr()
175 mr->order = ent->order; in alloc_cache_mr()
176 mr->cache_ent = ent; in alloc_cache_mr()
177 mr->dev = ent->dev; in alloc_cache_mr()
187 return mr; in alloc_cache_mr()
194 struct mlx5_ib_mr *mr; in add_keys() local
206 mr = alloc_cache_mr(ent, mkc); in add_keys()
207 if (!mr) { in add_keys()
215 kfree(mr); in add_keys()
220 err = mlx5_ib_create_mkey_cb(ent->dev, &mr->mmkey, in add_keys()
222 mr->out, sizeof(mr->out), in add_keys()
223 &mr->cb_work); in add_keys()
229 kfree(mr); in add_keys()
238 /* Synchronously create a MR in the cache */
242 struct mlx5_ib_mr *mr; in create_cache_mr() local
252 mr = alloc_cache_mr(ent, mkc); in create_cache_mr()
253 if (!mr) { in create_cache_mr()
258 err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey, in, inlen); in create_cache_mr()
262 mr->mmkey.type = MLX5_MKEY_MR; in create_cache_mr()
268 return mr; in create_cache_mr()
270 kfree(mr); in create_cache_mr()
278 struct mlx5_ib_mr *mr; in remove_cache_mr_locked() local
283 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in remove_cache_mr_locked()
284 list_del(&mr->list); in remove_cache_mr_locked()
288 mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey); in remove_cache_mr_locked()
289 kfree(mr); in remove_cache_mr_locked()
567 struct mlx5_ib_mr *mr; in mlx5_mr_cache_alloc() local
583 mr = create_cache_mr(ent); in mlx5_mr_cache_alloc()
584 if (IS_ERR(mr)) in mlx5_mr_cache_alloc()
585 return mr; in mlx5_mr_cache_alloc()
587 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in mlx5_mr_cache_alloc()
588 list_del(&mr->list); in mlx5_mr_cache_alloc()
593 mr->access_flags = access_flags; in mlx5_mr_cache_alloc()
594 return mr; in mlx5_mr_cache_alloc()
597 /* Return a MR already available in the cache */
601 struct mlx5_ib_mr *mr = NULL; in get_cache_mr() local
604 /* Try larger MR pools from the cache to satisfy the allocation */ in get_cache_mr()
611 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, in get_cache_mr()
613 list_del(&mr->list); in get_cache_mr()
623 if (!mr) in get_cache_mr()
626 return mr; in get_cache_mr()
629 static void detach_mr_from_cache(struct mlx5_ib_mr *mr) in detach_mr_from_cache() argument
631 struct mlx5_cache_ent *ent = mr->cache_ent; in detach_mr_from_cache()
633 mr->cache_ent = NULL; in detach_mr_from_cache()
639 void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) in mlx5_mr_cache_free() argument
641 struct mlx5_cache_ent *ent = mr->cache_ent; in mlx5_mr_cache_free()
646 if (mlx5_mr_cache_invalidate(mr)) { in mlx5_mr_cache_free()
647 detach_mr_from_cache(mr); in mlx5_mr_cache_free()
648 destroy_mkey(dev, mr); in mlx5_mr_cache_free()
649 kfree(mr); in mlx5_mr_cache_free()
654 list_add_tail(&mr->list, &ent->head); in mlx5_mr_cache_free()
665 struct mlx5_ib_mr *mr; in clean_keys() local
675 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in clean_keys()
676 list_move(&mr->list, &del_list); in clean_keys()
680 mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); in clean_keys()
683 list_for_each_entry_safe(mr, tmp_mr, &del_list, list) { in clean_keys()
684 list_del(&mr->list); in clean_keys()
685 kfree(mr); in clean_keys()
815 struct mlx5_ib_mr *mr; in mlx5_ib_get_dma_mr() local
820 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx5_ib_get_dma_mr()
821 if (!mr) in mlx5_ib_get_dma_mr()
836 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in mlx5_ib_get_dma_mr()
841 mr->mmkey.type = MLX5_MKEY_MR; in mlx5_ib_get_dma_mr()
842 mr->ibmr.lkey = mr->mmkey.key; in mlx5_ib_get_dma_mr()
843 mr->ibmr.rkey = mr->mmkey.key; in mlx5_ib_get_dma_mr()
844 mr->umem = NULL; in mlx5_ib_get_dma_mr()
846 return &mr->ibmr; in mlx5_ib_get_dma_mr()
852 kfree(mr); in mlx5_ib_get_dma_mr()
989 struct mlx5_ib_mr *mr; in alloc_mr_from_cache() local
998 mr = get_cache_mr(ent); in alloc_mr_from_cache()
999 if (!mr) { in alloc_mr_from_cache()
1000 mr = create_cache_mr(ent); in alloc_mr_from_cache()
1001 if (IS_ERR(mr)) in alloc_mr_from_cache()
1002 return mr; in alloc_mr_from_cache()
1005 mr->ibmr.pd = pd; in alloc_mr_from_cache()
1006 mr->umem = umem; in alloc_mr_from_cache()
1007 mr->access_flags = access_flags; in alloc_mr_from_cache()
1008 mr->desc_size = sizeof(struct mlx5_mtt); in alloc_mr_from_cache()
1009 mr->mmkey.iova = virt_addr; in alloc_mr_from_cache()
1010 mr->mmkey.size = len; in alloc_mr_from_cache()
1011 mr->mmkey.pd = to_mpd(pd)->pdn; in alloc_mr_from_cache()
1013 return mr; in alloc_mr_from_cache()
1020 int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages, in mlx5_ib_update_xlt() argument
1023 struct mlx5_ib_dev *dev = mr->dev; in mlx5_ib_update_xlt()
1086 if (mr->umem->is_odp) { in mlx5_ib_update_xlt()
1088 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); in mlx5_ib_update_xlt()
1106 wr.pd = mr->ibmr.pd; in mlx5_ib_update_xlt()
1107 wr.mkey = mr->mmkey.key; in mlx5_ib_update_xlt()
1108 wr.length = mr->mmkey.size; in mlx5_ib_update_xlt()
1109 wr.virt_addr = mr->mmkey.iova; in mlx5_ib_update_xlt()
1110 wr.access_flags = mr->access_flags; in mlx5_ib_update_xlt()
1119 if (mr->umem->is_odp) { in mlx5_ib_update_xlt()
1120 mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags); in mlx5_ib_update_xlt()
1122 __mlx5_ib_populate_pas(dev, mr->umem, page_shift, idx, in mlx5_ib_update_xlt()
1176 struct mlx5_ib_mr *mr; in reg_create() local
1184 mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL); in reg_create()
1185 if (!mr) in reg_create()
1188 mr->ibmr.pd = pd; in reg_create()
1189 mr->access_flags = access_flags; in reg_create()
1230 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in reg_create()
1235 mr->mmkey.type = MLX5_MKEY_MR; in reg_create()
1236 mr->desc_size = sizeof(struct mlx5_mtt); in reg_create()
1237 mr->dev = dev; in reg_create()
1240 mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key); in reg_create()
1242 return mr; in reg_create()
1249 kfree(mr); in reg_create()
1254 static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, in set_mr_fields() argument
1257 mr->ibmr.lkey = mr->mmkey.key; in set_mr_fields()
1258 mr->ibmr.rkey = mr->mmkey.key; in set_mr_fields()
1259 mr->ibmr.length = length; in set_mr_fields()
1260 mr->access_flags = access_flags; in set_mr_fields()
1268 struct mlx5_ib_mr *mr; in mlx5_ib_get_dm_mr() local
1273 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx5_ib_get_dm_mr()
1274 if (!mr) in mlx5_ib_get_dm_mr()
1290 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in mlx5_ib_get_dm_mr()
1296 set_mr_fields(dev, mr, length, acc); in mlx5_ib_get_dm_mr()
1298 return &mr->ibmr; in mlx5_ib_get_dm_mr()
1304 kfree(mr); in mlx5_ib_get_dm_mr()
1362 struct mlx5_ib_mr *mr = NULL; in mlx5_ib_reg_user_mr() local
1390 mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), udata, access_flags); in mlx5_ib_reg_user_mr()
1391 if (IS_ERR(mr)) in mlx5_ib_reg_user_mr()
1392 return ERR_CAST(mr); in mlx5_ib_reg_user_mr()
1393 return &mr->ibmr; in mlx5_ib_reg_user_mr()
1403 mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont, in mlx5_ib_reg_user_mr()
1405 if (IS_ERR(mr)) in mlx5_ib_reg_user_mr()
1406 mr = NULL; in mlx5_ib_reg_user_mr()
1409 if (!mr) { in mlx5_ib_reg_user_mr()
1411 mr = reg_create(NULL, pd, virt_addr, length, umem, ncont, in mlx5_ib_reg_user_mr()
1416 if (IS_ERR(mr)) { in mlx5_ib_reg_user_mr()
1417 err = PTR_ERR(mr); in mlx5_ib_reg_user_mr()
1421 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); in mlx5_ib_reg_user_mr()
1423 mr->umem = umem; in mlx5_ib_reg_user_mr()
1424 mr->npages = npages; in mlx5_ib_reg_user_mr()
1425 atomic_add(mr->npages, &dev->mdev->priv.reg_pages); in mlx5_ib_reg_user_mr()
1426 set_mr_fields(dev, mr, length, access_flags); in mlx5_ib_reg_user_mr()
1430 * If the MR was created with reg_create then it will be in mlx5_ib_reg_user_mr()
1436 err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift, in mlx5_ib_reg_user_mr()
1439 dereg_mr(dev, mr); in mlx5_ib_reg_user_mr()
1444 if (is_odp_mr(mr)) { in mlx5_ib_reg_user_mr()
1445 to_ib_umem_odp(mr->umem)->private = mr; in mlx5_ib_reg_user_mr()
1446 init_waitqueue_head(&mr->q_deferred_work); in mlx5_ib_reg_user_mr()
1447 atomic_set(&mr->num_deferred_work, 0); in mlx5_ib_reg_user_mr()
1449 mlx5_base_mkey(mr->mmkey.key), &mr->mmkey, in mlx5_ib_reg_user_mr()
1452 dereg_mr(dev, mr); in mlx5_ib_reg_user_mr()
1456 err = mlx5_ib_init_odp_mr(mr, xlt_with_umr); in mlx5_ib_reg_user_mr()
1458 dereg_mr(dev, mr); in mlx5_ib_reg_user_mr()
1463 return &mr->ibmr; in mlx5_ib_reg_user_mr()
1470 * mlx5_mr_cache_invalidate - Fence all DMA on the MR
1471 * @mr: The MR to fence
1473 * Upon return the NIC will not be doing any DMA to the pages under the MR,
1477 int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr) in mlx5_mr_cache_invalidate() argument
1481 if (mr->dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) in mlx5_mr_cache_invalidate()
1487 umrwr.pd = mr->dev->umrc.pd; in mlx5_mr_cache_invalidate()
1488 umrwr.mkey = mr->mmkey.key; in mlx5_mr_cache_invalidate()
1491 return mlx5_ib_post_send_wait(mr->dev, &umrwr); in mlx5_mr_cache_invalidate()
1494 static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, in rereg_umr() argument
1504 umrwr.mkey = mr->mmkey.key; in rereg_umr()
1522 struct mlx5_ib_mr *mr = to_mmr(ib_mr); in mlx5_ib_rereg_user_mr() local
1526 mr->access_flags; in mlx5_ib_rereg_user_mr()
1538 if (!mr->umem) in mlx5_ib_rereg_user_mr()
1541 if (is_odp_mr(mr)) in mlx5_ib_rereg_user_mr()
1548 addr = mr->umem->address; in mlx5_ib_rereg_user_mr()
1549 len = mr->umem->length; in mlx5_ib_rereg_user_mr()
1558 atomic_sub(mr->npages, &dev->mdev->priv.reg_pages); in mlx5_ib_rereg_user_mr()
1559 mr->npages = 0; in mlx5_ib_rereg_user_mr()
1560 ib_umem_release(mr->umem); in mlx5_ib_rereg_user_mr()
1561 mr->umem = NULL; in mlx5_ib_rereg_user_mr()
1563 err = mr_umem_get(dev, addr, len, access_flags, &mr->umem, in mlx5_ib_rereg_user_mr()
1567 mr->npages = ncont; in mlx5_ib_rereg_user_mr()
1568 atomic_add(mr->npages, &dev->mdev->priv.reg_pages); in mlx5_ib_rereg_user_mr()
1571 if (!mlx5_ib_can_reconfig_with_umr(dev, mr->access_flags, in mlx5_ib_rereg_user_mr()
1575 !mlx5_ib_pas_fits_in_mr(mr, addr, len))) { in mlx5_ib_rereg_user_mr()
1579 if (mr->cache_ent) in mlx5_ib_rereg_user_mr()
1580 detach_mr_from_cache(mr); in mlx5_ib_rereg_user_mr()
1581 err = destroy_mkey(dev, mr); in mlx5_ib_rereg_user_mr()
1585 mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont, in mlx5_ib_rereg_user_mr()
1588 if (IS_ERR(mr)) { in mlx5_ib_rereg_user_mr()
1589 err = PTR_ERR(mr); in mlx5_ib_rereg_user_mr()
1590 mr = to_mmr(ib_mr); in mlx5_ib_rereg_user_mr()
1597 mr->ibmr.pd = pd; in mlx5_ib_rereg_user_mr()
1598 mr->access_flags = access_flags; in mlx5_ib_rereg_user_mr()
1599 mr->mmkey.iova = addr; in mlx5_ib_rereg_user_mr()
1600 mr->mmkey.size = len; in mlx5_ib_rereg_user_mr()
1601 mr->mmkey.pd = to_mpd(pd)->pdn; in mlx5_ib_rereg_user_mr()
1609 err = mlx5_ib_update_xlt(mr, 0, npages, page_shift, in mlx5_ib_rereg_user_mr()
1612 err = rereg_umr(pd, mr, access_flags, flags); in mlx5_ib_rereg_user_mr()
1619 set_mr_fields(dev, mr, len, access_flags); in mlx5_ib_rereg_user_mr()
1624 ib_umem_release(mr->umem); in mlx5_ib_rereg_user_mr()
1625 mr->umem = NULL; in mlx5_ib_rereg_user_mr()
1627 clean_mr(dev, mr); in mlx5_ib_rereg_user_mr()
1633 struct mlx5_ib_mr *mr, in mlx5_alloc_priv_descs() argument
1643 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL); in mlx5_alloc_priv_descs()
1644 if (!mr->descs_alloc) in mlx5_alloc_priv_descs()
1647 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN); in mlx5_alloc_priv_descs()
1649 mr->desc_map = dma_map_single(device->dev.parent, mr->descs, in mlx5_alloc_priv_descs()
1651 if (dma_mapping_error(device->dev.parent, mr->desc_map)) { in mlx5_alloc_priv_descs()
1658 kfree(mr->descs_alloc); in mlx5_alloc_priv_descs()
1664 mlx5_free_priv_descs(struct mlx5_ib_mr *mr) in mlx5_free_priv_descs() argument
1666 if (mr->descs) { in mlx5_free_priv_descs()
1667 struct ib_device *device = mr->ibmr.device; in mlx5_free_priv_descs()
1668 int size = mr->max_descs * mr->desc_size; in mlx5_free_priv_descs()
1670 dma_unmap_single(device->dev.parent, mr->desc_map, in mlx5_free_priv_descs()
1672 kfree(mr->descs_alloc); in mlx5_free_priv_descs()
1673 mr->descs = NULL; in mlx5_free_priv_descs()
1677 static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) in clean_mr() argument
1679 if (mr->sig) { in clean_mr()
1681 mr->sig->psv_memory.psv_idx)) in clean_mr()
1683 mr->sig->psv_memory.psv_idx); in clean_mr()
1685 mr->sig->psv_wire.psv_idx)) in clean_mr()
1687 mr->sig->psv_wire.psv_idx); in clean_mr()
1688 xa_erase(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key)); in clean_mr()
1689 kfree(mr->sig); in clean_mr()
1690 mr->sig = NULL; in clean_mr()
1693 if (!mr->cache_ent) { in clean_mr()
1694 destroy_mkey(dev, mr); in clean_mr()
1695 mlx5_free_priv_descs(mr); in clean_mr()
1699 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) in dereg_mr() argument
1701 int npages = mr->npages; in dereg_mr()
1702 struct ib_umem *umem = mr->umem; in dereg_mr()
1705 if (is_odp_mr(mr)) in dereg_mr()
1706 mlx5_ib_fence_odp_mr(mr); in dereg_mr()
1708 clean_mr(dev, mr); in dereg_mr()
1710 if (mr->cache_ent) in dereg_mr()
1711 mlx5_mr_cache_free(dev, mr); in dereg_mr()
1713 kfree(mr); in dereg_mr()
1756 static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, in _mlx5_alloc_mkey_descs() argument
1763 mr->access_mode = access_mode; in _mlx5_alloc_mkey_descs()
1764 mr->desc_size = desc_size; in _mlx5_alloc_mkey_descs()
1765 mr->max_descs = ndescs; in _mlx5_alloc_mkey_descs()
1767 err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size); in _mlx5_alloc_mkey_descs()
1773 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in _mlx5_alloc_mkey_descs()
1777 mr->mmkey.type = MLX5_MKEY_MR; in _mlx5_alloc_mkey_descs()
1778 mr->ibmr.lkey = mr->mmkey.key; in _mlx5_alloc_mkey_descs()
1779 mr->ibmr.rkey = mr->mmkey.key; in _mlx5_alloc_mkey_descs()
1784 mlx5_free_priv_descs(mr); in _mlx5_alloc_mkey_descs()
1795 struct mlx5_ib_mr *mr; in mlx5_ib_alloc_pi_mr() local
1799 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx5_ib_alloc_pi_mr()
1800 if (!mr) in mlx5_ib_alloc_pi_mr()
1803 mr->ibmr.pd = pd; in mlx5_ib_alloc_pi_mr()
1804 mr->ibmr.device = pd->device; in mlx5_ib_alloc_pi_mr()
1815 err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift, in mlx5_ib_alloc_pi_mr()
1820 mr->umem = NULL; in mlx5_ib_alloc_pi_mr()
1823 return mr; in mlx5_ib_alloc_pi_mr()
1828 kfree(mr); in mlx5_ib_alloc_pi_mr()
1832 static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, in mlx5_alloc_mem_reg_descs() argument
1835 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt), in mlx5_alloc_mem_reg_descs()
1840 static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, in mlx5_alloc_sg_gaps_descs() argument
1843 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm), in mlx5_alloc_sg_gaps_descs()
1847 static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, in mlx5_alloc_integrity_descs() argument
1856 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL); in mlx5_alloc_integrity_descs()
1857 if (!mr->sig) in mlx5_alloc_integrity_descs()
1865 mr->sig->psv_memory.psv_idx = psv_index[0]; in mlx5_alloc_integrity_descs()
1866 mr->sig->psv_wire.psv_idx = psv_index[1]; in mlx5_alloc_integrity_descs()
1868 mr->sig->sig_status_checked = true; in mlx5_alloc_integrity_descs()
1869 mr->sig->sig_err_exists = false; in mlx5_alloc_integrity_descs()
1871 ++mr->sig->sigerr_count; in mlx5_alloc_integrity_descs()
1872 mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg, in mlx5_alloc_integrity_descs()
1875 if (IS_ERR(mr->klm_mr)) { in mlx5_alloc_integrity_descs()
1876 err = PTR_ERR(mr->klm_mr); in mlx5_alloc_integrity_descs()
1879 mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg, in mlx5_alloc_integrity_descs()
1882 if (IS_ERR(mr->mtt_mr)) { in mlx5_alloc_integrity_descs()
1883 err = PTR_ERR(mr->mtt_mr); in mlx5_alloc_integrity_descs()
1892 err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0, in mlx5_alloc_integrity_descs()
1897 err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), in mlx5_alloc_integrity_descs()
1898 mr->sig, GFP_KERNEL)); in mlx5_alloc_integrity_descs()
1904 destroy_mkey(dev, mr); in mlx5_alloc_integrity_descs()
1905 mlx5_free_priv_descs(mr); in mlx5_alloc_integrity_descs()
1907 dereg_mr(to_mdev(mr->mtt_mr->ibmr.device), mr->mtt_mr); in mlx5_alloc_integrity_descs()
1908 mr->mtt_mr = NULL; in mlx5_alloc_integrity_descs()
1910 dereg_mr(to_mdev(mr->klm_mr->ibmr.device), mr->klm_mr); in mlx5_alloc_integrity_descs()
1911 mr->klm_mr = NULL; in mlx5_alloc_integrity_descs()
1913 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx)) in mlx5_alloc_integrity_descs()
1915 mr->sig->psv_memory.psv_idx); in mlx5_alloc_integrity_descs()
1916 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx)) in mlx5_alloc_integrity_descs()
1918 mr->sig->psv_wire.psv_idx); in mlx5_alloc_integrity_descs()
1920 kfree(mr->sig); in mlx5_alloc_integrity_descs()
1932 struct mlx5_ib_mr *mr; in __mlx5_ib_alloc_mr() local
1936 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in __mlx5_ib_alloc_mr()
1937 if (!mr) in __mlx5_ib_alloc_mr()
1946 mr->ibmr.device = pd->device; in __mlx5_ib_alloc_mr()
1947 mr->umem = NULL; in __mlx5_ib_alloc_mr()
1951 err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen); in __mlx5_ib_alloc_mr()
1954 err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen); in __mlx5_ib_alloc_mr()
1957 err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg, in __mlx5_ib_alloc_mr()
1961 mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type); in __mlx5_ib_alloc_mr()
1970 return &mr->ibmr; in __mlx5_ib_alloc_mr()
1975 kfree(mr); in __mlx5_ib_alloc_mr()
2081 * SRCU if the user bound an ODP MR to this MW. in mlx5_ib_dealloc_mw()
2105 pr_err("signature status check requested on a non-signature enabled MR\n"); in mlx5_ib_check_mr_status()
2136 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_pa_mr_sg_pi() local
2140 mr->meta_length = 0; in mlx5_ib_map_pa_mr_sg_pi()
2143 mr->ndescs = 1; in mlx5_ib_map_pa_mr_sg_pi()
2146 mr->data_length = sg_dma_len(data_sg) - sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2147 mr->data_iova = sg_dma_address(data_sg) + sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2150 mr->meta_ndescs = 1; in mlx5_ib_map_pa_mr_sg_pi()
2155 mr->meta_length = sg_dma_len(meta_sg) - sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2156 mr->pi_iova = sg_dma_address(meta_sg) + sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2158 ibmr->length = mr->data_length + mr->meta_length; in mlx5_ib_map_pa_mr_sg_pi()
2165 mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, in mlx5_ib_sg_to_klms() argument
2174 struct mlx5_klm *klms = mr->descs; in mlx5_ib_sg_to_klms()
2176 u32 lkey = mr->ibmr.pd->local_dma_lkey; in mlx5_ib_sg_to_klms()
2179 mr->ibmr.iova = sg_dma_address(sg) + sg_offset; in mlx5_ib_sg_to_klms()
2180 mr->ibmr.length = 0; in mlx5_ib_sg_to_klms()
2183 if (unlikely(i >= mr->max_descs)) in mlx5_ib_sg_to_klms()
2188 mr->ibmr.length += sg_dma_len(sg) - sg_offset; in mlx5_ib_sg_to_klms()
2196 mr->ndescs = i; in mlx5_ib_sg_to_klms()
2197 mr->data_length = mr->ibmr.length; in mlx5_ib_sg_to_klms()
2203 if (unlikely(i + j >= mr->max_descs)) in mlx5_ib_sg_to_klms()
2210 mr->ibmr.length += sg_dma_len(sg) - sg_offset; in mlx5_ib_sg_to_klms()
2217 mr->meta_ndescs = j; in mlx5_ib_sg_to_klms()
2218 mr->meta_length = mr->ibmr.length - mr->data_length; in mlx5_ib_sg_to_klms()
2226 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_set_page() local
2229 if (unlikely(mr->ndescs == mr->max_descs)) in mlx5_set_page()
2232 descs = mr->descs; in mlx5_set_page()
2233 descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); in mlx5_set_page()
2240 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_set_page_pi() local
2243 if (unlikely(mr->ndescs + mr->meta_ndescs == mr->max_descs)) in mlx5_set_page_pi()
2246 descs = mr->descs; in mlx5_set_page_pi()
2247 descs[mr->ndescs + mr->meta_ndescs++] = in mlx5_set_page_pi()
2259 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_mtt_mr_sg_pi() local
2260 struct mlx5_ib_mr *pi_mr = mr->mtt_mr; in mlx5_ib_map_mtt_mr_sg_pi()
2300 * In order to use one MTT MR for data and metadata, we register in mlx5_ib_map_mtt_mr_sg_pi()
2302 * the metadata (the sig MR will verify that the HW will access in mlx5_ib_map_mtt_mr_sg_pi()
2324 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_klm_mr_sg_pi() local
2325 struct mlx5_ib_mr *pi_mr = mr->klm_mr; in mlx5_ib_map_klm_mr_sg_pi()
2357 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_mr_sg_pi() local
2363 mr->ndescs = 0; in mlx5_ib_map_mr_sg_pi()
2364 mr->data_length = 0; in mlx5_ib_map_mr_sg_pi()
2365 mr->data_iova = 0; in mlx5_ib_map_mr_sg_pi()
2366 mr->meta_ndescs = 0; in mlx5_ib_map_mr_sg_pi()
2367 mr->pi_iova = 0; in mlx5_ib_map_mr_sg_pi()
2387 pi_mr = mr->mtt_mr; in mlx5_ib_map_mr_sg_pi()
2394 pi_mr = mr->klm_mr; in mlx5_ib_map_mr_sg_pi()
2404 mr->pi_mr = pi_mr; in mlx5_ib_map_mr_sg_pi()
2408 ibmr->sig_attrs->meta_length = mr->meta_length; in mlx5_ib_map_mr_sg_pi()
2416 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_mr_sg() local
2419 mr->ndescs = 0; in mlx5_ib_map_mr_sg()
2421 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map, in mlx5_ib_map_mr_sg()
2422 mr->desc_size * mr->max_descs, in mlx5_ib_map_mr_sg()
2425 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS) in mlx5_ib_map_mr_sg()
2426 n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0, in mlx5_ib_map_mr_sg()
2432 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map, in mlx5_ib_map_mr_sg()
2433 mr->desc_size * mr->max_descs, in mlx5_ib_map_mr_sg()