
Lines Matching refs:mr in drivers/infiniband/sw/rdmavt/mr.c

117 static void rvt_deinit_mregion(struct rvt_mregion *mr)  in rvt_deinit_mregion()  argument
119 int i = mr->mapsz; in rvt_deinit_mregion()
121 mr->mapsz = 0; in rvt_deinit_mregion()
123 kfree(mr->map[--i]); in rvt_deinit_mregion()
124 percpu_ref_exit(&mr->refcount); in rvt_deinit_mregion()
129 struct rvt_mregion *mr = container_of(ref, struct rvt_mregion, in __rvt_mregion_complete() local
132 complete(&mr->comp); in __rvt_mregion_complete()
135 static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd, in rvt_init_mregion() argument
141 mr->mapsz = 0; in rvt_init_mregion()
144 mr->map[i] = kzalloc_node(sizeof(*mr->map[0]), GFP_KERNEL, in rvt_init_mregion()
146 if (!mr->map[i]) in rvt_init_mregion()
148 mr->mapsz++; in rvt_init_mregion()
150 init_completion(&mr->comp); in rvt_init_mregion()
152 if (percpu_ref_init(&mr->refcount, &__rvt_mregion_complete, in rvt_init_mregion()
156 atomic_set(&mr->lkey_invalid, 0); in rvt_init_mregion()
157 mr->pd = pd; in rvt_init_mregion()
158 mr->max_segs = count; in rvt_init_mregion()
161 rvt_deinit_mregion(mr); in rvt_init_mregion()
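
Taken together, lines 117-161 show the region's lifecycle pattern: rvt_init_mregion() allocates the map[] pages, initializes a completion, and registers __rvt_mregion_complete() as the percpu_ref release callback, while rvt_deinit_mregion() frees the maps and pairs the init with percpu_ref_exit(). A minimal userspace model of that refcount-plus-completion handshake (a plain atomic stands in for the percpu_ref; struct mregion and these helpers are illustrative, not the rdmavt types):

#include <pthread.h>
#include <stdatomic.h>

struct mregion {
        atomic_long refcount;           /* stands in for percpu_ref */
        pthread_mutex_t lock;           /* lock + cond + done model */
        pthread_cond_t cond;            /* struct completion comp */
        int done;
};

static void mregion_get(struct mregion *mr)
{
        atomic_fetch_add(&mr->refcount, 1);
}

static void mregion_put(struct mregion *mr)
{
        if (atomic_fetch_sub(&mr->refcount, 1) == 1) {
                /* last reference: models __rvt_mregion_complete() */
                pthread_mutex_lock(&mr->lock);
                mr->done = 1;
                pthread_cond_signal(&mr->cond);
                pthread_mutex_unlock(&mr->lock);
        }
}
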
177 static int rvt_alloc_lkey(struct rvt_mregion *mr, int dma_region) in rvt_alloc_lkey() argument
183 struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device); in rvt_alloc_lkey()
186 rvt_get_mr(mr); in rvt_alloc_lkey()
195 mr->lkey_published = 1; in rvt_alloc_lkey()
197 rcu_assign_pointer(dev->dma_mr, mr); in rvt_alloc_lkey()
198 rvt_get_mr(mr); in rvt_alloc_lkey()
222 mr->lkey = (r << (32 - dev->dparms.lkey_table_size)) | in rvt_alloc_lkey()
225 if (mr->lkey == 0) { in rvt_alloc_lkey()
226 mr->lkey |= 1 << 8; in rvt_alloc_lkey()
229 mr->lkey_published = 1; in rvt_alloc_lkey()
231 rcu_assign_pointer(rkt->table[r], mr); in rvt_alloc_lkey()
237 rvt_put_mr(mr); in rvt_alloc_lkey()
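
The lkey built at lines 222-226 packs three fields into 32 bits: the table index r in the high lkey_table_size bits, a generation counter (bumped when a slot is reused, so stale keys stop matching) starting at bit 8, and a low byte left to the consumer. A hedged sketch of that packing; field widths follow the fragment above, and make_lkey is an illustrative name:

#include <stdint.h>

/* table_bits == dev->dparms.lkey_table_size in the fragment above */
static uint32_t make_lkey(uint32_t index, uint32_t gen,
                          unsigned int table_bits)
{
        uint32_t gen_mask = (((uint32_t)1 << (24 - table_bits)) - 1);
        uint32_t lkey = (index << (32 - table_bits)) |
                        ((gen & gen_mask) << 8);

        /* lkey 0 is reserved for the DMA region published at line
         * 197, so a regular MR must never hand out key 0 */
        if (lkey == 0)
                lkey |= 1 << 8;
        return lkey;
}
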
247 static void rvt_free_lkey(struct rvt_mregion *mr) in rvt_free_lkey() argument
250 u32 lkey = mr->lkey; in rvt_free_lkey()
252 struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device); in rvt_free_lkey()
258 if (mr->lkey_published) { in rvt_free_lkey()
259 mr->lkey_published = 0; in rvt_free_lkey()
262 rvt_put_mr(mr); in rvt_free_lkey()
265 if (!mr->lkey_published) in rvt_free_lkey()
268 mr->lkey_published = 0; in rvt_free_lkey()
276 percpu_ref_kill(&mr->refcount); in rvt_free_lkey()
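
The free path at lines 247-276 unpublishes before it waits: the table slot (or dev->dma_mr) is cleared under RCU, the reference that publication held is dropped, and only then is the refcount killed so the final put can complete mr->comp. A rough userspace model of that ordering, reusing struct mregion from the sketch above (an atomic pointer store stands in for the RCU primitives):

static void free_lkey_model(struct mregion *_Atomic *slot,
                            struct mregion *mr)
{
        /* models clearing rkt->table[r] (or dev->dma_mr) */
        atomic_store(slot, NULL);
        /* drop the reference the published slot held (rvt_put_mr) */
        mregion_put(mr);
        /* the kernel then calls percpu_ref_kill(&mr->refcount), so
         * the count can only fall from here until comp fires */
}
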
281 struct rvt_mr *mr; in __rvt_alloc_mr() local
287 mr = kzalloc(struct_size(mr, mr.map, m), GFP_KERNEL); in __rvt_alloc_mr()
288 if (!mr) in __rvt_alloc_mr()
291 rval = rvt_init_mregion(&mr->mr, pd, count, 0); in __rvt_alloc_mr()
298 rval = rvt_alloc_lkey(&mr->mr, 0); in __rvt_alloc_mr()
301 mr->ibmr.lkey = mr->mr.lkey; in __rvt_alloc_mr()
302 mr->ibmr.rkey = mr->mr.lkey; in __rvt_alloc_mr()
304 return mr; in __rvt_alloc_mr()
307 rvt_deinit_mregion(&mr->mr); in __rvt_alloc_mr()
309 kfree(mr); in __rvt_alloc_mr()
310 mr = ERR_PTR(rval); in __rvt_alloc_mr()
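
Line 287 sizes the allocation with struct_size(): one struct rvt_mr header plus m trailing map pointers, with overflow checking folded in. The equivalent open-coded shape, using illustrative stand-in types:

#include <stdlib.h>

struct seg_map;                         /* stand-in for the map page type */

struct mregion_model {
        size_t mapsz;
        struct seg_map *map[];          /* flexible array member */
};

static struct mregion_model *alloc_model(size_t m)
{
        /* struct_size(mr, map, m) ==
         *   sizeof(*mr) + m * sizeof(mr->map[0]),
         * except the kernel macro saturates on overflow instead of
         * silently wrapping */
        return calloc(1, sizeof(struct mregion_model) +
                         m * sizeof(struct seg_map *));
}
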
314 static void __rvt_free_mr(struct rvt_mr *mr) in __rvt_free_mr() argument
316 rvt_free_lkey(&mr->mr); in __rvt_free_mr()
317 rvt_deinit_mregion(&mr->mr); in __rvt_free_mr()
318 kfree(mr); in __rvt_free_mr()
330 struct rvt_mr *mr; in rvt_get_dma_mr() local
337 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in rvt_get_dma_mr()
338 if (!mr) { in rvt_get_dma_mr()
343 rval = rvt_init_mregion(&mr->mr, pd, 0, 0); in rvt_get_dma_mr()
349 rval = rvt_alloc_lkey(&mr->mr, 1); in rvt_get_dma_mr()
355 mr->mr.access_flags = acc; in rvt_get_dma_mr()
356 ret = &mr->ibmr; in rvt_get_dma_mr()
361 rvt_deinit_mregion(&mr->mr); in rvt_get_dma_mr()
363 kfree(mr); in rvt_get_dma_mr()
381 struct rvt_mr *mr; in rvt_reg_user_mr() local
396 mr = __rvt_alloc_mr(n, pd); in rvt_reg_user_mr()
397 if (IS_ERR(mr)) { in rvt_reg_user_mr()
398 ret = (struct ib_mr *)mr; in rvt_reg_user_mr()
402 mr->mr.user_base = start; in rvt_reg_user_mr()
403 mr->mr.iova = virt_addr; in rvt_reg_user_mr()
404 mr->mr.length = length; in rvt_reg_user_mr()
405 mr->mr.offset = ib_umem_offset(umem); in rvt_reg_user_mr()
406 mr->mr.access_flags = mr_access_flags; in rvt_reg_user_mr()
407 mr->umem = umem; in rvt_reg_user_mr()
409 mr->mr.page_shift = PAGE_SHIFT; in rvt_reg_user_mr()
420 mr->mr.map[m]->segs[n].vaddr = vaddr; in rvt_reg_user_mr()
421 mr->mr.map[m]->segs[n].length = PAGE_SIZE; in rvt_reg_user_mr()
422 trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, PAGE_SIZE); in rvt_reg_user_mr()
428 return &mr->ibmr; in rvt_reg_user_mr()
431 __rvt_free_mr(mr); in rvt_reg_user_mr()
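
Lines 402-422 copy the umem page list into the two-level map[m]->segs[n] table, bumping n and wrapping into the next map page whenever a chunk fills. A sketch of that indexing (SEGSZ is illustrative, standing in for rdmavt's RVT_SEGSZ):

#include <stddef.h>

#define SEGSZ 64                        /* illustrative chunk size */

struct seg { void *vaddr; size_t length; };
struct segmap { struct seg segs[SEGSZ]; };

/* store page i of the region, as lines 420-421 do inside the loop */
static void fill_segment(struct segmap **map, size_t i,
                         void *vaddr, size_t len)
{
        size_t m = i / SEGSZ, n = i % SEGSZ;

        map[m]->segs[n].vaddr = vaddr;
        map[m]->segs[n].length = len;
}
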
450 struct rvt_mregion *mr = (struct rvt_mregion *)v; in rvt_dereg_clean_qp_cb() local
453 if (mr->pd != qp->ibqp.pd) in rvt_dereg_clean_qp_cb()
455 rvt_qp_mr_clean(qp, mr->lkey); in rvt_dereg_clean_qp_cb()
465 static void rvt_dereg_clean_qps(struct rvt_mregion *mr) in rvt_dereg_clean_qps() argument
467 struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device); in rvt_dereg_clean_qps()
469 rvt_qp_iter(rdi, (u64)mr, rvt_dereg_clean_qp_cb); in rvt_dereg_clean_qps()
483 static int rvt_check_refs(struct rvt_mregion *mr, const char *t) in rvt_check_refs() argument
486 struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device); in rvt_check_refs()
488 if (mr->lkey) { in rvt_check_refs()
490 rvt_dereg_clean_qps(mr); in rvt_check_refs()
495 timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ); in rvt_check_refs()
499 t, mr, mr->pd, mr->lkey, in rvt_check_refs()
500 atomic_long_read(&mr->refcount.data->count)); in rvt_check_refs()
501 rvt_get_mr(mr); in rvt_check_refs()
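
rvt_check_refs() (lines 483-501) is the dereg safety net: it scrubs QP references via rvt_dereg_clean_qps(), then waits up to five seconds on the completion; on timeout it logs the surviving count and re-takes a reference (line 501) so a still-busy region is not freed out from under its users. A model of that bounded wait, building on the struct mregion sketch above:

#include <errno.h>
#include <stdio.h>
#include <time.h>

static int check_refs_model(struct mregion *mr, const char *who)
{
        struct timespec deadline;
        int timed_out = 0;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += 5;                   /* 5 * HZ above */

        pthread_mutex_lock(&mr->lock);
        while (!mr->done && !timed_out)
                timed_out = pthread_cond_timedwait(&mr->cond, &mr->lock,
                                                   &deadline) == ETIMEDOUT;
        pthread_mutex_unlock(&mr->lock);

        if (!mr->done) {
                fprintf(stderr, "%s: MR still referenced\n", who);
                mregion_get(mr);        /* keep it alive, as line 501 does */
                return -EBUSY;
        }
        return 0;
}
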
512 bool rvt_mr_has_lkey(struct rvt_mregion *mr, u32 lkey) in rvt_mr_has_lkey() argument
514 return mr && lkey == mr->lkey; in rvt_mr_has_lkey()
533 rval = rvt_mr_has_lkey(ss->sge.mr, lkey); in rvt_ss_has_lkey()
536 rval = rvt_mr_has_lkey(ss->sg_list[i].mr, lkey); in rvt_ss_has_lkey()
552 struct rvt_mr *mr = to_imr(ibmr); in rvt_dereg_mr() local
555 rvt_free_lkey(&mr->mr); in rvt_dereg_mr()
557 rvt_put_mr(&mr->mr); /* will set completion if last */ in rvt_dereg_mr()
558 ret = rvt_check_refs(&mr->mr, __func__); in rvt_dereg_mr()
561 rvt_deinit_mregion(&mr->mr); in rvt_dereg_mr()
562 ib_umem_release(mr->umem); in rvt_dereg_mr()
563 kfree(mr); in rvt_dereg_mr()
579 struct rvt_mr *mr; in rvt_alloc_mr() local
584 mr = __rvt_alloc_mr(max_num_sg, pd); in rvt_alloc_mr()
585 if (IS_ERR(mr)) in rvt_alloc_mr()
586 return (struct ib_mr *)mr; in rvt_alloc_mr()
588 return &mr->ibmr; in rvt_alloc_mr()
600 struct rvt_mr *mr = to_imr(ibmr); in rvt_set_page() local
601 u32 ps = 1 << mr->mr.page_shift; in rvt_set_page()
602 u32 mapped_segs = mr->mr.length >> mr->mr.page_shift; in rvt_set_page()
605 if (unlikely(mapped_segs == mr->mr.max_segs)) in rvt_set_page()
610 mr->mr.map[m]->segs[n].vaddr = (void *)addr; in rvt_set_page()
611 mr->mr.map[m]->segs[n].length = ps; in rvt_set_page()
612 mr->mr.length += ps; in rvt_set_page()
613 trace_rvt_mr_page_seg(&mr->mr, m, n, (void *)addr, ps); in rvt_set_page()
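
rvt_set_page() (lines 600-613) is the per-page callback behind ib_map_mr_sg(): it recovers how many pages are already stored from mr.length >> page_shift, refuses to grow past max_segs, and appends the new page. A sketch on top of fill_segment() above:

/* append one page, as rvt_set_page() does via its (m, n) indices */
static int set_page_model(struct segmap **map, size_t *length,
                          unsigned int page_shift, size_t max_segs,
                          void *addr)
{
        size_t ps = (size_t)1 << page_shift;
        size_t mapped = *length >> page_shift;

        if (mapped == max_segs)
                return -1;              /* -ENOMEM in the kernel */

        fill_segment(map, mapped, addr, ps);
        *length += ps;                  /* line 612 */
        return 0;
}
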
632 struct rvt_mr *mr = to_imr(ibmr); in rvt_map_mr_sg() local
635 mr->mr.length = 0; in rvt_map_mr_sg()
636 mr->mr.page_shift = PAGE_SHIFT; in rvt_map_mr_sg()
638 mr->mr.user_base = ibmr->iova; in rvt_map_mr_sg()
639 mr->mr.iova = ibmr->iova; in rvt_map_mr_sg()
640 mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr; in rvt_map_mr_sg()
641 mr->mr.length = (size_t)ibmr->length; in rvt_map_mr_sg()
658 struct rvt_mr *mr = to_imr(ibmr); in rvt_fast_reg_mr() local
660 if (qp->ibqp.pd != mr->mr.pd) in rvt_fast_reg_mr()
664 if (!mr->mr.lkey || mr->umem) in rvt_fast_reg_mr()
667 if ((key & 0xFFFFFF00) != (mr->mr.lkey & 0xFFFFFF00)) in rvt_fast_reg_mr()
672 mr->mr.lkey = key; in rvt_fast_reg_mr()
673 mr->mr.access_flags = access; in rvt_fast_reg_mr()
674 mr->mr.iova = ibmr->iova; in rvt_fast_reg_mr()
675 atomic_set(&mr->mr.lkey_invalid, 0); in rvt_fast_reg_mr()
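
Line 667 is the fast-register invariant: a consumer may recycle an MR under a new rkey, but only the low 8 key bits are theirs to change; the upper 24 bits still carry the driver's table index and generation, as in the make_lkey() sketch above. As a predicate:

/* true when 'key' differs from the allocated lkey only in the low
 * consumer-owned byte (the check at line 667, inverted) */
static int fast_reg_key_ok(unsigned int key, unsigned int lkey)
{
        return (key & 0xFFFFFF00) == (lkey & 0xFFFFFF00);
}
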
692 struct rvt_mregion *mr; in rvt_invalidate_rkey() local
698 mr = rcu_dereference( in rvt_invalidate_rkey()
700 if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd)) in rvt_invalidate_rkey()
703 atomic_set(&mr->lkey_invalid, 1); in rvt_invalidate_rkey()
725 if (last_sge && sge->lkey == last_sge->mr->lkey && in rvt_sge_adjacent()
728 if (unlikely((sge->addr - last_sge->mr->user_base + in rvt_sge_adjacent()
729 sge->length > last_sge->mr->length))) in rvt_sge_adjacent()
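
The truncated condition at lines 725-729 gates SGE coalescing: the new element must use the same lkey as the previous one (the elided middle of the test appears to require contiguity as well), and its range must still fall inside the MR. A sketch of the bounds half, with illustrative type names:

#include <stdint.h>

struct mr_bounds { uint64_t user_base; uint64_t length; };

/* the new element must stay inside the region it claims to extend;
 * addr >= user_base was already validated when the SGE was built */
static int sge_in_bounds(const struct mr_bounds *mr,
                         uint64_t addr, uint64_t len)
{
        return addr - mr->user_base + len <= mr->length;
}
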
761 struct rvt_mregion *mr; in rvt_lkey_ok() local
777 mr = rcu_dereference(dev->dma_mr); in rvt_lkey_ok()
778 if (!mr) in rvt_lkey_ok()
780 rvt_get_mr(mr); in rvt_lkey_ok()
783 isge->mr = mr; in rvt_lkey_ok()
794 mr = rcu_dereference(rkt->table[sge->lkey >> rkt->shift]); in rvt_lkey_ok()
795 if (!mr) in rvt_lkey_ok()
797 rvt_get_mr(mr); in rvt_lkey_ok()
798 if (!READ_ONCE(mr->lkey_published)) in rvt_lkey_ok()
801 if (unlikely(atomic_read(&mr->lkey_invalid) || in rvt_lkey_ok()
802 mr->lkey != sge->lkey || mr->pd != &pd->ibpd)) in rvt_lkey_ok()
805 off = sge->addr - mr->user_base; in rvt_lkey_ok()
806 if (unlikely(sge->addr < mr->user_base || in rvt_lkey_ok()
807 off + sge->length > mr->length || in rvt_lkey_ok()
808 (mr->access_flags & acc) != acc)) in rvt_lkey_ok()
812 off += mr->offset; in rvt_lkey_ok()
813 if (mr->page_shift) { in rvt_lkey_ok()
821 entries_spanned_by_off = off >> mr->page_shift; in rvt_lkey_ok()
822 off -= (entries_spanned_by_off << mr->page_shift); in rvt_lkey_ok()
828 while (off >= mr->map[m]->segs[n].length) { in rvt_lkey_ok()
829 off -= mr->map[m]->segs[n].length; in rvt_lkey_ok()
837 isge->mr = mr; in rvt_lkey_ok()
838 isge->vaddr = mr->map[m]->segs[n].vaddr + off; in rvt_lkey_ok()
839 isge->length = mr->map[m]->segs[n].length - off; in rvt_lkey_ok()
847 rvt_put_mr(mr); in rvt_lkey_ok()
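
Once the key checks pass (published, not invalidated, matching lkey and PD, access bits, bounds), rvt_lkey_ok() resolves the SGE one of two ways: the reserved key returns the DMA region directly, and otherwise the validated offset is turned into (m, n) map coordinates. With a uniform page size the lookup at lines 821-822 (and the index split elided after them) is pure shift arithmetic; without one, the loop at 828-829 walks segments subtracting lengths. rvt_rkey_ok() (lines 872-954) repeats the same walk keyed by rkey and mr->iova for remote access. A sketch of the shifted lookup, using SEGSZ from above:

/* translate a byte offset into map/segment indices plus a residual
 * offset, as the page_shift branch does */
static void locate_offset(size_t off, unsigned int page_shift,
                          size_t *m, size_t *n, size_t *residue)
{
        size_t entries = off >> page_shift;

        *m = entries / SEGSZ;
        *n = entries % SEGSZ;
        *residue = off - (entries << page_shift);
}
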
872 struct rvt_mregion *mr; in rvt_rkey_ok() local
887 mr = rcu_dereference(rdi->dma_mr); in rvt_rkey_ok()
888 if (!mr) in rvt_rkey_ok()
890 rvt_get_mr(mr); in rvt_rkey_ok()
893 sge->mr = mr; in rvt_rkey_ok()
902 mr = rcu_dereference(rkt->table[rkey >> rkt->shift]); in rvt_rkey_ok()
903 if (!mr) in rvt_rkey_ok()
905 rvt_get_mr(mr); in rvt_rkey_ok()
907 if (!READ_ONCE(mr->lkey_published)) in rvt_rkey_ok()
909 if (unlikely(atomic_read(&mr->lkey_invalid) || in rvt_rkey_ok()
910 mr->lkey != rkey || qp->ibqp.pd != mr->pd)) in rvt_rkey_ok()
913 off = vaddr - mr->iova; in rvt_rkey_ok()
914 if (unlikely(vaddr < mr->iova || off + len > mr->length || in rvt_rkey_ok()
915 (mr->access_flags & acc) == 0)) in rvt_rkey_ok()
919 off += mr->offset; in rvt_rkey_ok()
920 if (mr->page_shift) { in rvt_rkey_ok()
928 entries_spanned_by_off = off >> mr->page_shift; in rvt_rkey_ok()
929 off -= (entries_spanned_by_off << mr->page_shift); in rvt_rkey_ok()
935 while (off >= mr->map[m]->segs[n].length) { in rvt_rkey_ok()
936 off -= mr->map[m]->segs[n].length; in rvt_rkey_ok()
944 sge->mr = mr; in rvt_rkey_ok()
945 sge->vaddr = mr->map[m]->segs[n].vaddr + off; in rvt_rkey_ok()
946 sge->length = mr->map[m]->segs[n].length - off; in rvt_rkey_ok()
953 rvt_put_mr(mr); in rvt_rkey_ok()