Lines Matching refs:mr — references to the identifier "mr" in the qib memory-region code, shown with their file line numbers and enclosing functions.
42 struct qib_mregion mr; /* must be last */ member
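The "must be last" comment at line 42 makes sense if struct qib_mregion ends in a flexible array of segment-map pointers, so nothing may follow it in the embedding structure. Below is a sketch of the layout implied by the references on this page; only the field names themselves appear in the lines below, while the field order, the segment type names and QIB_SEGSZ are assumptions.

	/* Sketch only: layout implied by the references on this page. */
	struct qib_seg {
		void *vaddr;
		size_t length;
	};

	struct qib_segarray {
		struct qib_seg segs[QIB_SEGSZ];		/* QIB_SEGSZ assumed */
	};

	struct qib_mregion {
		struct ib_pd *pd;
		u64 user_base;
		u64 iova;
		size_t length;
		u32 lkey;
		u32 offset;
		int access_flags;
		u32 max_segs;
		u32 mapsz;			/* number of entries in map[] */
		u8 page_shift;
		struct completion comp;		/* completed when refcount hits zero */
		struct rcu_head list;
		atomic_t refcount;
		struct qib_segarray *map[0];	/* flexible tail: why "must be last" */
	};

	/* Wrappers referenced below; both embed the mregion as their last member. */
	struct qib_mr {
		struct ib_mr ibmr;
		struct ib_umem *umem;
		struct qib_mregion mr;		/* must be last */
	};

	struct qib_fmr {
		struct ib_fmr ibfmr;
		struct qib_mregion mr;		/* must be last */
	};
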
50 static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd, in init_qib_mregion() argument
58 mr->map[i] = kzalloc(sizeof *mr->map[0], GFP_KERNEL); in init_qib_mregion()
59 if (!mr->map[i]) in init_qib_mregion()
62 mr->mapsz = m; in init_qib_mregion()
63 init_completion(&mr->comp); in init_qib_mregion()
65 atomic_set(&mr->refcount, 1); in init_qib_mregion()
66 mr->pd = pd; in init_qib_mregion()
67 mr->max_segs = count; in init_qib_mregion()
72 kfree(mr->map[--i]); in init_qib_mregion()
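Lines 50-72 cover init_qib_mregion(): one kzalloc per segment array, a completion and a refcount initialized to 1, and an unwind loop on allocation failure. A sketch stitching those fragments together; the QIB_SEGSZ rounding and the -ENOMEM return are assumptions.

	static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd,
				    int count)
	{
		int m, i = 0;

		/* one segment array per QIB_SEGSZ segments (rounding assumed) */
		m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
		for (; i < m; i++) {
			mr->map[i] = kzalloc(sizeof *mr->map[0], GFP_KERNEL);
			if (!mr->map[i])
				goto bail;
		}
		mr->mapsz = m;
		init_completion(&mr->comp);
		/* the initial reference is dropped at deregistration time */
		atomic_set(&mr->refcount, 1);
		mr->pd = pd;
		mr->max_segs = count;
		return 0;

	bail:
		while (i)
			kfree(mr->map[--i]);
		return -ENOMEM;
	}
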
77 static void deinit_qib_mregion(struct qib_mregion *mr) in deinit_qib_mregion() argument
79 int i = mr->mapsz; in deinit_qib_mregion()
81 mr->mapsz = 0; in deinit_qib_mregion()
83 kfree(mr->map[--i]); in deinit_qib_mregion()
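deinit_qib_mregion() (lines 77-83) is the inverse: clear mapsz, then free every segment array. A minimal sketch consistent with the fragments:

	static void deinit_qib_mregion(struct qib_mregion *mr)
	{
		int i = mr->mapsz;

		mr->mapsz = 0;
		while (i)
			kfree(mr->map[--i]);
	}
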
98 struct qib_mr *mr = NULL; in qib_get_dma_mr() local
107 mr = kzalloc(sizeof *mr, GFP_KERNEL); in qib_get_dma_mr()
108 if (!mr) { in qib_get_dma_mr()
113 rval = init_qib_mregion(&mr->mr, pd, 0); in qib_get_dma_mr()
120 rval = qib_alloc_lkey(&mr->mr, 1); in qib_get_dma_mr()
126 mr->mr.access_flags = acc; in qib_get_dma_mr()
127 ret = &mr->ibmr; in qib_get_dma_mr()
132 deinit_qib_mregion(&mr->mr); in qib_get_dma_mr()
134 kfree(mr); in qib_get_dma_mr()
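Lines 98-134 are qib_get_dma_mr(): the all-of-memory MR needs no segment map (count 0) and takes the reserved DMA lkey. A sketch assuming the second argument of qib_alloc_lkey() selects that reserved key; the checks between lines 98 and 107 are not visible in this listing and are omitted.

	struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
	{
		struct qib_mr *mr = NULL;
		struct ib_mr *ret;
		int rval;

		mr = kzalloc(sizeof *mr, GFP_KERNEL);
		if (!mr) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}

		/* count == 0: the DMA MR spans all of memory, so no segment map */
		rval = init_qib_mregion(&mr->mr, pd, 0);
		if (rval) {
			ret = ERR_PTR(rval);
			goto bail_free;
		}

		/* second argument 1 = take the reserved DMA lkey (assumed) */
		rval = qib_alloc_lkey(&mr->mr, 1);
		if (rval) {
			ret = ERR_PTR(rval);
			goto bail_mregion;
		}

		mr->mr.access_flags = acc;
		ret = &mr->ibmr;
		goto bail;

	bail_mregion:
		deinit_qib_mregion(&mr->mr);
	bail_free:
		kfree(mr);
	bail:
		return ret;
	}
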
140 struct qib_mr *mr; in alloc_mr() local
146 mr = kzalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL); in alloc_mr()
147 if (!mr) in alloc_mr()
150 rval = init_qib_mregion(&mr->mr, pd, count); in alloc_mr()
157 rval = qib_alloc_lkey(&mr->mr, 0); in alloc_mr()
160 mr->ibmr.lkey = mr->mr.lkey; in alloc_mr()
161 mr->ibmr.rkey = mr->mr.lkey; in alloc_mr()
163 return mr; in alloc_mr()
166 deinit_qib_mregion(&mr->mr); in alloc_mr()
168 kfree(mr); in alloc_mr()
169 mr = ERR_PTR(rval); in alloc_mr()
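alloc_mr() (lines 140-169) is the common allocator behind the phys, user and fast-reg paths: the wrapper and its trailing map[] pointer array come from a single kzalloc, then init_qib_mregion() fills the segment arrays and an ordinary lkey is taken. A sketch; the QIB_SEGSZ rounding is assumed.

	static struct qib_mr *alloc_mr(int count, struct ib_pd *pd)
	{
		struct qib_mr *mr;
		int m, rval;

		/* one map pointer per QIB_SEGSZ segments (rounding assumed) */
		m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;

		/* single allocation: struct qib_mr plus m trailing map pointers */
		mr = kzalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
		if (!mr) {
			rval = -ENOMEM;
			goto bail;
		}

		rval = init_qib_mregion(&mr->mr, pd, count);
		if (rval)
			goto bail_free;

		/* second argument 0 = allocate an ordinary (non-DMA) lkey */
		rval = qib_alloc_lkey(&mr->mr, 0);
		if (rval)
			goto bail_mregion;

		mr->ibmr.lkey = mr->mr.lkey;
		mr->ibmr.rkey = mr->mr.lkey;
		return mr;

	bail_mregion:
		deinit_qib_mregion(&mr->mr);
	bail_free:
		kfree(mr);
	bail:
		return ERR_PTR(rval);
	}
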
186 struct qib_mr *mr; in qib_reg_phys_mr() local
190 mr = alloc_mr(num_phys_buf, pd); in qib_reg_phys_mr()
191 if (IS_ERR(mr)) { in qib_reg_phys_mr()
192 ret = (struct ib_mr *)mr; in qib_reg_phys_mr()
196 mr->mr.user_base = *iova_start; in qib_reg_phys_mr()
197 mr->mr.iova = *iova_start; in qib_reg_phys_mr()
198 mr->mr.access_flags = acc; in qib_reg_phys_mr()
203 mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr; in qib_reg_phys_mr()
204 mr->mr.map[m]->segs[n].length = buffer_list[i].size; in qib_reg_phys_mr()
205 mr->mr.length += buffer_list[i].size; in qib_reg_phys_mr()
213 ret = &mr->ibmr; in qib_reg_phys_mr()
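qib_reg_phys_mr() (lines 186-213) copies each physical buffer straight into the segment arrays and accumulates the total length. A sketch of that loop; the QIB_SEGSZ wrap-around and the exact parameter list of this old reg_phys_mr verb are assumptions.

	struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
				      struct ib_phys_buf *buffer_list,
				      int num_phys_buf, int acc, u64 *iova_start)
	{
		struct qib_mr *mr;
		struct ib_mr *ret;
		int m, n, i;

		mr = alloc_mr(num_phys_buf, pd);
		if (IS_ERR(mr)) {
			ret = (struct ib_mr *)mr;
			goto bail;
		}

		mr->mr.user_base = *iova_start;
		mr->mr.iova = *iova_start;
		mr->mr.access_flags = acc;

		m = 0;
		n = 0;
		for (i = 0; i < num_phys_buf; i++) {
			mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
			mr->mr.map[m]->segs[n].length = buffer_list[i].size;
			mr->mr.length += buffer_list[i].size;
			/* move to the next segment array when this one fills (assumed) */
			if (++n == QIB_SEGSZ) {
				m++;
				n = 0;
			}
		}

		ret = &mr->ibmr;

	bail:
		return ret;
	}
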
233 struct qib_mr *mr; in qib_reg_user_mr() local
251 mr = alloc_mr(n, pd); in qib_reg_user_mr()
252 if (IS_ERR(mr)) { in qib_reg_user_mr()
253 ret = (struct ib_mr *)mr; in qib_reg_user_mr()
258 mr->mr.user_base = start; in qib_reg_user_mr()
259 mr->mr.iova = virt_addr; in qib_reg_user_mr()
260 mr->mr.length = length; in qib_reg_user_mr()
261 mr->mr.offset = umem->offset; in qib_reg_user_mr()
262 mr->mr.access_flags = mr_access_flags; in qib_reg_user_mr()
263 mr->umem = umem; in qib_reg_user_mr()
266 mr->mr.page_shift = ilog2(umem->page_size); in qib_reg_user_mr()
277 mr->mr.map[m]->segs[n].vaddr = vaddr; in qib_reg_user_mr()
278 mr->mr.map[m]->segs[n].length = umem->page_size; in qib_reg_user_mr()
285 ret = &mr->ibmr; in qib_reg_user_mr()
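Lines 233-285 come from qib_reg_user_mr(), after the user range has been pinned into an ib_umem (the ib_umem_get() call and the page count are not visible in this listing). The hypothetical helper below, which is not in the driver, condenses the part the fragments do show: "pages"/"npages" stand in for the umem page walk, and the QIB_SEGSZ wrap is assumed.

	/* Hypothetical helper (not in the driver): fills a freshly allocated
	 * qib_mr the way lines 258-285 do, with the umem page walk condensed
	 * into the pages[]/npages parameters. */
	static struct ib_mr *qib_fill_user_mr(struct qib_mr *mr, struct ib_umem *umem,
					      u64 start, u64 virt_addr, u64 length,
					      int mr_access_flags,
					      void **pages, int npages)
	{
		int m = 0, n = 0, i;

		mr->mr.user_base = start;
		mr->mr.iova = virt_addr;
		mr->mr.length = length;
		mr->mr.offset = umem->offset;
		mr->mr.access_flags = mr_access_flags;
		mr->umem = umem;
		mr->mr.page_shift = ilog2(umem->page_size);

		for (i = 0; i < npages; i++) {
			void *vaddr = pages[i];		/* kernel address of the pinned page */

			mr->mr.map[m]->segs[n].vaddr = vaddr;
			mr->mr.map[m]->segs[n].length = umem->page_size;
			if (++n == QIB_SEGSZ) {		/* wrap assumed */
				m++;
				n = 0;
			}
		}
		return &mr->ibmr;
	}
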
302 struct qib_mr *mr = to_imr(ibmr); in qib_dereg_mr() local
306 qib_free_lkey(&mr->mr); in qib_dereg_mr()
308 qib_put_mr(&mr->mr); /* will set completion if last */ in qib_dereg_mr()
309 timeout = wait_for_completion_timeout(&mr->mr.comp, in qib_dereg_mr()
312 qib_get_mr(&mr->mr); in qib_dereg_mr()
316 deinit_qib_mregion(&mr->mr); in qib_dereg_mr()
317 if (mr->umem) in qib_dereg_mr()
318 ib_umem_release(mr->umem); in qib_dereg_mr()
319 kfree(mr); in qib_dereg_mr()
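qib_dereg_mr() (lines 302-319) shows the teardown protocol: drop the lkey, drop the initial reference, then wait on the completion that the last qib_put_mr() fires; on timeout the reference is re-taken and the MR is left alive. A sketch; the 5-second timeout and -EBUSY return are assumptions, the rest follows the fragments.

	int qib_dereg_mr(struct ib_mr *ibmr)
	{
		struct qib_mr *mr = to_imr(ibmr);
		unsigned long timeout;
		int ret = 0;

		qib_free_lkey(&mr->mr);

		qib_put_mr(&mr->mr); /* will set completion if last */
		timeout = wait_for_completion_timeout(&mr->mr.comp, 5 * HZ); /* timeout assumed */
		if (!timeout) {
			/* someone still holds a reference: undo and report busy */
			qib_get_mr(&mr->mr);
			ret = -EBUSY;
			goto out;
		}

		deinit_qib_mregion(&mr->mr);
		if (mr->umem)
			ib_umem_release(mr->umem);
		kfree(mr);
	out:
		return ret;
	}
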
332 struct qib_mr *mr; in qib_alloc_fast_reg_mr() local
334 mr = alloc_mr(max_page_list_len, pd); in qib_alloc_fast_reg_mr()
335 if (IS_ERR(mr)) in qib_alloc_fast_reg_mr()
336 return (struct ib_mr *)mr; in qib_alloc_fast_reg_mr()
338 return &mr->ibmr; in qib_alloc_fast_reg_mr()
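qib_alloc_fast_reg_mr() (lines 332-338) is only a thin wrapper over alloc_mr(); the fragments already show essentially the whole body:

	struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
	{
		struct qib_mr *mr;

		mr = alloc_mr(max_page_list_len, pd);
		if (IS_ERR(mr))
			return (struct ib_mr *)mr;

		return &mr->ibmr;
	}
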
389 fmr = kzalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL); in qib_alloc_fmr()
393 rval = init_qib_mregion(&fmr->mr, pd, fmr_attr->max_pages); in qib_alloc_fmr()
401 rval = qib_alloc_lkey(&fmr->mr, 0); in qib_alloc_fmr()
404 fmr->ibfmr.rkey = fmr->mr.lkey; in qib_alloc_fmr()
405 fmr->ibfmr.lkey = fmr->mr.lkey; in qib_alloc_fmr()
410 fmr->mr.access_flags = mr_access_flags; in qib_alloc_fmr()
411 fmr->mr.max_segs = fmr_attr->max_pages; in qib_alloc_fmr()
412 fmr->mr.page_shift = fmr_attr->page_shift; in qib_alloc_fmr()
419 deinit_qib_mregion(&fmr->mr); in qib_alloc_fmr()
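qib_alloc_fmr() (lines 389-419) mirrors alloc_mr() for the FMR wrapper: one allocation for struct qib_fmr plus its map pointers, then mregion init, an ordinary lkey, and the FMR attributes copied into the mregion. A sketch; the QIB_SEGSZ rounding and the error-path labels are assumptions.

	struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
				     struct ib_fmr_attr *fmr_attr)
	{
		struct qib_fmr *fmr;
		int m, rval;

		/* one map pointer per QIB_SEGSZ pages (rounding assumed) */
		m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;

		fmr = kzalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
		if (!fmr) {
			rval = -ENOMEM;
			goto bail;
		}

		rval = init_qib_mregion(&fmr->mr, pd, fmr_attr->max_pages);
		if (rval)
			goto bail_free;

		rval = qib_alloc_lkey(&fmr->mr, 0);
		if (rval)
			goto bail_mregion;

		/* an FMR exposes the same key as both lkey and rkey */
		fmr->ibfmr.rkey = fmr->mr.lkey;
		fmr->ibfmr.lkey = fmr->mr.lkey;

		fmr->mr.access_flags = mr_access_flags;
		fmr->mr.max_segs = fmr_attr->max_pages;
		fmr->mr.page_shift = fmr_attr->page_shift;

		return &fmr->ibfmr;

	bail_mregion:
		deinit_qib_mregion(&fmr->mr);
	bail_free:
		kfree(fmr);
	bail:
		return ERR_PTR(rval);
	}
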
446 i = atomic_read(&fmr->mr.refcount); in qib_map_phys_fmr()
450 if (list_len > fmr->mr.max_segs) { in qib_map_phys_fmr()
456 fmr->mr.user_base = iova; in qib_map_phys_fmr()
457 fmr->mr.iova = iova; in qib_map_phys_fmr()
458 ps = 1 << fmr->mr.page_shift; in qib_map_phys_fmr()
459 fmr->mr.length = list_len * ps; in qib_map_phys_fmr()
463 fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i]; in qib_map_phys_fmr()
464 fmr->mr.map[m]->segs[n].length = ps; in qib_map_phys_fmr()
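qib_map_phys_fmr() (lines 446-464) remaps an FMR in the fast path: refuse if the region still looks referenced or the page list is too long, then overwrite iova, length and the segment arrays with the new pages. A sketch; the to_ifmr() helper, the refcount threshold and the QIB_SEGSZ wrap are assumptions, and the locking that serializes the update against the lkey table is elided.

	int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
			     int list_len, u64 iova)
	{
		struct qib_fmr *fmr = to_ifmr(ibfmr);	/* helper assumed, like to_imr() */
		int m, n, i;
		u32 ps;

		/* still referenced beyond the expected holders? refuse to remap
		 * (exact threshold assumed) */
		i = atomic_read(&fmr->mr.refcount);
		if (i > 2)
			return -EBUSY;

		if (list_len > fmr->mr.max_segs)
			return -EINVAL;

		/* the real function serializes this update against the lkey
		 * table; that locking is elided here */
		fmr->mr.user_base = iova;
		fmr->mr.iova = iova;
		ps = 1 << fmr->mr.page_shift;
		fmr->mr.length = list_len * ps;
		m = 0;
		n = 0;
		for (i = 0; i < list_len; i++) {
			fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
			fmr->mr.map[m]->segs[n].length = ps;
			if (++n == QIB_SEGSZ) {
				m++;
				n = 0;
			}
		}
		return 0;
	}
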
492 fmr->mr.user_base = 0; in qib_unmap_fmr()
493 fmr->mr.iova = 0; in qib_unmap_fmr()
494 fmr->mr.length = 0; in qib_unmap_fmr()
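qib_unmap_fmr() (lines 492-494) just invalidates each FMR on the caller's list by zeroing its bounds. A sketch assuming the list is chained through ib_fmr.list; locking around the stores is again elided.

	int qib_unmap_fmr(struct list_head *fmr_list)
	{
		struct qib_fmr *fmr;

		list_for_each_entry(fmr, fmr_list, ibfmr.list) {
			/* locking against the lkey table elided */
			fmr->mr.user_base = 0;
			fmr->mr.iova = 0;
			fmr->mr.length = 0;
		}
		return 0;
	}
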
512 qib_free_lkey(&fmr->mr); in qib_dealloc_fmr()
513 qib_put_mr(&fmr->mr); /* will set completion if last */ in qib_dealloc_fmr()
514 timeout = wait_for_completion_timeout(&fmr->mr.comp, in qib_dealloc_fmr()
517 qib_get_mr(&fmr->mr); in qib_dealloc_fmr()
521 deinit_qib_mregion(&fmr->mr); in qib_dealloc_fmr()
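qib_dealloc_fmr() (lines 512-521) follows the same teardown protocol as qib_dereg_mr() above, minus the umem release. A sketch with the same assumed timeout and -EBUSY handling:

	int qib_dealloc_fmr(struct ib_fmr *ibfmr)
	{
		struct qib_fmr *fmr = to_ifmr(ibfmr);	/* helper assumed */
		unsigned long timeout;
		int ret = 0;

		qib_free_lkey(&fmr->mr);
		qib_put_mr(&fmr->mr); /* will set completion if last */
		timeout = wait_for_completion_timeout(&fmr->mr.comp, 5 * HZ); /* assumed */
		if (!timeout) {
			qib_get_mr(&fmr->mr);
			ret = -EBUSY;
			goto out;
		}
		deinit_qib_mregion(&fmr->mr);
		kfree(fmr);
	out:
		return ret;
	}
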
529 struct qib_mregion *mr = container_of(list, struct qib_mregion, list); in mr_rcu_callback() local
531 complete(&mr->comp); in mr_rcu_callback()
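mr_rcu_callback() (lines 529-531) closes the loop with the two wait_for_completion_timeout() callers above: presumably the last qib_put_mr() hands the mregion's rcu_head to call_rcu() with this callback, so the completion only fires after an RCU grace period, once lockless lkey-table readers can no longer see the region. The callback itself is fully visible in the fragments:

	static void mr_rcu_callback(struct rcu_head *list)
	{
		struct qib_mregion *mr = container_of(list, struct qib_mregion, list);

		complete(&mr->comp);
	}
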