Lines matching full:mem in drivers/infiniband/sw/rxe/rxe_mr.c

in mem_check_range():
 54  int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
 56      switch (mem->type) {
 62          if (iova < mem->iova ||
 63              length > mem->length ||
 64              iova > mem->iova + mem->length - length)
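
The bounds check at lines 62-64 is worth a second look: it is written as iova > mem->iova + mem->length - length rather than the naive iova + length > mem->iova + mem->length, which can wrap around for an iova near the top of the 64-bit space; the preceding length > mem->length test guarantees the subtraction cannot underflow. A minimal, self-contained model of the same check (check_range and its parameter names are stand-ins, not the driver's):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Model of the MR bounds check: region [base, base+len), request
 * [iova, iova+length).  "iova > base + len - length" avoids the u64
 * wraparound that "iova + length > base + len" would suffer; the
 * "length > len" test has already ruled out underflow in "len - length".
 */
static int check_range(uint64_t base, size_t len, uint64_t iova, size_t length)
{
    if (iova < base || length > len || iova > base + len - length)
        return -1;                      /* like -EFAULT in the driver */
    return 0;
}

int main(void)
{
    uint64_t base = 0x1000;
    size_t len = 0x2000;                /* region is [0x1000, 0x3000) */

    printf("%d\n", check_range(base, len, 0x1000, 0x2000));    /* 0: fits */
    printf("%d\n", check_range(base, len, 0x2fff, 2));         /* -1: past end */
    /* the naive form would wrap to 2 here and wrongly accept */
    printf("%d\n", check_range(base, len, UINT64_MAX - 1, 4)); /* -1 */
    return 0;
}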

in rxe_mem_init():
 77  static void rxe_mem_init(int access, struct rxe_mem *mem)
 79      u32 lkey = mem->pelem.index << 8 | rxe_get_key();
 82      if (mem->pelem.pool->type == RXE_TYPE_MR) {
 83          mem->ibmr.lkey = lkey;
 84          mem->ibmr.rkey = rkey;
 87      mem->lkey = lkey;
 88      mem->rkey = rkey;
 89      mem->state = RXE_MEM_STATE_INVALID;
 90      mem->type = RXE_MEM_TYPE_NONE;
 91      mem->map_shift = ilog2(RXE_BUF_PER_MAP);
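
Line 79 shows the key layout: the memory object's pool index goes in bits 8..31 and an 8-bit variant from rxe_get_key() in bits 0..7, so lookup_mem() (line 578 below) can recover the pool index with a shift, while the low byte distinguishes successive registrations. A small model of the packing (make_key and key_to_index are illustrative names):

#include <stdint.h>
#include <stdio.h>

/* Key layout implied by line 79: index in bits 8..31, variant in 0..7.
 * A stale key carrying the right index but an old variant byte fails
 * the lkey/rkey comparison in lookup_mem() (lines 585-586).
 */
static uint32_t make_key(uint32_t pool_index, uint8_t variant)
{
    return pool_index << 8 | variant;
}

static uint32_t key_to_index(uint32_t key)
{
    return key >> 8;
}

int main(void)
{
    uint32_t key = make_key(42, 0xa7);

    /* prints: key=0x00002aa7 index=42 variant=0xa7 */
    printf("key=0x%08x index=%u variant=0x%02x\n",
           key, key_to_index(key), key & 0xff);
    return 0;
}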

in rxe_mem_cleanup():
 96      struct rxe_mem *mem = container_of(arg, typeof(*mem), pelem);
 99      if (mem->umem)
100          ib_umem_release(mem->umem);
102      if (mem->map) {
103          for (i = 0; i < mem->num_map; i++)
104              kfree(mem->map[i]);
106          kfree(mem->map);

in rxe_mem_alloc():
110  static int rxe_mem_alloc(struct rxe_mem *mem, int num_buf)
114      struct rxe_map **map = mem->map;
118      mem->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
119      if (!mem->map)
123          mem->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
124          if (!mem->map[i])
130      mem->map_shift = ilog2(RXE_BUF_PER_MAP);
131      mem->map_mask = RXE_BUF_PER_MAP - 1;
133      mem->num_buf = num_buf;
134      mem->num_map = num_map;
135      mem->max_buf = num_map * RXE_BUF_PER_MAP;
141          kfree(mem->map[i]);
143      kfree(mem->map);
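
rxe_mem_alloc() builds a two-level table: an array of num_map chunk pointers (line 118), each chunk holding RXE_BUF_PER_MAP buffer descriptors (line 123), so max_buf at line 135 is num_buf rounded up to a whole number of chunks; lines 141-143 are the error unwind for a partially built table. A self-contained userspace model of the same shape (BUF_PER_MAP, alloc_maps and the struct names are stand-ins):

#include <stdio.h>
#include <stdlib.h>

#define BUF_PER_MAP 256     /* stand-in for RXE_BUF_PER_MAP */

struct phys_buf { unsigned long long addr; unsigned long long size; };
struct map { struct phys_buf buf[BUF_PER_MAP]; };

/* Two-level allocation in the shape of rxe_mem_alloc(): a pointer
 * array plus one chunk per BUF_PER_MAP descriptors.  On a mid-loop
 * failure, free the chunks built so far, then the pointer array.
 */
static struct map **alloc_maps(int num_buf, int *num_map_out)
{
    int num_map = (num_buf + BUF_PER_MAP - 1) / BUF_PER_MAP; /* round up */
    struct map **maps = calloc(num_map, sizeof(*maps));
    int i;

    if (!maps)
        return NULL;

    for (i = 0; i < num_map; i++) {
        maps[i] = malloc(sizeof(**maps));
        if (!maps[i])
            goto err;
    }
    *num_map_out = num_map;
    return maps;

err:
    while (i--)                 /* mirrors the unwind at lines 141-143 */
        free(maps[i]);
    free(maps);
    return NULL;
}

int main(void)
{
    int num_map;
    struct map **maps = alloc_maps(1000, &num_map);

    /* 1000 buffers -> 4 chunks of 256; max_buf rounds up to 1024 */
    if (maps)
        printf("num_map=%d max_buf=%d\n", num_map, num_map * BUF_PER_MAP);
    return 0;
}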

in rxe_mem_init_dma():
149                       int access, struct rxe_mem *mem)
151      rxe_mem_init(access, mem);
153      mem->pd = pd;
154      mem->access = access;
155      mem->state = RXE_MEM_STATE_VALID;
156      mem->type = RXE_MEM_TYPE_DMA;

in rxe_mem_init_user():
163                        struct rxe_mem *mem)
182      mem->umem = umem;
185      rxe_mem_init(access, mem);
187      err = rxe_mem_alloc(mem, num_buf);
194      mem->page_shift = umem->page_shift;
195      mem->page_mask = BIT(umem->page_shift) - 1;
198      map = mem->map;
224      mem->pd = pd;
225      mem->umem = umem;
226      mem->access = access;
227      mem->length = length;
228      mem->iova = iova;
229      mem->va = start;
230      mem->offset = ib_umem_offset(umem);
231      mem->state = RXE_MEM_STATE_VALID;
232      mem->type = RXE_MEM_TYPE_MR;
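
Only the bookkeeping of rxe_mem_init_user() matches the search; the elided body between lines 198 and 224 is where the pinned user pages are walked and their addresses deposited into consecutive buf slots, spilling into the next chunk every RXE_BUF_PER_MAP entries. A sketch of that fill pattern, reusing struct map and BUF_PER_MAP from the allocation sketch above (fill_maps and vaddrs are hypothetical names):

#include <stdint.h>

/* Fill-loop model: record each page's address and size into the
 * two-level table, rolling over to the next chunk when one fills.
 * In the driver the addresses come from the ib_umem page walk.
 */
static void fill_maps(struct map **maps, void **vaddrs, int num_buf,
                      unsigned long long page_size)
{
    int m = 0, n = 0, i;

    for (i = 0; i < num_buf; i++) {
        maps[m]->buf[n].addr = (uintptr_t)vaddrs[i];
        maps[m]->buf[n].size = page_size;
        if (++n == BUF_PER_MAP) {   /* chunk full: advance to next map */
            n = 0;
            m++;
        }
    }
}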

in rxe_mem_init_fast():
241                        int max_pages, struct rxe_mem *mem)
245      rxe_mem_init(0, mem);
248      mem->ibmr.rkey = mem->ibmr.lkey;
250      err = rxe_mem_alloc(mem, max_pages);
254      mem->pd = pd;
255      mem->max_buf = max_pages;
256      mem->state = RXE_MEM_STATE_FREE;
257      mem->type = RXE_MEM_TYPE_MR;

in lookup_iova():
266                         struct rxe_mem *mem,
272      size_t offset = iova - mem->iova + mem->offset;
277      if (likely(mem->page_shift)) {
278          *offset_out = offset & mem->page_mask;
279          offset >>= mem->page_shift;
280          *n_out = offset & mem->map_mask;
281          *m_out = offset >> mem->map_shift;
286          length = mem->map[map_index]->buf[buf_index].size;
296              length = mem->map[map_index]->buf[buf_index].size;
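
Lines 277-281 are the fast path of lookup_iova(): when every buffer is the same power-of-two size (page_shift nonzero), the region-relative byte offset computed at line 272 (which folds in the sub-page offset of the start address) splits into (chunk m, slot n, offset within buffer) with pure shifts and masks; the slow path at lines 286/296 instead walks variable-size buffers one by one. A runnable decomposition, assuming 4 KiB pages and 256 buffers per chunk (so map_shift = ilog2(256) = 8):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* assumed geometry: 4 KiB buffers, 256 buffers per map chunk */
    const unsigned page_shift = 12;
    const uint64_t page_mask = (1ull << page_shift) - 1;    /* 0xfff */
    const unsigned map_shift = 8;                           /* ilog2(256) */
    const uint64_t map_mask = 256 - 1;

    uint64_t offset = 0x301a2c;     /* byte offset into the region */

    uint64_t in_buf = offset & page_mask;   /* 0xa2c: inside the buffer */
    offset >>= page_shift;                  /* 0x301: buffer number     */
    uint64_t n = offset & map_mask;         /* 1: slot within the chunk */
    uint64_t m = offset >> map_shift;       /* 3: which chunk           */

    /* prints: m=3 n=1 offset=0xa2c */
    printf("m=%llu n=%llu offset=0x%llx\n",
           (unsigned long long)m, (unsigned long long)n,
           (unsigned long long)in_buf);
    return 0;
}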

in iova_to_vaddr():
305  void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length)
311      if (mem->state != RXE_MEM_STATE_VALID) {
312          pr_warn("mem not in valid state\n");
317      if (!mem->map) {
322      if (mem_check_range(mem, iova, length)) {
328      lookup_iova(mem, iova, &m, &n, &offset);
330      if (offset + length > mem->map[m]->buf[n].size) {
336      addr = (void *)(uintptr_t)mem->map[m]->buf[n].addr + offset;

in rxe_mem_copy():
343   * a mem object starting at iova. Compute incremental value of
344   * crc32 if crcp is not zero. Caller must hold a reference to mem.
346  int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
362      if (mem->type == RXE_MEM_TYPE_DMA) {
374              *crcp = rxe_crc32(to_rdev(mem->pd->ibpd.device),
380      WARN_ON_ONCE(!mem->map);
382      err = mem_check_range(mem, iova, length);
388      lookup_iova(mem, iova, &m, &i, &offset);
390      map = mem->map + m;
408              crc = rxe_crc32(to_rdev(mem->pd->ibpd.device),
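
In the non-DMA path of rxe_mem_copy(), the elided loop after line 390 evidently moves the data in pieces: each step copies up to the end of the current buffer, then advances to the next slot and, at a chunk boundary, to the next map, folding the CRC over each piece (line 408) just as the DMA path does in one go (line 374). A model of that segmented walk, reusing the two-level types from the allocation sketch (copy_seg and dir_in are stand-ins for rxe_mem_copy()'s internals):

#include <stdint.h>
#include <string.h>

/* Segmented copy model: move 'length' bytes starting at (m, n, off)
 * in the two-level table, crossing buffer and chunk boundaries.
 */
static void copy_seg(struct map **maps, int m, int n, unsigned long long off,
                     void *addr, size_t length, int dir_in)
{
    char *ext = addr;

    while (length) {
        struct phys_buf *buf = &maps[m]->buf[n];
        size_t avail = buf->size - off;
        size_t bytes = length < avail ? length : avail;
        void *va = (void *)(uintptr_t)(buf->addr + off);

        if (dir_in)
            memcpy(va, ext, bytes);     /* external -> memory region */
        else
            memcpy(ext, va, bytes);     /* memory region -> external */

        ext += bytes;
        length -= bytes;
        off = 0;                        /* later buffers start at 0 */
        if (++n == BUF_PER_MAP) {       /* step into the next chunk */
            n = 0;
            m++;
        }
    }
}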

in copy_data():
450      struct rxe_mem *mem = NULL;
463          mem = lookup_mem(pd, access, sge->lkey, lookup_local);
464          if (!mem) {
474              if (mem) {
475                  rxe_drop_ref(mem);
476                  mem = NULL;
488                  mem = lookup_mem(pd, access, sge->lkey,
490                  if (!mem) {
505              err = rxe_mem_copy(mem, iova, addr, bytes, dir, crcp);
519      if (mem)
520          rxe_drop_ref(mem);
525      if (mem)
526          rxe_drop_ref(mem);
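
The copy_data() matches show a reference discipline more than a copy: at most one MR reference is held at a time, dropped when its SGE is consumed (lines 474-476), reacquired for the next SGE (line 488), and released on both the success and error exits (lines 519-520 and 525-526). A runnable toy of that pattern (the sge/mr structs, lookup_mr and drop_ref are stand-ins; the driver tracks its position in a dma_info rather than a loop index):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct sge { uint32_t lkey; uint64_t addr; uint32_t length; };
struct mr { int refs; };

/* Toy lookup/drop so the sketch runs; the driver uses lookup_mem()
 * and rxe_drop_ref() against a real object pool.
 */
static struct mr mr_table[4];

static struct mr *lookup_mr(uint32_t lkey)
{
    struct mr *mr = &mr_table[lkey % 4];

    mr->refs++;
    return mr;
}

static void drop_ref(struct mr *mr)
{
    mr->refs--;
}

static int walk_sges(struct sge *sge, int num_sge, size_t resid)
{
    struct mr *mr = NULL;
    size_t offset = 0;
    int i = 0, err = 0;

    while (resid) {
        if (offset == sge->length) {            /* current SGE consumed */
            if (mr) {
                drop_ref(mr);
                mr = NULL;
            }
            sge++;
            offset = 0;
            if (++i >= num_sge) {
                err = -1;                       /* ran out of SGEs */
                goto out;
            }
            continue;
        }
        if (!mr) {
            mr = lookup_mr(sge->lkey);
            if (!mr) {
                err = -1;
                goto out;
            }
        }
        size_t bytes = resid < sge->length - offset
                     ? resid : sge->length - offset;
        /* rxe_mem_copy(mr, sge->addr + offset, ..., bytes, ...) here */
        offset += bytes;
        resid -= bytes;
    }
out:
    if (mr)                                     /* release on every exit */
        drop_ref(mr);
    return err;
}

int main(void)
{
    struct sge sges[2] = { { 1, 0x1000, 64 }, { 2, 0x2000, 64 } };
    int err = walk_sges(sges, 2, 100);

    /* prints: err=0 refs=0/0 -- no reference leaked */
    printf("err=%d refs=%d/%d\n", err, mr_table[1].refs, mr_table[2].refs);
    return 0;
}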

in lookup_mem():
564  /* (1) find the mem (mr or mw) corresponding to lkey/rkey
566   * (2) verify that the (qp) pd matches the mem pd
567   * (3) verify that the mem can support the requested access
568   * (4) verify that mem state is valid
573      struct rxe_mem *mem;
578      mem = rxe_pool_get_index(&rxe->mr_pool, index);
579      if (!mem)
585      if ((type == lookup_local && mem->lkey != key) ||
586          (type == lookup_remote && mem->rkey != key))
589      if (mem->pd != pd)
592      if (access && !(access & mem->access))
595      if (mem->state != RXE_MEM_STATE_VALID)
598      return mem;
601      rxe_drop_ref(mem);

in rxe_mem_map_pages():
606  int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem,
616      if (num_pages > mem->max_buf) {
622      page_size = 1 << mem->page_shift;
623      map = mem->map;
639      mem->iova = iova;
640      mem->va = iova;
641      mem->length = num_pages << mem->page_shift;
642      mem->state = RXE_MEM_STATE_VALID;
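
rxe_mem_map_pages() backs a fast-register MR (set up by rxe_mem_init_fast() above in state FREE) with an actual page list: the count is checked against max_buf (line 616), the elided loop presumably deposits the page addresses into the table starting at map (line 623), and lines 639-642 publish the geometry and flip the state to VALID. A usage-shaped model on top of the earlier sketches (model_mr and map_pages are hypothetical; alloc_maps and fill_maps are the pieces defined above):

/* Bind num_pages page addresses to a table from alloc_maps(), then
 * publish the region geometry, mirroring lines 616 and 639-642.
 */
struct model_mr {
    struct map **maps;
    unsigned long long iova, va, length;
    unsigned page_shift;
    int max_buf, valid;
};

static int map_pages(struct model_mr *mr, void **pages, int num_pages,
                     unsigned long long iova)
{
    if (num_pages > mr->max_buf)
        return -1;              /* more pages than the MR can hold */

    fill_maps(mr->maps, pages, num_pages, 1ull << mr->page_shift);

    mr->iova = iova;
    mr->va = iova;
    mr->length = (unsigned long long)num_pages << mr->page_shift;
    mr->valid = 1;              /* RXE_MEM_STATE_VALID */
    return 0;
}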