/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "rxe.h"
#include "rxe_loc.h"
/*
 * lfsr (linear feedback shift register) with period 255
 */
static u8 rxe_get_key(void)
{
	static u32 key = 1;

	key = key << 1;

	key |= (0 != (key & 0x100)) ^ (0 != (key & 0x10))
		^ (0 != (key & 0x80)) ^ (0 != (key & 0x40));

	key &= 0xff;

	return key;
}
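
/*
 * Note on rxe_get_key(): after the left shift, the new low bit is the
 * XOR of bits 8, 7, 6 and 4 of the shifted value, and the result is
 * masked back down to 8 bits.  Starting from the seed 1 this cycles
 * through non-zero byte values (period 255, per the comment above), so
 * a freshly initialized mem never receives a key byte of 0.
 */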

int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
{
	switch (mem->type) {
	case RXE_MEM_TYPE_DMA:
		return 0;

	case RXE_MEM_TYPE_MR:
	case RXE_MEM_TYPE_FMR:
		if (iova < mem->iova ||
		    length > mem->length ||
		    iova > mem->iova + mem->length - length)
			return -EFAULT;
		return 0;

	default:
		return -EFAULT;
	}
}
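
/*
 * The last condition above is the overflow-avoiding form of
 * "iova + length > mem->iova + mem->length": because length has
 * already been checked against mem->length, the subtraction on the
 * right-hand side cannot underflow.
 */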

#define IB_ACCESS_REMOTE	(IB_ACCESS_REMOTE_READ		\
				| IB_ACCESS_REMOTE_WRITE	\
				| IB_ACCESS_REMOTE_ATOMIC)

static void rxe_mem_init(int access, struct rxe_mem *mem)
{
	u32 lkey = mem->pelem.index << 8 | rxe_get_key();
	u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;

	if (mem->pelem.pool->type == RXE_TYPE_MR) {
		mem->ibmr.lkey = lkey;
		mem->ibmr.rkey = rkey;
	}

	mem->lkey = lkey;
	mem->rkey = rkey;
	mem->state = RXE_MEM_STATE_INVALID;
	mem->type = RXE_MEM_TYPE_NONE;
	mem->map_shift = ilog2(RXE_BUF_PER_MAP);
}
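
/*
 * Key layout: the pool index lives in the upper bits and the lfsr byte
 * in the low 8, so (for example) index 0x1234 combined with key byte
 * 0x56 yields an lkey of 0x123456.  lookup_mem() below recovers the
 * index with "key >> 8" and then compares the full key.  An rkey is
 * handed out only when the caller requested some remote access right.
 */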

void rxe_mem_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_mem *mem = container_of(arg, typeof(*mem), pelem);
	int i;

	if (mem->umem)
		ib_umem_release(mem->umem);

	if (mem->map) {
		for (i = 0; i < mem->num_map; i++)
			kfree(mem->map[i]);

		kfree(mem->map);
	}
}

static int rxe_mem_alloc(struct rxe_mem *mem, int num_buf)
{
	int i;
	int num_map;
	struct rxe_map **map = mem->map;

	num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;

	mem->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
	if (!mem->map)
		goto err1;

	for (i = 0; i < num_map; i++) {
		mem->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
		if (!mem->map[i])
			goto err2;
	}

	BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));

	mem->map_shift = ilog2(RXE_BUF_PER_MAP);
	mem->map_mask = RXE_BUF_PER_MAP - 1;

	mem->num_buf = num_buf;
	mem->num_map = num_map;
	mem->max_buf = num_map * RXE_BUF_PER_MAP;

	return 0;

err2:
	for (i--; i >= 0; i--)
		kfree(mem->map[i]);

	kfree(mem->map);
err1:
	return -ENOMEM;
}
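
/*
 * The buffer table is two-level: mem->map is an array of num_map
 * pointers, each to a rxe_map holding RXE_BUF_PER_MAP rxe_phys_buf
 * entries.  The round-up above is ordinary ceiling division; if,
 * purely for illustration, RXE_BUF_PER_MAP were 512, then
 * num_buf = 1000 would give num_map = (1000 + 511) / 512 = 2 maps,
 * with room for max_buf = 1024 buffers.
 */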

int rxe_mem_init_dma(struct rxe_pd *pd,
		     int access, struct rxe_mem *mem)
{
	rxe_mem_init(access, mem);

	mem->pd = pd;
	mem->access = access;
	mem->state = RXE_MEM_STATE_VALID;
	mem->type = RXE_MEM_TYPE_DMA;

	return 0;
}
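
/*
 * A DMA-type mem has no map: mem_check_range() accepts any range for
 * it, and iova_to_vaddr()/rxe_mem_copy() below treat the iova itself
 * as a kernel virtual address.
 */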

int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
		      u64 length, u64 iova, int access,
		      struct ib_udata *udata, struct rxe_mem *mem)
{
	int entry;
	struct rxe_map **map;
	struct rxe_phys_buf *buf = NULL;
	struct ib_umem *umem;
	struct scatterlist *sg;
	int num_buf;
	void *vaddr;
	int err;

	umem = ib_umem_get(pd->ibpd.uobject->context, start, length, access, 0);
	if (IS_ERR(umem)) {
		pr_warn("err %d from ib_umem_get\n",
			(int)PTR_ERR(umem));
		err = -EINVAL;
		goto err1;
	}

	num_buf = umem->nmap;

	rxe_mem_init(access, mem);

	err = rxe_mem_alloc(mem, num_buf);
	if (err) {
		pr_warn("err %d from rxe_mem_alloc\n", err);
		ib_umem_release(umem);
		goto err1;
	}

	mem->page_shift = umem->page_shift;
	mem->page_mask = BIT(umem->page_shift) - 1;

	num_buf = 0;
	map = mem->map;
	if (length > 0) {
		buf = map[0]->buf;

		for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
			vaddr = page_address(sg_page(sg));
			if (!vaddr) {
				pr_warn("null vaddr\n");
				ib_umem_release(umem);
				err = -ENOMEM;
				goto err1;
			}

			buf->addr = (uintptr_t)vaddr;
			buf->size = BIT(umem->page_shift);
			num_buf++;
			buf++;

			if (num_buf >= RXE_BUF_PER_MAP) {
				map++;
				buf = map[0]->buf;
				num_buf = 0;
			}
		}
	}

	mem->pd = pd;
	/* record the umem only on success so that rxe_mem_cleanup()
	 * cannot release it a second time after an error path above
	 * has already done so
	 */
	mem->umem = umem;
	mem->access = access;
	mem->length = length;
	mem->iova = iova;
	mem->va = start;
	mem->offset = ib_umem_offset(umem);
	mem->state = RXE_MEM_STATE_VALID;
	mem->type = RXE_MEM_TYPE_MR;

	return 0;

err1:
	return err;
}
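
/*
 * The loop above flattens the umem's scatterlist into the map/buf
 * table one page at a time; because every buffer is exactly one page
 * (buf->size == 1 << page_shift), lookup_iova() can later take its
 * shift-and-mask fast path instead of walking the buffer list.
 */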

int rxe_mem_init_fast(struct rxe_pd *pd,
		      int max_pages, struct rxe_mem *mem)
{
	int err;

	rxe_mem_init(0, mem);

	/* In fastreg, we also set the rkey */
	mem->ibmr.rkey = mem->ibmr.lkey;

	err = rxe_mem_alloc(mem, max_pages);
	if (err)
		goto err1;

	mem->pd = pd;
	mem->max_buf = max_pages;
	mem->state = RXE_MEM_STATE_FREE;
	mem->type = RXE_MEM_TYPE_MR;

	return 0;

err1:
	return err;
}
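
/*
 * A fast-reg MR starts out in the FREE state with an empty buffer
 * table; it only becomes VALID once rxe_mem_map_pages() at the end of
 * this file populates its pages.
 */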

static void lookup_iova(
	struct rxe_mem *mem,
	u64 iova,
	int *m_out,
	int *n_out,
	size_t *offset_out)
{
	size_t offset = iova - mem->iova + mem->offset;
	int map_index;
	int buf_index;
	u64 length;

	if (likely(mem->page_shift)) {
		*offset_out = offset & mem->page_mask;
		offset >>= mem->page_shift;
		*n_out = offset & mem->map_mask;
		*m_out = offset >> mem->map_shift;
	} else {
		map_index = 0;
		buf_index = 0;

		length = mem->map[map_index]->buf[buf_index].size;

		while (offset >= length) {
			offset -= length;
			buf_index++;

			if (buf_index == RXE_BUF_PER_MAP) {
				map_index++;
				buf_index = 0;
			}
			length = mem->map[map_index]->buf[buf_index].size;
		}

		*m_out = map_index;
		*n_out = buf_index;
		*offset_out = offset;
	}
}
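
/*
 * Fast-path arithmetic, worked through with assumed sizes (4K pages,
 * so page_shift = 12, and RXE_BUF_PER_MAP = 512, so map_shift = 9 and
 * map_mask = 0x1ff): a byte offset of 0x423456 into the region becomes
 * page 0x423 with *offset_out = 0x456, buf index n = 0x423 & 0x1ff =
 * 0x23, and map index m = 0x423 >> 9 = 2.  The slow path below covers
 * mems whose buffers are not uniformly sized.
 */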

void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length)
{
	size_t offset;
	int m, n;
	void *addr;

	if (mem->state != RXE_MEM_STATE_VALID) {
		pr_warn("mem not in valid state\n");
		addr = NULL;
		goto out;
	}

	if (!mem->map) {
		addr = (void *)(uintptr_t)iova;
		goto out;
	}

	if (mem_check_range(mem, iova, length)) {
		pr_warn("range violation\n");
		addr = NULL;
		goto out;
	}

	lookup_iova(mem, iova, &m, &n, &offset);

	if (offset + length > mem->map[m]->buf[n].size) {
		pr_warn("crosses page boundary\n");
		addr = NULL;
		goto out;
	}

	addr = (void *)(uintptr_t)mem->map[m]->buf[n].addr + offset;

out:
	return addr;
}

/* copy data from a range (vaddr, vaddr+length-1) to or from
 * a mem object starting at iova.  Compute an incremental crc32
 * if crcp is not NULL.  The caller must hold a reference to mem.
 */
int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
		 enum copy_direction dir, u32 *crcp)
{
	int err;
	int bytes;
	u8 *va;
	struct rxe_map **map;
	struct rxe_phys_buf *buf;
	int m;
	int i;
	size_t offset;
	u32 crc = crcp ? (*crcp) : 0;

	if (length == 0)
		return 0;

	if (mem->type == RXE_MEM_TYPE_DMA) {
		u8 *src, *dest;

		src = (dir == to_mem_obj) ?
			addr : ((void *)(uintptr_t)iova);

		dest = (dir == to_mem_obj) ?
			((void *)(uintptr_t)iova) : addr;

		memcpy(dest, src, length);

		if (crcp)
			*crcp = rxe_crc32(to_rdev(mem->pd->ibpd.device),
					  *crcp, dest, length);

		return 0;
	}

	WARN_ON_ONCE(!mem->map);

	err = mem_check_range(mem, iova, length);
	if (err) {
		err = -EFAULT;
		goto err1;
	}

	lookup_iova(mem, iova, &m, &i, &offset);

	map = mem->map + m;
	buf = map[0]->buf + i;

	while (length > 0) {
		u8 *src, *dest;

		va = (u8 *)(uintptr_t)buf->addr + offset;
		src = (dir == to_mem_obj) ? addr : va;
		dest = (dir == to_mem_obj) ? va : addr;

		bytes = buf->size - offset;

		if (bytes > length)
			bytes = length;

		memcpy(dest, src, bytes);

		if (crcp)
			crc = rxe_crc32(to_rdev(mem->pd->ibpd.device),
					crc, dest, bytes);

		length -= bytes;
		addr += bytes;

		offset = 0;
		buf++;
		i++;

		if (i == RXE_BUF_PER_MAP) {
			i = 0;
			map++;
			buf = map[0]->buf;
		}
	}

	if (crcp)
		*crcp = crc;

	return 0;

err1:
	return err;
}
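
/*
 * The crc is always computed over the bytes at their destination, so
 * the value folded into *crcp is the same no matter which direction
 * the copy runs; callers pass a running checksum in and read the
 * updated value back out.
 */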

/* copy data in or out of a wqe, i.e. sg list
 * under the control of a dma descriptor
 */
int copy_data(
	struct rxe_pd *pd,
	int access,
	struct rxe_dma_info *dma,
	void *addr,
	int length,
	enum copy_direction dir,
	u32 *crcp)
{
	int bytes;
	struct rxe_sge *sge = &dma->sge[dma->cur_sge];
	int offset = dma->sge_offset;
	int resid = dma->resid;
	struct rxe_mem *mem = NULL;
	u64 iova;
	int err;

	if (length == 0)
		return 0;

	if (length > resid) {
		err = -EINVAL;
		goto err2;
	}

	if (sge->length && (offset < sge->length)) {
		mem = lookup_mem(pd, access, sge->lkey, lookup_local);
		if (!mem) {
			err = -EINVAL;
			goto err1;
		}
	}

	while (length > 0) {
		bytes = length;

		if (offset >= sge->length) {
			if (mem) {
				rxe_drop_ref(mem);
				mem = NULL;
			}
			sge++;
			dma->cur_sge++;
			offset = 0;

			if (dma->cur_sge >= dma->num_sge) {
				err = -ENOSPC;
				goto err2;
			}

			if (sge->length) {
				mem = lookup_mem(pd, access, sge->lkey,
						 lookup_local);
				if (!mem) {
					err = -EINVAL;
					goto err1;
				}
			} else {
				continue;
			}
		}

		if (bytes > sge->length - offset)
			bytes = sge->length - offset;

		if (bytes > 0) {
			iova = sge->addr + offset;

			err = rxe_mem_copy(mem, iova, addr, bytes, dir, crcp);
			if (err)
				goto err2;

			offset += bytes;
			resid -= bytes;
			length -= bytes;
			addr += bytes;
		}
	}

	dma->sge_offset = offset;
	dma->resid = resid;

	if (mem)
		rxe_drop_ref(mem);

	return 0;

err2:
	if (mem)
		rxe_drop_ref(mem);
err1:
	return err;
}
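
/*
 * The cursor (cur_sge, sge_offset, resid) lives in the dma descriptor
 * itself, so successive calls resume where the previous one stopped,
 * letting a transfer that spans several packets walk a single sg list.
 * Zero-length sges are legal and are simply skipped over.
 */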

int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
{
	struct rxe_sge *sge = &dma->sge[dma->cur_sge];
	int offset = dma->sge_offset;
	int resid = dma->resid;

	while (length) {
		unsigned int bytes;

		if (offset >= sge->length) {
			sge++;
			dma->cur_sge++;
			offset = 0;
			if (dma->cur_sge >= dma->num_sge)
				return -ENOSPC;
		}

		bytes = length;

		if (bytes > sge->length - offset)
			bytes = sge->length - offset;

		offset += bytes;
		resid -= bytes;
		length -= bytes;
	}

	dma->sge_offset = offset;
	dma->resid = resid;

	return 0;
}

/* (1) find the mem (mr or mw) corresponding to lkey/rkey
 *     depending on lookup_type
 * (2) verify that the (qp) pd matches the mem pd
 * (3) verify that the mem can support the requested access
 * (4) verify that mem state is valid
 */
struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
			   enum lookup_type type)
{
	struct rxe_mem *mem;
	struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
	int index = key >> 8;

	if (index >= RXE_MIN_MR_INDEX && index <= RXE_MAX_MR_INDEX) {
		mem = rxe_pool_get_index(&rxe->mr_pool, index);
		if (!mem)
			goto err1;
	} else {
		goto err1;
	}

	if ((type == lookup_local && mem->lkey != key) ||
	    (type == lookup_remote && mem->rkey != key))
		goto err2;

	if (mem->pd != pd)
		goto err2;

	if (access && !(access & mem->access))
		goto err2;

	if (mem->state != RXE_MEM_STATE_VALID)
		goto err2;

	return mem;

err2:
	rxe_drop_ref(mem);
err1:
	return NULL;
}

int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem,
		      u64 *page, int num_pages, u64 iova)
{
	int i;
	int num_buf;
	int err;
	struct rxe_map **map;
	struct rxe_phys_buf *buf;
	int page_size;

	if (num_pages > mem->max_buf) {
		err = -EINVAL;
		goto err1;
	}

	num_buf = 0;
	page_size = 1 << mem->page_shift;
	map = mem->map;
	buf = map[0]->buf;

	for (i = 0; i < num_pages; i++) {
		buf->addr = *page++;
		buf->size = page_size;
		buf++;
		num_buf++;

		if (num_buf == RXE_BUF_PER_MAP) {
			map++;
			buf = map[0]->buf;
			num_buf = 0;
		}
	}

	mem->iova = iova;
	mem->va = iova;
	mem->length = num_pages << mem->page_shift;
	mem->state = RXE_MEM_STATE_VALID;

	return 0;

err1:
	return err;
}