Lines matching references to mhp (identifier cross-reference; the source appears to be drivers/infiniband/hw/cxgb4/mem.c, with the file's own line numbers on the left and "argument"/"local" marking where mhp is defined).
393 static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag) in finish_mem_reg() argument
397 mhp->attr.state = 1; in finish_mem_reg()
398 mhp->attr.stag = stag; in finish_mem_reg()
400 mhp->ibmr.rkey = mhp->ibmr.lkey = stag; in finish_mem_reg()
401 mhp->ibmr.length = mhp->attr.len; in finish_mem_reg()
402 mhp->ibmr.page_size = 1U << (mhp->attr.page_size + 12); in finish_mem_reg()
403 pr_debug("mmid 0x%x mhp %p\n", mmid, mhp); in finish_mem_reg()
404 return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL); in finish_mem_reg()
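finish_mem_reg() marks the MR valid, mirrors the new stag into both rkey and lkey, and publishes the MR in the device's mrs xarray under mmid. The mmid computation itself is on a line that does not match this search, but judging from the "mhp->attr.stag >> 8" visible in c4iw_dereg_mr() further down, it is the stag with its low 8 key bits dropped. A standalone sketch of that split (the stag value is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    /* A cxgb4 stag carries an 8-bit key in its low byte; the upper 24
     * bits form the mmid used as the xarray index (cf. the
     * "mhp->attr.stag >> 8" line in c4iw_dereg_mr() below). */
    int main(void)
    {
        uint32_t stag = 0x12345600 | 0x2a;  /* hypothetical stag, key 0x2a */
        uint32_t mmid = stag >> 8;          /* index into rhp->mrs */

        printf("stag 0x%x -> mmid 0x%x key 0x%x\n", stag, mmid, stag & 0xffu);
        return 0;
    }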
408 struct c4iw_mr *mhp, int shift) in register_mem() argument
413 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid, in register_mem()
414 FW_RI_STAG_NSMR, mhp->attr.len ? in register_mem()
415 mhp->attr.perms : 0, in register_mem()
416 mhp->attr.mw_bind_enable, mhp->attr.zbva, in register_mem()
417 mhp->attr.va_fbo, mhp->attr.len ? in register_mem()
418 mhp->attr.len : -1, shift - 12, in register_mem()
419 mhp->attr.pbl_size, mhp->attr.pbl_addr, NULL, in register_mem()
420 mhp->wr_waitp); in register_mem()
424 ret = finish_mem_reg(mhp, stag); in register_mem()
426 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, in register_mem()
427 mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp); in register_mem()
428 mhp->dereg_skb = NULL; in register_mem()
433 static int alloc_pbl(struct c4iw_mr *mhp, int npages) in alloc_pbl() argument
435 mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev, in alloc_pbl()
438 if (!mhp->attr.pbl_addr) in alloc_pbl()
441 mhp->attr.pbl_size = npages; in alloc_pbl()
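alloc_pbl() reserves pbl_size entries from the adapter's PBL pool and records where they start. Each PBL entry holds one 64-bit DMA address, which is why sizes convert between pages and bytes with a 3-bit shift throughout this file ("npages << 3" on the unmatched allocation line here, "mhp->attr.pbl_size << 3" in the free paths). A standalone sketch of that conversion:

    #include <stdint.h>
    #include <stdio.h>

    /* One PBL entry per page, 8 bytes (a 64-bit DMA address) per entry,
     * so byte length = page count << 3. */
    int main(void)
    {
        unsigned int npages = 128;              /* hypothetical page count */
        size_t pbl_bytes = (size_t)npages << 3; /* npages * sizeof(uint64_t) */

        printf("%u pages -> %zu PBL bytes\n", npages, pbl_bytes);
        return 0;
    }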
450 struct c4iw_mr *mhp; in c4iw_get_dma_mr() local
458 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); in c4iw_get_dma_mr()
459 if (!mhp) in c4iw_get_dma_mr()
461 mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL); in c4iw_get_dma_mr()
462 if (!mhp->wr_waitp) { in c4iw_get_dma_mr()
466 c4iw_init_wr_wait(mhp->wr_waitp); in c4iw_get_dma_mr()
468 mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL); in c4iw_get_dma_mr()
469 if (!mhp->dereg_skb) { in c4iw_get_dma_mr()
474 mhp->rhp = rhp; in c4iw_get_dma_mr()
475 mhp->attr.pdid = php->pdid; in c4iw_get_dma_mr()
476 mhp->attr.perms = c4iw_ib_to_tpt_access(acc); in c4iw_get_dma_mr()
477 mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND; in c4iw_get_dma_mr()
478 mhp->attr.zbva = 0; in c4iw_get_dma_mr()
479 mhp->attr.va_fbo = 0; in c4iw_get_dma_mr()
480 mhp->attr.page_size = 0; in c4iw_get_dma_mr()
481 mhp->attr.len = ~0ULL; in c4iw_get_dma_mr()
482 mhp->attr.pbl_size = 0; in c4iw_get_dma_mr()
485 FW_RI_STAG_NSMR, mhp->attr.perms, in c4iw_get_dma_mr()
486 mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0, in c4iw_get_dma_mr()
487 NULL, mhp->wr_waitp); in c4iw_get_dma_mr()
491 ret = finish_mem_reg(mhp, stag); in c4iw_get_dma_mr()
494 return &mhp->ibmr; in c4iw_get_dma_mr()
496 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, in c4iw_get_dma_mr()
497 mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp); in c4iw_get_dma_mr()
499 kfree_skb(mhp->dereg_skb); in c4iw_get_dma_mr()
501 c4iw_put_wr_wait(mhp->wr_waitp); in c4iw_get_dma_mr()
503 kfree(mhp); in c4iw_get_dma_mr()
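c4iw_get_dma_mr() sets up a full-coverage DMA MR: no PBL (pbl_size = 0), zero-based addressing (zbva and va_fbo both 0), and len = ~0ULL, so the resulting stag spans the whole address space; the same ~0ULL is passed straight to write_tpt_entry(). The error labels between the matched lines form the usual goto-unwind ladder. A standalone model of that shape (the names are stand-ins, not the driver's):

    #include <stdlib.h>

    /* Goto-unwind ladder as in c4iw_get_dma_mr(): each failure label
     * releases exactly what was acquired before it, in reverse order. */
    struct res { void *mr, *wait, *skb; };

    static int build(struct res *r)
    {
        r->mr = malloc(16);         /* ~ kzalloc(sizeof(*mhp)) */
        if (!r->mr)
            goto err_out;
        r->wait = malloc(16);       /* ~ c4iw_alloc_wr_wait() */
        if (!r->wait)
            goto err_free_mr;
        r->skb = malloc(16);        /* ~ alloc_skb(SGE_MAX_WR_LEN) */
        if (!r->skb)
            goto err_free_wait;
        return 0;

    err_free_wait:
        free(r->wait);
    err_free_mr:
        free(r->mr);
    err_out:
        return -1;
    }

    int main(void)
    {
        struct res r;

        if (build(&r))
            return 1;
        free(r.skb);
        free(r.wait);
        free(r.mr);
        return 0;
    }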
516 struct c4iw_mr *mhp; in c4iw_reg_user_mr() local
532 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); in c4iw_reg_user_mr()
533 if (!mhp) in c4iw_reg_user_mr()
535 mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL); in c4iw_reg_user_mr()
536 if (!mhp->wr_waitp) in c4iw_reg_user_mr()
539 mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL); in c4iw_reg_user_mr()
540 if (!mhp->dereg_skb) in c4iw_reg_user_mr()
543 mhp->rhp = rhp; in c4iw_reg_user_mr()
545 mhp->umem = ib_umem_get(pd->device, start, length, acc); in c4iw_reg_user_mr()
546 if (IS_ERR(mhp->umem)) in c4iw_reg_user_mr()
551 n = ib_umem_num_dma_blocks(mhp->umem, 1 << shift); in c4iw_reg_user_mr()
552 err = alloc_pbl(mhp, n); in c4iw_reg_user_mr()
564 rdma_umem_for_each_dma_block(mhp->umem, &biter, 1 << shift) { in c4iw_reg_user_mr()
567 err = write_pbl(&mhp->rhp->rdev, pages, in c4iw_reg_user_mr()
568 mhp->attr.pbl_addr + (n << 3), i, in c4iw_reg_user_mr()
569 mhp->wr_waitp); in c4iw_reg_user_mr()
578 err = write_pbl(&mhp->rhp->rdev, pages, in c4iw_reg_user_mr()
579 mhp->attr.pbl_addr + (n << 3), i, in c4iw_reg_user_mr()
580 mhp->wr_waitp); in c4iw_reg_user_mr()
587 mhp->attr.pdid = php->pdid; in c4iw_reg_user_mr()
588 mhp->attr.zbva = 0; in c4iw_reg_user_mr()
589 mhp->attr.perms = c4iw_ib_to_tpt_access(acc); in c4iw_reg_user_mr()
590 mhp->attr.va_fbo = virt; in c4iw_reg_user_mr()
591 mhp->attr.page_size = shift - 12; in c4iw_reg_user_mr()
592 mhp->attr.len = length; in c4iw_reg_user_mr()
594 err = register_mem(rhp, php, mhp, shift); in c4iw_reg_user_mr()
598 return &mhp->ibmr; in c4iw_reg_user_mr()
601 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, in c4iw_reg_user_mr()
602 mhp->attr.pbl_size << 3); in c4iw_reg_user_mr()
604 ib_umem_release(mhp->umem); in c4iw_reg_user_mr()
606 kfree_skb(mhp->dereg_skb); in c4iw_reg_user_mr()
608 c4iw_put_wr_wait(mhp->wr_waitp); in c4iw_reg_user_mr()
610 kfree(mhp); in c4iw_reg_user_mr()
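c4iw_reg_user_mr() pins the user range with ib_umem_get(), sizes the PBL from ib_umem_num_dma_blocks(), then walks rdma_umem_for_each_dma_block(), staging big-endian DMA addresses in a scratch buffer and flushing it to the adapter with write_pbl() whenever it fills; the second write_pbl() call above handles the partial tail batch. A standalone model of that batch-and-flush loop (BATCH and flush() are stand-ins for the real batch size and write_pbl()):

    #include <stdint.h>
    #include <stdio.h>

    #define BATCH 4                 /* stand-in for the real batch size */

    static int flush(const uint64_t *buf, int off, int cnt)
    {
        printf("write %d entries at PBL index %d\n", cnt, off);
        return 0;                   /* ~ write_pbl() status */
    }

    int main(void)
    {
        uint64_t buf[BATCH];
        int i = 0, n = 0, err = 0, blk;

        for (blk = 0; blk < 10 && !err; blk++) {    /* 10 fake DMA blocks */
            buf[i++] = 0x1000ull * blk;             /* fake DMA address */
            if (i == BATCH) {                       /* batch full: flush */
                err = flush(buf, n, i);
                n += i;                             /* advance PBL index */
                i = 0;
            }
        }
        if (!err && i)                              /* flush the tail */
            err = flush(buf, n, i);
        return err;
    }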
616 struct c4iw_mw *mhp = to_c4iw_mw(ibmw); in c4iw_alloc_mw() local
628 mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL); in c4iw_alloc_mw()
629 if (!mhp->wr_waitp) in c4iw_alloc_mw()
632 mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL); in c4iw_alloc_mw()
633 if (!mhp->dereg_skb) { in c4iw_alloc_mw()
638 ret = allocate_window(&rhp->rdev, &stag, php->pdid, mhp->wr_waitp); in c4iw_alloc_mw()
642 mhp->rhp = rhp; in c4iw_alloc_mw()
643 mhp->attr.pdid = php->pdid; in c4iw_alloc_mw()
644 mhp->attr.type = FW_RI_STAG_MW; in c4iw_alloc_mw()
645 mhp->attr.stag = stag; in c4iw_alloc_mw()
648 if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) { in c4iw_alloc_mw()
652 pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag); in c4iw_alloc_mw()
656 deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb, in c4iw_alloc_mw()
657 mhp->wr_waitp); in c4iw_alloc_mw()
659 kfree_skb(mhp->dereg_skb); in c4iw_alloc_mw()
661 c4iw_put_wr_wait(mhp->wr_waitp); in c4iw_alloc_mw()
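Unlike the MR paths, c4iw_alloc_mw() never kzallocs its own object: the matched lines begin at the wr_waitp allocation because the ib_mw container is allocated by the RDMA core and to_c4iw_mw() merely unwraps it. A sketch of that unwrap, assuming the conventional embedded-struct layout (kernel-style, not standalone):

    /* The driver MW embeds the core's ib_mw, so the conversion is a
     * plain container_of(); the exact field layout is assumed here. */
    static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
    {
        return container_of(ibmw, struct c4iw_mw, ibmw);
    }

Once allocate_window() succeeds, the window is published in the same rhp->mrs xarray as ordinary MRs, and a failed xa_insert_irq() rolls the hardware allocation back with deallocate_window().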
668 struct c4iw_mw *mhp; in c4iw_dealloc_mw() local
671 mhp = to_c4iw_mw(mw); in c4iw_dealloc_mw()
672 rhp = mhp->rhp; in c4iw_dealloc_mw()
675 deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb, in c4iw_dealloc_mw()
676 mhp->wr_waitp); in c4iw_dealloc_mw()
677 kfree_skb(mhp->dereg_skb); in c4iw_dealloc_mw()
678 c4iw_put_wr_wait(mhp->wr_waitp); in c4iw_dealloc_mw()
687 struct c4iw_mr *mhp; in c4iw_alloc_mr() local
701 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); in c4iw_alloc_mr()
702 if (!mhp) { in c4iw_alloc_mr()
707 mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL); in c4iw_alloc_mr()
708 if (!mhp->wr_waitp) { in c4iw_alloc_mr()
712 c4iw_init_wr_wait(mhp->wr_waitp); in c4iw_alloc_mr()
714 mhp->mpl = dma_alloc_coherent(&rhp->rdev.lldi.pdev->dev, in c4iw_alloc_mr()
715 length, &mhp->mpl_addr, GFP_KERNEL); in c4iw_alloc_mr()
716 if (!mhp->mpl) { in c4iw_alloc_mr()
720 mhp->max_mpl_len = length; in c4iw_alloc_mr()
722 mhp->rhp = rhp; in c4iw_alloc_mr()
723 ret = alloc_pbl(mhp, max_num_sg); in c4iw_alloc_mr()
726 mhp->attr.pbl_size = max_num_sg; in c4iw_alloc_mr()
728 mhp->attr.pbl_size, mhp->attr.pbl_addr, in c4iw_alloc_mr()
729 mhp->wr_waitp); in c4iw_alloc_mr()
732 mhp->attr.pdid = php->pdid; in c4iw_alloc_mr()
733 mhp->attr.type = FW_RI_STAG_NSMR; in c4iw_alloc_mr()
734 mhp->attr.stag = stag; in c4iw_alloc_mr()
735 mhp->attr.state = 0; in c4iw_alloc_mr()
737 mhp->ibmr.rkey = mhp->ibmr.lkey = stag; in c4iw_alloc_mr()
738 if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) { in c4iw_alloc_mr()
743 pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag); in c4iw_alloc_mr()
744 return &(mhp->ibmr); in c4iw_alloc_mr()
746 dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size, in c4iw_alloc_mr()
747 mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp); in c4iw_alloc_mr()
749 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, in c4iw_alloc_mr()
750 mhp->attr.pbl_size << 3); in c4iw_alloc_mr()
752 dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev, in c4iw_alloc_mr()
753 mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr); in c4iw_alloc_mr()
755 c4iw_put_wr_wait(mhp->wr_waitp); in c4iw_alloc_mr()
757 kfree(mhp); in c4iw_alloc_mr()
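c4iw_alloc_mr() prepares a fast-register MR: it allocates an "mpl" (memory page list) in coherent DMA memory for the hardware to read during a fast-register work request, plus a PBL of max_num_sg entries, and leaves attr.state at 0 (invalid) until a fast-register WR turns it on. A kernel-style sketch of the MPL allocation pattern (not standalone; sizing by max_num_sg * sizeof(u64) is this sketch's assumption):

    /* Coherent buffer the adapter DMAs from when the MR is fast-registered. */
    size_t len = max_num_sg * sizeof(u64);      /* assumed sizing */
    dma_addr_t mpl_addr;
    u64 *mpl;

    mpl = dma_alloc_coherent(&pdev->dev, len, &mpl_addr, GFP_KERNEL);
    if (!mpl)
        return -ENOMEM;
    /* ... on teardown, mirrored in c4iw_dereg_mr(): */
    dma_free_coherent(&pdev->dev, len, mpl, mpl_addr);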
764 struct c4iw_mr *mhp = to_c4iw_mr(ibmr); in c4iw_set_page() local
766 if (unlikely(mhp->mpl_len == mhp->attr.pbl_size)) in c4iw_set_page()
769 mhp->mpl[mhp->mpl_len++] = addr; in c4iw_set_page()
777 struct c4iw_mr *mhp = to_c4iw_mr(ibmr); in c4iw_map_mr_sg() local
779 mhp->mpl_len = 0; in c4iw_map_mr_sg()
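c4iw_map_mr_sg() resets mpl_len and, on an unmatched line, hands the scatterlist to the core, which calls c4iw_set_page() once per page-sized chunk; set_page only appends to the MPL after the bounds check against pbl_size seen above. This is the standard ib_sg_to_pages() pairing used across RDMA drivers; a sketch with hypothetical driver names (kernel-style, not standalone):

    /* ib_sg_to_pages() walks the SG list and invokes the driver callback
     * for each page; the callback just accumulates addresses. */
    static int my_set_page(struct ib_mr *ibmr, u64 addr)   /* ~ c4iw_set_page */
    {
        struct my_mr *m = to_my_mr(ibmr);       /* hypothetical container_of */

        if (unlikely(m->npages == m->max_pages))
            return -ENOMEM;                     /* page list is full */
        m->pages[m->npages++] = addr;
        return 0;
    }

    static int my_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
                            int sg_nents, unsigned int *sg_offset)
    {
        struct my_mr *m = to_my_mr(ibmr);

        m->npages = 0;                          /* restart the page list */
        return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, my_set_page);
    }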
787 struct c4iw_mr *mhp; in c4iw_dereg_mr() local
792 mhp = to_c4iw_mr(ib_mr); in c4iw_dereg_mr()
793 rhp = mhp->rhp; in c4iw_dereg_mr()
794 mmid = mhp->attr.stag >> 8; in c4iw_dereg_mr()
796 if (mhp->mpl) in c4iw_dereg_mr()
797 dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev, in c4iw_dereg_mr()
798 mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr); in c4iw_dereg_mr()
799 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, in c4iw_dereg_mr()
800 mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp); in c4iw_dereg_mr()
801 if (mhp->attr.pbl_size) in c4iw_dereg_mr()
802 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, in c4iw_dereg_mr()
803 mhp->attr.pbl_size << 3); in c4iw_dereg_mr()
804 if (mhp->kva) in c4iw_dereg_mr()
805 kfree((void *) (unsigned long) mhp->kva); in c4iw_dereg_mr()
806 ib_umem_release(mhp->umem); in c4iw_dereg_mr()
807 pr_debug("mmid 0x%x ptr %p\n", mmid, mhp); in c4iw_dereg_mr()
808 c4iw_put_wr_wait(mhp->wr_waitp); in c4iw_dereg_mr()
809 kfree(mhp); in c4iw_dereg_mr()
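c4iw_dereg_mr() is the common exit for every MR flavor, so each teardown step is conditional on what this particular MR actually owns. An annotated kernel-style restatement (not standalone):

    if (mhp->mpl)                   /* MPL exists only for fast-reg MRs */
        dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
                          mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
    dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
              mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
    if (mhp->attr.pbl_size)         /* DMA MRs never allocate a PBL */
        c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
                          mhp->attr.pbl_size << 3);
    ib_umem_release(mhp->umem);     /* NULL-safe; set only for user MRs */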
815 struct c4iw_mr *mhp; in c4iw_invalidate_mr() local
819 mhp = xa_load(&rhp->mrs, rkey >> 8); in c4iw_invalidate_mr()
820 if (mhp) in c4iw_invalidate_mr()
821 mhp->attr.state = 0; in c4iw_invalidate_mr()
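c4iw_invalidate_mr() closes the loop on the xarray lifecycle these matches trace: objects are inserted under stag >> 8 at creation, looked up by rkey >> 8 here, and erased (on unmatched lines) at destroy time; invalidation itself only flips attr.state to 0 and frees nothing. A kernel-style sketch of that shared lifecycle (not standalone; the helper names are hypothetical):

    #include <linux/xarray.h>

    static int publish(struct xarray *mrs, u32 stag, void *obj)
    {
        return xa_insert_irq(mrs, stag >> 8, obj, GFP_KERNEL);
    }

    static void *lookup(struct xarray *mrs, u32 rkey)
    {
        return xa_load(mrs, rkey >> 8);     /* as in c4iw_invalidate_mr() */
    }

    static void unpublish(struct xarray *mrs, u32 stag)
    {
        xa_erase_irq(mrs, stag >> 8);
    }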