Lines Matching refs:rhp
404 return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL); in finish_mem_reg()
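The insert at line 404 above is the common tail of every registration path in this listing: the new MR is filed in the device-wide rhp->mrs xarray under its mmid, which the rkey >> 8 lookup at line 819 suggests is simply the stag with its low key byte shifted off. A minimal sketch of that pattern, using reduced stand-in structs rather than the driver's real c4iw_dev / c4iw_mr definitions:

#include <linux/types.h>
#include <linux/xarray.h>

/* Stand-ins for the driver structs; only the fields this listing shows. */
struct demo_dev { struct xarray mrs; };
struct demo_mr  { u32 stag; };

/* File the MR under mmid = stag >> 8 so invalidate/dereg can find it later.
 * xa_insert_irq() returns -EBUSY if that id is already in use.
 */
static int demo_track_mr(struct demo_dev *rhp, struct demo_mr *mhp)
{
        u32 mmid = mhp->stag >> 8;

        return xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL);
}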
407 static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php, in register_mem() argument
413 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid, in register_mem()
426 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, in register_mem()
435 mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev, in alloc_pbl()
448 struct c4iw_dev *rhp; in c4iw_get_dma_mr() local
456 rhp = php->rhp; in c4iw_get_dma_mr()
474 mhp->rhp = rhp; in c4iw_get_dma_mr()
484 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid, in c4iw_get_dma_mr()
496 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, in c4iw_get_dma_mr()
514 struct c4iw_dev *rhp; in c4iw_reg_user_mr() local
527 rhp = php->rhp; in c4iw_reg_user_mr()
529 if (mr_exceeds_hw_limits(rhp, length)) in c4iw_reg_user_mr()
543 mhp->rhp = rhp; in c4iw_reg_user_mr()
567 err = write_pbl(&mhp->rhp->rdev, pages, in c4iw_reg_user_mr()
578 err = write_pbl(&mhp->rhp->rdev, pages, in c4iw_reg_user_mr()
594 err = register_mem(rhp, php, mhp, shift); in c4iw_reg_user_mr()
601 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, in c4iw_reg_user_mr()
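Lines 567 and 578 are the two halves of one batching idiom in c4iw_reg_user_mr: page-list entries are staged in a small local array, write_pbl() flushes that array each time it fills during the walk (line 567), and one final call pushes whatever remains (line 578). A sketch of that shape; write_chunk() is a hypothetical stand-in for write_pbl(), and the batch size is an assumption, not the driver's:

#include <linux/types.h>

#define DEMO_BATCH 32   /* assumed batch size */

static int demo_write_pbl(const __be64 *addrs, int npages,
                          int (*write_chunk)(const __be64 *chunk, int n, int off))
{
        __be64 buf[DEMO_BATCH];
        int i, n = 0, off = 0, err;

        for (i = 0; i < npages; i++) {
                buf[n++] = addrs[i];
                if (n == DEMO_BATCH) {          /* staging buffer full: flush */
                        err = write_chunk(buf, n, off);
                        if (err)
                                return err;
                        off += n;
                        n = 0;
                }
        }
        return n ? write_chunk(buf, n, off) : 0;        /* trailing partial batch */
}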
617 struct c4iw_dev *rhp; in c4iw_alloc_mw() local
627 rhp = php->rhp; in c4iw_alloc_mw()
638 ret = allocate_window(&rhp->rdev, &stag, php->pdid, mhp->wr_waitp); in c4iw_alloc_mw()
642 mhp->rhp = rhp; in c4iw_alloc_mw()
648 if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) { in c4iw_alloc_mw()
656 deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb, in c4iw_alloc_mw()
667 struct c4iw_dev *rhp; in c4iw_dealloc_mw() local
672 rhp = mhp->rhp; in c4iw_dealloc_mw()
674 xa_erase_irq(&rhp->mrs, mmid); in c4iw_dealloc_mw()
675 deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb, in c4iw_dealloc_mw()
685 struct c4iw_dev *rhp; in c4iw_alloc_mr() local
694 rhp = php->rhp; in c4iw_alloc_mr()
697 max_num_sg > t4_max_fr_depth(rhp->rdev.lldi.ulptx_memwrite_dsgl && in c4iw_alloc_mr()
714 mhp->mpl = dma_alloc_coherent(&rhp->rdev.lldi.pdev->dev, in c4iw_alloc_mr()
722 mhp->rhp = rhp; in c4iw_alloc_mr()
727 ret = allocate_stag(&rhp->rdev, &stag, php->pdid, in c4iw_alloc_mr()
738 if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) { in c4iw_alloc_mr()
746 dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size, in c4iw_alloc_mr()
749 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, in c4iw_alloc_mr()
752 dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev, in c4iw_alloc_mr()
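The error labels at lines 746..752 undo c4iw_alloc_mr's allocations in reverse order: the stag first, then the PBL, then the DMA-coherent mpl buffer obtained at line 714. That is the usual goto-unwind shape, sketched below with stand-in helpers (none of these names are the driver's):

static int demo_get_mpl(void)  { return 0; }    /* ~ dma_alloc_coherent()  */
static int demo_get_pbl(void)  { return 0; }    /* ~ c4iw_pblpool_alloc()  */
static int demo_get_stag(void) { return 0; }    /* ~ allocate_stag()       */
static void demo_put_mpl(void) { }
static void demo_put_pbl(void) { }

static int demo_alloc_mr(void)
{
        int ret;

        ret = demo_get_mpl();
        if (ret)
                return ret;
        ret = demo_get_pbl();
        if (ret)
                goto err_free_mpl;
        ret = demo_get_stag();
        if (ret)
                goto err_free_pbl;
        return 0;                       /* success: caller owns all three */

err_free_pbl:
        demo_put_pbl();
err_free_mpl:
        demo_put_mpl();
        return ret;
}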
786 struct c4iw_dev *rhp; in c4iw_dereg_mr() local
793 rhp = mhp->rhp; in c4iw_dereg_mr()
795 xa_erase_irq(&rhp->mrs, mmid); in c4iw_dereg_mr()
797 dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev, in c4iw_dereg_mr()
799 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, in c4iw_dereg_mr()
802 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, in c4iw_dereg_mr()
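c4iw_dereg_mr (lines 793..802) tears down in a fixed order: the MR is erased from rhp->mrs first, so the lookup at line 819 can no longer return it mid-destruction, and only then are the mpl buffer from line 714, the hardware TPT entry and the PBL released. A sketch of that ordering, with the hardware steps kept as comments naming the calls the listing shows:

#include <linux/types.h>
#include <linux/xarray.h>

struct demo_dev { struct xarray mrs; };
struct demo_mr  { u32 stag; };

static void demo_destroy_mr(struct demo_dev *rhp, struct demo_mr *mhp)
{
        /* 1. Unpublish: after this, xa_load() can no longer find the MR. */
        xa_erase_irq(&rhp->mrs, mhp->stag >> 8);

        /* 2. dma_free_coherent(): drop the mpl buffer, if one was set up.  */
        /* 3. dereg_mem(): invalidate the stag's TPT entry in the adapter.  */
        /* 4. c4iw_pblpool_free(): return the PBL range to the pool.        */
}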
813 void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey) in c4iw_invalidate_mr() argument
818 xa_lock_irqsave(&rhp->mrs, flags); in c4iw_invalidate_mr()
819 mhp = xa_load(&rhp->mrs, rkey >> 8); in c4iw_invalidate_mr()
822 xa_unlock_irqrestore(&rhp->mrs, flags); in c4iw_invalidate_mr()
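c4iw_invalidate_mr (lines 818..822) is a lookup-under-lock: xa_lock_irqsave() takes the xarray's internal spinlock so the entry cannot be erased (line 795) between the xa_load() and the update, the MR is found by rkey >> 8 (the same keying as the insert at line 404), and it is marked invalid while the lock is still held. A self-contained sketch of that pattern; the state field is an assumption, since the listing does not show what the real function modifies:

#include <linux/types.h>
#include <linux/xarray.h>

struct demo_mr  { int state; };                 /* assumed field */
struct demo_dev { struct xarray mrs; };

static void demo_invalidate(struct demo_dev *rhp, u32 rkey)
{
        struct demo_mr *mhp;
        unsigned long flags;

        xa_lock_irqsave(&rhp->mrs, flags);
        mhp = xa_load(&rhp->mrs, rkey >> 8);    /* same >> 8 keying as the insert */
        if (mhp)
                mhp->state = 0;                 /* mark invalid; freeing is dereg's job */
        xa_unlock_irqrestore(&rhp->mrs, flags);
}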