RDMA/mana_ib: Implement DMABUF MR support

Add support for dmabuf MRs to mana_ib.

Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
Link: https://patch.msgid.link/1739454861-4456-1-git-send-email-kotaranov@linux.microsoft.com
Reviewed-by: Long Li <longli@microsoft.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
This commit is contained in:
Konstantin Taranov
2025-02-13 05:54:21 -08:00
committed by Leon Romanovsky
parent 161072d43a
commit ffd67b6b42
3 changed files with 74 additions and 0 deletions
+1
View File
@@ -48,6 +48,7 @@ static const struct ib_device_ops mana_ib_dev_ops = {
.query_pkey = mana_ib_query_pkey,
.query_port = mana_ib_query_port,
.reg_user_mr = mana_ib_reg_user_mr,
.reg_user_mr_dmabuf = mana_ib_reg_user_mr_dmabuf,
.req_notify_cq = mana_ib_arm_cq,
INIT_RDMA_OBJ_SIZE(ib_ah, mana_ib_ah, ibah),
+4
View File
@@ -682,4 +682,8 @@ int mana_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mana_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length,
u64 iova, int fd, int mr_access_flags,
struct uverbs_attr_bundle *attrs);
#endif
+69
View File
@@ -173,6 +173,75 @@ err_free:
return ERR_PTR(err);
}
/*
 * mana_ib_reg_user_mr_dmabuf() - Register a memory region backed by a dmabuf.
 * @ibpd:          protection domain the MR belongs to
 * @start:         start of the user virtual address range to register
 * @length:        length of the region in bytes
 * @iova:          I/O virtual address the HW will use for this MR
 * @fd:            file descriptor of the dmabuf backing the region
 * @access_flags:  IB access flags requested by the caller
 * @attrs:         uverbs attribute bundle (unused here)
 *
 * Pins the dmabuf pages, creates a GDMA DMA region covering them, and then
 * creates a GVA-type MR referencing that DMA region.
 *
 * Return: pointer to the new ib_mr on success, ERR_PTR() on failure.
 */
struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length,
u64 iova, int fd, int access_flags,
struct uverbs_attr_bundle *attrs)
{
struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
struct gdma_create_mr_params mr_params = {};
struct ib_device *ibdev = ibpd->device;
struct ib_umem_dmabuf *umem_dmabuf;
struct mana_ib_dev *dev;
struct mana_ib_mr *mr;
u64 dma_region_handle;
int err;
dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
/* Optional flags are best-effort; drop them before validating support. */
access_flags &= ~IB_ACCESS_OPTIONAL;
if (access_flags & ~VALID_MR_FLAGS)
return ERR_PTR(-EOPNOTSUPP);
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
/* Pin the dmabuf for the MR's lifetime; HW cannot handle page moves. */
umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, start, length, fd, access_flags);
if (IS_ERR(umem_dmabuf)) {
err = PTR_ERR(umem_dmabuf);
ibdev_dbg(ibdev, "Failed to get dmabuf umem, %d\n", err);
goto err_free;
}
mr->umem = &umem_dmabuf->umem;
err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
if (err) {
ibdev_dbg(ibdev, "Failed create dma region for user-mr, %d\n",
err);
goto err_umem;
}
mr_params.pd_handle = pd->pd_handle;
mr_params.mr_type = GDMA_MR_TYPE_GVA;
mr_params.gva.dma_region_handle = dma_region_handle;
mr_params.gva.virtual_address = iova;
mr_params.gva.access_flags =
mana_ib_verbs_to_gdma_access_flags(access_flags);
err = mana_ib_gd_create_mr(dev, mr, &mr_params);
if (err)
goto err_dma_region;
/*
 * There is no need to keep track of dma_region_handle after MR is
 * successfully created. The dma_region_handle is tracked in the PF
 * as part of the lifecycle of this MR.
 */
return &mr->ibmr;
/* Unwind in reverse order of acquisition. */
err_dma_region:
mana_gd_destroy_dma_region(mdev_to_gc(dev), dma_region_handle);
err_umem:
ib_umem_release(mr->umem);
err_free:
kfree(mr);
return ERR_PTR(err);
}
struct ib_mr *mana_ib_get_dma_mr(struct ib_pd *ibpd, int access_flags)
{
struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);