iommufd: Lock all IOAS objects

Define helpers to lock and unlock all IOAS objects.
This will allow DMA mappings to be updated atomically during live update.
This code is substantially the same as an initial version provided by
Jason, plus fixes.

Link: https://patch.msgid.link/r/1731527497-16091-3-git-send-email-steven.sistare@oracle.com
Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
This commit is contained in:
Steve Sistare
2024-11-13 11:51:35 -08:00
committed by Jason Gunthorpe
parent 10caa8b451
commit 051ae5aa73
3 changed files with 67 additions and 0 deletions
+65
View File
@@ -52,7 +52,10 @@ int iommufd_ioas_alloc_ioctl(struct iommufd_ucmd *ucmd)
rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
if (rc)
goto out_table;
down_read(&ucmd->ictx->ioas_creation_lock);
iommufd_object_finalize(ucmd->ictx, &ioas->obj);
up_read(&ucmd->ictx->ioas_creation_lock);
return 0;
out_table:
@@ -374,6 +377,68 @@ out_put:
return rc;
}
/*
 * Undo iommufd_take_all_iova_rwsem(): drop the write side of every
 * iova_rwsem collected in @ioas_list, drop the object reference taken on
 * each IOAS, then release the ictx-wide ioas_creation_lock and destroy the
 * (now empty of ownership) @ioas_list xarray.
 *
 * Must only be called with @ioas_list populated by
 * iommufd_take_all_iova_rwsem() and with ioas_creation_lock held for write.
 */
static void iommufd_release_all_iova_rwsem(struct iommufd_ctx *ictx,
					   struct xarray *ioas_list)
{
	struct iommufd_ioas *ioas;
	unsigned long index;

	xa_for_each(ioas_list, index, ioas) {
		up_write(&ioas->iopt.iova_rwsem);
		/* Pairs with refcount_inc_not_zero() in the take path */
		refcount_dec(&ioas->obj.users);
	}
	/* Allow new IOAS creation / other nested-lock holders again */
	up_write(&ictx->ioas_creation_lock);
	xa_destroy(ioas_list);
}
/*
 * Take the write side of the iova_rwsem of every live IOAS in @ictx and
 * record each locked IOAS in @ioas_list (indexed by its object ID), holding
 * a users reference on each so it cannot be destroyed while locked.
 *
 * On success returns 0 with ioas_creation_lock held for write; the caller
 * must eventually call iommufd_release_all_iova_rwsem() to unwind.
 * On failure all locks and references are released and a negative errno is
 * returned.
 */
static int iommufd_take_all_iova_rwsem(struct iommufd_ctx *ictx,
				       struct xarray *ioas_list)
{
	struct iommufd_object *obj;
	unsigned long index;
	int rc;

	/*
	 * This is very ugly, it is done instead of adding a lock around
	 * pages->source_mm, which is a performance path for mdev, we just
	 * obtain the write side of all the iova_rwsems which also protects the
	 * pages->source_*. Due to copies we can't know which IOAS could read
	 * from the pages, so we just lock everything. This is the only place
	 * locks are nested and they are uniformly taken in ID order.
	 *
	 * ioas_creation_lock prevents new IOAS from being installed in the
	 * xarray while we do this, and also prevents more than one thread from
	 * holding nested locks.
	 */
	down_write(&ictx->ioas_creation_lock);
	xa_lock(&ictx->objects);
	xa_for_each(&ictx->objects, index, obj) {
		struct iommufd_ioas *ioas;

		if (!obj || obj->type != IOMMUFD_OBJ_IOAS)
			continue;

		/* Skip objects already on their way to destruction */
		if (!refcount_inc_not_zero(&obj->users))
			continue;

		/*
		 * xa_store(GFP_KERNEL) may sleep, so drop the objects
		 * spinlock while processing this entry. ioas_creation_lock
		 * keeps the IOAS population stable meanwhile.
		 */
		xa_unlock(&ictx->objects);

		ioas = container_of(obj, struct iommufd_ioas, obj);
		down_write_nest_lock(&ioas->iopt.iova_rwsem,
				     &ictx->ioas_creation_lock);

		rc = xa_err(xa_store(ioas_list, index, ioas, GFP_KERNEL));
		if (rc) {
			/*
			 * This ioas never made it into ioas_list, so the
			 * release helper below cannot unwind it - drop its
			 * lock and reference here first.
			 */
			up_write(&ioas->iopt.iova_rwsem);
			refcount_dec(&obj->users);
			iommufd_release_all_iova_rwsem(ictx, ioas_list);
			return rc;
		}
		xa_lock(&ictx->objects);
	}
	xa_unlock(&ictx->objects);
	return 0;
}
int iommufd_option_rlimit_mode(struct iommu_option *cmd,
struct iommufd_ctx *ictx)
{
+1
View File
@@ -24,6 +24,7 @@ struct iommufd_ctx {
struct xarray objects;
struct xarray groups;
wait_queue_head_t destroy_wait;
struct rw_semaphore ioas_creation_lock;
u8 account_mode;
/* Compatibility with VFIO no iommu */
+1
View File
@@ -222,6 +222,7 @@ static int iommufd_fops_open(struct inode *inode, struct file *filp)
pr_info_once("IOMMUFD is providing /dev/vfio/vfio, not VFIO.\n");
}
init_rwsem(&ictx->ioas_creation_lock);
xa_init_flags(&ictx->objects, XA_FLAGS_ALLOC1 | XA_FLAGS_ACCOUNT);
xa_init(&ictx->groups);
ictx->file = filp;