RDMA/rxe: Add page invalidation support

On page invalidation, an MMU notifier callback is invoked to unmap DMA
addresses and update the driver page table (umem_odp->dma_list). The
callback is registered when an ODP-enabled MR is created.

Link: https://patch.msgid.link/r/20241220100936.2193541-3-matsuda-daisuke@fujitsu.com
Signed-off-by: Daisuke Matsuda <matsuda-daisuke@fujitsu.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Author:     Daisuke Matsuda
AuthorDate: 2024-12-20 19:09:33 +09:00
Committer:  Jason Gunthorpe
parent b7f88072507
commit b601792392
3 changed files with 43 additions and 0 deletions
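The ops table added below only takes effect once it is attached to an ODP umem, which the commit message says happens when an ODP-enabled MR is created (wired up elsewhere in the series). A minimal sketch of what that registration might look like, assuming a hypothetical helper rxe_odp_mr_init(); ib_umem_odp_get(), rxe_mn_ops, struct rxe_dev and struct rxe_mr are real identifiers:

/*
 * Sketch only: attaching rxe_mn_ops when an ODP-enabled MR is registered.
 * rxe_odp_mr_init() is a hypothetical name used for illustration.
 */
static int rxe_odp_mr_init(struct rxe_dev *rxe, u64 start, u64 length,
			   int access, struct rxe_mr *mr)
{
	struct ib_umem_odp *umem_odp;

	/*
	 * No pages are pinned here; ib_umem_odp_get() only sets up the
	 * interval notifier so rxe_ib_invalidate_range() runs whenever the
	 * CPU mappings covering [start, start + length) are torn down.
	 */
	umem_odp = ib_umem_odp_get(&rxe->ib_dev, start, length, access,
				   &rxe_mn_ops);
	if (IS_ERR(umem_odp))
		return PTR_ERR(umem_odp);

	mr->umem = &umem_odp->umem;
	return 0;
}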
--- a/drivers/infiniband/sw/rxe/Makefile
+++ b/drivers/infiniband/sw/rxe/Makefile
@@ -23,3 +23,5 @@ rdma_rxe-y := \
 	rxe_task.o \
 	rxe_net.o \
 	rxe_hw_counters.o
+
+rdma_rxe-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += rxe_odp.o
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -181,4 +181,7 @@ static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
 	return rxe_wr_opcode_info[opcode].mask[qp->ibqp.qp_type];
 }
 
+/* rxe_odp.c */
+extern const struct mmu_interval_notifier_ops rxe_mn_ops;
+
 #endif /* RXE_LOC_H */
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_odp.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2022-2023 Fujitsu Ltd. All rights reserved.
+ */
+
+#include <linux/hmm.h>
+
+#include <rdma/ib_umem_odp.h>
+
+#include "rxe.h"
+
+static bool rxe_ib_invalidate_range(struct mmu_interval_notifier *mni,
+				    const struct mmu_notifier_range *range,
+				    unsigned long cur_seq)
+{
+	struct ib_umem_odp *umem_odp =
+		container_of(mni, struct ib_umem_odp, notifier);
+	unsigned long start, end;
+
+	if (!mmu_notifier_range_blockable(range))
+		return false;
+
+	mutex_lock(&umem_odp->umem_mutex);
+	mmu_interval_set_seq(mni, cur_seq);
+
+	start = max_t(u64, ib_umem_start(umem_odp), range->start);
+	end = min_t(u64, ib_umem_end(umem_odp), range->end);
+
+	/* update umem_odp->dma_list */
+	ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
+
+	mutex_unlock(&umem_odp->umem_mutex);
+	return true;
+}
+
+const struct mmu_interval_notifier_ops rxe_mn_ops = {
+	.invalidate = rxe_ib_invalidate_range,
+};
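For context, the sequence number advanced with mmu_interval_set_seq() under umem_mutex is what the page-fault side checks before trusting freshly mapped pages. A hedged sketch of that counterpart path, assuming the generic ODP helper ib_umem_odp_map_dma_and_lock(); rxe_odp_fault_range() is a hypothetical name, and the real rxe fault path arrives in later patches of this series:

/*
 * Sketch only: the fault side that pairs with rxe_ib_invalidate_range().
 */
static int rxe_odp_fault_range(struct ib_umem_odp *umem_odp, u64 va, u64 len,
			       u64 access_mask, bool do_fault)
{
	int npages;

	/*
	 * Internally this retries on mmu_interval_read_begin()/
	 * mmu_interval_read_retry() against the sequence number that
	 * rxe_ib_invalidate_range() advances while holding umem_mutex, so a
	 * concurrent invalidation cannot leave stale entries behind.
	 * On success it returns with umem_mutex held.
	 */
	npages = ib_umem_odp_map_dma_and_lock(umem_odp, va, len, access_mask,
					      do_fault);
	if (npages < 0)
		return npages;

	/* ... use the now-valid pages while umem_mutex is held ... */

	mutex_unlock(&umem_odp->umem_mutex);
	return 0;
}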