iommu/rockchip: add irq mask cb
Change-Id: I5d2d565340395fb3573c7f25d764ed9f66333aeb
Signed-off-by: Simon Xue <xxm@rock-chips.com>
This commit is contained in:
@@ -699,6 +699,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
|
||||
struct rk_iommu *iommu = dev_id;
|
||||
u32 status;
|
||||
u32 int_status;
|
||||
u32 int_mask;
|
||||
dma_addr_t iova;
|
||||
irqreturn_t ret = IRQ_NONE;
|
||||
int i, err;
|
||||
@@ -743,7 +744,15 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
|
||||
dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
|
||||
|
||||
rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
|
||||
rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
|
||||
|
||||
/*
|
||||
* Master may clear the int_mask to prevent iommu
|
||||
* re-enter interrupt when mapping. So we postpone
|
||||
* sending PAGE_FAULT_DONE command to mapping finished.
|
||||
*/
|
||||
int_mask = rk_iommu_read(iommu->bases[i], RK_MMU_INT_MASK);
|
||||
if (int_mask != 0x0)
|
||||
rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
|
||||
}
|
||||
|
||||
if (int_status & RK_MMU_IRQ_BUS_ERROR)
|
||||
@@ -1570,6 +1579,37 @@ static int rk_iommu_of_xlate(struct device *dev,
|
||||
return 0;
|
||||
}
|
||||
|
||||
void rk_iommu_mask_irq(struct device *dev)
|
||||
{
|
||||
struct rk_iommu *iommu = rk_iommu_from_dev(dev);
|
||||
int i;
|
||||
|
||||
if (!iommu)
|
||||
return;
|
||||
|
||||
for (i = 0; i < iommu->num_mmu; i++)
|
||||
rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
|
||||
}
|
||||
EXPORT_SYMBOL(rk_iommu_mask_irq);
|
||||
|
||||
/*
 * rk_iommu_unmask_irq() - re-enable interrupt delivery on every MMU
 * instance of the IOMMU attached to @dev; counterpart of
 * rk_iommu_mask_irq().
 *
 * Intended to be called by a master once it has finished mapping while
 * the IOMMU was held in the page-fault state with interrupts masked.
 * The per-MMU register sequence below is order-sensitive: the TLB is
 * zapped first, interrupts are unmasked, and only then is the pending
 * page fault acknowledged.
 * Silently does nothing when @dev has no associated rk_iommu.
 */
void rk_iommu_unmask_irq(struct device *dev)
{
	struct rk_iommu *iommu = rk_iommu_from_dev(dev);
	int i;

	if (!iommu)
		return;

	for (i = 0; i < iommu->num_mmu; i++) {
		/* Need to zap tlb in case of mapping during pagefault */
		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
		/* Restore the full interrupt mask (undo the 0 written by mask). */
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
		/* Leave iommu in pagefault state until mapping finished */
		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
	}
}
EXPORT_SYMBOL(rk_iommu_unmask_irq);
|
||||
|
||||
static const struct iommu_ops rk_iommu_ops = {
|
||||
.domain_alloc = rk_iommu_domain_alloc,
|
||||
.domain_free = rk_iommu_domain_free,
|
||||
|
||||
@@ -579,6 +579,8 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
|
||||
extern struct iommu_group *pci_device_group(struct device *dev);
|
||||
/* Generic device grouping function */
|
||||
extern struct iommu_group *generic_device_group(struct device *dev);
|
||||
extern void rk_iommu_mask_irq(struct device *dev);
|
||||
extern void rk_iommu_unmask_irq(struct device *dev);
|
||||
/* FSL-MC device grouping function */
|
||||
struct iommu_group *fsl_mc_device_group(struct device *dev);
|
||||
|
||||
@@ -1087,6 +1089,14 @@ static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* No-op stub when CONFIG_IOMMU_API is not enabled (see #endif below). */
static inline void rk_iommu_mask_irq(struct device *dev)
{
}
|
||||
|
||||
/* No-op stub when CONFIG_IOMMU_API is not enabled (see #endif below). */
static inline void rk_iommu_unmask_irq(struct device *dev)
{
}
|
||||
#endif /* CONFIG_IOMMU_API */
|
||||
|
||||
/**
|
||||
|
||||
Reference in New Issue
Block a user