diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 2adc7882a023..fe647bb1db8f 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -25,6 +25,7 @@
 static void init_iova_rcaches(struct iova_domain *iovad);
 static void free_iova_rcaches(struct iova_domain *iovad);
 static void fq_destroy_all_entries(struct iova_domain *iovad);
 static void fq_flush_timeout(struct timer_list *t);
+static void iova_dump(struct iova_domain *iovad);
 
 void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
@@ -523,8 +524,10 @@ retry:
 	if (!new_iova) {
 		unsigned int cpu;
 
-		if (!flush_rcache)
+		if (!flush_rcache) {
+			iova_dump(iovad);
 			return 0;
+		}
 
 		/* Try replenishing IOVAs by flushing rcache. */
 		flush_rcache = false;
@@ -887,6 +890,82 @@ struct iova_cpu_rcache {
 	struct iova_magazine *prev;
 };
 
+static void iova_dump(struct iova_domain *iovad)
+{
+	struct iova_cpu_rcache *cpu_rcache;
+	struct iova_rcache *rcache;
+	struct iova *iova, *t;
+	struct iova_magazine *mag;
+	unsigned long flags, rflags;
+	unsigned int cpu;
+	int i = 0, j, k;
+
+	/* Walk the rbtree of currently allocated ranges. */
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+	rbtree_postorder_for_each_entry_safe(iova, t, &iovad->rbroot, node) {
+		dma_addr_t start = (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
+		dma_addr_t end = (dma_addr_t)iova->pfn_hi << iova_shift(iovad);
+
+		if (iova->pfn_lo == IOVA_ANCHOR)
+			continue;
+
+		pr_info("%4d: [%pad..%pad] (%4lu - %4lu)MiB\n",
+			i++, &start, &end,
+			iova->pfn_lo >> (20 - PAGE_SHIFT),
+			iova->pfn_hi >> (20 - PAGE_SHIFT));
+	}
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+
+	/* Dump the magazines parked in each size class's depot. */
+	pr_info("global cache\n");
+	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+		rcache = &iovad->rcaches[i];
+		spin_lock_irqsave(&rcache->lock, rflags);
+		for (j = 0; j < rcache->depot_size; ++j) {
+			mag = rcache->depot[j];
+			spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+			for (k = 0; k < mag->size; ++k) {
+				struct iova *v = private_find_iova(iovad, mag->pfns[k]);
+				dma_addr_t s, e;
+
+				if (!v)
+					continue;
+
+				s = (dma_addr_t)v->pfn_lo << iova_shift(iovad);
+				e = (dma_addr_t)v->pfn_hi << iova_shift(iovad);
+				pr_info("[%pad..%pad]\n", &s, &e);
+			}
+			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+		}
+		spin_unlock_irqrestore(&rcache->lock, rflags);
+	}
+
+	/* Dump each CPU's loaded magazine. */
+	for_each_online_cpu(cpu) {
+		pr_info("cpu%d cache\n", cpu);
+		for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+			rcache = &iovad->rcaches[i];
+			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
+			spin_lock_irqsave(&cpu_rcache->lock, rflags);
+			mag = cpu_rcache->loaded;
+			spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+			for (k = 0; k < mag->size; ++k) {
+				struct iova *v = private_find_iova(iovad, mag->pfns[k]);
+				dma_addr_t s, e;
+
+				if (!v)
+					continue;
+
+				s = (dma_addr_t)v->pfn_lo << iova_shift(iovad);
+				e = (dma_addr_t)v->pfn_hi << iova_shift(iovad);
+				pr_info("[%pad..%pad]\n", &s, &e);
+			}
+			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+			spin_unlock_irqrestore(&cpu_rcache->lock, rflags);
+		}
+	}
+}
+
 static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
 {
 	return kzalloc(sizeof(struct iova_magazine), flags);
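Note: the program below is a minimal userspace sketch, not part of the patch. It only illustrates the address arithmetic iova_dump relies on: a PFN becomes a bus address when shifted left by iova_shift() (PAGE_SHIFT for a 4 KiB granule), and becomes a MiB figure when shifted right by (20 - PAGE_SHIFT). The hard-coded PAGE_SHIFT and the sample PFN range are assumptions for illustration.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages; illustrative only */

int main(void)
{
	/* Sample PFN range, standing in for iova->pfn_lo / iova->pfn_hi. */
	unsigned long pfn_lo = 0x10000, pfn_hi = 0x13fff;

	/* Bus addresses, as computed for the %pad fields in the dump. */
	uint64_t start = (uint64_t)pfn_lo << PAGE_SHIFT;
	uint64_t end = (uint64_t)pfn_hi << PAGE_SHIFT;

	/* MiB figures, as computed for the "(%4lu - %4lu)MiB" fields. */
	printf("[%#llx..%#llx] (%4lu - %4lu)MiB\n",
	       (unsigned long long)start, (unsigned long long)end,
	       pfn_lo >> (20 - PAGE_SHIFT), pfn_hi >> (20 - PAGE_SHIFT));
	return 0;
}

For the sample range this prints "[0x10000000..0x13fff000] ( 256 -  319)MiB", matching the rbtree dump format above (note that pfn_hi << shift is the address of the range's last page, not its inclusive end).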