iommu/dma-iommu: Handle deferred devices
Handle devices which defer their attach to the iommu in the dma-iommu api

Signed-off-by: Tom Murphy <murphyt7@tcd.ie>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 781ca2de89
commit 795bbbb9b6
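For context, the hunks below gate each DMA mapping path on a new iommu_dma_deferred_attach() helper, which only does real work in a kdump kernel and only for devices whose IOMMU driver reports a deferred attach via the ->is_attach_deferred() callback. A minimal, hypothetical sketch of the driver side that this helper consults is shown here; the "my_iommu" names are placeholders and are not part of this commit (real drivers typically key the decision off per-device state rather than deferring unconditionally):

#include <linux/crash_dump.h>
#include <linux/iommu.h>

/* Hypothetical driver callback: defer every attach while running as the
 * kdump kernel, so the first DMA API call performs the real attach. */
static bool my_iommu_is_attach_deferred(struct iommu_domain *domain,
					struct device *dev)
{
	return is_kdump_kernel();
}

static const struct iommu_ops my_iommu_ops = {
	/* ... other callbacks ... */
	.is_attach_deferred	= my_iommu_is_attach_deferred,
};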
@@ -22,6 +22,7 @@
 #include <linux/pci.h>
 #include <linux/scatterlist.h>
 #include <linux/vmalloc.h>
+#include <linux/crash_dump.h>
 
 struct iommu_dma_msi_page {
 	struct list_head list;
@@ -353,6 +354,21 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	return iova_reserve_iommu_regions(dev, domain);
 }
 
+static int iommu_dma_deferred_attach(struct device *dev,
+		struct iommu_domain *domain)
+{
+	const struct iommu_ops *ops = domain->ops;
+
+	if (!is_kdump_kernel())
+		return 0;
+
+	if (unlikely(ops->is_attach_deferred &&
+			ops->is_attach_deferred(domain, dev)))
+		return iommu_attach_device(domain, dev);
+
+	return 0;
+}
+
 /**
  * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
  *                    page flags.
@@ -470,6 +486,9 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 	size_t iova_off = iova_offset(iovad, phys);
 	dma_addr_t iova;
 
+	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+		return DMA_MAPPING_ERROR;
+
 	size = iova_align(iovad, size + iova_off);
 
 	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
@@ -579,6 +598,9 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 
 	*dma_handle = DMA_MAPPING_ERROR;
 
+	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+		return NULL;
+
 	min_size = alloc_sizes & -alloc_sizes;
 	if (min_size < PAGE_SIZE) {
 		min_size = PAGE_SIZE;
@@ -711,7 +733,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 	int prot = dma_info_to_prot(dir, coherent, attrs);
 	dma_addr_t dma_handle;
 
-	dma_handle =__iommu_dma_map(dev, phys, size, prot);
+	dma_handle = __iommu_dma_map(dev, phys, size, prot);
 	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 	    dma_handle != DMA_MAPPING_ERROR)
 		arch_sync_dma_for_device(dev, phys, size, dir);
@@ -821,6 +843,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	unsigned long mask = dma_get_seg_boundary(dev);
 	int i;
 
+	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+		return 0;
+
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
 