IOMMU Updates for Linux v4.1
Not much this time, but the changes include:

 * Moving domain allocation into the iommu drivers to prepare for the
   introduction of default domains for devices
 * Fixing the IO page-table code in the AMD IOMMU driver to correctly
   encode large page sizes
 * Extension of the PCI support in the ARM-SMMU driver
 * Various fixes and cleanups

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJVNFIPAAoJECvwRC2XARrj4v8QAMVsPJ+kmnLvqGDkO9v2i9z6
sFX27h55HhK3Pgb5aEmEhvZd0Eec22KtuADr92LsRSjskgA4FgrzzSlo8w7+MbwM
dtowij+5Bzx/jEeexM5gog0ZA9Brl725KSYBmwJIAroKAtl3YXsIA4TO7X/JtXJm
0qWbCxLs9CX5uWyJawkeDl8UAaZYb8AHKv1UhJt8Z5yajM/qITMULi51g2Bgh8kx
YaRHeZNj+mFQqb6IlNkmOhILN+dbTdxQREp+aJs1alGdkBGlJyfo6eK4weNOpA4x
gc8EXUWZzj1GEPyWMpA/ZMzPzCbj9M6wTeXqRiTq31AMV10zcy545uYcLWks680M
CYvWTmjeCvwsbuaj9cn+efa47foH2UoeXxBmXWOJDv4WxcjE1ejmlmSd8WYfwkh9
hIkMzD8tW2iZf3ssnjCeQLa7f6ydL2P4cpnK2JH+N7hN9VOASAlciezroFxtCjU+
18T7ozgUTbOXZZomBX7OcGQ8ElXMiHB/uaCyNO64yVzApsUnQfpHzcRI5OavOYn5
dznjrzvNLCwHs3QFI4R7rsmIfPkOM0g5nY5drGwJ23+F+rVpLmpWVPR5hqT7a1HM
tJVmzces6HzOu7P1Mo0IwvNbZEmNBGTHYjGtWs6e79MQxdriFT4I+DwvFOy7GUq/
Is2b+HPwWhiWJHQXLTT2
=FMxH
-----END PGP SIGNATURE-----

Merge tag 'iommu-updates-v4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU updates from Joerg Roedel:
 "Not much this time, but the changes include:

   - moving domain allocation into the iommu drivers to prepare for the
     introduction of default domains for devices

   - fixing the IO page-table code in the AMD IOMMU driver to correctly
     encode large page sizes

   - extension of the PCI support in the ARM-SMMU driver

   - various fixes and cleanups"

* tag 'iommu-updates-v4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (34 commits)
  iommu/amd: Correctly encode huge pages in iommu page tables
  iommu/amd: Optimize amd_iommu_iova_to_phys for new fetch_pte interface
  iommu/amd: Optimize alloc_new_range for new fetch_pte interface
  iommu/amd: Optimize iommu_unmap_page for new fetch_pte interface
  iommu/amd: Return the pte page-size in fetch_pte
  iommu/amd: Add support for contiguous dma allocator
  iommu/amd: Don't allocate with __GFP_ZERO in alloc_coherent
  iommu/amd: Ignore BUS_NOTIFY_UNBOUND_DRIVER event
  iommu/amd: Use BUS_NOTIFY_REMOVED_DEVICE
  iommu/tegra: smmu: Compute PFN mask at runtime
  iommu/tegra: gart: Set aperture at domain initialization time
  iommu/tegra: Setup aperture
  iommu: Remove domain_init and domain_free iommu_ops
  iommu/fsl: Make use of domain_alloc and domain_free
  iommu/rockchip: Make use of domain_alloc and domain_free
  iommu/ipmmu-vmsa: Make use of domain_alloc and domain_free
  iommu/shmobile: Make use of domain_alloc and domain_free
  iommu/msm: Make use of domain_alloc and domain_free
  iommu/tegra-gart: Make use of domain_alloc and domain_free
  iommu/tegra-smmu: Make use of domain_alloc and domain_free
  ...
commit 79319a052c
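The common thread in the driver patches below is the new domain_alloc/domain_free
interface in struct iommu_ops: instead of initialising a core-allocated domain and
hanging driver state off domain->priv, each driver now allocates its own domain
structure, embeds the generic struct iommu_domain in it, and converts back with
container_of(). A minimal, hypothetical sketch of that pattern (the "my_*" names are
illustrative only, not taken from any driver in the diff):

struct my_domain {				/* hypothetical driver-private domain */
	/* ... driver-specific state ... */
	struct iommu_domain domain;		/* embedded generic handle */
};

static struct my_domain *to_my_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct my_domain, domain);
}

static struct iommu_domain *my_domain_alloc(unsigned type)
{
	struct my_domain *d;

	if (type != IOMMU_DOMAIN_UNMANAGED)	/* only unmanaged domains for now */
		return NULL;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	return d ? &d->domain : NULL;
}

static void my_domain_free(struct iommu_domain *dom)
{
	kfree(to_my_domain(dom));
}

static const struct iommu_ops my_iommu_ops = {
	/* .domain_init / .domain_destroy are replaced by: */
	.domain_alloc	= my_domain_alloc,
	.domain_free	= my_domain_free,
	/* ... map/unmap, attach_dev, etc. ... */
};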
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -33,6 +33,7 @@
 #include <linux/export.h>
 #include <linux/irq.h>
 #include <linux/msi.h>
+#include <linux/dma-contiguous.h>
 #include <asm/irq_remapping.h>
 #include <asm/io_apic.h>
 #include <asm/apic.h>
@@ -126,6 +127,11 @@ static int __init alloc_passthrough_domain(void);
  *
  ****************************************************************************/
 
+static struct protection_domain *to_pdomain(struct iommu_domain *dom)
+{
+	return container_of(dom, struct protection_domain, domain);
+}
+
 static struct iommu_dev_data *alloc_dev_data(u16 devid)
 {
 	struct iommu_dev_data *dev_data;
@@ -1321,7 +1327,9 @@ static u64 *alloc_pte(struct protection_domain *domain,
  * This function checks if there is a PTE for a given dma address. If
  * there is one, it returns the pointer to it.
  */
-static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
+static u64 *fetch_pte(struct protection_domain *domain,
+		      unsigned long address,
+		      unsigned long *page_size)
 {
 	int level;
 	u64 *pte;
@@ -1329,8 +1337,9 @@ static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
 	if (address > PM_LEVEL_SIZE(domain->mode))
 		return NULL;
 
 	level	   =  domain->mode - 1;
 	pte	   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
+	*page_size =  PTE_LEVEL_PAGE_SIZE(level);
 
 	while (level > 0) {
 
@@ -1339,19 +1348,9 @@ static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
 			return NULL;
 
 		/* Large PTE */
-		if (PM_PTE_LEVEL(*pte) == 0x07) {
-			unsigned long pte_mask, __pte;
-
-			/*
-			 * If we have a series of large PTEs, make
-			 * sure to return a pointer to the first one.
-			 */
-			pte_mask = PTE_PAGE_SIZE(*pte);
-			pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
-			__pte    = ((unsigned long)pte) & pte_mask;
-
-			return (u64 *)__pte;
-		}
+		if (PM_PTE_LEVEL(*pte) == 7 ||
+		    PM_PTE_LEVEL(*pte) == 0)
+			break;
 
 		/* No level skipping support yet */
 		if (PM_PTE_LEVEL(*pte) != level)
@@ -1360,8 +1359,21 @@ static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
 		level -= 1;
 
 		/* Walk to the next level */
 		pte	   = IOMMU_PTE_PAGE(*pte);
 		pte	   = &pte[PM_LEVEL_INDEX(level, address)];
+		*page_size = PTE_LEVEL_PAGE_SIZE(level);
+	}
+
+	if (PM_PTE_LEVEL(*pte) == 0x07) {
+		unsigned long pte_mask;
+
+		/*
+		 * If we have a series of large PTEs, make
+		 * sure to return a pointer to the first one.
+		 */
+		*page_size = pte_mask = PTE_PAGE_SIZE(*pte);
+		pte_mask   = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
+		pte        = (u64 *)(((unsigned long)pte) & pte_mask);
 	}
 
 	return pte;
@@ -1383,13 +1395,14 @@ static int iommu_map_page(struct protection_domain *dom,
 	u64 __pte, *pte;
 	int i, count;
 
+	BUG_ON(!IS_ALIGNED(bus_addr, page_size));
+	BUG_ON(!IS_ALIGNED(phys_addr, page_size));
+
 	if (!(prot & IOMMU_PROT_MASK))
 		return -EINVAL;
 
-	bus_addr  = PAGE_ALIGN(bus_addr);
-	phys_addr = PAGE_ALIGN(phys_addr);
-	count     = PAGE_SIZE_PTE_COUNT(page_size);
-	pte       = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
+	count = PAGE_SIZE_PTE_COUNT(page_size);
+	pte   = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
 
 	if (!pte)
 		return -ENOMEM;
@@ -1398,7 +1411,7 @@ static int iommu_map_page(struct protection_domain *dom,
 		if (IOMMU_PTE_PRESENT(pte[i]))
 			return -EBUSY;
 
-	if (page_size > PAGE_SIZE) {
+	if (count > 1) {
 		__pte = PAGE_SIZE_PTE(phys_addr, page_size);
 		__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
 	} else
@@ -1421,7 +1434,8 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
 				      unsigned long bus_addr,
 				      unsigned long page_size)
 {
-	unsigned long long unmap_size, unmapped;
+	unsigned long long unmapped;
+	unsigned long unmap_size;
 	u64 *pte;
 
 	BUG_ON(!is_power_of_2(page_size));
@@ -1430,28 +1444,12 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
 
 	while (unmapped < page_size) {
 
-		pte = fetch_pte(dom, bus_addr);
-
-		if (!pte) {
-			/*
-			 * No PTE for this address
-			 * move forward in 4kb steps
-			 */
-			unmap_size = PAGE_SIZE;
-		} else if (PM_PTE_LEVEL(*pte) == 0) {
-			/* 4kb PTE found for this address */
-			unmap_size = PAGE_SIZE;
-			*pte       = 0ULL;
-		} else {
-			int count, i;
-
-			/* Large PTE found which maps this address */
-			unmap_size = PTE_PAGE_SIZE(*pte);
-
-			/* Only unmap from the first pte in the page */
-			if ((unmap_size - 1) & bus_addr)
-				break;
-			count = PAGE_SIZE_PTE_COUNT(unmap_size);
+		pte = fetch_pte(dom, bus_addr, &unmap_size);
+
+		if (pte) {
+			int i, count;
+
+			count = PAGE_SIZE_PTE_COUNT(unmap_size);
 			for (i = 0; i < count; i++)
 				pte[i] = 0ULL;
 		}
@@ -1599,7 +1597,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
 {
 	int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
 	struct amd_iommu *iommu;
-	unsigned long i, old_size;
+	unsigned long i, old_size, pte_pgsize;
 
 #ifdef CONFIG_IOMMU_STRESS
 	populate = false;
@@ -1672,12 +1670,13 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
 	 */
 	for (i = dma_dom->aperture[index]->offset;
 	     i < dma_dom->aperture_size;
-	     i += PAGE_SIZE) {
-		u64 *pte = fetch_pte(&dma_dom->domain, i);
+	     i += pte_pgsize) {
+		u64 *pte = fetch_pte(&dma_dom->domain, i, &pte_pgsize);
 		if (!pte || !IOMMU_PTE_PRESENT(*pte))
 			continue;
 
-		dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1);
+		dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT,
+					  pte_pgsize >> 12);
 	}
 
 	update_domain(&dma_dom->domain);
@@ -2422,16 +2421,6 @@ static int device_change_notifier(struct notifier_block *nb,
 	dev_data = get_dev_data(dev);
 
 	switch (action) {
-	case BUS_NOTIFY_UNBOUND_DRIVER:
-
-		domain = domain_for_device(dev);
-
-		if (!domain)
-			goto out;
-		if (dev_data->passthrough)
-			break;
-		detach_device(dev);
-		break;
 	case BUS_NOTIFY_ADD_DEVICE:
 
 		iommu_init_device(dev);
@@ -2467,7 +2456,7 @@ static int device_change_notifier(struct notifier_block *nb,
 		dev->archdata.dma_ops = &amd_iommu_dma_ops;
 
 		break;
-	case BUS_NOTIFY_DEL_DEVICE:
+	case BUS_NOTIFY_REMOVED_DEVICE:
 
 		iommu_uninit_device(dev);
 
@@ -2923,38 +2912,42 @@ static void *alloc_coherent(struct device *dev, size_t size,
 			    dma_addr_t *dma_addr, gfp_t flag,
 			    struct dma_attrs *attrs)
 {
-	unsigned long flags;
-	void *virt_addr;
-	struct protection_domain *domain;
-	phys_addr_t paddr;
 	u64 dma_mask = dev->coherent_dma_mask;
+	struct protection_domain *domain;
+	unsigned long flags;
+	struct page *page;
 
 	INC_STATS_COUNTER(cnt_alloc_coherent);
 
 	domain = get_domain(dev);
 	if (PTR_ERR(domain) == -EINVAL) {
-		virt_addr = (void *)__get_free_pages(flag, get_order(size));
-		*dma_addr = __pa(virt_addr);
-		return virt_addr;
+		page = alloc_pages(flag, get_order(size));
+		*dma_addr = page_to_phys(page);
+		return page_address(page);
 	} else if (IS_ERR(domain))
 		return NULL;
 
+	size	  = PAGE_ALIGN(size);
 	dma_mask  = dev->coherent_dma_mask;
 	flag     &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
-	flag     |= __GFP_ZERO;
 
-	virt_addr = (void *)__get_free_pages(flag, get_order(size));
-	if (!virt_addr)
-		return NULL;
+	page = alloc_pages(flag | __GFP_NOWARN, get_order(size));
+	if (!page) {
+		if (!(flag & __GFP_WAIT))
+			return NULL;
 
-	paddr = virt_to_phys(virt_addr);
+		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+						 get_order(size));
+		if (!page)
+			return NULL;
+	}
 
 	if (!dma_mask)
 		dma_mask = *dev->dma_mask;
 
 	spin_lock_irqsave(&domain->lock, flags);
 
-	*dma_addr = __map_single(dev, domain->priv, paddr,
+	*dma_addr = __map_single(dev, domain->priv, page_to_phys(page),
 				 size, DMA_BIDIRECTIONAL, true, dma_mask);
 
 	if (*dma_addr == DMA_ERROR_CODE) {
@@ -2966,11 +2959,12 @@ static void *alloc_coherent(struct device *dev, size_t size,
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 
-	return virt_addr;
+	return page_address(page);
 
 out_free:
 
-	free_pages((unsigned long)virt_addr, get_order(size));
+	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+		__free_pages(page, get_order(size));
 
 	return NULL;
 }
@@ -2982,11 +2976,15 @@ static void free_coherent(struct device *dev, size_t size,
 			  void *virt_addr, dma_addr_t dma_addr,
 			  struct dma_attrs *attrs)
 {
-	unsigned long flags;
 	struct protection_domain *domain;
+	unsigned long flags;
+	struct page *page;
 
 	INC_STATS_COUNTER(cnt_free_coherent);
 
+	page = virt_to_page(virt_addr);
+	size = PAGE_ALIGN(size);
+
 	domain = get_domain(dev);
 	if (IS_ERR(domain))
 		goto free_mem;
@@ -3000,7 +2998,8 @@ static void free_coherent(struct device *dev, size_t size,
 	spin_unlock_irqrestore(&domain->lock, flags);
 
 free_mem:
-	free_pages((unsigned long)virt_addr, get_order(size));
+	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+		__free_pages(page, get_order(size));
 }
 
 /*
@@ -3236,42 +3235,45 @@ static int __init alloc_passthrough_domain(void)
 
 	return 0;
 }
-static int amd_iommu_domain_init(struct iommu_domain *dom)
+
+static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
+{
+	struct protection_domain *pdomain;
+
+	/* We only support unmanaged domains for now */
+	if (type != IOMMU_DOMAIN_UNMANAGED)
+		return NULL;
+
+	pdomain = protection_domain_alloc();
+	if (!pdomain)
+		goto out_free;
+
+	pdomain->mode    = PAGE_MODE_3_LEVEL;
+	pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+	if (!pdomain->pt_root)
+		goto out_free;
+
+	pdomain->domain.geometry.aperture_start = 0;
+	pdomain->domain.geometry.aperture_end   = ~0ULL;
+	pdomain->domain.geometry.force_aperture = true;
+
+	return &pdomain->domain;
+
+out_free:
+	protection_domain_free(pdomain);
+
+	return NULL;
+}
+
+static void amd_iommu_domain_free(struct iommu_domain *dom)
 {
 	struct protection_domain *domain;
 
-	domain = protection_domain_alloc();
-	if (!domain)
-		goto out_free;
-
-	domain->mode    = PAGE_MODE_3_LEVEL;
-	domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
-	if (!domain->pt_root)
-		goto out_free;
-
-	domain->iommu_domain = dom;
-
-	dom->priv = domain;
-
-	dom->geometry.aperture_start = 0;
-	dom->geometry.aperture_end   = ~0ULL;
-	dom->geometry.force_aperture = true;
-
-	return 0;
-
-out_free:
-	protection_domain_free(domain);
-
-	return -ENOMEM;
-}
-
-static void amd_iommu_domain_destroy(struct iommu_domain *dom)
-{
-	struct protection_domain *domain = dom->priv;
-
-	if (!domain)
+	if (!dom)
 		return;
 
+	domain = to_pdomain(dom);
+
 	if (domain->dev_cnt > 0)
 		cleanup_domain(domain);
 
@@ -3284,8 +3286,6 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
 		free_gcr3_table(domain);
 
 	protection_domain_free(domain);
-
-	dom->priv = NULL;
 }
 
 static void amd_iommu_detach_device(struct iommu_domain *dom,
@@ -3313,7 +3313,7 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
 static int amd_iommu_attach_device(struct iommu_domain *dom,
 				   struct device *dev)
 {
-	struct protection_domain *domain = dom->priv;
+	struct protection_domain *domain = to_pdomain(dom);
 	struct iommu_dev_data *dev_data;
 	struct amd_iommu *iommu;
 	int ret;
@@ -3340,7 +3340,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 			 phys_addr_t paddr, size_t page_size, int iommu_prot)
 {
-	struct protection_domain *domain = dom->priv;
+	struct protection_domain *domain = to_pdomain(dom);
 	int prot = 0;
 	int ret;
 
@@ -3362,7 +3362,7 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 			   size_t page_size)
 {
-	struct protection_domain *domain = dom->priv;
+	struct protection_domain *domain = to_pdomain(dom);
 	size_t unmap_size;
 
 	if (domain->mode == PAGE_MODE_NONE)
@@ -3380,28 +3380,22 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
 					  dma_addr_t iova)
 {
-	struct protection_domain *domain = dom->priv;
-	unsigned long offset_mask;
-	phys_addr_t paddr;
+	struct protection_domain *domain = to_pdomain(dom);
+	unsigned long offset_mask, pte_pgsize;
 	u64 *pte, __pte;
 
 	if (domain->mode == PAGE_MODE_NONE)
 		return iova;
 
-	pte = fetch_pte(domain, iova);
+	pte = fetch_pte(domain, iova, &pte_pgsize);
 
 	if (!pte || !IOMMU_PTE_PRESENT(*pte))
 		return 0;
 
-	if (PM_PTE_LEVEL(*pte) == 0)
-		offset_mask = PAGE_SIZE - 1;
-	else
-		offset_mask = PTE_PAGE_SIZE(*pte) - 1;
-
-	__pte = *pte & PM_ADDR_MASK;
-	paddr = (__pte & ~offset_mask) | (iova & offset_mask);
-
-	return paddr;
+	offset_mask = pte_pgsize - 1;
+	__pte	    = *pte & PM_ADDR_MASK;
+
+	return (__pte & ~offset_mask) | (iova & offset_mask);
 }
 
 static bool amd_iommu_capable(enum iommu_cap cap)
@@ -3420,8 +3414,8 @@ static bool amd_iommu_capable(enum iommu_cap cap)
 
 static const struct iommu_ops amd_iommu_ops = {
 	.capable = amd_iommu_capable,
-	.domain_init = amd_iommu_domain_init,
-	.domain_destroy = amd_iommu_domain_destroy,
+	.domain_alloc = amd_iommu_domain_alloc,
+	.domain_free  = amd_iommu_domain_free,
 	.attach_dev = amd_iommu_attach_device,
 	.detach_dev = amd_iommu_detach_device,
 	.map = amd_iommu_map,
@@ -3483,7 +3477,7 @@ EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
 
 void amd_iommu_domain_direct_map(struct iommu_domain *dom)
 {
-	struct protection_domain *domain = dom->priv;
+	struct protection_domain *domain = to_pdomain(dom);
 	unsigned long flags;
 
 	spin_lock_irqsave(&domain->lock, flags);
@@ -3504,7 +3498,7 @@ EXPORT_SYMBOL(amd_iommu_domain_direct_map);
 
 int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
 {
-	struct protection_domain *domain = dom->priv;
+	struct protection_domain *domain = to_pdomain(dom);
 	unsigned long flags;
 	int levels, ret;
 
@@ -3616,7 +3610,7 @@ static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
 int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
 			 u64 address)
 {
-	struct protection_domain *domain = dom->priv;
+	struct protection_domain *domain = to_pdomain(dom);
 	unsigned long flags;
 	int ret;
 
@@ -3638,7 +3632,7 @@ static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
 
 int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
 {
-	struct protection_domain *domain = dom->priv;
+	struct protection_domain *domain = to_pdomain(dom);
 	unsigned long flags;
 	int ret;
 
@@ -3718,7 +3712,7 @@ static int __clear_gcr3(struct protection_domain *domain, int pasid)
 int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
 			      unsigned long cr3)
 {
-	struct protection_domain *domain = dom->priv;
+	struct protection_domain *domain = to_pdomain(dom);
 	unsigned long flags;
 	int ret;
 
@@ -3732,7 +3726,7 @@ EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
 
 int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
 {
-	struct protection_domain *domain = dom->priv;
+	struct protection_domain *domain = to_pdomain(dom);
 	unsigned long flags;
 	int ret;
 
@@ -3765,17 +3759,17 @@ EXPORT_SYMBOL(amd_iommu_complete_ppr);
 
 struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
 {
-	struct protection_domain *domain;
+	struct protection_domain *pdomain;
 
-	domain = get_domain(&pdev->dev);
-	if (IS_ERR(domain))
+	pdomain = get_domain(&pdev->dev);
+	if (IS_ERR(pdomain))
 		return NULL;
 
 	/* Only return IOMMUv2 domains */
-	if (!(domain->flags & PD_IOMMUV2_MASK))
+	if (!(pdomain->flags & PD_IOMMUV2_MASK))
 		return NULL;
 
-	return domain->iommu_domain;
+	return &pdomain->domain;
 }
 EXPORT_SYMBOL(amd_iommu_get_v2_domain);
 
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -282,6 +282,12 @@
 #define PTE_PAGE_SIZE(pte) \
 	(1ULL << (1 + ffz(((pte) | 0xfffULL))))
 
+/*
+ * Takes a page-table level and returns the default page-size for this level
+ */
+#define PTE_LEVEL_PAGE_SIZE(level)			\
+	(1ULL << (12 + (9 * (level))))
+
 #define IOMMU_PTE_P  (1ULL << 0)
 #define IOMMU_PTE_TV (1ULL << 1)
 #define IOMMU_PTE_U  (1ULL << 59)
@@ -400,6 +406,8 @@ struct iommu_domain;
 struct protection_domain {
 	struct list_head list;  /* for list of all protection domains */
 	struct list_head dev_list; /* List of all devices in this domain */
+	struct iommu_domain domain; /* generic domain handle used by
+				       iommu core code */
 	spinlock_t lock;	/* mostly used to lock the page table*/
 	struct mutex api_lock;	/* protect page tables in the iommu-api path */
 	u16 id;			/* the domain id written to the device table */
@@ -411,10 +419,7 @@ struct protection_domain {
 	bool updated;		/* complete domain flush required */
 	unsigned dev_cnt;	/* devices assigned to this domain */
 	unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
 	void *priv;		/* private data */
-	struct iommu_domain *iommu_domain; /* Pointer to generic
-					      domain structure */
-
 };
 
 /*
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -417,7 +417,7 @@ static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
 	dev_state = pasid_state->device_state;
 	run_inv_ctx_cb = !pasid_state->invalid;
 
-	if (run_inv_ctx_cb && pasid_state->device_state->inv_ctx_cb)
+	if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
 		dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);
 
 	unbind_pasid(pasid_state);
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -343,6 +343,7 @@ struct arm_smmu_domain {
 	struct arm_smmu_cfg		cfg;
 	enum arm_smmu_domain_stage	stage;
 	struct mutex			init_mutex; /* Protects smmu pointer */
+	struct iommu_domain		domain;
 };
 
 static struct iommu_ops arm_smmu_ops;
@@ -360,6 +361,11 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
 	{ 0, NULL},
 };
 
+static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
+{
+	return container_of(dom, struct arm_smmu_domain, domain);
+}
+
 static void parse_driver_options(struct arm_smmu_device *smmu)
 {
 	int i = 0;
@@ -645,7 +651,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 	u32 fsr, far, fsynr, resume;
 	unsigned long iova;
 	struct iommu_domain *domain = dev;
-	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	void __iomem *cb_base;
@@ -730,6 +736,20 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
 	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
 	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 
+	if (smmu->version > ARM_SMMU_V1) {
+		/*
+		 * CBA2R.
+		 * *Must* be initialised before CBAR thanks to VMID16
+		 * architectural oversight affected some implementations.
+		 */
+#ifdef CONFIG_64BIT
+		reg = CBA2R_RW64_64BIT;
+#else
+		reg = CBA2R_RW64_32BIT;
+#endif
+		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
+	}
+
 	/* CBAR */
 	reg = cfg->cbar;
 	if (smmu->version == ARM_SMMU_V1)
@@ -747,16 +767,6 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
 	}
 	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
 
-	if (smmu->version > ARM_SMMU_V1) {
-		/* CBA2R */
-#ifdef CONFIG_64BIT
-		reg = CBA2R_RW64_64BIT;
-#else
-		reg = CBA2R_RW64_32BIT;
-#endif
-		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
-	}
-
 	/* TTBRs */
 	if (stage1) {
 		reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
@@ -836,7 +846,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	struct io_pgtable_ops *pgtbl_ops;
 	struct io_pgtable_cfg pgtbl_cfg;
 	enum io_pgtable_fmt fmt;
-	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 
 	mutex_lock(&smmu_domain->init_mutex);
@@ -958,7 +968,7 @@ out_unlock:
 
 static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
 {
-	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 	void __iomem *cb_base;
@@ -985,10 +995,12 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
 	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
 }
 
-static int arm_smmu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
 {
 	struct arm_smmu_domain *smmu_domain;
 
+	if (type != IOMMU_DOMAIN_UNMANAGED)
+		return NULL;
 	/*
 	 * Allocate the domain and initialise some of its data structures.
 	 * We can't really do anything meaningful until we've added a
@@ -996,17 +1008,17 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
 	 */
 	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
 	if (!smmu_domain)
-		return -ENOMEM;
+		return NULL;
 
 	mutex_init(&smmu_domain->init_mutex);
 	spin_lock_init(&smmu_domain->pgtbl_lock);
-	domain->priv = smmu_domain;
-	return 0;
+
+	return &smmu_domain->domain;
 }
 
-static void arm_smmu_domain_destroy(struct iommu_domain *domain)
+static void arm_smmu_domain_free(struct iommu_domain *domain)
 {
-	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
 	/*
 	 * Free the domain resources. We assume that all devices have
@@ -1143,7 +1155,7 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
 	int ret;
-	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_device *smmu;
 	struct arm_smmu_master_cfg *cfg;
 
@@ -1187,7 +1199,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 
 static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
 {
-	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_master_cfg *cfg;
 
 	cfg = find_smmu_master_cfg(dev);
@@ -1203,7 +1215,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
 {
 	int ret;
 	unsigned long flags;
-	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
 
 	if (!ops)
@@ -1220,7 +1232,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 {
 	size_t ret;
 	unsigned long flags;
-	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
 
 	if (!ops)
@@ -1235,7 +1247,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 					      dma_addr_t iova)
 {
-	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
@@ -1281,7 +1293,7 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
 {
 	phys_addr_t ret;
 	unsigned long flags;
-	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
 
 	if (!ops)
@@ -1329,61 +1341,83 @@ static void __arm_smmu_release_pci_iommudata(void *data)
 	kfree(data);
 }
 
-static int arm_smmu_add_device(struct device *dev)
+static int arm_smmu_add_pci_device(struct pci_dev *pdev)
 {
-	struct arm_smmu_device *smmu;
-	struct arm_smmu_master_cfg *cfg;
+	int i, ret;
+	u16 sid;
 	struct iommu_group *group;
-	void (*releasefn)(void *) = NULL;
-	int ret;
-
-	smmu = find_smmu_for_device(dev);
-	if (!smmu)
-		return -ENODEV;
-
-	group = iommu_group_alloc();
-	if (IS_ERR(group)) {
-		dev_err(dev, "Failed to allocate IOMMU group\n");
+	struct arm_smmu_master_cfg *cfg;
+
+	group = iommu_group_get_for_dev(&pdev->dev);
+	if (IS_ERR(group))
 		return PTR_ERR(group);
-	}
-
-	if (dev_is_pci(dev)) {
-		struct pci_dev *pdev = to_pci_dev(dev);
 
+	cfg = iommu_group_get_iommudata(group);
+	if (!cfg) {
 		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
 		if (!cfg) {
 			ret = -ENOMEM;
			goto out_put_group;
 		}
 
-		cfg->num_streamids = 1;
-		/*
-		 * Assume Stream ID == Requester ID for now.
-		 * We need a way to describe the ID mappings in FDT.
-		 */
-		pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid,
-				       &cfg->streamids[0]);
-		releasefn = __arm_smmu_release_pci_iommudata;
-	} else {
-		struct arm_smmu_master *master;
-
-		master = find_smmu_master(smmu, dev->of_node);
-		if (!master) {
-			ret = -ENODEV;
-			goto out_put_group;
-		}
-
-		cfg = &master->cfg;
+		iommu_group_set_iommudata(group, cfg,
+					  __arm_smmu_release_pci_iommudata);
 	}
 
-	iommu_group_set_iommudata(group, cfg, releasefn);
-	ret = iommu_group_add_device(group, dev);
+	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS) {
+		ret = -ENOSPC;
+		goto out_put_group;
+	}
+
+	/*
+	 * Assume Stream ID == Requester ID for now.
+	 * We need a way to describe the ID mappings in FDT.
+	 */
+	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
+	for (i = 0; i < cfg->num_streamids; ++i)
+		if (cfg->streamids[i] == sid)
+			break;
+
+	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
+	if (i == cfg->num_streamids)
+		cfg->streamids[cfg->num_streamids++] = sid;
 
+	return 0;
 out_put_group:
 	iommu_group_put(group);
 	return ret;
 }
 
+static int arm_smmu_add_platform_device(struct device *dev)
+{
+	struct iommu_group *group;
+	struct arm_smmu_master *master;
+	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
+
+	if (!smmu)
+		return -ENODEV;
+
+	master = find_smmu_master(smmu, dev->of_node);
+	if (!master)
+		return -ENODEV;
+
+	/* No automatic group creation for platform devices */
+	group = iommu_group_alloc();
+	if (IS_ERR(group))
+		return PTR_ERR(group);
+
+	iommu_group_set_iommudata(group, &master->cfg, NULL);
+	return iommu_group_add_device(group, dev);
+}
+
+static int arm_smmu_add_device(struct device *dev)
+{
+	if (dev_is_pci(dev))
+		return arm_smmu_add_pci_device(to_pci_dev(dev));
+
+	return arm_smmu_add_platform_device(dev);
+}
+
 static void arm_smmu_remove_device(struct device *dev)
 {
 	iommu_group_remove_device(dev);
@@ -1392,7 +1426,7 @@ static void arm_smmu_remove_device(struct device *dev)
 static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
 				    enum iommu_attr attr, void *data)
 {
-	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
 	switch (attr) {
 	case DOMAIN_ATTR_NESTING:
@@ -1407,7 +1441,7 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
 				    enum iommu_attr attr, void *data)
 {
 	int ret = 0;
-	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
 	mutex_lock(&smmu_domain->init_mutex);
 
@@ -1435,8 +1469,8 @@ out_unlock:
 
 static struct iommu_ops arm_smmu_ops = {
 	.capable		= arm_smmu_capable,
-	.domain_init		= arm_smmu_domain_init,
-	.domain_destroy		= arm_smmu_domain_destroy,
+	.domain_alloc		= arm_smmu_domain_alloc,
+	.domain_free		= arm_smmu_domain_free,
 	.attach_dev		= arm_smmu_attach_dev,
 	.detach_dev		= arm_smmu_detach_dev,
 	.map			= arm_smmu_map,
@@ -1633,6 +1667,15 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
 	smmu->pa_size = size;
 
+	/*
+	 * What the page table walker can address actually depends on which
+	 * descriptor format is in use, but since a) we don't know that yet,
+	 * and b) it can vary per context bank, this will have to do...
+	 */
+	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
+		dev_warn(smmu->dev,
+			 "failed to set DMA mask for table walker\n");
+
 	if (smmu->version == ARM_SMMU_V1) {
 		smmu->va_size = smmu->ipa_size;
 		size = SZ_4K | SZ_2M | SZ_1G;
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -200,6 +200,7 @@ struct exynos_iommu_domain {
 	short *lv2entcnt;	/* free lv2 entry counter for each section */
 	spinlock_t lock;	/* lock for this structure */
 	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
+	struct iommu_domain domain; /* generic domain data structure */
 };
 
 struct sysmmu_drvdata {
@@ -214,6 +215,11 @@ struct sysmmu_drvdata {
 	phys_addr_t pgtable;
 };
 
+static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
+{
+	return container_of(dom, struct exynos_iommu_domain, domain);
+}
+
 static bool set_sysmmu_active(struct sysmmu_drvdata *data)
 {
 	/* return true if the System MMU was not active previously
@@ -696,58 +702,60 @@ static inline void pgtable_flush(void *vastart, void *vaend)
 		virt_to_phys(vaend));
 }
 
-static int exynos_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
 {
-	struct exynos_iommu_domain *priv;
+	struct exynos_iommu_domain *exynos_domain;
 	int i;
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
+	if (type != IOMMU_DOMAIN_UNMANAGED)
+		return NULL;
 
-	priv->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
-	if (!priv->pgtable)
+	exynos_domain = kzalloc(sizeof(*exynos_domain), GFP_KERNEL);
+	if (!exynos_domain)
+		return NULL;
+
+	exynos_domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
+	if (!exynos_domain->pgtable)
 		goto err_pgtable;
 
-	priv->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
-	if (!priv->lv2entcnt)
+	exynos_domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
+	if (!exynos_domain->lv2entcnt)
 		goto err_counter;
 
 	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
 	for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
-		priv->pgtable[i + 0] = ZERO_LV2LINK;
-		priv->pgtable[i + 1] = ZERO_LV2LINK;
-		priv->pgtable[i + 2] = ZERO_LV2LINK;
-		priv->pgtable[i + 3] = ZERO_LV2LINK;
-		priv->pgtable[i + 4] = ZERO_LV2LINK;
-		priv->pgtable[i + 5] = ZERO_LV2LINK;
-		priv->pgtable[i + 6] = ZERO_LV2LINK;
-		priv->pgtable[i + 7] = ZERO_LV2LINK;
+		exynos_domain->pgtable[i + 0] = ZERO_LV2LINK;
+		exynos_domain->pgtable[i + 1] = ZERO_LV2LINK;
+		exynos_domain->pgtable[i + 2] = ZERO_LV2LINK;
+		exynos_domain->pgtable[i + 3] = ZERO_LV2LINK;
+		exynos_domain->pgtable[i + 4] = ZERO_LV2LINK;
+		exynos_domain->pgtable[i + 5] = ZERO_LV2LINK;
+		exynos_domain->pgtable[i + 6] = ZERO_LV2LINK;
+		exynos_domain->pgtable[i + 7] = ZERO_LV2LINK;
 	}
 
-	pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
+	pgtable_flush(exynos_domain->pgtable, exynos_domain->pgtable + NUM_LV1ENTRIES);
 
-	spin_lock_init(&priv->lock);
-	spin_lock_init(&priv->pgtablelock);
-	INIT_LIST_HEAD(&priv->clients);
+	spin_lock_init(&exynos_domain->lock);
+	spin_lock_init(&exynos_domain->pgtablelock);
+	INIT_LIST_HEAD(&exynos_domain->clients);
 
-	domain->geometry.aperture_start = 0;
-	domain->geometry.aperture_end   = ~0UL;
-	domain->geometry.force_aperture = true;
+	exynos_domain->domain.geometry.aperture_start = 0;
+	exynos_domain->domain.geometry.aperture_end   = ~0UL;
+	exynos_domain->domain.geometry.force_aperture = true;
 
-	domain->priv = priv;
-	return 0;
+	return &exynos_domain->domain;
 
 err_counter:
-	free_pages((unsigned long)priv->pgtable, 2);
+	free_pages((unsigned long)exynos_domain->pgtable, 2);
 err_pgtable:
-	kfree(priv);
-	return -ENOMEM;
+	kfree(exynos_domain);
+	return NULL;
 }
 
-static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
+static void exynos_iommu_domain_free(struct iommu_domain *domain)
 {
-	struct exynos_iommu_domain *priv = domain->priv;
+	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
 	struct exynos_iommu_owner *owner;
 	unsigned long flags;
 	int i;
@@ -773,15 +781,14 @@ static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
 
 	free_pages((unsigned long)priv->pgtable, 2);
 	free_pages((unsigned long)priv->lv2entcnt, 1);
-	kfree(domain->priv);
-	domain->priv = NULL;
+	kfree(priv);
 }
 
 static int exynos_iommu_attach_device(struct iommu_domain *domain,
 				      struct device *dev)
 {
 	struct exynos_iommu_owner *owner = dev->archdata.iommu;
-	struct exynos_iommu_domain *priv = domain->priv;
+	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
 	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
 	unsigned long flags;
 	int ret;
@@ -812,7 +819,7 @@ static void exynos_iommu_detach_device(struct iommu_domain *domain,
 				       struct device *dev)
 {
 	struct exynos_iommu_owner *owner;
-	struct exynos_iommu_domain *priv = domain->priv;
+	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
 	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
 	unsigned long flags;
 
@@ -988,7 +995,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
 static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova,
 			    phys_addr_t paddr, size_t size, int prot)
 {
-	struct exynos_iommu_domain *priv = domain->priv;
+	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
 	sysmmu_pte_t *entry;
 	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
 	unsigned long flags;
@@ -1042,7 +1049,7 @@ static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *priv,
 static size_t exynos_iommu_unmap(struct iommu_domain *domain,
 				 unsigned long l_iova, size_t size)
 {
-	struct exynos_iommu_domain *priv = domain->priv;
+	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
 	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
 	sysmmu_pte_t *ent;
 	size_t err_pgsize;
@@ -1119,7 +1126,7 @@ err:
 static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
 					     dma_addr_t iova)
 {
-	struct exynos_iommu_domain *priv = domain->priv;
+	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
 	sysmmu_pte_t *entry;
 	unsigned long flags;
 	phys_addr_t phys = 0;
@@ -1171,8 +1178,8 @@ static void exynos_iommu_remove_device(struct device *dev)
 }
 
 static const struct iommu_ops exynos_iommu_ops = {
-	.domain_init = exynos_iommu_domain_init,
-	.domain_destroy = exynos_iommu_domain_destroy,
+	.domain_alloc = exynos_iommu_domain_alloc,
+	.domain_free = exynos_iommu_domain_free,
 	.attach_dev = exynos_iommu_attach_device,
 	.detach_dev = exynos_iommu_detach_device,
 	.map = exynos_iommu_map,
|
@@ -33,6 +33,11 @@ static struct kmem_cache *fsl_pamu_domain_cache;
 static struct kmem_cache *iommu_devinfo_cache;
 static DEFINE_SPINLOCK(device_domain_lock);
 
+static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
+{
+	return container_of(dom, struct fsl_dma_domain, iommu_domain);
+}
+
 static int __init iommu_init_mempool(void)
 {
 	fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
@@ -65,7 +70,7 @@ static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t i
 	struct dma_window *win_ptr = &dma_domain->win_arr[0];
 	struct iommu_domain_geometry *geom;
 
-	geom = &dma_domain->iommu_domain->geometry;
+	geom = &dma_domain->iommu_domain.geometry;
 
 	if (!win_cnt || !dma_domain->geom_size) {
 		pr_debug("Number of windows/geometry not configured for the domain\n");
@@ -123,7 +128,7 @@ static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
 {
 	int ret;
 	struct dma_window *wnd = &dma_domain->win_arr[0];
-	phys_addr_t wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;
+	phys_addr_t wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;
 	unsigned long flags;
 
 	spin_lock_irqsave(&iommu_lock, flags);
@@ -172,7 +177,7 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr
 	} else {
 		phys_addr_t wnd_addr;
 
-		wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;
+		wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;
 
 		ret = pamu_config_ppaace(liodn, wnd_addr,
 					 wnd->size,
@@ -384,7 +389,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
 static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
 					 dma_addr_t iova)
 {
-	struct fsl_dma_domain *dma_domain = domain->priv;
+	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 
 	if (iova < domain->geometry.aperture_start ||
 	    iova > domain->geometry.aperture_end)
@@ -398,11 +403,9 @@ static bool fsl_pamu_capable(enum iommu_cap cap)
 	return cap == IOMMU_CAP_CACHE_COHERENCY;
 }
 
-static void fsl_pamu_domain_destroy(struct iommu_domain *domain)
+static void fsl_pamu_domain_free(struct iommu_domain *domain)
 {
-	struct fsl_dma_domain *dma_domain = domain->priv;
-
-	domain->priv = NULL;
+	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 
 	/* remove all the devices from the device list */
 	detach_device(NULL, dma_domain);
@@ -413,23 +416,24 @@ static void fsl_pamu_domain_destroy(struct iommu_domain *domain)
 	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
 }
 
-static int fsl_pamu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
 {
 	struct fsl_dma_domain *dma_domain;
 
+	if (type != IOMMU_DOMAIN_UNMANAGED)
+		return NULL;
+
 	dma_domain = iommu_alloc_dma_domain();
 	if (!dma_domain) {
 		pr_debug("dma_domain allocation failed\n");
-		return -ENOMEM;
+		return NULL;
 	}
-	domain->priv = dma_domain;
-	dma_domain->iommu_domain = domain;
 	/* defaul geometry 64 GB i.e. maximum system address */
-	domain->geometry.aperture_start = 0;
-	domain->geometry.aperture_end = (1ULL << 36) - 1;
-	domain->geometry.force_aperture = true;
+	dma_domain->iommu_domain.geometry.aperture_start = 0;
+	dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
+	dma_domain->iommu_domain.geometry.force_aperture = true;
 
-	return 0;
+	return &dma_domain->iommu_domain;
 }
 
 /* Configure geometry settings for all LIODNs associated with domain */
@@ -499,7 +503,7 @@ static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
 
 static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
 {
-	struct fsl_dma_domain *dma_domain = domain->priv;
+	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 	unsigned long flags;
 	int ret;
 
@@ -530,7 +534,7 @@ static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
 static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
 				  phys_addr_t paddr, u64 size, int prot)
 {
-	struct fsl_dma_domain *dma_domain = domain->priv;
+	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 	struct dma_window *wnd;
 	int pamu_prot = 0;
 	int ret;
@@ -607,7 +611,7 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain,
 				int num)
 {
 	unsigned long flags;
-	struct iommu_domain *domain = dma_domain->iommu_domain;
+	struct iommu_domain *domain = &dma_domain->iommu_domain;
 	int ret = 0;
 	int i;
 
@@ -653,7 +657,7 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain,
 static int fsl_pamu_attach_device(struct iommu_domain *domain,
 				  struct device *dev)
 {
-	struct fsl_dma_domain *dma_domain = domain->priv;
+	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 	const u32 *liodn;
 	u32 liodn_cnt;
 	int len, ret = 0;
@@ -691,7 +695,7 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
 static void fsl_pamu_detach_device(struct iommu_domain *domain,
 				   struct device *dev)
 {
-	struct fsl_dma_domain *dma_domain = domain->priv;
+	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 	const u32 *prop;
 	int len;
 	struct pci_dev *pdev = NULL;
@@ -723,7 +727,7 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain,
 static int configure_domain_geometry(struct iommu_domain *domain, void *data)
 {
 	struct iommu_domain_geometry *geom_attr = data;
-	struct fsl_dma_domain *dma_domain = domain->priv;
+	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 	dma_addr_t geom_size;
 	unsigned long flags;
 
@@ -813,7 +817,7 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en
 static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
 				    enum iommu_attr attr_type, void *data)
 {
-	struct fsl_dma_domain *dma_domain = domain->priv;
+	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 	int ret = 0;
 
 	switch (attr_type) {
@@ -838,7 +842,7 @@ static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
 static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
 				    enum iommu_attr attr_type, void *data)
 {
-	struct fsl_dma_domain *dma_domain = domain->priv;
+	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 	int ret = 0;
 
 	switch (attr_type) {
@@ -999,7 +1003,7 @@ static void fsl_pamu_remove_device(struct device *dev)
 
 static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
 {
-	struct fsl_dma_domain *dma_domain = domain->priv;
+	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 	unsigned long flags;
 	int ret;
 
@@ -1048,15 +1052,15 @@ static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
 
 static u32 fsl_pamu_get_windows(struct iommu_domain *domain)
 {
-	struct fsl_dma_domain *dma_domain = domain->priv;
+	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 
 	return dma_domain->win_cnt;
 }
 
 static const struct iommu_ops fsl_pamu_ops = {
 	.capable = fsl_pamu_capable,
-	.domain_init = fsl_pamu_domain_init,
-	.domain_destroy = fsl_pamu_domain_destroy,
+	.domain_alloc = fsl_pamu_domain_alloc,
+	.domain_free = fsl_pamu_domain_free,
 	.attach_dev = fsl_pamu_attach_device,
 	.detach_dev = fsl_pamu_detach_device,
 	.domain_window_enable = fsl_pamu_window_enable,
@@ -71,7 +71,7 @@ struct fsl_dma_domain {
 	u32 stash_id;
 	struct pamu_stash_attribute dma_stash;
 	u32 snoop_id;
-	struct iommu_domain *iommu_domain;
+	struct iommu_domain iommu_domain;
 	spinlock_t domain_lock;
 };
 
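The header change above replaces the PAMU driver's pointer to the generic domain with an embedded struct iommu_domain, which is what allows the driver to drop domain->priv and recover its private structure with container_of(). As a standalone illustration of that pattern (plain C with made-up names, compiled in userspace, not code from the kernel tree):

#include <stddef.h>
#include <stdio.h>

/* Userspace re-implementation of the kernel's container_of() helper. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct generic_domain { int type; };      /* stands in for struct iommu_domain */

struct driver_domain {                    /* stands in for a driver-private domain */
        unsigned long private_state;
        struct generic_domain domain;     /* embedded, not a pointer */
};

static struct driver_domain *to_driver_domain(struct generic_domain *dom)
{
        return container_of(dom, struct driver_domain, domain);
}

int main(void)
{
        struct driver_domain drv = { .private_state = 42 };
        struct generic_domain *handle = &drv.domain;  /* what the core hands around */

        /* The driver recovers its own structure without needing a ->priv pointer. */
        printf("%lu\n", to_driver_domain(handle)->private_state);
        return 0;
}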
@@ -339,7 +339,7 @@ struct dmar_domain {
 	DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
 					/* bitmap of iommus this domain uses*/
 
 	struct list_head devices;	/* all devices' list */
 	struct iova_domain iovad;	/* iova's that belong to this domain */
 
 	struct dma_pte	*pgd;		/* virtual address */
@@ -358,6 +358,9 @@ struct dmar_domain {
 					 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
 	spinlock_t	iommu_lock;	/* protect iommu set in domain */
 	u64		max_addr;	/* maximum mapped address */
+
+	struct iommu_domain domain;	/* generic domain data structure for
+					   iommu core */
 };
 
 /* PCI domain-device relationship */
@@ -449,6 +452,12 @@ static LIST_HEAD(device_domain_list);
 
 static const struct iommu_ops intel_iommu_ops;
 
+/* Convert generic 'struct iommu_domain to private struct dmar_domain */
+static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
+{
+	return container_of(dom, struct dmar_domain, domain);
+}
+
 static int __init intel_iommu_setup(char *str)
 {
 	if (!str)
@@ -595,12 +604,13 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
 {
 	struct dmar_drhd_unit *drhd;
 	struct intel_iommu *iommu;
-	int i, found = 0;
+	bool found = false;
+	int i;
 
 	domain->iommu_coherency = 1;
 
 	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
-		found = 1;
+		found = true;
 		if (!ecap_coherent(g_iommus[i]->ecap)) {
 			domain->iommu_coherency = 0;
 			break;
@@ -1267,7 +1277,7 @@ static struct device_domain_info *
 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
 			 u8 bus, u8 devfn)
 {
-	int found = 0;
+	bool found = false;
 	unsigned long flags;
 	struct device_domain_info *info;
 	struct pci_dev *pdev;
@@ -1282,7 +1292,7 @@ iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
 	list_for_each_entry(info, &domain->devices, link)
 		if (info->iommu == iommu && info->bus == bus &&
 		    info->devfn == devfn) {
-			found = 1;
+			found = true;
 			break;
 		}
 	spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -4269,7 +4279,7 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
 	struct device_domain_info *info, *tmp;
 	struct intel_iommu *iommu;
 	unsigned long flags;
-	int found = 0;
+	bool found = false;
 	u8 bus, devfn;
 
 	iommu = device_to_iommu(dev, &bus, &devfn);
@@ -4301,7 +4311,7 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
 		 * update iommu count and coherency
 		 */
 		if (info->iommu == iommu)
-			found = 1;
+			found = true;
 	}
 
 	spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -4339,44 +4349,45 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 	return 0;
 }
 
-static int intel_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 {
 	struct dmar_domain *dmar_domain;
+	struct iommu_domain *domain;
+
+	if (type != IOMMU_DOMAIN_UNMANAGED)
+		return NULL;
 
 	dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
 	if (!dmar_domain) {
 		printk(KERN_ERR
 			"intel_iommu_domain_init: dmar_domain == NULL\n");
-		return -ENOMEM;
+		return NULL;
 	}
 	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
 		printk(KERN_ERR
 			"intel_iommu_domain_init() failed\n");
 		domain_exit(dmar_domain);
-		return -ENOMEM;
+		return NULL;
 	}
 	domain_update_iommu_cap(dmar_domain);
-	domain->priv = dmar_domain;
 
+	domain = &dmar_domain->domain;
 	domain->geometry.aperture_start = 0;
 	domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
 	domain->geometry.force_aperture = true;
 
-	return 0;
+	return domain;
 }
 
-static void intel_iommu_domain_destroy(struct iommu_domain *domain)
+static void intel_iommu_domain_free(struct iommu_domain *domain)
 {
-	struct dmar_domain *dmar_domain = domain->priv;
-
-	domain->priv = NULL;
-	domain_exit(dmar_domain);
+	domain_exit(to_dmar_domain(domain));
 }
 
 static int intel_iommu_attach_device(struct iommu_domain *domain,
 				     struct device *dev)
 {
-	struct dmar_domain *dmar_domain = domain->priv;
+	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
 	struct intel_iommu *iommu;
 	int addr_width;
 	u8 bus, devfn;
@@ -4441,16 +4452,14 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 static void intel_iommu_detach_device(struct iommu_domain *domain,
 				      struct device *dev)
 {
-	struct dmar_domain *dmar_domain = domain->priv;
-
-	domain_remove_one_dev_info(dmar_domain, dev);
+	domain_remove_one_dev_info(to_dmar_domain(domain), dev);
 }
 
 static int intel_iommu_map(struct iommu_domain *domain,
 			   unsigned long iova, phys_addr_t hpa,
 			   size_t size, int iommu_prot)
 {
-	struct dmar_domain *dmar_domain = domain->priv;
+	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
 	u64 max_addr;
 	int prot = 0;
 	int ret;
@@ -4487,7 +4496,7 @@ static int intel_iommu_map(struct iommu_domain *domain,
 static size_t intel_iommu_unmap(struct iommu_domain *domain,
 				unsigned long iova, size_t size)
 {
-	struct dmar_domain *dmar_domain = domain->priv;
+	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
 	struct page *freelist = NULL;
 	struct intel_iommu *iommu;
 	unsigned long start_pfn, last_pfn;
@@ -4535,7 +4544,7 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
 					    dma_addr_t iova)
 {
-	struct dmar_domain *dmar_domain = domain->priv;
+	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
 	struct dma_pte *pte;
 	int level = 0;
 	u64 phys = 0;
@@ -4594,8 +4603,8 @@ static void intel_iommu_remove_device(struct device *dev)
 
 static const struct iommu_ops intel_iommu_ops = {
 	.capable = intel_iommu_capable,
-	.domain_init = intel_iommu_domain_init,
-	.domain_destroy = intel_iommu_domain_destroy,
+	.domain_alloc = intel_iommu_domain_alloc,
+	.domain_free = intel_iommu_domain_free,
 	.attach_dev = intel_iommu_attach_device,
 	.detach_dev = intel_iommu_detach_device,
 	.map = intel_iommu_map,
@@ -631,7 +631,7 @@ static int __init intel_enable_irq_remapping(void)
 {
 	struct dmar_drhd_unit *drhd;
 	struct intel_iommu *iommu;
-	int setup = 0;
+	bool setup = false;
 	int eim = 0;
 
 	if (x2apic_supported()) {
@@ -697,7 +697,7 @@ static int __init intel_enable_irq_remapping(void)
 	 */
 	for_each_iommu(iommu, drhd) {
 		iommu_set_irq_remapping(iommu, eim);
-		setup = 1;
+		setup = true;
 	}
 
 	if (!setup)
@@ -856,7 +856,7 @@ static int __init parse_ioapics_under_ir(void)
 {
 	struct dmar_drhd_unit *drhd;
 	struct intel_iommu *iommu;
-	int ir_supported = 0;
+	bool ir_supported = false;
 	int ioapic_idx;
 
 	for_each_iommu(iommu, drhd)
@@ -864,7 +864,7 @@ static int __init parse_ioapics_under_ir(void)
 		if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
 			return -1;
 
-		ir_supported = 1;
+		ir_supported = true;
 	}
 
 	if (!ir_supported)
@@ -917,7 +917,7 @@ static void disable_irq_remapping(void)
 static int reenable_irq_remapping(int eim)
 {
 	struct dmar_drhd_unit *drhd;
-	int setup = 0;
+	bool setup = false;
 	struct intel_iommu *iommu = NULL;
 
 	for_each_iommu(iommu, drhd)
@@ -933,7 +933,7 @@ static int reenable_irq_remapping(int eim)
 
 		/* Set up interrupt remapping for iommu.*/
 		iommu_set_irq_remapping(iommu, eim);
-		setup = 1;
+		setup = true;
 	}
 
 	if (!setup)
@@ -116,6 +116,8 @@
 #define ARM_32_LPAE_TCR_EAE (1 << 31)
 #define ARM_64_LPAE_S2_TCR_RES1 (1 << 31)
 
+#define ARM_LPAE_TCR_EPD1 (1 << 23)
+
 #define ARM_LPAE_TCR_TG0_4K (0 << 14)
 #define ARM_LPAE_TCR_TG0_64K (1 << 14)
 #define ARM_LPAE_TCR_TG0_16K (2 << 14)
@@ -621,6 +623,9 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 	}
 
 	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
+
+	/* Disable speculative walks through TTBR1 */
+	reg |= ARM_LPAE_TCR_EPD1;
 	cfg->arm_lpae_s1_cfg.tcr = reg;
 
 	/* MAIRs */
@@ -901,36 +901,24 @@ EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
 struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
 {
 	struct iommu_domain *domain;
-	int ret;
 
 	if (bus == NULL || bus->iommu_ops == NULL)
 		return NULL;
 
-	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+	domain = bus->iommu_ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED);
 	if (!domain)
 		return NULL;
 
 	domain->ops = bus->iommu_ops;
+	domain->type = IOMMU_DOMAIN_UNMANAGED;
 
-	ret = domain->ops->domain_init(domain);
-	if (ret)
-		goto out_free;
-
 	return domain;
-
-out_free:
-	kfree(domain);
-
-	return NULL;
 }
 EXPORT_SYMBOL_GPL(iommu_domain_alloc);
 
 void iommu_domain_free(struct iommu_domain *domain)
 {
-	if (likely(domain->ops->domain_destroy != NULL))
-		domain->ops->domain_destroy(domain);
-
-	kfree(domain);
+	domain->ops->domain_free(domain);
 }
 EXPORT_SYMBOL_GPL(iommu_domain_free);
 
@@ -1049,6 +1037,9 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
 		    domain->ops->pgsize_bitmap == 0UL))
 		return -ENODEV;
 
+	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
+		return -EINVAL;
+
 	/* find out the minimum page size supported */
 	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
 
@@ -1100,6 +1091,9 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 		    domain->ops->pgsize_bitmap == 0UL))
 		return -ENODEV;
 
+	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
+		return -EINVAL;
+
 	/* find out the minimum page size supported */
 	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
 
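With the core changes above, iommu_domain_alloc() no longer kzallocs a bare struct iommu_domain and calls domain_init() on it; it asks the bus driver for a fully formed domain and tears it down by calling back into the driver. The hunks that follow convert each driver to this scheme. A minimal sketch of what a converted driver's callbacks look like is shown below; the "foo" names are placeholders and the body is illustrative only, not taken from any of the drivers in this pull:

#include <linux/iommu.h>
#include <linux/slab.h>

/* Hypothetical driver-private domain; "foo" is a placeholder name. */
struct foo_domain {
	/* driver page tables, locks, attached-device list, ... */
	struct iommu_domain domain;	/* embedded generic domain */
};

static struct foo_domain *to_foo_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct foo_domain, domain);
}

static struct iommu_domain *foo_domain_alloc(unsigned type)
{
	struct foo_domain *foo;

	/* Default domains come later; only unmanaged domains are handled here. */
	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return NULL;

	return &foo->domain;	/* the core fills in ops and type */
}

static void foo_domain_free(struct iommu_domain *dom)
{
	kfree(to_foo_domain(dom));	/* frees the whole containing object */
}

static const struct iommu_ops foo_iommu_ops = {
	.domain_alloc = foo_domain_alloc,
	.domain_free = foo_domain_free,
	/* attach_dev, detach_dev, map, unmap, ... as before */
};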
@@ -38,7 +38,7 @@ struct ipmmu_vmsa_device {
 
 struct ipmmu_vmsa_domain {
 	struct ipmmu_vmsa_device *mmu;
-	struct iommu_domain *io_domain;
+	struct iommu_domain io_domain;
 
 	struct io_pgtable_cfg cfg;
 	struct io_pgtable_ops *iop;
@@ -56,6 +56,11 @@ struct ipmmu_vmsa_archdata {
 static DEFINE_SPINLOCK(ipmmu_devices_lock);
 static LIST_HEAD(ipmmu_devices);
 
+static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
+{
+	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
+}
+
 #define TLB_LOOP_TIMEOUT 100 /* 100us */
 
 /* -----------------------------------------------------------------------------
@@ -428,7 +433,7 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
 	 * TODO: We need to look up the faulty device based on the I/O VA. Use
 	 * the IOMMU device for now.
 	 */
-	if (!report_iommu_fault(domain->io_domain, mmu->dev, iova, 0))
+	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
 		return IRQ_HANDLED;
 
 	dev_err_ratelimited(mmu->dev,
@@ -448,7 +453,7 @@ static irqreturn_t ipmmu_irq(int irq, void *dev)
 		return IRQ_NONE;
 
 	io_domain = mmu->mapping->domain;
-	domain = io_domain->priv;
+	domain = to_vmsa_domain(io_domain);
 
 	return ipmmu_domain_irq(domain);
 }
@@ -457,25 +462,25 @@ static irqreturn_t ipmmu_irq(int irq, void *dev)
  * IOMMU Operations
  */
 
-static int ipmmu_domain_init(struct iommu_domain *io_domain)
+static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
 {
 	struct ipmmu_vmsa_domain *domain;
 
+	if (type != IOMMU_DOMAIN_UNMANAGED)
+		return NULL;
+
 	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
 	if (!domain)
-		return -ENOMEM;
+		return NULL;
 
 	spin_lock_init(&domain->lock);
 
-	io_domain->priv = domain;
-	domain->io_domain = io_domain;
-
-	return 0;
+	return &domain->io_domain;
 }
 
-static void ipmmu_domain_destroy(struct iommu_domain *io_domain)
+static void ipmmu_domain_free(struct iommu_domain *io_domain)
 {
-	struct ipmmu_vmsa_domain *domain = io_domain->priv;
+	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 
 	/*
 	 * Free the domain resources. We assume that all devices have already
@@ -491,7 +496,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
 {
 	struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
 	struct ipmmu_vmsa_device *mmu = archdata->mmu;
-	struct ipmmu_vmsa_domain *domain = io_domain->priv;
+	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 	unsigned long flags;
 	unsigned int i;
 	int ret = 0;
@@ -532,7 +537,7 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain,
 				struct device *dev)
 {
 	struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
-	struct ipmmu_vmsa_domain *domain = io_domain->priv;
+	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 	unsigned int i;
 
 	for (i = 0; i < archdata->num_utlbs; ++i)
@@ -546,7 +551,7 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain,
 static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
 		     phys_addr_t paddr, size_t size, int prot)
 {
-	struct ipmmu_vmsa_domain *domain = io_domain->priv;
+	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 
 	if (!domain)
 		return -ENODEV;
@@ -557,7 +562,7 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
 static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
 			  size_t size)
 {
-	struct ipmmu_vmsa_domain *domain = io_domain->priv;
+	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 
 	return domain->iop->unmap(domain->iop, iova, size);
 }
@@ -565,7 +570,7 @@ static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
 static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
 				      dma_addr_t iova)
 {
-	struct ipmmu_vmsa_domain *domain = io_domain->priv;
+	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 
 	/* TODO: Is locking needed ? */
 
@@ -737,8 +742,8 @@ static void ipmmu_remove_device(struct device *dev)
 }
 
 static const struct iommu_ops ipmmu_ops = {
-	.domain_init = ipmmu_domain_init,
-	.domain_destroy = ipmmu_domain_destroy,
+	.domain_alloc = ipmmu_domain_alloc,
+	.domain_free = ipmmu_domain_free,
 	.attach_dev = ipmmu_attach_device,
 	.detach_dev = ipmmu_detach_device,
 	.map = ipmmu_map,
@@ -52,8 +52,14 @@ DEFINE_SPINLOCK(msm_iommu_lock);
 struct msm_priv {
 	unsigned long *pgtable;
 	struct list_head list_attached;
+	struct iommu_domain domain;
 };
 
+static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
+{
+	return container_of(dom, struct msm_priv, domain);
+}
+
 static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
 {
 	int ret;
@@ -79,7 +85,7 @@ static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
 
 static int __flush_iotlb(struct iommu_domain *domain)
 {
-	struct msm_priv *priv = domain->priv;
+	struct msm_priv *priv = to_msm_priv(domain);
 	struct msm_iommu_drvdata *iommu_drvdata;
 	struct msm_iommu_ctx_drvdata *ctx_drvdata;
 	int ret = 0;
@@ -209,10 +215,14 @@ static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
 	SET_M(base, ctx, 1);
 }
 
-static int msm_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
 {
-	struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	struct msm_priv *priv;
+
+	if (type != IOMMU_DOMAIN_UNMANAGED)
+		return NULL;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		goto fail_nomem;
 
@@ -224,20 +234,19 @@ static int msm_iommu_domain_init(struct iommu_domain *domain)
 		goto fail_nomem;
 
 	memset(priv->pgtable, 0, SZ_16K);
-	domain->priv = priv;
 
-	domain->geometry.aperture_start = 0;
-	domain->geometry.aperture_end = (1ULL << 32) - 1;
-	domain->geometry.force_aperture = true;
+	priv->domain.geometry.aperture_start = 0;
+	priv->domain.geometry.aperture_end = (1ULL << 32) - 1;
+	priv->domain.geometry.force_aperture = true;
 
-	return 0;
+	return &priv->domain;
 
 fail_nomem:
 	kfree(priv);
-	return -ENOMEM;
+	return NULL;
 }
 
-static void msm_iommu_domain_destroy(struct iommu_domain *domain)
+static void msm_iommu_domain_free(struct iommu_domain *domain)
 {
 	struct msm_priv *priv;
 	unsigned long flags;
@@ -245,20 +254,17 @@ static void msm_iommu_domain_destroy(struct iommu_domain *domain)
 	int i;
 
 	spin_lock_irqsave(&msm_iommu_lock, flags);
-	priv = domain->priv;
-	domain->priv = NULL;
+	priv = to_msm_priv(domain);
 
-	if (priv) {
-		fl_table = priv->pgtable;
+	fl_table = priv->pgtable;
 
 	for (i = 0; i < NUM_FL_PTE; i++)
 		if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
 			free_page((unsigned long) __va(((fl_table[i]) &
 							FL_BASE_MASK)));
 
 	free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
 	priv->pgtable = NULL;
-	}
 
 	kfree(priv);
 	spin_unlock_irqrestore(&msm_iommu_lock, flags);
@@ -276,9 +282,9 @@ static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 
 	spin_lock_irqsave(&msm_iommu_lock, flags);
 
-	priv = domain->priv;
+	priv = to_msm_priv(domain);
 
-	if (!priv || !dev) {
+	if (!dev) {
 		ret = -EINVAL;
 		goto fail;
 	}
@@ -330,9 +336,9 @@ static void msm_iommu_detach_dev(struct iommu_domain *domain,
 	int ret;
 
 	spin_lock_irqsave(&msm_iommu_lock, flags);
-	priv = domain->priv;
+	priv = to_msm_priv(domain);
 
-	if (!priv || !dev)
+	if (!dev)
 		goto fail;
 
 	iommu_drvdata = dev_get_drvdata(dev->parent);
@@ -382,11 +388,7 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
 		goto fail;
 	}
 
-	priv = domain->priv;
-	if (!priv) {
-		ret = -EINVAL;
-		goto fail;
-	}
+	priv = to_msm_priv(domain);
 
 	fl_table = priv->pgtable;
 
@@ -484,10 +486,7 @@ static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
 
 	spin_lock_irqsave(&msm_iommu_lock, flags);
 
-	priv = domain->priv;
+	priv = to_msm_priv(domain);
 
-	if (!priv)
-		goto fail;
-
 	fl_table = priv->pgtable;
 
@@ -566,7 +565,7 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
 
 	spin_lock_irqsave(&msm_iommu_lock, flags);
 
-	priv = domain->priv;
+	priv = to_msm_priv(domain);
 	if (list_empty(&priv->list_attached))
 		goto fail;
 
@@ -674,8 +673,8 @@ fail:
 
 static const struct iommu_ops msm_iommu_ops = {
 	.capable = msm_iommu_capable,
-	.domain_init = msm_iommu_domain_init,
-	.domain_destroy = msm_iommu_domain_destroy,
+	.domain_alloc = msm_iommu_domain_alloc,
+	.domain_free = msm_iommu_domain_free,
 	.attach_dev = msm_iommu_attach_dev,
 	.detach_dev = msm_iommu_detach_dev,
 	.map = msm_iommu_map,
@@ -59,6 +59,7 @@ struct omap_iommu_domain {
 	struct omap_iommu *iommu_dev;
 	struct device *dev;
 	spinlock_t lock;
+	struct iommu_domain domain;
 };
 
 #define MMU_LOCK_BASE_SHIFT 10
@@ -79,6 +80,15 @@ struct iotlb_lock {
 static struct platform_driver omap_iommu_driver;
 static struct kmem_cache *iopte_cachep;
 
+/**
+ * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
+ * @dom: generic iommu domain handle
+ **/
+static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
+{
+	return container_of(dom, struct omap_iommu_domain, domain);
+}
+
 /**
  * omap_iommu_save_ctx - Save registers for pm off-mode support
  * @dev: client device
@@ -901,7 +911,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
 	u32 *iopgd, *iopte;
 	struct omap_iommu *obj = data;
 	struct iommu_domain *domain = obj->domain;
-	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
 
 	if (!omap_domain->iommu_dev)
 		return IRQ_NONE;
@@ -1113,7 +1123,7 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
 static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
 			  phys_addr_t pa, size_t bytes, int prot)
 {
-	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
 	struct omap_iommu *oiommu = omap_domain->iommu_dev;
 	struct device *dev = oiommu->dev;
 	struct iotlb_entry e;
@@ -1140,7 +1150,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
 static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
 			       size_t size)
 {
-	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
 	struct omap_iommu *oiommu = omap_domain->iommu_dev;
 	struct device *dev = oiommu->dev;
 
@@ -1152,7 +1162,7 @@ static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
 static int
 omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
-	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
 	struct omap_iommu *oiommu;
 	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
 	int ret = 0;
@@ -1212,17 +1222,20 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
 static void omap_iommu_detach_dev(struct iommu_domain *domain,
 				  struct device *dev)
 {
-	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
 
 	spin_lock(&omap_domain->lock);
 	_omap_iommu_detach_dev(omap_domain, dev);
 	spin_unlock(&omap_domain->lock);
 }
 
-static int omap_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
 {
 	struct omap_iommu_domain *omap_domain;
 
+	if (type != IOMMU_DOMAIN_UNMANAGED)
+		return NULL;
+
 	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
 	if (!omap_domain) {
 		pr_err("kzalloc failed\n");
@@ -1244,25 +1257,21 @@ static int omap_iommu_domain_init(struct iommu_domain *domain)
 	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
 	spin_lock_init(&omap_domain->lock);
 
-	domain->priv = omap_domain;
+	omap_domain->domain.geometry.aperture_start = 0;
+	omap_domain->domain.geometry.aperture_end = (1ULL << 32) - 1;
+	omap_domain->domain.geometry.force_aperture = true;
 
-	domain->geometry.aperture_start = 0;
-	domain->geometry.aperture_end = (1ULL << 32) - 1;
-	domain->geometry.force_aperture = true;
-
-	return 0;
+	return &omap_domain->domain;
 
 fail_nomem:
 	kfree(omap_domain);
 out:
-	return -ENOMEM;
+	return NULL;
 }
 
-static void omap_iommu_domain_destroy(struct iommu_domain *domain)
+static void omap_iommu_domain_free(struct iommu_domain *domain)
 {
-	struct omap_iommu_domain *omap_domain = domain->priv;
-
-	domain->priv = NULL;
+	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
 
 	/*
 	 * An iommu device is still attached
@@ -1278,7 +1287,7 @@ static void omap_iommu_domain_destroy(struct iommu_domain *domain)
 static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
 					   dma_addr_t da)
 {
-	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
 	struct omap_iommu *oiommu = omap_domain->iommu_dev;
 	struct device *dev = oiommu->dev;
 	u32 *pgd, *pte;
@@ -1358,8 +1367,8 @@ static void omap_iommu_remove_device(struct device *dev)
 }
 
 static const struct iommu_ops omap_iommu_ops = {
-	.domain_init = omap_iommu_domain_init,
-	.domain_destroy = omap_iommu_domain_destroy,
+	.domain_alloc = omap_iommu_domain_alloc,
+	.domain_free = omap_iommu_domain_free,
 	.attach_dev = omap_iommu_attach_dev,
 	.detach_dev = omap_iommu_detach_dev,
 	.map = omap_iommu_map,
@@ -80,6 +80,8 @@ struct rk_iommu_domain {
 	u32 *dt; /* page directory table */
 	spinlock_t iommus_lock; /* lock for iommus list */
 	spinlock_t dt_lock; /* lock for modifying page directory table */
+
+	struct iommu_domain domain;
 };
 
 struct rk_iommu {
@@ -100,6 +102,11 @@ static inline void rk_table_flush(u32 *va, unsigned int count)
 	outer_flush_range(pa_start, pa_end);
 }
 
+static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
+{
+	return container_of(dom, struct rk_iommu_domain, domain);
+}
+
 /**
  * Inspired by _wait_for in intel_drv.h
  * This is NOT safe for use in interrupt context.
@@ -503,7 +510,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
 static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
 					 dma_addr_t iova)
 {
-	struct rk_iommu_domain *rk_domain = domain->priv;
+	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
 	unsigned long flags;
 	phys_addr_t pt_phys, phys = 0;
 	u32 dte, pte;
@@ -639,7 +646,7 @@ unwind:
 static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
 			phys_addr_t paddr, size_t size, int prot)
 {
-	struct rk_iommu_domain *rk_domain = domain->priv;
+	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
 	unsigned long flags;
 	dma_addr_t iova = (dma_addr_t)_iova;
 	u32 *page_table, *pte_addr;
@@ -670,7 +677,7 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
 static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
 			     size_t size)
 {
-	struct rk_iommu_domain *rk_domain = domain->priv;
+	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
 	unsigned long flags;
 	dma_addr_t iova = (dma_addr_t)_iova;
 	phys_addr_t pt_phys;
@@ -726,7 +733,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
 				  struct device *dev)
 {
 	struct rk_iommu *iommu;
-	struct rk_iommu_domain *rk_domain = domain->priv;
+	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
 	unsigned long flags;
 	int ret;
 	phys_addr_t dte_addr;
@@ -778,7 +785,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
 				   struct device *dev)
 {
 	struct rk_iommu *iommu;
-	struct rk_iommu_domain *rk_domain = domain->priv;
+	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
 	unsigned long flags;
 
 	/* Allow 'virtual devices' (eg drm) to detach from domain */
@@ -804,13 +811,16 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
 	dev_info(dev, "Detached from iommu domain\n");
 }
 
-static int rk_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
 {
 	struct rk_iommu_domain *rk_domain;
 
+	if (type != IOMMU_DOMAIN_UNMANAGED)
+		return NULL;
+
 	rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
 	if (!rk_domain)
-		return -ENOMEM;
+		return NULL;
 
 	/*
 	 * rk32xx iommus use a 2 level pagetable.
@@ -827,17 +837,16 @@ static int rk_iommu_domain_init(struct iommu_domain *domain)
 	spin_lock_init(&rk_domain->dt_lock);
 	INIT_LIST_HEAD(&rk_domain->iommus);
 
-	domain->priv = rk_domain;
+	return &rk_domain->domain;
 
-	return 0;
 err_dt:
 	kfree(rk_domain);
-	return -ENOMEM;
+	return NULL;
 }
 
-static void rk_iommu_domain_destroy(struct iommu_domain *domain)
+static void rk_iommu_domain_free(struct iommu_domain *domain)
 {
-	struct rk_iommu_domain *rk_domain = domain->priv;
+	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
 	int i;
 
 	WARN_ON(!list_empty(&rk_domain->iommus));
@@ -852,8 +861,7 @@ static void rk_iommu_domain_destroy(struct iommu_domain *domain)
 	}
 
 	free_page((unsigned long)rk_domain->dt);
-	kfree(domain->priv);
-	domain->priv = NULL;
+	kfree(rk_domain);
 }
 
 static bool rk_iommu_is_dev_iommu_master(struct device *dev)
@@ -952,8 +960,8 @@ static void rk_iommu_remove_device(struct device *dev)
 }
 
 static const struct iommu_ops rk_iommu_ops = {
-	.domain_init = rk_iommu_domain_init,
-	.domain_destroy = rk_iommu_domain_destroy,
+	.domain_alloc = rk_iommu_domain_alloc,
+	.domain_free = rk_iommu_domain_free,
 	.attach_dev = rk_iommu_attach_device,
 	.detach_dev = rk_iommu_detach_device,
 	.map = rk_iommu_map,
@@ -42,11 +42,17 @@ struct shmobile_iommu_domain {
 	spinlock_t map_lock;
 	spinlock_t attached_list_lock;
 	struct list_head attached_list;
+	struct iommu_domain domain;
 };
 
 static struct shmobile_iommu_archdata *ipmmu_archdata;
 static struct kmem_cache *l1cache, *l2cache;
 
+static struct shmobile_iommu_domain *to_sh_domain(struct iommu_domain *dom)
+{
+	return container_of(dom, struct shmobile_iommu_domain, domain);
+}
+
 static int pgtable_alloc(struct shmobile_iommu_domain_pgtable *pgtable,
 			 struct kmem_cache *cache, size_t size)
 {
@@ -82,31 +88,33 @@ static void pgtable_write(struct shmobile_iommu_domain_pgtable *pgtable,
 			  sizeof(val) * count, DMA_TO_DEVICE);
 }
 
-static int shmobile_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *shmobile_iommu_domain_alloc(unsigned type)
 {
 	struct shmobile_iommu_domain *sh_domain;
 	int i, ret;
 
-	sh_domain = kmalloc(sizeof(*sh_domain), GFP_KERNEL);
+	if (type != IOMMU_DOMAIN_UNMANAGED)
+		return NULL;
+
+	sh_domain = kzalloc(sizeof(*sh_domain), GFP_KERNEL);
 	if (!sh_domain)
-		return -ENOMEM;
+		return NULL;
 	ret = pgtable_alloc(&sh_domain->l1, l1cache, L1_SIZE);
 	if (ret < 0) {
 		kfree(sh_domain);
-		return ret;
+		return NULL;
 	}
 	for (i = 0; i < L1_LEN; i++)
 		sh_domain->l2[i].pgtable = NULL;
 	spin_lock_init(&sh_domain->map_lock);
 	spin_lock_init(&sh_domain->attached_list_lock);
 	INIT_LIST_HEAD(&sh_domain->attached_list);
-	domain->priv = sh_domain;
-	return 0;
+	return &sh_domain->domain;
 }
 
-static void shmobile_iommu_domain_destroy(struct iommu_domain *domain)
+static void shmobile_iommu_domain_free(struct iommu_domain *domain)
 {
-	struct shmobile_iommu_domain *sh_domain = domain->priv;
+	struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
 	int i;
 
 	for (i = 0; i < L1_LEN; i++) {
@@ -115,14 +123,13 @@ static void shmobile_iommu_domain_destroy(struct iommu_domain *domain)
 	}
 	pgtable_free(&sh_domain->l1, l1cache, L1_SIZE);
 	kfree(sh_domain);
-	domain->priv = NULL;
 }
 
 static int shmobile_iommu_attach_device(struct iommu_domain *domain,
 					struct device *dev)
 {
 	struct shmobile_iommu_archdata *archdata = dev->archdata.iommu;
-	struct shmobile_iommu_domain *sh_domain = domain->priv;
+	struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
 	int ret = -EBUSY;
 
 	if (!archdata)
@@ -151,7 +158,7 @@ static void shmobile_iommu_detach_device(struct iommu_domain *domain,
 					 struct device *dev)
 {
 	struct shmobile_iommu_archdata *archdata = dev->archdata.iommu;
-	struct shmobile_iommu_domain *sh_domain = domain->priv;
+	struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
 
 	if (!archdata)
 		return;
@@ -214,7 +221,7 @@ static int shmobile_iommu_map(struct iommu_domain *domain, unsigned long iova,
|
|||||||
phys_addr_t paddr, size_t size, int prot)
|
phys_addr_t paddr, size_t size, int prot)
|
||||||
{
|
{
|
||||||
struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL };
|
struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL };
|
||||||
struct shmobile_iommu_domain *sh_domain = domain->priv;
|
struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
|
||||||
unsigned int l1index, l2index;
|
unsigned int l1index, l2index;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
@ -258,7 +265,7 @@ static size_t shmobile_iommu_unmap(struct iommu_domain *domain,
|
|||||||
unsigned long iova, size_t size)
|
unsigned long iova, size_t size)
|
||||||
{
|
{
|
||||||
struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL };
|
struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL };
|
||||||
struct shmobile_iommu_domain *sh_domain = domain->priv;
|
struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
|
||||||
unsigned int l1index, l2index;
|
unsigned int l1index, l2index;
|
||||||
uint32_t l2entry = 0;
|
uint32_t l2entry = 0;
|
||||||
size_t ret = 0;
|
size_t ret = 0;
|
||||||
@ -298,7 +305,7 @@ done:
|
|||||||
static phys_addr_t shmobile_iommu_iova_to_phys(struct iommu_domain *domain,
|
static phys_addr_t shmobile_iommu_iova_to_phys(struct iommu_domain *domain,
|
||||||
dma_addr_t iova)
|
dma_addr_t iova)
|
||||||
{
|
{
|
||||||
struct shmobile_iommu_domain *sh_domain = domain->priv;
|
struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
|
||||||
uint32_t l1entry = 0, l2entry = 0;
|
uint32_t l1entry = 0, l2entry = 0;
|
||||||
unsigned int l1index, l2index;
|
unsigned int l1index, l2index;
|
||||||
|
|
||||||
@ -355,8 +362,8 @@ static int shmobile_iommu_add_device(struct device *dev)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static const struct iommu_ops shmobile_iommu_ops = {
|
static const struct iommu_ops shmobile_iommu_ops = {
|
||||||
.domain_init = shmobile_iommu_domain_init,
|
.domain_alloc = shmobile_iommu_domain_alloc,
|
||||||
.domain_destroy = shmobile_iommu_domain_destroy,
|
.domain_free = shmobile_iommu_domain_free,
|
||||||
.attach_dev = shmobile_iommu_attach_device,
|
.attach_dev = shmobile_iommu_attach_device,
|
||||||
.detach_dev = shmobile_iommu_detach_device,
|
.detach_dev = shmobile_iommu_detach_device,
|
||||||
.map = shmobile_iommu_map,
|
.map = shmobile_iommu_map,
|
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -63,11 +63,21 @@ struct gart_device {
         struct device *dev;
 };
 
+struct gart_domain {
+        struct iommu_domain domain;     /* generic domain handle */
+        struct gart_device *gart;       /* link to gart device   */
+};
+
 static struct gart_device *gart_handle; /* unique for a system */
 
 #define GART_PTE(_pfn)                                          \
         (GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))
 
+static struct gart_domain *to_gart_domain(struct iommu_domain *dom)
+{
+        return container_of(dom, struct gart_domain, domain);
+}
+
 /*
  * Any interaction between any block on PPSB and a block on APB or AHB
  * must have these read-back to ensure the APB/AHB bus transaction is
@@ -156,20 +166,11 @@ static inline bool gart_iova_range_valid(struct gart_device *gart,
 static int gart_iommu_attach_dev(struct iommu_domain *domain,
                                  struct device *dev)
 {
-        struct gart_device *gart;
+        struct gart_domain *gart_domain = to_gart_domain(domain);
+        struct gart_device *gart = gart_domain->gart;
         struct gart_client *client, *c;
         int err = 0;
 
-        gart = gart_handle;
-        if (!gart)
-                return -EINVAL;
-        domain->priv = gart;
-
-        domain->geometry.aperture_start = gart->iovmm_base;
-        domain->geometry.aperture_end = gart->iovmm_base +
-                                        gart->page_count * GART_PAGE_SIZE - 1;
-        domain->geometry.force_aperture = true;
-
         client = devm_kzalloc(gart->dev, sizeof(*c), GFP_KERNEL);
         if (!client)
                 return -ENOMEM;
@@ -198,7 +199,8 @@ fail:
 static void gart_iommu_detach_dev(struct iommu_domain *domain,
                                   struct device *dev)
 {
-        struct gart_device *gart = domain->priv;
+        struct gart_domain *gart_domain = to_gart_domain(domain);
+        struct gart_device *gart = gart_domain->gart;
         struct gart_client *c;
 
         spin_lock(&gart->client_lock);
@@ -216,33 +218,55 @@ out:
         spin_unlock(&gart->client_lock);
 }
 
-static int gart_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
 {
-        return 0;
+        struct gart_domain *gart_domain;
+        struct gart_device *gart;
+
+        if (type != IOMMU_DOMAIN_UNMANAGED)
+                return NULL;
+
+        gart = gart_handle;
+        if (!gart)
+                return NULL;
+
+        gart_domain = kzalloc(sizeof(*gart_domain), GFP_KERNEL);
+        if (!gart_domain)
+                return NULL;
+
+        gart_domain->gart = gart;
+        gart_domain->domain.geometry.aperture_start = gart->iovmm_base;
+        gart_domain->domain.geometry.aperture_end = gart->iovmm_base +
+                                        gart->page_count * GART_PAGE_SIZE - 1;
+        gart_domain->domain.geometry.force_aperture = true;
+
+        return &gart_domain->domain;
 }
 
-static void gart_iommu_domain_destroy(struct iommu_domain *domain)
+static void gart_iommu_domain_free(struct iommu_domain *domain)
 {
-        struct gart_device *gart = domain->priv;
+        struct gart_domain *gart_domain = to_gart_domain(domain);
+        struct gart_device *gart = gart_domain->gart;
 
-        if (!gart)
-                return;
-
-        spin_lock(&gart->client_lock);
-        if (!list_empty(&gart->client)) {
-                struct gart_client *c;
+        if (gart) {
+                spin_lock(&gart->client_lock);
+                if (!list_empty(&gart->client)) {
+                        struct gart_client *c;
 
-                list_for_each_entry(c, &gart->client, list)
-                        gart_iommu_detach_dev(domain, c->dev);
-        }
-        spin_unlock(&gart->client_lock);
+                        list_for_each_entry(c, &gart->client, list)
+                                gart_iommu_detach_dev(domain, c->dev);
+                }
+                spin_unlock(&gart->client_lock);
+        }
 
-        domain->priv = NULL;
+        kfree(gart_domain);
 }
 
 static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
                           phys_addr_t pa, size_t bytes, int prot)
 {
-        struct gart_device *gart = domain->priv;
+        struct gart_domain *gart_domain = to_gart_domain(domain);
+        struct gart_device *gart = gart_domain->gart;
         unsigned long flags;
         unsigned long pfn;
 
@@ -265,7 +289,8 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
 static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                                size_t bytes)
 {
-        struct gart_device *gart = domain->priv;
+        struct gart_domain *gart_domain = to_gart_domain(domain);
+        struct gart_device *gart = gart_domain->gart;
         unsigned long flags;
 
         if (!gart_iova_range_valid(gart, iova, bytes))
@@ -281,7 +306,8 @@ static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
                                            dma_addr_t iova)
 {
-        struct gart_device *gart = domain->priv;
+        struct gart_domain *gart_domain = to_gart_domain(domain);
+        struct gart_device *gart = gart_domain->gart;
         unsigned long pte;
         phys_addr_t pa;
         unsigned long flags;
@@ -310,8 +336,8 @@ static bool gart_iommu_capable(enum iommu_cap cap)
 
 static const struct iommu_ops gart_iommu_ops = {
         .capable = gart_iommu_capable,
-        .domain_init = gart_iommu_domain_init,
-        .domain_destroy = gart_iommu_domain_destroy,
+        .domain_alloc = gart_iommu_domain_alloc,
+        .domain_free = gart_iommu_domain_free,
         .attach_dev = gart_iommu_attach_dev,
         .detach_dev = gart_iommu_detach_dev,
         .map = gart_iommu_map,
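One visible effect of the Tegra GART change is that the domain's aperture geometry is now published at allocation time rather than at the first device attach. Using example numbers (not taken from the patch): with iovmm_base = 0x58000000 and page_count = 32768 pages of GART_PAGE_SIZE = 4 KiB, gart_iommu_domain_alloc() would advertise aperture_start = 0x58000000 and aperture_end = 0x58000000 + 32768 * 4096 - 1 = 0x5fffffff, i.e. a 128 MiB IOVA window, as soon as the domain exists.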
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -6,6 +6,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/bitops.h>
 #include <linux/err.h>
 #include <linux/iommu.h>
 #include <linux/kernel.h>
@@ -24,6 +25,8 @@ struct tegra_smmu {
         struct tegra_mc *mc;
         const struct tegra_smmu_soc *soc;
 
+        unsigned long pfn_mask;
+
         unsigned long *asids;
         struct mutex lock;
 
@@ -31,7 +34,7 @@ struct tegra_smmu {
 };
 
 struct tegra_smmu_as {
-        struct iommu_domain *domain;
+        struct iommu_domain domain;
         struct tegra_smmu *smmu;
         unsigned int use_count;
         struct page *count;
@@ -40,6 +43,11 @@ struct tegra_smmu_as {
         u32 attr;
 };
 
+static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
+{
+        return container_of(dom, struct tegra_smmu_as, domain);
+}
+
 static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
                                unsigned long offset)
 {
@@ -105,8 +113,6 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
 #define SMMU_PDE_SHIFT 22
 #define SMMU_PTE_SHIFT 12
 
-#define SMMU_PFN_MASK 0x000fffff
-
 #define SMMU_PD_READABLE (1 << 31)
 #define SMMU_PD_WRITABLE (1 << 30)
 #define SMMU_PD_NONSECURE (1 << 29)
@@ -224,30 +230,32 @@ static bool tegra_smmu_capable(enum iommu_cap cap)
         return false;
 }
 
-static int tegra_smmu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
 {
         struct tegra_smmu_as *as;
         unsigned int i;
         uint32_t *pd;
 
+        if (type != IOMMU_DOMAIN_UNMANAGED)
+                return NULL;
+
         as = kzalloc(sizeof(*as), GFP_KERNEL);
         if (!as)
-                return -ENOMEM;
+                return NULL;
 
         as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
-        as->domain = domain;
 
         as->pd = alloc_page(GFP_KERNEL | __GFP_DMA);
         if (!as->pd) {
                 kfree(as);
-                return -ENOMEM;
+                return NULL;
         }
 
         as->count = alloc_page(GFP_KERNEL);
         if (!as->count) {
                 __free_page(as->pd);
                 kfree(as);
-                return -ENOMEM;
+                return NULL;
         }
 
         /* clear PDEs */
@@ -264,14 +272,17 @@ static int tegra_smmu_domain_init(struct iommu_domain *domain)
         for (i = 0; i < SMMU_NUM_PDE; i++)
                 pd[i] = 0;
 
-        domain->priv = as;
+        /* setup aperture */
+        as->domain.geometry.aperture_start = 0;
+        as->domain.geometry.aperture_end = 0xffffffff;
+        as->domain.geometry.force_aperture = true;
 
-        return 0;
+        return &as->domain;
 }
 
-static void tegra_smmu_domain_destroy(struct iommu_domain *domain)
+static void tegra_smmu_domain_free(struct iommu_domain *domain)
 {
-        struct tegra_smmu_as *as = domain->priv;
+        struct tegra_smmu_as *as = to_smmu_as(domain);
 
         /* TODO: free page directory and page tables */
         ClearPageReserved(as->pd);
@@ -395,7 +406,7 @@ static int tegra_smmu_attach_dev(struct iommu_domain *domain,
                                  struct device *dev)
 {
         struct tegra_smmu *smmu = dev->archdata.iommu;
-        struct tegra_smmu_as *as = domain->priv;
+        struct tegra_smmu_as *as = to_smmu_as(domain);
         struct device_node *np = dev->of_node;
         struct of_phandle_args args;
         unsigned int index = 0;
@@ -428,7 +439,7 @@ static int tegra_smmu_attach_dev(struct iommu_domain *domain,
 
 static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
 {
-        struct tegra_smmu_as *as = domain->priv;
+        struct tegra_smmu_as *as = to_smmu_as(domain);
         struct device_node *np = dev->of_node;
         struct tegra_smmu *smmu = as->smmu;
         struct of_phandle_args args;
@@ -481,7 +492,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
                 smmu_flush_tlb_section(smmu, as->id, iova);
                 smmu_flush(smmu);
         } else {
-                page = pfn_to_page(pd[pde] & SMMU_PFN_MASK);
+                page = pfn_to_page(pd[pde] & smmu->pfn_mask);
                 pt = page_address(page);
         }
 
@@ -503,7 +514,7 @@ static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova)
         u32 *pd = page_address(as->pd), *pt;
         struct page *page;
 
-        page = pfn_to_page(pd[pde] & SMMU_PFN_MASK);
+        page = pfn_to_page(pd[pde] & as->smmu->pfn_mask);
         pt = page_address(page);
 
         /*
@@ -524,7 +535,7 @@ static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova)
 static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
                           phys_addr_t paddr, size_t size, int prot)
 {
-        struct tegra_smmu_as *as = domain->priv;
+        struct tegra_smmu_as *as = to_smmu_as(domain);
         struct tegra_smmu *smmu = as->smmu;
         unsigned long offset;
         struct page *page;
@@ -548,7 +559,7 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
 static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
                                size_t size)
 {
-        struct tegra_smmu_as *as = domain->priv;
+        struct tegra_smmu_as *as = to_smmu_as(domain);
         struct tegra_smmu *smmu = as->smmu;
         unsigned long offset;
         struct page *page;
@@ -572,13 +583,13 @@ static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
                                            dma_addr_t iova)
 {
-        struct tegra_smmu_as *as = domain->priv;
+        struct tegra_smmu_as *as = to_smmu_as(domain);
         struct page *page;
         unsigned long pfn;
         u32 *pte;
 
         pte = as_get_pte(as, iova, &page);
-        pfn = *pte & SMMU_PFN_MASK;
+        pfn = *pte & as->smmu->pfn_mask;
 
         return PFN_PHYS(pfn);
 }
@@ -633,8 +644,8 @@ static void tegra_smmu_remove_device(struct device *dev)
 
 static const struct iommu_ops tegra_smmu_ops = {
         .capable = tegra_smmu_capable,
-        .domain_init = tegra_smmu_domain_init,
-        .domain_destroy = tegra_smmu_domain_destroy,
+        .domain_alloc = tegra_smmu_domain_alloc,
+        .domain_free = tegra_smmu_domain_free,
         .attach_dev = tegra_smmu_attach_dev,
         .detach_dev = tegra_smmu_detach_dev,
         .add_device = tegra_smmu_add_device,
@@ -702,6 +713,10 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
         smmu->dev = dev;
         smmu->mc = mc;
 
+        smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
+        dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
+                mc->soc->num_address_bits, smmu->pfn_mask);
+
         value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);
 
         if (soc->supports_request_limit)
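The removal of the hard-coded SMMU_PFN_MASK above pairs with the new pfn_mask computation in tegra_smmu_probe(): the mask is now derived from the SoC's physical address width. As a quick check, with num_address_bits = 32 and PAGE_SHIFT = 12, BIT_MASK(32 - 12) - 1 = (1 << 20) - 1 = 0x000fffff, which reproduces the old constant; a hypothetical SoC with 34 address bits would instead get 0x003fffff, something the fixed #define could not express.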
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -51,9 +51,33 @@ struct iommu_domain_geometry {
         bool force_aperture;       /* DMA only allowed in mappable range? */
 };
 
+/* Domain feature flags */
+#define __IOMMU_DOMAIN_PAGING   (1U << 0)  /* Support for iommu_map/unmap */
+#define __IOMMU_DOMAIN_DMA_API  (1U << 1)  /* Domain for use in DMA-API
+                                              implementation              */
+#define __IOMMU_DOMAIN_PT       (1U << 2)  /* Domain is identity mapped   */
+
+/*
+ * This are the possible domain-types
+ *
+ *      IOMMU_DOMAIN_BLOCKED    - All DMA is blocked, can be used to isolate
+ *                                devices
+ *      IOMMU_DOMAIN_IDENTITY   - DMA addresses are system physical addresses
+ *      IOMMU_DOMAIN_UNMANAGED  - DMA mappings managed by IOMMU-API user, used
+ *                                for VMs
+ *      IOMMU_DOMAIN_DMA        - Internally used for DMA-API implementations.
+ *                                This flag allows IOMMU drivers to implement
+ *                                certain optimizations for these domains
+ */
+#define IOMMU_DOMAIN_BLOCKED    (0U)
+#define IOMMU_DOMAIN_IDENTITY   (__IOMMU_DOMAIN_PT)
+#define IOMMU_DOMAIN_UNMANAGED  (__IOMMU_DOMAIN_PAGING)
+#define IOMMU_DOMAIN_DMA        (__IOMMU_DOMAIN_PAGING |    \
+                                 __IOMMU_DOMAIN_DMA_API)
+
 struct iommu_domain {
+        unsigned type;
         const struct iommu_ops *ops;
-        void *priv;
         iommu_fault_handler_t handler;
         void *handler_token;
         struct iommu_domain_geometry geometry;
@@ -113,8 +137,11 @@ enum iommu_attr {
  */
 struct iommu_ops {
         bool (*capable)(enum iommu_cap);
-        int (*domain_init)(struct iommu_domain *domain);
-        void (*domain_destroy)(struct iommu_domain *domain);
+
+        /* Domain allocation and freeing by the iommu driver */
+        struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
+        void (*domain_free)(struct iommu_domain *);
+
         int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
         void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
         int (*map)(struct iommu_domain *domain, unsigned long iova,
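With domain_init and domain_destroy gone from struct iommu_ops, the driver rather than the core now owns the allocation of the domain structure, and the core only wraps the two new callbacks. A rough sketch of what the core-side wrappers reduce to under the new ops (simplified, with hypothetical example_* names; the real iommu.c also resolves the bus's iommu_ops and handles further cases):

static struct iommu_domain *example_domain_alloc(const struct iommu_ops *ops,
                                                 unsigned type)
{
        struct iommu_domain *domain;

        /* The driver allocates its own structure and returns the
         * embedded generic handle, or NULL on failure. */
        domain = ops->domain_alloc(type);
        if (!domain)
                return NULL;

        domain->ops = ops;
        domain->type = type;
        return domain;
}

static void example_domain_free(struct iommu_domain *domain)
{
        domain->ops->domain_free(domain);
}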