iommu/amd/io-pgtable: Implement map_pages io_pgtable_ops callback
Implement the io_pgtable_ops->map_pages() callback for the AMD driver.
Also deprecate the io_pgtable_ops->map() callback.

Suggested-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Link: https://lore.kernel.org/r/20220825063939.8360-2-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit 8cc233dec3
parent 7e18e42e4b
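For context, the callback this patch implements is declared in struct io_pgtable_ops. A short excerpt, paraphrased from include/linux/io-pgtable.h as it stood around this series (for orientation only; not part of the diff below):

	/* Excerpt (paraphrased) from include/linux/io-pgtable.h */
	struct io_pgtable_ops {
		int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
			   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
		int (*map_pages)(struct io_pgtable_ops *ops, unsigned long iova,
				 phys_addr_t paddr, size_t pgsize, size_t pgcount,
				 int prot, gfp_t gfp, size_t *mapped);
		/* ... unmap, unmap_pages, iova_to_phys, ... */
	};

map_pages() maps pgcount pages of size pgsize starting at iova, and accumulates the number of bytes actually mapped into *mapped so the caller can unwind on partial failure.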
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -360,8 +360,9 @@ static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
  * supporting all features of AMD IOMMU page tables like level skipping
  * and full 64 bit address spaces.
  */
-static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
-			     phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
+			      int prot, gfp_t gfp, size_t *mapped)
 {
 	struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
 	LIST_HEAD(freelist);
@@ -369,39 +370,47 @@ static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
 	u64 __pte, *pte;
 	int ret, i, count;
 
-	BUG_ON(!IS_ALIGNED(iova, size));
-	BUG_ON(!IS_ALIGNED(paddr, size));
+	BUG_ON(!IS_ALIGNED(iova, pgsize));
+	BUG_ON(!IS_ALIGNED(paddr, pgsize));
 
 	ret = -EINVAL;
 	if (!(prot & IOMMU_PROT_MASK))
 		goto out;
 
-	count = PAGE_SIZE_PTE_COUNT(size);
-	pte   = alloc_pte(dom, iova, size, NULL, gfp, &updated);
+	while (pgcount > 0) {
+		count = PAGE_SIZE_PTE_COUNT(pgsize);
+		pte   = alloc_pte(dom, iova, pgsize, NULL, gfp, &updated);
 
-	ret = -ENOMEM;
-	if (!pte)
-		goto out;
+		ret = -ENOMEM;
+		if (!pte)
+			goto out;
 
-	for (i = 0; i < count; ++i)
-		free_clear_pte(&pte[i], pte[i], &freelist);
+		for (i = 0; i < count; ++i)
+			free_clear_pte(&pte[i], pte[i], &freelist);
 
-	if (!list_empty(&freelist))
-		updated = true;
+		if (!list_empty(&freelist))
+			updated = true;
 
-	if (count > 1) {
-		__pte = PAGE_SIZE_PTE(__sme_set(paddr), size);
-		__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
-	} else
-		__pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+		if (count > 1) {
+			__pte = PAGE_SIZE_PTE(__sme_set(paddr), pgsize);
+			__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+		} else
+			__pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
 
-	if (prot & IOMMU_PROT_IR)
-		__pte |= IOMMU_PTE_IR;
-	if (prot & IOMMU_PROT_IW)
-		__pte |= IOMMU_PTE_IW;
+		if (prot & IOMMU_PROT_IR)
+			__pte |= IOMMU_PTE_IR;
+		if (prot & IOMMU_PROT_IW)
+			__pte |= IOMMU_PTE_IW;
 
-	for (i = 0; i < count; ++i)
-		pte[i] = __pte;
+		for (i = 0; i < count; ++i)
+			pte[i] = __pte;
+
+		iova  += pgsize;
+		paddr += pgsize;
+		pgcount--;
+		if (mapped)
+			*mapped += pgsize;
+	}
 
 	ret = 0;
 
@@ -514,7 +523,7 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
 	cfg->oas            = IOMMU_OUT_ADDR_BIT_SIZE,
 	cfg->tlb            = &v1_flush_ops;
 
-	pgtable->iop.ops.map          = iommu_v1_map_page;
+	pgtable->iop.ops.map_pages    = iommu_v1_map_pages;
 	pgtable->iop.ops.unmap        = iommu_v1_unmap_page;
 	pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
 
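The change keeps the original single-mapping logic and wraps it in a while (pgcount > 0) loop: each iteration advances iova and paddr by pgsize and adds pgsize to *mapped, so if alloc_pte() fails partway through, *mapped tells the caller exactly how many bytes were committed and must be unmapped. A hypothetical caller sketch, not taken from this patch (ops, iova and paddr are assumed to be set up elsewhere; IOMMU_PROT_* are the AMD-specific protection flags used in this file):

	size_t mapped = 0;
	int ret;

	/* Map sixteen 4KiB pages with one callback invocation. */
	ret = ops->map_pages(ops, iova, paddr, SZ_4K, 16,
			     IOMMU_PROT_IR | IOMMU_PROT_IW, GFP_KERNEL,
			     &mapped);
	if (ret)
		/* Roll back only the bytes that were actually committed. */
		ops->unmap(ops, iova, mapped, NULL);

Compared with invoking map() once per page, this reduces per-page indirect-call overhead when the core maps large scatterlists.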