iommu/amd: Use put_pages_list
page->freelist is for the use of slab. We already have the ability to free
a list of pages in the core mm, but it requires the use of a list_head and
for the pages to be chained together through page->lru. Switch the AMD
IOMMU code over to using put_pages_list().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
[rm: split from original patch, cosmetic tweaks]
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Link: https://lore.kernel.org/r/73af128f651aaa1f38f69e586c66765a88ad2de0.1639753638.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit ce00eece69
parent 6b3106e9ba
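For readers unfamiliar with the core-mm facility being adopted, here is a minimal sketch (not the patch itself) of the idiom: page-table pages are collected on an ordinary list_head by linking them through page->lru, and the whole batch is released with a single put_pages_list() call once the IOTLB flush has completed. The helpers queue_table_page() and example_teardown() are illustrative names only; put_pages_list(), LIST_HEAD() and the page->lru linkage are the real interfaces the commit switches to.

#include <linux/list.h>
#include <linux/mm.h>

/* Illustrative helper: queue one page-table page for deferred freeing. */
static void queue_table_page(u64 *pt, struct list_head *freelist)
{
	struct page *p = virt_to_page(pt);

	/* Chain through page->lru; page->freelist belongs to slab. */
	list_add_tail(&p->lru, freelist);
}

/* Illustrative caller: gather pages, flush, then free them in one go. */
static void example_teardown(u64 *root)
{
	LIST_HEAD(freelist);	/* empty list_head on the stack */

	queue_table_page(root, &freelist);
	/* ... lower-level table pages would be queued the same way ... */

	/* After the IOTLB flush, hand the whole list back to the allocator. */
	put_pages_list(&freelist);
}

This replaces the hand-rolled chain the old code built through page->freelist and walked in free_page_list(), which is exactly what the diff below deletes.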
@@ -74,26 +74,14 @@ static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
  *
  ****************************************************************************/
 
-static void free_page_list(struct page *freelist)
-{
-	while (freelist != NULL) {
-		unsigned long p = (unsigned long)page_address(freelist);
-
-		freelist = freelist->freelist;
-		free_page(p);
-	}
-}
-
-static struct page *free_pt_page(u64 *pt, struct page *freelist)
+static void free_pt_page(u64 *pt, struct list_head *freelist)
 {
 	struct page *p = virt_to_page(pt);
 
-	p->freelist = freelist;
-
-	return p;
+	list_add_tail(&p->lru, freelist);
 }
 
-static struct page *free_pt_lvl(u64 *pt, struct page *freelist, int lvl)
+static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl)
 {
 	u64 *p;
 	int i;
@@ -114,22 +102,22 @@ static struct page *free_pt_lvl(u64 *pt, struct page *freelist, int lvl)
 		 */
 		p = IOMMU_PTE_PAGE(pt[i]);
 		if (lvl > 2)
-			freelist = free_pt_lvl(p, freelist, lvl - 1);
+			free_pt_lvl(p, freelist, lvl - 1);
 		else
-			freelist = free_pt_page(p, freelist);
+			free_pt_page(p, freelist);
 	}
 
-	return free_pt_page(pt, freelist);
+	free_pt_page(pt, freelist);
 }
 
-static struct page *free_sub_pt(u64 *root, int mode, struct page *freelist)
+static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)
 {
 	switch (mode) {
 	case PAGE_MODE_NONE:
 	case PAGE_MODE_7_LEVEL:
 		break;
 	case PAGE_MODE_1_LEVEL:
-		freelist = free_pt_page(root, freelist);
+		free_pt_page(root, freelist);
 		break;
 	case PAGE_MODE_2_LEVEL:
 	case PAGE_MODE_3_LEVEL:
@@ -141,8 +129,6 @@ static struct page *free_sub_pt(u64 *root, int mode, struct page *freelist)
 	default:
 		BUG();
 	}
-
-	return freelist;
 }
 
 void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
@@ -350,7 +336,7 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
 	return pte;
 }
 
-static struct page *free_clear_pte(u64 *pte, u64 pteval, struct page *freelist)
+static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
 {
 	u64 *pt;
 	int mode;
@@ -361,12 +347,12 @@ static struct page *free_clear_pte(u64 *pte, u64 pteval, struct page *freelist)
 	}
 
 	if (!IOMMU_PTE_PRESENT(pteval))
-		return freelist;
+		return;
 
 	pt   = IOMMU_PTE_PAGE(pteval);
 	mode = IOMMU_PTE_MODE(pteval);
 
-	return free_sub_pt(pt, mode, freelist);
+	free_sub_pt(pt, mode, freelist);
 }
 
 /*
@@ -380,7 +366,7 @@ static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
 			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
 	struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
-	struct page *freelist = NULL;
+	LIST_HEAD(freelist);
 	bool updated = false;
 	u64 __pte, *pte;
 	int ret, i, count;
@@ -400,9 +386,9 @@ static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
 		goto out;
 
 	for (i = 0; i < count; ++i)
-		freelist = free_clear_pte(&pte[i], pte[i], freelist);
+		free_clear_pte(&pte[i], pte[i], &freelist);
 
-	if (freelist != NULL)
+	if (!list_empty(&freelist))
 		updated = true;
 
 	if (count > 1) {
@@ -437,7 +423,7 @@ out:
 	}
 
 	/* Everything flushed out, free pages now */
-	free_page_list(freelist);
+	put_pages_list(&freelist);
 
 	return ret;
 }
@@ -499,7 +485,7 @@ static void v1_free_pgtable(struct io_pgtable *iop)
 {
 	struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
 	struct protection_domain *dom;
-	struct page *freelist = NULL;
+	LIST_HEAD(freelist);
 
 	if (pgtable->mode == PAGE_MODE_NONE)
 		return;
@@ -516,9 +502,9 @@ static void v1_free_pgtable(struct io_pgtable *iop)
 	BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
 	       pgtable->mode > PAGE_MODE_6_LEVEL);
 
-	freelist = free_sub_pt(pgtable->root, pgtable->mode, freelist);
+	free_sub_pt(pgtable->root, pgtable->mode, &freelist);
 
-	free_page_list(freelist);
+	put_pages_list(&freelist);
 }
 
 static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)