x86/mm/pageattr: Add a PUD error unwinding path
In case we encounter an error during the mapping of a region, we want to unwind what we've established so far, exactly the way we did the mapping. This is the PUD part, kept deliberately small for easier review. Signed-off-by: Borislav Petkov <bp@suse.de> Signed-off-by: Matt Fleming <matt.fleming@intel.com>
This commit is contained in:
parent
c6b6f363f7
commit
0bb8aeee7b
@ -666,6 +666,51 @@ static int split_large_page(pte_t *kpte, unsigned long address)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
 * No-op stub for PMD-level unwinding — this commit deliberately covers only
 * the PUD part (per the commit message); presumably a follow-on patch
 * replaces this with a real implementation.
 */
#define unmap_pmd_range(pud, start, pre_end) do {} while (0)
|
||||||
|
|
||||||
|
/*
 * Tear down the PUD-level mappings covering [start, end), walking the
 * range the same way populate_pud() built it: a partial leading chunk up
 * to the next 1G boundary, then whole 1G entries, then a partial tail.
 */
static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end)
{
	pud_t *pud = pud_offset(pgd, start);

	/* Leading partial chunk: start is not on a 1G boundary. */
	if (start & (PUD_SIZE - 1)) {
		unsigned long boundary = (start + PUD_SIZE) & PUD_MASK;
		unsigned long partial_end = min_t(unsigned long, end, boundary);

		unmap_pmd_range(pud, start, partial_end);

		start = partial_end;
		pud++;
	}

	/*
	 * Whole 1G entries: clear large mappings directly, otherwise
	 * descend to the PMD level.
	 */
	for (; end - start >= PUD_SIZE; start += PUD_SIZE, pud++) {
		if (pud_large(*pud))
			pud_clear(pud);
		else
			unmap_pmd_range(pud, start, start + PUD_SIZE);
	}

	/* Trailing partial chunk smaller than 1G (2M leftovers). */
	if (start < end)
		unmap_pmd_range(pud, start, end);

	/*
	 * The PUD page itself is not freed here — populate_pgd()'s error
	 * path takes care of that.
	 */
}
|
||||||
|
|
||||||
static int alloc_pte_page(pmd_t *pmd)
|
static int alloc_pte_page(pmd_t *pmd)
|
||||||
{
|
{
|
||||||
pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
|
pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
|
||||||
@ -883,9 +928,20 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
|
|||||||
pgprot_val(pgprot) |= pgprot_val(cpa->mask_set);
|
pgprot_val(pgprot) |= pgprot_val(cpa->mask_set);
|
||||||
|
|
||||||
ret = populate_pud(cpa, addr, pgd_entry, pgprot);
|
ret = populate_pud(cpa, addr, pgd_entry, pgprot);
|
||||||
if (ret < 0)
|
if (ret < 0) {
|
||||||
return ret;
|
unmap_pud_range(pgd_entry, addr,
|
||||||
|
addr + (cpa->numpages << PAGE_SHIFT));
|
||||||
|
|
||||||
|
if (allocd_pgd) {
|
||||||
|
/*
|
||||||
|
* If I allocated this PUD page, I can just as well
|
||||||
|
* free it in this error path.
|
||||||
|
*/
|
||||||
|
pgd_clear(pgd_entry);
|
||||||
|
free_page((unsigned long)pud);
|
||||||
|
}
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
cpa->numpages = ret;
|
cpa->numpages = ret;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
Loading…
x
Reference in New Issue
Block a user