KVM: arm64: Implement kvm_pgtable_hyp_unmap() at EL2
Implement kvm_pgtable_hyp_unmap() which can be used to remove hypervisor stage-1 mappings at EL2. Signed-off-by: Will Deacon <will@kernel.org> Signed-off-by: Quentin Perret <qperret@google.com> Signed-off-by: Marc Zyngier <maz@kernel.org> Link: https://lore.kernel.org/r/20211215161232.1480836-6-qperret@google.com
This commit is contained in:
parent
34ec7cbf1e
commit
82bb02445d
@ -251,6 +251,27 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);
|
||||
int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
|
||||
enum kvm_pgtable_prot prot);
|
||||
|
||||
/**
|
||||
* kvm_pgtable_hyp_unmap() - Remove a mapping from a hypervisor stage-1 page-table.
|
||||
* @pgt: Page-table structure initialised by kvm_pgtable_hyp_init().
|
||||
* @addr: Virtual address from which to remove the mapping.
|
||||
* @size: Size of the mapping.
|
||||
*
|
||||
* The offset of @addr within a page is ignored, @size is rounded-up to
|
||||
* the next page boundary and @phys is rounded-down to the previous page
|
||||
* boundary.
|
||||
*
|
||||
* TLB invalidation is performed for each page-table entry cleared during the
|
||||
* unmapping operation and the reference count for the page-table page
|
||||
* containing the cleared entry is decremented, with unreferenced pages being
|
||||
* freed. The unmapping operation will stop early if it encounters either an
|
||||
* invalid page-table entry or a valid block mapping which maps beyond the range
|
||||
* being unmapped.
|
||||
*
|
||||
* Return: Number of bytes unmapped, which may be 0.
|
||||
*/
|
||||
u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
|
||||
|
||||
/**
|
||||
* kvm_get_vtcr() - Helper to construct VTCR_EL2
|
||||
* @mmfr0: Sanitized value of SYS_ID_AA64MMFR0_EL1 register.
|
||||
|
@ -451,6 +451,69 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * Walker state for kvm_pgtable_hyp_unmap(): accumulates the number of bytes
 * actually unmapped and carries the page-table memory-management callbacks
 * into the per-entry walker callback.
 */
struct hyp_unmap_data {
	/* Running total of bytes unmapped so far (leaf entries only). */
	u64 unmapped;
	/* mm ops (page_count/put_page/...) taken from the pgt being walked. */
	struct kvm_pgtable_mm_ops *mm_ops;
};
|
||||
|
||||
/*
 * Per-entry walker callback for kvm_pgtable_hyp_unmap().
 *
 * Invoked for leaf entries and on the post-order visit of table entries
 * (KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST). Clears entries
 * using a break-before-make sequence: zap the PTE, dsb(ishst) to make the
 * update visible to the walker, then invalidate the TLB for the address at
 * this level. Returning a negative error stops the walk early, so the
 * caller's running byte count reflects only what was actually unmapped.
 */
static int hyp_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			    enum kvm_pgtable_walk_flags flag, void * const arg)
{
	kvm_pte_t pte = *ptep, *childp = NULL;
	u64 granule = kvm_granule_size(level);
	struct hyp_unmap_data *data = arg;
	struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;

	/* Stop the walk on the first invalid entry. */
	if (!kvm_pte_valid(pte))
		return -EINVAL;

	if (kvm_pte_table(pte, level)) {
		/* Post-order visit of a table entry: try to free the child page. */
		childp = kvm_pte_follow(pte, mm_ops);

		/*
		 * Only tear down the table if this walk holds the last
		 * reference to it (i.e. it no longer maps anything).
		 */
		if (mm_ops->page_count(childp) != 1)
			return 0;

		kvm_clear_pte(ptep);
		dsb(ishst);
		/* Invalidate by VA, all levels (vae2is): a table went away. */
		__tlbi_level(vae2is, __TLBI_VADDR(addr, 0), level);
	} else {
		/*
		 * Leaf entry: refuse to split a block mapping that extends
		 * beyond the range being unmapped — stop the walk instead.
		 */
		if (end - addr < granule)
			return -EINVAL;

		kvm_clear_pte(ptep);
		dsb(ishst);
		/* Last-level-only invalidation (vale2is) for a leaf entry. */
		__tlbi_level(vale2is, __TLBI_VADDR(addr, 0), level);
		data->unmapped += granule;
	}

	/* Ensure the TLBI has completed before the page can be reused. */
	dsb(ish);
	isb();
	/* Drop this entry's reference on the page-table page containing it. */
	mm_ops->put_page(ptep);

	/* And drop the now-unhooked child table page, freeing it. */
	if (childp)
		mm_ops->put_page(childp);

	return 0;
}
|
||||
|
||||
/*
 * Remove hypervisor stage-1 mappings in [addr, addr + size), walking both
 * leaf entries and table entries post-order so that emptied table pages
 * can be freed. Returns the number of bytes actually unmapped, which may
 * be less than @size if the walk stopped early (invalid entry or an
 * oversized block mapping).
 */
u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	struct hyp_unmap_data data = {
		.mm_ops	= pgt->mm_ops,
	};
	struct kvm_pgtable_walker walker = {
		.cb	= hyp_unmap_walker,
		.arg	= &data,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
	};

	/*
	 * Freeing unreferenced table pages relies on refcounting; without a
	 * page_count callback we cannot safely unmap anything.
	 */
	if (!pgt->mm_ops->page_count)
		return 0;

	kvm_pgtable_walk(pgt, addr, size, &walker);

	return data.unmapped;
}
|
||||
|
||||
int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
|
||||
struct kvm_pgtable_mm_ops *mm_ops)
|
||||
{
|
||||
|
Loading…
x
Reference in New Issue
Block a user