KVM: arm64: pkvm: Refcount the pages shared with EL2
In order to simplify the page tracking infrastructure at EL2 in nVHE protected mode, move the responsibility of refcounting pages that are shared multiple times to the host. In order to do so, let's create a red-black tree tracking all the PFNs that have been shared, along with a refcount.

Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20211215161232.1480836-8-qperret@google.com
parent 3f868e142c
commit a83e2191b7
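The scheme the message describes is an insert-or-bump lookup keyed by PFN: the first share of a page inserts a node with a count of 1 and issues the EL2 hypercall, and any further share of the same page only increments the count. As a rough, self-contained illustration of that pattern before reading the diff, here is a user-space C sketch in which a plain binary search tree stands in for the kernel rb-tree, with no locking and no hypercall; all identifiers below are invented for the example.

/*
 * Illustrative sketch only: a user-space stand-in for the refcounting
 * scheme described above. A plain binary search tree replaces the
 * kernel rb-tree, calloc() replaces kzalloc(), and there is no lock or
 * hypercall. All identifiers here are made up for the example.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct shared_pfn {
	uint64_t pfn;
	int count;
	struct shared_pfn *left, *right;
};

static struct shared_pfn *shared_pfns;

/* Return the link holding @pfn, or the empty link where it would be inserted. */
static struct shared_pfn **find_shared_pfn_link(uint64_t pfn)
{
	struct shared_pfn **link = &shared_pfns;

	while (*link && (*link)->pfn != pfn) {
		if ((*link)->pfn < pfn)
			link = &(*link)->left;
		else
			link = &(*link)->right;
	}

	return link;
}

/* Return 1 when this is the first share of @pfn (i.e. EL2 would be told), else 0. */
static int share_pfn(uint64_t pfn)
{
	struct shared_pfn **link = find_shared_pfn_link(pfn);

	if (*link) {
		(*link)->count++;	/* Already shared: just take a reference. */
		return 0;
	}

	*link = calloc(1, sizeof(**link));
	if (!*link)
		abort();
	(*link)->pfn = pfn;
	(*link)->count = 1;		/* First share: this is where EL2 would be called. */

	return 1;
}

int main(void)
{
	printf("%d\n", share_pfn(42));	/* 1: first share of PFN 42 */
	printf("%d\n", share_pfn(42));	/* 0: second share only bumps the refcount */
	printf("%d\n", share_pfn(7));	/* 1: first share of PFN 7 */

	return 0;
}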
arch/arm64/kvm/mmu.c
@@ -281,23 +281,72 @@ static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
 	}
 }
 
-static int pkvm_share_hyp(phys_addr_t start, phys_addr_t end)
-{
-	phys_addr_t addr;
-	int ret;
+struct hyp_shared_pfn {
+	u64 pfn;
+	int count;
+	struct rb_node node;
+};
 
-	for (addr = ALIGN_DOWN(start, PAGE_SIZE); addr < end; addr += PAGE_SIZE) {
-		ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp,
-					__phys_to_pfn(addr));
-		if (ret)
-			return ret;
+static DEFINE_MUTEX(hyp_shared_pfns_lock);
+static struct rb_root hyp_shared_pfns = RB_ROOT;
+
+static struct hyp_shared_pfn *find_shared_pfn(u64 pfn, struct rb_node ***node,
+					      struct rb_node **parent)
+{
+	struct hyp_shared_pfn *this;
+
+	*node = &hyp_shared_pfns.rb_node;
+	*parent = NULL;
+	while (**node) {
+		this = container_of(**node, struct hyp_shared_pfn, node);
+		*parent = **node;
+		if (this->pfn < pfn)
+			*node = &((**node)->rb_left);
+		else if (this->pfn > pfn)
+			*node = &((**node)->rb_right);
+		else
+			return this;
 	}
 
-	return 0;
+	return NULL;
+}
+
+static int share_pfn_hyp(u64 pfn)
+{
+	struct rb_node **node, *parent;
+	struct hyp_shared_pfn *this;
+	int ret = 0;
+
+	mutex_lock(&hyp_shared_pfns_lock);
+	this = find_shared_pfn(pfn, &node, &parent);
+	if (this) {
+		this->count++;
+		goto unlock;
+	}
+
+	this = kzalloc(sizeof(*this), GFP_KERNEL);
+	if (!this) {
+		ret = -ENOMEM;
+		goto unlock;
+	}
+
+	this->pfn = pfn;
+	this->count = 1;
+	rb_link_node(&this->node, parent, node);
+	rb_insert_color(&this->node, &hyp_shared_pfns);
+	ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp, pfn, 1);
+unlock:
+	mutex_unlock(&hyp_shared_pfns_lock);
+
+	return ret;
 }
 
 int kvm_share_hyp(void *from, void *to)
 {
+	phys_addr_t start, end, cur;
+	u64 pfn;
+	int ret;
+
 	if (is_kernel_in_hyp_mode())
 		return 0;
 
@@ -312,7 +361,16 @@ int kvm_share_hyp(void *from, void *to)
 	if (kvm_host_owns_hyp_mappings())
 		return create_hyp_mappings(from, to, PAGE_HYP);
 
-	return pkvm_share_hyp(__pa(from), __pa(to));
+	start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
+	end = PAGE_ALIGN(__pa(to));
+	for (cur = start; cur < end; cur += PAGE_SIZE) {
+		pfn = __phys_to_pfn(cur);
+		ret = share_pfn_hyp(pfn);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }
 
 /**