KVM: MMU: introduce PT_MAX_HUGEPAGE_LEVEL
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
committed by
Paolo Bonzini
parent
0d5367900a
commit
8a3d08f16f
@@ -811,8 +811,7 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn)
|
|||||||
int i;
|
int i;
|
||||||
|
|
||||||
slot = gfn_to_memslot(kvm, gfn);
|
slot = gfn_to_memslot(kvm, gfn);
|
||||||
for (i = PT_DIRECTORY_LEVEL;
|
for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
|
||||||
i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
|
|
||||||
linfo = lpage_info_slot(gfn, slot, i);
|
linfo = lpage_info_slot(gfn, slot, i);
|
||||||
linfo->write_count += 1;
|
linfo->write_count += 1;
|
||||||
}
|
}
|
||||||
@@ -826,8 +825,7 @@ static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
|
|||||||
int i;
|
int i;
|
||||||
|
|
||||||
slot = gfn_to_memslot(kvm, gfn);
|
slot = gfn_to_memslot(kvm, gfn);
|
||||||
for (i = PT_DIRECTORY_LEVEL;
|
for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
|
||||||
i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
|
|
||||||
linfo = lpage_info_slot(gfn, slot, i);
|
linfo = lpage_info_slot(gfn, slot, i);
|
||||||
linfo->write_count -= 1;
|
linfo->write_count -= 1;
|
||||||
WARN_ON(linfo->write_count < 0);
|
WARN_ON(linfo->write_count < 0);
|
||||||
@@ -858,8 +856,7 @@ static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
|
|||||||
|
|
||||||
page_size = kvm_host_page_size(kvm, gfn);
|
page_size = kvm_host_page_size(kvm, gfn);
|
||||||
|
|
||||||
for (i = PT_PAGE_TABLE_LEVEL;
|
for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
|
||||||
i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) {
|
|
||||||
if (page_size >= KVM_HPAGE_SIZE(i))
|
if (page_size >= KVM_HPAGE_SIZE(i))
|
||||||
ret = i;
|
ret = i;
|
||||||
else
|
else
|
||||||
@@ -1344,8 +1341,7 @@ static bool rmap_write_protect(struct kvm *kvm, u64 gfn)
|
|||||||
|
|
||||||
slot = gfn_to_memslot(kvm, gfn);
|
slot = gfn_to_memslot(kvm, gfn);
|
||||||
|
|
||||||
for (i = PT_PAGE_TABLE_LEVEL;
|
for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
|
||||||
i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
|
|
||||||
rmapp = __gfn_to_rmap(gfn, i, slot);
|
rmapp = __gfn_to_rmap(gfn, i, slot);
|
||||||
write_protected |= __rmap_write_protect(kvm, rmapp, true);
|
write_protected |= __rmap_write_protect(kvm, rmapp, true);
|
||||||
}
|
}
|
||||||
@@ -1451,7 +1447,7 @@ static int kvm_handle_hva_range(struct kvm *kvm,
|
|||||||
gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
|
gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
|
||||||
|
|
||||||
for (j = PT_PAGE_TABLE_LEVEL;
|
for (j = PT_PAGE_TABLE_LEVEL;
|
||||||
j < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++j) {
|
j <= PT_MAX_HUGEPAGE_LEVEL; ++j) {
|
||||||
unsigned long idx, idx_end;
|
unsigned long idx, idx_end;
|
||||||
unsigned long *rmapp;
|
unsigned long *rmapp;
|
||||||
gfn_t gfn = gfn_start;
|
gfn_t gfn = gfn_start;
|
||||||
@@ -4416,8 +4412,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
|
|||||||
|
|
||||||
spin_lock(&kvm->mmu_lock);
|
spin_lock(&kvm->mmu_lock);
|
||||||
|
|
||||||
for (i = PT_PAGE_TABLE_LEVEL;
|
for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
|
||||||
i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
|
|
||||||
unsigned long *rmapp;
|
unsigned long *rmapp;
|
||||||
unsigned long last_index, index;
|
unsigned long last_index, index;
|
||||||
|
|
||||||
@@ -4573,8 +4568,8 @@ void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
|
|||||||
|
|
||||||
spin_lock(&kvm->mmu_lock);
|
spin_lock(&kvm->mmu_lock);
|
||||||
|
|
||||||
for (i = PT_PAGE_TABLE_LEVEL + 1; /* skip rmap for 4K page */
|
/* skip rmap for 4K page */
|
||||||
i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
|
for (i = PT_PAGE_TABLE_LEVEL + 1; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
|
||||||
unsigned long *rmapp;
|
unsigned long *rmapp;
|
||||||
unsigned long last_index, index;
|
unsigned long last_index, index;
|
||||||
|
|
||||||
@@ -4611,8 +4606,7 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
|
|||||||
|
|
||||||
spin_lock(&kvm->mmu_lock);
|
spin_lock(&kvm->mmu_lock);
|
||||||
|
|
||||||
for (i = PT_PAGE_TABLE_LEVEL;
|
for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
|
||||||
i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
|
|
||||||
unsigned long *rmapp;
|
unsigned long *rmapp;
|
||||||
unsigned long last_index, index;
|
unsigned long last_index, index;
|
||||||
|
|
||||||
|
@@ -43,6 +43,7 @@
|
|||||||
#define PT_PDPE_LEVEL 3
|
#define PT_PDPE_LEVEL 3
|
||||||
#define PT_DIRECTORY_LEVEL 2
|
#define PT_DIRECTORY_LEVEL 2
|
||||||
#define PT_PAGE_TABLE_LEVEL 1
|
#define PT_PAGE_TABLE_LEVEL 1
|
||||||
|
#define PT_MAX_HUGEPAGE_LEVEL (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1)
|
||||||
|
|
||||||
static inline u64 rsvd_bits(int s, int e)
|
static inline u64 rsvd_bits(int s, int e)
|
||||||
{
|
{
|
||||||
|
Reference in New Issue
Block a user