KVM: x86/pmu: Add kvm_pmu_cap to optimize perf_get_x86_pmu_capability
The information obtained from the interface perf_get_x86_pmu_capability() doesn't change, so an exported "struct x86_pmu_capability" is introduced for all guests in KVM, and it is initialized once before hardware_setup().

Signed-off-by: Like Xu <likexu@tencent.com>
Message-Id: <20220411101946.20262-16-likexu@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 968635abd5
parent 854250329c
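The change caches the host PMU capabilities in one shared global instead of re-querying perf_get_x86_pmu_capability() at every consumer (CPUID leaf 0xA emulation, intel_pmu_refresh(), kvm_init_msr_list()). Below is a minimal, self-contained user-space sketch of that caching pattern; it is not kernel code, and the identifiers pmu_capability, query_pmu_capability, and report_cpuid_leaf are illustrative stand-ins.

/*
 * Sketch of the pattern: query the unchanging PMU capability once at init
 * time, then let every consumer read the cached copy.
 */
#include <stdio.h>
#include <string.h>

struct pmu_capability {
        int version;
        int num_counters_gp;
        int bit_width_gp;
};

/* Stand-in for the host query (perf_get_x86_pmu_capability()). */
static void query_pmu_capability(struct pmu_capability *cap)
{
        cap->version = 2;
        cap->num_counters_gp = 8;
        cap->bit_width_gp = 48;
}

/* Shared cache, filled exactly once before any consumer runs. */
static struct pmu_capability pmu_cap;

static void init_pmu_capability(void)
{
        query_pmu_capability(&pmu_cap);

        /* No architectural PMU on the host: expose nothing to consumers. */
        if (!pmu_cap.version)
                memset(&pmu_cap, 0, sizeof(pmu_cap));
}

/* Consumers read the cache instead of re-issuing the query. */
static void report_cpuid_leaf(void)
{
        printf("version=%d counters=%d width=%d\n",
               pmu_cap.version, pmu_cap.num_counters_gp, pmu_cap.bit_width_gp);
}

int main(void)
{
        init_pmu_capability();  /* analogous to kvm_init_pmu_capability() */
        report_cpuid_leaf();    /* analogous to the CPUID 0xA consumer in the diff below */
        return 0;
}

Initializing the cache once before hardware_setup() mirrors what kvm_init_pmu_capability() does in the patch, so every later reader sees the same (already clamped) values.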
arch/x86/kvm/cpuid.c
@@ -868,7 +868,6 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
         case 9:
                 break;
         case 0xa: { /* Architectural Performance Monitoring */
-                struct x86_pmu_capability cap;
                 union cpuid10_eax eax;
                 union cpuid10_edx edx;
 
@@ -877,30 +876,20 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
                         break;
                 }
 
-                perf_get_x86_pmu_capability(&cap);
-
-                /*
-                 * The guest architecture pmu is only supported if the architecture
-                 * pmu exists on the host and the module parameters allow it.
-                 */
-                if (!cap.version || !enable_pmu)
-                        memset(&cap, 0, sizeof(cap));
-
-                eax.split.version_id = min(cap.version, 2);
-                eax.split.num_counters = cap.num_counters_gp;
-                eax.split.bit_width = cap.bit_width_gp;
-                eax.split.mask_length = cap.events_mask_len;
+                eax.split.version_id = kvm_pmu_cap.version;
+                eax.split.num_counters = kvm_pmu_cap.num_counters_gp;
+                eax.split.bit_width = kvm_pmu_cap.bit_width_gp;
+                eax.split.mask_length = kvm_pmu_cap.events_mask_len;
+                edx.split.num_counters_fixed = kvm_pmu_cap.num_counters_fixed;
+                edx.split.bit_width_fixed = kvm_pmu_cap.bit_width_fixed;
 
-                edx.split.num_counters_fixed =
-                        min(cap.num_counters_fixed, KVM_PMC_MAX_FIXED);
-                edx.split.bit_width_fixed = cap.bit_width_fixed;
-                if (cap.version)
+                if (kvm_pmu_cap.version)
                         edx.split.anythread_deprecated = 1;
                 edx.split.reserved1 = 0;
                 edx.split.reserved2 = 0;
 
                 entry->eax = eax.full;
-                entry->ebx = cap.events_mask;
+                entry->ebx = kvm_pmu_cap.events_mask;
                 entry->ecx = 0;
                 entry->edx = edx.full;
                 break;
arch/x86/kvm/pmu.c
@@ -24,6 +24,9 @@
 /* This is enough to filter the vast majority of currently defined events. */
 #define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
 
+struct x86_pmu_capability __read_mostly kvm_pmu_cap;
+EXPORT_SYMBOL_GPL(kvm_pmu_cap);
+
 /* NOTE:
  * - Each perf counter is defined as "struct kvm_pmc";
  * - There are two types of perf counters: general purpose (gp) and fixed.
arch/x86/kvm/pmu.h
@@ -163,6 +163,24 @@ static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
         return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
 }
 
+extern struct x86_pmu_capability kvm_pmu_cap;
+
+static inline void kvm_init_pmu_capability(void)
+{
+        perf_get_x86_pmu_capability(&kvm_pmu_cap);
+
+        /*
+         * Only support guest architectural pmu on
+         * a host with architectural pmu.
+         */
+        if (!kvm_pmu_cap.version)
+                memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
+
+        kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
+        kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
+                                             KVM_PMC_MAX_FIXED);
+}
+
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
 void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
 void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);
@@ -181,6 +199,7 @@ void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
 void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
 void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id);
+void kvm_init_pmu_capability(void);
 
 bool is_vmware_backdoor_pmc(u32 pmc_idx);
 
arch/x86/kvm/vmx/pmu_intel.c
@@ -515,8 +515,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 {
         struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
         struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
-
-        struct x86_pmu_capability x86_pmu;
         struct kvm_cpuid_entry2 *entry;
         union cpuid10_eax eax;
         union cpuid10_edx edx;
@@ -544,13 +542,14 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
                 return;
 
         vcpu->arch.ia32_misc_enable_msr |= MSR_IA32_MISC_ENABLE_EMON;
-        perf_get_x86_pmu_capability(&x86_pmu);
 
         pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
-                                         x86_pmu.num_counters_gp);
-        eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp);
+                                         kvm_pmu_cap.num_counters_gp);
+        eax.split.bit_width = min_t(int, eax.split.bit_width,
+                                    kvm_pmu_cap.bit_width_gp);
         pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
-        eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len);
+        eax.split.mask_length = min_t(int, eax.split.mask_length,
+                                      kvm_pmu_cap.events_mask_len);
         pmu->available_event_types = ~entry->ebx &
                                         ((1ull << eax.split.mask_length) - 1);
 
@@ -560,9 +559,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
         pmu->nr_arch_fixed_counters =
                         min3(ARRAY_SIZE(fixed_pmc_events),
                              (size_t) edx.split.num_counters_fixed,
-                             (size_t) x86_pmu.num_counters_fixed);
-        edx.split.bit_width_fixed = min_t(int,
-                        edx.split.bit_width_fixed, x86_pmu.bit_width_fixed);
+                             (size_t)kvm_pmu_cap.num_counters_fixed);
+        edx.split.bit_width_fixed = min_t(int, edx.split.bit_width_fixed,
+                                          kvm_pmu_cap.bit_width_fixed);
         pmu->counter_bitmask[KVM_PMC_FIXED] =
                         ((u64)1 << edx.split.bit_width_fixed) - 1;
         setup_fixed_pmc_eventsel(pmu);
arch/x86/kvm/x86.c
@@ -6667,15 +6667,12 @@ out:
 
 static void kvm_init_msr_list(void)
 {
-        struct x86_pmu_capability x86_pmu;
         u32 dummy[2];
         unsigned i;
 
         BUILD_BUG_ON_MSG(KVM_PMC_MAX_FIXED != 3,
                          "Please update the fixed PMCs in msrs_to_saved_all[]");
 
-        perf_get_x86_pmu_capability(&x86_pmu);
-
         num_msrs_to_save = 0;
         num_emulated_msrs = 0;
         num_msr_based_features = 0;
@@ -6727,12 +6724,12 @@ static void kvm_init_msr_list(void)
                         break;
                 case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR0 + 17:
                         if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
-                            min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
+                            min(INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp))
                                 continue;
                         break;
                 case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL0 + 17:
                         if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
-                            min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
+                            min(INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp))
                                 continue;
                         break;
                 case MSR_IA32_XFD:
@@ -11721,6 +11718,8 @@ int kvm_arch_hardware_setup(void *opaque)
         if (boot_cpu_has(X86_FEATURE_XSAVES))
                 rdmsrl(MSR_IA32_XSS, host_xss);
 
+        kvm_init_pmu_capability();
+
         r = ops->hardware_setup();
         if (r != 0)
                 return r;