KVM: x86/pmu: Tweak kvm_pmu_get_msr to pass 'struct msr_data' in
Change kvm_pmu_get_msr() to take a 'struct msr_data' argument, since the
host_initiated field of the struct can be used by get_msr. This also makes
the API consistent with kvm_pmu_set_msr(). No functional change.

Signed-off-by: Wei Wang <wei.w.wang@intel.com>
Message-Id: <20200529074347.124619-2-like.xu@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit cbd717585b
parent 72de5fa4c1
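For reference, this is the same argument type that kvm_pmu_set_msr() already takes; 'struct msr_data' (arch/x86/include/asm/kvm_host.h) bundles the MSR index and value together with the host_initiated flag that a get_msr implementation can now consult (field comments below are editorial, not from the header):

	struct msr_data {
		bool host_initiated;	/* access comes from a host userspace ioctl */
		u32 index;		/* MSR number */
		u64 data;		/* value read or written */
	};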
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -397,9 +397,9 @@ static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
 		__set_bit(pmc->idx, pmu->pmc_in_use);
 }
 
-int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
+int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
-	return kvm_x86_ops.pmu_ops->get_msr(vcpu, msr, data);
+	return kvm_x86_ops.pmu_ops->get_msr(vcpu, msr_info);
 }
 
 int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -32,7 +32,7 @@ struct kvm_pmu_ops {
 	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
 	int (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
 	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
-	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
+	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 	void (*refresh)(struct kvm_vcpu *vcpu);
 	void (*init)(struct kvm_vcpu *vcpu);
@@ -147,7 +147,7 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
 int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
 int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
 bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
-int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
+int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
 void kvm_pmu_reset(struct kvm_vcpu *vcpu);
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -215,21 +215,22 @@ static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
 	return pmc;
 }
 
-static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
+static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct kvm_pmc *pmc;
+	u32 msr = msr_info->index;
 
 	/* MSR_PERFCTRn */
 	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
 	if (pmc) {
-		*data = pmc_read_counter(pmc);
+		msr_info->data = pmc_read_counter(pmc);
 		return 0;
 	}
 	/* MSR_EVNTSELn */
 	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
 	if (pmc) {
-		*data = pmc->eventsel;
+		msr_info->data = pmc->eventsel;
 		return 0;
 	}
 
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -184,35 +184,38 @@ static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
 	return pmc;
 }
 
-static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
+static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct kvm_pmc *pmc;
+	u32 msr = msr_info->index;
 
 	switch (msr) {
 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
-		*data = pmu->fixed_ctr_ctrl;
+		msr_info->data = pmu->fixed_ctr_ctrl;
 		return 0;
 	case MSR_CORE_PERF_GLOBAL_STATUS:
-		*data = pmu->global_status;
+		msr_info->data = pmu->global_status;
 		return 0;
 	case MSR_CORE_PERF_GLOBAL_CTRL:
-		*data = pmu->global_ctrl;
+		msr_info->data = pmu->global_ctrl;
 		return 0;
 	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
-		*data = pmu->global_ovf_ctrl;
+		msr_info->data = pmu->global_ovf_ctrl;
 		return 0;
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
 			u64 val = pmc_read_counter(pmc);
-			*data = val & pmu->counter_bitmask[KVM_PMC_GP];
+			msr_info->data =
+				val & pmu->counter_bitmask[KVM_PMC_GP];
 			return 0;
 		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
 			u64 val = pmc_read_counter(pmc);
-			*data = val & pmu->counter_bitmask[KVM_PMC_FIXED];
+			msr_info->data =
+				val & pmu->counter_bitmask[KVM_PMC_FIXED];
 			return 0;
 		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
-			*data = pmc->eventsel;
+			msr_info->data = pmc->eventsel;
 			return 0;
 		}
 	}
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3148,7 +3148,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
 	case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
 		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
-			return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
+			return kvm_pmu_get_msr(vcpu, msr_info);
 		msr_info->data = 0;
 		break;
 	case MSR_IA32_UCODE_REV:
@@ -3316,7 +3316,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	default:
 		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
-			return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
+			return kvm_pmu_get_msr(vcpu, msr_info);
 		if (!ignore_msrs) {
 			vcpu_debug_ratelimited(vcpu, "unhandled rdmsr: 0x%x\n",
 					       msr_info->index);
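As a usage illustration only, not part of this patch: with the full 'struct msr_data' available, a vendor get_msr callback can branch on msr_info->host_initiated, e.g. to let host userspace read an MSR that the guest is not allowed to see. The handler below is a hypothetical sketch (the name and the policy are invented for illustration), not code from this series:

	static int example_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
	{
		struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

		switch (msr_info->index) {
		case MSR_CORE_PERF_GLOBAL_STATUS:
			/* Hypothetical policy: only host-initiated reads succeed. */
			if (!msr_info->host_initiated)
				return 1;	/* unhandled: the guest access fails */
			msr_info->data = pmu->global_status;
			return 0;
		default:
			return 1;
		}
	}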