Merge branch 'kvm-arm64/kill_oprofile_dependency' into kvmarm-master/next
Signed-off-by: Marc Zyngier <maz@kernel.org>
@@ -50,13 +50,7 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {
 
 int kvm_perf_init(void)
 {
-        /*
-         * Check if HW_PERF_EVENTS are supported by checking the number of
-         * hardware performance counters. This could ensure the presence of
-         * a physical PMU and CONFIG_PERF_EVENT is selected.
-         */
-        if (IS_ENABLED(CONFIG_ARM_PMU) && perf_num_counters() > 0
-            && !is_protected_kvm_enabled())
+        if (kvm_pmu_probe_pmuver() != 0xf && !is_protected_kvm_enabled())
                 static_branch_enable(&kvm_arm_pmu_available);
 
         return perf_register_guest_info_callbacks(&kvm_guest_cbs);
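
The hunk above is the core of the series: instead of inferring PMU presence from the oprofile-era perf_num_counters() helper, KVM now asks its own PMU emulation code. A minimal annotated sketch of the new check, assuming (as the arm_pmu.h hunk further down suggests) that a kvm_pmu_probe_pmuver() return value of 0xf stands for an IMPLEMENTATION DEFINED, i.e. unusable, PMU; the comments are editorial, the code itself is taken from the diff:

    int kvm_perf_init(void)
    {
            /*
             * 0xf (IMPLEMENTATION DEFINED) doubles as "no PMU that KVM can
             * virtualise"; the !CONFIG_HW_PERF_EVENTS stub further down
             * returns it for exactly that reason.
             */
            if (kvm_pmu_probe_pmuver() != 0xf && !is_protected_kvm_enabled())
                    static_branch_enable(&kvm_arm_pmu_available);

            /* Guest/host attribution callbacks for perf, unchanged here. */
            return perf_register_guest_info_callbacks(&kvm_guest_cbs);
    }
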
@@ -739,7 +739,7 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
         kvm_pmu_create_perf_event(vcpu, select_idx);
 }
 
-static int kvm_pmu_probe_pmuver(void)
+int kvm_pmu_probe_pmuver(void)
 {
         struct perf_event_attr attr = { };
         struct perf_event *event;
@@ -23,27 +23,6 @@
 #include <asm/sysinfo.h>
 #include <asm/unwind.h>
 
-const char *perf_pmu_name(void)
-{
-        if (cpum_cf_avail() || cpum_sf_avail())
-                return "CPU-Measurement Facilities (CPU-MF)";
-        return "pmu";
-}
-EXPORT_SYMBOL(perf_pmu_name);
-
-int perf_num_counters(void)
-{
-        int num = 0;
-
-        if (cpum_cf_avail())
-                num += PERF_CPUM_CF_MAX_CTR;
-        if (cpum_sf_avail())
-                num += PERF_CPUM_SF_MAX_CTR;
-
-        return num;
-}
-EXPORT_SYMBOL(perf_num_counters);
-
 static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)
 {
         struct stack_frame *stack = (struct stack_frame *) regs->gprs[15];
@@ -57,24 +57,6 @@ static inline int sh_pmu_initialized(void)
         return !!sh_pmu;
 }
 
-const char *perf_pmu_name(void)
-{
-        if (!sh_pmu)
-                return NULL;
-
-        return sh_pmu->name;
-}
-EXPORT_SYMBOL_GPL(perf_pmu_name);
-
-int perf_num_counters(void)
-{
-        if (!sh_pmu)
-                return 0;
-
-        return sh_pmu->num_events;
-}
-EXPORT_SYMBOL_GPL(perf_num_counters);
-
 /*
  * Release the PMU if this is the last perf_event.
  */
@@ -581,33 +581,6 @@ static const struct attribute_group armpmu_common_attr_group = {
         .attrs = armpmu_common_attrs,
 };
 
-/* Set at runtime when we know what CPU type we are. */
-static struct arm_pmu *__oprofile_cpu_pmu;
-
-/*
- * Despite the names, these two functions are CPU-specific and are used
- * by the OProfile/perf code.
- */
-const char *perf_pmu_name(void)
-{
-        if (!__oprofile_cpu_pmu)
-                return NULL;
-
-        return __oprofile_cpu_pmu->name;
-}
-EXPORT_SYMBOL_GPL(perf_pmu_name);
-
-int perf_num_counters(void)
-{
-        int max_events = 0;
-
-        if (__oprofile_cpu_pmu != NULL)
-                max_events = __oprofile_cpu_pmu->num_events;
-
-        return max_events;
-}
-EXPORT_SYMBOL_GPL(perf_num_counters);
-
 static int armpmu_count_irq_users(const int irq)
 {
         int cpu, count = 0;
@@ -979,9 +952,6 @@ int armpmu_register(struct arm_pmu *pmu)
         if (ret)
                 goto out_destroy;
 
-        if (!__oprofile_cpu_pmu)
-                __oprofile_cpu_pmu = pmu;
-
         pr_info("enabled with %s PMU driver, %d counters available%s\n",
                 pmu->name, pmu->num_events,
                 has_nmi ? ", using NMIs" : "");
@@ -61,6 +61,7 @@ int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
 int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
                             struct kvm_device_attr *attr);
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
+int kvm_pmu_probe_pmuver(void);
 #else
 struct kvm_pmu {
 };
@@ -116,6 +117,9 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 {
         return 0;
 }
+
+static inline int kvm_pmu_probe_pmuver(void) { return 0xf; }
+
 #endif
 
 #endif
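
Why the stub above returns 0xf: it makes the new kvm_perf_init() check collapse to a constant-false condition on hosts built without PMU support, mirroring what the removed IS_ENABLED(CONFIG_ARM_PMU) && perf_num_counters() > 0 test used to guard. A tiny standalone illustration of that reduction (plain C; is_protected_kvm_enabled() is replaced by a placeholder that assumes a non-protected host, and only kvm_pmu_probe_pmuver() is taken from the diff):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the !CONFIG_HW_PERF_EVENTS stub added in the hunk above. */
    static inline int kvm_pmu_probe_pmuver(void) { return 0xf; }

    /* Placeholder for is_protected_kvm_enabled(); assume non-protected. */
    static bool is_protected_kvm_enabled(void) { return false; }

    int main(void)
    {
            /* Same condition as the new kvm_perf_init(): 0xf means "no PMU". */
            bool pmu_available = (kvm_pmu_probe_pmuver() != 0xf &&
                                  !is_protected_kvm_enabled());

            printf("guest PMU advertised: %s\n", pmu_available ? "yes" : "no");
            return 0;
    }
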
@@ -951,8 +951,6 @@ extern void perf_event_itrace_started(struct perf_event *event);
 extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
 extern void perf_pmu_unregister(struct pmu *pmu);
 
-extern int perf_num_counters(void);
-extern const char *perf_pmu_name(void);
 extern void __perf_event_task_sched_in(struct task_struct *prev,
                                        struct task_struct *task);
 extern void __perf_event_task_sched_out(struct task_struct *prev,
@@ -580,11 +580,6 @@ static u64 perf_event_time(struct perf_event *event);
 
 void __weak perf_event_print_debug(void) { }
 
-extern __weak const char *perf_pmu_name(void)
-{
-        return "pmu";
-}
-
 static inline u64 perf_clock(void)
 {
         return local_clock();