Changes for this cycle were:
- Fix address filtering for Intel/PT,ARM/CoreSight

- Enable Intel/PEBS format 5

- Allow more fixed-function counters for x86

- Intel/PT: Enable not recording Taken-Not-Taken packets

- Add a few branch-types

Signed-off-by: Ingo Molnar <mingo@kernel.org>

-----BEGIN PGP SIGNATURE-----

iQJFBAABCgAvFiEEBpT5eoXrXCwVQwEKEnMQ0APhK1gFAmI4WdIRHG1pbmdvQGtl
cm5lbC5vcmcACgkQEnMQ0APhK1jdTA/7BADTYzFCbdwPzHt2mR8osv7k+pDvYxs9
wxNjyi1X7N8cPkhqgIg9CfdhdyDOqo7+J4fG17f2qbwjNK7b2Fb1/U6ZoZaf+f8F
W0e2LX5KZTXUhkA+TEjrXvYD9FmJaCPM/l2RQg8U7okBs2kb0H6QT2Yn21wd1roC
WwI5KFiWSVS1IzpVLaXjDh+FJfJHd75ReMqJeus+QoVQ9NHeuI+t4DglSB1IBi54
d/zeVXE/Y4dFTQOrU06S2HxcOEptvXZsPmVLvKab/veeGGyWiGPxQpvu6bXm6u3x
0sV+dn67zut2m2pQlUZUucgGTSYIZTpOe+rNukTB9hJ4XeN4/1ohOOCrOuYM+63P
lGFbN1v+LD7Wc6C2eEhw8G5GEL0qbwzFNQ06O3EOFi7C7GKn7WS/ET6XuuMOERFk
uxEPb4pFtbBlJ0SriCprFJSd5NL3PORZlLIhv4hGH5hilLR1TFeKDuwZaM4noQxU
dL3rKGLi9H+P46Eni9H28+0gDISbv1xL+WivHOFQNmhBqAZO52ZcF3J+dgBaR1B5
pBxVTycFpZMjxSZnqTE0gMsFaLIpVGc+75Chns1rajR0mEtRtJUQUbYz4tK4zb0E
dZR1p+VF6+DYmSRhiqeaTi9uz9oE8kMa8o/EcbFIg/9BgEnUwJXU20bjnar30xQ7
9OIn7r9hjHI=
=XPuo
-----END PGP SIGNATURE-----

Merge tag 'perf-core-2022-03-21' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 perf event updates from Ingo Molnar:

 - Fix address filtering for Intel/PT,ARM/CoreSight

 - Enable Intel/PEBS format 5

 - Allow more fixed-function counters for x86

 - Intel/PT: Enable not recording Taken-Not-Taken packets

 - Add a few branch-types

* tag 'perf-core-2022-03-21' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/intel/uncore: Fix the build on !CONFIG_PHYS_ADDR_T_64BIT
  perf: Add irq and exception return branch types
  perf/x86/intel/uncore: Make uncore_discovery clean for 64 bit addresses
  perf/x86/intel/pt: Add a capability and config bit for disabling TNTs
  perf/x86/intel/pt: Add a capability and config bit for event tracing
  perf/x86/intel: Increase max number of the fixed counters
  KVM: x86: use the KVM side max supported fixed counter
  perf/x86/intel: Enable PEBS format 5
  perf/core: Allow kernel address filter when not filtering the kernel
  perf/x86/intel/pt: Fix address filter config for 32-bit kernel
  perf/core: Fix address filter parser for multiple filters
  x86: Share definition of __is_canonical_address()
  perf/x86/intel/pt: Relax address filter validation
commit 95ab0e8768
@@ -181,6 +181,27 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly =
         EVENT_CONSTRAINT_END
 };
 
+static struct event_constraint intel_v5_gen_event_constraints[] __read_mostly =
+{
+        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
+        FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
+        FIXED_EVENT_CONSTRAINT(0x0500, 4),
+        FIXED_EVENT_CONSTRAINT(0x0600, 5),
+        FIXED_EVENT_CONSTRAINT(0x0700, 6),
+        FIXED_EVENT_CONSTRAINT(0x0800, 7),
+        FIXED_EVENT_CONSTRAINT(0x0900, 8),
+        FIXED_EVENT_CONSTRAINT(0x0a00, 9),
+        FIXED_EVENT_CONSTRAINT(0x0b00, 10),
+        FIXED_EVENT_CONSTRAINT(0x0c00, 11),
+        FIXED_EVENT_CONSTRAINT(0x0d00, 12),
+        FIXED_EVENT_CONSTRAINT(0x0e00, 13),
+        FIXED_EVENT_CONSTRAINT(0x0f00, 14),
+        FIXED_EVENT_CONSTRAINT(0x1000, 15),
+        EVENT_CONSTRAINT_END
+};
+
 static struct event_constraint intel_slm_event_constraints[] __read_mostly =
 {
         FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
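A side note on the table above: the pseudo-encodings follow a regular pattern. For fixed counter idx >= 3 the constraint code is event select 0x00 with unit mask idx + 1, i.e. (idx + 1) << 8, which is why counter 3 is 0x0400 and counter 15 is 0x1000. A standalone sketch (not part of the patch) that reproduces the table:

#include <stdio.h>

/* Reproduce the pseudo-encoding pattern used by the table above:
 * fixed counters 3..15 encode as unit mask (idx + 1), event 0x00. */
int main(void)
{
        for (int idx = 3; idx <= 15; idx++)
                printf("FIXED_EVENT_CONSTRAINT(0x%04x, %d)\n",
                       (idx + 1) << 8, idx);
        return 0;
}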
@@ -6308,7 +6329,9 @@ __init int intel_pmu_init(void)
                         pr_cont("generic architected perfmon v1, ");
                         name = "generic_arch_v1";
                         break;
-                default:
+                case 2:
+                case 3:
+                case 4:
                         /*
                          * default constraints for v2 and up
                          */
@@ -6316,6 +6339,21 @@ __init int intel_pmu_init(void)
                         pr_cont("generic architected perfmon, ");
                         name = "generic_arch_v2+";
                         break;
+                default:
+                        /*
+                         * The default constraints for v5 and up can support up to
+                         * 16 fixed counters. For the fixed counters 4 and later,
+                         * the pseudo-encoding is applied.
+                         * The constraints may be cut according to the CPUID enumeration
+                         * by inserting the EVENT_CONSTRAINT_END.
+                         */
+                        if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED)
+                                x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
+                        intel_v5_gen_event_constraints[x86_pmu.num_counters_fixed].weight = -1;
+                        x86_pmu.event_constraints = intel_v5_gen_event_constraints;
+                        pr_cont("generic architected perfmon, ");
+                        name = "generic_arch_v5+";
+                        break;
                 }
         }
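Writing weight = -1 into the entry at index num_counters_fixed is what the comment means by "inserting the EVENT_CONSTRAINT_END": the end-of-table sentinel is, in effect, an entry whose weight is -1, so the statically sized 16-entry table is truncated to whatever CPUID enumerated. A minimal illustration of the idea, with a simplified stand-in for the real struct:

/* Simplified stand-in for struct event_constraint; the real table
 * walker stops at an entry whose weight is -1 (EVENT_CONSTRAINT_END). */
struct constraint { unsigned long code; int weight; };

static void truncate_table(struct constraint *table, int num_fixed)
{
        table[num_fixed].weight = -1; /* cut the table at the CPUID count */
}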
@@ -1203,7 +1203,10 @@ static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
         if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
                 base = MSR_RELOAD_FIXED_CTR0;
                 idx = hwc->idx - INTEL_PMC_IDX_FIXED;
-                value = ds->pebs_event_reset[MAX_PEBS_EVENTS + idx];
+                if (x86_pmu.intel_cap.pebs_format < 5)
+                        value = ds->pebs_event_reset[MAX_PEBS_EVENTS_FMT4 + idx];
+                else
+                        value = ds->pebs_event_reset[MAX_PEBS_EVENTS + idx];
         }
         wrmsrl(base + idx, value);
 }
@@ -1232,8 +1235,12 @@ void intel_pmu_pebs_enable(struct perf_event *event)
                 }
         }
 
-        if (idx >= INTEL_PMC_IDX_FIXED)
-                idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
+        if (idx >= INTEL_PMC_IDX_FIXED) {
+                if (x86_pmu.intel_cap.pebs_format < 5)
+                        idx = MAX_PEBS_EVENTS_FMT4 + (idx - INTEL_PMC_IDX_FIXED);
+                else
+                        idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
+        }
 
         /*
          * Use auto-reload if possible to save a MSR write in the PMI.
@@ -2204,6 +2211,7 @@ void __init intel_ds_init(void)
                 break;
 
         case 4:
+        case 5:
                 x86_pmu.drain_pebs = intel_pmu_drain_pebs_icl;
                 x86_pmu.pebs_record_size = sizeof(struct pebs_basic);
                 if (x86_pmu.intel_cap.pebs_baseline) {
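The two hunks above encode the same layout rule: in the DS area's pebs_event_reset[] array, fixed-counter slots come after the general-purpose slots, and the number of general-purpose slots grows from 8 (MAX_PEBS_EVENTS_FMT4) to 32 (MAX_PEBS_EVENTS) with PEBS format 5. A small sketch of the index calculation, mirroring the constants from the intel_ds.h hunk further down (illustrative, not kernel code):

#define INTEL_PMC_IDX_FIXED     32
#define MAX_PEBS_EVENTS_FMT4    8       /* GP slots, PEBS format < 5 */
#define MAX_PEBS_EVENTS         32      /* GP slots, PEBS format >= 5 */

/* Map a perf counter index to its slot in ds->pebs_event_reset[]. */
static int pebs_reset_slot(int idx, int pebs_format)
{
        int gp_slots = pebs_format < 5 ? MAX_PEBS_EVENTS_FMT4 : MAX_PEBS_EVENTS;

        if (idx >= INTEL_PMC_IDX_FIXED) /* fixed counters follow the GP slots */
                return gp_slots + (idx - INTEL_PMC_IDX_FIXED);
        return idx;
}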
@@ -1329,10 +1329,10 @@ static int branch_map[X86_BR_TYPE_MAP_MAX] = {
         PERF_BR_SYSCALL,        /* X86_BR_SYSCALL */
         PERF_BR_SYSRET,         /* X86_BR_SYSRET */
         PERF_BR_UNKNOWN,        /* X86_BR_INT */
-        PERF_BR_UNKNOWN,        /* X86_BR_IRET */
+        PERF_BR_ERET,           /* X86_BR_IRET */
         PERF_BR_COND,           /* X86_BR_JCC */
         PERF_BR_UNCOND,         /* X86_BR_JMP */
-        PERF_BR_UNKNOWN,        /* X86_BR_IRQ */
+        PERF_BR_IRQ,            /* X86_BR_IRQ */
         PERF_BR_IND_CALL,       /* X86_BR_IND_CALL */
         PERF_BR_UNKNOWN,        /* X86_BR_ABORT */
         PERF_BR_UNKNOWN,        /* X86_BR_IN_TX */
@@ -13,6 +13,8 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/types.h>
+#include <linux/bits.h>
+#include <linux/limits.h>
 #include <linux/slab.h>
 #include <linux/device.h>
 
@@ -57,6 +59,8 @@ static struct pt_cap_desc {
         PT_CAP(mtc,                     0, CPUID_EBX, BIT(3)),
         PT_CAP(ptwrite,                 0, CPUID_EBX, BIT(4)),
         PT_CAP(power_event_trace,       0, CPUID_EBX, BIT(5)),
+        PT_CAP(event_trace,             0, CPUID_EBX, BIT(7)),
+        PT_CAP(tnt_disable,             0, CPUID_EBX, BIT(8)),
         PT_CAP(topa_output,             0, CPUID_ECX, BIT(0)),
         PT_CAP(topa_multiple_entries,   0, CPUID_ECX, BIT(1)),
         PT_CAP(single_range_output,     0, CPUID_ECX, BIT(2)),
@@ -108,6 +112,8 @@ PMU_FORMAT_ATTR(tsc, "config:10" );
 PMU_FORMAT_ATTR(noretcomp,      "config:11"     );
 PMU_FORMAT_ATTR(ptw,            "config:12"     );
 PMU_FORMAT_ATTR(branch,         "config:13"     );
+PMU_FORMAT_ATTR(event,          "config:31"     );
+PMU_FORMAT_ATTR(notnt,          "config:55"     );
 PMU_FORMAT_ATTR(mtc_period,     "config:14-17"  );
 PMU_FORMAT_ATTR(cyc_thresh,     "config:19-22"  );
 PMU_FORMAT_ATTR(psb_period,     "config:24-27"  );
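These format strings expose the new control bits to userspace: event tracing at config bit 31 and TNT disable at config bit 55, matching the RTIT_CTL_EVENT_EN and RTIT_CTL_NOTNT definitions in the msr-index.h hunk later in this series. A sketch of how a raw PT config word with both new bits set would be composed (illustrative userspace code, not part of the patch):

#include <stdio.h>

/* Values taken from the msr-index.h hunk below. */
#define RTIT_CTL_EVENT_EN       (1ULL << 31)
#define RTIT_CTL_NOTNT          (1ULL << 55)

int main(void)
{
        unsigned long long config = RTIT_CTL_EVENT_EN | RTIT_CTL_NOTNT;

        /* e.g. the attr.config a tool would pass to perf_event_open() */
        printf("config = %#llx\n", config);
        return 0;
}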
@@ -116,6 +122,8 @@ static struct attribute *pt_formats_attr[] = {
         &format_attr_pt.attr,
         &format_attr_cyc.attr,
         &format_attr_pwr_evt.attr,
+        &format_attr_event.attr,
+        &format_attr_notnt.attr,
         &format_attr_fup_on_ptw.attr,
         &format_attr_mtc.attr,
         &format_attr_tsc.attr,
@@ -296,6 +304,8 @@ fail:
                         RTIT_CTL_CYC_PSB        | \
                         RTIT_CTL_MTC            | \
                         RTIT_CTL_PWR_EVT_EN     | \
+                        RTIT_CTL_EVENT_EN       | \
+                        RTIT_CTL_NOTNT          | \
                         RTIT_CTL_FUP_ON_PTW     | \
                         RTIT_CTL_PTW_EN)
 
@@ -350,6 +360,14 @@ static bool pt_event_valid(struct perf_event *event)
             !intel_pt_validate_hw_cap(PT_CAP_power_event_trace))
                 return false;
 
+        if (config & RTIT_CTL_EVENT_EN &&
+            !intel_pt_validate_hw_cap(PT_CAP_event_trace))
+                return false;
+
+        if (config & RTIT_CTL_NOTNT &&
+            !intel_pt_validate_hw_cap(PT_CAP_tnt_disable))
+                return false;
+
         if (config & RTIT_CTL_PTW) {
                 if (!intel_pt_validate_hw_cap(PT_CAP_ptwrite))
                         return false;
@@ -472,7 +490,7 @@ static u64 pt_config_filters(struct perf_event *event)
                         pt->filters.filter[range].msr_b = filter->msr_b;
                 }
 
-                rtit_ctl |= filter->config << pt_address_ranges[range].reg_off;
+                rtit_ctl |= (u64)filter->config << pt_address_ranges[range].reg_off;
         }
 
         return rtit_ctl;
@@ -1348,11 +1366,27 @@ static void pt_addr_filters_fini(struct perf_event *event)
         event->hw.addr_filters = NULL;
 }
 
-static inline bool valid_kernel_ip(unsigned long ip)
+#ifdef CONFIG_X86_64
+/* Clamp to a canonical address greater-than-or-equal-to the address given */
+static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits)
 {
-        return virt_addr_valid(ip) && kernel_ip(ip);
+        return __is_canonical_address(vaddr, vaddr_bits) ?
+               vaddr :
+               -BIT_ULL(vaddr_bits - 1);
 }
 
+/* Clamp to a canonical address less-than-or-equal-to the address given */
+static u64 clamp_to_le_canonical_addr(u64 vaddr, u8 vaddr_bits)
+{
+        return __is_canonical_address(vaddr, vaddr_bits) ?
+               vaddr :
+               BIT_ULL(vaddr_bits - 1) - 1;
+}
+#else
+#define clamp_to_ge_canonical_addr(x, y) (x)
+#define clamp_to_le_canonical_addr(x, y) (x)
+#endif
+
 static int pt_event_addr_filters_validate(struct list_head *filters)
 {
         struct perf_addr_filter *filter;
@@ -1367,14 +1401,6 @@ static int pt_event_addr_filters_validate(struct list_head *filters)
                     filter->action == PERF_ADDR_FILTER_ACTION_START)
                         return -EOPNOTSUPP;
 
-                if (!filter->path.dentry) {
-                        if (!valid_kernel_ip(filter->offset))
-                                return -EINVAL;
-
-                        if (!valid_kernel_ip(filter->offset + filter->size))
-                                return -EINVAL;
-                }
-
                 if (++range > intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
                         return -EOPNOTSUPP;
         }
@@ -1398,9 +1424,26 @@ static void pt_event_addr_filters_sync(struct perf_event *event)
                 if (filter->path.dentry && !fr[range].start) {
                         msr_a = msr_b = 0;
                 } else {
-                        /* apply the offset */
-                        msr_a = fr[range].start;
-                        msr_b = msr_a + fr[range].size - 1;
+                        unsigned long n = fr[range].size - 1;
+                        unsigned long a = fr[range].start;
+                        unsigned long b;
+
+                        if (a > ULONG_MAX - n)
+                                b = ULONG_MAX;
+                        else
+                                b = a + n;
+                        /*
+                         * Apply the offset. 64-bit addresses written to the
+                         * MSRs must be canonical, but the range can encompass
+                         * non-canonical addresses. Since software cannot
+                         * execute at non-canonical addresses, adjusting to
+                         * canonical addresses does not affect the result of the
+                         * address filter.
+                         */
+                        msr_a = clamp_to_ge_canonical_addr(a, boot_cpu_data.x86_virt_bits);
+                        msr_b = clamp_to_le_canonical_addr(b, boot_cpu_data.x86_virt_bits);
+                        if (msr_b < msr_a)
+                                msr_a = msr_b = 0;
                 }
 
                 filters->filter[range].msr_a = msr_a;
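To see what the clamping does, consider 48-bit virtual addresses: the non-canonical hole spans 0x0000800000000000 through 0xffff7fffffffffff. A range start inside the hole is bumped up to the first canonical kernel address, and a range end inside the hole is pulled down to the last canonical user address, so a filter range that merely straddles the hole still works; only a range entirely inside the hole collapses to 0/0. A userspace sketch of the same arithmetic (re-implemented here for illustration):

#include <stdio.h>
#include <stdint.h>

static uint64_t canonical(uint64_t vaddr, uint8_t bits)
{
        /* arithmetic right shift sign-extends bit (bits - 1) upward */
        return (uint64_t)(((int64_t)vaddr << (64 - bits)) >> (64 - bits));
}

static uint64_t clamp_ge(uint64_t vaddr, uint8_t bits)
{
        return canonical(vaddr, bits) == vaddr ? vaddr : -(1ULL << (bits - 1));
}

static uint64_t clamp_le(uint64_t vaddr, uint8_t bits)
{
        return canonical(vaddr, bits) == vaddr ? vaddr : (1ULL << (bits - 1)) - 1;
}

int main(void)
{
        uint64_t in_hole = 0x0000900000000000ULL; /* non-canonical for 48 bits */

        printf("ge: 0x%016llx\n", (unsigned long long)clamp_ge(in_hole, 48));
        /* -> 0xffff800000000000, first canonical kernel address */
        printf("le: 0x%016llx\n", (unsigned long long)clamp_le(in_hole, 48));
        /* -> 0x00007fffffffffff, last canonical user address */
        return 0;
}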
@@ -215,10 +215,18 @@ static int parse_discovery_table(struct pci_dev *dev, int die,
 
         pci_read_config_dword(dev, bar_offset, &val);
 
-        if (val & UNCORE_DISCOVERY_MASK)
+        if (val & ~PCI_BASE_ADDRESS_MEM_MASK & ~PCI_BASE_ADDRESS_MEM_TYPE_64)
                 return -EINVAL;
 
-        addr = (resource_size_t)(val & ~UNCORE_DISCOVERY_MASK);
+        addr = (resource_size_t)(val & PCI_BASE_ADDRESS_MEM_MASK);
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+        if ((val & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64) {
+                u32 val2;
+
+                pci_read_config_dword(dev, bar_offset + 4, &val2);
+                addr |= ((resource_size_t)val2) << 32;
+        }
+#endif
         size = UNCORE_DISCOVERY_GLOBAL_MAP_SIZE;
         io_addr = ioremap(addr, size);
         if (!io_addr)
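A 64-bit memory BAR stores its address across two consecutive 32-bit config dwords: the low dword carries the flag bits (bits 0-3, including the type field that marks it as 64-bit) plus address bits 31:4, and the next dword carries address bits 63:32. A sketch of the same decode outside the kernel, with hypothetical raw dword values:

#include <stdio.h>
#include <stdint.h>

#define PCI_BASE_ADDRESS_MEM_MASK       (~0x0fULL)  /* drop flag bits 0-3 */
#define PCI_BASE_ADDRESS_MEM_TYPE_MASK  0x06
#define PCI_BASE_ADDRESS_MEM_TYPE_64    0x04

int main(void)
{
        /* hypothetical raw BAR dwords; the low one is flagged 64-bit memory */
        uint32_t lo = 0xfe000004, hi = 0x0000001f;
        uint64_t addr = lo & PCI_BASE_ADDRESS_MEM_MASK;

        if ((lo & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64)
                addr |= (uint64_t)hi << 32;

        printf("decoded BAR address: 0x%016llx\n", (unsigned long long)addr);
        /* -> 0x0000001ffe000000 */
        return 0;
}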
@@ -444,7 +452,7 @@ static struct intel_uncore_ops generic_uncore_pci_ops = {
 
 #define UNCORE_GENERIC_MMIO_SIZE 0x4000
 
-static unsigned int generic_uncore_mmio_box_ctl(struct intel_uncore_box *box)
+static u64 generic_uncore_mmio_box_ctl(struct intel_uncore_box *box)
 {
         struct intel_uncore_type *type = box->pmu->type;
 
@@ -456,7 +464,7 @@ static unsigned int generic_uncore_mmio_box_ctl(struct intel_uncore_box *box)
 
 void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box)
 {
-        unsigned int box_ctl = generic_uncore_mmio_box_ctl(box);
+        u64 box_ctl = generic_uncore_mmio_box_ctl(box);
         struct intel_uncore_type *type = box->pmu->type;
         resource_size_t addr;
 
@@ -18,8 +18,6 @@
 #define UNCORE_DISCOVERY_BIR_BASE               0x10
 /* Discovery table BAR step */
 #define UNCORE_DISCOVERY_BIR_STEP               0x4
-/* Mask of the discovery table offset */
-#define UNCORE_DISCOVERY_MASK                   0xf
 /* Global discovery table size */
 #define UNCORE_DISCOVERY_GLOBAL_MAP_SIZE        0x20
 
@@ -7,8 +7,9 @@
 #define PEBS_BUFFER_SIZE        (PAGE_SIZE << 4)
 
 /* The maximal number of PEBS events: */
-#define MAX_PEBS_EVENTS         8
-#define MAX_FIXED_PEBS_EVENTS   4
+#define MAX_PEBS_EVENTS_FMT4    8
+#define MAX_PEBS_EVENTS         32
+#define MAX_FIXED_PEBS_EVENTS   16
 
 /*
  * A debug store configuration.
@@ -13,6 +13,8 @@ enum pt_capabilities {
         PT_CAP_mtc,
         PT_CAP_ptwrite,
         PT_CAP_power_event_trace,
+        PT_CAP_event_trace,
+        PT_CAP_tnt_disable,
         PT_CAP_topa_output,
         PT_CAP_topa_multiple_entries,
         PT_CAP_single_range_output,
@@ -498,6 +498,7 @@ struct kvm_pmc {
         bool intr;
 };
 
+#define KVM_PMC_MAX_FIXED       3
 struct kvm_pmu {
         unsigned nr_arch_gp_counters;
         unsigned nr_arch_fixed_counters;
@@ -511,7 +512,7 @@ struct kvm_pmu {
         u64 reserved_bits;
         u8 version;
         struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
-        struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
+        struct kvm_pmc fixed_counters[KVM_PMC_MAX_FIXED];
         struct irq_work irq_work;
         DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX);
         DECLARE_BITMAP(all_valid_pmc_idx, X86_PMC_IDX_MAX);
@@ -205,6 +205,8 @@
 #define RTIT_CTL_DISRETC                BIT(11)
 #define RTIT_CTL_PTW_EN                 BIT(12)
 #define RTIT_CTL_BRANCH_EN              BIT(13)
+#define RTIT_CTL_EVENT_EN               BIT(31)
+#define RTIT_CTL_NOTNT                  BIT_ULL(55)
 #define RTIT_CTL_MTC_RANGE_OFFSET       14
 #define RTIT_CTL_MTC_RANGE              (0x0full << RTIT_CTL_MTC_RANGE_OFFSET)
 #define RTIT_CTL_CYC_THRESH_OFFSET      19
@@ -71,6 +71,16 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
 extern bool __virt_addr_valid(unsigned long kaddr);
 #define virt_addr_valid(kaddr)  __virt_addr_valid((unsigned long) (kaddr))
 
+static __always_inline u64 __canonical_address(u64 vaddr, u8 vaddr_bits)
+{
+        return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
+}
+
+static __always_inline u64 __is_canonical_address(u64 vaddr, u8 vaddr_bits)
+{
+        return __canonical_address(vaddr, vaddr_bits) == vaddr;
+}
+
 #endif  /* __ASSEMBLY__ */
 
 #include <asm-generic/memory_model.h>
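The sign-extension trick above works because a canonical x86-64 address must have bits 63 down through vaddr_bits - 1 all equal to the top implemented bit. Shifting left by 64 - vaddr_bits and arithmetically shifting back replicates that bit upward; if the result differs from the input, the address was non-canonical. A quick standalone check:

#include <stdio.h>
#include <stdint.h>

static uint64_t canonical_address(uint64_t vaddr, uint8_t bits)
{
        /* arithmetic right shift sign-extends bit (bits - 1) upward */
        return (uint64_t)(((int64_t)vaddr << (64 - bits)) >> (64 - bits));
}

int main(void)
{
        /* with 48 implemented bits: top of user space, hole, base of kernel */
        uint64_t samples[] = {
                0x00007fffffffffffULL,  /* canonical (user) */
                0x0000800000000000ULL,  /* non-canonical (hole) */
                0xffff800000000000ULL,  /* canonical (kernel) */
        };

        for (int i = 0; i < 3; i++)
                printf("%#018llx -> %s\n", (unsigned long long)samples[i],
                       canonical_address(samples[i], 48) == samples[i] ?
                       "canonical" : "non-canonical");
        return 0;
}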
@@ -7,7 +7,7 @@
  */
 
 #define INTEL_PMC_MAX_GENERIC   32
-#define INTEL_PMC_MAX_FIXED     4
+#define INTEL_PMC_MAX_FIXED     16
 #define INTEL_PMC_IDX_FIXED     32
 
 #define X86_PMC_IDX_MAX         64
@@ -879,7 +879,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
                 eax.split.bit_width = cap.bit_width_gp;
                 eax.split.mask_length = cap.events_mask_len;
 
-                edx.split.num_counters_fixed = min(cap.num_counters_fixed, MAX_FIXED_COUNTERS);
+                edx.split.num_counters_fixed =
+                        min(cap.num_counters_fixed, KVM_PMC_MAX_FIXED);
                 edx.split.bit_width_fixed = cap.bit_width_fixed;
                 if (cap.version)
                         edx.split.anythread_deprecated = 1;
@@ -680,7 +680,7 @@ static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
 static inline bool emul_is_noncanonical_address(u64 la,
                                                 struct x86_emulate_ctxt *ctxt)
 {
-        return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
+        return !__is_canonical_address(la, ctxt_virt_addr_bits(ctxt));
 }
 
 /*
|
||||
case X86EMUL_MODE_PROT64:
|
||||
*linear = la;
|
||||
va_bits = ctxt_virt_addr_bits(ctxt);
|
||||
if (get_canonical(la, va_bits) != la)
|
||||
if (!__is_canonical_address(la, va_bits))
|
||||
goto bad;
|
||||
|
||||
*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
|
||||
|
@@ -15,8 +15,6 @@
 #define VMWARE_BACKDOOR_PMC_REAL_TIME           0x10001
 #define VMWARE_BACKDOOR_PMC_APPARENT_TIME       0x10002
 
-#define MAX_FIXED_COUNTERS      3
-
 struct kvm_event_hw_type_mapping {
         u8 eventsel;
         u8 unit_mask;
@@ -565,7 +565,7 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
                 pmu->gp_counters[i].current_config = 0;
         }
 
-        for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
+        for (i = 0; i < KVM_PMC_MAX_FIXED; i++) {
                 pmu->fixed_counters[i].type = KVM_PMC_FIXED;
                 pmu->fixed_counters[i].vcpu = vcpu;
                 pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
@@ -591,7 +591,7 @@ static void intel_pmu_reset(struct kvm_vcpu *vcpu)
                 pmc->counter = pmc->eventsel = 0;
         }
 
-        for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
+        for (i = 0; i < KVM_PMC_MAX_FIXED; i++) {
                 pmc = &pmu->fixed_counters[i];
 
                 pmc_stop_counter(pmc);
@@ -1749,7 +1749,7 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
                  * value, and that something deterministic happens if the guest
                  * invokes 64-bit SYSENTER.
                  */
-                data = get_canonical(data, vcpu_virt_addr_bits(vcpu));
+                data = __canonical_address(data, vcpu_virt_addr_bits(vcpu));
                 break;
         case MSR_TSC_AUX:
                 if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
@@ -6529,7 +6529,7 @@ static void kvm_init_msr_list(void)
         u32 dummy[2];
         unsigned i;
 
-        BUILD_BUG_ON_MSG(INTEL_PMC_MAX_FIXED != 4,
+        BUILD_BUG_ON_MSG(KVM_PMC_MAX_FIXED != 3,
                          "Please update the fixed PMCs in msrs_to_saved_all[]");
 
         perf_get_x86_pmu_capability(&x86_pmu);
@@ -166,14 +166,9 @@ static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
         return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
 }
 
-static inline u64 get_canonical(u64 la, u8 vaddr_bits)
-{
-        return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
-}
-
 static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
 {
-        return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
+        return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
 }
 
 static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
@@ -4,11 +4,6 @@
 #include <linux/kernel.h>
 
 #ifdef CONFIG_X86_64
-static __always_inline u64 canonical_address(u64 vaddr, u8 vaddr_bits)
-{
-        return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
-}
-
 bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
 {
         unsigned long vaddr = (unsigned long)unsafe_src;
@@ -19,7 +14,7 @@ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
          * we also need to include the userspace guard page.
          */
         return vaddr >= TASK_SIZE_MAX + PAGE_SIZE &&
-               canonical_address(vaddr, boot_cpu_data.x86_virt_bits) == vaddr;
+               __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
 }
 #else
 bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
@@ -251,6 +251,8 @@ enum {
         PERF_BR_SYSRET          = 8,    /* syscall return */
         PERF_BR_COND_CALL       = 9,    /* conditional function call */
         PERF_BR_COND_RET        = 10,   /* conditional function return */
+        PERF_BR_ERET            = 11,   /* exception return */
+        PERF_BR_IRQ             = 12,   /* irq */
         PERF_BR_MAX,
 };
 
@ -10531,8 +10531,6 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
|
||||
*/
|
||||
if (state == IF_STATE_END) {
|
||||
ret = -EINVAL;
|
||||
if (kernel && event->attr.exclude_kernel)
|
||||
goto fail;
|
||||
|
||||
/*
|
||||
* ACTION "filter" must have a non-zero length region
|
||||
@ -10574,8 +10572,11 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
|
||||
}
|
||||
|
||||
/* ready to consume more filters */
|
||||
kfree(filename);
|
||||
filename = NULL;
|
||||
state = IF_STATE_ACTION;
|
||||
filter = NULL;
|
||||
kernel = 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
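The fix matters when one filter string carries several comma-separated filters: without resetting filename and the kernel flag at the end of each loop iteration, state from the first filter leaked into the parse of the second, and the filename allocation was leaked outright. A hypothetical two-filter string of the kind this parser loops over (path and addresses invented for illustration):

/* Hypothetical input; 'filter' actions separated by a comma. The first
 * is file-based, the second a kernel address range, so leaked state
 * from filter #1 would corrupt filter #2 without the reset above. */
static const char example_fstr[] =
        "filter 0x1000/0x2000@/usr/bin/example,filter 0xffffffff81000000/0x1000";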
@@ -251,6 +251,8 @@ enum {
         PERF_BR_SYSRET          = 8,    /* syscall return */
         PERF_BR_COND_CALL       = 9,    /* conditional function call */
         PERF_BR_COND_RET        = 10,   /* conditional function return */
+        PERF_BR_ERET            = 11,   /* exception return */
+        PERF_BR_IRQ             = 12,   /* irq */
         PERF_BR_MAX,
 };
 
@@ -49,7 +49,9 @@ const char *branch_type_name(int type)
                 "SYSCALL",
                 "SYSRET",
                 "COND_CALL",
-                "COND_RET"
+                "COND_RET",
+                "ERET",
+                "IRQ"
         };
 
         if (type >= 0 && type < PERF_BR_MAX)
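One detail worth noticing in this hunk: the comma added after "COND_RET" is load-bearing. Adjacent C string literals concatenate silently, so appending new names without it would merge two table entries and shift every later index. A minimal illustration (array names hypothetical):

#include <stdio.h>

int main(void)
{
        const char *bad[]  = { "COND_RET"  "ERET" };  /* 1 entry: "COND_RETERET" */
        const char *good[] = { "COND_RET", "ERET" };  /* 2 entries */

        printf("bad[0] = %s\n", bad[0]);
        printf("good[0] = %s, good[1] = %s\n", good[0], good[1]);
        return 0;
}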