From 3bf9bdb3e51b89352a623dbaa8abedf6bb190ea8 Mon Sep 17 00:00:00 2001
From: Adrian Hunter
Date: Mon, 24 Jun 2024 23:10:57 +0300
Subject: [PATCH] perf/x86/intel/pt: Fix pt_topa_entry_for_page() address
 calculation

[ Upstream commit 3520b251dcae2b4a27b95cd6f745c54fd658bda5 ]

Currently, perf allocates an array of page pointers which is limited in
size by MAX_PAGE_ORDER. That in turn limits the maximum Intel PT buffer
size to 2GiB. Should that limitation be lifted, the Intel PT driver can
support larger sizes, except for one calculation in
pt_topa_entry_for_page(), which is limited to 32 bits.

Fix pt_topa_entry_for_page() address calculation by adding a cast.

Fixes: 39152ee51b77 ("perf/x86/intel/pt: Get rid of reverse lookup table for ToPA")
Signed-off-by: Adrian Hunter
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20240624201101.60186-4-adrian.hunter@intel.com
Signed-off-by: Sasha Levin
---
 arch/x86/events/intel/pt.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index 42a55794004a..e347e56030fd 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -989,7 +989,7 @@ pt_topa_entry_for_page(struct pt_buffer *buf, unsigned int pg)
 	 * order allocations, there shouldn't be many of these.
 	 */
 	list_for_each_entry(topa, &buf->tables, list) {
-		if (topa->offset + topa->size > pg << PAGE_SHIFT)
+		if (topa->offset + topa->size > (unsigned long)pg << PAGE_SHIFT)
 			goto found;
 	}
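
As a minimal illustration of the arithmetic the cast fixes: pg is declared
unsigned int, so without the cast the shift "pg << PAGE_SHIFT" is evaluated
in 32-bit arithmetic and silently wraps once the byte offset reaches 4 GiB.
The standalone sketch below is ordinary userspace C, not kernel code; the
PAGE_SHIFT value and the example page index are assumed for illustration,
and the printed results assume a 64-bit (LP64) target such as x86_64.

    #include <stdio.h>

    #define PAGE_SHIFT 12	/* assumed 4 KiB pages, as on x86 */

    int main(void)
    {
    	/* Page index just past the 4 GiB byte boundary (2^20 pages of 4 KiB). */
    	unsigned int pg = (1u << 20) + 1;

    	/* Shift performed in 32-bit arithmetic: wraps, yielding 0x1000. */
    	unsigned long truncated = pg << PAGE_SHIFT;

    	/* Operand widened first, shift performed in 64 bits: 0x100001000. */
    	unsigned long widened = (unsigned long)pg << PAGE_SHIFT;

    	printf("without cast: %#lx\n", truncated);
    	printf("with cast:    %#lx\n", widened);
    	return 0;
    }

With the current 2 GiB buffer limit the largest page index stays well below
the point where the 32-bit shift can wrap, which is why the truncation only
matters if the MAX_PAGE_ORDER-derived size limit is lifted, as the commit
message notes.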