From 2a48c8c9cf8793a0d23267c0f9ae3c9990819c90 Mon Sep 17 00:00:00 2001
From: Ilya Leoshkevich <iii@linux.ibm.com>
Date: Fri, 21 Jun 2024 13:35:21 +0200
Subject: [PATCH] s390/kmsan: implement the architecture-specific functions

arch_kmsan_get_meta_or_null() finds the lowcore shadow by querying the
prefix and calling kmsan_get_metadata() again.

kmsan_virt_addr_valid() delegates to virt_addr_valid().

Link: https://lkml.kernel.org/r/20240621113706.315500-38-iii@linux.ibm.com
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Acked-by: Alexander Gordeev
Reviewed-by: Alexander Potapenko
Cc: Christian Borntraeger
Cc: Christoph Lameter
Cc: David Rientjes
Cc: Dmitry Vyukov
Cc: Heiko Carstens
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Joonsoo Kim
Cc: Marco Elver
Cc: Mark Rutland
Cc: Masami Hiramatsu (Google)
Cc: Pekka Enberg
Cc: Roman Gushchin
Cc: Steven Rostedt (Google)
Cc: Sven Schnelle
Cc: Vasily Gorbik
Cc: Vlastimil Babka
Signed-off-by: Andrew Morton
---
 arch/s390/include/asm/kmsan.h | 59 +++++++++++++++++++++++++++++++++++
 1 file changed, 59 insertions(+)
 create mode 100644 arch/s390/include/asm/kmsan.h

diff --git a/arch/s390/include/asm/kmsan.h b/arch/s390/include/asm/kmsan.h
new file mode 100644
index 000000000000..27db65fbf3f6
--- /dev/null
+++ b/arch/s390/include/asm/kmsan.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_KMSAN_H
+#define _ASM_S390_KMSAN_H
+
+#include <asm/lowcore.h>
+#include <asm/page.h>
+#include <linux/kmsan.h>
+#include <linux/mmzone.h>
+#include <linux/stddef.h>
+
+#ifndef MODULE
+
+static inline bool is_lowcore_addr(void *addr)
+{
+	return addr >= (void *)&S390_lowcore &&
+	       addr < (void *)(&S390_lowcore + 1);
+}
+
+static inline void *arch_kmsan_get_meta_or_null(void *addr, bool is_origin)
+{
+	if (is_lowcore_addr(addr)) {
+		/*
+		 * Different lowcores accessed via S390_lowcore are described
+		 * by the same struct page. Resolve the prefix manually in
+		 * order to get a distinct struct page.
+		 */
+		addr += (void *)lowcore_ptr[raw_smp_processor_id()] -
+			(void *)&S390_lowcore;
+		if (KMSAN_WARN_ON(is_lowcore_addr(addr)))
+			return NULL;
+		return kmsan_get_metadata(addr, is_origin);
+	}
+	return NULL;
+}
+
+static inline bool kmsan_virt_addr_valid(void *addr)
+{
+	bool ret;
+
+	/*
+	 * pfn_valid() relies on RCU, and may call into the scheduler on exiting
+	 * the critical section. However, this would result in recursion with
+	 * KMSAN. Therefore, disable preemption here, and re-enable preemption
+	 * below while suppressing reschedules to avoid recursion.
+	 *
+	 * Note, this sacrifices occasionally breaking scheduling guarantees.
+	 * Although, a kernel compiled with KMSAN has already given up on any
+	 * performance guarantees due to being heavily instrumented.
+	 */
+	preempt_disable();
+	ret = virt_addr_valid(addr);
+	preempt_enable_no_resched();
+
+	return ret;
+}
+
+#endif /* !MODULE */
+
+#endif /* _ASM_S390_KMSAN_H */
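
For context on how the new hook is reached: the generic KMSAN metadata
lookup is expected to consult arch_kmsan_get_meta_or_null() before its
usual struct-page-based resolution, roughly along the lines of the
sketch below. This is a simplified illustration, not the exact
mm/kmsan/shadow.c code:

/*
 * Simplified caller-side sketch (illustrative only): the generic
 * lookup tries the architecture hook first, then falls back to its
 * normal shadow/origin resolution.
 */
void *kmsan_get_metadata(void *address, bool is_origin)
{
	void *ret;

	/* Let the architecture resolve special ranges (e.g. lowcore). */
	ret = arch_kmsan_get_meta_or_null(address, is_origin);
	if (ret)
		return ret;

	/* ... generic struct-page-based shadow/origin lookup ... */
	return NULL;
}

This is what the "calling kmsan_get_metadata() again" wording above
refers to: for a lowcore address the hook rebases the pointer to the
current CPU's real lowcore and re-enters the lookup; on the second pass
is_lowcore_addr() is false, so the generic path handles the address,
and the KMSAN_WARN_ON() guards against re-entering a third time.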
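
The rebasing arithmetic preserves the offset into the lowcore while
swapping the base address. The following self-contained userspace
sketch demonstrates the same arithmetic with mock variables standing in
for S390_lowcore and lowcore_ptr[] (all names and values here are
hypothetical stand-ins, not kernel symbols):

#include <stdio.h>

/* Mock 8 KiB lowcore, standing in for struct lowcore on s390. */
struct mock_lowcore { char pad[8192]; };

static struct mock_lowcore prefixed_view;	/* plays S390_lowcore */
static struct mock_lowcore cpu1_lowcore;	/* plays *lowcore_ptr[1] */
static struct mock_lowcore *lowcore_ptr[] = { NULL, &cpu1_lowcore };

int main(void)
{
	int cpu = 1;
	/* An access 0x120 bytes into the prefixed lowcore view ... */
	char *addr = (char *)&prefixed_view + 0x120;

	/* ... is rebased exactly as in arch_kmsan_get_meta_or_null(): */
	addr += (char *)lowcore_ptr[cpu] - (char *)&prefixed_view;

	/* The offset survives the rebase: prints 0x120. */
	printf("offset = %#tx\n", addr - (char *)&cpu1_lowcore);
	return 0;
}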
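
One more note on the preemption dance in kmsan_virt_addr_valid(): per
the comment in the patch, the recursion being avoided has roughly the
shape

	kmsan_virt_addr_valid() -> virt_addr_valid() -> pfn_valid()
	  -> RCU read-side exit -> (reschedule point) -> instrumented
	  scheduler code -> KMSAN runtime -> kmsan_virt_addr_valid() -> ...

preempt_disable() keeps the RCU read-side section from turning into a
preemption point, and preempt_enable_no_resched() skips the reschedule
check on the way out, at the cost of occasionally delaying a
reschedule; as the comment notes, a KMSAN kernel has already given up
on performance guarantees anyway.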