From addbeea6f50b5ac344331652dd7f35faf760969e Mon Sep 17 00:00:00 2001
From: Bart Van Assche <bvanassche@acm.org>
Date: Fri, 26 Aug 2022 09:21:15 -0700
Subject: [PATCH 01/27] testing/selftests: Add tests for the is_signed_type()
 macro

Although not documented, is_signed_type() must support the 'bool' and
pointer types in addition to scalar and enumeration types. Add a
selftest that verifies that this macro handles all supported types
correctly.

Cc: Andrew Morton
Cc: Arnd Bergmann
Cc: Dan Williams
Cc: Eric Dumazet
Cc: Ingo Molnar
Cc: Isabella Basso
Cc: "Jason A. Donenfeld"
Cc: Josh Poimboeuf
Cc: Luc Van Oostenryck
Cc: Masami Hiramatsu
Cc: Nathan Chancellor
Cc: Peter Zijlstra
Cc: Rasmus Villemoes
Cc: Sander Vanheule
Cc: Steven Rostedt
Cc: Vlastimil Babka
Cc: Yury Norov
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Tested-by: Isabella Basso
Acked-by: Rasmus Villemoes
Signed-off-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20220826162116.1050972-2-bvanassche@acm.org
---
 lib/Kconfig.debug          | 12 ++++++++++
 lib/Makefile               |  1 +
 lib/is_signed_type_kunit.c | 48 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 61 insertions(+)
 create mode 100644 lib/is_signed_type_kunit.c

diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 072e4b289c13..36455953d306 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2506,6 +2506,18 @@ config MEMCPY_KUNIT_TEST
 
	  If unsure, say N.
 
+config IS_SIGNED_TYPE_KUNIT_TEST
+	tristate "Test is_signed_type() macro" if !KUNIT_ALL_TESTS
+	depends on KUNIT
+	default KUNIT_ALL_TESTS
+	help
+	  Builds unit tests for the is_signed_type() macro.
+
+	  For more information on KUnit and unit tests in general please refer
+	  to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+	  If unsure, say N.
+
 config OVERFLOW_KUNIT_TEST
 	tristate "Test check_*_overflow() functions at runtime" if !KUNIT_ALL_TESTS
 	depends on KUNIT
diff --git a/lib/Makefile b/lib/Makefile
index 5927d7fa0806..f545140ed9e7 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -377,6 +377,7 @@ obj-$(CONFIG_BITS_TEST) += test_bits.o
 obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o
 obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o
 obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o
+obj-$(CONFIG_IS_SIGNED_TYPE_KUNIT_TEST) += is_signed_type_kunit.o
 obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o
 CFLAGS_stackinit_kunit.o += $(call cc-disable-warning, switch-unreachable)
 obj-$(CONFIG_STACKINIT_KUNIT_TEST) += stackinit_kunit.o
diff --git a/lib/is_signed_type_kunit.c b/lib/is_signed_type_kunit.c
new file mode 100644
index 000000000000..f2eedb1f0935
--- /dev/null
+++ b/lib/is_signed_type_kunit.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * ./tools/testing/kunit/kunit.py run is_signed_type [--raw_output]
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/overflow.h>
+
+enum unsigned_enum {
+	constant_a = 3,
+};
+
+enum signed_enum {
+	constant_b = -1,
+	constant_c = 2,
+};
+
+static void is_signed_type_test(struct kunit *test)
+{
+	KUNIT_EXPECT_EQ(test, is_signed_type(bool), false);
+	KUNIT_EXPECT_EQ(test, is_signed_type(signed char), true);
+	KUNIT_EXPECT_EQ(test, is_signed_type(unsigned char), false);
+	KUNIT_EXPECT_EQ(test, is_signed_type(int), true);
+	KUNIT_EXPECT_EQ(test, is_signed_type(unsigned int), false);
+	KUNIT_EXPECT_EQ(test, is_signed_type(long), true);
+	KUNIT_EXPECT_EQ(test, is_signed_type(unsigned long), false);
+	KUNIT_EXPECT_EQ(test, is_signed_type(long long), true);
+	KUNIT_EXPECT_EQ(test, is_signed_type(unsigned long long), false);
+	KUNIT_EXPECT_EQ(test, is_signed_type(enum unsigned_enum), false);
+	KUNIT_EXPECT_EQ(test, is_signed_type(enum signed_enum), true);
+	KUNIT_EXPECT_EQ(test, is_signed_type(void *), false);
+	KUNIT_EXPECT_EQ(test, is_signed_type(const char *), false);
+}
+
+static struct kunit_case is_signed_type_test_cases[] = {
+	KUNIT_CASE(is_signed_type_test),
+	{}
+};
+
+static struct kunit_suite is_signed_type_test_suite = {
+	.name = "is_signed_type",
+	.test_cases = is_signed_type_test_cases,
+};
+
+kunit_test_suite(is_signed_type_test_suite);
+
+MODULE_LICENSE("Dual MIT/GPL");

From 92d23c6e94157739b997cacce151586a0d07bb8a Mon Sep 17 00:00:00 2001
From: Bart Van Assche <bvanassche@acm.org>
Date: Fri, 26 Aug 2022 09:21:16 -0700
Subject: [PATCH 02/27] overflow, tracing: Define the is_signed_type() macro
 once

There are two definitions of the is_signed_type() macro: one in
<linux/overflow.h> and a second definition in <linux/trace_events.h>.
As suggested by Linus Torvalds, move the definition of the
is_signed_type() macro into the <linux/compiler.h> header file. Change
the definition of the is_signed_type() macro to make sure that it does
not trigger any sparse warnings with future versions of sparse for
bitwise types.

See also:
https://lore.kernel.org/all/CAHk-=whjH6p+qzwUdx5SOVVHjS3WvzJQr6mDUwhEyTf6pJWzaQ@mail.gmail.com/
https://lore.kernel.org/all/CAHk-=wjQGnVfb4jehFR0XyZikdQvCZouE96xR_nnf5kqaM5qqQ@mail.gmail.com/

Cc: Andrew Morton
Cc: Arnd Bergmann
Cc: Dan Williams
Cc: Eric Dumazet
Cc: Ingo Molnar
Cc: Isabella Basso
Cc: "Jason A. Donenfeld"
Cc: Josh Poimboeuf
Cc: Luc Van Oostenryck
Cc: Masami Hiramatsu
Cc: Nathan Chancellor
Cc: Peter Zijlstra
Cc: Rasmus Villemoes
Cc: Sander Vanheule
Cc: Steven Rostedt
Cc: Vlastimil Babka
Cc: Yury Norov
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20220826162116.1050972-3-bvanassche@acm.org
---
 include/linux/compiler.h     | 6 ++++++
 include/linux/overflow.h     | 1 -
 include/linux/trace_events.h | 2 --
 3 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 01ce94b58b42..7713d7bcdaea 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -239,6 +239,12 @@ static inline void *offset_to_ptr(const int *off)
 /* &a[0] degrades to a pointer: a different type from an array */
 #define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
 
+/*
+ * Whether 'type' is a signed type or an unsigned type. Supports scalar types,
+ * bool and also pointer types.
+ */
+#define is_signed_type(type) (((type)(-1)) < (__force type)1)
+
 /*
  * This is needed in functions which generate the stack canary, see
  * arch/x86/kernel/smpboot.c::start_secondary() for an example.
  */
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index f1221d11f8e5..0eb3b192f07a 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -30,7 +30,6 @@
 * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
 * credit to Christian Biere.
*/ -#define is_signed_type(type) (((type)(-1)) < (type)1) #define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type))) #define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T))) #define type_min(T) ((T)((T)-type_max(T)-(T)1)) diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index b18759a673c6..8401dec93c15 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -814,8 +814,6 @@ extern int trace_add_event_call(struct trace_event_call *call); extern int trace_remove_event_call(struct trace_event_call *call); extern int trace_event_get_offsets(struct trace_event_call *call); -#define is_signed_type(type) (((type)(-1)) < (type)1) - int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set); int trace_set_clr_event(const char *system, const char *event, int set); int trace_array_set_clr_event(struct trace_array *tr, const char *system, From d219d2a9a92e39aa92799efe8f2aa21259b6dd82 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 29 Aug 2022 13:37:17 -0700 Subject: [PATCH 03/27] overflow: Allow mixed type arguments When the check_[op]_overflow() helpers were introduced, all arguments were required to be the same type to make the fallback macros simpler. However, now that the fallback macros have been removed[1], it is fine to allow mixed types, which makes using the helpers much more useful, as they can be used to test for type-based overflows (e.g. adding two large ints but storing into a u8), as would be handy in the drm core[2]. Remove the restriction, and add additional self-tests that exercise some of the mixed-type overflow cases, and double-check for accidental macro side-effects. [1] https://git.kernel.org/linus/4eb6bd55cfb22ffc20652732340c4962f3ac9a91 [2] https://lore.kernel.org/lkml/20220824084514.2261614-2-gwan-gyeong.mun@intel.com Cc: Rasmus Villemoes Cc: Gwan-gyeong Mun Cc: "Gustavo A. R. Silva" Cc: Nick Desaulniers Cc: linux-hardening@vger.kernel.org Reviewed-by: Andrzej Hajda Reviewed-by: Gwan-gyeong Mun Tested-by: Gwan-gyeong Mun Signed-off-by: Kees Cook --- include/linux/overflow.h | 72 ++++++++++++++++------------ lib/overflow_kunit.c | 101 ++++++++++++++++++++++++++++----------- 2 files changed, 113 insertions(+), 60 deletions(-) diff --git a/include/linux/overflow.h b/include/linux/overflow.h index 0eb3b192f07a..19dfdd74835e 100644 --- a/include/linux/overflow.h +++ b/include/linux/overflow.h @@ -51,40 +51,50 @@ static inline bool __must_check __must_check_overflow(bool overflow) return unlikely(overflow); } -/* - * For simplicity and code hygiene, the fallback code below insists on - * a, b and *d having the same type (similar to the min() and max() - * macros), whereas gcc's type-generic overflow checkers accept - * different types. Hence we don't just make check_add_overflow an - * alias for __builtin_add_overflow, but add type checks similar to - * below. +/** check_add_overflow() - Calculate addition with overflow checking + * + * @a: first addend + * @b: second addend + * @d: pointer to store sum + * + * Returns 0 on success. + * + * *@d holds the results of the attempted addition, but is not considered + * "safe for use" on a non-zero return value, which indicates that the + * sum has overflowed or been truncated. 
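+ *
+ * Illustrative use with mixed argument types, which this change now
+ * permits (a sketch only, not part of the upstream patch; the names
+ * are made up):
+ *
+ *	size_t len_a = ..., len_b = ...;
+ *	u8 total;
+ *
+ *	if (check_add_overflow(len_a, len_b, &total))
+ *		return -EOVERFLOW;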
*/ -#define check_add_overflow(a, b, d) __must_check_overflow(({ \ - typeof(a) __a = (a); \ - typeof(b) __b = (b); \ - typeof(d) __d = (d); \ - (void) (&__a == &__b); \ - (void) (&__a == __d); \ - __builtin_add_overflow(__a, __b, __d); \ -})) +#define check_add_overflow(a, b, d) \ + __must_check_overflow(__builtin_add_overflow(a, b, d)) -#define check_sub_overflow(a, b, d) __must_check_overflow(({ \ - typeof(a) __a = (a); \ - typeof(b) __b = (b); \ - typeof(d) __d = (d); \ - (void) (&__a == &__b); \ - (void) (&__a == __d); \ - __builtin_sub_overflow(__a, __b, __d); \ -})) +/** check_sub_overflow() - Calculate subtraction with overflow checking + * + * @a: minuend; value to subtract from + * @b: subtrahend; value to subtract from @a + * @d: pointer to store difference + * + * Returns 0 on success. + * + * *@d holds the results of the attempted subtraction, but is not considered + * "safe for use" on a non-zero return value, which indicates that the + * difference has underflowed or been truncated. + */ +#define check_sub_overflow(a, b, d) \ + __must_check_overflow(__builtin_sub_overflow(a, b, d)) -#define check_mul_overflow(a, b, d) __must_check_overflow(({ \ - typeof(a) __a = (a); \ - typeof(b) __b = (b); \ - typeof(d) __d = (d); \ - (void) (&__a == &__b); \ - (void) (&__a == __d); \ - __builtin_mul_overflow(__a, __b, __d); \ -})) +/** check_mul_overflow() - Calculate multiplication with overflow checking + * + * @a: first factor + * @b: second factor + * @d: pointer to store product + * + * Returns 0 on success. + * + * *@d holds the results of the attempted multiplication, but is not + * considered "safe for use" on a non-zero return value, which indicates + * that the product has overflowed or been truncated. + */ +#define check_mul_overflow(a, b, d) \ + __must_check_overflow(__builtin_mul_overflow(a, b, d)) /** check_shl_overflow() - Calculate a left-shifted value and check overflow * diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c index 7e3e43679b73..0d98c9bc75da 100644 --- a/lib/overflow_kunit.c +++ b/lib/overflow_kunit.c @@ -16,12 +16,15 @@ #include #include -#define DEFINE_TEST_ARRAY(t) \ - static const struct test_ ## t { \ - t a, b; \ - t sum, diff, prod; \ - bool s_of, d_of, p_of; \ - } t ## _tests[] +#define DEFINE_TEST_ARRAY_TYPED(t1, t2, t) \ + static const struct test_ ## t1 ## _ ## t2 ## __ ## t { \ + t1 a; \ + t2 b; \ + t sum, diff, prod; \ + bool s_of, d_of, p_of; \ + } t1 ## _ ## t2 ## __ ## t ## _tests[] + +#define DEFINE_TEST_ARRAY(t) DEFINE_TEST_ARRAY_TYPED(t, t, t) DEFINE_TEST_ARRAY(u8) = { {0, 0, 0, 0, 0, false, false, false}, @@ -222,21 +225,27 @@ DEFINE_TEST_ARRAY(s64) = { }; #endif -#define check_one_op(t, fmt, op, sym, a, b, r, of) do { \ - t _r; \ - bool _of; \ - \ - _of = check_ ## op ## _overflow(a, b, &_r); \ - KUNIT_EXPECT_EQ_MSG(test, _of, of, \ +#define check_one_op(t, fmt, op, sym, a, b, r, of) do { \ + int _a_orig = a, _a_bump = a + 1; \ + int _b_orig = b, _b_bump = b + 1; \ + bool _of; \ + t _r; \ + \ + _of = check_ ## op ## _overflow(a, b, &_r); \ + KUNIT_EXPECT_EQ_MSG(test, _of, of, \ "expected "fmt" "sym" "fmt" to%s overflow (type %s)\n", \ - a, b, of ? "" : " not", #t); \ - KUNIT_EXPECT_EQ_MSG(test, _r, r, \ + a, b, of ? "" : " not", #t); \ + KUNIT_EXPECT_EQ_MSG(test, _r, r, \ "expected "fmt" "sym" "fmt" == "fmt", got "fmt" (type %s)\n", \ - a, b, r, _r, #t); \ + a, b, r, _r, #t); \ + /* Check for internal macro side-effects. 
*/ \ + _of = check_ ## op ## _overflow(_a_orig++, _b_orig++, &_r); \ + KUNIT_EXPECT_EQ_MSG(test, _a_orig, _a_bump, "Unexpected " #op " macro side-effect!\n"); \ + KUNIT_EXPECT_EQ_MSG(test, _b_orig, _b_bump, "Unexpected " #op " macro side-effect!\n"); \ } while (0) -#define DEFINE_TEST_FUNC(t, fmt) \ -static void do_test_ ## t(struct kunit *test, const struct test_ ## t *p) \ +#define DEFINE_TEST_FUNC_TYPED(n, t, fmt) \ +static void do_test_ ## n(struct kunit *test, const struct test_ ## n *p) \ { \ check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \ check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \ @@ -245,15 +254,18 @@ static void do_test_ ## t(struct kunit *test, const struct test_ ## t *p) \ check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \ } \ \ -static void t ## _overflow_test(struct kunit *test) { \ +static void n ## _overflow_test(struct kunit *test) { \ unsigned i; \ \ - for (i = 0; i < ARRAY_SIZE(t ## _tests); ++i) \ - do_test_ ## t(test, &t ## _tests[i]); \ + for (i = 0; i < ARRAY_SIZE(n ## _tests); ++i) \ + do_test_ ## n(test, &n ## _tests[i]); \ kunit_info(test, "%zu %s arithmetic tests finished\n", \ - ARRAY_SIZE(t ## _tests), #t); \ + ARRAY_SIZE(n ## _tests), #n); \ } +#define DEFINE_TEST_FUNC(t, fmt) \ + DEFINE_TEST_FUNC_TYPED(t ## _ ## t ## __ ## t, t, fmt) + DEFINE_TEST_FUNC(u8, "%d"); DEFINE_TEST_FUNC(s8, "%d"); DEFINE_TEST_FUNC(u16, "%d"); @@ -265,6 +277,33 @@ DEFINE_TEST_FUNC(u64, "%llu"); DEFINE_TEST_FUNC(s64, "%lld"); #endif +DEFINE_TEST_ARRAY_TYPED(u32, u32, u8) = { + {0, 0, 0, 0, 0, false, false, false}, + {U8_MAX, 2, 1, U8_MAX - 2, U8_MAX - 1, true, false, true}, + {U8_MAX + 1, 0, 0, 0, 0, true, true, false}, +}; +DEFINE_TEST_FUNC_TYPED(u32_u32__u8, u8, "%d"); + +DEFINE_TEST_ARRAY_TYPED(u32, u32, int) = { + {0, 0, 0, 0, 0, false, false, false}, + {U32_MAX, 0, -1, -1, 0, true, true, false}, +}; +DEFINE_TEST_FUNC_TYPED(u32_u32__int, int, "%d"); + +DEFINE_TEST_ARRAY_TYPED(u8, u8, int) = { + {0, 0, 0, 0, 0, false, false, false}, + {U8_MAX, U8_MAX, 2 * U8_MAX, 0, U8_MAX * U8_MAX, false, false, false}, + {1, 2, 3, -1, 2, false, false, false}, +}; +DEFINE_TEST_FUNC_TYPED(u8_u8__int, int, "%d"); + +DEFINE_TEST_ARRAY_TYPED(int, int, u8) = { + {0, 0, 0, 0, 0, false, false, false}, + {1, 2, 3, U8_MAX, 2, false, true, false}, + {-1, 0, U8_MAX, U8_MAX, 0, true, true, false}, +}; +DEFINE_TEST_FUNC_TYPED(int_int__u8, u8, "%d"); + static void overflow_shift_test(struct kunit *test) { int count = 0; @@ -649,17 +688,21 @@ static void overflow_size_helpers_test(struct kunit *test) } static struct kunit_case overflow_test_cases[] = { - KUNIT_CASE(u8_overflow_test), - KUNIT_CASE(s8_overflow_test), - KUNIT_CASE(u16_overflow_test), - KUNIT_CASE(s16_overflow_test), - KUNIT_CASE(u32_overflow_test), - KUNIT_CASE(s32_overflow_test), + KUNIT_CASE(u8_u8__u8_overflow_test), + KUNIT_CASE(s8_s8__s8_overflow_test), + KUNIT_CASE(u16_u16__u16_overflow_test), + KUNIT_CASE(s16_s16__s16_overflow_test), + KUNIT_CASE(u32_u32__u32_overflow_test), + KUNIT_CASE(s32_s32__s32_overflow_test), /* Clang 13 and earlier generate unwanted libcalls on 32-bit. 
*/ #if BITS_PER_LONG == 64 - KUNIT_CASE(u64_overflow_test), - KUNIT_CASE(s64_overflow_test), + KUNIT_CASE(u64_u64__u64_overflow_test), + KUNIT_CASE(s64_s64__s64_overflow_test), #endif + KUNIT_CASE(u32_u32__u8_overflow_test), + KUNIT_CASE(u32_u32__int_overflow_test), + KUNIT_CASE(u8_u8__int_overflow_test), + KUNIT_CASE(int_int__u8_overflow_test), KUNIT_CASE(overflow_shift_test), KUNIT_CASE(overflow_allocation_test), KUNIT_CASE(overflow_size_helpers_test), From 779742255cb464e9e833fed2a8d352eb12936dae Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 31 Aug 2022 11:09:13 -0700 Subject: [PATCH 04/27] overflow: Split up kunit tests for smaller stack frames Under some pathological 32-bit configs, the shift overflow KUnit tests create huge stack frames. Split up the function to avoid this, separating by rough shift overflow cases. Cc: Rasmus Villemoes Cc: Daniel Latypov Cc: Vitor Massaru Iha Cc: "Gustavo A. R. Silva" Cc: Nick Desaulniers Reported-by: kernel test robot Link: https://lore.kernel.org/lkml/202208301850.iuv9VwA8-lkp@intel.com Acked-by: Daniel Latypov Signed-off-by: Kees Cook --- lib/overflow_kunit.c | 78 +++++++++++++++++++++++++++++--------------- 1 file changed, 51 insertions(+), 27 deletions(-) diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c index 0d98c9bc75da..f385ca652b74 100644 --- a/lib/overflow_kunit.c +++ b/lib/overflow_kunit.c @@ -304,10 +304,6 @@ DEFINE_TEST_ARRAY_TYPED(int, int, u8) = { }; DEFINE_TEST_FUNC_TYPED(int_int__u8, u8, "%d"); -static void overflow_shift_test(struct kunit *test) -{ - int count = 0; - /* Args are: value, shift, type, expected result, overflow expected */ #define TEST_ONE_SHIFT(a, s, t, expect, of) do { \ typeof(a) __a = (a); \ @@ -331,6 +327,10 @@ static void overflow_shift_test(struct kunit *test) count++; \ } while (0) +static void shift_sane_test(struct kunit *test) +{ + int count = 0; + /* Sane shifts. */ TEST_ONE_SHIFT(1, 0, u8, 1 << 0, false); TEST_ONE_SHIFT(1, 4, u8, 1 << 4, false); @@ -373,6 +373,13 @@ static void overflow_shift_test(struct kunit *test) TEST_ONE_SHIFT(0, 30, s32, 0, false); TEST_ONE_SHIFT(0, 62, s64, 0, false); + kunit_info(test, "%d sane shift tests finished\n", count); +} + +static void shift_overflow_test(struct kunit *test) +{ + int count = 0; + /* Overflow: shifted the bit off the end. */ TEST_ONE_SHIFT(1, 8, u8, 0, true); TEST_ONE_SHIFT(1, 16, u16, 0, true); @@ -420,6 +427,13 @@ static void overflow_shift_test(struct kunit *test) /* 0100000100001000001000000010000001000010000001000100010001001011 */ TEST_ONE_SHIFT(4686030735197619275LL, 2, s64, 0, true); + kunit_info(test, "%d overflow shift tests finished\n", count); +} + +static void shift_truncate_test(struct kunit *test) +{ + int count = 0; + /* Overflow: values larger than destination type. */ TEST_ONE_SHIFT(0x100, 0, u8, 0, true); TEST_ONE_SHIFT(0xFF, 0, s8, 0, true); @@ -431,6 +445,33 @@ static void overflow_shift_test(struct kunit *test) TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, int, 0, true); TEST_ONE_SHIFT(0xFFFFFFFFFFFFFFFFULL, 0, s64, 0, true); + /* Overflow: shifted at or beyond entire type's bit width. 
*/ + TEST_ONE_SHIFT(0, 8, u8, 0, true); + TEST_ONE_SHIFT(0, 9, u8, 0, true); + TEST_ONE_SHIFT(0, 8, s8, 0, true); + TEST_ONE_SHIFT(0, 9, s8, 0, true); + TEST_ONE_SHIFT(0, 16, u16, 0, true); + TEST_ONE_SHIFT(0, 17, u16, 0, true); + TEST_ONE_SHIFT(0, 16, s16, 0, true); + TEST_ONE_SHIFT(0, 17, s16, 0, true); + TEST_ONE_SHIFT(0, 32, u32, 0, true); + TEST_ONE_SHIFT(0, 33, u32, 0, true); + TEST_ONE_SHIFT(0, 32, int, 0, true); + TEST_ONE_SHIFT(0, 33, int, 0, true); + TEST_ONE_SHIFT(0, 32, s32, 0, true); + TEST_ONE_SHIFT(0, 33, s32, 0, true); + TEST_ONE_SHIFT(0, 64, u64, 0, true); + TEST_ONE_SHIFT(0, 65, u64, 0, true); + TEST_ONE_SHIFT(0, 64, s64, 0, true); + TEST_ONE_SHIFT(0, 65, s64, 0, true); + + kunit_info(test, "%d truncate shift tests finished\n", count); +} + +static void shift_nonsense_test(struct kunit *test) +{ + int count = 0; + /* Nonsense: negative initial value. */ TEST_ONE_SHIFT(-1, 0, s8, 0, true); TEST_ONE_SHIFT(-1, 0, u8, 0, true); @@ -455,26 +496,6 @@ static void overflow_shift_test(struct kunit *test) TEST_ONE_SHIFT(0, -30, s64, 0, true); TEST_ONE_SHIFT(0, -30, u64, 0, true); - /* Overflow: shifted at or beyond entire type's bit width. */ - TEST_ONE_SHIFT(0, 8, u8, 0, true); - TEST_ONE_SHIFT(0, 9, u8, 0, true); - TEST_ONE_SHIFT(0, 8, s8, 0, true); - TEST_ONE_SHIFT(0, 9, s8, 0, true); - TEST_ONE_SHIFT(0, 16, u16, 0, true); - TEST_ONE_SHIFT(0, 17, u16, 0, true); - TEST_ONE_SHIFT(0, 16, s16, 0, true); - TEST_ONE_SHIFT(0, 17, s16, 0, true); - TEST_ONE_SHIFT(0, 32, u32, 0, true); - TEST_ONE_SHIFT(0, 33, u32, 0, true); - TEST_ONE_SHIFT(0, 32, int, 0, true); - TEST_ONE_SHIFT(0, 33, int, 0, true); - TEST_ONE_SHIFT(0, 32, s32, 0, true); - TEST_ONE_SHIFT(0, 33, s32, 0, true); - TEST_ONE_SHIFT(0, 64, u64, 0, true); - TEST_ONE_SHIFT(0, 65, u64, 0, true); - TEST_ONE_SHIFT(0, 64, s64, 0, true); - TEST_ONE_SHIFT(0, 65, s64, 0, true); - /* * Corner case: for unsigned types, we fail when we've shifted * through the entire width of bits. For signed types, we might @@ -490,9 +511,9 @@ static void overflow_shift_test(struct kunit *test) TEST_ONE_SHIFT(0, 31, s32, 0, false); TEST_ONE_SHIFT(0, 63, s64, 0, false); - kunit_info(test, "%d shift tests finished\n", count); -#undef TEST_ONE_SHIFT + kunit_info(test, "%d nonsense shift tests finished\n", count); } +#undef TEST_ONE_SHIFT /* * Deal with the various forms of allocator arguments. See comments above @@ -703,7 +724,10 @@ static struct kunit_case overflow_test_cases[] = { KUNIT_CASE(u32_u32__int_overflow_test), KUNIT_CASE(u8_u8__int_overflow_test), KUNIT_CASE(int_int__u8_overflow_test), - KUNIT_CASE(overflow_shift_test), + KUNIT_CASE(shift_sane_test), + KUNIT_CASE(shift_overflow_test), + KUNIT_CASE(shift_truncate_test), + KUNIT_CASE(shift_nonsense_test), KUNIT_CASE(overflow_allocation_test), KUNIT_CASE(overflow_size_helpers_test), {} From dfbafa70bde26c40615f8c538ce68dac82a64fb4 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 26 Aug 2022 11:04:43 -0700 Subject: [PATCH 05/27] string: Introduce strtomem() and strtomem_pad() One of the "legitimate" uses of strncpy() is copying a NUL-terminated string into a fixed-size non-NUL-terminated character array. To avoid the weaknesses and ambiguity of intent when using strncpy(), provide replacement functions that explicitly distinguish between trailing padding and not, and require the destination buffer size be discoverable by the compiler. 
For example: struct obj { int foo; char small[4] __nonstring; char big[8] __nonstring; int bar; }; struct obj p; /* This will truncate to 4 chars with no trailing NUL */ strncpy(p.small, "hello", sizeof(p.small)); /* p.small contains 'h', 'e', 'l', 'l' */ /* This will NUL pad to 8 chars. */ strncpy(p.big, "hello", sizeof(p.big)); /* p.big contains 'h', 'e', 'l', 'l', 'o', '\0', '\0', '\0' */ When the "__nonstring" attributes are missing, the intent of the programmer becomes ambiguous for whether the lack of a trailing NUL in the p.small copy is a bug. Additionally, it's not clear whether the trailing padding in the p.big copy is _needed_. Both cases become unambiguous with: strtomem(p.small, "hello"); strtomem_pad(p.big, "hello", 0); See also https://github.com/KSPP/linux/issues/90 Expand the memcpy KUnit tests to include these functions. Cc: Wolfram Sang Cc: Nick Desaulniers Cc: Geert Uytterhoeven Cc: Guenter Roeck Signed-off-by: Kees Cook --- Documentation/process/deprecated.rst | 11 ++++-- include/linux/fortify-string.h | 32 +++++++++++++++ include/linux/string.h | 43 ++++++++++++++++++++ lib/memcpy_kunit.c | 59 ++++++++++++++++++++++++++-- 4 files changed, 137 insertions(+), 8 deletions(-) diff --git a/Documentation/process/deprecated.rst b/Documentation/process/deprecated.rst index a6e36d9c3d14..c8fd53a11a20 100644 --- a/Documentation/process/deprecated.rst +++ b/Documentation/process/deprecated.rst @@ -138,17 +138,20 @@ be NUL terminated. This can lead to various linear read overflows and other misbehavior due to the missing termination. It also NUL-pads the destination buffer if the source contents are shorter than the destination buffer size, which may be a needless performance penalty -for callers using only NUL-terminated strings. The safe replacement is +for callers using only NUL-terminated strings. + +When the destination is required to be NUL-terminated, the replacement is strscpy(), though care must be given to any cases where the return value of strncpy() was used, since strscpy() does not return a pointer to the destination, but rather a count of non-NUL bytes copied (or negative errno when it truncates). Any cases still needing NUL-padding should instead use strscpy_pad(). -If a caller is using non-NUL-terminated strings, strncpy() can -still be used, but destinations should be marked with the `__nonstring +If a caller is using non-NUL-terminated strings, strtomem() should be +used, and the destinations should be marked with the `__nonstring `_ -attribute to avoid future compiler warnings. +attribute to avoid future compiler warnings. For cases still needing +NUL-padding, strtomem_pad() can be used. strlcpy() --------- diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h index 3b401fa0f374..8e8c2c87b1d5 100644 --- a/include/linux/fortify-string.h +++ b/include/linux/fortify-string.h @@ -77,6 +77,38 @@ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) #define POS __pass_object_size(1) #define POS0 __pass_object_size(0) +/** + * strncpy - Copy a string to memory with non-guaranteed NUL padding + * + * @p: pointer to destination of copy + * @q: pointer to NUL-terminated source string to copy + * @size: bytes to write at @p + * + * If strlen(@q) >= @size, the copy of @q will stop after @size bytes, + * and @p will NOT be NUL-terminated + * + * If strlen(@q) < @size, following the copy of @q, trailing NUL bytes + * will be written to @p until @size total bytes have been written. + * + * Do not use this function. 
While FORTIFY_SOURCE tries to avoid + * over-reads of @q, it cannot defend against writing unterminated + * results to @p. Using strncpy() remains ambiguous and fragile. + * Instead, please choose an alternative, so that the expectation + * of @p's contents is unambiguous: + * + * +--------------------+-----------------+------------+ + * | @p needs to be: | padded to @size | not padded | + * +====================+=================+============+ + * | NUL-terminated | strscpy_pad() | strscpy() | + * +--------------------+-----------------+------------+ + * | not NUL-terminated | strtomem_pad() | strtomem() | + * +--------------------+-----------------+------------+ + * + * Note strscpy*()'s differing return values for detecting truncation, + * and strtomem*()'s expectation that the destination is marked with + * __nonstring when it is a character array. + * + */ __FORTIFY_INLINE __diagnose_as(__builtin_strncpy, 1, 2, 3) char *strncpy(char * const POS p, const char *q, __kernel_size_t size) { diff --git a/include/linux/string.h b/include/linux/string.h index 61ec7e4f6311..cf7607b32102 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -260,6 +260,49 @@ static inline const char *kbasename(const char *path) void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count, int pad); +/** + * strtomem_pad - Copy NUL-terminated string to non-NUL-terminated buffer + * + * @dest: Pointer of destination character array (marked as __nonstring) + * @src: Pointer to NUL-terminated string + * @pad: Padding character to fill any remaining bytes of @dest after copy + * + * This is a replacement for strncpy() uses where the destination is not + * a NUL-terminated string, but with bounds checking on the source size, and + * an explicit padding character. If padding is not required, use strtomem(). + * + * Note that the size of @dest is not an argument, as the length of @dest + * must be discoverable by the compiler. + */ +#define strtomem_pad(dest, src, pad) do { \ + const size_t _dest_len = __builtin_object_size(dest, 1); \ + \ + BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \ + _dest_len == (size_t)-1); \ + memcpy_and_pad(dest, _dest_len, src, strnlen(src, _dest_len), pad); \ +} while (0) + +/** + * strtomem - Copy NUL-terminated string to non-NUL-terminated buffer + * + * @dest: Pointer of destination character array (marked as __nonstring) + * @src: Pointer to NUL-terminated string + * + * This is a replacement for strncpy() uses where the destination is not + * a NUL-terminated string, but with bounds checking on the source size, and + * without trailing padding. If padding is required, use strtomem_pad(). + * + * Note that the size of @dest is not an argument, as the length of @dest + * must be discoverable by the compiler. 
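+ *
+ * Illustrative use (a sketch only; the struct is made up):
+ *
+ *	struct rec { char tag[8] __nonstring; } r;
+ *
+ *	strtomem(r.tag, "hello");	/* copies 5 bytes, no padding */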
+ */ +#define strtomem(dest, src) do { \ + const size_t _dest_len = __builtin_object_size(dest, 1); \ + \ + BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \ + _dest_len == (size_t)-1); \ + memcpy(dest, src, min(_dest_len, strnlen(src, _dest_len))); \ +} while (0) + /** * memset_after - Set a value after a struct member to the end of a struct * diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c index 62f8ffcbbaa3..d22fa3838ee9 100644 --- a/lib/memcpy_kunit.c +++ b/lib/memcpy_kunit.c @@ -29,9 +29,8 @@ struct some_bytes { }; #define check(instance, v) do { \ - int i; \ BUILD_BUG_ON(sizeof(instance.data) != 32); \ - for (i = 0; i < sizeof(instance.data); i++) { \ + for (size_t i = 0; i < sizeof(instance.data); i++) { \ KUNIT_ASSERT_EQ_MSG(test, instance.data[i], v, \ "line %d: '%s' not initialized to 0x%02x @ %d (saw 0x%02x)\n", \ __LINE__, #instance, v, i, instance.data[i]); \ @@ -39,9 +38,8 @@ struct some_bytes { } while (0) #define compare(name, one, two) do { \ - int i; \ BUILD_BUG_ON(sizeof(one) != sizeof(two)); \ - for (i = 0; i < sizeof(one); i++) { \ + for (size_t i = 0; i < sizeof(one); i++) { \ KUNIT_EXPECT_EQ_MSG(test, one.data[i], two.data[i], \ "line %d: %s.data[%d] (0x%02x) != %s.data[%d] (0x%02x)\n", \ __LINE__, #one, i, one.data[i], #two, i, two.data[i]); \ @@ -272,10 +270,63 @@ static void memset_test(struct kunit *test) #undef TEST_OP } +static void strtomem_test(struct kunit *test) +{ + static const char input[] = "hi"; + static const char truncate[] = "this is too long"; + struct { + unsigned long canary1; + unsigned char output[sizeof(unsigned long)] __nonstring; + unsigned long canary2; + } wrap; + + memset(&wrap, 0xFF, sizeof(wrap)); + KUNIT_EXPECT_EQ_MSG(test, wrap.canary1, ULONG_MAX, + "bad initial canary value"); + KUNIT_EXPECT_EQ_MSG(test, wrap.canary2, ULONG_MAX, + "bad initial canary value"); + + /* Check unpadded copy leaves surroundings untouched. */ + strtomem(wrap.output, input); + KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); + KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]); + KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]); + for (size_t i = 2; i < sizeof(wrap.output); i++) + KUNIT_EXPECT_EQ(test, wrap.output[i], 0xFF); + KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); + + /* Check truncated copy leaves surroundings untouched. */ + memset(&wrap, 0xFF, sizeof(wrap)); + strtomem(wrap.output, truncate); + KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); + for (size_t i = 0; i < sizeof(wrap.output); i++) + KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]); + KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); + + /* Check padded copy leaves only string padded. */ + memset(&wrap, 0xFF, sizeof(wrap)); + strtomem_pad(wrap.output, input, 0xAA); + KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); + KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]); + KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]); + for (size_t i = 2; i < sizeof(wrap.output); i++) + KUNIT_EXPECT_EQ(test, wrap.output[i], 0xAA); + KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); + + /* Check truncated padded copy has no padding. 
*/
+	memset(&wrap, 0xFF, sizeof(wrap));
+	strtomem_pad(wrap.output, truncate, 0xAA);
+	KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+	for (size_t i = 0; i < sizeof(wrap.output); i++)
+		KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]);
+	KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+}
+
 static struct kunit_case memcpy_test_cases[] = {
 	KUNIT_CASE(memset_test),
 	KUNIT_CASE(memcpy_test),
 	KUNIT_CASE(memmove_test),
+	KUNIT_CASE(strtomem_test),
 	{}
 };

From d07c0acb4f41cc42a0d97530946965b3e4fa68c1 Mon Sep 17 00:00:00 2001
From: Kees Cook <keescook@chromium.org>
Date: Fri, 2 Sep 2022 13:02:26 -0700
Subject: [PATCH 06/27] fortify: Fix __compiletime_strlen() under
 UBSAN_BOUNDS_LOCAL

With CONFIG_FORTIFY_SOURCE=y and CONFIG_UBSAN_LOCAL_BOUNDS=y enabled,
we observe a runtime panic while running Android's Compatibility Test
Suite's (CTS) android.hardware.input.cts.tests. This is stemming from a
strlen() call in hidinput_allocate().

__compiletime_strlen() is implemented in terms of
__builtin_object_size(), then does an array access to check for
NUL-termination. A quirk of __builtin_object_size() is that for strings
whose values are runtime dependent, __builtin_object_size(str, 1 or 0)
returns the maximum size of possible values when those sizes are
determinable at compile time. Example:

  static const char *v = "FOO BAR";
  static const char *y = "FOO BA";
  unsigned long x(int z)
  {
	// Returns 8, which is:
	// max(__builtin_object_size(v, 1), __builtin_object_size(y, 1))
	return __builtin_object_size(z ? v : y, 1);
  }

So when FORTIFY_SOURCE is enabled, the current implementation of
__compiletime_strlen() will try to access beyond the end of y at
runtime using the size of v. Mixed with UBSAN_LOCAL_BOUNDS we get a
fault.

hidinput_allocate() has a local C string whose value is control flow
dependent on a switch statement, so __builtin_object_size(str, 1)
evaluates to the maximum string length, making all other cases fault on
the last character check. hidinput_allocate() could be cleaned up to
avoid runtime calls to strlen() since the local variable can only have
literal values, so there's no benefit to trying to fortify the strlen
call site there.

Perform a __builtin_constant_p() check against index 0 earlier in the
macro to filter out the control-flow-dependent case.

Add a KUnit test for checking the expected behavioral characteristics
of FORTIFY_SOURCE internals.
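
To illustrate the control-flow-dependent pattern (a reconstructed
sketch, not the actual hidinput_allocate() code):

  const char *str;

  switch (which) {
  case 1:
	str = "longer name";
	break;
  default:
	str = "short";
	break;
  }
  /*
   * __builtin_object_size(str, 1) reports the maximum of the possible
   * sizes, so probing for NUL at that index can read past the end of
   * the shorter literal at run time.
   */
  return strlen(str);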
Cc: Nathan Chancellor Cc: Tom Rix Cc: Andrew Morton Cc: Vlastimil Babka Cc: "Steven Rostedt (Google)" Cc: David Gow Cc: Yury Norov Cc: Masami Hiramatsu Cc: Sander Vanheule Cc: linux-hardening@vger.kernel.org Cc: llvm@lists.linux.dev Reviewed-by: Nick Desaulniers Tested-by: Android Treehugger Robot Link: https://android-review.googlesource.com/c/kernel/common/+/2206839 Co-developed-by: Nick Desaulniers Signed-off-by: Nick Desaulniers Signed-off-by: Kees Cook --- include/linux/fortify-string.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h index 8e8c2c87b1d5..be264091f7a7 100644 --- a/include/linux/fortify-string.h +++ b/include/linux/fortify-string.h @@ -19,7 +19,8 @@ void __write_overflow_field(size_t avail, size_t wanted) __compiletime_warning(" unsigned char *__p = (unsigned char *)(p); \ size_t __ret = (size_t)-1; \ size_t __p_size = __builtin_object_size(p, 1); \ - if (__p_size != (size_t)-1) { \ + if (__p_size != (size_t)-1 && \ + __builtin_constant_p(*__p)) { \ size_t __p_len = __p_size - 1; \ if (__builtin_constant_p(__p[__p_len]) && \ __p[__p_len] == '\0') \ From 875bfd5276f31d09e811d31fca638b9f4d1205e8 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 2 Sep 2022 13:02:26 -0700 Subject: [PATCH 07/27] fortify: Add KUnit test for FORTIFY_SOURCE internals Add lib/fortify_kunit.c KUnit test for checking the expected behavioral characteristics of FORTIFY_SOURCE internals. Cc: Nick Desaulniers Cc: Nathan Chancellor Cc: Tom Rix Cc: Andrew Morton Cc: Vlastimil Babka Cc: "Steven Rostedt (Google)" Cc: Yury Norov Cc: Masami Hiramatsu Cc: Sander Vanheule Cc: linux-hardening@vger.kernel.org Cc: llvm@lists.linux.dev Reviewed-by: David Gow Signed-off-by: Kees Cook --- MAINTAINERS | 1 + lib/Kconfig.debug | 9 ++++++ lib/Makefile | 1 + lib/fortify_kunit.c | 77 +++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 88 insertions(+) create mode 100644 lib/fortify_kunit.c diff --git a/MAINTAINERS b/MAINTAINERS index 9d7f64dc0efe..640115472199 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8002,6 +8002,7 @@ L: linux-hardening@vger.kernel.org S: Supported T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening F: include/linux/fortify-string.h +F: lib/fortify_kunit.c F: lib/test_fortify/* F: scripts/test_fortify.sh K: \b__NO_FORTIFY\b diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 36455953d306..1f267c0ddffd 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2542,6 +2542,15 @@ config STACKINIT_KUNIT_TEST CONFIG_GCC_PLUGIN_STRUCTLEAK, CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF, or CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL. +config FORTIFY_KUNIT_TEST + tristate "Test fortified str*() and mem*() function internals at runtime" if !KUNIT_ALL_TESTS + depends on KUNIT && FORTIFY_SOURCE + default KUNIT_ALL_TESTS + help + Builds unit tests for checking internals of FORTIFY_SOURCE as used + by the str*() and mem*() family of functions. For testing runtime + traps of FORTIFY_SOURCE, see LKDTM's "FORTIFY_*" tests. 
+
 config TEST_UDELAY
 	tristate "udelay test driver"
 	help
diff --git a/lib/Makefile b/lib/Makefile
index f545140ed9e7..4ee1ceae945a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -381,6 +381,7 @@ obj-$(CONFIG_IS_SIGNED_TYPE_KUNIT_TEST) += is_signed_type_kunit.o
 obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o
 CFLAGS_stackinit_kunit.o += $(call cc-disable-warning, switch-unreachable)
 obj-$(CONFIG_STACKINIT_KUNIT_TEST) += stackinit_kunit.o
+obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o
 
 obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
new file mode 100644
index 000000000000..99bc0ea60d27
--- /dev/null
+++ b/lib/fortify_kunit.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Runtime test cases for CONFIG_FORTIFY_SOURCE that aren't expected to
+ * Oops the kernel on success. (For those, see drivers/misc/lkdtm/fortify.c)
+ *
+ * For corner cases with UBSAN, try testing with:
+ *
+ * ./tools/testing/kunit/kunit.py run --arch=x86_64 \
+ *	--kconfig_add CONFIG_FORTIFY_SOURCE=y \
+ *	--kconfig_add CONFIG_UBSAN=y \
+ *	--kconfig_add CONFIG_UBSAN_TRAP=y \
+ *	--kconfig_add CONFIG_UBSAN_BOUNDS=y \
+ *	--kconfig_add CONFIG_UBSAN_LOCAL_BOUNDS=y \
+ *	--make_options LLVM=1 fortify
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/string.h>
+#include <linux/init.h>
+
+static const char array_of_10[] = "this is 10";
+static const char *ptr_of_11 = "this is 11!";
+static char array_unknown[] = "compiler thinks I might change";
+
+static void known_sizes_test(struct kunit *test)
+{
+	KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8);
+	KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_of_10), 10);
+	KUNIT_EXPECT_EQ(test, __compiletime_strlen(ptr_of_11), 11);
+
+	KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_unknown), SIZE_MAX);
+	/* Externally defined and dynamically sized string pointer: */
+	KUNIT_EXPECT_EQ(test, __compiletime_strlen(saved_command_line), SIZE_MAX);
+}
+
+/* This is volatile so the optimizer can't perform DCE below. */
+static volatile int pick;
+
+/* Not inline to keep optimizer from figuring out which string we want. */
+static noinline size_t want_minus_one(int pick)
+{
+	const char *str;
+
+	switch (pick) {
+	case 1:
+		str = "4444";
+		break;
+	case 2:
+		str = "333";
+		break;
+	default:
+		str = "1";
+		break;
+	}
+	return __compiletime_strlen(str);
+}
+
+static void control_flow_split_test(struct kunit *test)
+{
+	KUNIT_EXPECT_EQ(test, want_minus_one(pick), SIZE_MAX);
+}
+
+static struct kunit_case fortify_test_cases[] = {
+	KUNIT_CASE(known_sizes_test),
+	KUNIT_CASE(control_flow_split_test),
+	{}
+};
+
+static struct kunit_suite fortify_test_suite = {
+	.name = "fortify",
+	.test_cases = fortify_test_cases,
+};
+
+kunit_test_suite(fortify_test_suite);
+
+MODULE_LICENSE("GPL");

From 311fb40aa0569abacc430b0d66ee41470803111f Mon Sep 17 00:00:00 2001
From: Kees Cook <keescook@chromium.org>
Date: Fri, 2 Sep 2022 13:23:06 -0700
Subject: [PATCH 08/27] fortify: Use SIZE_MAX instead of (size_t)-1

Clean up uses of "(size_t)-1" in favor of SIZE_MAX.
Cc: linux-hardening@vger.kernel.org Suggested-by: Nick Desaulniers Signed-off-by: Kees Cook --- include/linux/fortify-string.h | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h index be264091f7a7..e46af17d23d0 100644 --- a/include/linux/fortify-string.h +++ b/include/linux/fortify-string.h @@ -3,6 +3,7 @@ #define _LINUX_FORTIFY_STRING_H_ #include +#include #define __FORTIFY_INLINE extern __always_inline __gnu_inline __overloadable #define __RENAME(x) __asm__(#x) @@ -17,9 +18,9 @@ void __write_overflow_field(size_t avail, size_t wanted) __compiletime_warning(" #define __compiletime_strlen(p) \ ({ \ unsigned char *__p = (unsigned char *)(p); \ - size_t __ret = (size_t)-1; \ + size_t __ret = SIZE_MAX; \ size_t __p_size = __builtin_object_size(p, 1); \ - if (__p_size != (size_t)-1 && \ + if (__p_size != SIZE_MAX && \ __builtin_constant_p(*__p)) { \ size_t __p_len = __p_size - 1; \ if (__builtin_constant_p(__p[__p_len]) && \ @@ -127,7 +128,7 @@ char *strcat(char * const POS p, const char *q) { size_t p_size = __builtin_object_size(p, 1); - if (p_size == (size_t)-1) + if (p_size == SIZE_MAX) return __underlying_strcat(p, q); if (strlcat(p, q, p_size) >= p_size) fortify_panic(__func__); @@ -142,7 +143,7 @@ __FORTIFY_INLINE __kernel_size_t strnlen(const char * const POS p, __kernel_size size_t ret; /* We can take compile-time actions when maxlen is const. */ - if (__builtin_constant_p(maxlen) && p_len != (size_t)-1) { + if (__builtin_constant_p(maxlen) && p_len != SIZE_MAX) { /* If p is const, we can use its compile-time-known len. */ if (maxlen >= p_size) return p_len; @@ -170,7 +171,7 @@ __kernel_size_t __fortify_strlen(const char * const POS p) size_t p_size = __builtin_object_size(p, 1); /* Give up if we don't know how large p is. */ - if (p_size == (size_t)-1) + if (p_size == SIZE_MAX) return __underlying_strlen(p); ret = strnlen(p, p_size); if (p_size <= ret) @@ -187,7 +188,7 @@ __FORTIFY_INLINE size_t strlcpy(char * const POS p, const char * const POS q, si size_t q_len; /* Full count of source string length. */ size_t len; /* Count of characters going into destination. */ - if (p_size == (size_t)-1 && q_size == (size_t)-1) + if (p_size == SIZE_MAX && q_size == SIZE_MAX) return __real_strlcpy(p, q, size); q_len = strlen(q); len = (q_len >= size) ? size - 1 : q_len; @@ -215,7 +216,7 @@ __FORTIFY_INLINE ssize_t strscpy(char * const POS p, const char * const POS q, s size_t q_size = __builtin_object_size(q, 1); /* If we cannot get size of p and q default to call strscpy. */ - if (p_size == (size_t) -1 && q_size == (size_t) -1) + if (p_size == SIZE_MAX && q_size == SIZE_MAX) return __real_strscpy(p, q, size); /* @@ -260,7 +261,7 @@ char *strncat(char * const POS p, const char * const POS q, __kernel_size_t coun size_t p_size = __builtin_object_size(p, 1); size_t q_size = __builtin_object_size(q, 1); - if (p_size == (size_t)-1 && q_size == (size_t)-1) + if (p_size == SIZE_MAX && q_size == SIZE_MAX) return __underlying_strncat(p, q, count); p_len = strlen(p); copy_len = strnlen(q, count); @@ -301,10 +302,10 @@ __FORTIFY_INLINE void fortify_memset_chk(__kernel_size_t size, /* * Always stop accesses beyond the struct that contains the * field, when the buffer's remaining size is known. - * (The -1 test is to optimize away checks where the buffer + * (The SIZE_MAX test is to optimize away checks where the buffer * lengths are unknown.) 
*/ - if (p_size != (size_t)(-1) && p_size < size) + if (p_size != SIZE_MAX && p_size < size) fortify_panic("memset"); } @@ -395,11 +396,11 @@ __FORTIFY_INLINE void fortify_memcpy_chk(__kernel_size_t size, /* * Always stop accesses beyond the struct that contains the * field, when the buffer's remaining size is known. - * (The -1 test is to optimize away checks where the buffer + * (The SIZE_MAX test is to optimize away checks where the buffer * lengths are unknown.) */ - if ((p_size != (size_t)(-1) && p_size < size) || - (q_size != (size_t)(-1) && q_size < size)) + if ((p_size != SIZE_MAX && p_size < size) || + (q_size != SIZE_MAX && q_size < size)) fortify_panic(func); } @@ -498,7 +499,7 @@ char *strcpy(char * const POS p, const char * const POS q) size_t size; /* If neither buffer size is known, immediately give up. */ - if (p_size == (size_t)-1 && q_size == (size_t)-1) + if (p_size == SIZE_MAX && q_size == SIZE_MAX) return __underlying_strcpy(p, q); size = strlen(q) + 1; /* Compile-time check for const size overflow. */ From 54d9469bc515dc5fcbc20eecbe19cea868b70d68 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 24 Jun 2021 15:39:26 -0700 Subject: [PATCH 09/27] fortify: Add run-time WARN for cross-field memcpy() Enable run-time checking of dynamic memcpy() and memmove() lengths, issuing a WARN when a write would exceed the size of the target struct member, when built with CONFIG_FORTIFY_SOURCE=y. This would have caught all of the memcpy()-based buffer overflows in the last 3 years, specifically covering all the cases where the destination buffer size is known at compile time. This change ONLY adds a run-time warning. As false positives are currently still expected, this will not block the overflow. The new warnings will look like this: memcpy: detected field-spanning write (size N) of single field "var->dest" (size M) WARNING: CPU: n PID: pppp at source/file/path.c:nr function+0xXX/0xXX [module] There may be false positives in the kernel where intentional field-spanning writes are happening. These need to be addressed similarly to how the compile-time cases were addressed: add a struct_group(), split the memcpy(), or some other refactoring. In order to make counting/investigating instances of added runtime checks easier, each instance includes the destination variable name as a WARN argument, prefixed with 'field "'. Therefore, on an x86_64 defconfig build, it is trivial to inspect the build artifacts to find instances. For example on an x86_64 defconfig build, there are 78 new run-time memcpy() bounds checks added: $ for i in vmlinux $(find . -name '*.ko'); do \ strings "$i" | grep '^field "'; done | wc -l 78 Simple cases where a destination buffer is known to be a dynamic size do not generate a WARN. For example: struct normal_flex_array { void *a; int b; u32 c; size_t array_size; u8 flex_array[]; }; struct normal_flex_array *instance; ... /* These will be ignored for run-time bounds checking. */ memcpy(instance, src, len); memcpy(instance->flex_array, src, len); However, one of the dynamic-sized destination cases is irritatingly unable to be detected by the compiler: when using memcpy() to target a composite struct member which contains a trailing flexible array struct. For example: struct wrapper { int foo; char bar; struct normal_flex_array embedded; }; struct wrapper *instance; ... 
  /* This will incorrectly WARN when len > sizeof(instance->embedded) */
  memcpy(&instance->embedded, src, len);

These cases end up appearing to the compiler to be sized as if the
flexible array had 0 elements. :( For more details see:
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101832
https://godbolt.org/z/vW6x8vh4P

These "composite flexible array structure destination" cases will need
to be flushed out and addressed on a case-by-case basis.

Regardless, for the general case of using memcpy() on flexible array
destinations, future APIs will be created to handle common cases. Those
can be used to migrate away from open-coded memcpy() so that proper
error handling (instead of trapping) can be used.

As mentioned, none of these bounds checks block any overflows
currently. For users that have tested their workloads, do not encounter
any warnings, and wish to make these checks stop any overflows, they
can use a big hammer and set the sysctl panic_on_warn=1.

Signed-off-by: Kees Cook <keescook@chromium.org>
---
 include/linux/fortify-string.h | 70 ++++++++++++++++++++++++++++++++--
 1 file changed, 67 insertions(+), 3 deletions(-)

diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
index e46af17d23d0..ff879efe94ed 100644
--- a/include/linux/fortify-string.h
+++ b/include/linux/fortify-string.h
@@ -2,6 +2,7 @@
 #ifndef _LINUX_FORTIFY_STRING_H_
 #define _LINUX_FORTIFY_STRING_H_
 
+#include <linux/bug.h>
 #include <linux/const.h>
 #include <linux/limits.h>
 
@@ -353,7 +354,7 @@ __FORTIFY_INLINE void fortify_memset_chk(__kernel_size_t size,
  * V = vulnerable to run-time overflow (will need refactoring to solve)
  *
  */
-__FORTIFY_INLINE void fortify_memcpy_chk(__kernel_size_t size,
+__FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
					 const size_t p_size,
					 const size_t q_size,
					 const size_t p_size_field,
@@ -402,16 +403,79 @@
 	if ((p_size != SIZE_MAX && p_size < size) ||
 	    (q_size != SIZE_MAX && q_size < size))
 		fortify_panic(func);
+
+	/*
+	 * Warn when writing beyond destination field size.
+	 *
+	 * We must ignore p_size_field == 0 for existing 0-element
+	 * fake flexible arrays, until they are all converted to
+	 * proper flexible arrays.
+	 *
+	 * The implementation of __builtin_object_size() behaves
+	 * like sizeof() when not directly referencing a flexible
+	 * array member, which means there will be many bounds checks
+	 * that will appear at run-time, without a way for them to be
+	 * detected at compile-time (as can be done when the destination
+	 * is specifically the flexible array member).
+	 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101832
+	 */
+	if (p_size_field != 0 && p_size_field != SIZE_MAX &&
+	    p_size != p_size_field && p_size_field < size)
+		return true;
+
+	return false;
 }
 
 #define __fortify_memcpy_chk(p, q, size, p_size, q_size,		\
			     p_size_field, q_size_field, op) ({	\
 	size_t __fortify_size = (size_t)(size);				\
-	fortify_memcpy_chk(__fortify_size, p_size, q_size,		\
-			   p_size_field, q_size_field, #op);		\
+	WARN_ONCE(fortify_memcpy_chk(__fortify_size, p_size, q_size,	\
+				     p_size_field, q_size_field, #op),	\
+		  #op ": detected field-spanning write (size %zu) of single %s (size %zu)\n", \
+		  __fortify_size,					\
+		  "field \"" #p "\" at " __FILE__ ":" __stringify(__LINE__), \
+		  p_size_field);					\
 	__underlying_##op(p, q, __fortify_size);			\
 })
 
+/*
+ * Notes about compile-time buffer size detection:
+ *
+ * With these types...
+ * + * struct middle { + * u16 a; + * u8 middle_buf[16]; + * int b; + * }; + * struct end { + * u16 a; + * u8 end_buf[16]; + * }; + * struct flex { + * int a; + * u8 flex_buf[]; + * }; + * + * void func(TYPE *ptr) { ... } + * + * Cases where destination size cannot be currently detected: + * - the size of ptr's object (seemingly by design, gcc & clang fail): + * __builtin_object_size(ptr, 1) == SIZE_MAX + * - the size of flexible arrays in ptr's obj (by design, dynamic size): + * __builtin_object_size(ptr->flex_buf, 1) == SIZE_MAX + * - the size of ANY array at the end of ptr's obj (gcc and clang bug): + * __builtin_object_size(ptr->end_buf, 1) == SIZE_MAX + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101836 + * + * Cases where destination size is currently detected: + * - the size of non-array members within ptr's object: + * __builtin_object_size(ptr->a, 1) == 2 + * - the size of non-flexible-array in the middle of ptr's obj: + * __builtin_object_size(ptr->middle_buf, 1) == 16 + * + */ + /* * __builtin_object_size() must be captured here to avoid evaluating argument * side-effects further into the macro layers. From 325bf6d84bad3fc641b94fad6e69c70e960fdf2e Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 31 Aug 2022 23:05:01 -0700 Subject: [PATCH 10/27] lkdtm: Update tests for memcpy() run-time warnings Clarify the LKDTM FORTIFY tests, and add tests for the mem*() family of functions, now that run-time checking is distinct. Cc: Arnd Bergmann Cc: Greg Kroah-Hartman Cc: Shuah Khan Cc: linux-kselftest@vger.kernel.org Signed-off-by: Kees Cook --- drivers/misc/lkdtm/fortify.c | 96 +++++++++++++++++++++---- tools/testing/selftests/lkdtm/tests.txt | 8 ++- 2 files changed, 88 insertions(+), 16 deletions(-) diff --git a/drivers/misc/lkdtm/fortify.c b/drivers/misc/lkdtm/fortify.c index 080293fa3c52..015927665678 100644 --- a/drivers/misc/lkdtm/fortify.c +++ b/drivers/misc/lkdtm/fortify.c @@ -10,28 +10,31 @@ static volatile int fortify_scratch_space; -static void lkdtm_FORTIFIED_OBJECT(void) +static void lkdtm_FORTIFY_STR_OBJECT(void) { struct target { char a[10]; - } target[2] = {}; + int foo; + } target[3] = {}; /* * Using volatile prevents the compiler from determining the value of * 'size' at compile time. Without that, we would get a compile error * rather than a runtime error. 
*/ - volatile int size = 11; + volatile int size = 20; - pr_info("trying to read past the end of a struct\n"); + pr_info("trying to strcmp() past the end of a struct\n"); + + strncpy(target[0].a, target[1].a, size); /* Store result to global to prevent the code from being eliminated */ - fortify_scratch_space = memcmp(&target[0], &target[1], size); + fortify_scratch_space = target[0].a[3]; - pr_err("FAIL: fortify did not block an object overread!\n"); + pr_err("FAIL: fortify did not block a strncpy() object write overflow!\n"); pr_expected_config(CONFIG_FORTIFY_SOURCE); } -static void lkdtm_FORTIFIED_SUBOBJECT(void) +static void lkdtm_FORTIFY_STR_MEMBER(void) { struct target { char a[10]; @@ -44,7 +47,7 @@ static void lkdtm_FORTIFIED_SUBOBJECT(void) strscpy(src, "over ten bytes", size); size = strlen(src) + 1; - pr_info("trying to strncpy past the end of a member of a struct\n"); + pr_info("trying to strncpy() past the end of a struct member...\n"); /* * strncpy(target.a, src, 20); will hit a compile error because the @@ -56,7 +59,72 @@ static void lkdtm_FORTIFIED_SUBOBJECT(void) /* Store result to global to prevent the code from being eliminated */ fortify_scratch_space = target.a[3]; - pr_err("FAIL: fortify did not block an sub-object overrun!\n"); + pr_err("FAIL: fortify did not block a strncpy() struct member write overflow!\n"); + pr_expected_config(CONFIG_FORTIFY_SOURCE); + + kfree(src); +} + +static void lkdtm_FORTIFY_MEM_OBJECT(void) +{ + int before[10]; + struct target { + char a[10]; + int foo; + } target = {}; + int after[10]; + /* + * Using volatile prevents the compiler from determining the value of + * 'size' at compile time. Without that, we would get a compile error + * rather than a runtime error. + */ + volatile int size = 20; + + memset(before, 0, sizeof(before)); + memset(after, 0, sizeof(after)); + fortify_scratch_space = before[5]; + fortify_scratch_space = after[5]; + + pr_info("trying to memcpy() past the end of a struct\n"); + + pr_info("0: %zu\n", __builtin_object_size(&target, 0)); + pr_info("1: %zu\n", __builtin_object_size(&target, 1)); + pr_info("s: %d\n", size); + memcpy(&target, &before, size); + + /* Store result to global to prevent the code from being eliminated */ + fortify_scratch_space = target.a[3]; + + pr_err("FAIL: fortify did not block a memcpy() object write overflow!\n"); + pr_expected_config(CONFIG_FORTIFY_SOURCE); +} + +static void lkdtm_FORTIFY_MEM_MEMBER(void) +{ + struct target { + char a[10]; + char b[10]; + } target; + volatile int size = 20; + char *src; + + src = kmalloc(size, GFP_KERNEL); + strscpy(src, "over ten bytes", size); + size = strlen(src) + 1; + + pr_info("trying to memcpy() past the end of a struct member...\n"); + + /* + * strncpy(target.a, src, 20); will hit a compile error because the + * compiler knows at build time that target.a < 20 bytes. Use a + * volatile to force a runtime error. + */ + memcpy(target.a, src, size); + + /* Store result to global to prevent the code from being eliminated */ + fortify_scratch_space = target.a[3]; + + pr_err("FAIL: fortify did not block a memcpy() struct member write overflow!\n"); pr_expected_config(CONFIG_FORTIFY_SOURCE); kfree(src); @@ -67,7 +135,7 @@ static void lkdtm_FORTIFIED_SUBOBJECT(void) * strscpy and generate a panic because there is a write overflow (i.e. src * length is greater than dst length). 
*/ -static void lkdtm_FORTIFIED_STRSCPY(void) +static void lkdtm_FORTIFY_STRSCPY(void) { char *src; char dst[5]; @@ -136,9 +204,11 @@ static void lkdtm_FORTIFIED_STRSCPY(void) } static struct crashtype crashtypes[] = { - CRASHTYPE(FORTIFIED_OBJECT), - CRASHTYPE(FORTIFIED_SUBOBJECT), - CRASHTYPE(FORTIFIED_STRSCPY), + CRASHTYPE(FORTIFY_STR_OBJECT), + CRASHTYPE(FORTIFY_STR_MEMBER), + CRASHTYPE(FORTIFY_MEM_OBJECT), + CRASHTYPE(FORTIFY_MEM_MEMBER), + CRASHTYPE(FORTIFY_STRSCPY), }; struct crashtype_category fortify_crashtypes = { diff --git a/tools/testing/selftests/lkdtm/tests.txt b/tools/testing/selftests/lkdtm/tests.txt index 65e53eb0840b..607b8d7e3ea3 100644 --- a/tools/testing/selftests/lkdtm/tests.txt +++ b/tools/testing/selftests/lkdtm/tests.txt @@ -75,7 +75,9 @@ USERCOPY_KERNEL STACKLEAK_ERASING OK: the rest of the thread stack is properly erased CFI_FORWARD_PROTO CFI_BACKWARD call trace:|ok: control flow unchanged -FORTIFIED_STRSCPY -FORTIFIED_OBJECT -FORTIFIED_SUBOBJECT +FORTIFY_STRSCPY detected buffer overflow +FORTIFY_STR_OBJECT detected buffer overflow +FORTIFY_STR_MEMBER detected buffer overflow +FORTIFY_MEM_OBJECT detected buffer overflow +FORTIFY_MEM_MEMBER detected field-spanning write PPC_SLB_MULTIHIT Recovered From ba38961a069b0d8d03b53218a6c29d737577d448 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 9 Feb 2022 16:32:24 -0800 Subject: [PATCH 11/27] um: Enable FORTIFY_SOURCE Enable FORTIFY_SOURCE so running Kunit tests can test fortified functions. Signed-off-by: Kees Cook Tested-by: David Gow Link: https://lore.kernel.org/r/20220210003224.773957-1-keescook@chromium.org --- arch/um/Kconfig | 1 + arch/um/os-Linux/user_syms.c | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/um/Kconfig b/arch/um/Kconfig index 78de31ac1da7..ad4ff3b0e91e 100644 --- a/arch/um/Kconfig +++ b/arch/um/Kconfig @@ -6,6 +6,7 @@ config UML bool default y select ARCH_EPHEMERAL_INODES + select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_KCOV select ARCH_HAS_STRNCPY_FROM_USER diff --git a/arch/um/os-Linux/user_syms.c b/arch/um/os-Linux/user_syms.c index cb667c9225ab..fd575ecbcaec 100644 --- a/arch/um/os-Linux/user_syms.c +++ b/arch/um/os-Linux/user_syms.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#define __NO_FORTIFY #include #include From aafc203bbad4bf6cf394a34ea698c2b0b8affae0 Mon Sep 17 00:00:00 2001 From: Matthias Kaehlcke Date: Mon, 29 Aug 2022 17:46:10 -0700 Subject: [PATCH 12/27] LoadPin: Fix Kconfig doc about format of file with verity digests The doc for CONFIG_SECURITY_LOADPIN_VERITY says that the file with verity digests must contain a comma separated list of digests. That was the case at some stage of the development, but was changed during the review process to one digest per line. Update the Kconfig doc accordingly. Reported-by: Jae Hoon Kim Signed-off-by: Matthias Kaehlcke Fixes: 3f805f8cc23b ("LoadPin: Enable loading from trusted dm-verity devices") Cc: stable@vger.kernel.org Signed-off-by: Kees Cook Link: https://lore.kernel.org/r/20220829174557.1.I5d202d1344212a3800d9828f936df6511eb2d0d1@changeid --- security/loadpin/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security/loadpin/Kconfig b/security/loadpin/Kconfig index 70e7985b2561..994c1d9376e6 100644 --- a/security/loadpin/Kconfig +++ b/security/loadpin/Kconfig @@ -33,4 +33,4 @@ config SECURITY_LOADPIN_VERITY on the LoadPin securityfs entry 'dm-verity'. The ioctl expects a file descriptor of a file with verity digests as parameter. 
The file must be located on the pinned root and - contain a comma separated list of digests. + contain one digest per line. From 916ef6232cc4b84db7082b4c3d3cf1753d9462ba Mon Sep 17 00:00:00 2001 From: Matthias Kaehlcke Date: Wed, 7 Sep 2022 13:30:58 -0700 Subject: [PATCH 13/27] dm: verity-loadpin: Only trust verity targets with enforcement Verity targets can be configured to ignore corrupted data blocks. LoadPin must only trust verity targets that are configured to perform some kind of enforcement when data corruption is detected, like returning an error, restarting the system or triggering a panic. Fixes: b6c1c5745ccc ("dm: Add verity helpers for LoadPin") Reported-by: Sarthak Kukreti Signed-off-by: Matthias Kaehlcke Reviewed-by: Sarthak Kukreti Cc: stable@vger.kernel.org Signed-off-by: Kees Cook Link: https://lore.kernel.org/r/20220907133055.1.Ic8a1dafe960dc0f8302e189642bc88ebb785d274@changeid --- drivers/md/dm-verity-loadpin.c | 8 ++++++++ drivers/md/dm-verity-target.c | 16 ++++++++++++++++ drivers/md/dm-verity.h | 1 + 3 files changed, 25 insertions(+) diff --git a/drivers/md/dm-verity-loadpin.c b/drivers/md/dm-verity-loadpin.c index 387ec43aef72..4f78cc55c251 100644 --- a/drivers/md/dm-verity-loadpin.c +++ b/drivers/md/dm-verity-loadpin.c @@ -14,6 +14,7 @@ LIST_HEAD(dm_verity_loadpin_trusted_root_digests); static bool is_trusted_verity_target(struct dm_target *ti) { + int verity_mode; u8 *root_digest; unsigned int digest_size; struct dm_verity_loadpin_trusted_root_digest *trd; @@ -22,6 +23,13 @@ static bool is_trusted_verity_target(struct dm_target *ti) if (!dm_is_verity_target(ti)) return false; + verity_mode = dm_verity_get_mode(ti); + + if ((verity_mode != DM_VERITY_MODE_EIO) && + (verity_mode != DM_VERITY_MODE_RESTART) && + (verity_mode != DM_VERITY_MODE_PANIC)) + return false; + if (dm_verity_get_root_digest(ti, &root_digest, &digest_size)) return false; diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 94b6cb599db4..8a00cc42e498 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -1446,6 +1446,22 @@ bool dm_is_verity_target(struct dm_target *ti) return ti->type->module == THIS_MODULE; } +/* + * Get the verity mode (error behavior) of a verity target. + * + * Returns the verity mode of the target, or -EINVAL if 'ti' is not a verity + * target. + */ +int dm_verity_get_mode(struct dm_target *ti) +{ + struct dm_verity *v = ti->private; + + if (!dm_is_verity_target(ti)) + return -EINVAL; + + return v->mode; +} + /* * Get the root digest of a verity target. * diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h index 45455de1b4bc..98f306ec6a33 100644 --- a/drivers/md/dm-verity.h +++ b/drivers/md/dm-verity.h @@ -134,6 +134,7 @@ extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io, sector_t block, u8 *digest, bool *is_zero); extern bool dm_is_verity_target(struct dm_target *ti); +extern int dm_verity_get_mode(struct dm_target *ti); extern int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsigned int *digest_size); From 6e42aec7c75947e0d6b38400628f171364eb8231 Mon Sep 17 00:00:00 2001 From: Matthias Kaehlcke Date: Tue, 6 Sep 2022 18:18:12 -0700 Subject: [PATCH 14/27] LoadPin: Require file with verity root digests to have a header LoadPin expects the file with trusted verity root digests to be an ASCII file with one digest (hex value) per line. 
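For illustration, a digest file in this one-digest-per-line format could look like the following sketch (the hex values are made-up placeholders, not real dm-verity root digests):

786fce0d39b8ef1fbbeeae26a2e927842accb2261267933ced9d4f2519c0f855
5df4c4b9b6663d0a10559a58370a09bcde39edc3a3e20fb0c44b30eeed4c7d41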
A pinned root could contain files that meet these format requirements, even though the hex values don't represent trusted root digests. Add a new requirement to the file format: the first line must contain a fixed header string. This prevents attackers from feeding files with an otherwise valid format to LoadPin. Suggested-by: Sarthak Kukreti Signed-off-by: Matthias Kaehlcke Signed-off-by: Kees Cook Link: https://lore.kernel.org/r/20220906181725.1.I3f51d1bb0014e5a5951be4ad3c5ad7c7ca1dfc32@changeid --- security/loadpin/Kconfig | 7 ++++++- security/loadpin/loadpin.c | 16 +++++++++++++++- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/security/loadpin/Kconfig b/security/loadpin/Kconfig index 994c1d9376e6..6724eaba3d36 100644 --- a/security/loadpin/Kconfig +++ b/security/loadpin/Kconfig @@ -33,4 +33,9 @@ config SECURITY_LOADPIN_VERITY on the LoadPin securityfs entry 'dm-verity'. The ioctl expects a file descriptor of a file with verity digests as parameter. The file must be located on the pinned root and - contain one digest per line. + start with the line: + + # LOADPIN_TRUSTED_VERITY_ROOT_DIGESTS + + This is followed by the verity digests, with one digest per + line. diff --git a/security/loadpin/loadpin.c b/security/loadpin/loadpin.c index 44521582dcba..de41621f4998 100644 --- a/security/loadpin/loadpin.c +++ b/security/loadpin/loadpin.c @@ -21,6 +21,8 @@ #include #include +#define VERITY_DIGEST_FILE_HEADER "# LOADPIN_TRUSTED_VERITY_ROOT_DIGESTS" + static void report_load(const char *origin, struct file *file, char *operation) { char *cmdline, *pathname; @@ -292,9 +294,21 @@ static int read_trusted_verity_root_digests(unsigned int fd) p = strim(data); while ((d = strsep(&p, "\n")) != NULL) { - int len = strlen(d); + int len; struct dm_verity_loadpin_trusted_root_digest *trd; + if (d == data) { + /* first line, validate header */ + if (strcmp(d, VERITY_DIGEST_FILE_HEADER)) { + rc = -EPROTO; + goto err; + } + + continue; + } + + len = strlen(d); + if (len % 2) { rc = -EPROTO; goto err; From 98388bda6a99d76309f81584f2bc0d773bdf8b35 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 7 Sep 2022 11:03:29 -0700 Subject: [PATCH 15/27] lib: Improve the is_signed_type() kunit test Since the definition of is_signed_type() has been moved from <linux/overflow.h> to <linux/compiler.h>, include the latter header file instead of the former. Additionally, add a test for the type 'char'.
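Whether plain 'char' is signed is implementation-defined, which is why the new test case keys off the compiler-provided __CHAR_UNSIGNED__ macro rather than hard-coding one expectation. A minimal userspace sketch of the same check (illustrative only, not kernel code):

/* Build with: cc char_sign.c; add -funsigned-char to flip the result. */
#include <stdio.h>

int main(void)
{
#ifdef __CHAR_UNSIGNED__
	/* The default on e.g. arm and s390. */
	printf("char is unsigned: (char)-1 == %d\n", (int)(char)-1);
#else
	/* The default on e.g. x86. */
	printf("char is signed: (char)-1 == %d\n", (int)(char)-1);
#endif
	return 0;
}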
Cc: Isabella Basso Cc: Rasmus Villemoes Signed-off-by: Bart Van Assche Signed-off-by: Kees Cook Link: https://lore.kernel.org/r/20220907180329.3825417-1-bvanassche@acm.org --- lib/is_signed_type_kunit.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/is_signed_type_kunit.c b/lib/is_signed_type_kunit.c index f2eedb1f0935..207207522925 100644 --- a/lib/is_signed_type_kunit.c +++ b/lib/is_signed_type_kunit.c @@ -5,7 +5,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <kunit/test.h> -#include <linux/overflow.h> +#include <linux/compiler.h> enum unsigned_enum { constant_a = 3, @@ -21,6 +21,11 @@ static void is_signed_type_test(struct kunit *test) KUNIT_EXPECT_EQ(test, is_signed_type(bool), false); KUNIT_EXPECT_EQ(test, is_signed_type(signed char), true); KUNIT_EXPECT_EQ(test, is_signed_type(unsigned char), false); +#ifdef __CHAR_UNSIGNED__ + KUNIT_EXPECT_EQ(test, is_signed_type(char), false); +#else + KUNIT_EXPECT_EQ(test, is_signed_type(char), true); +#endif KUNIT_EXPECT_EQ(test, is_signed_type(int), true); KUNIT_EXPECT_EQ(test, is_signed_type(unsigned int), false); KUNIT_EXPECT_EQ(test, is_signed_type(long), true); From 66cb2a36a96f6facbcb4ef1db967b8e9ea6910fe Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 7 Sep 2022 16:27:06 -0700 Subject: [PATCH 16/27] kunit/memcpy: Avoid pathological compile-time string size The memcpy() KUnit tests are trying to sanity-check run-time behaviors, but tripped compile-time warnings about a pathological condition of a too-small buffer being used for input. Avoid this by explicitly resizing the buffer, but leaving the string short. Avoid the following warning: lib/memcpy_kunit.c: In function 'strtomem_test': include/linux/string.h:303:42: warning: 'strnlen' specified bound 4 exceeds source size 3 [-Wstringop-overread] 303 | memcpy(dest, src, min(_dest_len, strnlen(src, _dest_len))); \ include/linux/minmax.h:32:39: note: in definition of macro '__cmp_once' 32 | typeof(y) unique_y = (y); \ | ^ include/linux/minmax.h:45:25: note: in expansion of macro '__careful_cmp' 45 | #define min(x, y) __careful_cmp(x, y, <) | ^~~~~~~~~~~~~ include/linux/string.h:303:27: note: in expansion of macro 'min' 303 | memcpy(dest, src, min(_dest_len, strnlen(src, _dest_len))); \ | ^~~ lib/memcpy_kunit.c:290:9: note: in expansion of macro 'strtomem' 290 | strtomem(wrap.output, input); | ^~~~~~~~ lib/memcpy_kunit.c:275:27: note: source object allocated here 275 | static const char input[] = "hi"; | ^~~~~ Reported-by: kernel test robot Link: https://lore.kernel.org/linux-mm/202209070728.o3stvgVt-lkp@intel.com Fixes: dfbafa70bde2 ("string: Introduce strtomem() and strtomem_pad()") Signed-off-by: Kees Cook --- lib/memcpy_kunit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c index d22fa3838ee9..2b5cc70ac53f 100644 --- a/lib/memcpy_kunit.c +++ b/lib/memcpy_kunit.c @@ -272,7 +272,7 @@ static void memset_test(struct kunit *test) static void strtomem_test(struct kunit *test) { - static const char input[] = "hi"; + static const char input[sizeof(unsigned long)] = "hi"; static const char truncate[] = "this is too long"; struct { unsigned long canary1; From c5783af354688b24abd359f7086c282ec74de993 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 7 Sep 2022 16:40:44 -0700 Subject: [PATCH 17/27] sh: machvec: Use char[] for section boundaries As done for other sections, define the extern as a character array, which relaxes many of the compile-time object size checks that would otherwise assume it's a single long.
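The general idiom is worth spelling out: declaring linker-provided boundary symbols as incomplete char arrays lets their addresses be used in pointer arithmetic without the compiler assuming a fixed-size object behind them. A hedged sketch of the pattern (the section and symbol names here are hypothetical, not the machvec ones):

/* Defined by the linker script around a hypothetical section. */
extern char __example_start[], __example_end[];

static unsigned long example_section_size(void)
{
	/* The arrays decay to pointers; no single-long object is implied. */
	return (unsigned long)(__example_end - __example_start);
}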
Solves the following build error: arch/sh/kernel/machvec.c: error: array subscript 'struct sh_machine_vector[0]' is partly outside array bounds of 'long int[1]' [-Werror=array-bounds]: => 105:33 Cc: Yoshinori Sato Cc: Rich Felker Cc: linux-sh@vger.kernel.org Reported-by: Geert Uytterhoeven Link: https://lore.kernel.org/lkml/alpine.DEB.2.22.394.2209050944290.964530@ramsan.of.borg/ Fixes: 9655ad03af2d ("sh: Fixup machvec support.") Reviewed-by: Geert Uytterhoeven Reviewed-by: Gustavo A. R. Silva Acked-by: Rich Felker Signed-off-by: Kees Cook --- arch/sh/include/asm/sections.h | 2 +- arch/sh/kernel/machvec.c | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h index 8edb824049b9..0cb0ca149ac3 100644 --- a/arch/sh/include/asm/sections.h +++ b/arch/sh/include/asm/sections.h @@ -4,7 +4,7 @@ #include -extern long __machvec_start, __machvec_end; +extern char __machvec_start[], __machvec_end[]; extern char __uncached_start, __uncached_end; extern char __start_eh_frame[], __stop_eh_frame[]; diff --git a/arch/sh/kernel/machvec.c b/arch/sh/kernel/machvec.c index d606679a211e..57efaf5b82ae 100644 --- a/arch/sh/kernel/machvec.c +++ b/arch/sh/kernel/machvec.c @@ -20,8 +20,8 @@ #define MV_NAME_SIZE 32 #define for_each_mv(mv) \ - for ((mv) = (struct sh_machine_vector *)&__machvec_start; \ - (mv) && (unsigned long)(mv) < (unsigned long)&__machvec_end; \ + for ((mv) = (struct sh_machine_vector *)__machvec_start; \ + (mv) && (unsigned long)(mv) < (unsigned long)__machvec_end; \ (mv)++) static struct sh_machine_vector * __init get_mv_byname(const char *name) @@ -87,8 +87,8 @@ void __init sh_mv_setup(void) if (!machvec_selected) { unsigned long machvec_size; - machvec_size = ((unsigned long)&__machvec_end - - (unsigned long)&__machvec_start); + machvec_size = ((unsigned long)__machvec_end - + (unsigned long)__machvec_start); /* * Sanity check for machvec section alignment. Ensure @@ -102,7 +102,7 @@ void __init sh_mv_setup(void) * vector (usually the only one) from .machvec.init. */ if (machvec_size >= sizeof(struct sh_machine_vector)) - sh_mv = *(struct sh_machine_vector *)&__machvec_start; + sh_mv = *(struct sh_machine_vector *)__machvec_start; } pr_notice("Booting machvec: %s\n", get_system_type()); From 06c1c49d0cd1d6cec5b78963109ba728e49e0063 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 13 Sep 2022 10:28:56 -0700 Subject: [PATCH 18/27] fortify: Adjust KUnit test for modular build A much better "unknown size" string pointer is available directly from struct test, so use that instead of a global that isn't shared with modules. 
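The property being tested is that the compiler reports "unknown size" for a string pointer it cannot size at compile time. A small userspace sketch of the underlying __builtin_object_size() behavior (assuming GCC or Clang with optimization enabled; this is not the kernel's __compiletime_strlen() helper itself):

#include <stdio.h>

int main(int argc, char *argv[])
{
	char fixed[16];

	/* Size known at compile time: prints 16. */
	printf("fixed:   %zu\n", __builtin_object_size(fixed, 1));
	/* Size unknown at compile time: prints (size_t)-1, i.e. SIZE_MAX. */
	printf("dynamic: %zu\n", __builtin_object_size(argv[0], 1));
	return 0;
}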
Reported-by: Nathan Chancellor Link: https://lore.kernel.org/lkml/YyCOHOchVuE/E7vS@dev-arch.thelio-3990X Fixes: 875bfd5276f3 ("fortify: Add KUnit test for FORTIFY_SOURCE internals") Cc: linux-hardening@vger.kernel.org Build-tested-by: Nathan Chancellor Reviewed-by: David Gow Signed-off-by: Kees Cook --- lib/fortify_kunit.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c index 99bc0ea60d27..409af07f340a 100644 --- a/lib/fortify_kunit.c +++ b/lib/fortify_kunit.c @@ -17,7 +17,6 @@ #include #include -#include static const char array_of_10[] = "this is 10"; static const char *ptr_of_11 = "this is 11!"; @@ -31,7 +30,7 @@ static void known_sizes_test(struct kunit *test) KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_unknown), SIZE_MAX); /* Externally defined and dynamically sized string pointer: */ - KUNIT_EXPECT_EQ(test, __compiletime_strlen(saved_command_line), SIZE_MAX); + KUNIT_EXPECT_EQ(test, __compiletime_strlen(test->name), SIZE_MAX); } /* This is volatile so the optimizer can't perform DCE below. */ From 1b64daf413acd86c2c13f5443f6b4ef3690c8061 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 7 Sep 2022 15:41:03 -0700 Subject: [PATCH 19/27] ARM: decompressor: Include .data.rel.ro.local The .data.rel.ro.local section has the same semantics as .data.rel.ro here, so include it in the .rodata section of the decompressor. Additionally, since the .printk_index section isn't usable outside of the core kernel, discard it in the decompressor. Avoids these warnings: arm-linux-gnueabi-ld: warning: orphan section `.data.rel.ro.local' from `arch/arm/boot/compressed/fdt_rw.o' being placed in section `.data.rel.ro.local' arm-linux-gnueabi-ld: warning: orphan section `.printk_index' from `arch/arm/boot/compressed/fdt_rw.o' being placed in section `.printk_index' Reported-by: kernel test robot Link: https://lore.kernel.org/linux-mm/202209080545.qMIVj7YM-lkp@intel.com Cc: Russell King Cc: linux-arm-kernel@lists.infradead.org Signed-off-by: Kees Cook --- arch/arm/boot/compressed/vmlinux.lds.S | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm/boot/compressed/vmlinux.lds.S b/arch/arm/boot/compressed/vmlinux.lds.S index 1bcb68ac4b01..3fcb3e62dc56 100644 --- a/arch/arm/boot/compressed/vmlinux.lds.S +++ b/arch/arm/boot/compressed/vmlinux.lds.S @@ -23,6 +23,7 @@ SECTIONS *(.ARM.extab*) *(.note.*) *(.rel.*) + *(.printk_index) /* * Discard any r/w data - this produces a link error if we have any, * which is required for PIC decompression. Local data generates @@ -57,6 +58,7 @@ SECTIONS *(.rodata) *(.rodata.*) *(.data.rel.ro) + *(.data.rel.ro.*) } .piggydata : { *(.piggydata) From 3e1730842f142add55dc658929221521a9ea62b6 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 19 Sep 2022 19:45:14 -0700 Subject: [PATCH 20/27] x86/entry: Work around Clang __bdos() bug Clang produces a false positive when building with CONFIG_FORTIFY_SOURCE=y and CONFIG_UBSAN_BOUNDS=y when operating on an array with a dynamic offset. Work around this by using a direct assignment of an empty instance. Avoids this warning: ../include/linux/fortify-string.h:309:4: warning: call to __write_overflow_field declared with 'warning' attribute: detected write beyond size of field (1st parameter); maybe use struct_group()? [-Wattribute-warning] __write_overflow_field(p_size_field, size); ^ which was isolated to the memset() call in xen_load_idt().
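A hedged sketch of the workaround pattern, with hypothetical names rather than the Xen structures: assigning a zeroed instance has the same effect as the memset(), but avoids the fortified-memset path that produced the false positive on a dynamically indexed element:

struct entry {			/* hypothetical stand-in for struct trap_info */
	unsigned long address;
	unsigned int flags;
};

static void terminate_table(struct entry *table, unsigned int out)
{
	static const struct entry zero = { };

	/* Same effect as memset(&table[out], 0, sizeof(table[0])). */
	table[out] = zero;
}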
Note that this looks very much like another bug that was worked around: https://github.com/ClangBuiltLinux/linux/issues/1592 Cc: Juergen Gross Cc: Boris Ostrovsky Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Dave Hansen Cc: x86@kernel.org Cc: "H. Peter Anvin" Cc: xen-devel@lists.xenproject.org Reviewed-by: Boris Ostrovsky Link: https://lore.kernel.org/lkml/41527d69-e8ab-3f86-ff37-6b298c01d5bc@oracle.com Signed-off-by: Kees Cook --- arch/x86/xen/enlighten_pv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 0ed2e487a693..9b1a58dda935 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -765,6 +765,7 @@ static void xen_load_idt(const struct desc_ptr *desc) { static DEFINE_SPINLOCK(lock); static struct trap_info traps[257]; + static const struct trap_info zero = { }; unsigned out; trace_xen_cpu_load_idt(desc); @@ -774,7 +775,7 @@ static void xen_load_idt(const struct desc_ptr *desc) memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc)); out = xen_convert_trap_info(desc, traps, false); - memset(&traps[out], 0, sizeof(traps[0])); + traps[out] = zero; xen_mc_flush(); if (HYPERVISOR_set_trap_table(traps)) From fa35198f39571bbdae53c5b321020021eaad6bd2 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 19 Sep 2022 16:33:33 -0700 Subject: [PATCH 21/27] fortify: Explicitly check bounds are compile-time constants In preparation for replacing __builtin_object_size() with __builtin_dynamic_object_size(), all the compile-time size checks need to check that the bounds comparisons are, in fact, known at compile-time. Enforce what was guaranteed with __bos(). In other words, since all uses of __bos() were constant expressions, it was not required to test for this. When these change to __bdos(), they _may_ be constant expressions, and the checks are only valid when the prior condition holds. This results in no binary differences. Cc: linux-hardening@vger.kernel.org Link: https://lore.kernel.org/lkml/20220920192202.190793-3-keescook@chromium.org Signed-off-by: Kees Cook --- include/linux/fortify-string.h | 49 +++++++++++++++++++++------------- 1 file changed, 30 insertions(+), 19 deletions(-) diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h index ff879efe94ed..1c582224c525 100644 --- a/include/linux/fortify-string.h +++ b/include/linux/fortify-string.h @@ -80,6 +80,11 @@ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) #define POS __pass_object_size(1) #define POS0 __pass_object_size(0) +#define __compiletime_lessthan(bounds, length) ( \ + __builtin_constant_p((bounds) < (length)) && \ + (bounds) < (length) \ +) + /** * strncpy - Copy a string to memory with non-guaranteed NUL padding * @@ -117,7 +122,7 @@ char *strncpy(char * const POS p, const char *q, __kernel_size_t size) { size_t p_size = __builtin_object_size(p, 1); - if (__builtin_constant_p(size) && p_size < size) + if (__compiletime_lessthan(p_size, size)) __write_overflow(); if (p_size < size) fortify_panic(__func__); @@ -224,7 +229,7 @@ __FORTIFY_INLINE ssize_t strscpy(char * const POS p, const char * const POS q, s * If size can be known at compile time and is greater than * p_size, generate a compile time write overflow error. 
*/ - if (__builtin_constant_p(size) && size > p_size) + if (__compiletime_lessthan(p_size, size)) __write_overflow(); /* @@ -281,15 +286,16 @@ __FORTIFY_INLINE void fortify_memset_chk(__kernel_size_t size, /* * Length argument is a constant expression, so we * can perform compile-time bounds checking where - * buffer sizes are known. + * buffer sizes are also known at compile time. */ /* Error when size is larger than enclosing struct. */ - if (p_size > p_size_field && p_size < size) + if (__compiletime_lessthan(p_size_field, p_size) && + __compiletime_lessthan(p_size, size)) __write_overflow(); /* Warn when write size is larger than dest field. */ - if (p_size_field < size) + if (__compiletime_lessthan(p_size_field, size)) __write_overflow_field(p_size_field, size); } /* @@ -365,25 +371,28 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size, /* * Length argument is a constant expression, so we * can perform compile-time bounds checking where - * buffer sizes are known. + * buffer sizes are also known at compile time. */ /* Error when size is larger than enclosing struct. */ - if (p_size > p_size_field && p_size < size) + if (__compiletime_lessthan(p_size_field, p_size) && + __compiletime_lessthan(p_size, size)) __write_overflow(); - if (q_size > q_size_field && q_size < size) + if (__compiletime_lessthan(q_size_field, q_size) && + __compiletime_lessthan(q_size, size)) __read_overflow2(); /* Warn when write size argument larger than dest field. */ - if (p_size_field < size) + if (__compiletime_lessthan(p_size_field, size)) __write_overflow_field(p_size_field, size); /* * Warn for source field over-read when building with W=1 * or when an over-write happened, so both can be fixed at * the same time. */ - if ((IS_ENABLED(KBUILD_EXTRA_WARN1) || p_size_field < size) && - q_size_field < size) + if ((IS_ENABLED(KBUILD_EXTRA_WARN1) || + __compiletime_lessthan(p_size_field, size)) && + __compiletime_lessthan(q_size_field, size)) __read_overflow2_field(q_size_field, size); } /* @@ -494,7 +503,7 @@ __FORTIFY_INLINE void *memscan(void * const POS0 p, int c, __kernel_size_t size) { size_t p_size = __builtin_object_size(p, 0); - if (__builtin_constant_p(size) && p_size < size) + if (__compiletime_lessthan(p_size, size)) __read_overflow(); if (p_size < size) fortify_panic(__func__); @@ -508,9 +517,9 @@ int memcmp(const void * const POS0 p, const void * const POS0 q, __kernel_size_t size_t q_size = __builtin_object_size(q, 0); if (__builtin_constant_p(size)) { - if (p_size < size) + if (__compiletime_lessthan(p_size, size)) __read_overflow(); - if (q_size < size) + if (__compiletime_lessthan(q_size, size)) __read_overflow2(); } if (p_size < size || q_size < size) @@ -523,7 +532,7 @@ void *memchr(const void * const POS0 p, int c, __kernel_size_t size) { size_t p_size = __builtin_object_size(p, 0); - if (__builtin_constant_p(size) && p_size < size) + if (__compiletime_lessthan(p_size, size)) __read_overflow(); if (p_size < size) fortify_panic(__func__); @@ -535,7 +544,7 @@ __FORTIFY_INLINE void *memchr_inv(const void * const POS0 p, int c, size_t size) { size_t p_size = __builtin_object_size(p, 0); - if (__builtin_constant_p(size) && p_size < size) + if (__compiletime_lessthan(p_size, size)) __read_overflow(); if (p_size < size) fortify_panic(__func__); @@ -547,7 +556,7 @@ __FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp { size_t p_size = __builtin_object_size(p, 0); - if (__builtin_constant_p(size) && p_size < size) + if (__compiletime_lessthan(p_size, size)) 
__read_overflow(); if (p_size < size) fortify_panic(__func__); @@ -563,11 +572,13 @@ char *strcpy(char * const POS p, const char * const POS q) size_t size; /* If neither buffer size is known, immediately give up. */ - if (p_size == SIZE_MAX && q_size == SIZE_MAX) + if (__builtin_constant_p(p_size) && + __builtin_constant_p(q_size) && + p_size == SIZE_MAX && q_size == SIZE_MAX) return __underlying_strcpy(p, q); size = strlen(q) + 1; /* Compile-time check for const size overflow. */ - if (__builtin_constant_p(size) && p_size < size) + if (__compiletime_lessthan(p_size, size)) __write_overflow(); /* Run-time check for dynamic size overflow. */ if (p_size < size) From 9f7d69c5cd23904a29178a7ecc4eee9c1cfba04b Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 19 Sep 2022 19:50:32 -0700 Subject: [PATCH 22/27] fortify: Convert to struct vs member helpers In preparation for adding support for __builtin_dynamic_object_size(), wrap each instance of __builtin_object_size(p, N) with either the new __struct_size(p) as __bos(p, 0), or __member_size(p) as __bos(p, 1). This will allow us to replace the definitions with __bdos() next. There are no binary differences from this change. Cc: Nathan Chancellor Cc: Nick Desaulniers Cc: Tom Rix Cc: linux-hardening@vger.kernel.org Cc: llvm@lists.linux.dev Link: https://lore.kernel.org/lkml/20220920192202.190793-4-keescook@chromium.org Signed-off-by: Kees Cook --- include/linux/fortify-string.h | 68 +++++++++++++++++----------------- 1 file changed, 35 insertions(+), 33 deletions(-) diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h index 1c582224c525..b62c90cfafaf 100644 --- a/include/linux/fortify-string.h +++ b/include/linux/fortify-string.h @@ -20,7 +20,7 @@ void __write_overflow_field(size_t avail, size_t wanted) __compiletime_warning(" ({ \ unsigned char *__p = (unsigned char *)(p); \ size_t __ret = SIZE_MAX; \ - size_t __p_size = __builtin_object_size(p, 1); \ + size_t __p_size = __member_size(p); \ if (__p_size != SIZE_MAX && \ __builtin_constant_p(*__p)) { \ size_t __p_len = __p_size - 1; \ @@ -72,13 +72,15 @@ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __underlying_memcpy(dst, src, bytes) /* - * Clang's use of __builtin_object_size() within inlines needs hinting via - * __pass_object_size(). The preference is to only ever use type 1 (member + * Clang's use of __builtin_*object_size() within inlines needs hinting via + * __pass_*object_size(). The preference is to only ever use type 1 (member * size, rather than struct size), but there remain some stragglers using * type 0 that will be converted in the future. 
*/ -#define POS __pass_object_size(1) -#define POS0 __pass_object_size(0) +#define POS __pass_object_size(1) +#define POS0 __pass_object_size(0) +#define __struct_size(p) __builtin_object_size(p, 0) +#define __member_size(p) __builtin_object_size(p, 1) #define __compiletime_lessthan(bounds, length) ( \ __builtin_constant_p((bounds) < (length)) && \ @@ -120,7 +122,7 @@ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __FORTIFY_INLINE __diagnose_as(__builtin_strncpy, 1, 2, 3) char *strncpy(char * const POS p, const char *q, __kernel_size_t size) { - size_t p_size = __builtin_object_size(p, 1); + size_t p_size = __member_size(p); if (__compiletime_lessthan(p_size, size)) __write_overflow(); @@ -132,7 +134,7 @@ char *strncpy(char * const POS p, const char *q, __kernel_size_t size) __FORTIFY_INLINE __diagnose_as(__builtin_strcat, 1, 2) char *strcat(char * const POS p, const char *q) { - size_t p_size = __builtin_object_size(p, 1); + size_t p_size = __member_size(p); if (p_size == SIZE_MAX) return __underlying_strcat(p, q); @@ -144,7 +146,7 @@ char *strcat(char * const POS p, const char *q) extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen); __FORTIFY_INLINE __kernel_size_t strnlen(const char * const POS p, __kernel_size_t maxlen) { - size_t p_size = __builtin_object_size(p, 1); + size_t p_size = __member_size(p); size_t p_len = __compiletime_strlen(p); size_t ret; @@ -174,7 +176,7 @@ __FORTIFY_INLINE __diagnose_as(__builtin_strlen, 1) __kernel_size_t __fortify_strlen(const char * const POS p) { __kernel_size_t ret; - size_t p_size = __builtin_object_size(p, 1); + size_t p_size = __member_size(p); /* Give up if we don't know how large p is. */ if (p_size == SIZE_MAX) @@ -189,8 +191,8 @@ __kernel_size_t __fortify_strlen(const char * const POS p) extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy); __FORTIFY_INLINE size_t strlcpy(char * const POS p, const char * const POS q, size_t size) { - size_t p_size = __builtin_object_size(p, 1); - size_t q_size = __builtin_object_size(q, 1); + size_t p_size = __member_size(p); + size_t q_size = __member_size(q); size_t q_len; /* Full count of source string length. */ size_t len; /* Count of characters going into destination. */ @@ -218,8 +220,8 @@ __FORTIFY_INLINE ssize_t strscpy(char * const POS p, const char * const POS q, s { size_t len; /* Use string size rather than possible enclosing struct size. */ - size_t p_size = __builtin_object_size(p, 1); - size_t q_size = __builtin_object_size(q, 1); + size_t p_size = __member_size(p); + size_t q_size = __member_size(q); /* If we cannot get size of p and q default to call strscpy. */ if (p_size == SIZE_MAX && q_size == SIZE_MAX) @@ -264,8 +266,8 @@ __FORTIFY_INLINE __diagnose_as(__builtin_strncat, 1, 2, 3) char *strncat(char * const POS p, const char * const POS q, __kernel_size_t count) { size_t p_len, copy_len; - size_t p_size = __builtin_object_size(p, 1); - size_t q_size = __builtin_object_size(q, 1); + size_t p_size = __member_size(p); + size_t q_size = __member_size(q); if (p_size == SIZE_MAX && q_size == SIZE_MAX) return __underlying_strncat(p, q, count); @@ -323,11 +325,11 @@ __FORTIFY_INLINE void fortify_memset_chk(__kernel_size_t size, }) /* - * __builtin_object_size() must be captured here to avoid evaluating argument - * side-effects further into the macro layers. + * __struct_size() vs __member_size() must be captured here to avoid + * evaluating argument side-effects further into the macro layers. 
*/ #define memset(p, c, s) __fortify_memset_chk(p, c, s, \ - __builtin_object_size(p, 0), __builtin_object_size(p, 1)) + __struct_size(p), __member_size(p)) /* * To make sure the compiler can enforce protection against buffer overflows, @@ -420,7 +422,7 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size, * fake flexible arrays, until they are all converted to * proper flexible arrays. * - * The implementation of __builtin_object_size() behaves + * The implementation of __builtin_*object_size() behaves * like sizeof() when not directly referencing a flexible * array member, which means there will be many bounds checks * that will appear at run-time, without a way for them to be @@ -486,22 +488,22 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size, */ /* - * __builtin_object_size() must be captured here to avoid evaluating argument - * side-effects further into the macro layers. + * __struct_size() vs __member_size() must be captured here to avoid + * evaluating argument side-effects further into the macro layers. */ #define memcpy(p, q, s) __fortify_memcpy_chk(p, q, s, \ - __builtin_object_size(p, 0), __builtin_object_size(q, 0), \ - __builtin_object_size(p, 1), __builtin_object_size(q, 1), \ + __struct_size(p), __struct_size(q), \ + __member_size(p), __member_size(q), \ memcpy) #define memmove(p, q, s) __fortify_memcpy_chk(p, q, s, \ - __builtin_object_size(p, 0), __builtin_object_size(q, 0), \ - __builtin_object_size(p, 1), __builtin_object_size(q, 1), \ + __struct_size(p), __struct_size(q), \ + __member_size(p), __member_size(q), \ memmove) extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan); __FORTIFY_INLINE void *memscan(void * const POS0 p, int c, __kernel_size_t size) { - size_t p_size = __builtin_object_size(p, 0); + size_t p_size = __struct_size(p); if (__compiletime_lessthan(p_size, size)) __read_overflow(); @@ -513,8 +515,8 @@ __FORTIFY_INLINE void *memscan(void * const POS0 p, int c, __kernel_size_t size) __FORTIFY_INLINE __diagnose_as(__builtin_memcmp, 1, 2, 3) int memcmp(const void * const POS0 p, const void * const POS0 q, __kernel_size_t size) { - size_t p_size = __builtin_object_size(p, 0); - size_t q_size = __builtin_object_size(q, 0); + size_t p_size = __struct_size(p); + size_t q_size = __struct_size(q); if (__builtin_constant_p(size)) { if (__compiletime_lessthan(p_size, size)) @@ -530,7 +532,7 @@ int memcmp(const void * const POS0 p, const void * const POS0 q, __kernel_size_t __FORTIFY_INLINE __diagnose_as(__builtin_memchr, 1, 2, 3) void *memchr(const void * const POS0 p, int c, __kernel_size_t size) { - size_t p_size = __builtin_object_size(p, 0); + size_t p_size = __struct_size(p); if (__compiletime_lessthan(p_size, size)) __read_overflow(); @@ -542,7 +544,7 @@ void *memchr(const void * const POS0 p, int c, __kernel_size_t size) void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv); __FORTIFY_INLINE void *memchr_inv(const void * const POS0 p, int c, size_t size) { - size_t p_size = __builtin_object_size(p, 0); + size_t p_size = __struct_size(p); if (__compiletime_lessthan(p_size, size)) __read_overflow(); @@ -554,7 +556,7 @@ __FORTIFY_INLINE void *memchr_inv(const void * const POS0 p, int c, size_t size) extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup); __FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp) { - size_t p_size = __builtin_object_size(p, 0); + size_t p_size = __struct_size(p); if (__compiletime_lessthan(p_size, 
size)) __read_overflow(); @@ -567,8 +569,8 @@ __FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp __FORTIFY_INLINE __diagnose_as(__builtin_strcpy, 1, 2) char *strcpy(char * const POS p, const char * const POS q) { - size_t p_size = __builtin_object_size(p, 1); - size_t q_size = __builtin_object_size(q, 1); + size_t p_size = __member_size(p); + size_t q_size = __member_size(q); size_t size; /* If neither buffer size is known, immediately give up. */ From f67b90be20097294cd58b7db91435f4f3278d75f Mon Sep 17 00:00:00 2001 From: Bill Wendling Date: Fri, 2 Sep 2022 21:37:49 +0000 Subject: [PATCH 23/27] x86/paravirt: clean up typos and grammaros Drive-by clean up of the comment. [ Impact: cleanup] Signed-off-by: Bill Wendling Signed-off-by: Kees Cook Link: https://lore.kernel.org/r/20220902213750.1124421-2-morbo@google.com --- arch/x86/include/asm/paravirt_types.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 89df6c6617f5..f04157456a49 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -328,7 +328,7 @@ int paravirt_disable_iospace(void); * Unfortunately, this is a relatively slow operation for modern CPUs, * because it cannot necessarily determine what the destination * address is. In this case, the address is a runtime constant, so at - * the very least we can patch the call to e a simple direct call, or + * the very least we can patch the call to a simple direct call, or, * ideally, patch an inline implementation into the callsite. (Direct * calls are essentially free, because the call and return addresses * are completely predictable.) @@ -339,10 +339,10 @@ int paravirt_disable_iospace(void); * on the stack. All caller-save registers (eax,edx,ecx) are expected * to be modified (either clobbered or used for return values). * X86_64, on the other hand, already specifies a register-based calling - * conventions, returning at %rax, with parameters going on %rdi, %rsi, + * conventions, returning at %rax, with parameters going in %rdi, %rsi, * %rdx, and %rcx. Note that for this reason, x86_64 does not need any * special handling for dealing with 4 arguments, unlike i386. - * However, x86_64 also have to clobber all caller saved registers, which + * However, x86_64 also has to clobber all caller saved registers, which * unfortunately, are quite a bit (r8 - r11) * * The call instruction itself is marked by placing its start address @@ -360,22 +360,22 @@ int paravirt_disable_iospace(void); * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments. * It could be extended to more arguments, but there would be little * to be gained from that. For each number of arguments, there are - * the two VCALL and CALL variants for void and non-void functions. + * two VCALL and CALL variants for void and non-void functions. * * When there is a return value, the invoker of the macro must specify * the return type. The macro then uses sizeof() on that type to - * determine whether its a 32 or 64 bit value, and places the return + * determine whether it's a 32 or 64 bit value and places the return * in the right register(s) (just %eax for 32-bit, and %edx:%eax for - * 64-bit). For x86_64 machines, it just returns at %rax regardless of + * 64-bit). For x86_64 machines, it just returns in %rax regardless of * the return value size. 
* - * 64-bit arguments are passed as a pair of adjacent 32-bit arguments + * 64-bit arguments are passed as a pair of adjacent 32-bit arguments; i386 also passes 64-bit arguments as a pair of adjacent 32-bit arguments in low,high order * * Small structures are passed and returned in registers. The macro * calling convention can't directly deal with this, so the wrapper - * functions must do this. + * functions must do it. * * These PVOP_* macros are only defined within this header. This * means that all uses must be wrapped in inline functions. This also From 8c86f29bfb18465d15b05cfd26a6454ec787b793 Mon Sep 17 00:00:00 2001 From: Bill Wendling Date: Fri, 2 Sep 2022 21:37:50 +0000 Subject: [PATCH 24/27] x86/paravirt: add extra clobbers with ZERO_CALL_USED_REGS enabled The ZERO_CALL_USED_REGS feature may zero out caller-saved registers before returning. In spurious_kernel_fault(), the "pte_offset_kernel()" call results in this assembly code: .Ltmp151: #APP # ALT: oldnstr .Ltmp152: .Ltmp153: .Ltmp154: .section .discard.retpoline_safe,"",@progbits .quad .Ltmp154 .text callq *pv_ops+536(%rip) .Ltmp155: .section .parainstructions,"a",@progbits .p2align 3, 0x0 .quad .Ltmp153 .byte 67 .byte .Ltmp155-.Ltmp153 .short 1 .text .Ltmp156: # ALT: padding .zero (-(((.Ltmp157-.Ltmp158)-(.Ltmp156-.Ltmp152))>0))*((.Ltmp157-.Ltmp158)-(.Ltmp156-.Ltmp152)),144 .Ltmp159: .section .altinstructions,"a",@progbits .Ltmp160: .long .Ltmp152-.Ltmp160 .Ltmp161: .long .Ltmp158-.Ltmp161 .short 33040 .byte .Ltmp159-.Ltmp152 .byte .Ltmp157-.Ltmp158 .text .section .altinstr_replacement,"ax",@progbits # ALT: replacement 1 .Ltmp158: movq %rdi, %rax .Ltmp157: .text #NO_APP .Ltmp162: testb $-128, %dil The "testb" here is using %dil, but the %rdi register was cleared before returning from "callq *pv_ops+536(%rip)". Adding the proper constraints results in the use of a different register: movq %r11, %rdi # Similar to above. testb $-128, %r11b Link: https://github.com/KSPP/linux/issues/192 Signed-off-by: Bill Wendling Reported-and-tested-by: Nathan Chancellor Fixes: 035f7f87b729 ("randstruct: Enable Clang support") Reviewed-by: Juergen Gross Link: https://lore.kernel.org/lkml/fa6df43b-8a1a-8ad1-0236-94d2a0b588fa@suse.com/ Signed-off-by: Kees Cook Link: https://lore.kernel.org/r/20220902213750.1124421-3-morbo@google.com --- arch/x86/include/asm/paravirt_types.h | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index f04157456a49..b1ab5d94881b 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -414,8 +414,17 @@ int paravirt_disable_iospace(void); "=c" (__ecx) #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax) -/* void functions are still allowed [re]ax for scratch */ +/* + * void functions are still allowed [re]ax for scratch. + * + * The ZERO_CALL_USED_REGS feature may end up zeroing out caller-saved + * registers. Make sure we model this with the appropriate clobbers.
+ */ +#ifdef CONFIG_ZERO_CALL_USED_REGS +#define PVOP_VCALLEE_CLOBBERS "=a" (__eax), PVOP_VCALL_CLOBBERS +#else #define PVOP_VCALLEE_CLOBBERS "=a" (__eax) +#endif #define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS #define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11" From 17006e86a7641fa3c50324cfb602f0e74dac8527 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Tue, 30 Aug 2022 13:58:42 -0700 Subject: [PATCH 25/27] sparc: Unbreak the build MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the following build errors: arch/sparc/mm/srmmu.c: In function ‘smp_flush_page_for_dma’: arch/sparc/mm/srmmu.c:1639:13: error: cast between incompatible function types from ‘void (*)(long unsigned int)’ to ‘void (*)(long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int)’ [-Werror=cast-function-type] 1639 | xc1((smpfunc_t) local_ops->page_for_dma, page); | ^ arch/sparc/mm/srmmu.c: In function ‘smp_flush_cache_mm’: arch/sparc/mm/srmmu.c:1662:29: error: cast between incompatible function types from ‘void (*)(struct mm_struct *)’ to ‘void (*)(long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int)’ [-Werror=cast-function-type] 1662 | xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm); | [ ... ] Compile-tested only. Fixes: 552a23a0e5d0 ("Makefile: Enable -Wcast-function-type") Cc: stable@vger.kernel.org Signed-off-by: Bart Van Assche Tested-by: Andreas Larsson Signed-off-by: Kees Cook Link: https://lore.kernel.org/r/20220830205854.1918026-1-bvanassche@acm.org --- arch/sparc/include/asm/smp_32.h | 15 ++++++--------- arch/sparc/kernel/leon_smp.c | 12 +++++++----- arch/sparc/kernel/sun4d_smp.c | 12 +++++++----- arch/sparc/kernel/sun4m_smp.c | 10 ++++++---- arch/sparc/mm/srmmu.c | 29 +++++++++++++---------------- 5 files changed, 39 insertions(+), 39 deletions(-) diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h index 856081761b0f..2cf7971d7f6c 100644 --- a/arch/sparc/include/asm/smp_32.h +++ b/arch/sparc/include/asm/smp_32.h @@ -33,9 +33,6 @@ extern volatile unsigned long cpu_callin_map[NR_CPUS]; extern cpumask_t smp_commenced_mask; extern struct linux_prom_registers smp_penguin_ctable; -typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long, - unsigned long, unsigned long); - void cpu_panic(void); /* @@ -57,7 +54,7 @@ void smp_bogo(struct seq_file *); void smp_info(struct seq_file *); struct sparc32_ipi_ops { - void (*cross_call)(smpfunc_t func, cpumask_t mask, unsigned long arg1, + void (*cross_call)(void *func, cpumask_t mask, unsigned long arg1, unsigned long arg2, unsigned long arg3, unsigned long arg4); void (*resched)(int cpu); @@ -66,28 +63,28 @@ struct sparc32_ipi_ops { }; extern const struct sparc32_ipi_ops *sparc32_ipi_ops; -static inline void xc0(smpfunc_t func) +static inline void xc0(void *func) { sparc32_ipi_ops->cross_call(func, *cpu_online_mask, 0, 0, 0, 0); } -static inline void xc1(smpfunc_t func, unsigned long arg1) +static inline void xc1(void *func, unsigned long arg1) { sparc32_ipi_ops->cross_call(func, *cpu_online_mask, arg1, 0, 0, 0); } -static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2) +static inline void xc2(void *func, unsigned long arg1, unsigned long arg2) { sparc32_ipi_ops->cross_call(func, *cpu_online_mask, arg1, arg2, 0, 0); } -static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2, +static inline void xc3(void *func, unsigned long arg1, unsigned long arg2, 
unsigned long arg3) { sparc32_ipi_ops->cross_call(func, *cpu_online_mask, arg1, arg2, arg3, 0); } -static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2, +static inline void xc4(void *func, unsigned long arg1, unsigned long arg2, unsigned long arg3, unsigned long arg4) { sparc32_ipi_ops->cross_call(func, *cpu_online_mask, diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c index 1eed26d423fb..991e9ad3d3e8 100644 --- a/arch/sparc/kernel/leon_smp.c +++ b/arch/sparc/kernel/leon_smp.c @@ -359,7 +359,7 @@ void leonsmp_ipi_interrupt(void) } static struct smp_funcall { - smpfunc_t func; + void *func; unsigned long arg1; unsigned long arg2; unsigned long arg3; @@ -372,7 +372,7 @@ static struct smp_funcall { static DEFINE_SPINLOCK(cross_call_lock); /* Cross calls must be serialized, at least currently. */ -static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, +static void leon_cross_call(void *func, cpumask_t mask, unsigned long arg1, unsigned long arg2, unsigned long arg3, unsigned long arg4) { @@ -384,7 +384,7 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, { /* If you make changes here, make sure gcc generates proper code... */ - register smpfunc_t f asm("i0") = func; + register void *f asm("i0") = func; register unsigned long a1 asm("i1") = arg1; register unsigned long a2 asm("i2") = arg2; register unsigned long a3 asm("i3") = arg3; @@ -444,11 +444,13 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, /* Running cross calls. */ void leon_cross_call_irq(void) { + void (*func)(unsigned long, unsigned long, unsigned long, unsigned long, + unsigned long) = ccall_info.func; int i = smp_processor_id(); ccall_info.processors_in[i] = 1; - ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, - ccall_info.arg4, ccall_info.arg5); + func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, ccall_info.arg4, + ccall_info.arg5); ccall_info.processors_out[i] = 1; } diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c index ff30f03beb7c..9a62a5cf3337 100644 --- a/arch/sparc/kernel/sun4d_smp.c +++ b/arch/sparc/kernel/sun4d_smp.c @@ -268,7 +268,7 @@ static void sun4d_ipi_resched(int cpu) } static struct smp_funcall { - smpfunc_t func; + void *func; unsigned long arg1; unsigned long arg2; unsigned long arg3; @@ -281,7 +281,7 @@ static struct smp_funcall { static DEFINE_SPINLOCK(cross_call_lock); /* Cross calls must be serialized, at least currently. */ -static void sun4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, +static void sun4d_cross_call(void *func, cpumask_t mask, unsigned long arg1, unsigned long arg2, unsigned long arg3, unsigned long arg4) { @@ -296,7 +296,7 @@ static void sun4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, * If you make changes here, make sure * gcc generates proper code... */ - register smpfunc_t f asm("i0") = func; + register void *f asm("i0") = func; register unsigned long a1 asm("i1") = arg1; register unsigned long a2 asm("i2") = arg2; register unsigned long a3 asm("i3") = arg3; @@ -353,11 +353,13 @@ static void sun4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, /* Running cross calls. 
*/ void smp4d_cross_call_irq(void) { + void (*func)(unsigned long, unsigned long, unsigned long, unsigned long, + unsigned long) = ccall_info.func; int i = hard_smp_processor_id(); ccall_info.processors_in[i] = 1; - ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, - ccall_info.arg4, ccall_info.arg5); + func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, ccall_info.arg4, + ccall_info.arg5); ccall_info.processors_out[i] = 1; } diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c index 228a6527082d..056df034e79e 100644 --- a/arch/sparc/kernel/sun4m_smp.c +++ b/arch/sparc/kernel/sun4m_smp.c @@ -157,7 +157,7 @@ static void sun4m_ipi_mask_one(int cpu) } static struct smp_funcall { - smpfunc_t func; + void *func; unsigned long arg1; unsigned long arg2; unsigned long arg3; @@ -170,7 +170,7 @@ static struct smp_funcall { static DEFINE_SPINLOCK(cross_call_lock); /* Cross calls must be serialized, at least currently. */ -static void sun4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, +static void sun4m_cross_call(void *func, cpumask_t mask, unsigned long arg1, unsigned long arg2, unsigned long arg3, unsigned long arg4) { @@ -230,11 +230,13 @@ static void sun4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, /* Running cross calls. */ void smp4m_cross_call_irq(void) { + void (*func)(unsigned long, unsigned long, unsigned long, unsigned long, + unsigned long) = ccall_info.func; int i = smp_processor_id(); ccall_info.processors_in[i] = 1; - ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, - ccall_info.arg4, ccall_info.arg5); + func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, ccall_info.arg4, + ccall_info.arg5); ccall_info.processors_out[i] = 1; } diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index a9aa6a92c7fe..13f027afc875 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -1636,19 +1636,19 @@ static void __init get_srmmu_type(void) /* Local cross-calls. 
*/ static void smp_flush_page_for_dma(unsigned long page) { - xc1((smpfunc_t) local_ops->page_for_dma, page); + xc1(local_ops->page_for_dma, page); local_ops->page_for_dma(page); } static void smp_flush_cache_all(void) { - xc0((smpfunc_t) local_ops->cache_all); + xc0(local_ops->cache_all); local_ops->cache_all(); } static void smp_flush_tlb_all(void) { - xc0((smpfunc_t) local_ops->tlb_all); + xc0(local_ops->tlb_all); local_ops->tlb_all(); } @@ -1659,7 +1659,7 @@ static void smp_flush_cache_mm(struct mm_struct *mm) cpumask_copy(&cpu_mask, mm_cpumask(mm)); cpumask_clear_cpu(smp_processor_id(), &cpu_mask); if (!cpumask_empty(&cpu_mask)) - xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm); + xc1(local_ops->cache_mm, (unsigned long)mm); local_ops->cache_mm(mm); } } @@ -1671,7 +1671,7 @@ static void smp_flush_tlb_mm(struct mm_struct *mm) cpumask_copy(&cpu_mask, mm_cpumask(mm)); cpumask_clear_cpu(smp_processor_id(), &cpu_mask); if (!cpumask_empty(&cpu_mask)) { - xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm); + xc1(local_ops->tlb_mm, (unsigned long)mm); if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm) cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id())); @@ -1691,8 +1691,8 @@ static void smp_flush_cache_range(struct vm_area_struct *vma, cpumask_copy(&cpu_mask, mm_cpumask(mm)); cpumask_clear_cpu(smp_processor_id(), &cpu_mask); if (!cpumask_empty(&cpu_mask)) - xc3((smpfunc_t) local_ops->cache_range, - (unsigned long) vma, start, end); + xc3(local_ops->cache_range, (unsigned long)vma, start, + end); local_ops->cache_range(vma, start, end); } } @@ -1708,8 +1708,8 @@ static void smp_flush_tlb_range(struct vm_area_struct *vma, cpumask_copy(&cpu_mask, mm_cpumask(mm)); cpumask_clear_cpu(smp_processor_id(), &cpu_mask); if (!cpumask_empty(&cpu_mask)) - xc3((smpfunc_t) local_ops->tlb_range, - (unsigned long) vma, start, end); + xc3(local_ops->tlb_range, (unsigned long)vma, start, + end); local_ops->tlb_range(vma, start, end); } } @@ -1723,8 +1723,7 @@ static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page) cpumask_copy(&cpu_mask, mm_cpumask(mm)); cpumask_clear_cpu(smp_processor_id(), &cpu_mask); if (!cpumask_empty(&cpu_mask)) - xc2((smpfunc_t) local_ops->cache_page, - (unsigned long) vma, page); + xc2(local_ops->cache_page, (unsigned long)vma, page); local_ops->cache_page(vma, page); } } @@ -1738,8 +1737,7 @@ static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) cpumask_copy(&cpu_mask, mm_cpumask(mm)); cpumask_clear_cpu(smp_processor_id(), &cpu_mask); if (!cpumask_empty(&cpu_mask)) - xc2((smpfunc_t) local_ops->tlb_page, - (unsigned long) vma, page); + xc2(local_ops->tlb_page, (unsigned long)vma, page); local_ops->tlb_page(vma, page); } } @@ -1753,7 +1751,7 @@ static void smp_flush_page_to_ram(unsigned long page) * XXX This experiment failed, research further... 
-DaveM */ #if 1 - xc1((smpfunc_t) local_ops->page_to_ram, page); + xc1(local_ops->page_to_ram, page); #endif local_ops->page_to_ram(page); } @@ -1764,8 +1762,7 @@ static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr) cpumask_copy(&cpu_mask, mm_cpumask(mm)); cpumask_clear_cpu(smp_processor_id(), &cpu_mask); if (!cpumask_empty(&cpu_mask)) - xc2((smpfunc_t) local_ops->sig_insns, - (unsigned long) mm, insn_addr); + xc2(local_ops->sig_insns, (unsigned long)mm, insn_addr); local_ops->sig_insns(mm, insn_addr); } From 607e57c6c62c00965ae276902c166834ce73014a Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 29 Sep 2022 22:57:43 -0700 Subject: [PATCH 26/27] hardening: Remove Clang's enable flag for -ftrivial-auto-var-init=zero Now that Clang's -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang option is no longer required, remove it from the command line. Clang 16 and later will warn when it is used, which will cause Kconfig to think it can't use -ftrivial-auto-var-init=zero at all. Check for whether it is required and only use it when so. Cc: Nathan Chancellor Cc: Masahiro Yamada Cc: Nick Desaulniers Cc: linux-kbuild@vger.kernel.org Cc: llvm@lists.linux.dev Cc: stable@vger.kernel.org Fixes: f02003c860d9 ("hardening: Avoid harmless Clang option under CONFIG_INIT_STACK_ALL_ZERO") Signed-off-by: Kees Cook --- Makefile | 4 ++-- security/Kconfig.hardening | 14 ++++++++++---- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index c7705f749601..02c857e2243c 100644 --- a/Makefile +++ b/Makefile @@ -831,8 +831,8 @@ endif # Initialize all stack variables with a zero value. ifdef CONFIG_INIT_STACK_ALL_ZERO KBUILD_CFLAGS += -ftrivial-auto-var-init=zero -ifdef CONFIG_CC_IS_CLANG -# https://bugs.llvm.org/show_bug.cgi?id=45497 +ifdef CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_ENABLER +# https://github.com/llvm/llvm-project/issues/44842 KBUILD_CFLAGS += -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang endif endif diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening index bd2aabb2c60f..995bc42003e6 100644 --- a/security/Kconfig.hardening +++ b/security/Kconfig.hardening @@ -22,11 +22,17 @@ menu "Memory initialization" config CC_HAS_AUTO_VAR_INIT_PATTERN def_bool $(cc-option,-ftrivial-auto-var-init=pattern) -config CC_HAS_AUTO_VAR_INIT_ZERO - # GCC ignores the -enable flag, so we can test for the feature with - # a single invocation using the flag, but drop it as appropriate in - # the Makefile, depending on the presence of Clang. +config CC_HAS_AUTO_VAR_INIT_ZERO_BARE + def_bool $(cc-option,-ftrivial-auto-var-init=zero) + +config CC_HAS_AUTO_VAR_INIT_ZERO_ENABLER + # Clang 16 and later warn about using the -enable flag, but it + # is required before then. def_bool $(cc-option,-ftrivial-auto-var-init=zero -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang) + depends on !CC_HAS_AUTO_VAR_INIT_ZERO_BARE + +config CC_HAS_AUTO_VAR_INIT_ZERO + def_bool CC_HAS_AUTO_VAR_INIT_ZERO_BARE || CC_HAS_AUTO_VAR_INIT_ZERO_ENABLER choice prompt "Initialize kernel stack variables at function entry" From 2120635108b35ecad9c59c8b44f6cbdf4f98214e Mon Sep 17 00:00:00 2001 From: Sami Tolvanen Date: Fri, 30 Sep 2022 20:33:10 +0000 Subject: [PATCH 27/27] Makefile.extrawarn: Move -Wcast-function-type-strict to W=1 We enable -Wcast-function-type globally in the kernel to warn about mismatching types in function pointer casts. 
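As a concrete illustration, the kind of mismatch this flag catches is the same one fixed in the sparc patch above; a minimal userspace sketch with hypothetical names:

void one_arg(unsigned long a);

typedef void (*five_args_fn)(unsigned long, unsigned long, unsigned long,
			     unsigned long, unsigned long);

/* -Wcast-function-type warns: the parameter lists are incompatible. */
five_args_fn fp = (five_args_fn)one_arg;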
Compilers currently warn only about ABI incompatibility with this flag, but Clang 16 will enable a stricter version of the check by default that checks for an exact type match. This will be very noisy in the kernel, so disable -Wcast-function-type-strict without W=1 until the new warnings have been addressed. Cc: stable@vger.kernel.org Link: https://reviews.llvm.org/D134831 Link: https://github.com/ClangBuiltLinux/linux/issues/1724 Suggested-by: Nathan Chancellor Signed-off-by: Sami Tolvanen Signed-off-by: Kees Cook Link: https://lore.kernel.org/r/20220930203310.4010564-1-samitolvanen@google.com --- scripts/Makefile.extrawarn | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn index 0621c39a3955..20df48637373 100644 --- a/scripts/Makefile.extrawarn +++ b/scripts/Makefile.extrawarn @@ -52,6 +52,7 @@ KBUILD_CFLAGS += -Wno-sign-compare KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast) KBUILD_CFLAGS += -Wno-tautological-constant-out-of-range-compare KBUILD_CFLAGS += $(call cc-disable-warning, unaligned-access) +KBUILD_CFLAGS += $(call cc-disable-warning, cast-function-type-strict) endif endif
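For reference, a minimal sketch (assuming Clang 16 or later) of a cast that plain -Wcast-function-type accepts as ABI-compatible but that the strict variant flags because the types are not an exact match:

typedef void (*generic_cb)(void *);

void int_handler(int *p);

/* One pointer argument either way, so the ABI matches; only
 * -Wcast-function-type-strict warns about the pointee mismatch. */
generic_cb cb = (generic_cb)int_handler;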