From 4d9dd4b0ce88072ca2368dfdd5c92bc078366e1e Mon Sep 17 00:00:00 2001
From: Feng Tang
Date: Wed, 30 Nov 2022 16:54:50 +0800
Subject: [PATCH 1/2] mm/slub, kunit: add SLAB_SKIP_KFENCE flag for cache creation

When kfence is enabled, a buffer allocated by a test case can come from
the kfence pool, and the faulting access may then be caught and reported
by kfence first, causing the test case to fail.

With the default kfence settings this is very hard to trigger. After
changing CONFIG_KFENCE_NUM_OBJECTS from 255 to 16383 and
CONFIG_KFENCE_SAMPLE_INTERVAL from 100 to 5, kfence allocations were hit
7 times in different slub_kunit cases over 900 boot tests.

To avoid this, we initially tried checking with is_kfence_address() and
repeating the allocation until a non-kfence address was returned.
Vlastimil Babka suggested that the SLAB_SKIP_KFENCE flag could be used
instead, together with a wrapper function to simplify cache creation.

Signed-off-by: Feng Tang
Reviewed-by: Marco Elver
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka
---
 lib/slub_kunit.c | 35 +++++++++++++++++++++++++----------
 1 file changed, 25 insertions(+), 10 deletions(-)

diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
index 7a0564d7cb7a..5b0c8e7eb6dc 100644
--- a/lib/slub_kunit.c
+++ b/lib/slub_kunit.c
@@ -9,10 +9,25 @@
 static struct kunit_resource resource;
 static int slab_errors;
 
+/*
+ * Wrapper for kmem_cache_create(), which drops the 'align' and 'ctor'
+ * parameters and sets the SLAB_SKIP_KFENCE flag, to avoid getting an
+ * object from the kfence pool, where the operation could be caught by
+ * both our test and the kfence sanity check.
+ */
+static struct kmem_cache *test_kmem_cache_create(const char *name,
+				unsigned int size, slab_flags_t flags)
+{
+	struct kmem_cache *s = kmem_cache_create(name, size, 0,
+					(flags | SLAB_NO_USER_FLAGS), NULL);
+	s->flags |= SLAB_SKIP_KFENCE;
+	return s;
+}
+
 static void test_clobber_zone(struct kunit *test)
 {
-	struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_alloc", 64, 0,
-				SLAB_RED_ZONE|SLAB_NO_USER_FLAGS, NULL);
+	struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_alloc", 64,
+							SLAB_RED_ZONE);
 	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
 
 	kasan_disable_current();
@@ -29,8 +44,8 @@ static void test_clobber_zone(struct kunit *test)
 #ifndef CONFIG_KASAN
 static void test_next_pointer(struct kunit *test)
 {
-	struct kmem_cache *s = kmem_cache_create("TestSlub_next_ptr_free", 64, 0,
-				SLAB_POISON|SLAB_NO_USER_FLAGS, NULL);
+	struct kmem_cache *s = test_kmem_cache_create("TestSlub_next_ptr_free",
+							64, SLAB_POISON);
 	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
 	unsigned long tmp;
 	unsigned long *ptr_addr;
@@ -74,8 +89,8 @@ static void test_next_pointer(struct kunit *test)
 
 static void test_first_word(struct kunit *test)
 {
-	struct kmem_cache *s = kmem_cache_create("TestSlub_1th_word_free", 64, 0,
-				SLAB_POISON|SLAB_NO_USER_FLAGS, NULL);
+	struct kmem_cache *s = test_kmem_cache_create("TestSlub_1th_word_free",
+							64, SLAB_POISON);
 	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
 
 	kmem_cache_free(s, p);
@@ -89,8 +104,8 @@ static void test_first_word(struct kunit *test)
 
 static void test_clobber_50th_byte(struct kunit *test)
 {
-	struct kmem_cache *s = kmem_cache_create("TestSlub_50th_word_free", 64, 0,
-				SLAB_POISON|SLAB_NO_USER_FLAGS, NULL);
+	struct kmem_cache *s = test_kmem_cache_create("TestSlub_50th_word_free",
+							64, SLAB_POISON);
 	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
 
 	kmem_cache_free(s, p);
@@ -105,8 +120,8 @@ static void test_clobber_50th_byte(struct kunit *test)
 
 static void test_clobber_redzone_free(struct kunit *test)
 {
-	struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_free", 64, 0,
-				SLAB_RED_ZONE|SLAB_NO_USER_FLAGS, NULL);
+	struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_free", 64,
+							SLAB_RED_ZONE);
 	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
 
 	kasan_disable_current();
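As background for the approach above: a minimal sketch of the rejected
is_kfence_address() retry idea mentioned in the commit message, assuming
a hypothetical helper alloc_non_kfence() that is not part of the patch:

	#include <linux/kfence.h>
	#include <linux/slab.h>

	/*
	 * Hypothetical sketch, not part of the patch: retry until the
	 * allocation does not come from the kfence pool. The intermediate
	 * kfence objects would also need to be tracked and freed, one
	 * reason the SLAB_SKIP_KFENCE cache flag is the cleaner solution.
	 */
	static void *alloc_non_kfence(struct kmem_cache *s, gfp_t gfp)
	{
		void *p;

		do {
			p = kmem_cache_alloc(s, gfp);
		} while (p && is_kfence_address(p));

		return p;
	}

Setting the flag once at cache creation, as the wrapper above does, avoids
this per-allocation loop entirely.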
From 6cd6d33ca41ff4af21bc25c331ab34b50b4a9c8c Mon Sep 17 00:00:00 2001
From: Feng Tang
Date: Wed, 30 Nov 2022 16:54:51 +0800
Subject: [PATCH 2/2] mm/slub, kunit: Add a test case for kmalloc redzone check

The kmalloc redzone check for slub has been merged, so add a kunit case
for it. It is inspired by a real-world case described in commit
120ee599b5bf ("staging: octeon-usb: prevent memory corruption"):

"
octeon-hcd will crash the kernel when SLOB is used. This usually happens
after the 18-byte control transfer when a device descriptor is read.
The DMA engine is always transferring full 32-bit words and if the
transfer is shorter, some random garbage appears after the buffer.
The problem is not visible with SLUB since it rounds up the allocations
to word boundary, and the extra bytes will go undetected.
"

To avoid interfering with the normal functioning of the kmalloc caches,
a kmem_cache mimicking a kmalloc cache is created with similar flags,
and kmalloc_trace() is used to actually exercise the orig_size and
redzone setup.

Suggested-by: Vlastimil Babka
Signed-off-by: Feng Tang
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka
---
 lib/slub_kunit.c | 22 ++++++++++++++++++++++
 mm/slab.h        |  4 +++-
 2 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
index 5b0c8e7eb6dc..bdf358d520b4 100644
--- a/lib/slub_kunit.c
+++ b/lib/slub_kunit.c
@@ -135,6 +135,27 @@ static void test_clobber_redzone_free(struct kunit *test)
 	kmem_cache_destroy(s);
 }
 
+static void test_kmalloc_redzone_access(struct kunit *test)
+{
+	struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_kmalloc", 32,
+				SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);
+	u8 *p = kmalloc_trace(s, GFP_KERNEL, 18);
+
+	kasan_disable_current();
+
+	/* Suppress the -Warray-bounds warning */
+	OPTIMIZER_HIDE_VAR(p);
+	p[18] = 0xab;
+	p[19] = 0xab;
+
+	validate_slab_cache(s);
+	KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+	kasan_enable_current();
+	kmem_cache_free(s, p);
+	kmem_cache_destroy(s);
+}
+
 static int test_init(struct kunit *test)
 {
 	slab_errors = 0;
@@ -154,6 +175,7 @@ static struct kunit_case test_cases[] = {
 #endif
 
 	KUNIT_CASE(test_clobber_redzone_free),
+	KUNIT_CASE(test_kmalloc_redzone_access),
 	{}
 };
 
diff --git a/mm/slab.h b/mm/slab.h
index 190f2d4ec216..d5a0b69b81ab 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -336,7 +336,8 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
 			  SLAB_ACCOUNT)
 #elif defined(CONFIG_SLUB)
 #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
-			  SLAB_TEMPORARY | SLAB_ACCOUNT | SLAB_NO_USER_FLAGS)
+			  SLAB_TEMPORARY | SLAB_ACCOUNT | \
+			  SLAB_NO_USER_FLAGS | SLAB_KMALLOC)
 #else
 #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
 #endif
@@ -356,6 +357,7 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
 			      SLAB_RECLAIM_ACCOUNT | \
 			      SLAB_TEMPORARY | \
 			      SLAB_ACCOUNT | \
+			      SLAB_KMALLOC | \
 			      SLAB_NO_USER_FLAGS)
 
 bool __kmem_cache_empty(struct kmem_cache *);
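For context on what the new test guards against, a hypothetical sketch
(not part of the patch) of the octeon-hcd-style overrun quoted in the
commit message: an 18-byte request served from kmalloc-32, followed by a
full-word write. With the kmalloc redzone check, bytes 18 and 19 now land
in the redzone and are reported, even though they stay inside the
32-byte slot:

	#include <linux/slab.h>

	static void demo_short_transfer(void)
	{
		/* 18-byte request is served from the kmalloc-32 cache */
		u8 *buf = kmalloc(18, GFP_KERNEL);

		if (!buf)
			return;

		/*
		 * A DMA engine that always transfers full 32-bit words
		 * writes 20 bytes for an 18-byte buffer; bytes 18 and 19
		 * clobber the redzone that now starts at orig_size.
		 */
		memset(buf, 0xab, 20);

		/* with slub_debug=Z, the corruption is reported on free */
		kfree(buf);
	}

Previously such a write went undetected on SLUB because the allocation
was rounded up to the slot size; the orig_size tracking added by the
kmalloc redzone work is what makes it visible.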