kasan: introduce kasan_mempool_unpoison_pages
Introduce and document a new kasan_mempool_unpoison_pages hook to be used by the mempool code instead of kasan_unpoison_pages.

This hook is not functionally different from kasan_unpoison_pages, but using it improves the mempool code readability.

Link: https://lkml.kernel.org/r/239bd9af6176f2cc59f5c25893eb36143184daff.1703024586.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Cc: Alexander Lobakin <alobakin@pm.me>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Breno Leitao <leitao@debian.org>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Marco Elver <elver@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent f129c31039
commit 9f41c59ae3
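
For context (an illustration, not part of this patch): below is a minimal sketch of how a subsystem that caches page allocations, such as mempool, is expected to pair the two hooks. The struct my_pool, the pool_cache_*() helpers, and POOL_PAGE_ORDER are hypothetical; only the kasan_mempool_*_pages() calls come from the API this patch documents.

	#include <linux/gfp.h>
	#include <linux/kasan.h>
	#include <linux/list.h>
	#include <linux/mm.h>

	#define POOL_PAGE_ORDER 0	/* hypothetical: order of the cached pages */

	struct my_pool {		/* hypothetical page cache */
		struct list_head cached;
	};

	/* Stash a page in the cache; on false, the caller disposes of the page. */
	static bool pool_cache_put(struct my_pool *pool, struct page *page)
	{
		/*
		 * Poison the memory so that any access while the page sits in
		 * the cache is reported by KASAN as a use-after-free.
		 */
		if (!kasan_mempool_poison_pages(page, POOL_PAGE_ORDER))
			return false;	/* KASAN detected a bug with this page */
		list_add(&page->lru, &pool->cached);
		return true;
	}

	/* Take a cached page out for reuse. */
	static struct page *pool_cache_get(struct my_pool *pool)
	{
		struct page *page = list_first_entry_or_null(&pool->cached,
							     struct page, lru);

		if (!page)
			return NULL;
		list_del(&page->lru);
		/* Unpoison before handing the memory back to the caller. */
		kasan_mempool_unpoison_pages(page, POOL_PAGE_ORDER);
		return page;
	}

On a false return from kasan_mempool_poison_pages(), KASAN has already reported the bug; the sketch declines to cache the page and lets the caller free it through the normal path.
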
include/linux/kasan.h

@@ -225,6 +225,9 @@ bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
  * This function is similar to kasan_mempool_poison_object() but operates on
  * page allocations.
  *
+ * Before the poisoned allocation can be reused, it must be unpoisoned via
+ * kasan_mempool_unpoison_pages().
+ *
  * Return: true if the allocation can be safely reused; false otherwise.
  */
 static __always_inline bool kasan_mempool_poison_pages(struct page *page,
@@ -235,6 +238,27 @@ static __always_inline bool kasan_mempool_poison_pages(struct page *page,
 	return true;
 }
 
+void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
+				    unsigned long ip);
+/**
+ * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
+ * @page: Pointer to the page allocation.
+ * @order: Order of the allocation.
+ *
+ * This function is intended for kernel subsystems that cache page allocations
+ * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
+ *
+ * This function unpoisons a page allocation that was previously poisoned by
+ * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
+ * the tag-based modes, this function assigns a new tag to the allocation.
+ */
+static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
+							 unsigned int order)
+{
+	if (kasan_enabled())
+		__kasan_mempool_unpoison_pages(page, order, _RET_IP_);
+}
+
 bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
 /**
  * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
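
One implication of the kernel-doc above, for the tag-based modes (SW_TAGS/HW_TAGS): since unpoisoning assigns a new tag, a tagged pointer derived before the page was cached goes stale once the page is reused. A hypothetical illustration, reusing the sketch's POOL_PAGE_ORDER:

	void *before = page_address(page);	/* carries the old tag */

	kasan_mempool_poison_pages(page, POOL_PAGE_ORDER);
	kasan_mempool_unpoison_pages(page, POOL_PAGE_ORDER);

	/*
	 * In the tag-based modes, accessing through "before" would now be
	 * reported as a tag mismatch; re-derive the pointer instead.
	 */
	void *after = page_address(page);
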
@@ -353,6 +377,7 @@ static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int or
 {
 	return true;
 }
+static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
 static inline bool kasan_mempool_poison_object(void *ptr)
 {
 	return true;
mm/kasan/common.c

@@ -449,6 +449,12 @@ bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
 	return true;
 }
 
+void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
+				    unsigned long ip)
+{
+	__kasan_unpoison_pages(page, order, false);
+}
+
 bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
 {
 	struct folio *folio;
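
A note on the implementation (my gloss; the patch itself does not spell this out): the final false passed to __kasan_unpoison_pages() is the init flag, so the memory is unpoisoned without being zero-initialized, matching the kernel-doc's "without zeroing the allocation's memory". A caller reusing a cached page must therefore treat its contents as stale:

	/* Hypothetical continuation of the earlier sketch. */
	struct page *page = pool_cache_get(&pool);

	if (page) {
		void *mem = page_address(page);

		/*
		 * The previous user's data is still here; zero it explicitly
		 * if initialized memory is needed.
		 */
		memset(mem, 0, PAGE_SIZE << POOL_PAGE_ORDER);
	}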