lkdtm/usercopy: Check vmalloc and >0-order folios
Add coverage for the recently added usercopy checks for vmalloc and
folios, via USERCOPY_VMALLOC and USERCOPY_FOLIO respectively.

Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Kees Cook <keescook@chromium.org>
commit fc34eec686
parent d2b8060f16
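The two new crash types can be exercised through lkdtm's debugfs interface by writing the test name to the provoke-crash DIRECT file, with results reported in the kernel log. A minimal userspace trigger might look like the sketch below (an illustration, not part of the patch; it assumes CONFIG_LKDTM is enabled, debugfs is mounted at /sys/kernel/debug, and root privileges; actually catching the bad copy additionally requires CONFIG_HARDENED_USERCOPY, as the pr_expected_config_param() hint in the test points out):

/*
 * Illustrative trigger for the new lkdtm tests (not part of this patch).
 * Writes a crash type name to lkdtm's DIRECT interface; check dmesg for
 * the "attempting ... copy_to_user()" messages afterwards.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *test = "USERCOPY_VMALLOC";	/* or "USERCOPY_FOLIO" */
	int fd = open("/sys/kernel/debug/provoke-crash/DIRECT", O_WRONLY);

	if (fd < 0) {
		perror("open DIRECT");
		return 1;
	}
	if (write(fd, test, strlen(test)) < 0)
		perror("write crash type");
	close(fd);
	return 0;
}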
@@ -5,6 +5,7 @@
  */
 #include "lkdtm.h"
 #include <linux/slab.h>
+#include <linux/highmem.h>
 #include <linux/vmalloc.h>
 #include <linux/sched/task_stack.h>
 #include <linux/mman.h>
@@ -341,6 +342,86 @@ free_user:
 	vm_munmap(user_addr, PAGE_SIZE);
 }
 
+/*
+ * This expects "kaddr" to point to a PAGE_SIZE allocation, which means
+ * a more complete test that would include copy_from_user() would risk
+ * memory corruption. Just test copy_to_user() here, as that exercises
+ * almost exactly the same code paths.
+ */
+static void do_usercopy_page_span(const char *name, void *kaddr)
+{
+	unsigned long uaddr;
+
+	uaddr = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE,
+			MAP_ANONYMOUS | MAP_PRIVATE, 0);
+	if (uaddr >= TASK_SIZE) {
+		pr_warn("Failed to allocate user memory\n");
+		return;
+	}
+
+	/* Initialize contents. */
+	memset(kaddr, 0xAA, PAGE_SIZE);
+
+	/* Bump the kaddr forward to detect a page-spanning overflow. */
+	kaddr += PAGE_SIZE / 2;
+
+	pr_info("attempting good copy_to_user() from kernel %s: %px\n",
+		name, kaddr);
+	if (copy_to_user((void __user *)uaddr, kaddr,
+			 unconst + (PAGE_SIZE / 2))) {
+		pr_err("copy_to_user() failed unexpectedly?!\n");
+		goto free_user;
+	}
+
+	pr_info("attempting bad copy_to_user() from kernel %s: %px\n",
+		name, kaddr);
+	if (copy_to_user((void __user *)uaddr, kaddr, unconst + PAGE_SIZE)) {
+		pr_warn("Good, copy_to_user() failed, but lacked Oops(?!)\n");
+		goto free_user;
+	}
+
+	pr_err("FAIL: bad copy_to_user() not detected!\n");
+	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");
+
+free_user:
+	vm_munmap(uaddr, PAGE_SIZE);
+}
+
+static void lkdtm_USERCOPY_VMALLOC(void)
+{
+	void *addr;
+
+	addr = vmalloc(PAGE_SIZE);
+	if (!addr) {
+		pr_err("vmalloc() failed!?\n");
+		return;
+	}
+	do_usercopy_page_span("vmalloc", addr);
+	vfree(addr);
+}
+
+static void lkdtm_USERCOPY_FOLIO(void)
+{
+	struct folio *folio;
+	void *addr;
+
+	/*
+	 * FIXME: Folio checking currently misses 0-order allocations, so
+	 * allocate and bump forward to the last page.
+	 */
+	folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 1);
+	if (!folio) {
+		pr_err("folio_alloc() failed!?\n");
+		return;
+	}
+	addr = folio_address(folio);
+	if (addr)
+		do_usercopy_page_span("folio", addr + PAGE_SIZE);
+	else
+		pr_err("folio_address() failed?!\n");
+	folio_put(folio);
+}
+
 void __init lkdtm_usercopy_init(void)
 {
 	/* Prepare cache that lacks SLAB_USERCOPY flag. */
@@ -365,6 +446,8 @@ static struct crashtype crashtypes[] = {
 	CRASHTYPE(USERCOPY_STACK_FRAME_TO),
 	CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
 	CRASHTYPE(USERCOPY_STACK_BEYOND),
+	CRASHTYPE(USERCOPY_VMALLOC),
+	CRASHTYPE(USERCOPY_FOLIO),
 	CRASHTYPE(USERCOPY_KERNEL),
 };
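For context on the FIXME in lkdtm_USERCOPY_FOLIO() above: the recently added folio check in hardened usercopy only bounds-checks multi-page (order > 0) folios, which is why the test allocates an order-1 folio and bumps the kernel address to its last page before the page-spanning copy_to_user(). The sketch below illustrates that kind of bounds check; folio_span_ok() is a made-up helper name, and this is not the actual mm/usercopy.c code, which also validates the address first and handles vmalloc and slab objects separately.

/*
 * Illustrative sketch only (folio_span_ok() is a hypothetical helper,
 * not the real mm/usercopy.c implementation). Returns true when a copy
 * of @n bytes starting at @ptr stays within the folio backing @ptr.
 */
#include <linux/mm.h>

static bool folio_span_ok(const void *ptr, unsigned long n)
{
	struct folio *folio = virt_to_folio(ptr);
	unsigned long offset;

	/* Order-0 (single-page) folios are skipped: the gap the FIXME notes. */
	if (!folio_test_large(folio))
		return true;

	offset = (unsigned long)ptr - (unsigned long)folio_address(folio);
	return offset + n <= folio_size(folio);
}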