/*
 * mmu_audit.c:
 *
 * Audit code for KVM MMU
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay   <yaniv@qumranet.com>
 *   Avi Kivity    <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
2010-08-30 14:26:33 +04:00
# include <linux/ratelimit.h>
2011-11-30 13:43:24 +04:00
char const * audit_point_name [ ] = {
" pre page fault " ,
" post page fault " ,
" pre pte write " ,
" post pte write " ,
" pre sync " ,
" post sync "
} ;
2010-12-23 11:08:35 +03:00
# define audit_printk(kvm, fmt, args...) \
2010-09-27 14:07:59 +04:00
printk ( KERN_ERR " audit: (%s) error: " \
2010-12-23 11:08:35 +03:00
fmt , audit_point_name [ kvm - > arch . audit_point ] , # # args )
2010-08-30 14:24:10 +04:00
2010-08-30 14:25:51 +04:00
typedef void ( * inspect_spte_fn ) ( struct kvm_vcpu * vcpu , u64 * sptep , int level ) ;
2010-08-30 14:24:10 +04:00
2010-08-30 14:25:51 +04:00
static void __mmu_spte_walk ( struct kvm_vcpu * vcpu , struct kvm_mmu_page * sp ,
inspect_spte_fn fn , int level )
2010-08-30 14:24:10 +04:00
{
int i ;
for ( i = 0 ; i < PT64_ENT_PER_PAGE ; + + i ) {
2010-08-30 14:25:51 +04:00
u64 * ent = sp - > spt ;
fn ( vcpu , ent + i , level ) ;
if ( is_shadow_present_pte ( ent [ i ] ) & &
! is_last_spte ( ent [ i ] , level ) ) {
struct kvm_mmu_page * child ;
child = page_header ( ent [ i ] & PT64_BASE_ADDR_MASK ) ;
__mmu_spte_walk ( vcpu , child , fn , level - 1 ) ;
2010-08-30 14:24:10 +04:00
}
}
}
static void mmu_spte_walk ( struct kvm_vcpu * vcpu , inspect_spte_fn fn )
{
int i ;
struct kvm_mmu_page * sp ;
if ( ! VALID_PAGE ( vcpu - > arch . mmu . root_hpa ) )
return ;
2010-08-30 14:25:51 +04:00
2010-09-27 14:06:16 +04:00
if ( vcpu - > arch . mmu . root_level = = PT64_ROOT_LEVEL ) {
2010-08-30 14:24:10 +04:00
hpa_t root = vcpu - > arch . mmu . root_hpa ;
2010-08-30 14:25:51 +04:00
2010-08-30 14:24:10 +04:00
sp = page_header ( root ) ;
2010-08-30 14:25:51 +04:00
__mmu_spte_walk ( vcpu , sp , fn , PT64_ROOT_LEVEL ) ;
2010-08-30 14:24:10 +04:00
return ;
}
2010-08-30 14:25:51 +04:00
2010-08-30 14:24:10 +04:00
for ( i = 0 ; i < 4 ; + + i ) {
hpa_t root = vcpu - > arch . mmu . pae_root [ i ] ;
if ( root & & VALID_PAGE ( root ) ) {
root & = PT64_BASE_ADDR_MASK ;
sp = page_header ( root ) ;
2010-08-30 14:25:51 +04:00
__mmu_spte_walk ( vcpu , sp , fn , 2 ) ;
2010-08-30 14:24:10 +04:00
}
}
2010-08-30 14:25:51 +04:00
2010-08-30 14:24:10 +04:00
return ;
}
2010-08-30 14:25:03 +04:00
typedef void ( * sp_handler ) ( struct kvm * kvm , struct kvm_mmu_page * sp ) ;
static void walk_all_active_sps ( struct kvm * kvm , sp_handler fn )
{
struct kvm_mmu_page * sp ;
list_for_each_entry ( sp , & kvm - > arch . active_mmu_pages , link )
fn ( kvm , sp ) ;
}
2010-08-30 14:25:51 +04:00
static void audit_mappings ( struct kvm_vcpu * vcpu , u64 * sptep , int level )
2010-08-30 14:24:10 +04:00
{
2010-08-30 14:25:51 +04:00
struct kvm_mmu_page * sp ;
gfn_t gfn ;
pfn_t pfn ;
hpa_t hpa ;
2010-08-30 14:24:10 +04:00
2010-08-30 14:25:51 +04:00
sp = page_header ( __pa ( sptep ) ) ;
if ( sp - > unsync ) {
if ( level ! = PT_PAGE_TABLE_LEVEL ) {
2010-12-23 11:08:35 +03:00
audit_printk ( vcpu - > kvm , " unsync sp: %p "
" level = %d \n " , sp , level ) ;
2010-08-30 14:24:10 +04:00
return ;
}
2010-08-30 14:25:51 +04:00
}
2010-08-30 14:24:10 +04:00
2010-08-30 14:25:51 +04:00
if ( ! is_shadow_present_pte ( * sptep ) | | ! is_last_spte ( * sptep , level ) )
return ;
2010-08-30 14:24:10 +04:00
2010-08-30 14:25:51 +04:00
gfn = kvm_mmu_page_get_gfn ( sp , sptep - sp - > spt ) ;
pfn = gfn_to_pfn_atomic ( vcpu - > kvm , gfn ) ;
2010-08-30 14:24:10 +04:00
2010-08-30 14:25:51 +04:00
if ( is_error_pfn ( pfn ) ) {
kvm_release_pfn_clean ( pfn ) ;
return ;
2010-08-30 14:24:10 +04:00
}
2010-08-30 14:25:51 +04:00
hpa = pfn < < PAGE_SHIFT ;
if ( ( * sptep & PT64_BASE_ADDR_MASK ) ! = hpa )
2010-12-23 11:08:35 +03:00
audit_printk ( vcpu - > kvm , " levels %d pfn %llx hpa %llx "
" ent %llxn " , vcpu - > arch . mmu . root_level , pfn ,
hpa , * sptep ) ;
2010-08-30 14:24:10 +04:00
}
2010-08-30 14:25:51 +04:00
static void inspect_spte_has_rmap ( struct kvm * kvm , u64 * sptep )
2010-08-30 14:24:10 +04:00
{
2011-09-12 13:26:22 +04:00
static DEFINE_RATELIMIT_STATE ( ratelimit_state , 5 * HZ , 10 ) ;
2010-08-30 14:24:10 +04:00
unsigned long * rmapp ;
struct kvm_mmu_page * rev_sp ;
gfn_t gfn ;
rev_sp = page_header ( __pa ( sptep ) ) ;
gfn = kvm_mmu_page_get_gfn ( rev_sp , sptep - rev_sp - > spt ) ;
if ( ! gfn_to_memslot ( kvm , gfn ) ) {
2011-09-12 13:26:22 +04:00
if ( ! __ratelimit ( & ratelimit_state ) )
2010-08-30 14:24:10 +04:00
return ;
2010-12-23 11:08:35 +03:00
audit_printk ( kvm , " no memslot for gfn %llx \n " , gfn ) ;
audit_printk ( kvm , " index %ld of sp (gfn=%llx) \n " ,
2010-09-27 14:07:59 +04:00
( long int ) ( sptep - rev_sp - > spt ) , rev_sp - > gfn ) ;
2010-08-30 14:24:10 +04:00
dump_stack ( ) ;
return ;
}
rmapp = gfn_to_rmap ( kvm , gfn , rev_sp - > role . level ) ;
if ( ! * rmapp ) {
2011-09-12 13:26:22 +04:00
if ( ! __ratelimit ( & ratelimit_state ) )
2010-08-30 14:24:10 +04:00
return ;
2010-12-23 11:08:35 +03:00
audit_printk ( kvm , " no rmap for writable spte %llx \n " ,
* sptep ) ;
2010-08-30 14:24:10 +04:00
dump_stack ( ) ;
}
}
2010-08-30 14:25:51 +04:00
static void audit_sptes_have_rmaps ( struct kvm_vcpu * vcpu , u64 * sptep , int level )
2010-08-30 14:24:10 +04:00
{
2010-08-30 14:25:51 +04:00
if ( is_shadow_present_pte ( * sptep ) & & is_last_spte ( * sptep , level ) )
inspect_spte_has_rmap ( vcpu - > kvm , sptep ) ;
2010-08-30 14:24:10 +04:00
}
2010-09-27 14:09:29 +04:00
static void audit_spte_after_sync ( struct kvm_vcpu * vcpu , u64 * sptep , int level )
{
struct kvm_mmu_page * sp = page_header ( __pa ( sptep ) ) ;
2010-12-23 11:08:35 +03:00
if ( vcpu - > kvm - > arch . audit_point = = AUDIT_POST_SYNC & & sp - > unsync )
audit_printk ( vcpu - > kvm , " meet unsync sp(%p) after sync "
" root. \n " , sp ) ;
2010-09-27 14:09:29 +04:00
}
2010-08-30 14:25:03 +04:00
static void check_mappings_rmap ( struct kvm * kvm , struct kvm_mmu_page * sp )
2010-08-30 14:24:10 +04:00
{
int i ;
2010-08-30 14:25:03 +04:00
if ( sp - > role . level ! = PT_PAGE_TABLE_LEVEL )
return ;
2010-08-30 14:24:10 +04:00
2010-08-30 14:25:03 +04:00
for ( i = 0 ; i < PT64_ENT_PER_PAGE ; + + i ) {
if ( ! is_rmap_spte ( sp - > spt [ i ] ) )
2010-08-30 14:24:10 +04:00
continue ;
2010-08-30 14:25:03 +04:00
inspect_spte_has_rmap ( kvm , sp - > spt + i ) ;
2010-08-30 14:24:10 +04:00
}
}
2010-09-27 14:09:29 +04:00
static void audit_write_protection ( struct kvm * kvm , struct kvm_mmu_page * sp )
2010-08-30 14:24:10 +04:00
{
unsigned long * rmapp ;
2012-03-21 18:50:34 +04:00
u64 * sptep ;
struct rmap_iterator iter ;
2010-08-30 14:24:10 +04:00
2010-08-30 14:25:03 +04:00
if ( sp - > role . direct | | sp - > unsync | | sp - > role . invalid )
return ;
2010-08-30 14:24:10 +04:00
2012-08-01 13:02:01 +04:00
rmapp = gfn_to_rmap ( kvm , sp - > gfn , PT_PAGE_TABLE_LEVEL ) ;
2010-08-30 14:24:10 +04:00
2012-03-21 18:50:34 +04:00
for ( sptep = rmap_get_first ( * rmapp , & iter ) ; sptep ;
sptep = rmap_get_next ( & iter ) ) {
if ( is_writable_pte ( * sptep ) )
2010-12-23 11:08:35 +03:00
audit_printk ( kvm , " shadow page has writable "
" mappings: gfn %llx role %x \n " ,
sp - > gfn , sp - > role . word ) ;
2010-08-30 14:24:10 +04:00
}
}
2010-08-30 14:25:03 +04:00
static void audit_sp ( struct kvm * kvm , struct kvm_mmu_page * sp )
{
check_mappings_rmap ( kvm , sp ) ;
audit_write_protection ( kvm , sp ) ;
}
static void audit_all_active_sps ( struct kvm * kvm )
{
walk_all_active_sps ( kvm , audit_sp ) ;
}
2010-08-30 14:25:51 +04:00
static void audit_spte ( struct kvm_vcpu * vcpu , u64 * sptep , int level )
{
audit_sptes_have_rmaps ( vcpu , sptep , level ) ;
audit_mappings ( vcpu , sptep , level ) ;
2010-09-27 14:09:29 +04:00
audit_spte_after_sync ( vcpu , sptep , level ) ;
2010-08-30 14:25:51 +04:00
}
static void audit_vcpu_spte ( struct kvm_vcpu * vcpu )
{
mmu_spte_walk ( vcpu , audit_spte ) ;
}
2011-11-28 16:41:00 +04:00
static bool mmu_audit ;
2012-02-24 11:31:31 +04:00
static struct static_key mmu_audit_key ;
2011-11-28 16:41:00 +04:00
2011-11-30 13:43:24 +04:00
static void __kvm_mmu_audit ( struct kvm_vcpu * vcpu , int point )
2010-08-30 14:24:10 +04:00
{
2010-08-30 14:26:33 +04:00
static DEFINE_RATELIMIT_STATE ( ratelimit_state , 5 * HZ , 10 ) ;
2011-11-30 13:43:24 +04:00
if ( ! __ratelimit ( & ratelimit_state ) )
return ;
2010-08-30 14:26:33 +04:00
2011-11-30 13:43:24 +04:00
vcpu - > kvm - > arch . audit_point = point ;
audit_all_active_sps ( vcpu - > kvm ) ;
audit_vcpu_spte ( vcpu ) ;
}
static inline void kvm_mmu_audit ( struct kvm_vcpu * vcpu , int point )
{
2012-02-24 11:31:31 +04:00
if ( static_key_false ( ( & mmu_audit_key ) ) )
2011-11-30 13:43:24 +04:00
__kvm_mmu_audit ( vcpu , point ) ;
2010-08-30 14:24:10 +04:00
}
static void mmu_audit_enable ( void )
{
if ( mmu_audit )
return ;
2012-02-24 11:31:31 +04:00
static_key_slow_inc ( & mmu_audit_key ) ;
2010-08-30 14:24:10 +04:00
mmu_audit = true ;
}
static void mmu_audit_disable ( void )
{
if ( ! mmu_audit )
return ;
2012-02-24 11:31:31 +04:00
static_key_slow_dec ( & mmu_audit_key ) ;
2010-08-30 14:24:10 +04:00
mmu_audit = false ;
}
static int mmu_audit_set ( const char * val , const struct kernel_param * kp )
{
int ret ;
unsigned long enable ;
ret = strict_strtoul ( val , 10 , & enable ) ;
if ( ret < 0 )
return - EINVAL ;
switch ( enable ) {
case 0 :
mmu_audit_disable ( ) ;
break ;
case 1 :
mmu_audit_enable ( ) ;
break ;
default :
return - EINVAL ;
}
return 0 ;
}
static struct kernel_param_ops audit_param_ops = {
. set = mmu_audit_set ,
. get = param_get_bool ,
} ;
module_param_cb ( mmu_audit , & audit_param_ops , & mmu_audit , 0644 ) ;