/*
 * KVM MMU tracepoints.
 *
 * TRACE_HEADER_MULTI_READ allows define_trace.h to re-include this
 * header several times, which is how the tracepoint machinery works.
 */
#if !defined(_TRACE_KVMMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVMMMU_H

#include <linux/tracepoint.h>
#include <linux/trace_events.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvmmmu
/* Common __entry fields for events that log a struct kvm_mmu_page. */
#define KVM_MMU_PAGE_FIELDS			\
	__field(unsigned long, mmu_valid_gen)	\
	__field(__u64, gfn)			\
	__field(__u32, role)			\
	__field(__u32, root_count)		\
	__field(bool, unsync)
2009-07-06 16:58:14 +04:00
2013-05-31 04:36:24 +04:00
# define KVM_MMU_PAGE_ASSIGN(sp) \
__entry - > mmu_valid_gen = sp - > mmu_valid_gen ; \
__entry - > gfn = sp - > gfn ; \
__entry - > role = sp - > role . word ; \
__entry - > root_count = sp - > root_count ; \
2009-07-06 16:58:14 +04:00
__entry - > unsync = sp - > unsync ;
/*
 * Format the common shadow-page fields for TP_printk().  Evaluates to the
 * trace_seq buffer position saved before printing, as TP_printk() expects
 * a string pointer; the role word is decoded into its bitfields for
 * human-readable output.
 */
#define KVM_MMU_PAGE_PRINTK() ({				        \
	const char *saved_ptr = trace_seq_buffer_ptr(p);		\
	static const char *access_str[] = {			        \
		"---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux"	\
	};							        \
	union kvm_mmu_page_role role;				        \
								        \
	role.word = __entry->role;					\
								        \
	trace_seq_printf(p, "sp gen %lx gfn %llx %u%s q%u%s %s%s"	\
			 " %snxe root %u %s%c",	__entry->mmu_valid_gen,	\
			 __entry->gfn, role.level,			\
			 role.cr4_pae ? " pae" : "",			\
			 role.quadrant,					\
			 role.direct ? " direct" : "",			\
			 access_str[role.access],			\
			 role.invalid ? " invalid" : "",		\
			 role.nxe ? "" : "!",				\
			 __entry->root_count,				\
			 __entry->unsync ? "unsync" : "sync", 0);	\
	saved_ptr;							\
	})
/* Flag/name pairs for __print_flags() decoding of page-fault error codes. */
#define kvm_mmu_trace_pferr_flags	\
	{ PFERR_PRESENT_MASK, "P" },	\
	{ PFERR_WRITE_MASK, "W" },	\
	{ PFERR_USER_MASK, "U" },	\
	{ PFERR_RSVD_MASK, "RSVD" },	\
	{ PFERR_FETCH_MASK, "F" }
/*
* A pagetable walk has started
*/
TRACE_EVENT (
kvm_mmu_pagetable_walk ,
2012-06-20 12:00:00 +04:00
TP_PROTO ( u64 addr , u32 pferr ) ,
TP_ARGS ( addr , pferr ) ,
2009-07-06 13:21:32 +04:00
TP_STRUCT__entry (
__field ( __u64 , addr )
__field ( __u32 , pferr )
) ,
TP_fast_assign (
__entry - > addr = addr ;
2012-06-20 12:00:00 +04:00
__entry - > pferr = pferr ;
2009-07-06 13:21:32 +04:00
) ,
TP_printk ( " addr %llx pferr %x %s " , __entry - > addr , __entry - > pferr ,
__print_flags ( __entry - > pferr , " | " , kvm_mmu_trace_pferr_flags ) )
) ;
/* We just walked a paging element */
TRACE_EVENT(
	kvm_mmu_paging_element,
	TP_PROTO(u64 pte, int level),
	TP_ARGS(pte, level),

	TP_STRUCT__entry(
		__field(__u64, pte)
		__field(__u32, level)
	),

	TP_fast_assign(
		__entry->pte = pte;
		__entry->level = level;
	),

	TP_printk("pte %llx level %u", __entry->pte, __entry->level)
);
2010-04-28 07:54:55 +04:00
DECLARE_EVENT_CLASS ( kvm_mmu_set_bit_class ,
2009-07-06 13:21:32 +04:00
TP_PROTO ( unsigned long table_gfn , unsigned index , unsigned size ) ,
2010-04-28 07:54:55 +04:00
2009-07-06 13:21:32 +04:00
TP_ARGS ( table_gfn , index , size ) ,
TP_STRUCT__entry (
__field ( __u64 , gpa )
2010-04-28 07:54:55 +04:00
) ,
2009-07-06 13:21:32 +04:00
TP_fast_assign (
__entry - > gpa = ( ( u64 ) table_gfn < < PAGE_SHIFT )
+ index * size ;
) ,
TP_printk ( " gpa %llx " , __entry - > gpa )
) ;
2010-04-28 07:54:55 +04:00
/* We set a pte accessed bit */
DEFINE_EVENT ( kvm_mmu_set_bit_class , kvm_mmu_set_accessed_bit ,
2009-07-06 13:21:32 +04:00
TP_PROTO ( unsigned long table_gfn , unsigned index , unsigned size ) ,
2010-04-28 07:54:55 +04:00
TP_ARGS ( table_gfn , index , size )
) ;
2009-07-06 13:21:32 +04:00
2010-04-28 07:54:55 +04:00
/* We set a pte dirty bit */
DEFINE_EVENT ( kvm_mmu_set_bit_class , kvm_mmu_set_dirty_bit ,
2009-07-06 13:21:32 +04:00
2010-04-28 07:54:55 +04:00
TP_PROTO ( unsigned long table_gfn , unsigned index , unsigned size ) ,
TP_ARGS ( table_gfn , index , size )
2009-07-06 13:21:32 +04:00
) ;
/* A guest pagetable walk failed; log the page-fault error code. */
TRACE_EVENT(
	kvm_mmu_walker_error,
	TP_PROTO(u32 pferr),
	TP_ARGS(pferr),

	TP_STRUCT__entry(
		__field(__u32, pferr)
	),

	TP_fast_assign(
		__entry->pferr = pferr;
	),

	TP_printk("pferr %x %s", __entry->pferr,
		  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);
2009-07-06 16:58:14 +04:00
TRACE_EVENT (
kvm_mmu_get_page ,
TP_PROTO ( struct kvm_mmu_page * sp , bool created ) ,
TP_ARGS ( sp , created ) ,
TP_STRUCT__entry (
KVM_MMU_PAGE_FIELDS
__field ( bool , created )
) ,
TP_fast_assign (
KVM_MMU_PAGE_ASSIGN ( sp )
__entry - > created = created ;
) ,
TP_printk ( " %s %s " , KVM_MMU_PAGE_PRINTK ( ) ,
__entry - > created ? " new " : " existing " )
) ;
2010-04-28 07:54:55 +04:00
DECLARE_EVENT_CLASS ( kvm_mmu_page_class ,
2009-07-06 16:58:14 +04:00
TP_PROTO ( struct kvm_mmu_page * sp ) ,
TP_ARGS ( sp ) ,
TP_STRUCT__entry (
KVM_MMU_PAGE_FIELDS
2010-04-28 07:54:55 +04:00
) ,
2009-07-06 16:58:14 +04:00
TP_fast_assign (
KVM_MMU_PAGE_ASSIGN ( sp )
2010-04-28 07:54:55 +04:00
) ,
2009-07-06 16:58:14 +04:00
TP_printk ( " %s " , KVM_MMU_PAGE_PRINTK ( ) )
) ;
2010-04-28 07:54:55 +04:00
DEFINE_EVENT ( kvm_mmu_page_class , kvm_mmu_sync_page ,
2009-07-06 16:58:14 +04:00
TP_PROTO ( struct kvm_mmu_page * sp ) ,
2010-04-28 07:54:55 +04:00
TP_ARGS ( sp )
2009-07-06 16:58:14 +04:00
) ;
2010-04-28 07:54:55 +04:00
DEFINE_EVENT ( kvm_mmu_page_class , kvm_mmu_unsync_page ,
2009-07-06 16:58:14 +04:00
TP_PROTO ( struct kvm_mmu_page * sp ) ,
2010-04-28 07:54:55 +04:00
TP_ARGS ( sp )
) ;
2009-07-06 16:58:14 +04:00
2010-06-04 17:53:54 +04:00
DEFINE_EVENT ( kvm_mmu_page_class , kvm_mmu_prepare_zap_page ,
2010-04-28 07:54:55 +04:00
TP_PROTO ( struct kvm_mmu_page * sp ) ,
2009-07-06 16:58:14 +04:00
2010-04-28 07:54:55 +04:00
TP_ARGS ( sp )
2009-07-06 16:58:14 +04:00
) ;
2010-08-30 14:22:53 +04:00
2011-07-11 23:34:24 +04:00
TRACE_EVENT (
mark_mmio_spte ,
2013-06-07 12:51:24 +04:00
TP_PROTO ( u64 * sptep , gfn_t gfn , unsigned access , unsigned int gen ) ,
TP_ARGS ( sptep , gfn , access , gen ) ,
2011-07-11 23:34:24 +04:00
TP_STRUCT__entry (
__field ( void * , sptep )
__field ( gfn_t , gfn )
__field ( unsigned , access )
2013-06-07 12:51:24 +04:00
__field ( unsigned int , gen )
2011-07-11 23:34:24 +04:00
) ,
TP_fast_assign (
__entry - > sptep = sptep ;
__entry - > gfn = gfn ;
__entry - > access = access ;
2013-06-07 12:51:24 +04:00
__entry - > gen = gen ;
2011-07-11 23:34:24 +04:00
) ,
2013-06-07 12:51:24 +04:00
TP_printk ( " sptep:%p gfn %llx access %x gen %x " , __entry - > sptep ,
__entry - > gfn , __entry - > access , __entry - > gen )
2011-07-11 23:34:24 +04:00
) ;
/* An MMIO page fault was handled via the MMIO-spte fast path. */
TRACE_EVENT(
	handle_mmio_page_fault,
	TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
	TP_ARGS(addr, gfn, access),

	TP_STRUCT__entry(
		__field(u64, addr)
		__field(gfn_t, gfn)
		__field(unsigned, access)
	),

	TP_fast_assign(
		__entry->addr = addr;
		__entry->gfn = gfn;
		__entry->access = access;
	),

	TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
		  __entry->access)
);
/*
 * True when the fast page fault handler retried the fix and the resulting
 * spte is writable, i.e. the fault was actually fixed.
 */
#define __spte_satisfied(__spte)				\
	(__entry->retry && is_writable_pte(__entry->__spte))
/* Fast (lockless) page-fault path: logs the spte before and after the fix. */
TRACE_EVENT(
	fast_page_fault,
	TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
		 u64 *sptep, u64 old_spte, bool retry),
	TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(gva_t, gva)
		__field(u32, error_code)
		__field(u64 *, sptep)
		__field(u64, old_spte)
		__field(u64, new_spte)
		__field(bool, retry)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu->vcpu_id;
		__entry->gva = gva;
		__entry->error_code = error_code;
		__entry->sptep = sptep;
		__entry->old_spte = old_spte;
		/* re-read the spte after the attempted fix */
		__entry->new_spte = *sptep;
		__entry->retry = retry;
	),

	TP_printk("vcpu %d gva %lx error_code %s sptep %p old %#llx"
		  " new %llx spurious %d fixed %d", __entry->vcpu_id,
		  __entry->gva, __print_flags(__entry->error_code, "|",
		  kvm_mmu_trace_pferr_flags), __entry->sptep,
		  __entry->old_spte, __entry->new_spte,
		  __spte_satisfied(old_spte), __spte_satisfied(new_spte)
	)
);
2013-05-31 04:36:25 +04:00
TRACE_EVENT (
kvm_mmu_invalidate_zap_all_pages ,
TP_PROTO ( struct kvm * kvm ) ,
TP_ARGS ( kvm ) ,
TP_STRUCT__entry (
__field ( unsigned long , mmu_valid_gen )
__field ( unsigned int , mmu_used_pages )
) ,
TP_fast_assign (
__entry - > mmu_valid_gen = kvm - > arch . mmu_valid_gen ;
__entry - > mmu_used_pages = kvm - > arch . n_used_mmu_pages ;
) ,
TP_printk ( " kvm-mmu-valid-gen %lx used_pages %x " ,
__entry - > mmu_valid_gen , __entry - > mmu_used_pages
)
) ;
2013-06-07 12:51:27 +04:00
TRACE_EVENT (
check_mmio_spte ,
TP_PROTO ( u64 spte , unsigned int kvm_gen , unsigned int spte_gen ) ,
TP_ARGS ( spte , kvm_gen , spte_gen ) ,
TP_STRUCT__entry (
__field ( unsigned int , kvm_gen )
__field ( unsigned int , spte_gen )
__field ( u64 , spte )
) ,
TP_fast_assign (
__entry - > kvm_gen = kvm_gen ;
__entry - > spte_gen = spte_gen ;
__entry - > spte = spte ;
) ,
TP_printk ( " spte %llx kvm_gen %x spte-gen %x valid %d " , __entry - > spte ,
__entry - > kvm_gen , __entry - > spte_gen ,
__entry - > kvm_gen = = __entry - > spte_gen
)
) ;
#endif /* _TRACE_KVMMMU_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mmutrace

/* This part must be outside protection */
#include <trace/define_trace.h>