# ifndef _PERF_PERF_H
# define _PERF_PERF_H
#if defined(__i386__)
#include "../../arch/x86/include/asm/unistd.h"
/* Read memory barrier: a locked RMW on the stack serializes prior loads. */
#define rmb()		asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
/* PAUSE hint for spin-wait loops (no trailing ';' — it belongs to the caller). */
#define cpu_relax()	asm volatile("rep; nop" ::: "memory")
#endif
#if defined(__x86_64__)
#include "../../arch/x86/include/asm/unistd.h"
/* lfence orders all preceding loads before any subsequent loads. */
#define rmb()		asm volatile("lfence" ::: "memory")
/* PAUSE hint for spin-wait loops (no trailing ';' — it belongs to the caller). */
#define cpu_relax()	asm volatile("rep; nop" ::: "memory")
#endif
#ifdef __powerpc__
#include "../../arch/powerpc/include/asm/unistd.h"
/* Full sync: orders all prior storage accesses before subsequent ones. */
#define rmb()		asm volatile ("sync" ::: "memory")
/* Compiler barrier only (no trailing ';' — it belongs to the caller). */
#define cpu_relax()	asm volatile ("" ::: "memory")
#endif
#ifdef __s390__
#include "../../arch/s390/include/asm/unistd.h"
/* bcr 15,0 acts as a serialization (checkpoint-sync) instruction. */
#define rmb()		asm volatile("bcr 15,0" ::: "memory")
/* Compiler barrier only (no trailing ';' — it belongs to the caller). */
#define cpu_relax()	asm volatile("" ::: "memory")
#endif
# ifdef __sh__
# include "../../arch/sh/include/asm/unistd.h"
/* Only SH-4A and SH-5 have the synco barrier instruction. */
# if defined(__SH4A__) || defined(__SH5__)
# define rmb() asm volatile("synco" ::: "memory")
# else
/* Older SH cores: compiler barrier only. */
# define rmb() asm volatile("" ::: "memory")
# endif
/* Compiler barrier only; SH has no pause-style hint. */
# define cpu_relax() asm volatile("" ::: "memory")
# endif
#ifdef __hppa__
#include "../../arch/parisc/include/asm/unistd.h"
/* Compiler barrier only on PA-RISC. */
#define rmb()		asm volatile("" ::: "memory")
/* Compiler barrier only (no trailing ';' — it belongs to the caller). */
#define cpu_relax()	asm volatile("" ::: "memory")
#endif
# include <time.h>
# include <unistd.h>
# include <sys/types.h>
# include <sys/syscall.h>
# include "../../include/linux/perf_counter.h"
# include "util/types.h"
/*
 * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all
 * counters in the current task.
 */
# define PR_TASK_PERF_COUNTERS_DISABLE 31
# define PR_TASK_PERF_COUNTERS_ENABLE 32
#ifndef NSEC_PER_SEC
#define NSEC_PER_SEC 1000000000ULL
#endif

/*
 * rdclock - read the monotonic clock, in nanoseconds.
 *
 * CLOCK_MONOTONIC is unaffected by wall-clock adjustments, so the
 * value is suitable for measuring elapsed time.
 */
static inline unsigned long long rdclock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	/* Use NSEC_PER_SEC instead of repeating the magic literal. */
	return (unsigned long long)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}
/*
 * Pick up some kernel type conventions:
 */
#define __user
#define asmlinkage

#define __used		__attribute__((__unused__))

#define unlikely(x)	__builtin_expect(!!(x), 0)

/*
 * min - return the smaller of two values, evaluating each argument
 * exactly once (kernel-style statement-expression macro).
 *
 * The (void) comparison of the two locals' addresses is a type check:
 * comparing pointers to different types draws a compiler warning.
 * __typeof__ is used (rather than plain typeof) so the macro also
 * works under strict -std= modes.
 */
#define min(x, y) ({				\
	__typeof__(x) _min1 = (x);		\
	__typeof__(y) _min2 = (y);		\
	(void) (&_min1 == &_min2);		\
	_min1 < _min2 ? _min1 : _min2; })
static inline int
2009-06-12 14:46:55 +04:00
sys_perf_counter_open ( struct perf_counter_attr * attr ,
2009-05-01 20:29:57 +04:00
pid_t pid , int cpu , int group_fd ,
unsigned long flags )
{
2009-06-12 14:46:55 +04:00
attr - > size = sizeof ( * attr ) ;
return syscall ( __NR_perf_counter_open , attr , pid , cpu ,
2009-05-01 20:29:57 +04:00
group_fd , flags ) ;
}
/* Static sizing limits for counter and CPU arrays used by the tools. */
# define MAX_COUNTERS 256
# define MAX_NR_CPUS 256
/*
 * Callchain record: a count followed by that many instruction pointers.
 * ips[] is a C99 flexible array member (layout-identical to the GNU
 * zero-length array idiom it replaces); allocate with
 * sizeof(struct ip_callchain) + nr * sizeof(u64).
 */
struct ip_callchain {
	u64 nr;		/* number of valid entries in ips[] */
	u64 ips[];	/* variable-length list of instruction pointers */
};
# endif