# ifndef _PERF_PERF_H
# define _PERF_PERF_H
# if defined(__x86_64__) || defined(__i386__)
# include "../../arch/x86/include/asm/unistd.h"
# define rmb() asm volatile("lfence" ::: "memory")
# define cpu_relax() asm volatile("rep; nop" ::: "memory");
# endif
# ifdef __powerpc__
# include "../../arch/powerpc/include/asm/unistd.h"
# define rmb() asm volatile ("sync" ::: "memory")
# define cpu_relax() asm volatile ("" ::: "memory");
# endif
# include <time.h>
# include <unistd.h>
# include <sys/types.h>
# include <sys/syscall.h>
# include "../../include/linux/perf_counter.h"
/*
* prctl ( PR_TASK_PERF_COUNTERS_DISABLE ) will ( cheaply ) disable all
* counters in the current task .
*/
# define PR_TASK_PERF_COUNTERS_DISABLE 31
# define PR_TASK_PERF_COUNTERS_ENABLE 32
#ifndef NSEC_PER_SEC
# define NSEC_PER_SEC			1000000000ULL
#endif

/*
 * rdclock - read CLOCK_MONOTONIC as a single 64-bit nanosecond value.
 *
 * Returns nanoseconds since an unspecified (boot-relative) epoch; only
 * differences between two readings are meaningful.  CLOCK_MONOTONIC is
 * used so wall-clock adjustments cannot make time appear to go backwards.
 */
static inline unsigned long long rdclock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	/* use NSEC_PER_SEC instead of repeating the magic constant */
	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}
/*
 * Pick up some kernel type conventions:
 */
#define __user
#define asmlinkage

/* branch-prediction hint: x is expected to be false */
#define unlikely(x)	__builtin_expect(!!(x), 0)

/*
 * Type-safe min(): each argument is evaluated exactly once, and the
 * (void)(&_min1 == &_min2) pointer comparison forces a compile-time
 * warning when x and y have incompatible types.  The original had a
 * garbled "= =" where "==" was intended, which did not compile.
 * __typeof__ is used rather than typeof so this also builds under
 * strict ISO modes (e.g. -std=c11).
 */
#define min(x, y) ({				\
	__typeof__(x) _min1 = (x);		\
	__typeof__(y) _min2 = (y);		\
	(void) (&_min1 == &_min2);		\
	_min1 < _min2 ? _min1 : _min2; })
static inline int
2009-06-12 12:46:55 +02:00
sys_perf_counter_open ( struct perf_counter_attr * attr ,
2009-05-01 18:29:57 +02:00
pid_t pid , int cpu , int group_fd ,
unsigned long flags )
{
2009-06-12 12:46:55 +02:00
attr - > size = sizeof ( * attr ) ;
return syscall ( __NR_perf_counter_open , attr , pid , cpu ,
2009-05-01 18:29:57 +02:00
group_fd , flags ) ;
}
# define MAX_COUNTERS 256
# define MAX_NR_CPUS 256
# endif