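/*
 * Userspace test backend for the kernel's ptr_ring.  The macros and helpers
 * below stand in for the kernel primitives that ptr_ring.h expects, so the
 * ring test harness (main.h) can drive the real ptr_ring code outside the
 * kernel.
 */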
#define _GNU_SOURCE
#include "main.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <pthread.h>
#include <malloc.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>

#define SMP_CACHE_BYTES 64
#define cache_line_size() SMP_CACHE_BYTES
#define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
#define unlikely(x) (__builtin_expect(!!(x), 0))
#define likely(x) (__builtin_expect(!!(x), 1))
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))

typedef pthread_spinlock_t spinlock_t;
typedef int gfp_t;
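
/*
 * Allocator shims: kmalloc/kzalloc become cache-line-aligned userspace
 * allocations; the gfp flags are accepted but ignored.
 */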
static void *kmalloc(unsigned size, gfp_t gfp)
{
	return memalign(64, size);
}

static void *kzalloc(unsigned size, gfp_t gfp)
{
	void *p = memalign(64, size);
	if (!p)
		return p;
	memset(p, 0, size);
	return p;
}

static void kfree(void *p)
{
	if (p)
		free(p);
}

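/*
 * Kernel spinlock API mapped onto pthread spinlocks.  The _bh/_irq/_irqsave
 * variants are plain lock/unlock here: there are no softirqs or interrupts
 * to mask in userspace.
 */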
static void spin_lock_init(spinlock_t *lock)
{
	int r = pthread_spin_init(lock, 0);
	assert(!r);
}

static void spin_lock(spinlock_t *lock)
{
	int ret = pthread_spin_lock(lock);
	assert(!ret);
}

static void spin_unlock(spinlock_t *lock)
{
	int ret = pthread_spin_unlock(lock);
	assert(!ret);
}

static void spin_lock_bh(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_bh(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irq(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_irq(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
{
	spin_lock(lock);
}

static void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
{
	spin_unlock(lock);
}

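/*
 * With the shims above in place, pull in the kernel's ptr_ring and implement
 * the ring callbacks from main.h on top of it.  headcnt/tailcnt count buffers
 * produced vs. reported as used, since ptr_ring itself has no notion of a
 * "used" buffer.
 */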
# include "../../../include/linux/ptr_ring.h"
static unsigned long long headcnt , tailcnt ;
static struct ptr_ring array ____cacheline_aligned_in_smp ;
/* implemented by ring */
void alloc_ring(void)
{
	int ret = ptr_ring_init(&array, ring_size, 0);
	assert(!ret);
	/* Hacky way to poke at ring internals. Useful for testing though. */
	if (param)
		array.batch = param;
}

/* guest side */
int add_inbuf(unsigned len, void *buf, void *datap)
{
	int ret;

	ret = __ptr_ring_produce(&array, buf);
	if (ret >= 0) {
		ret = 0;
		headcnt++;
	}

	return ret;
}

/*
 * The ptr_ring API provides no way for the producer to find out whether a
 * given buffer has been consumed.  Our tests merely require that a successful
 * get_buf implies that add_inbuf succeeded in the past, and that add_inbuf
 * will succeed again; fake it accordingly.
 */
void *get_buf(unsigned *lenp, void **bufp)
{
	void *datap;

	if (tailcnt == headcnt || __ptr_ring_full(&array))
		datap = NULL;
	else {
		datap = "Buffer\n";
		++tailcnt;
	}

	return datap;
}

bool used_empty()
{
	return (tailcnt == headcnt || __ptr_ring_full(&array));
}
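
/*
 * Notification hooks.  This backend does not implement call/kick signalling;
 * the harness is expected not to use it, so any call into these stubs is a
 * bug and trips the assert.
 */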
void disable_call()
{
	assert(0);
}

bool enable_call()
{
	assert(0);
}

void kick_available(void)
{
	assert(0);
}

/* host side */
void disable_kick()
{
	assert(0);
}

bool enable_kick()
{
	assert(0);
}

bool avail_empty()
{
	return !__ptr_ring_peek(&array);
}
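
/* Consume one pointer from the ring; the returned pointer doubles as the bool result. */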
bool use_buf(unsigned *lenp, void **bufp)
{
	void *ptr;

	ptr = __ptr_ring_consume(&array);

	return ptr;
}

void call_used(void)
{
	assert(0);
}