/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"

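/*
 *
 * hv_begin_read()
 *
 * Mask signaling before draining the ring buffer: setting interrupt_mask
 * tells the other end not to interrupt us when new data is placed while
 * we are inside the read loop (see the signaling protocol described below).
 */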
void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;
	mb();
}
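
/*
 *
 * hv_end_read()
 *
 * Unmask signaling once the read loop is done and return the number of
 * bytes still available to read, so the caller can catch any data that
 * raced in while interrupts were masked.
 */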
u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	u32 read;
	u32 write;

	rbi->ring_buffer->interrupt_mask = 0;
	mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	hv_get_ringbuffer_availbytes(rbi, &read, &write);

	return read;
}

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 * 1. The host guarantees that while it is draining the
 *    ring buffer, it will set the interrupt_mask to
 *    indicate it does not need to be interrupted when
 *    new data is placed.
 *
 * 2. The host guarantees that it will completely drain
 *    the ring buffer before exiting the read loop. Further,
 *    once the ring buffer is empty, it will clear the
 *    interrupt_mask and re-check to see if new data has
 *    arrived.
 */
static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
	mb();
	if (rbi->ring_buffer->interrupt_mask)
		return false;

	/* check interrupt_mask before read_index */
	rmb();

	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == rbi->ring_buffer->read_index)
		return true;

	return false;
}

/*
 * To optimize flow management on the send-side: when the sender is
 * blocked because of lack of sufficient space in the ring buffer, the
 * consumer of the ring buffer can potentially signal the producer.
 * This is controlled by the following parameters:
 *
 * 1. pending_send_sz: This is the size in bytes that the
 *    producer is trying to send.
 * 2. The feature bit feat_pending_send_sz set to indicate if
 *    the consumer of the ring will signal when the ring
 *    state transitions from being full to a state where
 *    there is room for the producer to send the pending packet.
 */
static bool hv_need_to_signal_on_read(u32 old_rd,
				      struct hv_ring_buffer_info *rbi)
{
	u32 prev_write_sz;
	u32 cur_write_sz;
	u32 r_size;
	u32 write_loc = rbi->ring_buffer->write_index;
	u32 read_loc = rbi->ring_buffer->read_index;
	u32 pending_sz = rbi->ring_buffer->pending_send_sz;

	/*
	 * If the other end is not blocked on write don't bother.
	 */
	if (pending_sz == 0)
		return false;

	r_size = rbi->ring_datasize;
	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
			read_loc - write_loc;

	prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
			old_rd - write_loc;

	if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
		return true;

	return false;
}
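
/*
 * Worked example (hypothetical numbers): for a 4096-byte data area with
 * write_index == 4000 and read_index == 100, the producer sees
 * r_size - (write_loc - read_loc) = 196 bytes of free space.  If it has
 * set pending_send_sz to 512, this function keeps returning false while
 * the free space stays below 512; on the read where prev_write_sz (space
 * computed from the old read position) is still below 512 but
 * cur_write_sz (space from the new read position) reaches it, it returns
 * true so the blocked producer is signaled exactly once.
 */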

/*
 * hv_get_next_write_location()
 *
 * Get the next write location for the specified ring buffer
 *
 */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/*
 * hv_set_next_write_location()
 *
 * Set the next write location for the specified ring buffer
 *
 */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/*
 * hv_get_next_read_location()
 *
 * Get the next read location for the specified ring buffer
 */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->read_index;

	return next;
}

/*
 * hv_get_next_readlocation_withoffset()
 *
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over 'offset' bytes (for example a
 * packet descriptor it has already consumed) before copying data out.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}

/*
 *
 * hv_set_next_read_location()
 *
 * Set the next read location for the specified ring buffer
 *
 */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
}

/*
 *
 * hv_get_ring_buffer()
 *
 * Get the start of the ring buffer
 */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
	return (void *)ring_info->ring_buffer->buffer;
}

/*
 *
 * hv_get_ring_buffersize()
 *
 * Get the size of the ring buffer
 */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 *
 * hv_get_ring_bufferindices()
 *
 * Get the ring buffer indices packed as a u64: the write index occupies
 * the upper 32 bits and the lower 32 bits (the read index slot) are left
 * as zero.  Used to stamp the "previous indices" trailer on each packet.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 *
 * hv_copyfrom_ringbuffer()
 *
 * Helper routine to copy from the ring buffer into a destination buffer.
 * Assumes there is enough room. Handles wrap-around of the source (ring
 * buffer) side only!!
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info *ring_info,
	void *dest,
	u32 destlen,
	u32 start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected at the src */
	if (destlen > ring_buffer_size - start_read_offset) {
		frag_len = ring_buffer_size - start_read_offset;

		memcpy(dest, ring_buffer + start_read_offset, frag_len);
		memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
	} else
		memcpy(dest, ring_buffer + start_read_offset, destlen);

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}

/*
 *
 * hv_copyto_ringbuffer()
 *
 * Helper routine to copy from a source buffer into the ring buffer.
 * Assumes there is enough room. Handles wrap-around of the destination
 * (ring buffer) side only!!
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info *ring_info,
	u32 start_write_offset,
	void *src,
	u32 srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected! */
	if (srclen > ring_buffer_size - start_write_offset) {
		frag_len = ring_buffer_size - start_write_offset;

		memcpy(ring_buffer + start_write_offset, src, frag_len);
		memcpy(ring_buffer, src + frag_len, srclen - frag_len);
	} else
		memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}
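
/*
 * Worked example (hypothetical numbers): for a 4096-byte ring with
 * start_write_offset == 4000, an 800-byte copy wraps around: the first
 * 96 bytes land at the end of the ring, the remaining 704 bytes at the
 * start, and the returned offset is (4000 + 800) % 4096 == 704.
 */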

/*
 *
 * hv_ringbuffer_get_debuginfo()
 *
 * Get various debug metrics for the specified ring buffer
 *
 */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}

/*
 *
 * hv_ringbuffer_init()
 *
 * Initialize the ring buffer
 *
 */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       void *buffer, u32 buflen)
{
	if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
		return -EINVAL;

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	ring_info->ring_size = buflen;
	ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
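
/*
 * Layout note: the caller hands in a page-aligned buffer of buflen bytes;
 * the first PAGE_SIZE bytes hold the struct hv_ring_buffer header (read
 * and write indices, interrupt_mask, pending_send_sz) and the remaining
 * buflen - PAGE_SIZE bytes form the ring_datasize data area.
 */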

/*
 *
 * hv_ringbuffer_cleanup()
 *
 * Cleanup the ring buffer
 *
 */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

/*
 *
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer
 *
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
			struct scatterlist *sglist, u32 sgcount, bool *signal)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;

	struct scatterlist *sg;
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags;

	for_each_sg(sglist, sg, sgcount, i)
	{
		totalbytes_towrite += sg->length;
	}

	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(outring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for_each_sg(sglist, sg, sgcount, i)
	{
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   sg_virt(sg),
							   sg->length);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	*signal = hv_need_to_signal(old_write, outring_info);
	return 0;
}
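
/*
 * Usage sketch (not part of this file, caller-side names assumed): a
 * sender such as vmbus_sendpacket() is expected to deliver the signal
 * itself when *signal comes back true, roughly:
 *
 *	bool signal = false;
 *
 *	ret = hv_ringbuffer_write(&channel->outbound, bufferlist,
 *				  sgcount, &signal);
 *	if (ret == 0 && signal)
 *		vmbus_setevent(channel);
 */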

/*
 *
 * hv_ringbuffer_peek()
 *
 * Read without advancing the read index
 *
 */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *inring_info,
		       void *buffer, u32 buflen)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	unsigned long flags;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Convert to byte offset */
	next_read_location = hv_get_next_read_location(inring_info);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    buflen,
						    next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	return 0;
}

/*
 *
 * hv_ringbuffer_read()
 *
 * Read and advance the read index
 *
 */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
		       u32 buflen, u32 offset, bool *signal)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	unsigned long flags;
	u32 old_read;

	if (buflen <= 0)
		return -EINVAL;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	old_read = bytes_avail_toread;

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);

		return -EAGAIN;
	}

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    buflen,
						    next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    &prev_indices,
						    sizeof(u64),
						    next_read_location);

	/*
	 * Make sure all reads are done before we update the read index,
	 * since the writer may start writing to the read area once the
	 * read index is updated.
	 */
	mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	*signal = hv_need_to_signal_on_read(old_read, inring_info);

	return 0;
}
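
/*
 * Usage sketch (not part of this file, caller-side names assumed): a
 * consumer such as vmbus_recvpacket() typically peeks at the fixed-size
 * packet descriptor first, then reads the payload while skipping over
 * the descriptor via 'offset', and signals the host if asked to, roughly:
 *
 *	struct vmpacket_descriptor desc;
 *	bool signal = false;
 *
 *	hv_ringbuffer_peek(&channel->inbound, &desc, sizeof(desc));
 *	hv_ringbuffer_read(&channel->inbound, buffer, userlen,
 *			   (desc.offset8 << 3), &signal);
 *	if (signal)
 *		vmbus_setevent(channel);
 */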