/*
 * Copyright 2008, 2009 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
*
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
*
*/

#ifndef _VNIC_RQ_H_
#define _VNIC_RQ_H_

#include <linux/pci.h>

#include "vnic_dev.h"
#include "vnic_cq.h"

/* Receive queue control */
struct vnic_rq_ctrl {
	u64 ring_base;			/* 0x00 */
	u32 ring_size;			/* 0x08 */
	u32 pad0;
	u32 posted_index;		/* 0x10 */
	u32 pad1;
	u32 cq_index;			/* 0x18 */
	u32 pad2;
	u32 enable;			/* 0x20 */
	u32 pad3;
	u32 running;			/* 0x28 */
	u32 pad4;
	u32 fetch_index;		/* 0x30 */
	u32 pad5;
	u32 error_interrupt_enable;	/* 0x38 */
	u32 pad6;
	u32 error_interrupt_offset;	/* 0x40 */
	u32 pad7;
	u32 error_status;		/* 0x48 */
	u32 pad8;
	u32 dropped_packet_count;	/* 0x50 */
	u32 pad9;
	u32 dropped_packet_count_rc;	/* 0x58 */
	u32 pad10;
};
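
/* Note: each 32-bit register above is followed by a pad word, so every
 * register occupies an 8-byte slot, matching the 0x08 stride of the offsets
 * noted in the comments.
 */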

/* Break the vnic_rq_buf allocations into blocks of 64 entries */
#define VNIC_RQ_BUF_BLK_ENTRIES 64
#define VNIC_RQ_BUF_BLK_SZ \
	(VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf))
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
	DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES)
#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
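
/* Worked example of the block math above: a 4096-entry ring needs
 * DIV_ROUND_UP(4096, 64) = 64 buffer blocks, so bufs[] below holds at most
 * VNIC_RQ_BUF_BLKS_MAX = 64 pointers, each to a VNIC_RQ_BUF_BLK_SZ-sized
 * block of 64 struct vnic_rq_buf entries.
 */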

struct vnic_rq_buf {
	struct vnic_rq_buf *next;
	dma_addr_t dma_addr;
	void *os_buf;
	unsigned int os_buf_index;
	unsigned int len;
	unsigned int index;
	void *desc;
};

struct vnic_rq {
	unsigned int index;
	struct vnic_dev *vdev;
	struct vnic_rq_ctrl __iomem *ctrl;	/* memory-mapped */
	struct vnic_dev_ring ring;
	struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
	struct vnic_rq_buf *to_use;
	struct vnic_rq_buf *to_clean;
	void *os_buf_head;
	unsigned int pkts_outstanding;
};

static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
{
	/* how many does SW own? */
	return rq->ring.desc_avail;
}

static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
{
	/* how many does HW own? */
	return rq->ring.desc_count - rq->ring.desc_avail - 1;
}
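
/* Note (interpretation, not stated in the original): the "- 1" above appears
 * to account for one descriptor the ring keeps permanently unposted, so the
 * posted index never wraps onto the fetch index and a full ring stays
 * distinguishable from an empty one.
 */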

static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
{
	return rq->to_use->desc;
}

static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
{
	return rq->to_use->index;
}

static inline void vnic_rq_post(struct vnic_rq *rq,
	void *os_buf, unsigned int os_buf_index,
	dma_addr_t dma_addr, unsigned int len)
{
	struct vnic_rq_buf *buf = rq->to_use;

	buf->os_buf = os_buf;
	buf->os_buf_index = os_buf_index;
	buf->dma_addr = dma_addr;
	buf->len = len;

	buf = buf->next;
	rq->to_use = buf;
	rq->ring.desc_avail--;

	/* Move the posted_index every nth descriptor
	 */

#ifndef VNIC_RQ_RETURN_RATE
#define VNIC_RQ_RETURN_RATE		0xf	/* keep 2^n - 1 */
#endif

	if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
		/* Adding write memory barrier prevents compiler and/or CPU
		 * reordering, thus avoiding descriptor posting before
		 * descriptor is initialized. Otherwise, hardware can read
		 * stale descriptor fields.
		 */
		wmb();
		iowrite32(buf->index, &rq->ctrl->posted_index);
	}
}
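
/* Illustrative usage (a sketch, not part of this API): the caller is expected
 * to write the device-specific RQ descriptor returned by vnic_rq_next_desc()
 * before posting, roughly:
 *
 *	void *desc = vnic_rq_next_desc(rq);
 *	... encode dma_addr and len into *desc in the NIC's RQ descriptor
 *	    format (e.g. via the driver's rq_enet_desc helpers) ...
 *	vnic_rq_post(rq, skb, 0, dma_addr, len);
 *
 * where skb, dma_addr and len come from the caller's buffer allocation and
 * DMA mapping; those details are outside this header.
 */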

static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
{
	return ((rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0);
}

static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
{
	rq->ring.desc_avail += count;
}

enum desc_return_options {
	VNIC_RQ_RETURN_DESC,
	VNIC_RQ_DEFER_RETURN_DESC,
};

static inline void vnic_rq_service(struct vnic_rq *rq,
	struct cq_desc *cq_desc, u16 completed_index,
	int desc_return, void (*buf_service)(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque), void *opaque)
{
	struct vnic_rq_buf *buf;
	int skipped;

	buf = rq->to_clean;
	while (1) {

		skipped = (buf->index != completed_index);

		(*buf_service)(rq, cq_desc, buf, skipped, opaque);

		if (desc_return == VNIC_RQ_RETURN_DESC)
			rq->ring.desc_avail++;

		rq->to_clean = buf->next;

		if (!skipped)
			break;

		buf = rq->to_clean;
	}
}
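
/* Illustrative completion handling (a sketch under assumptions, not part of
 * this header): the CQ service loop typically hands each receive completion
 * to a driver callback, which walks the RQ up to the completed descriptor:
 *
 *	static void my_rq_buf_service(struct vnic_rq *rq,
 *		struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
 *		int skipped, void *opaque)
 *	{
 *		... unmap buf->dma_addr and hand buf->os_buf to the stack
 *		    when !skipped, or release it when skipped ...
 *	}
 *
 *	vnic_rq_service(rq, cq_desc, completed_index,
 *		VNIC_RQ_RETURN_DESC, my_rq_buf_service, NULL);
 *
 * my_rq_buf_service and the unmap/indicate details are assumptions about the
 * caller; completed_index comes from the CQ descriptor.
 */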

static inline int vnic_rq_fill(struct vnic_rq *rq,
	int (*buf_fill)(struct vnic_rq *rq))
{
	int err;

	while (vnic_rq_desc_avail(rq) > 0) {

		err = (*buf_fill)(rq);
		if (err)
			return err;
	}

	return 0;
}
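
/* Illustrative refill (a sketch under assumptions): buf_fill is a driver
 * callback that allocates and DMA-maps one buffer and posts it with
 * vnic_rq_post(); vnic_rq_fill() keeps calling it until the ring is full or
 * the callback fails, e.g.:
 *
 *	err = vnic_rq_fill(rq, my_rq_alloc_buf);
 *
 * my_rq_alloc_buf is an assumed caller-side helper, not defined here.
 */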

void vnic_rq_free(struct vnic_rq *rq);
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size);
void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
	unsigned int fetch_index, unsigned int posted_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
unsigned int vnic_rq_error_status(struct vnic_rq *rq);
void vnic_rq_enable(struct vnic_rq *rq);
int vnic_rq_disable(struct vnic_rq *rq);
void vnic_rq_clean(struct vnic_rq *rq,
	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
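
/* Typical lifecycle (a sketch based on the prototypes above; exact index and
 * interrupt arguments are the caller's choice, not defined here):
 *
 *	vnic_rq_alloc(vdev, rq, index, desc_count, desc_size);
 *	vnic_rq_init(rq, cq_index, error_interrupt_enable,
 *		error_interrupt_offset);
 *	vnic_rq_fill(rq, my_rq_alloc_buf);
 *	vnic_rq_enable(rq);
 *	...
 *	vnic_rq_disable(rq);
 *	vnic_rq_clean(rq, my_rq_free_buf);
 *	vnic_rq_free(rq);
 *
 * my_rq_alloc_buf and my_rq_free_buf stand in for driver-provided callbacks.
 */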

#endif /* _VNIC_RQ_H_ */