2005-08-12 00:25:20 +04:00
/*
 * linux/net/sunrpc/socklib.c
 *
 * Common socket helper routines for RPC client and server
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
2005-11-11 00:01:24 +03:00
# include <linux/compiler.h>
# include <linux/netdevice.h>
# include <linux/skbuff.h>
2005-08-12 00:25:20 +04:00
# include <linux/types.h>
# include <linux/pagemap.h>
# include <linux/udp.h>
# include <linux/sunrpc/xdr.h>
/**
2006-12-06 00:35:41 +03:00
* xdr_skb_read_bits - copy some data bits from skb to internal buffer
2005-08-12 00:25:20 +04:00
* @ desc : sk_buff copy helper
* @ to : copy destination
* @ len : number of bytes to copy
*
* Possibly called several times to iterate over an sk_buff and copy
* data out of it .
*/
2006-12-06 00:35:44 +03:00
size_t xdr_skb_read_bits ( struct xdr_skb_reader * desc , void * to , size_t len )
2005-08-12 00:25:20 +04:00
{
if ( len > desc - > count )
len = desc - > count ;
2006-12-06 00:35:41 +03:00
if ( unlikely ( skb_copy_bits ( desc - > skb , desc - > offset , to , len ) ) )
2005-08-12 00:25:20 +04:00
return 0 ;
desc - > count - = len ;
desc - > offset + = len ;
return len ;
}
2007-09-10 21:45:36 +04:00
EXPORT_SYMBOL_GPL ( xdr_skb_read_bits ) ;
2005-08-12 00:25:20 +04:00
/**
2006-12-06 00:35:41 +03:00
* xdr_skb_read_and_csum_bits - copy and checksum from skb to buffer
2005-08-12 00:25:20 +04:00
* @ desc : sk_buff copy helper
* @ to : copy destination
* @ len : number of bytes to copy
*
* Same as skb_read_bits , but calculate a checksum at the same time .
*/
2006-12-06 00:35:44 +03:00
static size_t xdr_skb_read_and_csum_bits ( struct xdr_skb_reader * desc , void * to , size_t len )
2005-08-12 00:25:20 +04:00
{
2006-11-15 08:36:54 +03:00
unsigned int pos ;
__wsum csum2 ;
2005-08-12 00:25:20 +04:00
if ( len > desc - > count )
len = desc - > count ;
pos = desc - > offset ;
csum2 = skb_copy_and_csum_bits ( desc - > skb , pos , to , len , 0 ) ;
desc - > csum = csum_block_add ( desc - > csum , csum2 , pos ) ;
desc - > count - = len ;
desc - > offset + = len ;
return len ;
}
/**
 * xdr_partial_copy_from_skb - copy data out of an skb
 * @xdr: target XDR buffer
 * @base: starting offset
 * @desc: sk_buff copy helper
 * @copy_actor: virtual method for copying data
 *
 * Copies data from @desc into the head, pages and tail of @xdr in
 * order, skipping the first @base bytes of the XDR buffer.  Returns
 * the number of bytes copied, or -ENOMEM if a lazily-allocated page
 * could not be obtained before anything was copied.
 */
ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor)
{
	struct page **ppage = xdr->pages;
	unsigned int len, pglen = xdr->page_len;
	ssize_t copied = 0;
	size_t ret;

	/* First the head iovec, or skip over it if @base lies beyond it. */
	len = xdr->head[0].iov_len;
	if (base < len) {
		len -= base;
		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
		copied += ret;
		/* Stop early on a short copy or when the source is exhausted. */
		if (ret != len || !desc->count)
			goto out;
		base = 0;
	} else
		base -= len;

	if (unlikely(pglen == 0))
		goto copy_tail;
	if (unlikely(base >= pglen)) {
		/* @base skips all page data; only the tail remains. */
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		/* Fold the offsets into a page index plus in-page offset. */
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		base &= ~PAGE_CACHE_MASK;
	}
	do {
		char *kaddr;

		/* ACL likes to be lazy in allocating pages - ACLs
		 * are small by default but can get huge. */
		if (unlikely(*ppage == NULL)) {
			*ppage = alloc_page(GFP_ATOMIC);
			if (unlikely(*ppage == NULL)) {
				/* Report -ENOMEM only if nothing was copied yet. */
				if (copied == 0)
					copied = -ENOMEM;
				goto out;
			}
		}

		len = PAGE_CACHE_SIZE;
		kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
		if (base) {
			/* First page may start at an in-page offset. */
			len -= base;
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr + base, len);
			base = 0;
		} else {
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr, len);
		}
		flush_dcache_page(*ppage);
		kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
		copied += ret;
		if (ret != len || !desc->count)
			goto out;
		ppage++;
	} while ((pglen -= len) != 0);

copy_tail:
	/* Whatever remains of @base indexes into the tail iovec. */
	len = xdr->tail[0].iov_len;
	if (base < len)
		copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);

out:
	return copied;
}
EXPORT_SYMBOL_GPL(xdr_partial_copy_from_skb);
2005-08-12 00:25:20 +04:00
/**
* csum_partial_copy_to_xdr - checksum and copy data
* @ xdr : target XDR buffer
* @ skb : source skb
*
* We have set things up such that we perform the checksum of the UDP
* packet in parallel with the copies into the RPC client iovec . - DaveM
*/
int csum_partial_copy_to_xdr ( struct xdr_buf * xdr , struct sk_buff * skb )
{
2006-12-06 00:35:44 +03:00
struct xdr_skb_reader desc ;
2005-08-12 00:25:20 +04:00
desc . skb = skb ;
desc . offset = sizeof ( struct udphdr ) ;
desc . count = skb - > len - desc . offset ;
2007-04-09 22:59:39 +04:00
if ( skb_csum_unnecessary ( skb ) )
2005-08-12 00:25:20 +04:00
goto no_checksum ;
desc . csum = csum_partial ( skb - > data , desc . offset , skb - > csum ) ;
2006-12-06 00:35:41 +03:00
if ( xdr_partial_copy_from_skb ( xdr , 0 , & desc , xdr_skb_read_and_csum_bits ) < 0 )
2005-08-12 00:25:20 +04:00
return - 1 ;
if ( desc . offset ! = skb - > len ) {
2006-11-15 08:36:54 +03:00
__wsum csum2 ;
2005-08-12 00:25:20 +04:00
csum2 = skb_checksum ( skb , desc . offset , skb - > len - desc . offset , 0 ) ;
desc . csum = csum_block_add ( desc . csum , csum2 , desc . offset ) ;
}
if ( desc . count )
return - 1 ;
2006-11-15 08:24:49 +03:00
if ( csum_fold ( desc . csum ) )
2005-08-12 00:25:20 +04:00
return - 1 ;
2006-08-30 03:44:56 +04:00
if ( unlikely ( skb - > ip_summed = = CHECKSUM_COMPLETE ) )
2005-11-11 00:01:24 +03:00
netdev_rx_csum_fault ( skb - > dev ) ;
2005-08-12 00:25:20 +04:00
return 0 ;
no_checksum :
2006-12-06 00:35:41 +03:00
if ( xdr_partial_copy_from_skb ( xdr , 0 , & desc , xdr_skb_read_bits ) < 0 )
2005-08-12 00:25:20 +04:00
return - 1 ;
if ( desc . count )
return - 1 ;
return 0 ;
}
2007-09-10 21:45:36 +04:00
EXPORT_SYMBOL_GPL ( csum_partial_copy_to_xdr ) ;