/*
 * Copyright 2002, 2003 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v.2
 *
 * Wrappers of assembly checksum functions for x86-64.
 */
#include <asm/checksum.h>
#include <linux/module.h>
#include <asm/smap.h>

/**
 * csum_partial_copy_from_user - Copy and checksum from user space.
 * @src: source address (user space)
 * @dst: destination address
 * @len: number of bytes to be copied.
 * @isum: initial sum that is added into the result (32-bit unfolded)
 * @errp: set to -EFAULT for a bad source address.
 *
 * Returns a 32-bit unfolded checksum of the buffer.
 * src and dst are best aligned to 64 bits.
 */
/*
 * Copy @len bytes from user space @src to kernel buffer @dst while
 * accumulating a 32-bit unfolded checksum into @isum.  On any fault
 * *@errp is set to -EFAULT and @dst is zeroed so no stale kernel data
 * leaks into a partially-filled buffer.
 */
__wsum
csum_partial_copy_from_user(const void __user *src, void *dst,
			    int len, __wsum isum, int *errp)
{
	might_sleep();
	*errp = 0;

	/* Use the idiomatic unlikely(!...) form, matching
	 * csum_partial_copy_to_user(), instead of !likely(...). */
	if (unlikely(!access_ok(VERIFY_READ, src, len)))
		goto out_err;

	/*
	 * Why 6, not 7? To handle odd addresses aligned we
	 * would need to do considerable complications to fix the
	 * checksum which is defined as a 16-bit accumulator. The
	 * fix alignment code is primarily for performance
	 * compatibility with 32-bit and that will handle odd
	 * addresses slowly too.
	 */
	if (unlikely((unsigned long)src & 6)) {
		/* Copy 16-bit chunks until src is 8-byte friendly. */
		while (((unsigned long)src & 6) && len >= 2) {
			__u16 val16;

			if (__get_user(val16, (const __u16 __user *)src))
				goto out_err;

			*(__u16 *)dst = val16;
			/* Fold the halfword into the running sum with
			 * end-around carry. */
			isum = (__force __wsum)add32_with_carry(
					(__force unsigned)isum, val16);
			src += 2;
			dst += 2;
			len -= 2;
		}
	}
	/* Open the user-access window around the assembly bulk copy. */
	stac();
	isum = csum_partial_copy_generic((__force const void *)src,
					 dst, len, isum, errp, NULL);
	clac();

	if (unlikely(*errp))
		goto out_err;

	return isum;

out_err:
	*errp = -EFAULT;
	/* Never expose uninitialized kernel memory to the caller. */
	memset(dst, 0, len);
	return isum;
}
EXPORT_SYMBOL(csum_partial_copy_from_user);

/**
 * csum_partial_copy_to_user - Copy and checksum to user space.
 * @src: source address
 * @dst: destination address (user space)
 * @len: number of bytes to be copied.
 * @isum: initial sum that is added into the result (32-bit unfolded)
 * @errp: set to -EFAULT for a bad destination address.
 *
 * Returns a 32-bit unfolded checksum of the buffer.
 * src and dst are best aligned to 64 bits.
 */
/*
 * Copy @len bytes from kernel buffer @src to user space @dst while
 * accumulating a 32-bit unfolded checksum.  Sets *@errp to -EFAULT on
 * a bad destination; otherwise *@errp is cleared.
 */
__wsum
csum_partial_copy_to_user(const void *src, void __user *dst,
			  int len, __wsum isum, int *errp)
{
	__wsum csum;

	might_sleep();

	if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
		*errp = -EFAULT;
		return 0;
	}

	/* Nudge the destination toward 8-byte alignment two bytes at a
	 * time before handing off to the assembly bulk copy. */
	if (unlikely((unsigned long)dst & 6)) {
		while (((unsigned long)dst & 6) && len >= 2) {
			__u16 word = *(__u16 *)src;

			/* Account for the halfword first; if the store
			 * faults we still return the sum so far. */
			isum = (__force __wsum)add32_with_carry(
					(__force unsigned)isum, word);
			*errp = __put_user(word, (__u16 __user *)dst);
			if (*errp)
				return isum;
			src += 2;
			dst += 2;
			len -= 2;
		}
	}

	*errp = 0;
	/* Open the user-access window for the assembly copy. */
	stac();
	csum = csum_partial_copy_generic(src, (void __force *)dst,
					 len, isum, NULL, errp);
	clac();
	return csum;
}
EXPORT_SYMBOL(csum_partial_copy_to_user);

/**
 * csum_partial_copy_nocheck - Copy and checksum.
 * @src: source address
 * @dst: destination address
 * @len: number of bytes to be copied.
 * @sum: initial sum that is added into the result (32-bit unfolded)
 *
 * Returns a 32-bit unfolded checksum of the buffer.
 */
/*
 * Kernel-to-kernel copy with checksum: no access checks and no error
 * pointers are needed, so both fault arguments are NULL.
 */
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
	__wsum result;

	result = csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
	return result;
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);
2005-04-17 02:20:36 +04:00
2006-11-15 08:20:08 +03:00
/*
 * Compute the folded 16-bit checksum over the IPv6 pseudo-header
 * (source/destination addresses, payload length, next-header protocol)
 * plus the unfolded partial @sum of the payload.
 */
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
			const struct in6_addr *daddr,
			__u32 len, __u8 proto, __wsum sum)
{
	__u64 rest, sum64;

	/* Combine length, protocol and incoming partial sum into one
	 * 64-bit accumulator; these additions cannot overflow 64 bits. */
	rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) +
		(__force __u64)sum;

	/* Add both 128-bit addresses as 64-bit halves; adcq propagates
	 * each carry, and the final adcq $0 folds the last carry in. */
	asm("addq (%[saddr]),%[sum]\n"
	    "adcq 8(%[saddr]),%[sum]\n"
	    "adcq (%[daddr]),%[sum]\n"
	    "adcq 8(%[daddr]),%[sum]\n"
	    "adcq $0,%[sum]\n"
	    : [sum] "=r" (sum64)
	    : "[sum]" (rest), [saddr] "r" (saddr), [daddr] "r" (daddr));

	/* Fold 64 -> 32 bits with end-around carry, then 32 -> 16 and
	 * complement via csum_fold(). */
	return csum_fold(
		(__force __wsum)add32_with_carry(sum64 & 0xffffffff,
						 sum64 >> 32));
}
EXPORT_SYMBOL(csum_ipv6_magic);