/*
 * include/asm-xtensa/checksum.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_CHECKSUM_H
#define _XTENSA_CHECKSUM_H

#include <linux/in6.h>
#include <linux/uaccess.h>
#include <variant/core.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
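
/*
 * Example of chaining csum_partial() over two fragments (documentation
 * only; frag0/frag1 are hypothetical buffers): the running sum from one
 * call feeds the next, and only the final fragment may have odd length:
 *
 *	__wsum sum = csum_partial(frag0, frag0_len, 0);
 *	sum = csum_partial(frag1, frag1_len, sum);
 *	__sum16 check = csum_fold(sum);
 */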

/*
 * the same as csum_partial, but copies from src while it
 * checksums, and handles user-space pointer exceptions correctly, when needed.
 *
 * here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */
asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
					    int len, __wsum sum,
					    int *src_err_ptr, int *dst_err_ptr);
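
/*
 * On a faulting access the corresponding *src_err_ptr or *dst_err_ptr
 * is set to -EFAULT; callers pass NULL for the side that is known-good
 * kernel memory (see the wrappers below).
 */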

/*
 * Note: when you get a NULL pointer exception here this means someone
 * passed in an incorrect kernel address to one of these functions.
 *
 * If you use these functions directly please don't forget the access_ok().
 */
static inline
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
				 int len, __wsum sum)
{
	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
}

static inline
__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
				   int len, __wsum sum, int *err_ptr)
{
	return csum_partial_copy_generic((__force const void *)src, dst,
					 len, sum, err_ptr, NULL);
}
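
/*
 * A minimal usage sketch (hypothetical caller; usrc/kdst are not part
 * of this API): check the user range first, as the note above requires;
 * 'err' becomes -EFAULT if the copy faults:
 *
 *	int err = 0;
 *	__wsum sum = 0;
 *	if (access_ok(VERIFY_READ, usrc, len))
 *		sum = csum_partial_copy_from_user(usrc, kdst, len, sum, &err);
 */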

/*
 * Fold a partial checksum
 */
static __inline__ __sum16 csum_fold(__wsum sum)
{
	unsigned int __dummy;
	__asm__("extui	%1, %0, 16, 16\n\t"
		"extui	%0, %0, 0, 16\n\t"
		"add	%0, %0, %1\n\t"
		"slli	%1, %0, 16\n\t"
		"add	%0, %0, %1\n\t"
		"extui	%0, %0, 16, 16\n\t"
		"neg	%0, %0\n\t"
		"addi	%0, %0, -1\n\t"
		"extui	%0, %0, 0, 16\n\t"
		: "=r" (sum), "=&r" (__dummy)
		: "0" (sum));
	return (__force __sum16)sum;
}
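
/*
 * Worked example (documentation only): for sum = 0x12345678 the fold
 * computes 0x1234 + 0x5678 = 0x68ac, and the one's complement
 * ~0x68ac & 0xffff = 0x9753 is returned.
 */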

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 */
static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int sum, tmp, endaddr;

	__asm__ __volatile__(
		"sub		%0, %0, %0\n\t"
#if XCHAL_HAVE_LOOPS
		"loopgtz	%2, 2f\n\t"
#else
		"beqz		%2, 2f\n\t"
		"slli		%4, %2, 2\n\t"
		"add		%4, %4, %1\n\t"
		"0:\t"
#endif
		"l32i		%3, %1, 0\n\t"
		"add		%0, %0, %3\n\t"
		"bgeu		%0, %3, 1f\n\t"
		"addi		%0, %0, 1\n\t"
		"1:\t"
		"addi		%1, %1, 4\n\t"
#if !XCHAL_HAVE_LOOPS
		"blt		%1, %4, 0b\n\t"
#endif
		"2:\t"
	/* Since the input registers which are loaded with iph and ihl
	   are modified, we must also specify them as outputs, or gcc
	   will assume they contain their original values. */
		: "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp),
		  "=&r" (endaddr)
		: "1" (iph), "2" (ihl)
		: "memory");
	return csum_fold(sum);
}

static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					    __u32 len, __u8 proto,
					    __wsum sum)
{

#ifdef __XTENSA_EL__
	unsigned long len_proto = (len + proto) << 8;
#elif defined(__XTENSA_EB__)
	unsigned long len_proto = len + proto;
#else
# error processor byte order undefined!
#endif
	__asm__("add	%0, %0, %1\n\t"
		"bgeu	%0, %1, 1f\n\t"
		"addi	%0, %0, 1\n\t"
		"1:\t"
		"add	%0, %0, %2\n\t"
		"bgeu	%0, %2, 1f\n\t"
		"addi	%0, %0, 1\n\t"
		"1:\t"
		"add	%0, %0, %3\n\t"
		"bgeu	%0, %3, 1f\n\t"
		"addi	%0, %0, 1\n\t"
		"1:\t"
		: "=r" (sum), "=r" (len_proto)
		: "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum));
	return sum;
}
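
/*
 * Each add/bgeu/addi triple in csum_tcpudp_nofold() is a 32-bit one's
 * complement add: when the add wraps (the result is below the addend),
 * the carry is folded back into the sum.
 */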

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
					    __u32 len, __u8 proto,
					    __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static __inline__ __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}

#define _HAVE_ARCH_IPV6_CSUM
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
					  const struct in6_addr *daddr,
					  __u32 len, __u8 proto,
					  __wsum sum)
{
	unsigned int __dummy;
	__asm__("l32i	%1, %2, 0\n\t"
		"add	%0, %0, %1\n\t"
		"bgeu	%0, %1, 1f\n\t"
		"addi	%0, %0, 1\n\t"
		"1:\t"
		"l32i	%1, %2, 4\n\t"
		"add	%0, %0, %1\n\t"
		"bgeu	%0, %1, 1f\n\t"
		"addi	%0, %0, 1\n\t"
		"1:\t"
		"l32i	%1, %2, 8\n\t"
		"add	%0, %0, %1\n\t"
		"bgeu	%0, %1, 1f\n\t"
		"addi	%0, %0, 1\n\t"
		"1:\t"
		"l32i	%1, %2, 12\n\t"
		"add	%0, %0, %1\n\t"
		"bgeu	%0, %1, 1f\n\t"
		"addi	%0, %0, 1\n\t"
		"1:\t"
		"l32i	%1, %3, 0\n\t"
		"add	%0, %0, %1\n\t"
		"bgeu	%0, %1, 1f\n\t"
		"addi	%0, %0, 1\n\t"
		"1:\t"
		"l32i	%1, %3, 4\n\t"
		"add	%0, %0, %1\n\t"
		"bgeu	%0, %1, 1f\n\t"
		"addi	%0, %0, 1\n\t"
		"1:\t"
		"l32i	%1, %3, 8\n\t"
		"add	%0, %0, %1\n\t"
		"bgeu	%0, %1, 1f\n\t"
		"addi	%0, %0, 1\n\t"
		"1:\t"
		"l32i	%1, %3, 12\n\t"
		"add	%0, %0, %1\n\t"
		"bgeu	%0, %1, 1f\n\t"
		"addi	%0, %0, 1\n\t"
		"1:\t"
		"add	%0, %0, %4\n\t"
		"bgeu	%0, %4, 1f\n\t"
		"addi	%0, %0, 1\n\t"
		"1:\t"
		"add	%0, %0, %5\n\t"
		"bgeu	%0, %5, 1f\n\t"
		"addi	%0, %0, 1\n\t"
		"1:\t"
		: "=r" (sum), "=&r" (__dummy)
		: "r" (saddr), "r" (daddr),
		  "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
		: "memory");
	return csum_fold(sum);
}

/*
 * Copy and checksum to user
 */
#define HAVE_CSUM_COPY_USER
static __inline__ __wsum csum_and_copy_to_user(const void *src,
					       void __user *dst, int len,
					       __wsum sum, int *err_ptr)
{
	if (access_ok(VERIFY_WRITE, dst, len))
		return csum_partial_copy_generic(src, dst, len, sum,
						 NULL, err_ptr);

	if (len)
		*err_ptr = -EFAULT;

	return (__force __wsum)-1; /* invalid checksum */
}
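
/*
 * A minimal usage sketch (hypothetical caller): copy a kernel buffer to
 * user space while checksumming; on a fault 'err' becomes -EFAULT and
 * the returned value is not a valid checksum:
 *
 *	int err = 0;
 *	__wsum sum = csum_and_copy_to_user(kbuf, ubuf, len, 0, &err);
 */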
#endif