/*
 * linux/net/sunrpc/svcsock.c
 *
 * These are the RPC server socket internals.
 *
 * The server scheduling algorithm does not always distribute the load
 * evenly when servicing a single client. May need to modify the
 * svc_xprt_enqueue procedure...
 *
 * TCP support is largely untested and may be a little slow. The problem
 * is that we currently do two separate recvfrom's, one for the 4-byte
 * record length, and the second for the actual record. This could possibly
 * be improved by always reading a minimum size of around 100 bytes and
 * tucking any superfluous bytes away in a temporary store. Still, that
 * leaves write requests out in the rain. An alternative may be to peek at
 * the first skb in the queue, and if it matches the next TCP sequence
 * number, to extract the record marker. Yuck.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/xprt.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
					 int *errp, int flags);
static void		svc_udp_data_ready(struct sock *, int);
static int		svc_udp_recvfrom(struct svc_rqst *);
static int		svc_udp_sendto(struct svc_rqst *);
static void		svc_sock_detach(struct svc_xprt *);
static void		svc_tcp_sock_detach(struct svc_xprt *);
static void		svc_sock_free(struct svc_xprt *);

static struct svc_xprt *svc_create_socket(struct svc_serv *, int,
					  struct net *, struct sockaddr *,
					  int, int);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key svc_key[2];
static struct lock_class_key svc_slock_key[2];

static void svc_reclassify_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;

	BUG_ON(sock_owned_by_user(sk));
	switch (sk->sk_family) {
	case AF_INET:
		sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
					      &svc_slock_key[0],
					      "sk_xprt.xpt_lock-AF_INET-NFSD",
					      &svc_key[0]);
		break;

	case AF_INET6:
		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
					      &svc_slock_key[1],
					      "sk_xprt.xpt_lock-AF_INET6-NFSD",
					      &svc_key[1]);
		break;

	default:
		BUG();
	}
}
#else
static void svc_reclassify_socket(struct socket *sock)
{
}
#endif

/*
 * Release an skbuff after use
 */
static void svc_release_skb(struct svc_rqst *rqstp)
{
	struct sk_buff *skb = rqstp->rq_xprt_ctxt;

	if (skb) {
		struct svc_sock *svsk =
			container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
		rqstp->rq_xprt_ctxt = NULL;

		dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
		skb_free_datagram_locked(svsk->sk_sk, skb);
	}
}

union svc_pktinfo_u {
	struct in_pktinfo pkti;
	struct in6_pktinfo pkti6;
};
#define SVC_PKTINFO_SPACE \
	CMSG_SPACE(sizeof(union svc_pktinfo_u))

static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
{
	struct svc_sock *svsk =
		container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
	switch (svsk->sk_sk->sk_family) {
	case AF_INET: {
			struct in_pktinfo *pki = CMSG_DATA(cmh);

			cmh->cmsg_level = SOL_IP;
			cmh->cmsg_type = IP_PKTINFO;
			pki->ipi_ifindex = 0;
			pki->ipi_spec_dst.s_addr = rqstp->rq_daddr.addr.s_addr;
			cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		}
		break;

	case AF_INET6: {
			struct in6_pktinfo *pki = CMSG_DATA(cmh);

			cmh->cmsg_level = SOL_IPV6;
			cmh->cmsg_type = IPV6_PKTINFO;
			pki->ipi6_ifindex = 0;
			ipv6_addr_copy(&pki->ipi6_addr,
				       &rqstp->rq_daddr.addr6);
			cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		}
		break;
	}
}

/*
 * send routine intended to be shared by the fore- and back-channel
 */
int svc_send_common(struct socket *sock, struct xdr_buf *xdr,
		    struct page *headpage, unsigned long headoffset,
		    struct page *tailpage, unsigned long tailoffset)
{
	int		result;
	int		size;
	struct page	**ppage = xdr->pages;
	size_t		base = xdr->page_base;
	unsigned int	pglen = xdr->page_len;
	unsigned int	flags = MSG_MORE;
	int		slen;
	int		len = 0;

	slen = xdr->len;

	/* send head */
	if (slen == xdr->head[0].iov_len)
		flags = 0;
	len = kernel_sendpage(sock, headpage, headoffset,
			      xdr->head[0].iov_len, flags);
	if (len != xdr->head[0].iov_len)
		goto out;
	slen -= xdr->head[0].iov_len;
	if (slen == 0)
		goto out;

	/* send page data */
	size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
	while (pglen > 0) {
		if (slen == size)
			flags = 0;
		result = kernel_sendpage(sock, *ppage, base, size, flags);
		if (result > 0)
			len += result;
		if (result != size)
			goto out;
		slen -= size;
		pglen -= size;
		size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
		base = 0;
		ppage++;
	}

	/* send tail */
	if (xdr->tail[0].iov_len) {
		result = kernel_sendpage(sock, tailpage, tailoffset,
					 xdr->tail[0].iov_len, 0);
		if (result > 0)
			len += result;
	}

out:
	return len;
}

/*
 * Generic sendto routine
 */
static int svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
{
	struct svc_sock	*svsk =
		container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
	struct socket	*sock = svsk->sk_sock;
	union {
		struct cmsghdr	hdr;
		long		all[SVC_PKTINFO_SPACE / sizeof(long)];
	} buffer;
	struct cmsghdr *cmh = &buffer.hdr;
	int		len = 0;
	unsigned long tailoff;
	unsigned long headoff;
	RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);

	if (rqstp->rq_prot == IPPROTO_UDP) {
		struct msghdr msg = {
			.msg_name	= &rqstp->rq_addr,
			.msg_namelen	= rqstp->rq_addrlen,
			.msg_control	= cmh,
			.msg_controllen	= sizeof(buffer),
			.msg_flags	= MSG_MORE,
		};

		svc_set_cmsg_data(rqstp, cmh);

		if (sock_sendmsg(sock, &msg, 0) < 0)
			goto out;
	}

	tailoff = ((unsigned long)xdr->tail[0].iov_base) & (PAGE_SIZE-1);
	headoff = 0;
	len = svc_send_common(sock, xdr, rqstp->rq_respages[0], headoff,
			      rqstp->rq_respages[0], tailoff);

out:
	dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %s)\n",
		svsk, xdr->head[0].iov_base, xdr->head[0].iov_len,
		xdr->len, len, svc_print_addr(rqstp, buf, sizeof(buf)));

	return len;
}

/*
 * Report socket names for nfsdfs
 */
static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining)
{
	const struct sock *sk = svsk->sk_sk;
	const char *proto_name = sk->sk_protocol ==
					IPPROTO_UDP ? "udp" : "tcp";
	int len;

	switch (sk->sk_family) {
	case PF_INET:
		len = snprintf(buf, remaining, "ipv4 %s %pI4 %d\n",
				proto_name,
				&inet_sk(sk)->inet_rcv_saddr,
				inet_sk(sk)->inet_num);
		break;
	case PF_INET6:
		len = snprintf(buf, remaining, "ipv6 %s %pI6 %d\n",
				proto_name,
				&inet6_sk(sk)->rcv_saddr,
				inet_sk(sk)->inet_num);
		break;
	default:
		len = snprintf(buf, remaining, "*unknown-%d*\n",
				sk->sk_family);
	}

	if (len >= remaining) {
		*buf = '\0';
		return -ENAMETOOLONG;
	}
	return len;
}

/**
 * svc_sock_names - construct a list of listener names in a string
 * @serv: pointer to RPC service
 * @buf: pointer to a buffer to fill in with socket names
 * @buflen: size of the buffer to be filled
 * @toclose: pointer to '\0'-terminated C string containing the name
 *		of a listener to be closed
 *
 * Fills in @buf with a '\n'-separated list of names of listener
 * sockets.  If @toclose is not NULL, the socket named by @toclose
 * is closed, and is not included in the output list.
 *
 * Returns positive length of the socket name string, or a negative
 * errno value on error.
 */
int svc_sock_names(struct svc_serv *serv, char *buf, const size_t buflen,
		   const char *toclose)
{
	struct svc_sock *svsk, *closesk = NULL;
	int len = 0;

	if (!serv)
		return 0;

	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list) {
		int onelen = svc_one_sock_name(svsk, buf + len, buflen - len);
		if (onelen < 0) {
			len = onelen;
			break;
		}
		if (toclose && strcmp(toclose, buf + len) == 0)
			closesk = svsk;
		else
			len += onelen;
	}
	spin_unlock_bh(&serv->sv_lock);

	if (closesk)
		/* Should unregister with portmap, but you cannot
		 * unregister just one protocol...
		 */
		svc_close_xprt(&closesk->sk_xprt);
	else if (toclose)
		return -ENOENT;
	return len;
}
EXPORT_SYMBOL_GPL(svc_sock_names);

/*
 * Check input queue length
 */
static int svc_recv_available(struct svc_sock *svsk)
{
	struct socket	*sock = svsk->sk_sock;
	int		avail, err;

	err = kernel_sock_ioctl(sock, TIOCINQ, (unsigned long) &avail);

	return (err >= 0) ? avail : err;
}

/*
 * Generic recvfrom routine.
 */
static int svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr,
			int buflen)
{
	struct svc_sock *svsk =
		container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
	struct msghdr msg = {
		.msg_flags	= MSG_DONTWAIT,
	};
	int len;

	rqstp->rq_xprt_hlen = 0;

	len = kernel_recvmsg(svsk->sk_sock, &msg, iov, nr, buflen,
				msg.msg_flags);

	dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n",
		svsk, iov[0].iov_base, iov[0].iov_len, len);
	return len;
}

/*
 * Set socket snd and rcv buffer lengths
 */
static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
				unsigned int rcv)
{
#if 0
	mm_segment_t	oldfs;
	oldfs = get_fs(); set_fs(KERNEL_DS);
	sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
			(char *)&snd, sizeof(snd));
	sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
			(char *)&rcv, sizeof(rcv));
#else
	/* sock_setsockopt limits use to sysctl_?mem_max,
	 * which isn't acceptable.  Until that is made conditional
	 * on not having CAP_SYS_RESOURCE or similar, we go direct...
	 * DaveM said I could!
	 */
	lock_sock(sock->sk);
	sock->sk->sk_sndbuf = snd * 2;
	sock->sk->sk_rcvbuf = rcv * 2;
	sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
	sock->sk->sk_write_space(sock->sk);
	release_sock(sock->sk);
#endif
}

/*
 * INET callback when data has been received on the socket.
 */
static void svc_udp_data_ready(struct sock *sk, int count)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	if (svsk) {
		dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
			svsk, sk, count,
			test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
		svc_xprt_enqueue(&svsk->sk_xprt);
	}
	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
		wake_up_interruptible(sk_sleep(sk));
}

/*
 * INET callback when space is newly available on the socket.
 */
static void svc_write_space(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)(sk->sk_user_data);

	if (svsk) {
		dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
			svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
		svc_xprt_enqueue(&svsk->sk_xprt);
	}

	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) {
		dprintk("RPC svc_write_space: someone sleeping on %p\n",
			svsk);
		wake_up_interruptible(sk_sleep(sk));
	}
}

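/*
 * TCP write_space callback: clear SOCK_NOSPACE once the stream has
 * enough room again, then fall through to the common handling above.
 */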
static void svc_tcp_write_space(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;

	if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock)
		clear_bit(SOCK_NOSPACE, &sock->flags);
	svc_write_space(sk);
}

/*
 * See net/ipv6/ip_sockglue.c : ip_cmsg_recv_pktinfo
 */
static int svc_udp_get_dest_address4(struct svc_rqst *rqstp,
				     struct cmsghdr *cmh)
{
	struct in_pktinfo *pki = CMSG_DATA(cmh);
	if (cmh->cmsg_type != IP_PKTINFO)
		return 0;
	rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr;
	return 1;
}

/*
 * See net/ipv6/datagram.c : datagram_recv_ctl
 */
static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,
				     struct cmsghdr *cmh)
{
	struct in6_pktinfo *pki = CMSG_DATA(cmh);
	if (cmh->cmsg_type != IPV6_PKTINFO)
		return 0;
	ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr);
	return 1;
}

/*
 * Copy the UDP datagram's destination address to the rqstp structure.
 * The 'destination' address in this case is the address to which the
 * peer sent the datagram, i.e. our local address.  For multihomed
 * hosts, this can change from msg to msg. Note that only the IP
 * address changes, the port number should remain the same.
 */
static int svc_udp_get_dest_address(struct svc_rqst *rqstp,
				    struct cmsghdr *cmh)
{
	switch (cmh->cmsg_level) {
	case SOL_IP:
		return svc_udp_get_dest_address4(rqstp, cmh);
	case SOL_IPV6:
		return svc_udp_get_dest_address6(rqstp, cmh);
	}

	return 0;
}

/*
 * Receive a datagram from a UDP socket.
 */
static int svc_udp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk =
		container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
	struct svc_serv	*serv = svsk->sk_xprt.xpt_server;
	struct sk_buff	*skb;
	union {
		struct cmsghdr	hdr;
		long		all[SVC_PKTINFO_SPACE / sizeof(long)];
	} buffer;
	struct cmsghdr *cmh = &buffer.hdr;
	struct msghdr msg = {
		.msg_name = svc_addr(rqstp),
		.msg_control = cmh,
		.msg_controllen = sizeof(buffer),
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len;
	int err;

	if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
	    /* udp sockets need large rcvbuf as all pending
	     * requests are still in that buffer.  sndbuf must
	     * also be large enough that there is enough space
	     * for one reply per thread.  We count all threads
	     * rather than threads in a particular pool, which
	     * provides an upper bound on the number of threads
	     * which will access the socket.
	     */
	    svc_sock_setbufsize(svsk->sk_sock,
				(serv->sv_nrthreads+3) * serv->sv_max_mesg,
				(serv->sv_nrthreads+3) * serv->sv_max_mesg);

	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
	skb = NULL;
	err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
			     0, 0, MSG_PEEK | MSG_DONTWAIT);
	if (err >= 0)
		skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err);

	if (skb == NULL) {
		if (err != -EAGAIN) {
			/* possibly an icmp error */
			dprintk("svc: recvfrom returned error %d\n", -err);
			set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
		}
		return -EAGAIN;
	}
	len = svc_addr_len(svc_addr(rqstp));
	if (len == 0)
		return -EAFNOSUPPORT;
	rqstp->rq_addrlen = len;
	if (skb->tstamp.tv64 == 0) {
		skb->tstamp = ktime_get_real();
		/* Don't enable netstamp, sunrpc doesn't
		   need that much accuracy */
	}
	svsk->sk_sk->sk_stamp = skb->tstamp;
	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */

	len  = skb->len - sizeof(struct udphdr);
	rqstp->rq_arg.len = len;

	rqstp->rq_prot = IPPROTO_UDP;

	if (!svc_udp_get_dest_address(rqstp, cmh)) {
		if (net_ratelimit())
			printk(KERN_WARNING
				"svc: received unknown control message %d/%d; "
				"dropping RPC reply datagram\n",
					cmh->cmsg_level, cmh->cmsg_type);
		skb_free_datagram_locked(svsk->sk_sk, skb);
		return 0;
	}

	if (skb_is_nonlinear(skb)) {
		/* we have to copy */
		local_bh_disable();
		if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
			local_bh_enable();
			/* checksum error */
			skb_free_datagram_locked(svsk->sk_sk, skb);
			return 0;
		}
		local_bh_enable();
		skb_free_datagram_locked(svsk->sk_sk, skb);
	} else {
		/* we can use it in-place */
		rqstp->rq_arg.head[0].iov_base = skb->data +
			sizeof(struct udphdr);
		rqstp->rq_arg.head[0].iov_len = len;
		if (skb_checksum_complete(skb)) {
			skb_free_datagram_locked(svsk->sk_sk, skb);
			return 0;
		}
		rqstp->rq_xprt_ctxt = skb;
	}

	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
		rqstp->rq_respages = rqstp->rq_pages+1;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
		rqstp->rq_respages = rqstp->rq_pages + 1 +
			DIV_ROUND_UP(rqstp->rq_arg.page_len, PAGE_SIZE);
	}

	if (serv->sv_stats)
		serv->sv_stats->netudpcnt++;

	return len;
}
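
/*
 * Send a reply on a UDP socket.  An -ECONNREFUSED here is an ICMP error
 * triggered by an earlier request, so retry the send once.
 */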
static int
svc_udp_sendto(struct svc_rqst *rqstp)
{
	int		error;

	error = svc_sendto(rqstp, &rqstp->rq_res);
	if (error == -ECONNREFUSED)
		/* ICMP error on earlier request. */
		error = svc_sendto(rqstp, &rqstp->rq_res);

	return error;
}
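
/*
 * UDP replies carry no record marker, so there is nothing to
 * reserve in the reply header.
 */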
static void svc_udp_prep_reply_hdr(struct svc_rqst *rqstp)
{
}
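
/*
 * Report whether the UDP socket has enough send space left for another
 * worst-case reply, given what has already been reserved.
 */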
static int svc_udp_has_wspace(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct svc_serv	*serv = xprt->xpt_server;
	unsigned long required;

	/*
	 * Set the SOCK_NOSPACE flag before checking the available
	 * sock space.
	 */
	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg;
	if (required*2 > sock_wspace(svsk->sk_sk))
		return 0;
	clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	return 1;
}

static struct svc_xprt *svc_udp_accept(struct svc_xprt *xprt)
{
	BUG();
	return NULL;
}

static struct svc_xprt *svc_udp_create(struct svc_serv *serv,
				       struct net *net,
				       struct sockaddr *sa, int salen,
				       int flags)
{
	return svc_create_socket(serv, IPPROTO_UDP, net, sa, salen, flags);
}

static struct svc_xprt_ops svc_udp_ops = {
	.xpo_create = svc_udp_create,
	.xpo_recvfrom = svc_udp_recvfrom,
	.xpo_sendto = svc_udp_sendto,
	.xpo_release_rqst = svc_release_skb,
	.xpo_detach = svc_sock_detach,
	.xpo_free = svc_sock_free,
	.xpo_prep_reply_hdr = svc_udp_prep_reply_hdr,
	.xpo_has_wspace = svc_udp_has_wspace,
	.xpo_accept = svc_udp_accept,
};

static struct svc_xprt_class svc_udp_class = {
	.xcl_name = "udp",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_udp_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_UDP,
};
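
/*
 * Set up a newly created UDP svc_sock: install the data_ready and
 * write_space callbacks, give the socket initial buffer sizes, and ask
 * for packet-info control messages so replies go out from the address
 * the request arrived on.
 */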
static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
{
	int err, level, optname, one = 1;

	svc_xprt_init(&svc_udp_class, &svsk->sk_xprt, serv);
	clear_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags);
	svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
	svsk->sk_sk->sk_write_space = svc_write_space;

	/* the initial setting must have enough space to
	 * receive and respond to one request.
	 * svc_udp_recvfrom will re-adjust if necessary
	 */
	svc_sock_setbufsize(svsk->sk_sock,
			    3 * svsk->sk_xprt.xpt_server->sv_max_mesg,
			    3 * svsk->sk_xprt.xpt_server->sv_max_mesg);

	/* data might have come in before data_ready set up */
	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
	set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);

	/* make sure we get destination address info */
	switch (svsk->sk_sk->sk_family) {
	case AF_INET:
		level = SOL_IP;
		optname = IP_PKTINFO;
		break;
	case AF_INET6:
		level = SOL_IPV6;
		optname = IPV6_RECVPKTINFO;
		break;
	default:
		BUG();
	}
	err = kernel_setsockopt(svsk->sk_sock, level, optname,
					(char *)&one, sizeof(one));
	dprintk("svc: kernel_setsockopt returned %d\n", err);
}

/*
 * A data_ready event on a listening socket means there's a connection
 * pending. Do not use state_change as a substitute for it.
 */
static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (listen) state change %d\n",
		sk, sk->sk_state);

	/*
	 * This callback may be called twice when a new connection
	 * is established, as a child socket inherits everything
	 * from a parent LISTEN socket.
	 * 1) data_ready method of the parent socket will be called
	 *    when one of the child sockets becomes ESTABLISHED.
	 * 2) data_ready method of the child socket may be called
	 *    when it receives data before the socket is accepted.
	 * In case of 2, we should ignore it silently.
	 */
	if (sk->sk_state == TCP_LISTEN) {
		if (svsk) {
			set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
			svc_xprt_enqueue(&svsk->sk_xprt);
		} else
			printk("svc: socket %p: no user data\n", sk);
	}

	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
		wake_up_interruptible_all(sk_sleep(sk));
}

/*
 * A state change on a connected socket means it's dying or dead.
 */
static void svc_tcp_state_change(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
		sk, sk->sk_state, sk->sk_user_data);

	if (!svsk)
		printk("svc: socket %p: no user data\n", sk);
	else {
		set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
		svc_xprt_enqueue(&svsk->sk_xprt);
	}
	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
		wake_up_interruptible_all(sk_sleep(sk));
}
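
/*
 * Data has arrived on an established TCP connection: flag the transport
 * and queue it for a server thread.
 */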
static void svc_tcp_data_ready(struct sock *sk, int count)
{
	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP data ready (svsk %p)\n",
		sk, sk->sk_user_data);
	if (svsk) {
		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
		svc_xprt_enqueue(&svsk->sk_xprt);
	}
	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
		wake_up_interruptible(sk_sleep(sk));
}

/*
 * Accept a TCP connection
 */
static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct sockaddr_storage addr;
	struct sockaddr	*sin = (struct sockaddr *) &addr;
	struct svc_serv	*serv = svsk->sk_xprt.xpt_server;
	struct socket	*sock = svsk->sk_sock;
	struct socket	*newsock;
	struct svc_sock	*newsvsk;
	int		err, slen;
	RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);

	dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
	if (!sock)
		return NULL;

	clear_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
	err = kernel_accept(sock, &newsock, O_NONBLOCK);
	if (err < 0) {
		if (err == -ENOMEM)
			printk(KERN_WARNING "%s: no more sockets!\n",
			       serv->sv_name);
		else if (err != -EAGAIN && net_ratelimit())
			printk(KERN_WARNING "%s: accept failed (err %d)!\n",
			       serv->sv_name, -err);
		return NULL;
	}
	set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);

	err = kernel_getpeername(newsock, sin, &slen);
	if (err < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "%s: peername failed (err %d)!\n",
			       serv->sv_name, -err);
		goto failed;		/* aborted connection or whatever */
	}

	/* Ideally, we would want to reject connections from unauthorized
	 * hosts here, but when we get encryption, the IP of the host won't
	 * tell us anything.  For now just warn about unpriv connections.
	 */
	if (!svc_port_is_privileged(sin)) {
		dprintk(KERN_WARNING
			"%s: connect from unprivileged port: %s\n",
			serv->sv_name,
			__svc_print_addr(sin, buf, sizeof(buf)));
	}
	dprintk("%s: connect from %s\n", serv->sv_name,
		__svc_print_addr(sin, buf, sizeof(buf)));

	/* make sure that a write doesn't block forever when
	 * low on memory
	 */
	newsock->sk->sk_sndtimeo = HZ*30;

	if (!(newsvsk = svc_setup_socket(serv, newsock, &err,
				 (SVC_SOCK_ANONYMOUS | SVC_SOCK_TEMPORARY))))
		goto failed;
	svc_xprt_set_remote(&newsvsk->sk_xprt, sin, slen);
	err = kernel_getsockname(newsock, sin, &slen);
	if (unlikely(err < 0)) {
		dprintk("svc_tcp_accept: kernel_getsockname error %d\n", -err);
		slen = offsetof(struct sockaddr, sa_data);
	}
	svc_xprt_set_local(&newsvsk->sk_xprt, sin, slen);

	if (serv->sv_stats)
		serv->sv_stats->nettcpconn++;

	return &newsvsk->sk_xprt;

failed:
	sock_release(newsock);
	return NULL;
}

/*
 * Receive data.
 * If we haven't gotten the record length yet, get the next four bytes.
 * Otherwise try to gobble up as much as possible up to the complete
 * record length.
 */
static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = svsk->sk_xprt.xpt_server;
	int len;

	if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
		/* sndbuf needs to have room for one request
		 * per thread, otherwise we can stall even when the
		 * network isn't a bottleneck.
		 *
		 * We count all threads rather than threads in a
		 * particular pool, which provides an upper bound
		 * on the number of threads which will access the socket.
		 *
		 * rcvbuf just needs to be able to hold a few requests.
		 * Normally they will be removed from the queue
		 * as soon as a complete request arrives.
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
				    3 * serv->sv_max_mesg);

	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);

	if (svsk->sk_tcplen < sizeof(rpc_fraghdr)) {
		int		want = sizeof(rpc_fraghdr) - svsk->sk_tcplen;
		struct kvec	iov;

		iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
		iov.iov_len  = want;
		if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
			goto error;
		svsk->sk_tcplen += len;

		if (len < want) {
			dprintk("svc: short recvfrom while reading record "
				"length (%d of %d)\n", len, want);
			goto err_again; /* record header not complete */
		}

		svsk->sk_reclen = ntohl(svsk->sk_reclen);
		if (!(svsk->sk_reclen & RPC_LAST_STREAM_FRAGMENT)) {
			/* FIXME: technically, a record can be fragmented,
			 *  and non-terminal fragments will not have the top
			 *  bit set in the fragment length header.
			 *  But apparently no known nfs clients send fragmented
			 *  records. */
			if (net_ratelimit())
				printk(KERN_NOTICE "RPC: multiple fragments "
				       "per record not supported\n");
			goto err_delete;
		}

		svsk->sk_reclen &= RPC_FRAGMENT_SIZE_MASK;
		dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
		if (svsk->sk_reclen > serv->sv_max_mesg) {
			if (net_ratelimit())
				printk(KERN_NOTICE "RPC: "
				       "fragment too large: 0x%08lx\n",
				       (unsigned long)svsk->sk_reclen);
			goto err_delete;
		}
	}

	/* Check whether enough data is available */
	len = svc_recv_available(svsk);
	if (len < 0)
		goto error;

	if (len < svsk->sk_reclen) {
		dprintk("svc: incomplete TCP record (%d of %d)\n",
			len, svsk->sk_reclen);
		goto err_again;	/* record not complete */
	}
	len = svsk->sk_reclen;
	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);

	return len;
 error:
	if (len == -EAGAIN)
		dprintk("RPC: TCP recv_record got EAGAIN\n");
	return len;
 err_delete:
	set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
 err_again:
	return -EAGAIN;
}
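
/*
 * Read the first 8 bytes of the record (XID and call direction) and use
 * the call direction to decide whether this is a forward-channel call or
 * a reply on the backchannel; for a reply, look up the matching rpc_rqst
 * on sk_bc_xprt and point the receive kvec at its private buffer.
 */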
static int svc_process_calldir(struct svc_sock *svsk, struct svc_rqst *rqstp,
			       struct rpc_rqst **reqpp, struct kvec *vec)
{
	struct rpc_rqst *req = NULL;
	u32 *p;
	u32 xid;
	u32 calldir;
	int len;

	len = svc_recvfrom(rqstp, vec, 1, 8);
	if (len < 0)
		goto error;

	p = (u32 *)rqstp->rq_arg.head[0].iov_base;
	xid = *p++;
	calldir = *p;

	if (calldir == 0) {
		/* REQUEST is the most common case */
		vec[0] = rqstp->rq_arg.head[0];
	} else {
		/* REPLY */
		if (svsk->sk_bc_xprt)
			req = xprt_lookup_rqst(svsk->sk_bc_xprt, xid);

		if (!req) {
			printk(KERN_NOTICE
				"%s: Got unrecognized reply: "
				"calldir 0x%x sk_bc_xprt %p xid %08x\n",
				__func__, ntohl(calldir),
				svsk->sk_bc_xprt, xid);
			vec[0] = rqstp->rq_arg.head[0];
			goto out;
		}

		memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
		       sizeof(struct xdr_buf));
		/* copy the xid and call direction */
		memcpy(req->rq_private_buf.head[0].iov_base,
		       rqstp->rq_arg.head[0].iov_base, 8);
		vec[0] = req->rq_private_buf.head[0];
	}
 out:
	vec[0].iov_base += 8;
	vec[0].iov_len -= 8;
	len = svsk->sk_reclen - 8;
 error:
	*reqpp = req;
	return len;
}

/*
 * Receive data from a TCP socket.
 */
static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk =
		container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
	struct svc_serv	*serv = svsk->sk_xprt.xpt_server;
	int		len;
	struct kvec *vec;
	int pnum, vlen;
	struct rpc_rqst *req = NULL;

	dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
		svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags),
		test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags),
		test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));

	len = svc_tcp_recv_record(svsk, rqstp);
	if (len < 0)
		goto error;

	vec = rqstp->rq_vec;
	vec[0] = rqstp->rq_arg.head[0];
	vlen = PAGE_SIZE;

	/*
	 * We have enough data for the whole tcp record. Let's try and read the
	 * first 8 bytes to get the xid and the call direction. We can use this
	 * to figure out if this is a call or a reply to a callback. If
	 * sk_reclen is < 8 (xid and calldir), then this is a malformed packet.
	 * In that case, don't bother with the calldir and just read the data.
	 * It will be rejected in svc_process.
	 */
	if (len >= 8) {
		len = svc_process_calldir(svsk, rqstp, &req, vec);
		if (len < 0)
			goto err_again;
		vlen -= 8;
	}

	pnum = 1;
	while (vlen < len) {
		vec[pnum].iov_base = (req) ?
			page_address(req->rq_private_buf.pages[pnum - 1]) :
			page_address(rqstp->rq_pages[pnum]);
		vec[pnum].iov_len = PAGE_SIZE;
		pnum++;
		vlen += PAGE_SIZE;
	}
	rqstp->rq_respages = &rqstp->rq_pages[pnum];

	/* Now receive data */
	len = svc_recvfrom(rqstp, vec, pnum, len);
	if (len < 0)
		goto err_again;

	/*
	 * Account for the 8 bytes we read earlier
	 */
	len += 8;

	if (req) {
		xprt_complete_rqst(req->rq_task, len);
		len = 0;
		goto out;
	}
	dprintk("svc: TCP complete record (%d bytes)\n", len);
	rqstp->rq_arg.len = len;
	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
	}

	rqstp->rq_xprt_ctxt   = NULL;
	rqstp->rq_prot	      = IPPROTO_TCP;

out:
	/* Reset TCP read info */
	svsk->sk_reclen = 0;
	svsk->sk_tcplen = 0;

	svc_xprt_copy_addrs(rqstp, &svsk->sk_xprt);
	if (serv->sv_stats)
		serv->sv_stats->nettcpcnt++;

	return len;

err_again:
	if (len == -EAGAIN) {
		dprintk("RPC: TCP recvfrom got EAGAIN\n");
		return len;
	}
error:
	if (len != -EAGAIN) {
		printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
		       svsk->sk_xprt.xpt_server->sv_name, -len);
		set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
	}
	return -EAGAIN;
}

/*
 * Send out data on TCP socket.
 */
static int svc_tcp_sendto(struct svc_rqst *rqstp)
{
	struct xdr_buf	*xbufp = &rqstp->rq_res;
	int sent;
	__be32 reclen;

	/* Set up the first element of the reply kvec.
	 * Any other kvecs that may be in use have been taken
	 * care of by the server implementation itself.
	 */
	reclen = htonl(0x80000000|((xbufp->len) - 4));
	memcpy(xbufp->head[0].iov_base, &reclen, 4);

	if (test_bit(XPT_DEAD, &rqstp->rq_xprt->xpt_flags))
		return -ENOTCONN;

	sent = svc_sendto(rqstp, &rqstp->rq_res);
	if (sent != xbufp->len) {
		printk(KERN_NOTICE
		       "rpc-srv/tcp: %s: %s %d when sending %d bytes "
		       "- shutting down socket\n",
		       rqstp->rq_xprt->xpt_server->sv_name,
		       (sent < 0) ? "got error" : "sent only",
		       sent, xbufp->len);
		set_bit(XPT_CLOSE, &rqstp->rq_xprt->xpt_flags);
		svc_xprt_enqueue(rqstp->rq_xprt);
		sent = -EAGAIN;
	}
	return sent;
}

/*
 * Setup response header. TCP has a 4B record length field.
 */
static void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp)
{
	struct kvec *resv = &rqstp->rq_res.head[0];

	/* tcp needs a space for the record length... */
	svc_putnl(resv, 0);
}
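
/*
 * Check whether the connection has enough stream send space for another
 * worst-case reply; listener sockets always report space available.
 */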
static int svc_tcp_has_wspace(struct svc_xprt *xprt)
{
	struct svc_sock *svsk =	container_of(xprt, struct svc_sock, sk_xprt);
	struct svc_serv *serv = svsk->sk_xprt.xpt_server;
	int required;

	if (test_bit(XPT_LISTENER, &xprt->xpt_flags))
		return 1;
	required = atomic_read(&xprt->xpt_reserved) + serv->sv_max_mesg;
	if (sk_stream_wspace(svsk->sk_sk) >= required)
		return 1;
	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	return 0;
}

static struct svc_xprt *svc_tcp_create(struct svc_serv *serv,
				       struct net *net,
				       struct sockaddr *sa, int salen,
				       int flags)
{
	return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags);
}

static struct svc_xprt_ops svc_tcp_ops = {
	.xpo_create = svc_tcp_create,
	.xpo_recvfrom = svc_tcp_recvfrom,
	.xpo_sendto = svc_tcp_sendto,
	.xpo_release_rqst = svc_release_skb,
	.xpo_detach = svc_tcp_sock_detach,
	.xpo_free = svc_sock_free,
	.xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr,
	.xpo_has_wspace = svc_tcp_has_wspace,
	.xpo_accept = svc_tcp_accept,
};

static struct svc_xprt_class svc_tcp_class = {
	.xcl_name = "tcp",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_tcp_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
};

void svc_init_xprt_sock(void)
{
	svc_reg_xprt_class(&svc_tcp_class);
	svc_reg_xprt_class(&svc_udp_class);
}

void svc_cleanup_xprt_sock(void)
{
	svc_unreg_xprt_class(&svc_tcp_class);
	svc_unreg_xprt_class(&svc_udp_class);
}
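
/*
 * Set up a newly created TCP svc_sock: a listener gets the listen
 * data_ready callback, while an established connection gets the
 * state_change, data_ready and write_space callbacks plus initial
 * buffer sizes.
 */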
static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
{
	struct sock	*sk = svsk->sk_sk;

	svc_xprt_init(&svc_tcp_class, &svsk->sk_xprt, serv);
	set_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags);
	if (sk->sk_state == TCP_LISTEN) {
		dprintk("setting up TCP socket for listening\n");
		set_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags);
		sk->sk_data_ready = svc_tcp_listen_data_ready;
		set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
	} else {
		dprintk("setting up TCP socket for reading\n");
		sk->sk_state_change = svc_tcp_state_change;
		sk->sk_data_ready = svc_tcp_data_ready;
		sk->sk_write_space = svc_tcp_write_space;

		svsk->sk_reclen = 0;
		svsk->sk_tcplen = 0;

		tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;

		/* the initial setting must have enough space to
		 * receive and respond to one request.
		 * svc_tcp_recvfrom will re-adjust if necessary
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    3 * svsk->sk_xprt.xpt_server->sv_max_mesg,
				    3 * svsk->sk_xprt.xpt_server->sv_max_mesg);

		set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
		if (sk->sk_state != TCP_ESTABLISHED)
			set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
	}
}

void svc_sock_update_bufs(struct svc_serv *serv)
{
	/*
	 * The number of server threads has changed. Update
	 * rcvbuf and sndbuf accordingly on all sockets
	 */
	struct list_head *le;

	spin_lock_bh(&serv->sv_lock);
	list_for_each(le, &serv->sv_permsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_xprt.xpt_list);
		set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
	}
	list_for_each(le, &serv->sv_tempsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_xprt.xpt_list);
		set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
	}
	spin_unlock_bh(&serv->sv_lock);
}
EXPORT_SYMBOL_GPL(svc_sock_update_bufs);

/*
 * Initialize socket for RPC use and create svc_sock struct
 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
 */
static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
						struct socket *sock,
						int *errp, int flags)
{
	struct svc_sock	*svsk;
	struct sock	*inet;
	int		pmap_register = !(flags & SVC_SOCK_ANONYMOUS);

	dprintk("svc: svc_setup_socket %p\n", sock);
	if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) {
		*errp = -ENOMEM;
		return NULL;
	}

	inet = sock->sk;

	/* Register socket with portmapper */
	if (*errp >= 0 && pmap_register)
		*errp = svc_register(serv, inet->sk_family, inet->sk_protocol,
				     ntohs(inet_sk(inet)->inet_sport));

	if (*errp < 0) {
		kfree(svsk);
		return NULL;
	}

	inet->sk_user_data = svsk;
	svsk->sk_sock = sock;
	svsk->sk_sk = inet;
	svsk->sk_ostate = inet->sk_state_change;
	svsk->sk_odata = inet->sk_data_ready;
	svsk->sk_owspace = inet->sk_write_space;

	/* Initialize the socket */
	if (sock->type == SOCK_DGRAM)
		svc_udp_init(svsk, serv);
	else
		svc_tcp_init(svsk, serv);

	dprintk("svc: svc_setup_socket created %p (inet %p)\n",
			svsk, svsk->sk_sk);

	return svsk;
}

/**
 * svc_addsock - add a listener socket to an RPC service
 * @serv: pointer to RPC service to which to add a new listener
 * @fd: file descriptor of the new listener
 * @name_return: pointer to buffer to fill in with name of listener
 * @len: size of the buffer
 *
 * Fills in socket name and returns positive length of name if successful.
 * Name is terminated with '\n'.  On error, returns a negative errno
 * value.
 */
int svc_addsock(struct svc_serv *serv, const int fd, char *name_return,
		const size_t len)
{
	int err = 0;
	struct socket *so = sockfd_lookup(fd, &err);
	struct svc_sock *svsk = NULL;

	if (!so)
		return err;
	if ((so->sk->sk_family != PF_INET) && (so->sk->sk_family != PF_INET6))
		err =  -EAFNOSUPPORT;
	else if (so->sk->sk_protocol != IPPROTO_TCP &&
	    so->sk->sk_protocol != IPPROTO_UDP)
		err =  -EPROTONOSUPPORT;
	else if (so->state > SS_UNCONNECTED)
		err = -EISCONN;
	else {
		if (!try_module_get(THIS_MODULE))
			err = -ENOENT;
		else
			svsk = svc_setup_socket(serv, so, &err,
						SVC_SOCK_DEFAULTS);
		if (svsk) {
			struct sockaddr_storage addr;
			struct sockaddr *sin = (struct sockaddr *)&addr;
			int salen;
			if (kernel_getsockname(svsk->sk_sock, sin, &salen) == 0)
				svc_xprt_set_local(&svsk->sk_xprt, sin, salen);
			clear_bit(XPT_TEMP, &svsk->sk_xprt.xpt_flags);
			spin_lock_bh(&serv->sv_lock);
			list_add(&svsk->sk_xprt.xpt_list, &serv->sv_permsocks);
			spin_unlock_bh(&serv->sv_lock);
			svc_xprt_received(&svsk->sk_xprt);
			err = 0;
		} else
			module_put(THIS_MODULE);
	}
	if (err) {
		sockfd_put(so);
		return err;
	}
	return svc_one_sock_name(svsk, name_return, len);
}
EXPORT_SYMBOL_GPL(svc_addsock);

/*
 * Create socket for RPC service.
 */
static struct svc_xprt *svc_create_socket(struct svc_serv *serv,
					  int protocol,
					  struct net *net,
					  struct sockaddr *sin, int len,
					  int flags)
{
	struct svc_sock	*svsk;
	struct socket	*sock;
	int		error;
	int		type;
	struct sockaddr_storage addr;
	struct sockaddr *newsin = (struct sockaddr *)&addr;
	int		newlen;
	int		family;
	int		val;
	RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);

	dprintk("svc: svc_create_socket(%s, %d, %s)\n",
			serv->sv_program->pg_name, protocol,
			__svc_print_addr(sin, buf, sizeof(buf)));

	if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
		printk(KERN_WARNING "svc: only UDP and TCP "
				"sockets supported\n");
		return ERR_PTR(-EINVAL);
	}

	type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;
	switch (sin->sa_family) {
	case AF_INET6:
		family = PF_INET6;
		break;
	case AF_INET:
		family = PF_INET;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	error = sock_create_kern(family, type, protocol, &sock);
	if (error < 0)
		return ERR_PTR(error);

	svc_reclassify_socket(sock);

	/*
	 * If this is a PF_INET6 listener, we want to avoid
	 * getting requests from IPv4 remotes.  Those should
	 * be shunted to a PF_INET listener via rpcbind.
	 */
	val = 1;
	if (family == PF_INET6)
		kernel_setsockopt(sock, SOL_IPV6, IPV6_V6ONLY,
					(char *)&val, sizeof(val));

	if (type == SOCK_STREAM)
		sock->sk->sk_reuse = 1;		/* allow address reuse */
	error = kernel_bind(sock, sin, len);
	if (error < 0)
		goto bummer;

	newlen = len;
	error = kernel_getsockname(sock, newsin, &newlen);
	if (error < 0)
		goto bummer;

	if (protocol == IPPROTO_TCP) {
		if ((error = kernel_listen(sock, 64)) < 0)
			goto bummer;
	}

	if ((svsk = svc_setup_socket(serv, sock, &error, flags)) != NULL) {
		svc_xprt_set_local(&svsk->sk_xprt, newsin, newlen);
		return (struct svc_xprt *)svsk;
	}

bummer:
	dprintk("svc: svc_create_socket error = %d\n", -error);
	sock_release(sock);
	return ERR_PTR(error);
}

/*
 * Detach the svc_sock from the socket so that no
 * more callbacks occur.
 */
static void svc_sock_detach(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct sock *sk = svsk->sk_sk;

	dprintk("svc: svc_sock_detach(%p)\n", svsk);

	/* put back the old socket callbacks */
	sk->sk_state_change = svsk->sk_ostate;
	sk->sk_data_ready = svsk->sk_odata;
	sk->sk_write_space = svsk->sk_owspace;

	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
		wake_up_interruptible(sk_sleep(sk));
}

/*
 * Disconnect the socket, and reset the callbacks
 */
static void svc_tcp_sock_detach(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);

	dprintk("svc: svc_tcp_sock_detach(%p)\n", svsk);

	svc_sock_detach(xprt);

	if (!test_bit(XPT_LISTENER, &xprt->xpt_flags))
		kernel_sock_shutdown(svsk->sk_sock, SHUT_RDWR);
}

/*
 * Free the svc_sock's socket resources and the svc_sock itself.
 */
static void svc_sock_free(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	dprintk("svc: svc_sock_free(%p)\n", svsk);

	if (svsk->sk_sock->file)
		sockfd_put(svsk->sk_sock);
	else
		sock_release(svsk->sk_sock);
	kfree(svsk);
}

/*
 * Create a svc_xprt.
 *
 * For internal use only (e.g. nfsv4.1 backchannel).
 * Callers should typically use the xpo_create() method.
 */
struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot)
{
	struct svc_sock *svsk;
	struct svc_xprt *xprt = NULL;

	dprintk("svc: %s\n", __func__);
	svsk = kzalloc(sizeof(*svsk), GFP_KERNEL);
	if (!svsk)
		goto out;

	xprt = &svsk->sk_xprt;
	if (prot == IPPROTO_TCP)
		svc_xprt_init(&svc_tcp_class, xprt, serv);
	else if (prot == IPPROTO_UDP)
		svc_xprt_init(&svc_udp_class, xprt, serv);
	else
		BUG();
 out:
	dprintk("svc: %s return %p\n", __func__, xprt);
	return xprt;
}
EXPORT_SYMBOL_GPL(svc_sock_create);

/*
 * Destroy a svc_sock.
 */
void svc_sock_destroy(struct svc_xprt *xprt)
{
	if (xprt)
		kfree(container_of(xprt, struct svc_sock, sk_xprt));
}
EXPORT_SYMBOL_GPL(svc_sock_destroy);