/*
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright IBM Corp. 2006, 2009
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *  PM functions:
 *		Ursula Braun <ursula.braun@de.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>
#include <net/iucv/af_iucv.h>

#define VERSION "1.2"

static char iucv_userid[80];

static const struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

static struct iucv_interface *pr_iucv;

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

#define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))

/* macros to set/get socket control buffer at correct offset */
#define CB_TAG(skb)	((skb)->cb)		/* iucv message tag */
#define CB_TAG_LEN	(sizeof(((struct iucv_message *) 0)->tag))
#define CB_TRGCLS(skb)	((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
#define CB_TRGCLS_LEN	(TRGCLS_SIZE)
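
/*
 * Resulting skb->cb layout (a sketch of the offsets defined above):
 *
 *	cb[0] ... cb[CB_TAG_LEN - 1]				iucv message tag
 *	cb[CB_TAG_LEN] ... cb[CB_TAG_LEN + CB_TRGCLS_LEN - 1]	msg target class
 */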

#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk_sleep(sk), &__wait);				\
} while (0)

#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})

static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
	struct packet_type *pt, struct net_device *orig_dev);
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
		   struct sk_buff *skb, u8 flags);
static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);

static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}

static void iucv_skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		if (skb->dev)
			dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
	return 0;
}

static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}

/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev:	AFIUCV dummy device
 *
 * Sever all established IUCV communication paths
 */
static int afiucv_pm_freeze(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;
	struct hlist_node *node;
	int err = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, node, &iucv_sk_list.head) {
		iucv = iucv_sk(sk);
		iucv_skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
		switch (sk->sk_state) {
		case IUCV_SEVERED:
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_CONNECTED:
			if (iucv->path) {
				err = pr_iucv->path_sever(iucv->path, NULL);
				iucv_path_free(iucv->path);
				iucv->path = NULL;
			}
			break;
		case IUCV_OPEN:
		case IUCV_BOUND:
		case IUCV_LISTEN:
		case IUCV_CLOSED:
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return err;
}

/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	AFIUCV dummy device
 *
 * socket clean up after freeze
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
	struct sock *sk;
	struct hlist_node *node;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, node, &iucv_sk_list.head) {
		switch (sk->sk_state) {
		case IUCV_CONNECTED:
			sk->sk_err = EPIPE;
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
			break;
		case IUCV_DISCONN:
		case IUCV_SEVERED:
		case IUCV_CLOSING:
		case IUCV_LISTEN:
		case IUCV_BOUND:
		case IUCV_OPEN:
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}

static const struct dev_pm_ops afiucv_pm_ops = {
	.prepare = afiucv_pm_prepare,
	.complete = afiucv_pm_complete,
	.freeze = afiucv_pm_freeze,
	.thaw = afiucv_pm_restore_thaw,
	.restore = afiucv_pm_restore_thaw,
};

static struct device_driver af_iucv_driver = {
	.owner = THIS_MODULE,
	.name = "afiucv",
	.bus  = NULL,
	.pm   = &afiucv_pm_ops,
};

/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;

/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
 * If the socket data len is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further,
 * if the socket data len is > 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}
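
/*
 * Worked example (a sketch, not part of the original source): for a 5-byte
 * IPRM message the sender stores PRMDATA[7] = 0xff - 5 = 0xfa, so this
 * function computes 0xff - 0xfa = 5.  The shutdown notification uses
 * PRMDATA[7] = 0x01 (see iprm_shutdown[] above), i.e. a "length" of
 * 0xfe (254), which is > 7 and therefore reported as 8.
 */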

/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}

/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	if (iucv->transport == AF_IUCV_TRANS_IUCV)
		return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
	else
		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
			(atomic_read(&iucv->pendings) <= 0));
}

/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

/**
 * afiucv_hs_send() - send a message through HiperSockets transport
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
		   struct sk_buff *skb, u8 flags)
{
	struct net *net = sock_net(sock);
	struct iucv_sock *iucv = iucv_sk(sock);
	struct af_iucv_trans_hdr *phs_hdr;
	struct sk_buff *nskb;
	int err, confirm_recv = 0;

	memset(skb->head, 0, ETH_HLEN);
	phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
					sizeof(struct af_iucv_trans_hdr));
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));

	phs_hdr->magic = ETH_P_AF_IUCV;
	phs_hdr->version = 1;
	phs_hdr->flags = flags;
	if (flags == AF_IUCV_FLAG_SYN)
		phs_hdr->window = iucv->msglimit;
	else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
		confirm_recv = atomic_read(&iucv->msg_recv);
		phs_hdr->window = confirm_recv;
		if (confirm_recv)
			phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
	}
	memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
	memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
	memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
	memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
	ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
	ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
	ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
	ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
	if (imsg)
		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));

	skb->dev = dev_get_by_index(net, sock->sk_bound_dev_if);
	if (!skb->dev)
		return -ENODEV;
	if (!(skb->dev->flags & IFF_UP))
		return -ENETDOWN;
	if (skb->len > skb->dev->mtu) {
		if (sock->sk_type == SOCK_SEQPACKET)
			return -EMSGSIZE;
		else
			skb_trim(skb, skb->dev->mtu);
	}
	skb->protocol = ETH_P_AF_IUCV;
	skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;
	skb_queue_tail(&iucv->send_skb_q, nskb);
	err = dev_queue_xmit(skb);
	if (err) {
		skb_unlink(nskb, &iucv->send_skb_q);
		dev_put(nskb->dev);
		kfree_skb(nskb);
	} else {
		atomic_sub(confirm_recv, &iucv->msg_recv);
		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
	}
	return err;
}
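
/*
 * On-wire layout produced by afiucv_hs_send() (a sketch derived from the
 * code above; field names follow struct af_iucv_trans_hdr):
 *
 *	| ETH_HLEN bytes (zeroed) | af_iucv_trans_hdr | payload |
 *
 * where the transport header carries magic = ETH_P_AF_IUCV, version = 1,
 * the SYN/WIN/FIN flags, the receive window, the EBCDIC-converted
 * src/dst user IDs and application names, and an embedded copy of
 * struct iucv_message when @imsg is given.
 */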

static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}
/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned long timeo;
	int err, blen;
	struct sk_buff *skb;

	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			/* send fin */
			blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
			skb = sock_alloc_send_skb(sk, blen, 1, &err);
			if (skb) {
				skb_reserve(skb, blen);
				err = afiucv_hs_send(NULL, sk, skb,
						     AF_IUCV_FLAG_FIN);
			}
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
		}
	case IUCV_DISCONN:	/* fall through */
		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			iucv_sock_wait(sk,
					iucv_sock_in_state(sk, IUCV_CLOSED, 0),
					timeo);
		}

	case IUCV_CLOSING:	/* fall through */
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		if (iucv->path) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			pr_iucv->path_sever(iucv->path, user_data);
			iucv_path_free(iucv->path);
			iucv->path = NULL;
		}

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		iucv_skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
		break;

	default:
/* nothing to do here */
		break;
	}
	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;
	struct iucv_sock *iucv;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
	if (!sk)
		return NULL;
	iucv = iucv_sk(sk);

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv->accept_q);
	spin_lock_init(&iucv->accept_q_lock);
	skb_queue_head_init(&iucv->send_skb_q);
	INIT_LIST_HEAD(&iucv->message_q.list);
	spin_lock_init(&iucv->message_q.lock);
	skb_queue_head_init(&iucv->backlog_skb_q);
	iucv->send_tag = 0;
	atomic_set(&iucv->pendings, 0);
	iucv->flags = 0;
	iucv->msglimit = 0;
	atomic_set(&iucv->msg_sent, 0);
	atomic_set(&iucv->msg_recv, 0);
	iucv->path = NULL;
	iucv->sk_txnotify = afiucv_hs_callback_txnotify;
	memset(&iucv->src_user_id, 0, 32);
	if (pr_iucv)
		iucv->transport = AF_IUCV_TRANS_IUCV;
	else
		iucv->transport = AF_IUCV_TRANS_HIPER;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state	= IUCV_OPEN;

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
			    int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &iucv_sock_ops;
		break;
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}
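
/*
 * Minimal user-space usage sketch (an illustration, not part of this file;
 * field names follow struct sockaddr_iucv as used below):
 *
 *	struct sockaddr_iucv addr = { .siucv_family = AF_IUCV };
 *	int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 *	memcpy(addr.siucv_user_id, "USERID  ", 8);	(blank-padded)
 *	memcpy(addr.siucv_name,    "APPNAME ", 8);	(blank-padded)
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * connect()/listen()/accept() then follow the usual socket calls, handled
 * by iucv_sock_connect(), iucv_sock_listen() and iucv_sock_accept() below.
 */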

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	sk_acceptq_added(parent);
}

void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_SEVERED ||
		    sk->sk_state == IUCV_DISCONN ||	/* due to PM restore */
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			if (sk->sk_state == IUCV_SEVERED)
				sk->sk_state = IUCV_DISCONN;

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err = 0;
	struct net_device *dev;
	char uid[9];

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path)
		goto done_unlock;

	/* Bind the socket */
	if (pr_iucv)
		if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
			goto vm_bind; /* VM IUCV transport */

	/* try hiper transport */
	memcpy(uid, sa->siucv_user_id, sizeof(uid));
	ASCEBC(uid, 8);
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (!memcmp(dev->perm_addr, uid, 8)) {
			memcpy(iucv->src_name, sa->siucv_name, 8);
			memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
			sk->sk_bound_dev_if = dev->ifindex;
			sk->sk_state = IUCV_BOUND;
			iucv->transport = AF_IUCV_TRANS_HIPER;
			if (!iucv->msglimit)
				iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
			rcu_read_unlock();
			goto done_unlock;
		}
	}
	rcu_read_unlock();
vm_bind:
	if (pr_iucv) {
		/* use local userid for backward compat */
		memcpy(iucv->src_name, sa->siucv_name, 8);
		memcpy(iucv->src_user_id, iucv_userid, 8);
		sk->sk_state = IUCV_BOUND;
		iucv->transport = AF_IUCV_TRANS_IUCV;
		if (!iucv->msglimit)
			iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
		goto done_unlock;
	}
	/* found no dev to bind */
	err = -ENODEV;
done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char query_buffer[80];
	char name[12];
	int err = 0;

	/* Set the userid and name */
	cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
	if (unlikely(err))
		return -EPROTO;

	memcpy(iucv->src_user_id, query_buffer, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	if (!iucv->msglimit)
		iucv->msglimit = IUCV_QUEUELEN_DEFAULT;

	return err;
}

static int afiucv_hs_connect(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
	int err = 0;

	/* send syn */
	skb = sock_alloc_send_skb(sk, blen, 1, &err);
	if (!skb) {
		err = -ENOMEM;
		goto done;
	}
	skb->dev = NULL;
	skb_reserve(skb, blen);
	err = afiucv_hs_send(NULL, sk, skb, AF_IUCV_FLAG_SYN);
done:
	return err;
}

static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned char user_data[16];
	int err;

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv->src_name);
	ASCEBC(user_data, sizeof(user_data));

	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
				    sa->siucv_user_id, NULL, user_data,
				    sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
	}
done:
	return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_state == IUCV_OPEN &&
	    iucv->transport == AF_IUCV_TRANS_HIPER)
		return -EBADFD; /* explicit bind required */

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv->dst_name, sa->siucv_name, 8);

	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		err = afiucv_hs_connect(sock);
	else
		err = afiucv_path_connect(sock, addr);
	if (err)
		goto done;

	if (sk->sk_state != IUCV_CONNECTED)
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
		err = -ECONNREFUSED;

	if (err && iucv->transport == AF_IUCV_TRANS_IUCV) {
		pr_iucv->path_sever(iucv->path, NULL);
		iucv_path_free(iucv->path);
		iucv->path = NULL;
	}

done:
	release_sock(sk);
	return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
		memcpy(siucv->siucv_name, iucv->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}

/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
				     (void *) prmdata, 8);
}

static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	struct cmsghdr *cmsg;
	int cmsg_done;
	long timeo;
	char user_id[9];
	char appl_id[9];
	int err;
	int noblock = msg->msg_flags & MSG_DONTWAIT;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	/* Return if the socket is not in connected state */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ENOTCONN;
		goto out;
	}

	/* initialize defaults */
	cmsg_done   = 0;	/* check for duplicate headers */
	txmsg.class = 0;

	/* iterate over control messages */
	for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
		cmsg = CMSG_NXTHDR(msg, cmsg)) {

		if (!CMSG_OK(msg, cmsg)) {
			err = -EINVAL;
			goto out;
		}

		if (cmsg->cmsg_level != SOL_IUCV)
			continue;

		if (cmsg->cmsg_type & cmsg_done) {
			err = -EINVAL;
			goto out;
		}
		cmsg_done |= cmsg->cmsg_type;

		switch (cmsg->cmsg_type) {
		case SCM_IUCV_TRGCLS:
			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
				err = -EINVAL;
				goto out;
			}

			/* set iucv message target class */
			memcpy(&txmsg.class,
				(void *) CMSG_DATA(cmsg), TRGCLS_SIZE);

			break;

		default:
			err = -EINVAL;
			goto out;
		}
	}

	/* allocate one skb for each iucv message:
	 * this is fine for SOCK_SEQPACKET (unless we want to support
	 * segmented records using the MSG_EOR flag), but
	 * for SOCK_STREAM we might want to improve it in future */
	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		skb = sock_alloc_send_skb(sk,
			len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
			noblock, &err);
	else
		skb = sock_alloc_send_skb(sk, len, noblock, &err);
	if (!skb)
		goto out;
	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto fail;
	}

	/* wait if the outstanding message limit for the iucv path
	 * has been reached */
	timeo = sock_sndtimeo(sk, noblock);
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
	if (err)
		goto fail;

	/* return -ECONNRESET if the socket is no longer connected */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ECONNRESET;
		goto fail;
	}

	/* increment and save iucv message tag for msg_completion cbk */
	txmsg.tag = iucv->send_tag++;
	memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
		atomic_inc(&iucv->msg_sent);
		err = afiucv_hs_send(&txmsg, sk, skb, 0);
		if (err) {
			atomic_dec(&iucv->msg_sent);
			goto fail;
		}
		goto release;
	}
	skb_queue_tail(&iucv->send_skb_q, skb);

	if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
	      && skb->len <= 7) {
		err = iucv_send_iprm(iucv->path, &txmsg, skb);

		/* on success: there is no message_complete callback
		 * for an IPRMDATA msg; remove skb from send queue */
		if (err == 0) {
			skb_unlink(skb, &iucv->send_skb_q);
			kfree_skb(skb);
		}

		/* this error should never happen since the
		 * IUCV_IPRMDATA path flag is set... sever path */
		if (err == 0x15) {
			pr_iucv->path_sever(iucv->path, NULL);
			skb_unlink(skb, &iucv->send_skb_q);
			err = -EPIPE;
			goto fail;
		}
	} else
		err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
					(void *) skb->data, skb->len);
	if (err) {
		if (err == 3) {
			user_id[8] = 0;
			memcpy(user_id, iucv->dst_user_id, 8);
			appl_id[8] = 0;
			memcpy(appl_id, iucv->dst_name, 8);
			pr_err("Application %s on z/VM guest %s"
				" exceeds message limit\n",
				appl_id, user_id);
			err = -EAGAIN;
		} else
			err = -EPIPE;
		skb_unlink(skb, &iucv->send_skb_q);
		goto fail;
	}

release:
	release_sock(sk);
	return len;

fail:
	if (skb->dev)
		dev_put(skb->dev);
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}
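
/*
 * User-space sketch for setting the iucv message target class via a
 * control message (an illustration; the SCM_IUCV_TRGCLS handling is in
 * iucv_sock_sendmsg() above):
 *
 *	struct iovec iov = { .iov_base = "hello", .iov_len = 5 };
 *	char cbuf[CMSG_SPACE(sizeof(uint32_t))];
 *	struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1,
 *			     .msg_control = cbuf,
 *			     .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&mh);
 *	cm->cmsg_level = SOL_IUCV;
 *	cm->cmsg_type  = SCM_IUCV_TRGCLS;
 *	cm->cmsg_len   = CMSG_LEN(sizeof(uint32_t));
 *	*(uint32_t *)CMSG_DATA(cm) = 0x00000042;	(target class)
 *	sendmsg(fd, &mh, 0);
 */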

/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skb's
 *
 * Locking: must be called with message_q.lock held
 */
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		/* copy target class to control buffer of new skb */
		memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);

		/* copy data fragment */
		memcpy(nskb->data, skb->data + copied, size);
		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
	}

	return 0;
}
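
/*
 * Example (a sketch): with sk->sk_rcvbuf == 64 KiB, a 40 KiB message is
 * split into two 16 KiB fragments (sk_rcvbuf / 4) plus one 8 KiB
 * remainder, each queued on backlog_skb_q with the target class copied
 * into its control buffer.
 */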

/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		rc = pr_iucv->message_receive(path, msg,
					      msg->flags & IUCV_IPRMDATA,
					      skb->data, len, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		/* we need to fragment iucv messages for SOCK_STREAM only;
		 * for SOCK_SEQPACKET, it is only relevant if we support
		 * record segmentation using MSG_EOR (see also recvmsg()) */
		if (sk->sk_type == SOCK_STREAM &&
		    skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, len);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				pr_iucv->path_sever(path, NULL);
				return;
			}
			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = len;
		}
	}

	if (sock_queue_rcv_skb(sk, skb))
		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}
af_iucv: fix race when queueing skbs on the backlog queue
iucv_sock_recvmsg() and iucv_process_message()/iucv_fragment_skb race
for dequeuing an skb from the backlog queue.
If iucv_sock_recvmsg() dequeues first, iucv_process_message() calls
sock_queue_rcv_skb() with an skb that is NULL.
This results in the following kernel panic:
<1>Unable to handle kernel pointer dereference at virtual kernel address (null)
<4>Oops: 0004 [#1] PREEMPT SMP DEBUG_PAGEALLOC
<4>Modules linked in: af_iucv sunrpc qeth_l3 dm_multipath dm_mod vmur qeth ccwgroup
<4>CPU: 0 Not tainted 2.6.30 #4
<4>Process client-iucv (pid: 4787, task: 0000000034e75940, ksp: 00000000353e3710)
<4>Krnl PSW : 0704000180000000 000000000043ebca (sock_queue_rcv_skb+0x7a/0x138)
<4> R:0 T:1 IO:1 EX:1 Key:0 M:1 W:0 P:0 AS:0 CC:0 PM:0 EA:3
<4>Krnl GPRS: 0052900000000000 000003e0016e0fe8 0000000000000000 0000000000000000
<4> 000000000043eba8 0000000000000002 0000000000000001 00000000341aa7f0
<4> 0000000000000000 0000000000007800 0000000000000000 0000000000000000
<4> 00000000341aa7f0 0000000000594650 000000000043eba8 000000003fc2fb28
<4>Krnl Code: 000000000043ebbe: a7840006 brc 8,43ebca
<4> 000000000043ebc2: 5930c23c c %r3,572(%r12)
<4> 000000000043ebc6: a724004c brc 2,43ec5e
<4> >000000000043ebca: e3c0b0100024 stg %r12,16(%r11)
<4> 000000000043ebd0: a7190000 lghi %r1,0
<4> 000000000043ebd4: e310b0200024 stg %r1,32(%r11)
<4> 000000000043ebda: c010ffffdce9 larl %r1,43a5ac
<4> 000000000043ebe0: e310b0800024 stg %r1,128(%r11)
<4>Call Trace:
<4>([<000000000043eba8>] sock_queue_rcv_skb+0x58/0x138)
<4> [<000003e0016bcf2a>] iucv_process_message+0x112/0x3cc [af_iucv]
<4> [<000003e0016bd3d4>] iucv_callback_rx+0x1f0/0x274 [af_iucv]
<4> [<000000000053a21a>] iucv_message_pending+0xa2/0x120
<4> [<000000000053b5a6>] iucv_tasklet_fn+0x176/0x1b8
<4> [<000000000014fa82>] tasklet_action+0xfe/0x1f4
<4> [<0000000000150a56>] __do_softirq+0x116/0x284
<4> [<0000000000111058>] do_softirq+0xe4/0xe8
<4> [<00000000001504ba>] irq_exit+0xba/0xd8
<4> [<000000000010e0b2>] do_extint+0x146/0x190
<4> [<00000000001184b6>] ext_no_vtime+0x1e/0x22
<4> [<00000000001fbf4e>] kfree+0x202/0x28c
<4>([<00000000001fbf44>] kfree+0x1f8/0x28c)
<4> [<000000000044205a>] __kfree_skb+0x32/0x124
<4> [<000003e0016bd8b2>] iucv_sock_recvmsg+0x236/0x41c [af_iucv]
<4> [<0000000000437042>] sock_aio_read+0x136/0x160
<4> [<0000000000205e50>] do_sync_read+0xe4/0x13c
<4> [<0000000000206dce>] vfs_read+0x152/0x15c
<4> [<0000000000206ed0>] SyS_read+0x54/0xac
<4> [<0000000000117c8e>] sysc_noemu+0x10/0x16
<4> [<00000042ff8def3c>] 0x42ff8def3c
Signed-off-by: Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
Signed-off-by: Ursula Braun <ursula.braun@de.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
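The locking pattern behind this fix is worth spelling out. The sketch below is an illustration only (the names follow the af_iucv code in this file, but it is a simplified rendering of the serialized path, not the literal patch): both the receive path and the message-pending callback must hold message_q.lock while they touch backlog_skb_q, so neither side can hand a NULL skb to sock_queue_rcv_skb().

/* illustrative sketch: drain the backlog queue under message_q.lock,
 * mirroring the spin_lock_bh() section in iucv_sock_recvmsg() below */
static void example_drain_backlog(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *rskb;

	spin_lock_bh(&iucv->message_q.lock);	/* serializes with iucv_callback_rx() */
	while ((rskb = skb_dequeue(&iucv->backlog_skb_q)) != NULL) {
		if (sock_queue_rcv_skb(sk, rskb)) {
			/* receive queue full: put the skb back and retry later */
			skb_queue_head(&iucv->backlog_skb_q, rskb);
			break;
		}
	}
	spin_unlock_bh(&iucv->message_q.lock);
}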
2009-09-16 08:37:28 +04:00
/* iucv_process_message_q() - Process outstanding IUCV messages
*
* Locking : must be called with message_q . lock held
*/
2007-10-08 13:03:31 +04:00
static void iucv_process_message_q ( struct sock * sk )
{
struct iucv_sock * iucv = iucv_sk ( sk ) ;
struct sk_buff * skb ;
struct sock_msg_q * p , * n ;
list_for_each_entry_safe ( p , n , & iucv - > message_q . list , list ) {
2009-04-22 03:26:23 +04:00
skb = alloc_skb ( iucv_msg_length ( & p - > msg ) , GFP_ATOMIC | GFP_DMA ) ;
2007-10-08 13:03:31 +04:00
if ( ! skb )
break ;
iucv_process_message ( sk , skb , p - > path , & p - > msg ) ;
list_del ( & p - > list ) ;
kfree ( p ) ;
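/* if processing this message refilled the backlog queue, stop here;
 * the receive path drains the backlog first and then calls back
 * into this function */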
if ( ! skb_queue_empty ( & iucv - > backlog_skb_q ) )
break ;
}
}
2007-02-09 00:51:54 +03:00
static int iucv_sock_recvmsg ( struct kiocb * iocb , struct socket * sock ,
struct msghdr * msg , size_t len , int flags )
{
int noblock = flags & MSG_DONTWAIT ;
struct sock * sk = sock - > sk ;
2007-05-04 23:22:07 +04:00
struct iucv_sock * iucv = iucv_sk ( sk ) ;
2009-04-22 03:26:25 +04:00
unsigned int copied , rlen ;
2011-08-08 05:33:54 +04:00
struct sk_buff * skb , * rskb , * cskb , * sskb ;
int blen ;
2007-02-09 00:51:54 +03:00
int err = 0 ;
2007-05-04 23:22:07 +04:00
if ( ( sk - > sk_state = = IUCV_DISCONN | | sk - > sk_state = = IUCV_SEVERED ) & &
2007-10-08 13:03:31 +04:00
skb_queue_empty ( & iucv - > backlog_skb_q ) & &
skb_queue_empty ( & sk - > sk_receive_queue ) & &
list_empty ( & iucv - > message_q . list ) )
2007-05-04 23:22:07 +04:00
return 0 ;
2007-02-09 00:51:54 +03:00
if ( flags & ( MSG_OOB ) )
return - EOPNOTSUPP ;
2009-04-21 10:04:21 +04:00
/* receive/dequeue next skb:
* the function understands MSG_PEEK and , thus , does not dequeue skb */
2007-02-09 00:51:54 +03:00
skb = skb_recv_datagram ( sk , flags , noblock , & err ) ;
if ( ! skb ) {
if ( sk - > sk_shutdown & RCV_SHUTDOWN )
return 0 ;
return err ;
}
2009-04-22 03:26:25 +04:00
rlen = skb - > len ; /* real length of skb */
copied = min_t ( unsigned int , rlen , len ) ;
2007-02-09 00:51:54 +03:00
2007-05-04 23:22:07 +04:00
cskb = skb ;
2011-08-08 05:33:54 +04:00
if ( skb_copy_datagram_iovec ( cskb , 0 , msg - > msg_iov , copied ) ) {
2009-04-22 03:26:26 +04:00
if ( ! ( flags & MSG_PEEK ) )
skb_queue_head ( & sk - > sk_receive_queue , skb ) ;
return - EFAULT ;
2007-02-09 00:51:54 +03:00
}
2009-04-22 03:26:25 +04:00
/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
if ( sk - > sk_type = = SOCK_SEQPACKET ) {
if ( copied < rlen )
msg - > msg_flags | = MSG_TRUNC ;
/* each iucv message contains a complete record */
msg - > msg_flags | = MSG_EOR ;
}
2007-02-09 00:51:54 +03:00
2009-04-22 03:26:24 +04:00
/* create control message to store iucv msg target class:
* get the trgcls from the control buffer of the skb due to
* fragmentation of original iucv message . */
err = put_cmsg ( msg , SOL_IUCV , SCM_IUCV_TRGCLS ,
CB_TRGCLS_LEN , CB_TRGCLS ( skb ) ) ;
if ( err ) {
if ( ! ( flags & MSG_PEEK ) )
skb_queue_head ( & sk - > sk_receive_queue , skb ) ;
return err ;
}
2007-02-09 00:51:54 +03:00
/* Mark read part of skb as used */
if ( ! ( flags & MSG_PEEK ) ) {
2009-04-22 03:26:25 +04:00
/* SOCK_STREAM: re-queue skb if it contains unreceived data */
if ( sk - > sk_type = = SOCK_STREAM ) {
skb_pull ( skb , copied ) ;
if ( skb - > len ) {
skb_queue_head ( & sk - > sk_receive_queue , skb ) ;
goto done ;
}
2007-02-09 00:51:54 +03:00
}
kfree_skb ( skb ) ;
2011-08-08 05:33:54 +04:00
atomic_inc ( & iucv - > msg_recv ) ;
2007-05-04 23:22:07 +04:00
/* Queue backlog skbs */
2009-09-16 08:37:28 +04:00
spin_lock_bh ( & iucv - > message_q . lock ) ;
2007-10-08 13:03:31 +04:00
rskb = skb_dequeue ( & iucv - > backlog_skb_q ) ;
2007-05-04 23:23:27 +04:00
while ( rskb ) {
2007-05-04 23:22:07 +04:00
if ( sock_queue_rcv_skb ( sk , rskb ) ) {
2007-10-08 13:03:31 +04:00
skb_queue_head ( & iucv - > backlog_skb_q ,
2007-05-04 23:22:07 +04:00
rskb ) ;
break ;
} else {
2007-10-08 13:03:31 +04:00
rskb = skb_dequeue ( & iucv - > backlog_skb_q ) ;
2007-05-04 23:22:07 +04:00
}
}
2007-10-08 13:03:31 +04:00
if ( skb_queue_empty ( & iucv - > backlog_skb_q ) ) {
if ( ! list_empty ( & iucv - > message_q . list ) )
iucv_process_message_q ( sk ) ;
2011-08-08 05:33:54 +04:00
if ( atomic_read ( & iucv - > msg_recv ) > =
iucv - > msglimit / 2 ) {
/* send WIN to peer */
blen = sizeof ( struct af_iucv_trans_hdr ) +
ETH_HLEN ;
sskb = sock_alloc_send_skb ( sk , blen , 1 , & err ) ;
if ( sskb ) {
2011-12-20 02:56:29 +04:00
skb_reserve ( sskb , blen ) ;
2011-08-08 05:33:54 +04:00
err = afiucv_hs_send ( NULL , sk , sskb ,
AF_IUCV_FLAG_WIN ) ;
}
if ( err ) {
sk - > sk_state = IUCV_DISCONN ;
sk - > sk_state_change ( sk ) ;
}
}
2007-10-08 13:03:31 +04:00
}
2009-09-16 08:37:28 +04:00
spin_unlock_bh ( & iucv - > message_q . lock ) ;
2009-04-21 10:04:21 +04:00
}
2007-02-09 00:51:54 +03:00
done :
2009-04-22 03:26:25 +04:00
/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
if ( sk - > sk_type = = SOCK_SEQPACKET & & ( flags & MSG_TRUNC ) )
copied = rlen ;
return copied ;
2007-02-09 00:51:54 +03:00
}
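For completeness, the target class published through put_cmsg() above can be read back in userspace via the usual CMSG macros. A hypothetical sketch follows; SOL_IUCV and SCM_IUCV_TRGCLS come from the kernel headers (linux/socket.h and the af_iucv header), and the fallback values below reflect those headers at the time of writing:

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

#ifndef SOL_IUCV
#define SOL_IUCV 277			/* from linux/socket.h */
#endif
#ifndef SCM_IUCV_TRGCLS
#define SCM_IUCV_TRGCLS 0x0001		/* from the kernel af_iucv header */
#endif

/* receive one message and extract its 32-bit IUCV target class from
 * the SCM_IUCV_TRGCLS ancillary data, if present */
static ssize_t recv_with_trgcls(int fd, void *buf, size_t len, uint32_t *trgcls)
{
	char cbuf[CMSG_SPACE(sizeof(uint32_t))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	ssize_t n = recvmsg(fd, &msg, 0);

	if (n < 0)
		return n;
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		if (cmsg->cmsg_level == SOL_IUCV &&
		    cmsg->cmsg_type == SCM_IUCV_TRGCLS)
			memcpy(trgcls, CMSG_DATA(cmsg), sizeof(*trgcls));
	return n;
}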
static inline unsigned int iucv_accept_poll ( struct sock * parent )
{
struct iucv_sock * isk , * n ;
struct sock * sk ;
2007-05-04 23:23:27 +04:00
list_for_each_entry_safe ( isk , n , & iucv_sk ( parent ) - > accept_q , accept_q ) {
2007-02-09 00:51:54 +03:00
sk = ( struct sock * ) isk ;
if ( sk - > sk_state = = IUCV_CONNECTED )
return POLLIN | POLLRDNORM ;
}
return 0 ;
}
unsigned int iucv_sock_poll ( struct file * file , struct socket * sock ,
poll_table * wait )
{
struct sock * sk = sock - > sk ;
unsigned int mask = 0 ;
2010-04-20 17:03:51 +04:00
sock_poll_wait ( file , sk_sleep ( sk ) , wait ) ;
2007-02-09 00:51:54 +03:00
if ( sk - > sk_state = = IUCV_LISTEN )
return iucv_accept_poll ( sk ) ;
if ( sk - > sk_err | | ! skb_queue_empty ( & sk - > sk_error_queue ) )
mask | = POLLERR ;
if ( sk - > sk_shutdown & RCV_SHUTDOWN )
mask | = POLLRDHUP ;
if ( sk - > sk_shutdown = = SHUTDOWN_MASK )
mask | = POLLHUP ;
if ( ! skb_queue_empty ( & sk - > sk_receive_queue ) | |
2007-05-04 23:23:27 +04:00
( sk - > sk_shutdown & RCV_SHUTDOWN ) )
2007-02-09 00:51:54 +03:00
mask | = POLLIN | POLLRDNORM ;
if ( sk - > sk_state = = IUCV_CLOSED )
mask | = POLLHUP ;
2007-05-04 23:22:07 +04:00
if ( sk - > sk_state = = IUCV_DISCONN | | sk - > sk_state = = IUCV_SEVERED )
mask | = POLLIN ;
2007-02-09 00:51:54 +03:00
if ( sock_writeable ( sk ) )
mask | = POLLOUT | POLLWRNORM | POLLWRBAND ;
else
set_bit ( SOCK_ASYNC_NOSPACE , & sk - > sk_socket - > flags ) ;
return mask ;
}
static int iucv_sock_shutdown ( struct socket * sock , int how )
{
struct sock * sk = sock - > sk ;
struct iucv_sock * iucv = iucv_sk ( sk ) ;
struct iucv_message txmsg ;
int err = 0 ;
how + + ;
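/* how++ maps SHUT_RD (0), SHUT_WR (1) and SHUT_RDWR (2) onto the
 * RCV_SHUTDOWN/SEND_SHUTDOWN bit mask (1, 2 and 3) checked next */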
if ( ( how & ~ SHUTDOWN_MASK ) | | ! how )
return - EINVAL ;
lock_sock ( sk ) ;
2007-05-04 23:23:27 +04:00
switch ( sk - > sk_state ) {
2009-04-21 10:04:23 +04:00
case IUCV_DISCONN :
case IUCV_CLOSING :
case IUCV_SEVERED :
2007-02-09 00:51:54 +03:00
case IUCV_CLOSED :
err = - ENOTCONN ;
goto fail ;
default :
sk - > sk_shutdown | = how ;
break ;
}
if ( how = = SEND_SHUTDOWN | | how = = SHUTDOWN_MASK ) {
txmsg . class = 0 ;
txmsg . tag = 0 ;
2011-08-08 05:33:51 +04:00
err = pr_iucv - > message_send ( iucv - > path , & txmsg , IUCV_IPRMDATA ,
0 , ( void * ) iprm_shutdown , 8 ) ;
2007-02-09 00:51:54 +03:00
if ( err ) {
2007-05-04 23:23:27 +04:00
switch ( err ) {
2007-02-09 00:51:54 +03:00
case 1 :
err = - ENOTCONN ;
break ;
case 2 :
err = - ECONNRESET ;
break ;
default :
err = - ENOTCONN ;
break ;
}
}
}
if ( how = = RCV_SHUTDOWN | | how = = SHUTDOWN_MASK ) {
2011-08-08 05:33:51 +04:00
err = pr_iucv - > path_quiesce ( iucv - > path , NULL ) ;
2007-02-09 00:51:54 +03:00
if ( err )
err = - ENOTCONN ;
skb_queue_purge ( & sk - > sk_receive_queue ) ;
}
/* Wake up anyone sleeping in poll */
sk - > sk_state_change ( sk ) ;
fail :
release_sock ( sk ) ;
return err ;
}
static int iucv_sock_release ( struct socket * sock )
{
struct sock * sk = sock - > sk ;
int err = 0 ;
if ( ! sk )
return 0 ;
iucv_sock_close ( sk ) ;
/* Unregister with IUCV base support */
if ( iucv_sk ( sk ) - > path ) {
2011-08-08 05:33:51 +04:00
pr_iucv - > path_sever ( iucv_sk ( sk ) - > path , NULL ) ;
2007-02-09 00:51:54 +03:00
iucv_path_free ( iucv_sk ( sk ) - > path ) ;
iucv_sk ( sk ) - > path = NULL ;
}
sock_orphan ( sk ) ;
iucv_sock_kill ( sk ) ;
return err ;
}
2009-04-22 03:26:22 +04:00
/* getsockopt and setsockopt */
static int iucv_sock_setsockopt ( struct socket * sock , int level , int optname ,
2009-10-01 03:12:20 +04:00
char __user * optval , unsigned int optlen )
2009-04-22 03:26:22 +04:00
{
struct sock * sk = sock - > sk ;
struct iucv_sock * iucv = iucv_sk ( sk ) ;
int val ;
int rc ;
if ( level ! = SOL_IUCV )
return - ENOPROTOOPT ;
if ( optlen < sizeof ( int ) )
return - EINVAL ;
if ( get_user ( val , ( int __user * ) optval ) )
return - EFAULT ;
rc = 0 ;
lock_sock ( sk ) ;
switch ( optname ) {
case SO_IPRMDATA_MSG :
if ( val )
iucv - > flags | = IUCV_IPRMDATA ;
else
iucv - > flags & = ~ IUCV_IPRMDATA ;
break ;
2009-04-22 03:26:27 +04:00
case SO_MSGLIMIT :
switch ( sk - > sk_state ) {
case IUCV_OPEN :
case IUCV_BOUND :
if ( val < 1 | | val > ( u16 ) ( ~ 0 ) )
rc = - EINVAL ;
else
iucv - > msglimit = val ;
break ;
default :
rc = - EINVAL ;
break ;
}
break ;
2009-04-22 03:26:22 +04:00
default :
rc = - ENOPROTOOPT ;
break ;
}
release_sock ( sk ) ;
return rc ;
}
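A caller would raise the limit while the socket is still unconnected (IUCV_OPEN or IUCV_BOUND, as enforced above); a hypothetical userspace helper, assuming SOL_IUCV and SO_MSGLIMIT are made available from the kernel's af_iucv header:

/* hypothetical helper: request a larger per-path message limit; only
 * valid before connect()/listen(), per the state check in
 * iucv_sock_setsockopt() above */
static int set_msglimit(int fd, int limit)
{
	return setsockopt(fd, SOL_IUCV, SO_MSGLIMIT, &limit, sizeof(limit));
}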
static int iucv_sock_getsockopt ( struct socket * sock , int level , int optname ,
char __user * optval , int __user * optlen )
{
struct sock * sk = sock - > sk ;
struct iucv_sock * iucv = iucv_sk ( sk ) ;
int val , len ;
if ( level ! = SOL_IUCV )
return - ENOPROTOOPT ;
if ( get_user ( len , optlen ) )
return - EFAULT ;
if ( len < 0 )
return - EINVAL ;
len = min_t ( unsigned int , len , sizeof ( int ) ) ;
switch ( optname ) {
case SO_IPRMDATA_MSG :
val = ( iucv - > flags & IUCV_IPRMDATA ) ? 1 : 0 ;
break ;
2009-04-22 03:26:27 +04:00
case SO_MSGLIMIT :
lock_sock ( sk ) ;
val = ( iucv - > path ! = NULL ) ? iucv - > path - > msglim /* connected */
: iucv - > msglimit ; /* default */
release_sock ( sk ) ;
break ;
2009-04-22 03:26:22 +04:00
default :
return - ENOPROTOOPT ;
}
if ( put_user ( len , optlen ) )
return - EFAULT ;
if ( copy_to_user ( optval , & val , len ) )
return - EFAULT ;
return 0 ;
}
2007-02-09 00:51:54 +03:00
/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq ( struct iucv_path * path ,
u8 ipvmid [ 8 ] , u8 ipuser [ 16 ] )
{
unsigned char user_data [ 16 ] ;
unsigned char nuser_data [ 16 ] ;
unsigned char src_name [ 8 ] ;
struct hlist_node * node ;
struct sock * sk , * nsk ;
struct iucv_sock * iucv , * niucv ;
int err ;
memcpy ( src_name , ipuser , 8 ) ;
EBCASC ( src_name , 8 ) ;
/* Find out if this path belongs to af_iucv. */
read_lock ( & iucv_sk_list . lock ) ;
iucv = NULL ;
2007-07-15 06:04:25 +04:00
sk = NULL ;
2007-02-09 00:51:54 +03:00
sk_for_each ( sk , node , & iucv_sk_list . head )
if ( sk - > sk_state = = IUCV_LISTEN & &
! memcmp ( & iucv_sk ( sk ) - > src_name , src_name , 8 ) ) {
/*
* Found a listening socket with
* src_name = = ipuser [ 0 - 7 ] .
*/
iucv = iucv_sk ( sk ) ;
break ;
}
read_unlock ( & iucv_sk_list . lock ) ;
if ( ! iucv )
/* No socket found, not one of our paths. */
return - EINVAL ;
bh_lock_sock ( sk ) ;
/* Check if parent socket is listening */
low_nmcpy ( user_data , iucv - > src_name ) ;
high_nmcpy ( user_data , iucv - > dst_name ) ;
ASCEBC ( user_data , sizeof ( user_data ) ) ;
if ( sk - > sk_state ! = IUCV_LISTEN ) {
2011-08-08 05:33:51 +04:00
err = pr_iucv - > path_sever ( path , user_data ) ;
2009-01-06 05:08:23 +03:00
iucv_path_free ( path ) ;
2007-02-09 00:51:54 +03:00
goto fail ;
}
/* Check for backlog size */
if ( sk_acceptq_is_full ( sk ) ) {
2011-08-08 05:33:51 +04:00
err = pr_iucv - > path_sever ( path , user_data ) ;
2009-01-06 05:08:23 +03:00
iucv_path_free ( path ) ;
2007-02-09 00:51:54 +03:00
goto fail ;
}
/* Create the new socket */
2009-04-22 03:26:25 +04:00
nsk = iucv_sock_alloc ( NULL , sk - > sk_type , GFP_ATOMIC ) ;
2007-05-04 23:23:27 +04:00
if ( ! nsk ) {
2011-08-08 05:33:51 +04:00
err = pr_iucv - > path_sever ( path , user_data ) ;
2009-01-06 05:08:23 +03:00
iucv_path_free ( path ) ;
2007-02-09 00:51:54 +03:00
goto fail ;
}
niucv = iucv_sk ( nsk ) ;
iucv_sock_init ( nsk , sk ) ;
/* Set the new iucv_sock */
memcpy ( niucv - > dst_name , ipuser + 8 , 8 ) ;
EBCASC ( niucv - > dst_name , 8 ) ;
memcpy ( niucv - > dst_user_id , ipvmid , 8 ) ;
memcpy ( niucv - > src_name , iucv - > src_name , 8 ) ;
memcpy ( niucv - > src_user_id , iucv - > src_user_id , 8 ) ;
niucv - > path = path ;
/* Call iucv_accept */
high_nmcpy ( nuser_data , ipuser + 8 ) ;
memcpy ( nuser_data + 8 , niucv - > src_name , 8 ) ;
ASCEBC ( nuser_data + 8 , 8 ) ;
2009-04-22 03:26:27 +04:00
/* set message limit for path based on msglimit of accepting socket */
niucv - > msglimit = iucv - > msglimit ;
path - > msglim = iucv - > msglimit ;
2011-08-08 05:33:51 +04:00
err = pr_iucv - > path_accept ( path , & af_iucv_handler , nuser_data , nsk ) ;
2007-05-04 23:23:27 +04:00
if ( err ) {
2011-08-08 05:33:51 +04:00
err = pr_iucv - > path_sever ( path , user_data ) ;
2009-01-06 05:08:23 +03:00
iucv_path_free ( path ) ;
iucv_sock_kill ( nsk ) ;
2007-02-09 00:51:54 +03:00
goto fail ;
}
iucv_accept_enqueue ( sk , nsk ) ;
/* Wake up accept */
nsk - > sk_state = IUCV_CONNECTED ;
sk - > sk_data_ready ( sk , 1 ) ;
err = 0 ;
fail :
bh_unlock_sock ( sk ) ;
return 0 ;
}
static void iucv_callback_connack ( struct iucv_path * path , u8 ipuser [ 16 ] )
{
struct sock * sk = path - > private ;
sk - > sk_state = IUCV_CONNECTED ;
sk - > sk_state_change ( sk ) ;
}
static void iucv_callback_rx ( struct iucv_path * path , struct iucv_message * msg )
{
struct sock * sk = path - > private ;
2007-05-04 23:22:07 +04:00
struct iucv_sock * iucv = iucv_sk ( sk ) ;
2007-10-08 13:03:31 +04:00
struct sk_buff * skb ;
struct sock_msg_q * save_msg ;
int len ;
2007-05-04 23:22:07 +04:00
2009-04-21 10:04:22 +04:00
if ( sk - > sk_shutdown & RCV_SHUTDOWN ) {
2011-08-08 05:33:51 +04:00
pr_iucv - > message_reject ( path , msg ) ;
2007-02-09 00:51:54 +03:00
return ;
2009-04-21 10:04:22 +04:00
}
2007-02-09 00:51:54 +03:00
2009-04-21 10:04:24 +04:00
spin_lock ( & iucv - > message_q . lock ) ;
2007-02-09 00:51:54 +03:00
2007-10-08 13:03:31 +04:00
if ( ! list_empty ( & iucv - > message_q . list ) | |
! skb_queue_empty ( & iucv - > backlog_skb_q ) )
goto save_message ;
len = atomic_read ( & sk - > sk_rmem_alloc ) ;
2011-10-13 11:28:54 +04:00
len + = SKB_TRUESIZE ( iucv_msg_length ( msg ) ) ;
2007-10-08 13:03:31 +04:00
if ( len > sk - > sk_rcvbuf )
goto save_message ;
2009-04-22 03:26:23 +04:00
skb = alloc_skb ( iucv_msg_length ( msg ) , GFP_ATOMIC | GFP_DMA ) ;
2007-10-08 13:03:31 +04:00
if ( ! skb )
goto save_message ;
2007-02-09 00:51:54 +03:00
2007-10-08 13:03:31 +04:00
iucv_process_message ( sk , skb , path , msg ) ;
2009-04-21 10:04:24 +04:00
goto out_unlock ;
2007-02-09 00:51:54 +03:00
2007-10-08 13:03:31 +04:00
save_message :
save_msg = kzalloc ( sizeof ( struct sock_msg_q ) , GFP_ATOMIC | GFP_DMA ) ;
2008-02-08 05:07:19 +03:00
if ( ! save_msg )
2010-05-26 09:56:48 +04:00
goto out_unlock ;
2007-10-08 13:03:31 +04:00
save_msg - > path = path ;
save_msg - > msg = * msg ;
2007-02-09 00:51:54 +03:00
2007-10-08 13:03:31 +04:00
list_add_tail ( & save_msg - > list , & iucv - > message_q . list ) ;
2009-04-21 10:04:24 +04:00
out_unlock :
2007-10-08 13:03:31 +04:00
spin_unlock ( & iucv - > message_q . lock ) ;
2007-02-09 00:51:54 +03:00
}
static void iucv_callback_txdone ( struct iucv_path * path ,
struct iucv_message * msg )
{
struct sock * sk = path - > private ;
2008-02-08 05:07:44 +03:00
struct sk_buff * this = NULL ;
2007-02-09 00:51:54 +03:00
struct sk_buff_head * list = & iucv_sk ( sk ) - > send_skb_q ;
struct sk_buff * list_skb = list - > next ;
unsigned long flags ;
2008-02-08 05:07:44 +03:00
if ( ! skb_queue_empty ( list ) ) {
2007-05-04 23:22:07 +04:00
spin_lock_irqsave ( & list - > lock , flags ) ;
2008-02-08 05:07:44 +03:00
while ( list_skb ! = ( struct sk_buff * ) list ) {
2009-04-22 03:26:24 +04:00
if ( ! memcmp ( & msg - > tag , CB_TAG ( list_skb ) , CB_TAG_LEN ) ) {
2008-02-08 05:07:44 +03:00
this = list_skb ;
break ;
}
2007-05-04 23:22:07 +04:00
list_skb = list_skb - > next ;
2008-02-08 05:07:44 +03:00
}
if ( this )
__skb_unlink ( this , list ) ;
2007-05-04 23:22:07 +04:00
spin_unlock_irqrestore ( & list - > lock , flags ) ;
2007-02-09 00:51:54 +03:00
2009-06-18 01:54:48 +04:00
if ( this ) {
kfree_skb ( this ) ;
/* wake up any process waiting for sending */
iucv_sock_wake_msglim ( sk ) ;
}
2007-05-04 23:22:07 +04:00
}
2008-07-14 11:59:29 +04:00
BUG_ON ( ! this ) ;
2007-02-09 00:51:54 +03:00
2007-05-04 23:23:27 +04:00
if ( sk - > sk_state = = IUCV_CLOSING ) {
2007-05-04 23:22:07 +04:00
if ( skb_queue_empty ( & iucv_sk ( sk ) - > send_skb_q ) ) {
sk - > sk_state = IUCV_CLOSED ;
sk - > sk_state_change ( sk ) ;
}
}
2007-02-09 00:51:54 +03:00
}
static void iucv_callback_connrej ( struct iucv_path * path , u8 ipuser [ 16 ] )
{
struct sock * sk = path - > private ;
if ( ! list_empty ( & iucv_sk ( sk ) - > accept_q ) )
sk - > sk_state = IUCV_SEVERED ;
else
sk - > sk_state = IUCV_DISCONN ;
sk - > sk_state_change ( sk ) ;
}
2009-04-22 03:26:21 +04:00
/* called if the other communication side shuts down its RECV direction;
* in turn , the callback sets SEND_SHUTDOWN to disable sending of data .
*/
static void iucv_callback_shutdown ( struct iucv_path * path , u8 ipuser [ 16 ] )
{
struct sock * sk = path - > private ;
bh_lock_sock ( sk ) ;
if ( sk - > sk_state ! = IUCV_CLOSED ) {
sk - > sk_shutdown | = SEND_SHUTDOWN ;
sk - > sk_state_change ( sk ) ;
}
bh_unlock_sock ( sk ) ;
}
2011-08-08 05:33:54 +04:00
/***************** HiperSockets transport callbacks ********************/
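/* helper for the control-frame callbacks below: turn a just-received
 * transport header into a reply frame by converting the (previously
 * EBCASC'ed) addresses back to EBCDIC, swapping source and destination,
 * and prepending a cleared ethernet header for retransmission */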
static void afiucv_swap_src_dest ( struct sk_buff * skb )
{
struct af_iucv_trans_hdr * trans_hdr =
( struct af_iucv_trans_hdr * ) skb - > data ;
char tmpID [ 8 ] ;
char tmpName [ 8 ] ;
ASCEBC ( trans_hdr - > destUserID , sizeof ( trans_hdr - > destUserID ) ) ;
ASCEBC ( trans_hdr - > destAppName , sizeof ( trans_hdr - > destAppName ) ) ;
ASCEBC ( trans_hdr - > srcUserID , sizeof ( trans_hdr - > srcUserID ) ) ;
ASCEBC ( trans_hdr - > srcAppName , sizeof ( trans_hdr - > srcAppName ) ) ;
memcpy ( tmpID , trans_hdr - > srcUserID , 8 ) ;
memcpy ( tmpName , trans_hdr - > srcAppName , 8 ) ;
memcpy ( trans_hdr - > srcUserID , trans_hdr - > destUserID , 8 ) ;
memcpy ( trans_hdr - > srcAppName , trans_hdr - > destAppName , 8 ) ;
memcpy ( trans_hdr - > destUserID , tmpID , 8 ) ;
memcpy ( trans_hdr - > destAppName , tmpName , 8 ) ;
skb_push ( skb , ETH_HLEN ) ;
memset ( skb - > data , 0 , ETH_HLEN ) ;
}
/**
* afiucv_hs_callback_syn - react on received SYN
* */
static int afiucv_hs_callback_syn ( struct sock * sk , struct sk_buff * skb )
{
struct sock * nsk ;
struct iucv_sock * iucv , * niucv ;
struct af_iucv_trans_hdr * trans_hdr ;
int err ;
iucv = iucv_sk ( sk ) ;
trans_hdr = ( struct af_iucv_trans_hdr * ) skb - > data ;
if ( ! iucv ) {
/* no sock - connection refused */
afiucv_swap_src_dest ( skb ) ;
trans_hdr - > flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN ;
err = dev_queue_xmit ( skb ) ;
goto out ;
}
nsk = iucv_sock_alloc ( NULL , sk - > sk_type , GFP_ATOMIC ) ;
bh_lock_sock ( sk ) ;
if ( ( sk - > sk_state ! = IUCV_LISTEN ) | |
sk_acceptq_is_full ( sk ) | |
! nsk ) {
/* error on server socket - connection refused */
if ( nsk )
sk_free ( nsk ) ;
afiucv_swap_src_dest ( skb ) ;
trans_hdr - > flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN ;
err = dev_queue_xmit ( skb ) ;
bh_unlock_sock ( sk ) ;
goto out ;
}
niucv = iucv_sk ( nsk ) ;
iucv_sock_init ( nsk , sk ) ;
niucv - > transport = AF_IUCV_TRANS_HIPER ;
niucv - > msglimit = iucv - > msglimit ;
if ( ! trans_hdr - > window )
niucv - > msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT ;
else
niucv - > msglimit_peer = trans_hdr - > window ;
memcpy ( niucv - > dst_name , trans_hdr - > srcAppName , 8 ) ;
memcpy ( niucv - > dst_user_id , trans_hdr - > srcUserID , 8 ) ;
memcpy ( niucv - > src_name , iucv - > src_name , 8 ) ;
memcpy ( niucv - > src_user_id , iucv - > src_user_id , 8 ) ;
nsk - > sk_bound_dev_if = sk - > sk_bound_dev_if ;
afiucv_swap_src_dest ( skb ) ;
trans_hdr - > flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK ;
trans_hdr - > window = niucv - > msglimit ;
/* the connection is established once the receiver acks the xmit */
err = dev_queue_xmit ( skb ) ;
if ( ! err ) {
iucv_accept_enqueue ( sk , nsk ) ;
nsk - > sk_state = IUCV_CONNECTED ;
sk - > sk_data_ready ( sk , 1 ) ;
} else
iucv_sock_kill ( nsk ) ;
bh_unlock_sock ( sk ) ;
out :
return NET_RX_SUCCESS ;
}
/**
* afiucv_hs_callback_synack ( ) - react on received SYN - ACK
* */
static int afiucv_hs_callback_synack ( struct sock * sk , struct sk_buff * skb )
{
struct iucv_sock * iucv = iucv_sk ( sk ) ;
struct af_iucv_trans_hdr * trans_hdr =
( struct af_iucv_trans_hdr * ) skb - > data ;
if ( ! iucv )
goto out ;
if ( sk - > sk_state ! = IUCV_BOUND )
goto out ;
bh_lock_sock ( sk ) ;
iucv - > msglimit_peer = trans_hdr - > window ;
sk - > sk_state = IUCV_CONNECTED ;
sk - > sk_state_change ( sk ) ;
bh_unlock_sock ( sk ) ;
out :
kfree_skb ( skb ) ;
return NET_RX_SUCCESS ;
}
/**
* afiucv_hs_callback_synfin ( ) - react on received SYN_FIN
* */
static int afiucv_hs_callback_synfin ( struct sock * sk , struct sk_buff * skb )
{
struct iucv_sock * iucv = iucv_sk ( sk ) ;
if ( ! iucv )
goto out ;
if ( sk - > sk_state ! = IUCV_BOUND )
goto out ;
bh_lock_sock ( sk ) ;
sk - > sk_state = IUCV_DISCONN ;
sk - > sk_state_change ( sk ) ;
bh_unlock_sock ( sk ) ;
out :
kfree_skb ( skb ) ;
return NET_RX_SUCCESS ;
}
/**
* afiucv_hs_callback_fin ( ) - react on received FIN
* */
static int afiucv_hs_callback_fin ( struct sock * sk , struct sk_buff * skb )
{
struct iucv_sock * iucv = iucv_sk ( sk ) ;
/* other end of connection closed */
if ( iucv ) {
bh_lock_sock ( sk ) ;
if ( ! list_empty ( & iucv - > accept_q ) )
sk - > sk_state = IUCV_SEVERED ;
else
sk - > sk_state = IUCV_DISCONN ;
sk - > sk_state_change ( sk ) ;
bh_unlock_sock ( sk ) ;
}
kfree_skb ( skb ) ;
return NET_RX_SUCCESS ;
}
/**
* afiucv_hs_callback_win ( ) - react on received WIN
* */
static int afiucv_hs_callback_win ( struct sock * sk , struct sk_buff * skb )
{
struct iucv_sock * iucv = iucv_sk ( sk ) ;
struct af_iucv_trans_hdr * trans_hdr =
( struct af_iucv_trans_hdr * ) skb - > data ;
if ( ! iucv )
return NET_RX_SUCCESS ;
if ( sk - > sk_state ! = IUCV_CONNECTED )
return NET_RX_SUCCESS ;
atomic_sub ( trans_hdr - > window , & iucv - > msg_sent ) ;
iucv_sock_wake_msglim ( sk ) ;
return NET_RX_SUCCESS ;
}
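/* note on flow control: the peer emits a WIN frame once it has consumed
 * about half of its message limit (see the WIN transmission in
 * iucv_sock_recvmsg()); subtracting trans_hdr->window from msg_sent
 * reopens the send window and wakes senders blocked on the limit */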
/**
* afiucv_hs_callback_rx ( ) - react on received data
* */
static int afiucv_hs_callback_rx ( struct sock * sk , struct sk_buff * skb )
{
struct iucv_sock * iucv = iucv_sk ( sk ) ;
if ( ! iucv ) {
kfree_skb ( skb ) ;
return NET_RX_SUCCESS ;
}
if ( sk - > sk_state ! = IUCV_CONNECTED ) {
kfree_skb ( skb ) ;
return NET_RX_SUCCESS ;
}
/* write stuff from iucv_msg to skb cb */
if ( skb - > len < = sizeof ( struct af_iucv_trans_hdr ) ) {
kfree_skb ( skb ) ;
return NET_RX_SUCCESS ;
}
skb_pull ( skb , sizeof ( struct af_iucv_trans_hdr ) ) ;
skb_reset_transport_header ( skb ) ;
skb_reset_network_header ( skb ) ;
spin_lock ( & iucv - > message_q . lock ) ;
if ( skb_queue_empty ( & iucv - > backlog_skb_q ) ) {
if ( sock_queue_rcv_skb ( sk , skb ) ) {
/* handle rcv queue full */
skb_queue_tail ( & iucv - > backlog_skb_q , skb ) ;
}
} else
skb_queue_tail ( & iucv_sk ( sk ) - > backlog_skb_q , skb ) ;
spin_unlock ( & iucv - > message_q . lock ) ;
return NET_RX_SUCCESS ;
}
/**
* afiucv_hs_rcv ( ) - base function for arriving data through HiperSockets
* transport
* called from netif RX softirq
* */
static int afiucv_hs_rcv ( struct sk_buff * skb , struct net_device * dev ,
struct packet_type * pt , struct net_device * orig_dev )
{
struct hlist_node * node ;
struct sock * sk ;
struct iucv_sock * iucv ;
struct af_iucv_trans_hdr * trans_hdr ;
char nullstring [ 8 ] ;
int err = 0 ;
skb_pull ( skb , ETH_HLEN ) ;
trans_hdr = ( struct af_iucv_trans_hdr * ) skb - > data ;
EBCASC ( trans_hdr - > destAppName , sizeof ( trans_hdr - > destAppName ) ) ;
EBCASC ( trans_hdr - > destUserID , sizeof ( trans_hdr - > destUserID ) ) ;
EBCASC ( trans_hdr - > srcAppName , sizeof ( trans_hdr - > srcAppName ) ) ;
EBCASC ( trans_hdr - > srcUserID , sizeof ( trans_hdr - > srcUserID ) ) ;
memset ( nullstring , 0 , sizeof ( nullstring ) ) ;
iucv = NULL ;
sk = NULL ;
read_lock ( & iucv_sk_list . lock ) ;
sk_for_each ( sk , node , & iucv_sk_list . head ) {
if ( trans_hdr - > flags = = AF_IUCV_FLAG_SYN ) {
if ( ( ! memcmp ( & iucv_sk ( sk ) - > src_name ,
trans_hdr - > destAppName , 8 ) ) & &
( ! memcmp ( & iucv_sk ( sk ) - > src_user_id ,
trans_hdr - > destUserID , 8 ) ) & &
( ! memcmp ( & iucv_sk ( sk ) - > dst_name , nullstring , 8 ) ) & &
( ! memcmp ( & iucv_sk ( sk ) - > dst_user_id ,
nullstring , 8 ) ) ) {
iucv = iucv_sk ( sk ) ;
break ;
}
} else {
if ( ( ! memcmp ( & iucv_sk ( sk ) - > src_name ,
trans_hdr - > destAppName , 8 ) ) & &
( ! memcmp ( & iucv_sk ( sk ) - > src_user_id ,
trans_hdr - > destUserID , 8 ) ) & &
( ! memcmp ( & iucv_sk ( sk ) - > dst_name ,
trans_hdr - > srcAppName , 8 ) ) & &
( ! memcmp ( & iucv_sk ( sk ) - > dst_user_id ,
trans_hdr - > srcUserID , 8 ) ) ) {
iucv = iucv_sk ( sk ) ;
break ;
}
}
}
read_unlock ( & iucv_sk_list . lock ) ;
if ( ! iucv )
sk = NULL ;
/* no matching socket was found:
 * how should we reply when there is no socket?
 * 1) send without a socket, with no send rc checking?
 * 2) introduce a default socket to handle these cases?
 * SYN -> send SYN|ACK in the good case, SYN|FIN in the bad case
 * data -> send FIN
 * SYN|ACK, SYN|FIN, FIN -> no action? */
switch ( trans_hdr - > flags ) {
case AF_IUCV_FLAG_SYN :
/* connect request */
err = afiucv_hs_callback_syn ( sk , skb ) ;
break ;
case ( AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK ) :
/* connect request confirmed */
err = afiucv_hs_callback_synack ( sk , skb ) ;
break ;
case ( AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN ) :
/* connect request refused */
err = afiucv_hs_callback_synfin ( sk , skb ) ;
break ;
case ( AF_IUCV_FLAG_FIN ) :
/* close request */
err = afiucv_hs_callback_fin ( sk , skb ) ;
break ;
case ( AF_IUCV_FLAG_WIN ) :
err = afiucv_hs_callback_win ( sk , skb ) ;
if ( skb - > len > sizeof ( struct af_iucv_trans_hdr ) )
err = afiucv_hs_callback_rx ( sk , skb ) ;
else
kfree_skb ( skb ) ; /* must use kfree_skb(), not kfree(), to free an skb */
break ;
case 0 :
/* plain data frame */
2011-12-20 02:56:27 +04:00
memcpy ( CB_TRGCLS ( skb ) , & trans_hdr - > iucv_hdr . class ,
CB_TRGCLS_LEN ) ;
2011-08-08 05:33:54 +04:00
err = afiucv_hs_callback_rx ( sk , skb ) ;
break ;
default :
;
}
return err ;
}
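/* summary of the HiperSockets handshake dispatched above:
 *   SYN        -> peer requests a connection; answered with SYN|ACK
 *                 (accepted) or SYN|FIN (refused)
 *   SYN|ACK    -> connect request confirmed, socket goes CONNECTED
 *   SYN|FIN    -> connect request refused
 *   FIN        -> peer closed the connection
 *   WIN        -> flow-control credit; may piggyback data
 *   flags == 0 -> plain data frame */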
/**
* afiucv_hs_callback_txnotify ( ) - handle send notifications from HiperSockets
* transport
* */
static void afiucv_hs_callback_txnotify ( struct sk_buff * skb ,
enum iucv_tx_notify n )
{
struct sock * isk = skb - > sk ;
struct sock * sk = NULL ;
struct iucv_sock * iucv = NULL ;
struct sk_buff_head * list ;
struct sk_buff * list_skb ;
struct sk_buff * this = NULL ;
unsigned long flags ;
struct hlist_node * node ;
read_lock ( & iucv_sk_list . lock ) ;
sk_for_each ( sk , node , & iucv_sk_list . head )
if ( sk = = isk ) {
iucv = iucv_sk ( sk ) ;
break ;
}
read_unlock ( & iucv_sk_list . lock ) ;
if ( ! iucv )
return ;
bh_lock_sock ( sk ) ;
list = & iucv - > send_skb_q ;
list_skb = list - > next ;
if ( skb_queue_empty ( list ) )
goto out_unlock ;
spin_lock_irqsave ( & list - > lock , flags ) ;
while ( list_skb ! = ( struct sk_buff * ) list ) {
if ( skb_shinfo ( list_skb ) = = skb_shinfo ( skb ) ) {
this = list_skb ;
switch ( n ) {
case TX_NOTIFY_OK :
__skb_unlink ( this , list ) ;
iucv_sock_wake_msglim ( sk ) ;
2011-12-20 02:56:29 +04:00
dev_put ( this - > dev ) ;
2011-08-08 05:33:54 +04:00
kfree_skb ( this ) ;
break ;
case TX_NOTIFY_PENDING :
atomic_inc ( & iucv - > pendings ) ;
break ;
case TX_NOTIFY_DELAYED_OK :
__skb_unlink ( this , list ) ;
atomic_dec ( & iucv - > pendings ) ;
if ( atomic_read ( & iucv - > pendings ) < = 0 )
iucv_sock_wake_msglim ( sk ) ;
2011-12-20 02:56:29 +04:00
dev_put ( this - > dev ) ;
2011-08-08 05:33:54 +04:00
kfree_skb ( this ) ;
break ;
case TX_NOTIFY_UNREACHABLE :
case TX_NOTIFY_DELAYED_UNREACHABLE :
case TX_NOTIFY_TPQFULL : /* not yet used */
case TX_NOTIFY_GENERALERROR :
case TX_NOTIFY_DELAYED_GENERALERROR :
__skb_unlink ( this , list ) ;
2011-12-20 02:56:29 +04:00
dev_put ( this - > dev ) ;
2011-08-08 05:33:54 +04:00
kfree_skb ( this ) ;
if ( ! list_empty ( & iucv - > accept_q ) )
sk - > sk_state = IUCV_SEVERED ;
else
sk - > sk_state = IUCV_DISCONN ;
sk - > sk_state_change ( sk ) ;
break ;
}
break ;
}
list_skb = list_skb - > next ;
}
spin_unlock_irqrestore ( & list - > lock , flags ) ;
2011-12-20 02:56:28 +04:00
if ( sk - > sk_state = = IUCV_CLOSING ) {
if ( skb_queue_empty ( & iucv_sk ( sk ) - > send_skb_q ) ) {
sk - > sk_state = IUCV_CLOSED ;
sk - > sk_state_change ( sk ) ;
}
}
2011-08-08 05:33:54 +04:00
out_unlock :
bh_unlock_sock ( sk ) ;
}
2009-09-14 16:23:23 +04:00
static const struct proto_ops iucv_sock_ops = {
2007-02-09 00:51:54 +03:00
. family = PF_IUCV ,
. owner = THIS_MODULE ,
. release = iucv_sock_release ,
. bind = iucv_sock_bind ,
. connect = iucv_sock_connect ,
. listen = iucv_sock_listen ,
. accept = iucv_sock_accept ,
. getname = iucv_sock_getname ,
. sendmsg = iucv_sock_sendmsg ,
. recvmsg = iucv_sock_recvmsg ,
. poll = iucv_sock_poll ,
. ioctl = sock_no_ioctl ,
. mmap = sock_no_mmap ,
. socketpair = sock_no_socketpair ,
. shutdown = iucv_sock_shutdown ,
2009-04-22 03:26:22 +04:00
. setsockopt = iucv_sock_setsockopt ,
. getsockopt = iucv_sock_getsockopt ,
2007-02-09 00:51:54 +03:00
} ;
2009-10-05 09:58:39 +04:00
static const struct net_proto_family iucv_sock_family_ops = {
2007-02-09 00:51:54 +03:00
. family = AF_IUCV ,
. owner = THIS_MODULE ,
. create = iucv_sock_create ,
} ;
2011-08-08 05:33:54 +04:00
static struct packet_type iucv_packet_type = {
. type = cpu_to_be16 ( ETH_P_AF_IUCV ) ,
. func = afiucv_hs_rcv ,
} ;
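/* inbound ETH_P_AF_IUCV frames from HiperSockets devices are steered
 * into afiucv_hs_rcv() through this packet type (dev_add_pack below) */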
static int afiucv_iucv_init ( void )
2011-08-08 05:33:51 +04:00
{
int err ;
err = pr_iucv - > iucv_register ( & af_iucv_handler , 0 ) ;
if ( err )
goto out ;
/* establish dummy device */
af_iucv_driver . bus = pr_iucv - > bus ;
err = driver_register ( & af_iucv_driver ) ;
if ( err )
goto out_iucv ;
af_iucv_dev = kzalloc ( sizeof ( struct device ) , GFP_KERNEL ) ;
if ( ! af_iucv_dev ) {
err = - ENOMEM ;
goto out_driver ;
}
dev_set_name ( af_iucv_dev , " af_iucv " ) ;
af_iucv_dev - > bus = pr_iucv - > bus ;
af_iucv_dev - > parent = pr_iucv - > root ;
af_iucv_dev - > release = ( void ( * ) ( struct device * ) ) kfree ;
af_iucv_dev - > driver = & af_iucv_driver ;
err = device_register ( af_iucv_dev ) ;
if ( err )
goto out_driver ;
return 0 ;
out_driver :
driver_unregister ( & af_iucv_driver ) ;
out_iucv :
pr_iucv - > iucv_unregister ( & af_iucv_handler , 0 ) ;
out :
return err ;
}
2007-05-04 23:23:27 +04:00
static int __init afiucv_init ( void )
2007-02-09 00:51:54 +03:00
{
int err ;
2011-08-08 05:33:54 +04:00
if ( MACHINE_IS_VM ) {
cpcmd ( " QUERY USERID " , iucv_userid , sizeof ( iucv_userid ) , & err ) ;
if ( unlikely ( err ) ) {
WARN_ON ( err ) ;
err = - EPROTONOSUPPORT ;
goto out ;
}
2007-02-09 00:51:54 +03:00
2011-08-08 05:33:54 +04:00
pr_iucv = try_then_request_module ( symbol_get ( iucv_if ) , " iucv " ) ;
if ( ! pr_iucv ) {
printk ( KERN_WARNING " iucv_if lookup failed \n " ) ;
memset ( & iucv_userid , 0 , sizeof ( iucv_userid ) ) ;
}
} else {
memset ( & iucv_userid , 0 , sizeof ( iucv_userid ) ) ;
pr_iucv = NULL ;
2011-08-08 05:33:51 +04:00
}
2007-02-09 00:51:54 +03:00
err = proto_register ( & iucv_proto , 0 ) ;
if ( err )
2011-08-08 05:33:51 +04:00
goto out ;
2007-02-09 00:51:54 +03:00
err = sock_register ( & iucv_sock_family_ops ) ;
if ( err )
goto out_proto ;
2011-08-08 05:33:51 +04:00
2011-08-08 05:33:54 +04:00
if ( pr_iucv ) {
err = afiucv_iucv_init ( ) ;
if ( err )
goto out_sock ;
}
dev_add_pack ( & iucv_packet_type ) ;
2007-02-09 00:51:54 +03:00
return 0 ;
2009-06-16 12:30:44 +04:00
out_sock :
sock_unregister ( PF_IUCV ) ;
2007-02-09 00:51:54 +03:00
out_proto :
proto_unregister ( & iucv_proto ) ;
out :
2011-08-08 05:33:51 +04:00
if ( pr_iucv )
symbol_put ( iucv_if ) ;
2007-02-09 00:51:54 +03:00
return err ;
}
static void __exit afiucv_exit ( void )
{
2011-08-08 05:33:54 +04:00
if ( pr_iucv ) {
device_unregister ( af_iucv_dev ) ;
driver_unregister ( & af_iucv_driver ) ;
pr_iucv - > iucv_unregister ( & af_iucv_handler , 0 ) ;
symbol_put ( iucv_if ) ;
}
dev_remove_pack ( & iucv_packet_type ) ;
2007-02-09 00:51:54 +03:00
sock_unregister ( PF_IUCV ) ;
proto_unregister ( & iucv_proto ) ;
}
module_init ( afiucv_init ) ;
module_exit ( afiucv_exit ) ;
MODULE_AUTHOR ( " Jennifer Hunt <jenhunt@us.ibm.com> " ) ;
MODULE_DESCRIPTION ( " IUCV Sockets ver " VERSION ) ;
MODULE_VERSION ( VERSION ) ;
MODULE_LICENSE ( " GPL " ) ;
MODULE_ALIAS_NETPROTO ( PF_IUCV ) ;
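Because of MODULE_ALIAS_NETPROTO(PF_IUCV), the module is demand-loaded the first time userspace creates an AF_IUCV socket. A minimal, hypothetical client sketch (the sockaddr_iucv layout mirrors the kernel's af_iucv header; address fields are blank-padded; error handling trimmed):

#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_IUCV
#define AF_IUCV 32			/* from linux/socket.h */
#endif

/* mirrors struct sockaddr_iucv from the kernel af_iucv header */
struct sockaddr_iucv {
	sa_family_t	siucv_family;
	unsigned short	siucv_port;	/* reserved */
	unsigned int	siucv_addr;	/* reserved */
	char		siucv_nodeid[8];	/* reserved */
	char		siucv_user_id[8];	/* z/VM guest user id */
	char		siucv_name[8];	/* application name */
};

/* hypothetical helper: connect to an AF_IUCV peer by user id and
 * application name */
int iucv_connect(const char *userid, const char *appname)
{
	struct sockaddr_iucv addr;
	int fd = socket(AF_IUCV, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	memset(&addr, 0, sizeof(addr));
	addr.siucv_family = AF_IUCV;
	memset(addr.siucv_user_id, ' ', 8);	/* fields are blank-padded */
	memcpy(addr.siucv_user_id, userid, strnlen(userid, 8));
	memset(addr.siucv_name, ' ', 8);
	memcpy(addr.siucv_name, appname, strnlen(appname, 8));
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}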
2011-08-08 05:33:54 +04:00