2010-03-30 17:56:26 +04:00
/*
* Copyright ( C ) ST - Ericsson AB 2010
* Author : Sjur Brendeland sjur . brandeland @ stericsson . com
* License terms : GNU General Public License ( GPL ) version 2
*/
2010-09-06 01:31:11 +04:00
# define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
2010-03-30 17:56:26 +04:00
# include <linux/fs.h>
# include <linux/init.h>
# include <linux/module.h>
# include <linux/sched.h>
# include <linux/spinlock.h>
# include <linux/mutex.h>
# include <linux/list.h>
# include <linux/wait.h>
# include <linux/poll.h>
# include <linux/tcp.h>
# include <linux/uaccess.h>
2010-04-28 12:54:38 +04:00
# include <linux/debugfs.h>
2010-03-30 17:56:26 +04:00
# include <linux/caif/caif_socket.h>
2012-04-12 12:27:24 +04:00
# include <linux/pkt_sched.h>
2010-04-28 12:54:38 +04:00
# include <net/sock.h>
# include <net/tcp_states.h>
2010-03-30 17:56:26 +04:00
# include <net/caif/caif_layer.h>
# include <net/caif/caif_dev.h>
# include <net/caif/cfpkt.h>
MODULE_LICENSE ( " GPL " ) ;
2010-04-28 12:54:38 +04:00
MODULE_ALIAS_NETPROTO ( AF_CAIF ) ;
/*
 * CAIF state is re-using the TCP socket states.
 * caif_states stored in sk_state reflect the state as reported by
 * the CAIF stack, while sk_socket->state is the state of the socket.
 */
enum caif_states {
	CAIF_CONNECTED		= TCP_ESTABLISHED,
	CAIF_CONNECTING		= TCP_SYN_SENT,
	CAIF_DISCONNECTED	= TCP_CLOSE
};

/*
 * Bit indices into caifsock->flow_state, used with test_bit()/set_bit().
 * (Bits 1 and 2 are used; bit 0 is unused.)
 */
#define TX_FLOW_ON_BIT	1
#define RX_FLOW_ON_BIT	2
2010-03-30 17:56:26 +04:00
/* Per-socket CAIF state, wrapping the generic struct sock. */
struct caifsock {
	struct sock sk; /* must be first member */
	struct cflayer layer;			/* this socket's layer in the CAIF stack */
	u32 flow_state;				/* TX/RX flow-control bits, see *_FLOW_ON_BIT */
	struct caif_connect_request conn_req;	/* parameters for connect, set via setsockopt */
	struct mutex readlock;			/* serializes stream-mode readers */
	struct dentry *debugfs_socket_dir;	/* per-socket debugfs directory, may be NULL */
	/* Link-layer framing limits; filled in by caif_connect(). */
	int headroom, tailroom, maxframe;
};
2010-04-28 12:54:38 +04:00
static int rx_flow_is_on ( struct caifsock * cf_sk )
{
return test_bit ( RX_FLOW_ON_BIT ,
( void * ) & cf_sk - > flow_state ) ;
}
static int tx_flow_is_on ( struct caifsock * cf_sk )
{
return test_bit ( TX_FLOW_ON_BIT ,
( void * ) & cf_sk - > flow_state ) ;
}
2010-03-30 17:56:26 +04:00
2010-04-28 12:54:38 +04:00
static void set_rx_flow_off ( struct caifsock * cf_sk )
2010-03-30 17:56:26 +04:00
{
2010-04-28 12:54:38 +04:00
clear_bit ( RX_FLOW_ON_BIT ,
( void * ) & cf_sk - > flow_state ) ;
}
2010-03-30 17:56:26 +04:00
2010-04-28 12:54:38 +04:00
static void set_rx_flow_on ( struct caifsock * cf_sk )
{
set_bit ( RX_FLOW_ON_BIT ,
( void * ) & cf_sk - > flow_state ) ;
}
2010-03-30 17:56:26 +04:00
2010-04-28 12:54:38 +04:00
static void set_tx_flow_off ( struct caifsock * cf_sk )
{
clear_bit ( TX_FLOW_ON_BIT ,
( void * ) & cf_sk - > flow_state ) ;
}
2010-03-30 17:56:26 +04:00
2010-04-28 12:54:38 +04:00
static void set_tx_flow_on ( struct caifsock * cf_sk )
{
set_bit ( TX_FLOW_ON_BIT ,
( void * ) & cf_sk - > flow_state ) ;
}
2010-03-30 17:56:26 +04:00
2010-04-28 12:54:38 +04:00
static void caif_read_lock ( struct sock * sk )
{
struct caifsock * cf_sk ;
cf_sk = container_of ( sk , struct caifsock , sk ) ;
mutex_lock ( & cf_sk - > readlock ) ;
}
2010-03-30 17:56:26 +04:00
2010-04-28 12:54:38 +04:00
static void caif_read_unlock ( struct sock * sk )
{
struct caifsock * cf_sk ;
cf_sk = container_of ( sk , struct caifsock , sk ) ;
mutex_unlock ( & cf_sk - > readlock ) ;
}
2010-03-30 17:56:26 +04:00
2010-05-21 06:16:11 +04:00
static int sk_rcvbuf_lowwater ( struct caifsock * cf_sk )
2010-04-28 12:54:38 +04:00
{
/* A quarter of full buffer is used a low water mark */
return cf_sk - > sk . sk_rcvbuf / 4 ;
}
2010-03-30 17:56:26 +04:00
2010-05-21 06:16:11 +04:00
static void caif_flow_ctrl ( struct sock * sk , int mode )
2010-04-28 12:54:38 +04:00
{
struct caifsock * cf_sk ;
cf_sk = container_of ( sk , struct caifsock , sk ) ;
2010-05-21 06:16:10 +04:00
if ( cf_sk - > layer . dn & & cf_sk - > layer . dn - > modemcmd )
2010-04-28 12:54:38 +04:00
cf_sk - > layer . dn - > modemcmd ( cf_sk - > layer . dn , mode ) ;
}
2010-03-30 17:56:26 +04:00
2010-04-28 12:54:38 +04:00
/*
* Copied from sock . c : sock_queue_rcv_skb ( ) , but changed so packets are
* not dropped , but CAIF is sending flow off instead .
*/
2010-05-21 06:16:11 +04:00
static int caif_queue_rcv_skb ( struct sock * sk , struct sk_buff * skb )
2010-04-28 12:54:38 +04:00
{
int err ;
int skb_len ;
unsigned long flags ;
struct sk_buff_head * list = & sk - > sk_receive_queue ;
struct caifsock * cf_sk = container_of ( sk , struct caifsock , sk ) ;
2010-03-30 17:56:26 +04:00
2010-04-28 12:54:38 +04:00
if ( atomic_read ( & sk - > sk_rmem_alloc ) + skb - > truesize > =
2012-04-15 09:58:06 +04:00
( unsigned int ) sk - > sk_rcvbuf & & rx_flow_is_on ( cf_sk ) ) {
2012-05-14 01:56:26 +04:00
net_dbg_ratelimited ( " sending flow OFF (queue len = %d %d) \n " ,
atomic_read ( & cf_sk - > sk . sk_rmem_alloc ) ,
sk_rcvbuf_lowwater ( cf_sk ) ) ;
2010-04-28 12:54:38 +04:00
set_rx_flow_off ( cf_sk ) ;
2010-05-21 06:16:10 +04:00
caif_flow_ctrl ( sk , CAIF_MODEMCMD_FLOW_OFF_REQ ) ;
2010-04-28 12:54:38 +04:00
}
2010-03-30 17:56:26 +04:00
2010-04-28 12:54:38 +04:00
err = sk_filter ( sk , skb ) ;
if ( err )
return err ;
if ( ! sk_rmem_schedule ( sk , skb - > truesize ) & & rx_flow_is_on ( cf_sk ) ) {
set_rx_flow_off ( cf_sk ) ;
2012-05-14 01:56:26 +04:00
net_dbg_ratelimited ( " sending flow OFF due to rmem_schedule \n " ) ;
2010-05-21 06:16:10 +04:00
caif_flow_ctrl ( sk , CAIF_MODEMCMD_FLOW_OFF_REQ ) ;
2010-04-28 12:54:38 +04:00
}
skb - > dev = NULL ;
skb_set_owner_r ( skb , sk ) ;
/* Cache the SKB length before we tack it onto the receive
* queue . Once it is added it no longer belongs to us and
* may be freed by other threads of control pulling packets
* from the queue .
*/
skb_len = skb - > len ;
spin_lock_irqsave ( & list - > lock , flags ) ;
if ( ! sock_flag ( sk , SOCK_DEAD ) )
__skb_queue_tail ( list , skb ) ;
spin_unlock_irqrestore ( & list - > lock , flags ) ;
if ( ! sock_flag ( sk , SOCK_DEAD ) )
sk - > sk_data_ready ( sk , skb_len ) ;
else
kfree_skb ( skb ) ;
2010-03-30 17:56:26 +04:00
return 0 ;
}
2010-04-28 12:54:38 +04:00
/* Packet Receive Callback function called from CAIF Stack */
static int caif_sktrecv_cb ( struct cflayer * layr , struct cfpkt * pkt )
2010-03-30 17:56:26 +04:00
{
struct caifsock * cf_sk ;
2010-04-28 12:54:38 +04:00
struct sk_buff * skb ;
2010-03-30 17:56:26 +04:00
cf_sk = container_of ( layr , struct caifsock , layer ) ;
2010-04-28 12:54:38 +04:00
skb = cfpkt_tonative ( pkt ) ;
if ( unlikely ( cf_sk - > sk . sk_state ! = CAIF_CONNECTED ) ) {
2011-05-13 06:44:08 +04:00
kfree_skb ( skb ) ;
2010-04-28 12:54:38 +04:00
return 0 ;
}
caif_queue_rcv_skb ( & cf_sk - > sk , skb ) ;
return 0 ;
}
2010-03-30 17:56:26 +04:00
2011-05-13 06:44:04 +04:00
/* Refcount adapter: allow the CAIF stack to pin this socket. */
static void cfsk_hold(struct cflayer *layr)
{
	sock_hold(&container_of(layr, struct caifsock, layer)->sk);
}
/* Refcount adapter: drop the reference taken by cfsk_hold(). */
static void cfsk_put(struct cflayer *layr)
{
	sock_put(&container_of(layr, struct caifsock, layer)->sk);
}
2010-04-28 12:54:38 +04:00
/*
 * Packet Control Callback function called from CAIF.
 * Translates control indications from the CAIF stack into socket state
 * (sk_state, flow bits, sk_err/sk_shutdown) and wakes up waiters via
 * sk_state_change()/sk_error_report(). @phyid is unused here.
 */
static void caif_ctrl_cb(struct cflayer *layr,
			 enum caif_ctrlcmd flow,
			 int phyid)
{
	struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);

	switch (flow) {
	case CAIF_CTRLCMD_FLOW_ON_IND:
		/* OK from modem to start sending again */
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;
	case CAIF_CTRLCMD_FLOW_OFF_IND:
		/* Modem asks us to shut up */
		set_tx_flow_off(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;
	case CAIF_CTRLCMD_INIT_RSP:
		/* We're now connected */
		caif_client_register_refcnt(&cf_sk->layer,
					    cfsk_hold, cfsk_put);
		cf_sk->sk.sk_state = CAIF_CONNECTED;
		set_tx_flow_on(cf_sk);
		/* Clear any shutdown left over from a previous connection. */
		cf_sk->sk.sk_shutdown = 0;
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;
	case CAIF_CTRLCMD_DEINIT_RSP:
		/* We're now disconnected */
		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;
	case CAIF_CTRLCMD_INIT_FAIL_RSP:
		/* Connect request failed */
		cf_sk->sk.sk_err = ECONNREFUSED;
		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
		cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
		/*
		 * Socket "standards" seems to require POLLOUT to
		 * be set at connect failure.
		 */
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;
	case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
		/* Modem has closed this connection, or device is down. */
		cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
		cf_sk->sk.sk_err = ECONNRESET;
		/* Re-enable RX flow so pending readers can drain the queue. */
		set_rx_flow_on(cf_sk);
		cf_sk->sk.sk_error_report(&cf_sk->sk);
		break;
	default:
		pr_debug("Unexpected flow command %d\n", flow);
	}
}
2010-04-28 12:54:38 +04:00
static void caif_check_flow_release ( struct sock * sk )
2010-03-30 17:56:26 +04:00
{
2010-04-28 12:54:38 +04:00
struct caifsock * cf_sk = container_of ( sk , struct caifsock , sk ) ;
2010-03-30 17:56:26 +04:00
2010-04-28 12:54:38 +04:00
if ( rx_flow_is_on ( cf_sk ) )
return ;
2010-03-30 17:56:26 +04:00
2010-04-28 12:54:38 +04:00
if ( atomic_read ( & sk - > sk_rmem_alloc ) < = sk_rcvbuf_lowwater ( cf_sk ) ) {
set_rx_flow_on ( cf_sk ) ;
2010-05-21 06:16:10 +04:00
caif_flow_ctrl ( sk , CAIF_MODEMCMD_FLOW_ON_REQ ) ;
2010-04-28 12:54:38 +04:00
}
}
2010-05-21 06:16:12 +04:00
2010-04-28 12:54:38 +04:00
/*
 * Copied from unix_dgram_recvmsg, but removed credit checks,
 * changed locking, address handling and added MSG_TRUNC.
 *
 * Receives one whole datagram; a datagram longer than @len is truncated
 * and MSG_TRUNC is set in m->msg_flags. Returns the number of bytes
 * copied (or the full datagram length when MSG_TRUNC was requested in
 * @flags), or a negative error.
 */
static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *m, size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int ret;
	int copylen;

	/*
	 * BUGFIX: test the caller-supplied flags, not m->msg_flags.
	 * msg_flags is an output field on receive and may contain stale
	 * or user-controlled bits, making the old check bogus.
	 */
	ret = -EOPNOTSUPP;
	if (flags & MSG_OOB)
		goto read_error;

	skb = skb_recv_datagram(sk, flags, 0, &ret);
	if (!skb)
		goto read_error;
	copylen = skb->len;
	if (len < copylen) {
		m->msg_flags |= MSG_TRUNC;
		copylen = len;
	}

	ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen);
	if (ret)
		goto out_free;

	ret = (flags & MSG_TRUNC) ? skb->len : copylen;
out_free:
	skb_free_datagram(sk, skb);
	/* Queue shrank; possibly turn receive flow back on. */
	caif_check_flow_release(sk);
	return ret;

read_error:
	return ret;
}
2010-03-30 17:56:26 +04:00
2010-04-28 12:54:38 +04:00
/*
 * Copied from unix_stream_wait_data, identical except for lock call.
 * Sleeps until data arrives on the receive queue, the connection dies,
 * a signal is pending, or @timeo expires. Returns the remaining timeout.
 * Called without the socket lock; takes and releases it internally.
 */
static long caif_stream_data_wait(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);
	lock_sock(sk);
	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		/* Stop waiting as soon as data arrived or waiting is futile. */
		if (!skb_queue_empty(&sk->sk_receive_queue) ||
		    sk->sk_err ||
		    sk->sk_state != CAIF_CONNECTED ||
		    sock_flag(sk, SOCK_DEAD) ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current) ||
		    !timeo)
			break;

		/* Drop the socket lock while actually sleeping. */
		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	finish_wait(sk_sleep(sk), &wait);
	release_sock(sk);
	return timeo;
}
2010-03-30 17:56:26 +04:00
2010-04-28 12:54:38 +04:00
/*
 * Copied from unix_stream_recvmsg, but removed credit checks,
 * changed locking calls, changed address handling.
 *
 * Stream receive: copies up to @size bytes out of queued skbs, partially
 * consuming an skb when needed (unless MSG_PEEK). Blocks (subject to the
 * socket receive timeout) until at least the low-water target is copied.
 * Returns bytes copied or a negative error.
 */
static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size,
			       int flags)
{
	struct sock *sk = sock->sk;
	int copied = 0;
	int target;		/* minimum bytes before returning (SO_RCVLOWAT) */
	int err = 0;
	long timeo;

	err = -EOPNOTSUPP;
	if (flags & MSG_OOB)
		goto out;

	msg->msg_namelen = 0;

	/*
	 * Lock the socket to prevent queue disordering
	 * while sleeps in memcpy_tomsg
	 */
	err = -EAGAIN;
	if (sk->sk_state == CAIF_CONNECTING)
		goto out;

	/* readlock serializes concurrent stream readers for this socket. */
	caif_read_lock(sk);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		int chunk;
		struct sk_buff *skb;

		lock_sock(sk);
		skb = skb_dequeue(&sk->sk_receive_queue);
		/* Queue may have shrunk; possibly re-enable receive flow. */
		caif_check_flow_release(sk);

		if (skb == NULL) {
			if (copied >= target)
				goto unlock;
			/*
			 * POSIX 1003.1g mandates this order.
			 */
			err = sock_error(sk);
			if (err)
				goto unlock;
			err = -ECONNRESET;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				goto unlock;

			err = -EPIPE;
			if (sk->sk_state != CAIF_CONNECTED)
				goto unlock;
			if (sock_flag(sk, SOCK_DEAD))
				goto unlock;

			release_sock(sk);

			err = -EAGAIN;
			if (!timeo)
				break;

			/* Drop readlock while sleeping so others may read. */
			caif_read_unlock(sk);

			timeo = caif_stream_data_wait(sk, timeo);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}
			caif_read_lock(sk);
			continue;
unlock:
			release_sock(sk);
			break;
		}
		release_sock(sk);
		chunk = min_t(unsigned int, skb->len, size);
		if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
			/* Copy to user failed: requeue skb for a later read. */
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			skb_pull(skb, chunk);

			/* put the skb back if we didn't use it up. */
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
			kfree_skb(skb);
		} else {
			/*
			 * It is questionable, see note in unix_dgram_recvmsg.
			 */
			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);
	caif_read_unlock(sk);

out:
	return copied ? : err;
}
2010-03-30 17:56:26 +04:00
2010-04-28 12:54:38 +04:00
/*
 * Copied from sock.c:sock_wait_for_wmem, but change to wait for
 * CAIF flow-on and sock_writable.
 *
 * Waits until transmit flow is on (and, when @wait_writeable is set,
 * the socket is writable), or an abort condition is hit. On return,
 * *err holds 0 on success or the reason the wait ended; the remaining
 * timeout is returned.
 */
static long caif_wait_for_flow_on(struct caifsock *cf_sk,
				  int wait_writeable, long timeo, int *err)
{
	struct sock *sk = &cf_sk->sk;
	DEFINE_WAIT(wait);
	for (;;) {
		*err = 0;
		/* Done: flow is on and (if required) there is send room. */
		if (tx_flow_is_on(cf_sk) &&
		    (!wait_writeable || sock_writeable(&cf_sk->sk)))
			break;
		*err = -ETIMEDOUT;
		if (!timeo)
			break;
		*err = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		*err = -ECONNRESET;
		if (sk->sk_shutdown & SHUTDOWN_MASK)
			break;
		*err = -sk->sk_err;
		if (sk->sk_err)
			break;
		*err = -EPIPE;
		if (cf_sk->sk.sk_state != CAIF_CONNECTED)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}
2010-03-30 17:56:26 +04:00
2010-04-28 12:54:38 +04:00
/*
 * Transmit a SKB. The device may temporarily request re-transmission
 * by returning EAGAIN.
 *
 * Converts @skb to a CAIF packet and hands it to the layer below.
 * On success the packet (and thus the skb) is owned by the stack; when
 * no downstream layer exists the skb is freed here and -EINVAL returned.
 * NOTE(review): @noblock and @timeo are currently unused by this helper.
 */
static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
			int noblock, long timeo)
{
	struct cfpkt *pkt;

	/* pkt wraps skb; freeing the skb below also disposes of pkt. */
	pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
	memset(skb->cb, 0, sizeof(struct caif_payload_info));
	cfpkt_set_prio(pkt, cf_sk->sk.sk_priority);

	if (cf_sk->layer.dn == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
}
2010-03-30 17:56:26 +04:00
2010-04-28 12:54:38 +04:00
/*
 * Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF.
 * Sends one datagram of @len bytes as a single CAIF frame. Waits for
 * transmit flow-on before sending. Returns @len on success or a
 * negative error.
 */
static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	int buffer_size;
	int ret = 0;
	struct sk_buff *skb = NULL;
	int noblock;
	long timeo;
	caif_assert(cf_sk);
	ret = sock_error(sk);
	if (ret)
		goto err;
	/* Out-of-band data and explicit destination addresses unsupported. */
	ret = -EOPNOTSUPP;
	if (msg->msg_flags & MSG_OOB)
		goto err;
	ret = -EOPNOTSUPP;
	if (msg->msg_namelen)
		goto err;
	ret = -EINVAL;
	if (unlikely(msg->msg_iov->iov_base == NULL))
		goto err;
	noblock = msg->msg_flags & MSG_DONTWAIT;

	timeo = sock_sndtimeo(sk, noblock);
	/* Block (up to timeo) until the modem allows transmission. */
	timeo = caif_wait_for_flow_on(container_of(sk, struct caifsock, sk),
				      1, timeo, &ret);

	if (ret)
		goto err;
	ret = -EPIPE;
	/*
	 * NOTE(review): this tests RCV_SHUTDOWN on the send path;
	 * SEND_SHUTDOWN would seem more natural — confirm intent.
	 */
	if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
	    sock_flag(sk, SOCK_DEAD) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		goto err;

	/* Error if trying to write more than maximum frame size. */
	ret = -EMSGSIZE;
	if (len > cf_sk->maxframe && cf_sk->sk.sk_protocol != CAIFPROTO_RFM)
		goto err;
	/* Reserve room for the link-layer headers around the payload. */
	buffer_size = len + cf_sk->headroom + cf_sk->tailroom;

	ret = -ENOMEM;
	skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret);

	if (!skb || skb_tailroom(skb) < buffer_size)
		goto err;

	skb_reserve(skb, cf_sk->headroom);

	ret = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);

	if (ret)
		goto err;
	ret = transmit_skb(skb, cf_sk, noblock, timeo);
	if (ret < 0)
		/* skb is already freed */
		return ret;

	return len;
err:
	kfree_skb(skb);
	return ret;
}
2010-03-30 17:56:26 +04:00
2010-04-28 12:54:38 +04:00
/*
 * Copied from unix_stream_sendmsg and adapted to CAIF:
 * Changed removed permission handling and added waiting for flow on
 * and other minor adaptations.
 *
 * Stream send: chops the user buffer into frames bounded by maxframe,
 * half the send buffer, and SKB_MAX_ALLOC, transmitting each in turn.
 * Returns the number of bytes sent, or a negative error if nothing
 * was sent.
 */
static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	int err, size;
	struct sk_buff *skb;
	int sent = 0;
	long timeo;

	err = -EOPNOTSUPP;
	if (unlikely(msg->msg_flags & MSG_OOB))
		goto out_err;

	if (unlikely(msg->msg_namelen))
		goto out_err;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	/* Wait until the modem signals flow-on and there is send room. */
	timeo = caif_wait_for_flow_on(cf_sk, 1, timeo, &err);

	if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
		goto pipe_err;

	while (sent < len) {

		size = len - sent;

		/* Never exceed the link-layer frame size. */
		if (size > cf_sk->maxframe)
			size = cf_sk->maxframe;

		/* If size is more than half of sndbuf, chop up message */
		if (size > ((sk->sk_sndbuf >> 1) - 64))
			size = (sk->sk_sndbuf >> 1) - 64;

		if (size > SKB_MAX_ALLOC)
			size = SKB_MAX_ALLOC;

		skb = sock_alloc_send_skb(sk,
					  size + cf_sk->headroom +
					  cf_sk->tailroom,
					  msg->msg_flags & MSG_DONTWAIT,
					  &err);
		if (skb == NULL)
			goto out_err;

		skb_reserve(skb, cf_sk->headroom);
		/*
		 * If you pass two values to the sock_alloc_send_skb
		 * it tries to grab the large buffer with GFP_NOFS
		 * (which can fail easily), and if it fails grab the
		 * fallback size buffer which is under a page and will
		 * succeed. [Alan]
		 */
		size = min_t(int, size, skb_tailroom(skb));

		err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
		if (err) {
			kfree_skb(skb);
			goto out_err;
		}
		err = transmit_skb(skb, cf_sk,
				   msg->msg_flags & MSG_DONTWAIT, timeo);
		if (err < 0)
			/* skb is already freed */
			goto pipe_err;

		sent += size;
	}

	return sent;

pipe_err:
	/* Writing to a shut-down stream raises SIGPIPE unless suppressed. */
	if (sent == 0 && !(msg->msg_flags & MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	err = -EPIPE;
out_err:
	return sent ? : err;
}
/*
 * Set CAIF socket options (SOL_CAIF level). Only permitted while the
 * socket is still unconnected.
 */
static int setsockopt(struct socket *sock,
		      int lvl, int opt, char __user *ov, unsigned int ol)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	int linksel;

	if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
		return -ENOPROTOOPT;

	switch (opt) {
	case CAIFSO_LINK_SELECT:
		if (ol < sizeof(int))
			return -EINVAL;
		if (lvl != SOL_CAIF)
			return -ENOPROTOOPT;
		if (copy_from_user(&linksel, ov, sizeof(int)))
			return -EINVAL;
		lock_sock(&cf_sk->sk);
		cf_sk->conn_req.link_selector = linksel;
		release_sock(&cf_sk->sk);
		return 0;

	case CAIFSO_REQ_PARAM:
		if (lvl != SOL_CAIF)
			return -ENOPROTOOPT;
		/* Request parameters only apply to utility links. */
		if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
			return -ENOPROTOOPT;
		lock_sock(&cf_sk->sk);
		if (ol > sizeof(cf_sk->conn_req.param.data) ||
		    copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
			release_sock(&cf_sk->sk);
			return -EINVAL;
		}
		cf_sk->conn_req.param.size = ol;
		release_sock(&cf_sk->sk);
		return 0;

	default:
		return -ENOPROTOOPT;
	}
}
2010-04-28 12:54:38 +04:00
/*
* caif_connect ( ) - Connect a CAIF Socket
* Copied and modified af_irda . c : irda_connect ( ) .
*
* Note : by consulting " errno " , the user space caller may learn the cause
* of the failure . Most of them are visible in the function , others may come
* from subroutines called and are listed here :
* o - EAFNOSUPPORT : bad socket family or type .
* o - ESOCKTNOSUPPORT : bad socket type or protocol
* o - EINVAL : bad socket address , or CAIF link type
* o - ECONNREFUSED : remote end refused the connection .
* o - EINPROGRESS : connect request sent but timed out ( or non - blocking )
* o - EISCONN : already connected .
* o - ETIMEDOUT : Connection timed out ( send timeout )
* o - ENODEV : No link layer to send request
* o - ECONNRESET : Received Shutdown indication or lost link layer
* o - ENOMEM : Out of memory
*
* State Strategy :
* o sk_state : holds the CAIF_ * protocol state , it ' s updated by
* caif_ctrl_cb .
* o sock - > state : holds the SS_ * socket state and is updated by connect and
* disconnect .
*/
static int caif_connect ( struct socket * sock , struct sockaddr * uaddr ,
int addr_len , int flags )
2010-03-30 17:56:26 +04:00
{
struct sock * sk = sock - > sk ;
2010-04-28 12:54:38 +04:00
struct caifsock * cf_sk = container_of ( sk , struct caifsock , sk ) ;
long timeo ;
int err ;
2010-06-17 10:55:40 +04:00
int ifindex , headroom , tailroom ;
2010-10-05 02:42:08 +04:00
unsigned int mtu ;
2010-06-17 10:55:40 +04:00
struct net_device * dev ;
2010-04-28 12:54:38 +04:00
lock_sock ( sk ) ;
2010-03-30 17:56:26 +04:00
2010-04-28 12:54:38 +04:00
err = - EAFNOSUPPORT ;
if ( uaddr - > sa_family ! = AF_CAIF )
2010-03-30 17:56:26 +04:00
goto out ;
2010-04-28 12:54:38 +04:00
switch ( sock - > state ) {
case SS_UNCONNECTED :
/* Normal case, a fresh connect */
caif_assert ( sk - > sk_state = = CAIF_DISCONNECTED ) ;
break ;
case SS_CONNECTING :
switch ( sk - > sk_state ) {
case CAIF_CONNECTED :
sock - > state = SS_CONNECTED ;
err = - EISCONN ;
goto out ;
case CAIF_DISCONNECTED :
/* Reconnect allowed */
break ;
case CAIF_CONNECTING :
err = - EALREADY ;
if ( flags & O_NONBLOCK )
goto out ;
goto wait_connect ;
}
break ;
case SS_CONNECTED :
caif_assert ( sk - > sk_state = = CAIF_CONNECTED | |
sk - > sk_state = = CAIF_DISCONNECTED ) ;
if ( sk - > sk_shutdown & SHUTDOWN_MASK ) {
/* Allow re-connect after SHUTDOWN_IND */
2011-05-13 06:44:05 +04:00
caif_disconnect_client ( sock_net ( sk ) , & cf_sk - > layer ) ;
2011-05-22 15:18:51 +04:00
caif_free_client ( & cf_sk - > layer ) ;
2010-04-28 12:54:38 +04:00
break ;
}
/* No reconnect on a seqpacket socket */
err = - EISCONN ;
goto out ;
case SS_DISCONNECTING :
case SS_FREE :
caif_assert ( 1 ) ; /*Should never happen */
break ;
2010-03-30 17:56:26 +04:00
}
2010-04-28 12:54:38 +04:00
sk - > sk_state = CAIF_DISCONNECTED ;
sock - > state = SS_UNCONNECTED ;
sk_stream_kill_queues ( & cf_sk - > sk ) ;
2010-03-30 17:56:26 +04:00
2010-04-28 12:54:38 +04:00
err = - EINVAL ;
2010-05-22 14:43:42 +04:00
if ( addr_len ! = sizeof ( struct sockaddr_caif ) )
2010-04-28 12:54:38 +04:00
goto out ;
2010-03-30 17:56:26 +04:00
2010-04-28 12:54:38 +04:00
memcpy ( & cf_sk - > conn_req . sockaddr , uaddr ,
2010-03-30 17:56:26 +04:00
sizeof ( struct sockaddr_caif ) ) ;
2010-04-28 12:54:38 +04:00
/* Move to connecting socket, start sending Connect Requests */
sock - > state = SS_CONNECTING ;
sk - > sk_state = CAIF_CONNECTING ;
2011-05-13 06:44:07 +04:00
/* Check priority value comming from socket */
2010-11-01 14:52:47 +03:00
/* if priority value is out of range it will be ajusted */
if ( cf_sk - > sk . sk_priority > CAIF_PRIO_MAX )
cf_sk - > conn_req . priority = CAIF_PRIO_MAX ;
else if ( cf_sk - > sk . sk_priority < CAIF_PRIO_MIN )
cf_sk - > conn_req . priority = CAIF_PRIO_MIN ;
else
cf_sk - > conn_req . priority = cf_sk - > sk . sk_priority ;
/*ifindex = id of the interface.*/
cf_sk - > conn_req . ifindex = cf_sk - > sk . sk_bound_dev_if ;
2010-04-28 12:54:38 +04:00
cf_sk - > layer . receive = caif_sktrecv_cb ;
2011-05-13 06:44:04 +04:00
2011-05-13 06:44:05 +04:00
err = caif_connect_client ( sock_net ( sk ) , & cf_sk - > conn_req ,
2010-06-17 10:55:40 +04:00
& cf_sk - > layer , & ifindex , & headroom , & tailroom ) ;
2011-05-13 06:44:04 +04:00
2010-04-28 12:54:38 +04:00
if ( err < 0 ) {
cf_sk - > sk . sk_socket - > state = SS_UNCONNECTED ;
cf_sk - > sk . sk_state = CAIF_DISCONNECTED ;
goto out ;
2010-03-30 17:56:26 +04:00
}
2010-10-05 02:42:08 +04:00
err = - ENODEV ;
rcu_read_lock ( ) ;
dev = dev_get_by_index_rcu ( sock_net ( sk ) , ifindex ) ;
if ( ! dev ) {
rcu_read_unlock ( ) ;
goto out ;
}
2010-06-17 10:55:40 +04:00
cf_sk - > headroom = LL_RESERVED_SPACE_EXTRA ( dev , headroom ) ;
2010-10-05 02:42:08 +04:00
mtu = dev - > mtu ;
rcu_read_unlock ( ) ;
2010-06-17 10:55:40 +04:00
cf_sk - > tailroom = tailroom ;
2010-10-05 02:42:08 +04:00
cf_sk - > maxframe = mtu - ( headroom + tailroom ) ;
2010-06-17 10:55:40 +04:00
if ( cf_sk - > maxframe < 1 ) {
2010-09-06 01:31:11 +04:00
pr_warn ( " CAIF Interface MTU too small (%d) \n " , dev - > mtu ) ;
2010-11-01 14:52:47 +03:00
err = - ENODEV ;
2010-06-17 10:55:40 +04:00
goto out ;
}
2010-03-30 17:56:26 +04:00
2010-04-28 12:54:38 +04:00
err = - EINPROGRESS ;
wait_connect :
2010-03-30 17:56:26 +04:00
2010-04-28 12:54:38 +04:00
if ( sk - > sk_state ! = CAIF_CONNECTED & & ( flags & O_NONBLOCK ) )
goto out ;
2010-03-30 17:56:26 +04:00
2010-04-28 12:54:38 +04:00
timeo = sock_sndtimeo ( sk , flags & O_NONBLOCK ) ;
release_sock ( sk ) ;
2010-05-21 06:16:07 +04:00
err = - ERESTARTSYS ;
timeo = wait_event_interruptible_timeout ( * sk_sleep ( sk ) ,
2010-04-28 12:54:38 +04:00
sk - > sk_state ! = CAIF_CONNECTING ,
timeo ) ;
lock_sock ( sk ) ;
2010-05-21 06:16:07 +04:00
if ( timeo < 0 )
2010-04-28 12:54:38 +04:00
goto out ; /* -ERESTARTSYS */
2010-03-30 17:56:26 +04:00
2010-05-21 06:16:07 +04:00
err = - ETIMEDOUT ;
if ( timeo = = 0 & & sk - > sk_state ! = CAIF_CONNECTED )
goto out ;
2010-04-28 12:54:38 +04:00
if ( sk - > sk_state ! = CAIF_CONNECTED ) {
sock - > state = SS_UNCONNECTED ;
err = sock_error ( sk ) ;
if ( ! err )
err = - ECONNREFUSED ;
goto out ;
}
sock - > state = SS_CONNECTED ;
err = 0 ;
2010-03-30 17:56:26 +04:00
out :
2010-04-28 12:54:38 +04:00
release_sock ( sk ) ;
return err ;
2010-03-30 17:56:26 +04:00
}
2010-04-28 12:54:38 +04:00
/*
 * caif_release() - Disconnect a CAIF Socket
 * Copied and modified af_irda.c:irda_release().
 *
 * Marks the socket dead (under the receive-queue lock, in concert with
 * caif_queue_rcv_skb), tears down debugfs, disconnects from the CAIF
 * stack, wakes pollers and drops the final reference.
 */
static int caif_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	if (!sk)
		return 0;

	set_tx_flow_off(cf_sk);

	/*
	 * Ensure that packets are not queued after this point in time.
	 * caif_queue_rcv_skb checks SOCK_DEAD holding the queue lock,
	 * this ensures no packets when sock is dead.
	 */
	spin_lock_bh(&sk->sk_receive_queue.lock);
	sock_set_flag(sk, SOCK_DEAD);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	sock->sk = NULL;

	WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
	if (cf_sk->debugfs_socket_dir != NULL)
		debugfs_remove_recursive(cf_sk->debugfs_socket_dir);

	lock_sock(&(cf_sk->sk));
	sk->sk_state = CAIF_DISCONNECTED;
	sk->sk_shutdown = SHUTDOWN_MASK;

	caif_disconnect_client(sock_net(sk), &cf_sk->layer);
	cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
	/* Wake anyone polling so they observe POLLERR/POLLHUP. */
	wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP);

	sock_orphan(sk);
	sk_stream_kill_queues(&cf_sk->sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}
2010-03-30 17:56:26 +04:00
2010-04-28 12:54:38 +04:00
/* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */
static unsigned int caif_poll(struct file *file,
			      struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	unsigned int mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	/* Exceptional events. */
	if (sk->sk_err)
		mask |= POLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	/* Readable: queued data, or the receive side was shut down. */
	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	/*
	 * We also report writable when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
	if (sock_writeable(sk) && tx_flow_is_on(cf_sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}
/*
 * Socket operations for SOCK_SEQPACKET CAIF sockets: message-oriented
 * send/receive; bind/listen/accept and friends are unsupported stubs.
 */
static const struct proto_ops caif_seqpacket_ops = {
	.family = PF_CAIF,
	.owner = THIS_MODULE,
	.release = caif_release,
	.bind = sock_no_bind,
	.connect = caif_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = caif_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg = caif_seqpkt_sendmsg,
	.recvmsg = caif_seqpkt_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};
/*
 * Socket operations for SOCK_STREAM CAIF sockets: identical to the
 * seqpacket table except for the stream-oriented sendmsg/recvmsg pair.
 */
static const struct proto_ops caif_stream_ops = {
	.family = PF_CAIF,
	.owner = THIS_MODULE,
	.release = caif_release,
	.bind = sock_no_bind,
	.connect = caif_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = caif_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg = caif_stream_sendmsg,
	.recvmsg = caif_stream_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};
/* This function is called when a socket is finally destroyed. */
static void caif_sock_destructor ( struct sock * sk )
{
2010-04-28 12:54:38 +04:00
struct caifsock * cf_sk = container_of ( sk , struct caifsock , sk ) ;
2010-03-30 17:56:26 +04:00
caif_assert ( ! atomic_read ( & sk - > sk_wmem_alloc ) ) ;
caif_assert ( sk_unhashed ( sk ) ) ;
caif_assert ( ! sk - > sk_socket ) ;
if ( ! sock_flag ( sk , SOCK_DEAD ) ) {
2011-05-13 06:44:07 +04:00
pr_debug ( " Attempt to release alive CAIF socket: %p \n " , sk ) ;
2010-03-30 17:56:26 +04:00
return ;
}
2010-04-28 12:54:38 +04:00
sk_stream_kill_queues ( & cf_sk - > sk ) ;
2011-05-13 06:44:04 +04:00
caif_free_client ( & cf_sk - > layer ) ;
2010-03-30 17:56:26 +04:00
}
/*
 * caif_create() - PF_CAIF socket creation, called via sock_register().
 *
 * Selects the seqpacket or stream ops table from sock->type, allocates
 * the sock, and initialises CAIF-specific state (flow bits, default
 * priority, connect-request defaults).
 *
 * Returns 0 on success or a negative errno (-EPERM, -ESOCKTNOSUPPORT,
 * -EPROTONOSUPPORT, -ENOMEM).
 */
static int caif_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	struct sock *sk = NULL;
	struct caifsock *cf_sk = NULL;
	/* function-scope static: one shared proto definition for all sockets */
	static struct proto prot = {.name = "PF_CAIF",
		.owner = THIS_MODULE,
		.obj_size = sizeof(struct caifsock),
	};

	/* Opening CAIF sockets is a privileged operation. */
	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_NET_ADMIN))
		return -EPERM;
	/*
	 * The sock->type specifies the socket type to use.
	 * The CAIF socket is a packet stream in the sense
	 * that it is packet based. CAIF trusts the reliability
	 * of the link, no resending is implemented.
	 */
	if (sock->type == SOCK_SEQPACKET)
		sock->ops = &caif_seqpacket_ops;
	else if (sock->type == SOCK_STREAM)
		sock->ops = &caif_stream_ops;
	else
		return -ESOCKTNOSUPPORT;
	if (protocol < 0 || protocol >= CAIFPROTO_MAX)
		return -EPROTONOSUPPORT;
	/*
	 * Set the socket state to unconnected. The socket state
	 * is really not used at all in the net/core or socket.c but the
	 * initialization makes sure that sock->state is not uninitialized.
	 */
	sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot);
	if (!sk)
		return -ENOMEM;

	cf_sk = container_of(sk, struct caifsock, sk);

	/* Store the protocol */
	sk->sk_protocol = (unsigned char) protocol;

	/* Initialize default priority for well-known cases */
	switch (protocol) {
	case CAIFPROTO_AT:
		sk->sk_priority = TC_PRIO_CONTROL;
		break;
	case CAIFPROTO_RFM:
		sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
		break;
	default:
		sk->sk_priority = TC_PRIO_BESTEFFORT;
	}

	/*
	 * Lock in order to try to stop someone from opening the socket
	 * too early.
	 */
	lock_sock(&(cf_sk->sk));

	/* Initialize the nozero default sock structure data. */
	sock_init_data(sock, sk);
	sk->sk_destruct = caif_sock_destructor;

	mutex_init(&cf_sk->readlock); /* single task reading lock */
	cf_sk->layer.ctrlcmd = caif_ctrl_cb;
	cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
	cf_sk->sk.sk_state = CAIF_DISCONNECTED;

	/* TX starts off until the CAIF stack signals flow-on; RX accepts. */
	set_tx_flow_off(cf_sk);
	set_rx_flow_on(cf_sk);

	/* Set default options on configuration */
	cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
	cf_sk->conn_req.protocol = protocol;
	release_sock(&cf_sk->sk);
	return 0;
}
2010-04-28 12:54:38 +04:00
2010-03-30 17:56:26 +04:00
static struct net_proto_family caif_family_ops = {
. family = PF_CAIF ,
. create = caif_create ,
. owner = THIS_MODULE ,
} ;
2012-02-03 08:36:19 +04:00
static int __init caif_sktinit_module ( void )
2010-03-30 17:56:26 +04:00
{
2010-04-28 12:54:38 +04:00
int err = sock_register ( & caif_family_ops ) ;
2010-03-30 17:56:26 +04:00
if ( ! err )
return err ;
return 0 ;
}
/* Module exit: unregister the PF_CAIF address family. */
static void __exit caif_sktexit_module(void)
{
	sock_unregister(PF_CAIF);
}
module_init(caif_sktinit_module);
module_exit(caif_sktexit_module);