/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author:	Sjur Brendeland
 * License terms: GNU General Public License (GPL) version 2
 */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfmuxl.h>
#include <net/caif/cfsrvl.h>
#include <net/caif/cffrml.h>

#define container_obj(layr) container_of(layr, struct cfmuxl, layer)
#define CAIF_CTRL_CHANNEL 0
#define UP_CACHE_SIZE 8
#define DN_CACHE_SIZE 8
/*
 * CAIF multiplex layer: routes packets between one set of upwards
 * (service) layers, keyed by channel id, and one set of downwards
 * (framing) layers, keyed by physical interface id.  Both lists are
 * RCU-protected; small per-id caches short-circuit the list walks.
 */
struct cfmuxl {
	struct cflayer layer;		/* embedded generic layer; see container_obj() */
	struct list_head srvl_list;	/* upwards service layers (RCU list) */
	struct list_head frml_list;	/* downwards framing layers (RCU list) */
	struct cflayer *up_cache[UP_CACHE_SIZE];	/* id -> up layer cache, may hold NULL */
	struct cflayer *dn_cache[DN_CACHE_SIZE];	/* phyid -> dn layer cache, may hold NULL */
	/*
	 * Set when inserting or removing downwards layers.
	 */
	spinlock_t transmit_lock;
	/*
	 * Set when inserting or removing upwards layers.
	 */
	spinlock_t receive_lock;
};
static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt);
static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
			   int phyid);
static struct cflayer *get_up(struct cfmuxl *muxl, u16 id);
struct cflayer * cfmuxl_create ( void )
{
struct cfmuxl * this = kmalloc ( sizeof ( struct cfmuxl ) , GFP_ATOMIC ) ;
if ( ! this )
return NULL ;
memset ( this , 0 , sizeof ( * this ) ) ;
this - > layer . receive = cfmuxl_receive ;
this - > layer . transmit = cfmuxl_transmit ;
this - > layer . ctrlcmd = cfmuxl_ctrlcmd ;
INIT_LIST_HEAD ( & this - > srvl_list ) ;
INIT_LIST_HEAD ( & this - > frml_list ) ;
spin_lock_init ( & this - > transmit_lock ) ;
spin_lock_init ( & this - > receive_lock ) ;
snprintf ( this - > layer . name , CAIF_LAYER_NAME_SZ , " mux " ) ;
return & this - > layer ;
}
/*
 * Register a downwards (framing) layer.
 *
 * Always returns 0.  The dn_cache slot is not touched here; get_dn()
 * will populate it lazily on the next transmit for this phyid.
 */
int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid)
{
	/* Use container_obj() like every other function in this file
	 * instead of an open cast: correct regardless of where 'layer'
	 * sits inside struct cfmuxl. */
	struct cfmuxl *muxl = container_obj(layr);

	spin_lock_bh(&muxl->transmit_lock);
	list_add_rcu(&dn->node, &muxl->frml_list);
	spin_unlock_bh(&muxl->transmit_lock);

	return 0;
}
/*
 * Walk an RCU-protected layer list and return the entry whose id
 * matches, or NULL if none does.  Caller must be inside an RCU read
 * section or hold the lock that guards updates to @list.
 */
static struct cflayer *get_from_id(struct list_head *list, u16 id)
{
	struct cflayer *lyr;

	list_for_each_entry_rcu(lyr, list, node)
		if (lyr->id == id)
			return lyr;

	return NULL;
}
2011-05-22 15:18:51 +04:00
int cfmuxl_set_uplayer ( struct cflayer * layr , struct cflayer * up , u8 linkid )
{
struct cfmuxl * muxl = container_obj ( layr ) ;
struct cflayer * old ;
spin_lock_bh ( & muxl - > receive_lock ) ;
/* Two entries with same id is wrong, so remove old layer from mux */
old = get_from_id ( & muxl - > srvl_list , linkid ) ;
if ( old ! = NULL )
list_del_rcu ( & old - > node ) ;
list_add_rcu ( & up - > node , & muxl - > srvl_list ) ;
spin_unlock_bh ( & muxl - > receive_lock ) ;
return 0 ;
}
2010-03-30 17:56:23 +04:00
struct cflayer * cfmuxl_remove_dnlayer ( struct cflayer * layr , u8 phyid )
{
struct cfmuxl * muxl = container_obj ( layr ) ;
struct cflayer * dn ;
2011-05-13 06:43:59 +04:00
int idx = phyid % DN_CACHE_SIZE ;
spin_lock_bh ( & muxl - > transmit_lock ) ;
2011-08-01 20:19:00 +04:00
RCU_INIT_POINTER ( muxl - > dn_cache [ idx ] , NULL ) ;
2010-03-30 17:56:23 +04:00
dn = get_from_id ( & muxl - > frml_list , phyid ) ;
2011-05-13 06:43:59 +04:00
if ( dn = = NULL )
goto out ;
list_del_rcu ( & dn - > node ) ;
2010-03-30 17:56:23 +04:00
caif_assert ( dn ! = NULL ) ;
2011-05-13 06:43:59 +04:00
out :
spin_unlock_bh ( & muxl - > transmit_lock ) ;
2010-03-30 17:56:23 +04:00
return dn ;
}
/*
 * Resolve channel @id to its upwards layer, consulting the per-id
 * cache first.  On a miss (empty slot or a different id hashed to the
 * same slot) the service list is walked under receive_lock and the
 * slot is refreshed -- possibly with NULL when the id is unknown.
 * Caller must be inside an RCU read section.
 */
static struct cflayer *get_up(struct cfmuxl *muxl, u16 id)
{
	int idx = id % UP_CACHE_SIZE;
	struct cflayer *up = rcu_dereference(muxl->up_cache[idx]);

	if (up != NULL && up->id == id)
		return up;

	spin_lock_bh(&muxl->receive_lock);
	up = get_from_id(&muxl->srvl_list, id);
	rcu_assign_pointer(muxl->up_cache[idx], up);
	spin_unlock_bh(&muxl->receive_lock);

	return up;
}
static struct cflayer * get_dn ( struct cfmuxl * muxl , struct dev_info * dev_info )
{
struct cflayer * dn ;
int idx = dev_info - > id % DN_CACHE_SIZE ;
2011-05-13 06:43:59 +04:00
dn = rcu_dereference ( muxl - > dn_cache [ idx ] ) ;
2010-03-30 17:56:23 +04:00
if ( dn = = NULL | | dn - > id ! = dev_info - > id ) {
2011-05-13 06:43:59 +04:00
spin_lock_bh ( & muxl - > transmit_lock ) ;
2010-03-30 17:56:23 +04:00
dn = get_from_id ( & muxl - > frml_list , dev_info - > id ) ;
2011-05-13 06:43:59 +04:00
rcu_assign_pointer ( muxl - > dn_cache [ idx ] , dn ) ;
spin_unlock_bh ( & muxl - > transmit_lock ) ;
2010-03-30 17:56:23 +04:00
}
return dn ;
}
/*
 * Unregister and return the upwards layer for channel @id, or NULL if
 * none is registered.  Removing the control channel (id 0) is refused.
 * The receive-path cache slot is invalidated under the same lock.
 */
struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
{
	struct cfmuxl *muxl = container_obj(layr);
	struct cflayer *found;
	int idx = id % UP_CACHE_SIZE;

	if (id == 0) {
		pr_warn("Trying to remove control layer\n");
		return NULL;
	}

	spin_lock_bh(&muxl->receive_lock);

	found = get_from_id(&muxl->srvl_list, id);
	if (found != NULL) {
		RCU_INIT_POINTER(muxl->up_cache[idx], NULL);
		list_del_rcu(&found->node);
	}

	spin_unlock_bh(&muxl->receive_lock);
	return found;
}
static int cfmuxl_receive ( struct cflayer * layr , struct cfpkt * pkt )
{
int ret ;
struct cfmuxl * muxl = container_obj ( layr ) ;
u8 id ;
struct cflayer * up ;
if ( cfpkt_extr_head ( pkt , & id , 1 ) < 0 ) {
2010-09-06 01:31:11 +04:00
pr_err ( " erroneous Caif Packet \n " ) ;
2010-03-30 17:56:23 +04:00
cfpkt_destroy ( pkt ) ;
return - EPROTO ;
}
2011-05-13 06:43:59 +04:00
rcu_read_lock ( ) ;
2010-03-30 17:56:23 +04:00
up = get_up ( muxl , id ) ;
2011-05-13 06:43:59 +04:00
2010-03-30 17:56:23 +04:00
if ( up = = NULL ) {
2011-05-13 06:43:59 +04:00
pr_debug ( " Received data on unknown link ID = %d (0x%x) "
" up == NULL " , id , id ) ;
2010-03-30 17:56:23 +04:00
cfpkt_destroy ( pkt ) ;
/*
* Don ' t return ERROR , since modem misbehaves and sends out
* flow on before linksetup response .
*/
2011-05-13 06:43:59 +04:00
rcu_read_unlock ( ) ;
2010-03-30 17:56:23 +04:00
return /* CFGLU_EPROT; */ 0 ;
}
2011-05-13 06:43:59 +04:00
/* We can't hold rcu_lock during receive, so take a ref count instead */
2010-04-28 12:54:36 +04:00
cfsrvl_get ( up ) ;
2011-05-13 06:43:59 +04:00
rcu_read_unlock ( ) ;
2010-03-30 17:56:23 +04:00
ret = up - > receive ( up , pkt ) ;
2011-05-13 06:43:59 +04:00
2010-04-28 12:54:36 +04:00
cfsrvl_put ( up ) ;
2010-03-30 17:56:23 +04:00
return ret ;
}
static int cfmuxl_transmit ( struct cflayer * layr , struct cfpkt * pkt )
{
struct cfmuxl * muxl = container_obj ( layr ) ;
2011-05-13 06:43:59 +04:00
int err ;
2010-03-30 17:56:23 +04:00
u8 linkid ;
struct cflayer * dn ;
struct caif_payload_info * info = cfpkt_info ( pkt ) ;
2011-04-11 14:43:52 +04:00
BUG_ON ( ! info ) ;
2011-05-13 06:43:59 +04:00
rcu_read_lock ( ) ;
2011-04-11 14:43:52 +04:00
dn = get_dn ( muxl , info - > dev_info ) ;
2010-03-30 17:56:23 +04:00
if ( dn = = NULL ) {
2011-05-13 06:43:59 +04:00
pr_debug ( " Send data on unknown phy ID = %d (0x%x) \n " ,
2010-09-06 01:31:11 +04:00
info - > dev_info - > id , info - > dev_info - > id ) ;
2011-05-13 06:43:59 +04:00
rcu_read_unlock ( ) ;
cfpkt_destroy ( pkt ) ;
2010-03-30 17:56:23 +04:00
return - ENOTCONN ;
}
2011-05-13 06:43:59 +04:00
2010-03-30 17:56:23 +04:00
info - > hdr_len + = 1 ;
linkid = info - > channel_id ;
cfpkt_add_head ( pkt , & linkid , 1 ) ;
2011-05-13 06:43:59 +04:00
/* We can't hold rcu_lock during receive, so take a ref count instead */
cffrml_hold ( dn ) ;
rcu_read_unlock ( ) ;
err = dn - > transmit ( dn , pkt ) ;
cffrml_put ( dn ) ;
return err ;
2010-03-30 17:56:23 +04:00
}
static void cfmuxl_ctrlcmd ( struct cflayer * layr , enum caif_ctrlcmd ctrl ,
2013-03-06 23:39:57 +04:00
int phyid )
2010-03-30 17:56:23 +04:00
{
struct cfmuxl * muxl = container_obj ( layr ) ;
struct cflayer * layer ;
2011-05-13 06:43:59 +04:00
rcu_read_lock ( ) ;
list_for_each_entry_rcu ( layer , & muxl - > srvl_list , node ) {
2011-05-22 15:18:51 +04:00
if ( cfsrvl_phyid_match ( layer , phyid ) & & layer - > ctrlcmd ) {
2011-06-15 16:38:25 +04:00
if ( ( ctrl = = _CAIF_CTRLCMD_PHYIF_DOWN_IND | |
2011-05-22 15:18:51 +04:00
ctrl = = CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ) & &
2012-02-02 05:21:02 +04:00
layer - > id ! = 0 )
cfmuxl_remove_uplayer ( layr , layer - > id ) ;
2011-05-13 06:43:59 +04:00
/* NOTE: ctrlcmd is not allowed to block */
2010-03-30 17:56:23 +04:00
layer - > ctrlcmd ( layer , ctrl , phyid ) ;
2011-05-22 15:18:51 +04:00
}
2010-03-30 17:56:23 +04:00
}
2011-05-13 06:43:59 +04:00
rcu_read_unlock ( ) ;
2010-03-30 17:56:23 +04:00
}