// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus connections
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 */

#include <linux/workqueue.h>
#include <linux/greybus.h>

#include "greybus_trace.h"

#define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT	1000

static void gb_connection_kref_release(struct kref *kref);

static DEFINE_SPINLOCK(gb_connections_lock);
static DEFINE_MUTEX(gb_connection_mutex);

/* Caller holds gb_connection_mutex. */
static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
{
	struct gb_host_device *hd = intf->hd;
	struct gb_connection *connection;

	list_for_each_entry(connection, &hd->connections, hd_links) {
		if (connection->intf == intf &&
		    connection->intf_cport_id == cport_id)
			return true;
	}

	return false;
}
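
/*
 * Connection lifetimes are tracked with a kref. These helpers wrap
 * kref_get()/kref_put() and emit tracepoints so that reference-count
 * imbalances can be chased from the trace log.
 */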
static void gb_connection_get(struct gb_connection *connection)
{
	kref_get(&connection->kref);

	trace_gb_connection_get(connection);
}

static void gb_connection_put(struct gb_connection *connection)
{
	trace_gb_connection_put(connection);

	kref_put(&connection->kref, gb_connection_kref_release);
}

/*
 * Returns a reference-counted pointer to the connection if found; the
 * caller must drop the reference with gb_connection_put().
 */
static struct gb_connection *
gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
{
	struct gb_connection *connection;
	unsigned long flags;

	spin_lock_irqsave(&gb_connections_lock, flags);
	list_for_each_entry(connection, &hd->connections, hd_links)
		if (connection->hd_cport_id == cport_id) {
			gb_connection_get(connection);
			goto found;
		}
	connection = NULL;
found:
	spin_unlock_irqrestore(&gb_connections_lock, flags);

	return connection;
}

/*
 * Callback from the host driver to let us know that data has been
 * received on the bundle.
 */
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
		       u8 *data, size_t length)
{
	struct gb_connection *connection;

	trace_gb_hd_in(hd);

	connection = gb_connection_hd_find(hd, cport_id);
	if (!connection) {
		dev_err(&hd->dev,
			"nonexistent connection (%zu bytes dropped)\n", length);
		return;
	}
	gb_connection_recv(connection, data, length);
	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(greybus_data_rcvd);

static void gb_connection_kref_release(struct kref *kref)
{
	struct gb_connection *connection;

	connection = container_of(kref, struct gb_connection, kref);

	trace_gb_connection_release(connection);

	kfree(connection);
}
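
/*
 * Build the connection name from its cport ids: for example, host cport 4
 * connected to cport 2 on interface 1 yields "4/1:2". Static connections
 * (no remote interface) use 0 for both the interface and cport id.
 */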
static void gb_connection_init_name(struct gb_connection *connection)
{
	u16 hd_cport_id = connection->hd_cport_id;
	u16 cport_id = 0;
	u8 intf_id = 0;

	if (connection->intf) {
		intf_id = connection->intf->interface_id;
		cport_id = connection->intf_cport_id;
	}

	snprintf(connection->name, sizeof(connection->name),
		 "%u/%u:%u", hd_cport_id, intf_id, cport_id);
}

/*
 * _gb_connection_create() - create a Greybus connection
 * @hd:			host device of the connection
 * @hd_cport_id:	host-device cport id, or -1 for dynamic allocation
 * @intf:		remote interface, or NULL for static connections
 * @bundle:		remote-interface bundle (may be NULL)
 * @cport_id:		remote-interface cport id, or 0 for static connections
 * @handler:		request handler (may be NULL)
 * @flags:		connection flags
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
		      struct gb_interface *intf,
		      struct gb_bundle *bundle, int cport_id,
		      gb_request_handler_t handler,
		      unsigned long flags)
{
	struct gb_connection *connection;
	int ret;

	mutex_lock(&gb_connection_mutex);

	if (intf && gb_connection_cport_in_use(intf, cport_id)) {
		dev_err(&intf->dev, "cport %u already in use\n", cport_id);
		ret = -EBUSY;
		goto err_unlock;
	}

	ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
	if (ret < 0) {
		dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
		goto err_unlock;
	}
	hd_cport_id = ret;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection) {
		ret = -ENOMEM;
		goto err_hd_cport_release;
	}

	connection->hd_cport_id = hd_cport_id;
	connection->intf_cport_id = cport_id;
	connection->hd = hd;
	connection->intf = intf;
	connection->bundle = bundle;
	connection->handler = handler;
	connection->flags = flags;
	if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
		connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
	connection->state = GB_CONNECTION_STATE_DISABLED;

	atomic_set(&connection->op_cycle, 0);
	mutex_init(&connection->mutex);
	spin_lock_init(&connection->lock);
	INIT_LIST_HEAD(&connection->operations);

	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
					 dev_name(&hd->dev), hd_cport_id);
	if (!connection->wq) {
		ret = -ENOMEM;
		goto err_free_connection;
	}

	kref_init(&connection->kref);

	gb_connection_init_name(connection);

	spin_lock_irq(&gb_connections_lock);
	list_add(&connection->hd_links, &hd->connections);

	if (bundle)
		list_add(&connection->bundle_links, &bundle->connections);
	else
		INIT_LIST_HEAD(&connection->bundle_links);
	spin_unlock_irq(&gb_connections_lock);

	mutex_unlock(&gb_connection_mutex);

	trace_gb_connection_create(connection);

	return connection;

err_free_connection:
	kfree(connection);
err_hd_cport_release:
	gb_hd_cport_release(hd, hd_cport_id);
err_unlock:
	mutex_unlock(&gb_connection_mutex);

	return ERR_PTR(ret);
}

struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
			    gb_request_handler_t handler)
{
	return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
	return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
				     GB_CONNECTION_FLAG_CONTROL |
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
		     gb_request_handler_t handler)
{
	struct gb_interface *intf = bundle->intf;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, 0);
}
EXPORT_SYMBOL_GPL(gb_connection_create);

struct gb_connection *
gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
			   gb_request_handler_t handler,
			   unsigned long flags)
{
	struct gb_interface *intf = bundle->intf;

	if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
		flags &= ~GB_CONNECTION_FLAG_CORE_MASK;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_flags);

struct gb_connection *
gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
			       unsigned long flags)
{
	flags |= GB_CONNECTION_FLAG_OFFLOADED;

	return gb_connection_create_flags(bundle, cport_id, NULL, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);
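
/*
 * Typical usage from a bundle driver (a sketch; the handler name is
 * illustrative only):
 *
 *	connection = gb_connection_create(bundle, cport_id, my_request_handler);
 *	if (IS_ERR(connection))
 *		return PTR_ERR(connection);
 *
 *	ret = gb_connection_enable(connection);
 *	if (ret) {
 *		gb_connection_destroy(connection);
 *		return ret;
 *	}
 */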

static int gb_connection_hd_cport_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_enable)
		return 0;

	ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
				       connection->flags);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static void gb_connection_hd_cport_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_disable)
		return;

	ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
			connection->name, ret);
	}
}

static int gb_connection_hd_cport_connected(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_connected)
		return 0;

	ret = hd->driver->cport_connected(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to set connected state: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static int gb_connection_hd_cport_flush(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_flush)
		return 0;

	ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}
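
/*
 * Quiescing reserves enough peer buffer space for the final shutdown
 * exchange: one operation message header plus the cport-shutdown request,
 * and one extra header when a mode switch is pending.
 */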
static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	size_t peer_space;
	int ret;

	if (!hd->driver->cport_quiesce)
		return 0;

	peer_space = sizeof(struct gb_operation_msg_hdr) +
			sizeof(struct gb_cport_shutdown_request);

	if (connection->mode_switch)
		peer_space += sizeof(struct gb_operation_msg_hdr);

	ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
					peer_space,
					GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static int gb_connection_hd_cport_clear(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_clear)
		return 0;

	ret = hd->driver->cport_clear(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to clear host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

/*
 * Request the SVC to create a connection from AP's cport to interface's
 * cport.
 */
static int
gb_connection_svc_connection_create(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_interface *intf;
	u8 cport_flags;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	intf = connection->intf;

	/*
	 * Enable either E2EFC or CSD, unless no flow control is requested.
	 */
	cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
	if (gb_connection_flow_control_disabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
	} else if (gb_connection_e2efc_enabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
				GB_SVC_CPORT_FLAG_E2EFC;
	}

	ret = gb_svc_connection_create(hd->svc,
				       hd->svc->ap_intf_id,
				       connection->hd_cport_id,
				       intf->interface_id,
				       connection->intf_cport_id,
				       cport_flags);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to create svc connection: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static void
gb_connection_svc_connection_destroy(struct gb_connection *connection)
{
	if (gb_connection_is_static(connection))
		return;

	gb_svc_connection_destroy(connection->hd->svc,
				  connection->hd->svc->ap_intf_id,
				  connection->hd_cport_id,
				  connection->intf->interface_id,
				  connection->intf_cport_id);
}

/* Inform Interface about active CPorts */
static int gb_connection_control_connected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_control(connection))
		return 0;

	control = connection->intf->control;

	ret = gb_control_connected_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->bundle->dev,
			"failed to connect cport: %d\n", ret);
		return ret;
	}

	return 0;
}

static void
gb_connection_control_disconnecting(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	ret = gb_control_disconnecting_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to send disconnecting: %d\n",
			connection->name, ret);
	}
}

static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	if (gb_connection_is_control(connection)) {
		if (connection->mode_switch) {
			ret = gb_control_mode_switch_operation(control);
			if (ret) {
				/*
				 * Allow mode switch to time out waiting for
				 * mailbox event.
				 */
				return;
			}
		}

		return;
	}

	ret = gb_control_disconnected_operation(control, cport_id);
	if (ret) {
		dev_warn(&connection->bundle->dev,
			 "failed to disconnect cport: %d\n", ret);
	}
}

static int gb_connection_shutdown_operation(struct gb_connection *connection,
					    u8 phase)
{
	struct gb_cport_shutdown_request *req;
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_core(connection,
					     GB_REQUEST_TYPE_CPORT_SHUTDOWN,
					     sizeof(*req), 0, 0,
					     GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	req = operation->request->payload;
	req->phase = phase;

	ret = gb_operation_request_send_sync(operation);

	gb_operation_put(operation);

	return ret;
}
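
/*
 * The cport-shutdown handshake runs in two phases, as the disable paths
 * below show: phase 1 is sent before the host cport is quiesced and
 * phase 2 after. Offloaded connections delegate the handshake to the
 * host driver, as their cport traffic bypasses greybus core.
 */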
static int gb_connection_cport_shutdown(struct gb_connection *connection,
					u8 phase)
{
	struct gb_host_device *hd = connection->hd;
	const struct gb_hd_driver *drv = hd->driver;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_offloaded(connection)) {
		if (!drv->cport_shutdown)
			return 0;

		ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase,
					  GB_OPERATION_TIMEOUT_DEFAULT);
	} else {
		ret = gb_connection_shutdown_operation(connection, phase);
	}

	if (ret) {
		dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n",
			connection->name, phase, ret);
		return ret;
	}

	return 0;
}

static int
gb_connection_cport_shutdown_phase_1(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 1);
}

static int
gb_connection_cport_shutdown_phase_2(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 2);
}

/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with connection lock held and state set to DISABLED or
 * DISCONNECTING.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
					    int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;

	while (!list_empty(&connection->operations)) {
		operation = list_last_entry(&connection->operations,
					    struct gb_operation, links);
		gb_operation_get(operation);
		spin_unlock_irq(&connection->lock);

		if (gb_operation_is_incoming(operation))
			gb_operation_cancel_incoming(operation, errno);
		else
			gb_operation_cancel(operation, errno);

		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}

/*
 * Cancel all active incoming operations on a connection.
 *
 * Locking: Called with connection lock held and state set to ENABLED_TX.
 */
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
					int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;
	bool incoming;

	while (!list_empty(&connection->operations)) {
		incoming = false;
		list_for_each_entry(operation, &connection->operations,
				    links) {
			if (gb_operation_is_incoming(operation)) {
				gb_operation_get(operation);
				incoming = true;
				break;
			}
		}

		if (!incoming)
			break;

		spin_unlock_irq(&connection->lock);

		/* FIXME: flush, not cancel? */
		gb_operation_cancel_incoming(operation, errno);
		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}

/*
 * _gb_connection_enable() - enable a connection
 * @connection:		connection to enable
 * @rx:			whether to enable incoming requests
 *
 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
 * ENABLED_TX->ENABLED state transitions.
 *
 * Locking: Caller holds connection->mutex.
 */
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
	int ret;

	/* Handle ENABLED_TX -> ENABLED transitions. */
	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
		if (!(connection->handler && rx))
			return 0;

		spin_lock_irq(&connection->lock);
		connection->state = GB_CONNECTION_STATE_ENABLED;
		spin_unlock_irq(&connection->lock);

		return 0;
	}

	ret = gb_connection_hd_cport_enable(connection);
	if (ret)
		return ret;

	ret = gb_connection_svc_connection_create(connection);
	if (ret)
		goto err_hd_cport_clear;

	ret = gb_connection_hd_cport_connected(connection);
	if (ret)
		goto err_svc_connection_destroy;

	spin_lock_irq(&connection->lock);
	if (connection->handler && rx)
		connection->state = GB_CONNECTION_STATE_ENABLED;
	else
		connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	spin_unlock_irq(&connection->lock);

	ret = gb_connection_control_connected(connection);
	if (ret)
		goto err_control_disconnecting;

	return 0;

err_control_disconnecting:
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	/* Transmit queue should already be empty. */
	gb_connection_hd_cport_flush(connection);

	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);
	connection->state = GB_CONNECTION_STATE_DISABLED;
err_svc_connection_destroy:
	gb_connection_svc_connection_destroy(connection);
err_hd_cport_clear:
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	return ret;
}
int gb_connection_enable(struct gb_connection *connection)
{
	int ret = 0;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_ENABLED)
		goto out_unlock;

	ret = _gb_connection_enable(connection, true);
	if (!ret)
		trace_gb_connection_enable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable);

int gb_connection_enable_tx(struct gb_connection *connection)
{
	int ret = 0;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_ENABLED) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
		goto out_unlock;

	ret = _gb_connection_enable(connection, false);
	if (!ret)
		trace_gb_connection_enable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable_tx);

void gb_connection_disable_rx(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	spin_lock_irq(&connection->lock);
	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
		spin_unlock_irq(&connection->lock);
		goto out_unlock;
	}
	connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	trace_gb_connection_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_rx);
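
/*
 * During a mode switch the control connection's SVC connection and host
 * cport must stay up across gb_connection_disable(); tear-down of those
 * resources is deferred until gb_connection_mode_switch_complete().
 */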
void gb_connection_mode_switch_prepare(struct gb_connection *connection)
{
	connection->mode_switch = true;
}

void gb_connection_mode_switch_complete(struct gb_connection *connection)
{
	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	connection->mode_switch = false;
}
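
/*
 * Tear-down order mirrors enable in reverse: cancel in-flight operations,
 * flush the host cport, run the disconnecting handshake and two-phase
 * cport shutdown around quiescing, then notify the remote end before the
 * SVC connection and host cport are finally released.
 */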
void gb_connection_disable(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);
	connection->state = GB_CONNECTION_STATE_DISABLED;

	/* control-connection tear down is deferred when mode switching */
	if (!connection->mode_switch) {
		gb_connection_svc_connection_destroy(connection);
		gb_connection_hd_cport_clear(connection);

		gb_connection_hd_cport_disable(connection);
	}

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);

/* Disable a connection without communicating with the remote end. */
void gb_connection_disable_forced(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_forced);

/* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection)
{
	if (!connection)
		return;

	if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
		gb_connection_disable(connection);

	mutex_lock(&gb_connection_mutex);

	spin_lock_irq(&gb_connections_lock);
	list_del(&connection->bundle_links);
	list_del(&connection->hd_links);
	spin_unlock_irq(&gb_connections_lock);

	destroy_workqueue(connection->wq);

	gb_hd_cport_release(connection->hd, connection->hd_cport_id);
	connection->hd_cport_id = CPORT_ID_BAD;

	mutex_unlock(&gb_connection_mutex);

	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);

void gb_connection_latency_tag_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->latency_tag_enable)
		return;

	ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to enable latency tag: %d\n",
			connection->name, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);

void gb_connection_latency_tag_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->latency_tag_disable)
		return;

	ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to disable latency tag: %d\n",
			connection->name, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);