2011-05-15 13:43:42 +03:00
/*
*
* Intel Management Engine Interface ( Intel MEI ) Linux driver
2012-02-09 19:25:53 +02:00
* Copyright ( c ) 2003 - 2012 , Intel Corporation .
2011-05-15 13:43:42 +03:00
*
* This program is free software ; you can redistribute it and / or modify it
* under the terms and conditions of the GNU General Public License ,
* version 2 , as published by the Free Software Foundation .
*
* This program is distributed in the hope it will be useful , but WITHOUT
* ANY WARRANTY ; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE . See the GNU General Public License for
* more details .
*
*/
2013-03-27 16:58:30 +02:00
# include <linux/export.h>
2011-05-15 13:43:42 +03:00
# include <linux/kthread.h>
# include <linux/interrupt.h>
# include <linux/fs.h>
# include <linux/jiffies.h>
2014-09-29 16:31:46 +03:00
# include <linux/slab.h>
2015-10-13 15:02:38 +03:00
# include <linux/pm_runtime.h>
2011-05-15 13:43:42 +03:00
2012-05-09 16:38:59 +03:00
# include <linux/mei.h>
2012-12-25 19:06:03 +02:00
# include "mei_dev.h"
2013-01-08 23:07:12 +02:00
# include "hbm.h"
2013-01-08 23:07:14 +02:00
# include "client.h"
2011-05-15 13:43:42 +03:00
2013-03-17 11:41:20 +02:00
/**
2014-01-08 22:31:46 +02:00
* mei_irq_compl_handler - dispatch complete handlers
2013-03-17 11:41:20 +02:00
* for the completed callbacks
*
2014-09-29 16:31:49 +03:00
* @ dev : mei device
2017-01-27 16:32:45 +02:00
* @ cmpl_list : list of completed cbs
2013-03-17 11:41:20 +02:00
*/
2017-01-27 16:32:45 +02:00
void mei_irq_compl_handler ( struct mei_device * dev , struct list_head * cmpl_list )
2013-03-17 11:41:20 +02:00
{
struct mei_cl_cb * cb , * next ;
struct mei_cl * cl ;
2017-01-27 16:32:45 +02:00
list_for_each_entry_safe ( cb , next , cmpl_list , list ) {
2013-03-17 11:41:20 +02:00
cl = cb - > cl ;
2015-02-10 10:39:39 +02:00
list_del_init ( & cb - > list ) ;
2013-03-17 11:41:20 +02:00
2014-09-29 16:31:42 +03:00
dev_dbg ( dev - > dev , " completing call back. \n " ) ;
2017-03-20 15:04:03 +02:00
mei_cl_complete ( cl , cb ) ;
2013-03-17 11:41:20 +02:00
}
}
2013-03-27 16:58:30 +02:00
EXPORT_SYMBOL_GPL ( mei_irq_compl_handler ) ;
2013-04-19 22:01:34 +03:00
2011-05-15 13:43:42 +03:00
/**
2013-04-19 22:01:34 +03:00
* mei_cl_hbm_equal - check if hbm is addressed to the client
2011-05-15 13:43:42 +03:00
*
2013-04-19 22:01:34 +03:00
* @ cl : host client
2011-05-15 13:43:42 +03:00
* @ mei_hdr : header of mei client message
*
2014-09-29 16:31:49 +03:00
* Return : true if matches , false otherwise
2011-05-15 13:43:42 +03:00
*/
2013-04-19 22:01:34 +03:00
static inline int mei_cl_hbm_equal ( struct mei_cl * cl ,
struct mei_msg_hdr * mei_hdr )
2011-05-15 13:43:42 +03:00
{
2015-05-04 09:43:56 +03:00
return mei_cl_host_addr ( cl ) = = mei_hdr - > host_addr & &
2015-05-04 09:43:54 +03:00
mei_cl_me_id ( cl ) = = mei_hdr - > me_addr ;
2013-04-19 22:01:34 +03:00
}
2011-05-15 13:43:42 +03:00
2015-02-10 10:39:41 +02:00
/**
* mei_irq_discard_msg - discard received message
*
* @ dev : mei device
* @ hdr : message header
*/
2017-03-20 15:04:03 +02:00
static void mei_irq_discard_msg ( struct mei_device * dev , struct mei_msg_hdr * hdr )
2015-02-10 10:39:41 +02:00
{
/*
* no need to check for size as it is guarantied
* that length fits into rd_msg_buf
*/
mei_read_slots ( dev , dev - > rd_msg_buf , hdr - > length ) ;
dev_dbg ( dev - > dev , " discarding message " MEI_HDR_FMT " \n " ,
MEI_HDR_PRM ( hdr ) ) ;
}
2011-05-15 13:43:42 +03:00
/**
 * mei_cl_irq_read_msg - process client message
 *
 * Copies an incoming message fragment into the client's pending read
 * callback buffer; on the final fragment the callback is moved to the
 * completion list. Any error path discards the payload from the hardware
 * buffer so slot accounting stays correct.
 *
 * @cl: reading client
 * @mei_hdr: header of mei client message
 * @cmpl_list: completion list
 *
 * Return: always 0
 */
static int mei_cl_irq_read_msg(struct mei_cl *cl,
			       struct mei_msg_hdr *mei_hdr,
			       struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	struct mei_cl_cb *cb;
	size_t buf_sz;

	/* take the oldest pending read request for this client */
	cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
	if (!cb) {
		if (!mei_cl_is_fixed_address(cl)) {
			cl_err(dev, cl, "pending read cb not found\n");
			goto discard;
		}
		/* fixed-address clients may receive unsolicited messages:
		 * allocate an implicit MTU-sized read request on the fly
		 */
		cb = mei_cl_alloc_cb(cl, mei_cl_mtu(cl), MEI_FOP_READ, cl->fp);
		if (!cb)
			goto discard;
		list_add_tail(&cb->list, &cl->rd_pending);
	}

	if (!mei_cl_is_connected(cl)) {
		cl_dbg(dev, cl, "not connected\n");
		cb->status = -ENODEV;
		goto discard;
	}

	buf_sz = mei_hdr->length + cb->buf_idx;
	/* catch for integer overflow */
	if (buf_sz < cb->buf_idx) {
		cl_err(dev, cl, "message is too big len %d idx %zu\n",
		       mei_hdr->length, cb->buf_idx);
		cb->status = -EMSGSIZE;
		goto discard;
	}

	/* fragment would overrun the user's receive buffer */
	if (cb->buf.size < buf_sz) {
		cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n",
		       cb->buf.size, mei_hdr->length, cb->buf_idx);
		cb->status = -EMSGSIZE;
		goto discard;
	}

	/* append this fragment after previously received data */
	mei_read_slots(dev, cb->buf.data + cb->buf_idx, mei_hdr->length);
	cb->buf_idx += mei_hdr->length;

	if (mei_hdr->msg_complete) {
		cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx);
		list_move_tail(&cb->list, cmpl_list);
	} else {
		/* more fragments expected: keep the device awake */
		pm_runtime_mark_last_busy(dev->dev);
		pm_request_autosuspend(dev->dev);
	}

	return 0;

discard:
	/* complete the cb (with its error status) if we have one,
	 * then flush the payload out of the hardware buffer
	 */
	if (cb)
		list_move_tail(&cb->list, cmpl_list);
	mei_irq_discard_msg(dev, mei_hdr);
	return 0;
}
2014-02-12 21:41:52 +02:00
/**
* mei_cl_irq_disconnect_rsp - send disconnection response message
*
* @ cl : client
* @ cb : callback block .
* @ cmpl_list : complete list .
*
2014-09-29 16:31:49 +03:00
* Return : 0 , OK ; otherwise , error .
2014-02-12 21:41:52 +02:00
*/
static int mei_cl_irq_disconnect_rsp ( struct mei_cl * cl , struct mei_cl_cb * cb ,
2017-01-27 16:32:45 +02:00
struct list_head * cmpl_list )
2014-02-12 21:41:52 +02:00
{
struct mei_device * dev = cl - > dev ;
2014-02-19 17:35:48 +02:00
u32 msg_slots ;
int slots ;
2014-02-12 21:41:52 +02:00
int ret ;
2014-02-19 17:35:48 +02:00
slots = mei_hbuf_empty_slots ( dev ) ;
msg_slots = mei_data2slots ( sizeof ( struct hbm_client_connect_response ) ) ;
2014-02-12 21:41:52 +02:00
2014-02-19 17:35:48 +02:00
if ( slots < msg_slots )
2014-02-12 21:41:52 +02:00
return - EMSGSIZE ;
ret = mei_hbm_cl_disconnect_rsp ( dev , cl ) ;
2017-01-27 16:32:45 +02:00
list_move_tail ( & cb - > list , cmpl_list ) ;
2014-02-12 21:41:52 +02:00
return ret ;
}
2011-05-15 13:43:42 +03:00
/**
2014-09-29 16:31:50 +03:00
* mei_cl_irq_read - processes client read related operation from the
2013-05-12 15:34:46 +03:00
* interrupt thread context - request for flow control credits
2011-05-15 13:43:42 +03:00
*
2013-05-12 15:34:46 +03:00
* @ cl : client
* @ cb : callback block .
2011-05-15 13:43:42 +03:00
* @ cmpl_list : complete list .
*
2014-09-29 16:31:49 +03:00
* Return : 0 , OK ; otherwise , error .
2011-05-15 13:43:42 +03:00
*/
2013-05-12 15:34:46 +03:00
static int mei_cl_irq_read ( struct mei_cl * cl , struct mei_cl_cb * cb ,
2017-01-27 16:32:45 +02:00
struct list_head * cmpl_list )
2011-05-15 13:43:42 +03:00
{
2013-05-12 15:34:46 +03:00
struct mei_device * dev = cl - > dev ;
2014-02-19 17:35:48 +02:00
u32 msg_slots ;
int slots ;
2013-09-16 23:44:43 +03:00
int ret ;
2016-07-26 01:06:06 +03:00
if ( ! list_empty ( & cl - > rd_pending ) )
return 0 ;
2014-02-19 17:35:48 +02:00
msg_slots = mei_data2slots ( sizeof ( struct hbm_flow_control ) ) ;
slots = mei_hbuf_empty_slots ( dev ) ;
2013-09-16 23:44:43 +03:00
2014-02-19 17:35:48 +02:00
if ( slots < msg_slots )
2013-03-11 18:27:02 +02:00
return - EMSGSIZE ;
2012-07-04 19:24:52 +03:00
2013-09-16 23:44:43 +03:00
ret = mei_hbm_cl_flow_control_req ( dev , cl ) ;
if ( ret ) {
cl - > status = ret ;
2013-05-12 15:34:46 +03:00
cb - > buf_idx = 0 ;
2017-01-27 16:32:45 +02:00
list_move_tail ( & cb - > list , cmpl_list ) ;
2013-09-16 23:44:43 +03:00
return ret ;
2012-03-14 14:39:42 +02:00
}
2013-09-16 23:44:43 +03:00
2015-02-10 10:39:46 +02:00
list_move_tail ( & cb - > list , & cl - > rd_pending ) ;
2012-03-14 14:39:42 +02:00
2011-05-15 13:43:42 +03:00
return 0 ;
}
2016-02-07 23:35:36 +02:00
static inline bool hdr_is_hbm ( struct mei_msg_hdr * mei_hdr )
{
return mei_hdr - > host_addr = = 0 & & mei_hdr - > me_addr = = 0 ;
}
static inline bool hdr_is_fixed ( struct mei_msg_hdr * mei_hdr )
{
return mei_hdr - > host_addr = = 0 & & mei_hdr - > me_addr ! = 0 ;
}
2017-06-12 12:15:55 +03:00
/* sanity check a raw message header: an all-zero header or one with
 * reserved bits set indicates hardware-level corruption
 */
static inline int hdr_is_valid(u32 msg_hdr)
{
	struct mei_msg_hdr *mei_hdr = (struct mei_msg_hdr *)&msg_hdr;

	if (!msg_hdr || mei_hdr->reserved)
		return -EBADMSG;

	return 0;
}
2011-05-15 13:43:42 +03:00
/**
 * mei_irq_read_handler - bottom half read routine after ISR to
 * handle the read processing.
 *
 * Reads and validates one message header, then dispatches the payload
 * either to the HBM (bus management) layer or to the matching client.
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 * @slots: slots to read.
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_irq_read_handler(struct mei_device *dev,
			 struct list_head *cmpl_list, s32 *slots)
{
	struct mei_msg_hdr *mei_hdr;
	struct mei_cl *cl;
	int ret;

	/* a zero rd_msg_hdr means the previous message was fully consumed:
	 * fetch and validate a fresh header (one slot)
	 */
	if (!dev->rd_msg_hdr) {
		dev->rd_msg_hdr = mei_read_hdr(dev);
		(*slots)--;
		dev_dbg(dev->dev, "slots =%08x.\n", *slots);

		ret = hdr_is_valid(dev->rd_msg_hdr);
		if (ret) {
			dev_err(dev->dev, "corrupted message header 0x%08X\n",
				dev->rd_msg_hdr);
			goto end;
		}
	}

	mei_hdr = (struct mei_msg_hdr *)&dev->rd_msg_hdr;
	dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

	/* the whole payload must already be in the hardware buffer */
	if (mei_slots2data(*slots) < mei_hdr->length) {
		dev_err(dev->dev, "less data available than length=%08x.\n",
			*slots);
		/* we can't read the message */
		ret = -ENODATA;
		goto end;
	}

	/* HBM message */
	if (hdr_is_hbm(mei_hdr)) {
		ret = mei_hbm_dispatch(dev, mei_hdr);
		if (ret) {
			dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n",
				ret);
			goto end;
		}
		goto reset_slots;
	}

	/* find recipient cl */
	list_for_each_entry(cl, &dev->file_list, link) {
		if (mei_cl_hbm_equal(cl, mei_hdr)) {
			cl_dbg(dev, cl, "got a message\n");
			break;
		}
	}

	/* if no recipient cl was found we assume corrupted header */
	if (&cl->link == &dev->file_list) {
		/* A message for not connected fixed address clients
		 * should be silently discarded
		 */
		if (hdr_is_fixed(mei_hdr)) {
			mei_irq_discard_msg(dev, mei_hdr);
			ret = 0;
			goto reset_slots;
		}
		dev_err(dev->dev, "no destination client found 0x%08X\n",
			dev->rd_msg_hdr);
		ret = -EBADMSG;
		goto end;
	}

	ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);

reset_slots:
	/* reset the number of slots and header */
	*slots = mei_count_full_read_slots(dev);
	dev->rd_msg_hdr = 0;

	if (*slots == -EOVERFLOW) {
		/* overflow - reset */
		dev_err(dev->dev, "resetting due to slots overflow.\n");
		/* set the event since message has been read */
		ret = -ERANGE;
		goto end;
	}
end:
	return ret;
}
EXPORT_SYMBOL_GPL(mei_irq_read_handler);
2011-05-15 13:43:42 +03:00
/**
 * mei_irq_write_handler - dispatch write requests
 * after irq received
 *
 * Completes callbacks waiting for write completion, sends queued control
 * messages (connect/disconnect/flow-control/notify), and then services
 * the data write list. Returns early on the first send error so the
 * caller can handle it.
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list)
{
	struct mei_cl *cl;
	struct mei_cl_cb *cb, *next;
	s32 slots;
	int ret;

	/* nothing to do unless the host buffer is ours to fill */
	if (!mei_hbuf_acquire(dev))
		return 0;

	slots = mei_hbuf_empty_slots(dev);
	if (slots <= 0)
		return -EMSGSIZE;

	/* complete all waiting for write CB */
	dev_dbg(dev->dev, "complete all waiting for write cb.\n");

	list_for_each_entry_safe(cb, next, &dev->write_waiting_list, list) {
		cl = cb->cl;

		cl->status = 0;
		cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
		cl->writing_state = MEI_WRITE_COMPLETE;
		list_move_tail(&cb->list, cmpl_list);
	}

	/* complete control write list CB */
	dev_dbg(dev->dev, "complete control write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list, list) {
		cl = cb->cl;
		switch (cb->fop_type) {
		case MEI_FOP_DISCONNECT:
			/* send disconnect message */
			ret = mei_cl_irq_disconnect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_READ:
			/* send flow control message */
			ret = mei_cl_irq_read(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_CONNECT:
			/* connect message */
			ret = mei_cl_irq_connect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_DISCONNECT_RSP:
			/* send disconnect resp */
			ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;
		case MEI_FOP_NOTIFY_START:
		case MEI_FOP_NOTIFY_STOP:
			/* send notification request/stop message */
			ret = mei_cl_irq_notify(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;
		default:
			/* unknown fop types cannot be queued: driver bug */
			BUG();
		}

	}
	/* complete write list CB */
	dev_dbg(dev->dev, "complete write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->write_list, list) {
		cl = cb->cl;
		ret = mei_cl_irq_write(cl, cb, cmpl_list);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mei_irq_write_handler);
2011-05-15 13:43:42 +03:00
2015-07-23 21:37:13 +03:00
/**
* mei_connect_timeout - connect / disconnect timeouts
*
* @ cl : host client
*/
static void mei_connect_timeout ( struct mei_cl * cl )
{
struct mei_device * dev = cl - > dev ;
if ( cl - > state = = MEI_FILE_CONNECTING ) {
if ( dev - > hbm_f_dot_supported ) {
cl - > state = MEI_FILE_DISCONNECT_REQUIRED ;
wake_up ( & cl - > wait ) ;
return ;
}
}
mei_reset ( dev ) ;
}
2011-05-15 13:43:42 +03:00
2016-09-25 13:25:31 +03:00
/* stall detection period: the timer work runs every 2 seconds */
#define MEI_STALL_TIMER_FREQ (2 * HZ)
/**
 * mei_schedule_stall_timer - re-arm stall_timer work
 *
 * Schedule stall timer
 *
 * @dev: the device structure
 */
void mei_schedule_stall_timer(struct mei_device *dev)
{
	schedule_delayed_work(&dev->timer_work, MEI_STALL_TIMER_FREQ);
}
2011-05-15 13:43:42 +03:00
/**
 * mei_timer - timer function.
 *
 * Periodic stall detector: decrements the HBM init handshake timer and
 * per-client connect/disconnect timers, resetting the device or failing
 * the client when one expires. Re-arms itself only while at least one
 * timer is still counting down.
 *
 * @work: pointer to the work_struct structure
 *
 */
void mei_timer(struct work_struct *work)
{
	struct mei_cl *cl;
	struct mei_device *dev = container_of(work,
					struct mei_device, timer_work.work);
	bool reschedule_timer = false;

	mutex_lock(&dev->device_lock);

	/* Catch interrupt stalls during HBM init handshake */
	if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
	    dev->hbm_state != MEI_HBM_IDLE) {

		if (dev->init_clients_timer) {
			if (--dev->init_clients_timer == 0) {
				/* handshake stalled: full device reset */
				dev_err(dev->dev, "timer: init clients timeout hbm_state = %d.\n",
					dev->hbm_state);
				mei_reset(dev);
				goto out;
			}
			/* still counting down: keep the timer running */
			reschedule_timer = true;
		}
	}

	if (dev->dev_state != MEI_DEV_ENABLED)
		goto out;

	/*** connect/disconnect timeouts ***/
	list_for_each_entry(cl, &dev->file_list, link) {
		if (cl->timer_count) {
			if (--cl->timer_count == 0) {
				dev_err(dev->dev, "timer: connect/disconnect timeout.\n");
				mei_connect_timeout(cl);
				goto out;
			}
			reschedule_timer = true;
		}
	}
out:
	/* re-arm only while an active countdown remains */
	if (dev->dev_state != MEI_DEV_DISABLED && reschedule_timer)
		mei_schedule_stall_timer(dev);

	mutex_unlock(&dev->device_lock);
}