2011-05-15 14:43:42 +04:00
/*
*
* Intel Management Engine Interface ( Intel MEI ) Linux driver
2012-02-09 21:25:53 +04:00
* Copyright ( c ) 2003 - 2012 , Intel Corporation .
2011-05-15 14:43:42 +04:00
*
* This program is free software ; you can redistribute it and / or modify it
* under the terms and conditions of the GNU General Public License ,
* version 2 , as published by the Free Software Foundation .
*
* This program is distributed in the hope it will be useful , but WITHOUT
* ANY WARRANTY ; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE . See the GNU General Public License for
* more details .
*
*/
2013-03-27 18:58:30 +04:00
# include <linux/export.h>
2011-05-15 14:43:42 +04:00
# include <linux/kthread.h>
# include <linux/interrupt.h>
# include <linux/fs.h>
# include <linux/jiffies.h>
2014-09-29 17:31:46 +04:00
# include <linux/slab.h>
2015-10-13 15:02:38 +03:00
# include <linux/pm_runtime.h>
2011-05-15 14:43:42 +04:00
2012-05-09 17:38:59 +04:00
# include <linux/mei.h>
2012-12-25 21:06:03 +04:00
# include "mei_dev.h"
2013-01-09 01:07:12 +04:00
# include "hbm.h"
2013-01-09 01:07:14 +04:00
# include "client.h"
2011-05-15 14:43:42 +04:00
2013-03-17 13:41:20 +04:00
/**
2014-01-09 00:31:46 +04:00
* mei_irq_compl_handler - dispatch complete handlers
2013-03-17 13:41:20 +04:00
* for the completed callbacks
*
2014-09-29 17:31:49 +04:00
* @ dev : mei device
* @ compl_list : list of completed cbs
2013-03-17 13:41:20 +04:00
*/
void mei_irq_compl_handler ( struct mei_device * dev , struct mei_cl_cb * compl_list )
{
struct mei_cl_cb * cb , * next ;
struct mei_cl * cl ;
list_for_each_entry_safe ( cb , next , & compl_list - > list , list ) {
cl = cb - > cl ;
2015-02-10 11:39:39 +03:00
list_del_init ( & cb - > list ) ;
2013-03-17 13:41:20 +04:00
2014-09-29 17:31:42 +04:00
dev_dbg ( dev - > dev , " completing call back. \n " ) ;
2013-03-17 13:41:20 +04:00
if ( cl = = & dev - > iamthif_cl )
2016-02-08 00:35:26 +03:00
mei_amthif_complete ( cl , cb ) ;
2013-03-17 13:41:20 +04:00
else
2013-05-12 16:34:45 +04:00
mei_cl_complete ( cl , cb ) ;
2013-03-17 13:41:20 +04:00
}
}
2013-03-27 18:58:30 +04:00
EXPORT_SYMBOL_GPL ( mei_irq_compl_handler ) ;
2013-04-19 23:01:34 +04:00
2011-05-15 14:43:42 +04:00
/**
2013-04-19 23:01:34 +04:00
* mei_cl_hbm_equal - check if hbm is addressed to the client
2011-05-15 14:43:42 +04:00
*
2013-04-19 23:01:34 +04:00
* @ cl : host client
2011-05-15 14:43:42 +04:00
* @ mei_hdr : header of mei client message
*
2014-09-29 17:31:49 +04:00
* Return : true if matches , false otherwise
2011-05-15 14:43:42 +04:00
*/
2013-04-19 23:01:34 +04:00
static inline int mei_cl_hbm_equal ( struct mei_cl * cl ,
struct mei_msg_hdr * mei_hdr )
2011-05-15 14:43:42 +04:00
{
2015-05-04 09:43:56 +03:00
return mei_cl_host_addr ( cl ) = = mei_hdr - > host_addr & &
2015-05-04 09:43:54 +03:00
mei_cl_me_id ( cl ) = = mei_hdr - > me_addr ;
2013-04-19 23:01:34 +04:00
}
2011-05-15 14:43:42 +04:00
2015-02-10 11:39:41 +03:00
/**
* mei_irq_discard_msg - discard received message
*
* @ dev : mei device
* @ hdr : message header
*/
void mei_irq_discard_msg ( struct mei_device * dev , struct mei_msg_hdr * hdr )
{
/*
* no need to check for size as it is guarantied
* that length fits into rd_msg_buf
*/
mei_read_slots ( dev , dev - > rd_msg_buf , hdr - > length ) ;
dev_dbg ( dev - > dev , " discarding message " MEI_HDR_FMT " \n " ,
MEI_HDR_PRM ( hdr ) ) ;
}
2011-05-15 14:43:42 +04:00
/**
 * mei_cl_irq_read_msg - process client message
 *
 * Copies an incoming message fragment into the client's pending read
 * callback buffer; on completion or on any error the callback is moved
 * to @complete_list. On error paths the payload is still drained from
 * the hardware via mei_irq_discard_msg() so the read pointer advances.
 *
 * @cl: reading client
 * @mei_hdr: header of mei client message
 * @complete_list: completion list
 *
 * Return: always 0
 */
int mei_cl_irq_read_msg(struct mei_cl *cl,
			struct mei_msg_hdr *mei_hdr,
			struct mei_cl_cb *complete_list)
{
	struct mei_device *dev = cl->dev;
	struct mei_cl_cb *cb;
	size_t buf_sz;

	/* a pending read cb must already exist; for fixed-address clients
	 * one is allocated on demand instead */
	cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
	if (!cb) {
		if (!mei_cl_is_fixed_address(cl)) {
			cl_err(dev, cl, "pending read cb not found\n");
			goto discard;
		}
		cb = mei_cl_alloc_cb(cl, mei_cl_mtu(cl), MEI_FOP_READ, cl->fp);
		if (!cb)
			goto discard;
		list_add_tail(&cb->list, &cl->rd_pending);
	}

	if (!mei_cl_is_connected(cl)) {
		cl_dbg(dev, cl, "not connected\n");
		cb->status = -ENODEV;
		goto discard;
	}

	buf_sz = mei_hdr->length + cb->buf_idx;
	/* catch for integer overflow */
	if (buf_sz < cb->buf_idx) {
		cl_err(dev, cl, "message is too big len %d idx %zu\n",
			mei_hdr->length, cb->buf_idx);

		cb->status = -EMSGSIZE;
		goto discard;
	}

	/* fragment would overrun the receive buffer */
	if (cb->buf.size < buf_sz) {
		cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n",
			cb->buf.size, mei_hdr->length, cb->buf_idx);

		cb->status = -EMSGSIZE;
		goto discard;
	}

	/* copy this fragment in at the current write offset */
	mei_read_slots(dev, cb->buf.data + cb->buf_idx, mei_hdr->length);

	cb->buf_idx += mei_hdr->length;

	if (mei_hdr->msg_complete) {
		cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx);
		list_move_tail(&cb->list, &complete_list->list);
	} else {
		/* more fragments expected: keep the device awake window fresh */
		pm_runtime_mark_last_busy(dev->dev);
		pm_request_autosuspend(dev->dev);
	}

	return 0;

discard:
	/* cb may be NULL when no pending cb was found / allocation failed */
	if (cb)
		list_move_tail(&cb->list, &complete_list->list);
	mei_irq_discard_msg(dev, mei_hdr);
	return 0;
}
2014-02-12 23:41:52 +04:00
/**
* mei_cl_irq_disconnect_rsp - send disconnection response message
*
* @ cl : client
* @ cb : callback block .
* @ cmpl_list : complete list .
*
2014-09-29 17:31:49 +04:00
* Return : 0 , OK ; otherwise , error .
2014-02-12 23:41:52 +04:00
*/
static int mei_cl_irq_disconnect_rsp ( struct mei_cl * cl , struct mei_cl_cb * cb ,
2014-02-19 19:35:48 +04:00
struct mei_cl_cb * cmpl_list )
2014-02-12 23:41:52 +04:00
{
struct mei_device * dev = cl - > dev ;
2014-02-19 19:35:48 +04:00
u32 msg_slots ;
int slots ;
2014-02-12 23:41:52 +04:00
int ret ;
2014-02-19 19:35:48 +04:00
slots = mei_hbuf_empty_slots ( dev ) ;
msg_slots = mei_data2slots ( sizeof ( struct hbm_client_connect_response ) ) ;
2014-02-12 23:41:52 +04:00
2014-02-19 19:35:48 +04:00
if ( slots < msg_slots )
2014-02-12 23:41:52 +04:00
return - EMSGSIZE ;
ret = mei_hbm_cl_disconnect_rsp ( dev , cl ) ;
2016-04-17 19:16:03 +03:00
list_move_tail ( & cb - > list , & cmpl_list - > list ) ;
2014-02-12 23:41:52 +04:00
return ret ;
}
2011-05-15 14:43:42 +04:00
/**
2014-09-29 17:31:50 +04:00
* mei_cl_irq_read - processes client read related operation from the
2013-05-12 16:34:46 +04:00
* interrupt thread context - request for flow control credits
2011-05-15 14:43:42 +04:00
*
2013-05-12 16:34:46 +04:00
* @ cl : client
* @ cb : callback block .
2011-05-15 14:43:42 +04:00
* @ cmpl_list : complete list .
*
2014-09-29 17:31:49 +04:00
* Return : 0 , OK ; otherwise , error .
2011-05-15 14:43:42 +04:00
*/
2013-05-12 16:34:46 +04:00
static int mei_cl_irq_read ( struct mei_cl * cl , struct mei_cl_cb * cb ,
2014-02-19 19:35:48 +04:00
struct mei_cl_cb * cmpl_list )
2011-05-15 14:43:42 +04:00
{
2013-05-12 16:34:46 +04:00
struct mei_device * dev = cl - > dev ;
2014-02-19 19:35:48 +04:00
u32 msg_slots ;
int slots ;
2013-09-17 00:44:43 +04:00
int ret ;
2016-07-26 01:06:06 +03:00
if ( ! list_empty ( & cl - > rd_pending ) )
return 0 ;
2014-02-19 19:35:48 +04:00
msg_slots = mei_data2slots ( sizeof ( struct hbm_flow_control ) ) ;
slots = mei_hbuf_empty_slots ( dev ) ;
2013-09-17 00:44:43 +04:00
2014-02-19 19:35:48 +04:00
if ( slots < msg_slots )
2013-03-11 20:27:02 +04:00
return - EMSGSIZE ;
2012-07-04 20:24:52 +04:00
2013-09-17 00:44:43 +04:00
ret = mei_hbm_cl_flow_control_req ( dev , cl ) ;
if ( ret ) {
cl - > status = ret ;
2013-05-12 16:34:46 +04:00
cb - > buf_idx = 0 ;
list_move_tail ( & cb - > list , & cmpl_list - > list ) ;
2013-09-17 00:44:43 +04:00
return ret ;
2012-03-14 16:39:42 +04:00
}
2013-09-17 00:44:43 +04:00
2015-02-10 11:39:46 +03:00
list_move_tail ( & cb - > list , & cl - > rd_pending ) ;
2012-03-14 16:39:42 +04:00
2011-05-15 14:43:42 +04:00
return 0 ;
}
2016-02-08 00:35:36 +03:00
static inline bool hdr_is_hbm ( struct mei_msg_hdr * mei_hdr )
{
return mei_hdr - > host_addr = = 0 & & mei_hdr - > me_addr = = 0 ;
}
static inline bool hdr_is_fixed ( struct mei_msg_hdr * mei_hdr )
{
return mei_hdr - > host_addr = = 0 & & mei_hdr - > me_addr ! = 0 ;
}
2011-05-15 14:43:42 +04:00
/**
 * mei_irq_read_handler - bottom half read routine after ISR to
 * handle the read processing.
 *
 * Reads one message header (cached in dev->rd_msg_hdr across calls so a
 * partially read message survives), validates it, and routes the payload:
 * HBM messages to mei_hbm_dispatch(), client messages to the matching
 * client's read path, unmatched fixed-address messages to silent discard.
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 * @slots: slots to read.
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_irq_read_handler(struct mei_device *dev,
			 struct mei_cl_cb *cmpl_list, s32 *slots)
{
	struct mei_msg_hdr *mei_hdr;
	struct mei_cl *cl;
	int ret;

	/* fetch a new header only if the previous one was fully consumed */
	if (!dev->rd_msg_hdr) {
		dev->rd_msg_hdr = mei_read_hdr(dev);
		(*slots)--;
		dev_dbg(dev->dev, "slots =%08x.\n", *slots);
	}
	mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
	dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

	/* reserved bits set or an all-zero header mean corruption */
	if (mei_hdr->reserved || !dev->rd_msg_hdr) {
		dev_err(dev->dev, "corrupted message header 0x%08X\n",
			dev->rd_msg_hdr);
		ret = -EBADMSG;
		goto end;
	}

	/* not enough slots available yet to hold the whole payload */
	if (mei_slots2data(*slots) < mei_hdr->length) {
		dev_err(dev->dev, "less data available than length=%08x.\n",
			*slots);
		/* we can't read the message */
		ret = -ENODATA;
		goto end;
	}

	/* HBM message */
	if (hdr_is_hbm(mei_hdr)) {
		ret = mei_hbm_dispatch(dev, mei_hdr);
		if (ret) {
			dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n",
				ret);
			goto end;
		}
		goto reset_slots;
	}

	/* find recipient cl */
	list_for_each_entry(cl, &dev->file_list, link) {
		if (mei_cl_hbm_equal(cl, mei_hdr)) {
			cl_dbg(dev, cl, "got a message\n");
			break;
		}
	}

	/* if no recipient cl was found we assume corrupted header */
	if (&cl->link == &dev->file_list) {
		/* A message for not connected fixed address clients
		 * should be silently discarded
		 */
		if (hdr_is_fixed(mei_hdr)) {
			mei_irq_discard_msg(dev, mei_hdr);
			ret = 0;
			goto reset_slots;
		}
		dev_err(dev->dev, "no destination client found 0x%08X\n",
			dev->rd_msg_hdr);
		ret = -EBADMSG;
		goto end;
	}

	/* amthif client gets its dedicated read path */
	if (cl == &dev->iamthif_cl) {
		ret = mei_amthif_irq_read_msg(cl, mei_hdr, cmpl_list);
	} else {
		ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);
	}

reset_slots:
	/* reset the number of slots and header */
	*slots = mei_count_full_read_slots(dev);
	dev->rd_msg_hdr = 0;

	if (*slots == -EOVERFLOW) {
		/* overflow - reset */
		dev_err(dev->dev, "resetting due to slots overflow.\n");
		/* set the event since message has been read */
		ret = -ERANGE;
		goto end;
	}
end:
	return ret;
}
EXPORT_SYMBOL_GPL(mei_irq_read_handler);
2011-05-15 14:43:42 +04:00
/**
 * mei_irq_write_handler - dispatch write requests
 *  after irq received
 *
 * Walks three queues in order: completes callbacks that were waiting for
 * a write to finish, sends pending control messages (connect/disconnect/
 * flow-control/notify) from the control write list, then pushes data from
 * the write list. Any per-cb error aborts the pass and is returned.
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
{
	struct mei_cl *cl;
	struct mei_cl_cb *cb, *next;
	struct mei_cl_cb *list;
	s32 slots;
	int ret;

	/* nothing to do while the host buffer is owned elsewhere */
	if (!mei_hbuf_acquire(dev))
		return 0;

	slots = mei_hbuf_empty_slots(dev);
	if (slots <= 0)
		return -EMSGSIZE;

	/* complete all waiting for write CB */
	dev_dbg(dev->dev, "complete all waiting for write cb.\n");

	list = &dev->write_waiting_list;
	list_for_each_entry_safe(cb, next, &list->list, list) {
		cl = cb->cl;

		cl->status = 0;
		cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
		cl->writing_state = MEI_WRITE_COMPLETE;
		list_move_tail(&cb->list, &cmpl_list->list);
	}

	/* complete control write list CB */
	dev_dbg(dev->dev, "complete control write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) {
		cl = cb->cl;
		switch (cb->fop_type) {
		case MEI_FOP_DISCONNECT:
			/* send disconnect message */
			ret = mei_cl_irq_disconnect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_READ:
			/* send flow control message */
			ret = mei_cl_irq_read(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_CONNECT:
			/* connect message */
			ret = mei_cl_irq_connect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_DISCONNECT_RSP:
			/* send disconnect resp */
			ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;

		case MEI_FOP_NOTIFY_START:
		case MEI_FOP_NOTIFY_STOP:
			ret = mei_cl_irq_notify(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;

		default:
			/* unknown op type on the control list is a driver bug */
			BUG();
		}

	}
	/* complete write list CB */
	dev_dbg(dev->dev, "complete write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
		cl = cb->cl;
		if (cl == &dev->iamthif_cl)
			ret = mei_amthif_irq_write(cl, cb, cmpl_list);
		else
			ret = mei_cl_irq_write(cl, cb, cmpl_list);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mei_irq_write_handler);
2011-05-15 14:43:42 +04:00
2015-07-23 21:37:13 +03:00
/**
* mei_connect_timeout - connect / disconnect timeouts
*
* @ cl : host client
*/
static void mei_connect_timeout ( struct mei_cl * cl )
{
struct mei_device * dev = cl - > dev ;
if ( cl - > state = = MEI_FILE_CONNECTING ) {
if ( dev - > hbm_f_dot_supported ) {
cl - > state = MEI_FILE_DISCONNECT_REQUIRED ;
wake_up ( & cl - > wait ) ;
return ;
}
}
mei_reset ( dev ) ;
}
2011-05-15 14:43:42 +04:00
2016-09-25 13:25:31 +03:00
/* stall-timer period: fires every 2 seconds (in jiffies) */
#define MEI_STALL_TIMER_FREQ (2 * HZ)
/**
 * mei_schedule_stall_timer - re-arm stall_timer work
 *
 * Schedule stall timer
 *
 * @dev: the device structure
 */
void mei_schedule_stall_timer(struct mei_device *dev)
{
	schedule_delayed_work(&dev->timer_work, MEI_STALL_TIMER_FREQ);
}
2011-05-15 14:43:42 +04:00
/**
 * mei_timer - timer function.
 *
 * Periodic watchdog (runs as delayed work, see mei_schedule_stall_timer):
 * decrements the HBM init, per-client connect/disconnect, and amthif
 * stall counters, resetting the device or timing out the client when one
 * reaches zero. Re-arms itself only while some counter is still running.
 *
 * @work: pointer to the work_struct structure
 *
 */
void mei_timer(struct work_struct *work)
{
	struct mei_cl *cl;
	struct mei_device *dev = container_of(work,
					struct mei_device, timer_work.work);
	bool reschedule_timer = false;

	mutex_lock(&dev->device_lock);

	/* Catch interrupt stalls during HBM init handshake */
	if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
	    dev->hbm_state != MEI_HBM_IDLE) {

		if (dev->init_clients_timer) {
			if (--dev->init_clients_timer == 0) {
				dev_err(dev->dev, "timer: init clients timeout hbm_state = %d.\n",
					dev->hbm_state);
				mei_reset(dev);
				goto out;
			}
			reschedule_timer = true;
		}
	}

	/* client timeouts are only tracked on an enabled device */
	if (dev->dev_state != MEI_DEV_ENABLED)
		goto out;

	/*** connect/disconnect timeouts ***/
	list_for_each_entry(cl, &dev->file_list, link) {
		if (cl->timer_count) {
			if (--cl->timer_count == 0) {
				dev_err(dev->dev, "timer: connect/disconnect timeout.\n");
				mei_connect_timeout(cl);
				goto out;
			}
			reschedule_timer = true;
		}
	}

	if (!mei_cl_is_connected(&dev->iamthif_cl))
		goto out;

	/* amthif command stall: reset and restart its command queue */
	if (dev->iamthif_stall_timer) {
		if (--dev->iamthif_stall_timer == 0) {
			dev_err(dev->dev, "timer: amthif hanged.\n");
			mei_reset(dev);

			mei_amthif_run_next_cmd(dev);
			goto out;
		}
		reschedule_timer = true;
	}

out:
	/* re-arm only while enabled and some countdown is still pending */
	if (dev->dev_state != MEI_DEV_DISABLED && reschedule_timer)
		mei_schedule_stall_timer(dev);

	mutex_unlock(&dev->device_lock);
}