/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

/**
 * mei_irq_compl_handler - dispatch complete handlers
 * for the completed callbacks
 *
 * @dev: mei device
 * @compl_list: list of completed cbs
 */
void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list)
{
	struct mei_cl_cb *cb, *next;
	struct mei_cl *cl;

	list_for_each_entry_safe(cb, next, &compl_list->list, list) {
		cl = cb->cl;
		list_del_init(&cb->list);

		dev_dbg(dev->dev, "completing call back.\n");
		if (cl == &dev->iamthif_cl)
			mei_amthif_complete(dev, cb);
		else
			mei_cl_complete(cl, cb);
	}
}
EXPORT_SYMBOL_GPL(mei_irq_compl_handler);

/**
 * mei_cl_hbm_equal - check if hbm is addressed to the client
 *
 * @cl: host client
 * @mei_hdr: header of mei client message
 *
 * Return: true if matches, false otherwise
 */
static inline int mei_cl_hbm_equal(struct mei_cl *cl,
			struct mei_msg_hdr *mei_hdr)
{
	return cl->host_client_id == mei_hdr->host_addr &&
		cl->me_client_id == mei_hdr->me_addr;
}

/**
 * mei_cl_is_reading - checks if the client is in reading state
 *
 * @cl: mei client
 *
 * Return: true if the client is reading
 */
static bool mei_cl_is_reading(struct mei_cl *cl)
{
	return cl->state == MEI_FILE_CONNECTED &&
		cl->reading_state != MEI_READ_COMPLETE;
}

/**
 * mei_irq_discard_msg - discard received message
 *
 * @dev: mei device
 * @hdr: message header
 */
static inline
void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
{
	/*
	 * no need to check for size as it is guaranteed
	 * that length fits into rd_msg_buf
	 */
	mei_read_slots(dev, dev->rd_msg_buf, hdr->length);
	dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n",
			MEI_HDR_PRM(hdr));
}

/**
 * mei_cl_irq_read_msg - process client message
 *
 * @cl: reading client
 * @mei_hdr: header of mei client message
 * @complete_list: completion list
 *
 * Return: always 0
 */
int mei_cl_irq_read_msg(struct mei_cl *cl,
			struct mei_msg_hdr *mei_hdr,
			struct mei_cl_cb *complete_list)
{
	struct mei_device *dev = cl->dev;
	struct mei_cl_cb *cb;
	unsigned char *buffer = NULL;

	list_for_each_entry(cb, &dev->read_list.list, list) {
		if (cl == cb->cl)
			break;
	}
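
	/*
	 * list_for_each_entry() ran past the end of the list: no pending
	 * read cb belongs to this client.
	 */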
	if (&cb->list == &dev->read_list.list) {
		dev_err(dev->dev, "no reader found\n");
		goto out;
	}

	if (!mei_cl_is_reading(cl)) {
		cl_err(dev, cl, "cl is not reading state=%d reading state=%d\n",
			cl->state, cl->reading_state);
		goto out;
	}

	cl->reading_state = MEI_READING;

	if (cb->buf.size == 0 || cb->buf.data == NULL) {
		cl_err(dev, cl, "response buffer is not allocated.\n");
		list_move_tail(&cb->list, &complete_list->list);
		cb->status = -ENOMEM;
		goto out;
	}
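
	/*
	 * A host client message may arrive split over several MEI
	 * fragments; grow the receive buffer so the new fragment fits
	 * behind the data already stored at buf_idx.
	 */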
	if (cb->buf.size < mei_hdr->length + cb->buf_idx) {
		cl_dbg(dev, cl, "message overflow. size %d len %d idx %ld\n",
			cb->buf.size, mei_hdr->length, cb->buf_idx);
		buffer = krealloc(cb->buf.data, mei_hdr->length + cb->buf_idx,
				  GFP_KERNEL);

		if (!buffer) {
			cb->status = -ENOMEM;
			list_move_tail(&cb->list, &complete_list->list);
			goto out;
		}
		cb->buf.data = buffer;
		cb->buf.size = mei_hdr->length + cb->buf_idx;
	}

	buffer = cb->buf.data + cb->buf_idx;
	mei_read_slots(dev, buffer, mei_hdr->length);

	cb->buf_idx += mei_hdr->length;

	if (mei_hdr->msg_complete) {
		cb->read_time = jiffies;
		cl_dbg(dev, cl, "completed read length = %lu\n",
			cb->buf_idx);
		list_move_tail(&cb->list, &complete_list->list);
	}

out:
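	/* the payload was never copied into a reader's buffer: drain and drop it */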
	if (!buffer)
		mei_irq_discard_msg(dev, mei_hdr);

	return 0;
}

/**
 * mei_cl_irq_disconnect_rsp - send disconnection response message
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
				     struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	slots = mei_hbuf_empty_slots(dev);
	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_response));
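
	/*
	 * Not enough room in the host buffer for the whole HBM message:
	 * leave the cb queued so it can be retried on a later pass.
	 */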
	if (slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_disconnect_rsp(dev, cl);

	cl->state = MEI_FILE_DISCONNECTED;
	cl->status = 0;
	mei_io_cb_free(cb);

	return ret;
}

/**
 * mei_cl_irq_disconnect - processes close related operation from
 * interrupt thread context - send disconnect request
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
				 struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;

	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);

	if (slots < msg_slots)
		return -EMSGSIZE;

	if (mei_hbm_cl_disconnect_req(dev, cl)) {
		cl->status = 0;
		cb->buf_idx = 0;
		list_move_tail(&cb->list, &cmpl_list->list);
		return -EIO;
	}

	cl->state = MEI_FILE_DISCONNECTING;
	cl->status = 0;
	cb->buf_idx = 0;
	list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;

	return 0;
}

/**
 * mei_cl_irq_read - processes client read related operation from the
 * interrupt thread context - request for flow control credits
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
			   struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));
	slots = mei_hbuf_empty_slots(dev);

	if (slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_flow_control_req(dev, cl);
	if (ret) {
		cl->status = ret;
		cb->buf_idx = 0;
		list_move_tail(&cb->list, &cmpl_list->list);
		return ret;
	}
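
	/*
	 * The flow control credit has been granted to the peer; park the
	 * cb on read_list until the data arrives and mei_cl_irq_read_msg()
	 * fills it in.
	 */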
	list_move_tail(&cb->list, &dev->read_list.list);

	return 0;
}

/**
 * mei_cl_irq_connect - send connect request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
			      struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
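
	/*
	 * If another client's connect handshake is still in flight, defer:
	 * returning 0 keeps this cb on the control write list so the
	 * request is retried later.
	 */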
	if (mei_cl_is_other_connecting(cl))
		return 0;

	if (slots < msg_slots)
		return -EMSGSIZE;

	cl->state = MEI_FILE_CONNECTING;

	ret = mei_hbm_cl_connect_req(dev, cl);
	if (ret) {
		cl->status = ret;
		cb->buf_idx = 0;
		list_del_init(&cb->list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;

	return 0;
}

/**
 * mei_irq_read_handler - bottom half read routine after ISR to
 * handle the read processing.
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 * @slots: slots to read.
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_irq_read_handler(struct mei_device *dev,
		struct mei_cl_cb *cmpl_list, s32 *slots)
{
	struct mei_msg_hdr *mei_hdr;
	struct mei_cl *cl;
	int ret;
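
	/*
	 * dev->rd_msg_hdr caches the header of the message currently being
	 * consumed; it is zero only when a fresh header must be read from
	 * the hardware.
	 */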
	if (!dev->rd_msg_hdr) {
		dev->rd_msg_hdr = mei_read_hdr(dev);
		(*slots)--;
		dev_dbg(dev->dev, "slots =%08x.\n", *slots);
	}
	mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
	dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

	if (mei_hdr->reserved || !dev->rd_msg_hdr) {
		dev_err(dev->dev, "corrupted message header 0x%08X\n",
				dev->rd_msg_hdr);
		ret = -EBADMSG;
		goto end;
	}

	if (mei_slots2data(*slots) < mei_hdr->length) {
		dev_err(dev->dev, "less data available than length=%08x.\n",
				*slots);
		/* we can't read the message */
		ret = -ENODATA;
		goto end;
	}

	/* HBM message */
	if (mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0) {
		ret = mei_hbm_dispatch(dev, mei_hdr);
		if (ret) {
			dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n",
					ret);
			goto end;
		}
		goto reset_slots;
	}

	/* find recipient cl */
	list_for_each_entry(cl, &dev->file_list, link) {
		if (mei_cl_hbm_equal(cl, mei_hdr)) {
			cl_dbg(dev, cl, "got a message\n");
			break;
		}
	}

	/* if no recipient cl was found we assume corrupted header */
	if (&cl->link == &dev->file_list) {
		dev_err(dev->dev, "no destination client found 0x%08X\n",
				dev->rd_msg_hdr);
		ret = -EBADMSG;
		goto end;
	}

	if (cl == &dev->iamthif_cl) {
		ret = mei_amthif_irq_read_msg(cl, mei_hdr, cmpl_list);
	} else {
		ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);
	}

reset_slots:
	/* reset the number of slots and header */
	*slots = mei_count_full_read_slots(dev);
	dev->rd_msg_hdr = 0;

	if (*slots == -EOVERFLOW) {
		/* overflow - reset */
		dev_err(dev->dev, "resetting due to slots overflow.\n");
		/* set the event since message has been read */
		ret = -ERANGE;
		goto end;
	}
end:
	return ret;
}
EXPORT_SYMBOL_GPL(mei_irq_read_handler);

/**
 * mei_irq_write_handler - dispatch write requests
 * after irq received
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
{
	struct mei_cl *cl;
	struct mei_cl_cb *cb, *next;
	struct mei_cl_cb *list;
	s32 slots;
	int ret;
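
	/*
	 * The host buffer must be acquired before anything can be written;
	 * if it is not available there is nothing to do on this pass.
	 */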
	if (!mei_hbuf_acquire(dev))
		return 0;

	slots = mei_hbuf_empty_slots(dev);
	if (slots <= 0)
		return -EMSGSIZE;

	/* complete all waiting for write CB */
	dev_dbg(dev->dev, "complete all waiting for write cb.\n");

	list = &dev->write_waiting_list;
	list_for_each_entry_safe(cb, next, &list->list, list) {
		cl = cb->cl;

		cl->status = 0;
		cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
		cl->writing_state = MEI_WRITE_COMPLETE;
		list_move_tail(&cb->list, &cmpl_list->list);
	}

	if (dev->wd_state == MEI_WD_STOPPING) {
		dev->wd_state = MEI_WD_IDLE;
		wake_up(&dev->wait_stop_wd);
	}

	if (mei_cl_is_connected(&dev->wd_cl)) {
		if (dev->wd_pending &&
		    mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
			ret = mei_wd_send(dev);
			if (ret)
				return ret;
			dev->wd_pending = false;
		}
	}

	/* complete control write list CB */
	dev_dbg(dev->dev, "complete control write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) {
		cl = cb->cl;
		switch (cb->fop_type) {
		case MEI_FOP_DISCONNECT:
			/* send disconnect message */
			ret = mei_cl_irq_disconnect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_READ:
			/* send flow control message */
			ret = mei_cl_irq_read(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_CONNECT:
			/* connect message */
			ret = mei_cl_irq_connect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_DISCONNECT_RSP:
			/* send disconnect resp */
			ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;
		default:
			BUG();
		}

	}
	/* complete write list CB */
	dev_dbg(dev->dev, "complete write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
		cl = cb->cl;
		if (cl == &dev->iamthif_cl)
			ret = mei_amthif_irq_write(cl, cb, cmpl_list);
		else
			ret = mei_cl_irq_write(cl, cb, cmpl_list);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mei_irq_write_handler);

/**
 * mei_timer - timer function.
 *
 * @work: pointer to the work_struct structure
 */
void mei_timer(struct work_struct *work)
{
	unsigned long timeout;
	struct mei_cl *cl;

	struct mei_device *dev = container_of(work,
					struct mei_device, timer_work.work);

	mutex_lock(&dev->device_lock);
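
	/*
	 * The stall counters below are decremented once per timer tick
	 * (every 2 * HZ); when one reaches zero the firmware is assumed
	 * stuck and the device is reset.
	 */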
	/* Catch interrupt stalls during HBM init handshake */
	if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
	    dev->hbm_state != MEI_HBM_IDLE) {

		if (dev->init_clients_timer) {
			if (--dev->init_clients_timer == 0) {
				dev_err(dev->dev, "timer: init clients timeout hbm_state = %d.\n",
					dev->hbm_state);
				mei_reset(dev);
				goto out;
			}
		}
	}

	if (dev->dev_state != MEI_DEV_ENABLED)
		goto out;

	/*** connect/disconnect timeouts ***/
	list_for_each_entry(cl, &dev->file_list, link) {
		if (cl->timer_count) {
			if (--cl->timer_count == 0) {
				dev_err(dev->dev, "timer: connect/disconnect timeout.\n");
				mei_reset(dev);
				goto out;
			}
		}
	}

	if (!mei_cl_is_connected(&dev->iamthif_cl))
		goto out;

	if (dev->iamthif_stall_timer) {
		if (--dev->iamthif_stall_timer == 0) {
			dev_err(dev->dev, "timer: amthif hung.\n");
			mei_reset(dev);
			dev->iamthif_canceled = false;
			dev->iamthif_state = MEI_IAMTHIF_IDLE;
			dev->iamthif_timer = 0;

			mei_io_cb_free(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;

			dev->iamthif_file_object = NULL;
			mei_amthif_run_next_cmd(dev);
		}
	}

	if (dev->iamthif_timer) {

		timeout = dev->iamthif_timer +
			mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);

		dev_dbg(dev->dev, "dev->iamthif_timer = %ld\n",
				dev->iamthif_timer);
		dev_dbg(dev->dev, "timeout = %ld\n", timeout);
		dev_dbg(dev->dev, "jiffies = %ld\n", jiffies);
		if (time_after(jiffies, timeout)) {
			/*
			 * User didn't read the AMTHI data on time (15 sec)
			 * freeing AMTHI for other requests
			 */
			dev_dbg(dev->dev, "freeing AMTHI for other requests\n");

			mei_io_list_flush(&dev->amthif_rd_complete_list,
					  &dev->iamthif_cl);
			mei_io_cb_free(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;

			dev->iamthif_file_object->private_data = NULL;
			dev->iamthif_file_object = NULL;
			dev->iamthif_timer = 0;
			mei_amthif_run_next_cmd(dev);
		}
	}
out:
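	/* rearm the periodic timer (2 * HZ) unless the device is disabled */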
	if (dev->dev_state != MEI_DEV_DISABLED)
		schedule_delayed_work(&dev->timer_work, 2 * HZ);
	mutex_unlock(&dev->device_lock);
}