// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */
2013-03-27 16:58:30 +02:00
# include <linux/export.h>
2011-05-15 13:43:42 +03:00
# include <linux/kthread.h>
# include <linux/interrupt.h>
# include <linux/fs.h>
# include <linux/jiffies.h>
2014-09-29 16:31:46 +03:00
# include <linux/slab.h>
2015-10-13 15:02:38 +03:00
# include <linux/pm_runtime.h>
2011-05-15 13:43:42 +03:00
2012-05-09 16:38:59 +03:00
# include <linux/mei.h>
2012-12-25 19:06:03 +02:00
# include "mei_dev.h"
2013-01-08 23:07:12 +02:00
# include "hbm.h"
2013-01-08 23:07:14 +02:00
# include "client.h"
2011-05-15 13:43:42 +03:00
2013-03-17 11:41:20 +02:00
/**
2014-01-08 22:31:46 +02:00
* mei_irq_compl_handler - dispatch complete handlers
2013-03-17 11:41:20 +02:00
* for the completed callbacks
*
2014-09-29 16:31:49 +03:00
* @ dev : mei device
2017-01-27 16:32:45 +02:00
* @ cmpl_list : list of completed cbs
2013-03-17 11:41:20 +02:00
*/
2017-01-27 16:32:45 +02:00
void mei_irq_compl_handler ( struct mei_device * dev , struct list_head * cmpl_list )
2013-03-17 11:41:20 +02:00
{
struct mei_cl_cb * cb , * next ;
struct mei_cl * cl ;
2017-01-27 16:32:45 +02:00
list_for_each_entry_safe ( cb , next , cmpl_list , list ) {
2013-03-17 11:41:20 +02:00
cl = cb - > cl ;
2015-02-10 10:39:39 +02:00
list_del_init ( & cb - > list ) ;
2013-03-17 11:41:20 +02:00
2014-09-29 16:31:42 +03:00
dev_dbg ( dev - > dev , " completing call back. \n " ) ;
2017-03-20 15:04:03 +02:00
mei_cl_complete ( cl , cb ) ;
2013-03-17 11:41:20 +02:00
}
}
2013-03-27 16:58:30 +02:00
EXPORT_SYMBOL_GPL ( mei_irq_compl_handler ) ;
2013-04-19 22:01:34 +03:00
2011-05-15 13:43:42 +03:00
/**
2013-04-19 22:01:34 +03:00
* mei_cl_hbm_equal - check if hbm is addressed to the client
2011-05-15 13:43:42 +03:00
*
2013-04-19 22:01:34 +03:00
* @ cl : host client
2011-05-15 13:43:42 +03:00
* @ mei_hdr : header of mei client message
*
2014-09-29 16:31:49 +03:00
* Return : true if matches , false otherwise
2011-05-15 13:43:42 +03:00
*/
2013-04-19 22:01:34 +03:00
static inline int mei_cl_hbm_equal ( struct mei_cl * cl ,
struct mei_msg_hdr * mei_hdr )
2011-05-15 13:43:42 +03:00
{
2015-05-04 09:43:56 +03:00
return mei_cl_host_addr ( cl ) = = mei_hdr - > host_addr & &
2015-05-04 09:43:54 +03:00
mei_cl_me_id ( cl ) = = mei_hdr - > me_addr ;
2013-04-19 22:01:34 +03:00
}
2011-05-15 13:43:42 +03:00
2015-02-10 10:39:41 +02:00
/**
* mei_irq_discard_msg - discard received message
*
* @ dev : mei device
* @ hdr : message header
2020-08-18 14:51:38 +03:00
* @ discard_len : the length of the message to discard ( excluding header )
2015-02-10 10:39:41 +02:00
*/
2020-08-18 14:51:38 +03:00
static void mei_irq_discard_msg ( struct mei_device * dev , struct mei_msg_hdr * hdr ,
size_t discard_len )
2015-02-10 10:39:41 +02:00
{
2020-08-18 14:51:38 +03:00
if ( hdr - > dma_ring ) {
mei_dma_ring_read ( dev , NULL ,
hdr - > extension [ dev - > rd_msg_hdr_count - 2 ] ) ;
discard_len = 0 ;
}
2015-02-10 10:39:41 +02:00
/*
2023-10-11 10:43:01 +03:00
* no need to check for size as it is guaranteed
2015-02-10 10:39:41 +02:00
* that length fits into rd_msg_buf
*/
2020-08-18 14:51:38 +03:00
mei_read_slots ( dev , dev - > rd_msg_buf , discard_len ) ;
2015-02-10 10:39:41 +02:00
dev_dbg ( dev - > dev , " discarding message " MEI_HDR_FMT " \n " ,
MEI_HDR_PRM ( hdr ) ) ;
}
2011-05-15 13:43:42 +03:00
/**
 * mei_cl_irq_read_msg - process client message
 *
 * @cl: reading client
 * @mei_hdr: header of mei client message
 * @meta: extend meta header
 * @cmpl_list: completion list
 *
 * Return: always 0
 */
static int mei_cl_irq_read_msg(struct mei_cl *cl,
			       struct mei_msg_hdr *mei_hdr,
			       struct mei_ext_meta_hdr *meta,
			       struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	struct mei_cl_cb *cb;

	struct mei_ext_hdr_vtag *vtag_hdr = NULL;
	struct mei_ext_hdr_gsc_f2h *gsc_f2h = NULL;

	size_t buf_sz;
	u32 length;
	u32 ext_len;

	/* payload length excludes any extended headers carried in-band */
	length = mei_hdr->length;
	ext_len = 0;
	if (mei_hdr->extended) {
		ext_len = sizeof(*meta) + mei_slots2data(meta->size);
		length -= ext_len;
	}

	/* pick the oldest pending read; for fixed-address clients with no
	 * pending read, allocate a fresh cb on the fly
	 */
	cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
	if (!cb) {
		if (!mei_cl_is_fixed_address(cl)) {
			cl_err(dev, cl, "pending read cb not found\n");
			goto discard;
		}
		cb = mei_cl_alloc_cb(cl, mei_cl_mtu(cl), MEI_FOP_READ, cl->fp);
		if (!cb)
			goto discard;
		list_add_tail(&cb->list, &cl->rd_pending);
	}

	/* walk the extended headers: capture vtag and gsc, flag anything else */
	if (mei_hdr->extended) {
		struct mei_ext_hdr *ext = mei_ext_begin(meta);

		do {
			switch (ext->type) {
			case MEI_EXT_HDR_VTAG:
				vtag_hdr = (struct mei_ext_hdr_vtag *)ext;
				break;
			case MEI_EXT_HDR_GSC:
				gsc_f2h = (struct mei_ext_hdr_gsc_f2h *)ext;
				/* copy of the gsc header is handed to the
				 * reader via cb->ext_hdr; freed with the cb
				 */
				cb->ext_hdr = kzalloc(sizeof(*gsc_f2h), GFP_KERNEL);
				if (!cb->ext_hdr) {
					cb->status = -ENOMEM;
					goto discard;
				}
				break;
			case MEI_EXT_HDR_NONE:
				fallthrough;
			default:
				cl_err(dev, cl, "unknown extended header\n");
				cb->status = -EPROTO;
				break;
			}

			ext = mei_ext_next(ext);
		} while (!mei_ext_last(meta, ext));

		/* an extended message must carry at least one known header */
		if (!vtag_hdr && !gsc_f2h) {
			cl_dbg(dev, cl, "no vtag or gsc found in extended header.\n");
			cb->status = -EPROTO;
			goto discard;
		}
	}

	if (vtag_hdr) {
		cl_dbg(dev, cl, "vtag: %d\n", vtag_hdr->vtag);
		/* a cb that already has a vtag must keep receiving that vtag */
		if (cb->vtag && cb->vtag != vtag_hdr->vtag) {
			cl_err(dev, cl, "mismatched tag: %d != %d\n",
			       cb->vtag, vtag_hdr->vtag);
			cb->status = -EPROTO;
			goto discard;
		}
		cb->vtag = vtag_hdr->vtag;
	}

	if (gsc_f2h) {
		u32 ext_hdr_len = mei_ext_hdr_len(&gsc_f2h->hdr);

		if (!dev->hbm_f_gsc_supported) {
			cl_err(dev, cl, "gsc extended header is not supported\n");
			cb->status = -EPROTO;
			goto discard;
		}

		/* a gsc message carries only the extended header, no payload */
		if (length) {
			cl_err(dev, cl, "no data allowed in cb with gsc\n");
			cb->status = -EPROTO;
			goto discard;
		}
		/* bound the copy into the fixed-size cb->ext_hdr allocation */
		if (ext_hdr_len > sizeof(*gsc_f2h)) {
			cl_err(dev, cl, "gsc extended header is too big %u\n", ext_hdr_len);
			cb->status = -EPROTO;
			goto discard;
		}
		memcpy(cb->ext_hdr, gsc_f2h, ext_hdr_len);
	}

	if (!mei_cl_is_connected(cl)) {
		cl_dbg(dev, cl, "not connected\n");
		cb->status = -ENODEV;
		goto discard;
	}

	/* for DMA the real payload length is carried in the extension dword
	 * that follows the extended headers
	 */
	if (mei_hdr->dma_ring)
		length = mei_hdr->extension[mei_data2slots(ext_len)];

	buf_sz = length + cb->buf_idx;
	/* catch for integer overflow */
	if (buf_sz < cb->buf_idx) {
		cl_err(dev, cl, "message is too big len %d idx %zu\n",
		       length, cb->buf_idx);
		cb->status = -EMSGSIZE;
		goto discard;
	}

	if (cb->buf.size < buf_sz) {
		cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n",
		       cb->buf.size, length, cb->buf_idx);
		cb->status = -EMSGSIZE;
		goto discard;
	}

	if (mei_hdr->dma_ring) {
		mei_dma_ring_read(dev, cb->buf.data + cb->buf_idx, length);
		/* for DMA read 0 length to generate interrupt to the device */
		mei_read_slots(dev, cb->buf.data + cb->buf_idx, 0);
	} else {
		mei_read_slots(dev, cb->buf.data + cb->buf_idx, length);
	}

	cb->buf_idx += length;

	if (mei_hdr->msg_complete) {
		cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx);
		list_move_tail(&cb->list, cmpl_list);
	} else {
		/* message continues in a following fragment: keep the device
		 * awake until the whole message has arrived
		 */
		pm_runtime_mark_last_busy(dev->dev);
		pm_request_autosuspend(dev->dev);
	}

	return 0;

discard:
	/* complete the cb with its error status and drain the hw buffer so
	 * the read pipeline stays in sync
	 */
	if (cb)
		list_move_tail(&cb->list, cmpl_list);
	mei_irq_discard_msg(dev, mei_hdr, length);
	return 0;
}
/**
* mei_cl_irq_disconnect_rsp - send disconnection response message
*
* @ cl : client
* @ cb : callback block .
* @ cmpl_list : complete list .
*
2014-09-29 16:31:49 +03:00
* Return : 0 , OK ; otherwise , error .
2014-02-12 21:41:52 +02:00
*/
static int mei_cl_irq_disconnect_rsp ( struct mei_cl * cl , struct mei_cl_cb * cb ,
2017-01-27 16:32:45 +02:00
struct list_head * cmpl_list )
2014-02-12 21:41:52 +02:00
{
struct mei_device * dev = cl - > dev ;
2014-02-19 17:35:48 +02:00
u32 msg_slots ;
int slots ;
2014-02-12 21:41:52 +02:00
int ret ;
2018-07-31 09:35:33 +03:00
msg_slots = mei_hbm2slots ( sizeof ( struct hbm_client_connect_response ) ) ;
2018-07-12 17:10:08 +03:00
slots = mei_hbuf_empty_slots ( dev ) ;
if ( slots < 0 )
return - EOVERFLOW ;
2014-02-12 21:41:52 +02:00
2018-07-12 17:10:08 +03:00
if ( ( u32 ) slots < msg_slots )
2014-02-12 21:41:52 +02:00
return - EMSGSIZE ;
ret = mei_hbm_cl_disconnect_rsp ( dev , cl ) ;
2017-01-27 16:32:45 +02:00
list_move_tail ( & cb - > list , cmpl_list ) ;
2014-02-12 21:41:52 +02:00
return ret ;
}
2011-05-15 13:43:42 +03:00
/**
2014-09-29 16:31:50 +03:00
* mei_cl_irq_read - processes client read related operation from the
2013-05-12 15:34:46 +03:00
* interrupt thread context - request for flow control credits
2011-05-15 13:43:42 +03:00
*
2013-05-12 15:34:46 +03:00
* @ cl : client
* @ cb : callback block .
2011-05-15 13:43:42 +03:00
* @ cmpl_list : complete list .
*
2014-09-29 16:31:49 +03:00
* Return : 0 , OK ; otherwise , error .
2011-05-15 13:43:42 +03:00
*/
2013-05-12 15:34:46 +03:00
static int mei_cl_irq_read ( struct mei_cl * cl , struct mei_cl_cb * cb ,
2017-01-27 16:32:45 +02:00
struct list_head * cmpl_list )
2011-05-15 13:43:42 +03:00
{
2013-05-12 15:34:46 +03:00
struct mei_device * dev = cl - > dev ;
2014-02-19 17:35:48 +02:00
u32 msg_slots ;
int slots ;
2013-09-16 23:44:43 +03:00
int ret ;
2016-07-26 01:06:06 +03:00
if ( ! list_empty ( & cl - > rd_pending ) )
return 0 ;
2018-07-31 09:35:33 +03:00
msg_slots = mei_hbm2slots ( sizeof ( struct hbm_flow_control ) ) ;
2014-02-19 17:35:48 +02:00
slots = mei_hbuf_empty_slots ( dev ) ;
2018-07-12 17:10:08 +03:00
if ( slots < 0 )
return - EOVERFLOW ;
2013-09-16 23:44:43 +03:00
2018-07-12 17:10:08 +03:00
if ( ( u32 ) slots < msg_slots )
2013-03-11 18:27:02 +02:00
return - EMSGSIZE ;
2012-07-04 19:24:52 +03:00
2013-09-16 23:44:43 +03:00
ret = mei_hbm_cl_flow_control_req ( dev , cl ) ;
if ( ret ) {
cl - > status = ret ;
2013-05-12 15:34:46 +03:00
cb - > buf_idx = 0 ;
2017-01-27 16:32:45 +02:00
list_move_tail ( & cb - > list , cmpl_list ) ;
2013-09-16 23:44:43 +03:00
return ret ;
2012-03-14 14:39:42 +02:00
}
2013-09-16 23:44:43 +03:00
2021-05-26 22:33:34 +03:00
pm_runtime_mark_last_busy ( dev - > dev ) ;
pm_request_autosuspend ( dev - > dev ) ;
2015-02-10 10:39:46 +02:00
list_move_tail ( & cb - > list , & cl - > rd_pending ) ;
2012-03-14 14:39:42 +02:00
2011-05-15 13:43:42 +03:00
return 0 ;
}
2016-02-07 23:35:36 +02:00
static inline bool hdr_is_hbm ( struct mei_msg_hdr * mei_hdr )
{
return mei_hdr - > host_addr = = 0 & & mei_hdr - > me_addr = = 0 ;
}
static inline bool hdr_is_fixed ( struct mei_msg_hdr * mei_hdr )
{
return mei_hdr - > host_addr = = 0 & & mei_hdr - > me_addr ! = 0 ;
}
2017-06-12 12:15:55 +03:00
/**
 * hdr_is_valid - sanity check a raw message header dword
 *
 * @msg_hdr: raw header value read from the device
 *
 * Return: 0 if the header is well formed, -EBADMSG otherwise
 */
static inline int hdr_is_valid(u32 msg_hdr)
{
	struct mei_msg_hdr *mei_hdr = (struct mei_msg_hdr *)&msg_hdr;
	u32 minimum_len = 0;

	if (!msg_hdr || mei_hdr->reserved)
		return -EBADMSG;

	/* dma_ring and extended each consume one extra dword of payload */
	if (mei_hdr->dma_ring)
		minimum_len += MEI_SLOT_SIZE;
	if (mei_hdr->extended)
		minimum_len += MEI_SLOT_SIZE;

	if (mei_hdr->length < minimum_len)
		return -EBADMSG;

	return 0;
}
/**
 * mei_irq_read_handler - bottom half read routine after ISR to
 * handle the read processing.
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 * @slots: slots to read.
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_irq_read_handler(struct mei_device *dev,
			 struct list_head *cmpl_list, s32 *slots)
{
	struct mei_msg_hdr *mei_hdr;
	struct mei_ext_meta_hdr *meta_hdr = NULL;
	struct mei_cl *cl;
	int ret;
	u32 hdr_size_left;
	u32 hdr_size_ext;
	int i;
	int ext_hdr_end;

	/* a non-zero rd_msg_hdr[0] means the base header was already read on
	 * a previous invocation (partial read); otherwise fetch it now
	 */
	if (!dev->rd_msg_hdr[0]) {
		dev->rd_msg_hdr[0] = mei_read_hdr(dev);
		dev->rd_msg_hdr_count = 1;
		(*slots)--;
		dev_dbg(dev->dev, "slots =%08x.\n", *slots);

		ret = hdr_is_valid(dev->rd_msg_hdr[0]);
		if (ret) {
			dev_err(dev->dev, "corrupted message header 0x%08X\n",
				dev->rd_msg_hdr[0]);
			goto end;
		}
	}

	mei_hdr = (struct mei_msg_hdr *)dev->rd_msg_hdr;
	dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

	/* wait for the full message to be available before processing */
	if (mei_slots2data(*slots) < mei_hdr->length) {
		dev_err(dev->dev, "less data available than length=%08x.\n",
			*slots);
		/* we can't read the message */
		ret = -ENODATA;
		goto end;
	}

	ext_hdr_end = 1;
	hdr_size_left = mei_hdr->length;

	if (mei_hdr->extended) {
		/* the meta header occupies the second header dword */
		if (!dev->rd_msg_hdr[1]) {
			dev->rd_msg_hdr[1] = mei_read_hdr(dev);
			dev->rd_msg_hdr_count++;
			(*slots)--;
			dev_dbg(dev->dev, "extended header is %08x\n", dev->rd_msg_hdr[1]);
		}
		meta_hdr = ((struct mei_ext_meta_hdr *)&dev->rd_msg_hdr[1]);
		/* validate that the declared extended size fits in length */
		if (check_add_overflow((u32)sizeof(*meta_hdr),
				       mei_slots2data(meta_hdr->size),
				       &hdr_size_ext)) {
			dev_err(dev->dev, "extended message size too big %d\n",
				meta_hdr->size);
			return -EBADMSG;
		}
		if (hdr_size_left < hdr_size_ext) {
			dev_err(dev->dev, "corrupted message header len %d\n",
				mei_hdr->length);
			return -EBADMSG;
		}
		hdr_size_left -= hdr_size_ext;

		/* drain the remaining extended header dwords into rd_msg_hdr */
		ext_hdr_end = meta_hdr->size + 2;
		for (i = dev->rd_msg_hdr_count; i < ext_hdr_end; i++) {
			dev->rd_msg_hdr[i] = mei_read_hdr(dev);
			dev_dbg(dev->dev, "extended header %d is %08x\n", i,
				dev->rd_msg_hdr[i]);
			dev->rd_msg_hdr_count++;
			(*slots)--;
		}
	}

	if (mei_hdr->dma_ring) {
		/* a dma message carries exactly one extra dword: the length */
		if (hdr_size_left != sizeof(dev->rd_msg_hdr[ext_hdr_end])) {
			dev_err(dev->dev, "corrupted message header len %d\n",
				mei_hdr->length);
			return -EBADMSG;
		}

		dev->rd_msg_hdr[ext_hdr_end] = mei_read_hdr(dev);
		dev->rd_msg_hdr_count++;
		(*slots)--;
		/* the dma-length dword is consumed; shrink the slot payload */
		mei_hdr->length -= sizeof(dev->rd_msg_hdr[ext_hdr_end]);
	}

	/* HBM message */
	if (hdr_is_hbm(mei_hdr)) {
		ret = mei_hbm_dispatch(dev, mei_hdr);
		if (ret) {
			dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n",
				ret);
			goto end;
		}
		goto reset_slots;
	}

	/* find recipient cl */
	list_for_each_entry(cl, &dev->file_list, link) {
		if (mei_cl_hbm_equal(cl, mei_hdr)) {
			cl_dbg(dev, cl, "got a message\n");
			ret = mei_cl_irq_read_msg(cl, mei_hdr, meta_hdr, cmpl_list);
			goto reset_slots;
		}
	}

	/* if no recipient cl was found we assume corrupted header */
	/* A message for not connected fixed address clients
	 * should be silently discarded
	 * On power down client may be force cleaned,
	 * silently discard such messages
	 */
	if (hdr_is_fixed(mei_hdr) ||
	    dev->dev_state == MEI_DEV_POWER_DOWN) {
		mei_irq_discard_msg(dev, mei_hdr, mei_hdr->length);
		ret = 0;
		goto reset_slots;
	}
	dev_err(dev->dev, "no destination client found 0x%08X\n", dev->rd_msg_hdr[0]);
	ret = -EBADMSG;
	goto end;

reset_slots:
	/* reset the number of slots and header */
	memset(dev->rd_msg_hdr, 0, sizeof(dev->rd_msg_hdr));
	dev->rd_msg_hdr_count = 0;
	*slots = mei_count_full_read_slots(dev);
	if (*slots == -EOVERFLOW) {
		/* overflow - reset */
		dev_err(dev->dev, "resetting due to slots overflow.\n");
		/* set the event since message has been read */
		ret = -ERANGE;
		goto end;
	}
end:
	return ret;
}
EXPORT_SYMBOL_GPL(mei_irq_read_handler);
/**
 * mei_irq_write_handler - dispatch write requests
 *  after irq received
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list)
{

	struct mei_cl *cl;
	struct mei_cl_cb *cb, *next;
	s32 slots;
	int ret;

	/* bail out quietly if the host buffer is not ours to write */
	if (!mei_hbuf_acquire(dev))
		return 0;

	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if (slots == 0)
		return -EMSGSIZE;

	/* complete all waiting for write CB */
	dev_dbg(dev->dev, "complete all waiting for write cb.\n");

	list_for_each_entry_safe(cb, next, &dev->write_waiting_list, list) {
		cl = cb->cl;

		cl->status = 0;
		cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
		cl->writing_state = MEI_WRITE_COMPLETE;
		list_move_tail(&cb->list, cmpl_list);
	}

	/* complete control write list CB */
	dev_dbg(dev->dev, "complete control write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list, list) {
		cl = cb->cl;
		/* dispatch by the pending file operation type; each handler
		 * either sends its message or reports why it could not
		 */
		switch (cb->fop_type) {
		case MEI_FOP_DISCONNECT:
			/* send disconnect message */
			ret = mei_cl_irq_disconnect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_READ:
			/* send flow control message */
			ret = mei_cl_irq_read(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_CONNECT:
			/* connect message */
			ret = mei_cl_irq_connect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_DISCONNECT_RSP:
			/* send disconnect resp */
			ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;

		case MEI_FOP_NOTIFY_START:
		case MEI_FOP_NOTIFY_STOP:
			ret = mei_cl_irq_notify(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;
		case MEI_FOP_DMA_MAP:
			ret = mei_cl_irq_dma_map(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;
		case MEI_FOP_DMA_UNMAP:
			ret = mei_cl_irq_dma_unmap(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;
		default:
			/* unknown fop_type indicates a driver logic error */
			BUG();
		}

	}
	/* complete  write list CB */
	dev_dbg(dev->dev, "complete write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->write_list, list) {
		cl = cb->cl;
		ret = mei_cl_irq_write(cl, cb, cmpl_list);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mei_irq_write_handler);
2015-07-23 21:37:13 +03:00
/**
* mei_connect_timeout - connect / disconnect timeouts
*
* @ cl : host client
*/
static void mei_connect_timeout ( struct mei_cl * cl )
{
struct mei_device * dev = cl - > dev ;
if ( cl - > state = = MEI_FILE_CONNECTING ) {
if ( dev - > hbm_f_dot_supported ) {
cl - > state = MEI_FILE_DISCONNECT_REQUIRED ;
wake_up ( & cl - > wait ) ;
return ;
}
}
mei_reset ( dev ) ;
}
2011-05-15 13:43:42 +03:00
2016-09-25 13:25:31 +03:00
/* stall-detection polling period: two seconds */
#define MEI_STALL_TIMER_FREQ (2 * HZ)
/**
 * mei_schedule_stall_timer - re-arm stall_timer work
 *
 * @dev: the device structure
 *
 * Schedule stall timer
 */
void mei_schedule_stall_timer(struct mei_device *dev)
{
	schedule_delayed_work(&dev->timer_work, MEI_STALL_TIMER_FREQ);
}
/**
 * mei_timer - timer function.
 *
 * @work: pointer to the work_struct structure
 *
 */
void mei_timer(struct work_struct *work)
{
	struct mei_cl *cl;
	struct mei_device *dev = container_of(work,
					struct mei_device, timer_work.work);
	bool reschedule_timer = false;

	mutex_lock(&dev->device_lock);

	/* Catch interrupt stalls during HBM init handshake */
	if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
	    dev->hbm_state != MEI_HBM_IDLE) {

		if (dev->init_clients_timer) {
			/* countdown expired: the handshake stalled, reset */
			if (--dev->init_clients_timer == 0) {
				dev_err(dev->dev, "timer: init clients timeout hbm_state = %d.\n",
					dev->hbm_state);
				mei_reset(dev);
				goto out;
			}
			/* still counting down: keep the timer running */
			reschedule_timer = true;
		}
	}

	if (dev->dev_state != MEI_DEV_ENABLED)
		goto out;

	/*** connect/disconnect timeouts ***/
	list_for_each_entry(cl, &dev->file_list, link) {
		if (cl->timer_count) {
			if (--cl->timer_count == 0) {
				dev_err(dev->dev, "timer: connect/disconnect timeout.\n");
				/* per-client timeout: disconnect or reset */
				mei_connect_timeout(cl);
				goto out;
			}
			reschedule_timer = true;
		}
	}

out:
	/* re-arm only while some countdown is active and device is alive */
	if (dev->dev_state != MEI_DEV_DISABLED && reschedule_timer)
		mei_schedule_stall_timer(dev);

	mutex_unlock(&dev->device_lock);
}