2011-05-15 13:43:43 +03:00
/*
*
* Intel Management Engine Interface ( Intel MEI ) Linux driver
2012-02-09 19:25:53 +02:00
* Copyright ( c ) 2003 - 2012 , Intel Corporation .
2011-05-15 13:43:43 +03:00
*
* This program is free software ; you can redistribute it and / or modify it
* under the terms and conditions of the GNU General Public License ,
* version 2 , as published by the Free Software Foundation .
*
* This program is distributed in the hope it will be useful , but WITHOUT
* ANY WARRANTY ; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE . See the GNU General Public License for
* more details .
*
*/
# include <linux/pci.h>
2013-02-06 14:06:42 +02:00
# include <linux/kthread.h>
# include <linux/interrupt.h>
2016-06-16 17:58:52 +03:00
# include <linux/pm_runtime.h>
2018-07-31 09:35:37 +03:00
# include <linux/sizes.h>
2012-12-25 19:06:03 +02:00
# include "mei_dev.h"
2013-02-06 14:06:42 +02:00
# include "hbm.h"
2014-03-11 14:49:23 +02:00
# include "hw-me.h"
# include "hw-me-regs.h"
2013-02-06 14:06:42 +02:00
2015-02-10 10:39:33 +02:00
# include "mei-trace.h"
2012-12-25 19:06:06 +02:00
/**
2013-03-27 16:58:29 +02:00
* mei_me_reg_read - Reads 32 bit data from the mei device
2012-12-25 19:06:06 +02:00
*
2014-09-29 16:31:49 +03:00
* @ hw : the me hardware structure
2012-12-25 19:06:06 +02:00
* @ offset : offset from which to read the data
*
2014-09-29 16:31:49 +03:00
* Return : register value ( u32 )
2012-12-25 19:06:06 +02:00
*/
2013-03-27 16:58:29 +02:00
static inline u32 mei_me_reg_read ( const struct mei_me_hw * hw ,
2012-12-25 19:06:06 +02:00
unsigned long offset )
{
2013-02-06 14:06:40 +02:00
return ioread32 ( hw - > mem_addr + offset ) ;
2012-12-25 19:06:06 +02:00
}
/**
2013-03-27 16:58:29 +02:00
* mei_me_reg_write - Writes 32 bit data to the mei device
2012-12-25 19:06:06 +02:00
*
2014-09-29 16:31:49 +03:00
* @ hw : the me hardware structure
2012-12-25 19:06:06 +02:00
* @ offset : offset from which to write the data
* @ value : register value to write ( u32 )
*/
2013-03-27 16:58:29 +02:00
static inline void mei_me_reg_write ( const struct mei_me_hw * hw ,
2012-12-25 19:06:06 +02:00
unsigned long offset , u32 value )
{
2013-02-06 14:06:40 +02:00
iowrite32 ( value , hw - > mem_addr + offset ) ;
2012-12-25 19:06:06 +02:00
}
2011-05-15 13:43:43 +03:00
2012-12-25 19:06:06 +02:00
/**
2013-03-27 16:58:29 +02:00
* mei_me_mecbrw_read - Reads 32 bit data from ME circular buffer
2013-01-08 23:07:24 +02:00
* read window register
2012-12-25 19:06:06 +02:00
*
* @ dev : the device structure
*
2014-09-29 16:31:49 +03:00
* Return : ME_CB_RW register value ( u32 )
2012-12-25 19:06:06 +02:00
*/
2015-02-10 10:39:32 +02:00
static inline u32 mei_me_mecbrw_read ( const struct mei_device * dev )
2012-12-25 19:06:06 +02:00
{
2013-03-27 16:58:29 +02:00
return mei_me_reg_read ( to_me_hw ( dev ) , ME_CB_RW ) ;
2012-12-25 19:06:06 +02:00
}
2015-02-10 10:39:32 +02:00
/**
 * mei_me_hcbww_write - push one 32 bit slot into the host circular buffer
 *
 * @dev: the device structure
 * @data: 32 bit data to be written to the host circular buffer
 */
static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data)
{
	mei_me_reg_write(to_me_hw(dev), H_CB_WW, data);
}
2012-12-25 19:06:06 +02:00
/**
2013-03-27 16:58:29 +02:00
* mei_me_mecsr_read - Reads 32 bit data from the ME CSR
2012-12-25 19:06:06 +02:00
*
2015-02-10 10:39:32 +02:00
* @ dev : the device structure
2012-12-25 19:06:06 +02:00
*
2014-09-29 16:31:49 +03:00
* Return : ME_CSR_HA register value ( u32 )
2012-12-25 19:06:06 +02:00
*/
2015-02-10 10:39:32 +02:00
static inline u32 mei_me_mecsr_read ( const struct mei_device * dev )
2012-12-25 19:06:06 +02:00
{
2015-02-10 10:39:33 +02:00
u32 reg ;
reg = mei_me_reg_read ( to_me_hw ( dev ) , ME_CSR_HA ) ;
trace_mei_reg_read ( dev - > dev , " ME_CSR_HA " , ME_CSR_HA , reg ) ;
return reg ;
2012-12-25 19:06:06 +02:00
}
2011-05-15 13:43:43 +03:00
/**
2013-01-08 23:07:24 +02:00
* mei_hcsr_read - Reads 32 bit data from the host CSR
*
2015-02-10 10:39:32 +02:00
* @ dev : the device structure
2013-01-08 23:07:24 +02:00
*
2014-09-29 16:31:49 +03:00
* Return : H_CSR register value ( u32 )
2013-01-08 23:07:24 +02:00
*/
2015-02-10 10:39:32 +02:00
static inline u32 mei_hcsr_read ( const struct mei_device * dev )
2013-01-08 23:07:24 +02:00
{
2015-02-10 10:39:33 +02:00
u32 reg ;
reg = mei_me_reg_read ( to_me_hw ( dev ) , H_CSR ) ;
trace_mei_reg_read ( dev - > dev , " H_CSR " , H_CSR , reg ) ;
return reg ;
2015-02-10 10:39:32 +02:00
}
/**
 * mei_hcsr_write - write a value to the H_CSR register
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
{
	/* trace first so the log shows the value about to hit the HW */
	trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg);
	mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
}
/**
* mei_hcsr_set - writes H_CSR register to the mei device ,
2011-05-15 13:43:43 +03:00
* and ignores the H_IS bit for it is write - one - to - zero .
*
2015-02-10 10:39:32 +02:00
* @ dev : the device structure
* @ reg : new register value
2011-05-15 13:43:43 +03:00
*/
2015-02-10 10:39:32 +02:00
static inline void mei_hcsr_set ( struct mei_device * dev , u32 reg )
2011-05-15 13:43:43 +03:00
{
2015-08-02 22:20:52 +03:00
reg & = ~ H_CSR_IS_MASK ;
2015-02-10 10:39:32 +02:00
mei_hcsr_write ( dev , reg ) ;
2011-05-15 13:43:43 +03:00
}
2017-02-02 11:26:53 +02:00
/**
 * mei_hcsr_set_hig - ring the host interrupt generate bell (set H_IG)
 *
 * @dev: the device structure
 */
static inline void mei_hcsr_set_hig(struct mei_device *dev)
{
	mei_hcsr_set(dev, mei_hcsr_read(dev) | H_IG);
}
2015-08-02 22:20:54 +03:00
/**
* mei_me_d0i3c_read - Reads 32 bit data from the D0I3C register
*
* @ dev : the device structure
*
* Return : H_D0I3C register value ( u32 )
*/
static inline u32 mei_me_d0i3c_read ( const struct mei_device * dev )
{
u32 reg ;
reg = mei_me_reg_read ( to_me_hw ( dev ) , H_D0I3C ) ;
2015-09-18 00:11:52 +03:00
trace_mei_reg_read ( dev - > dev , " H_D0I3C " , H_D0I3C , reg ) ;
2015-08-02 22:20:54 +03:00
return reg ;
}
/**
 * mei_me_d0i3c_write - write a value to the H_D0I3C register
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
{
	/* trace first so the log shows the value about to hit the HW */
	trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg);
	mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
}
2014-09-29 16:31:43 +03:00
/**
* mei_me_fw_status - read fw status register from pci config space
*
* @ dev : mei device
* @ fw_status : fw status register values
2014-09-29 16:31:50 +03:00
*
* Return : 0 on success , error otherwise
2014-09-29 16:31:43 +03:00
*/
static int mei_me_fw_status ( struct mei_device * dev ,
struct mei_fw_status * fw_status )
{
struct pci_dev * pdev = to_pci_dev ( dev - > dev ) ;
2014-09-29 16:31:45 +03:00
struct mei_me_hw * hw = to_me_hw ( dev ) ;
const struct mei_fw_status * fw_src = & hw - > cfg - > fw_status ;
2014-09-29 16:31:43 +03:00
int ret ;
int i ;
if ( ! fw_status )
return - EINVAL ;
fw_status - > count = fw_src - > count ;
for ( i = 0 ; i < fw_src - > count & & i < MEI_FW_STATUS_MAX ; i + + ) {
2016-02-07 22:46:51 +02:00
ret = pci_read_config_dword ( pdev , fw_src - > status [ i ] ,
& fw_status - > status [ i ] ) ;
trace_mei_pci_cfg_read ( dev - > dev , " PCI_CFG_HSF_X " ,
fw_src - > status [ i ] ,
fw_status - > status [ i ] ) ;
2014-09-29 16:31:43 +03:00
if ( ret )
return ret ;
}
return 0 ;
}
2013-01-08 23:07:31 +02:00
/**
2013-04-05 01:05:05 +09:00
* mei_me_hw_config - configure hw dependent settings
2013-01-08 23:07:31 +02:00
*
* @ dev : mei device
*/
2013-02-06 14:06:41 +02:00
static void mei_me_hw_config ( struct mei_device * dev )
2013-01-08 23:07:31 +02:00
{
2015-08-02 22:20:51 +03:00
struct pci_dev * pdev = to_pci_dev ( dev - > dev ) ;
2014-03-18 22:52:00 +02:00
struct mei_me_hw * hw = to_me_hw ( dev ) ;
2015-08-02 22:20:51 +03:00
u32 hcsr , reg ;
2013-01-08 23:07:31 +02:00
/* Doesn't change in runtime */
2015-08-02 22:20:51 +03:00
hcsr = mei_hcsr_read ( dev ) ;
2018-07-23 13:21:23 +03:00
hw - > hbuf_depth = ( hcsr & H_CBD ) > > 24 ;
2014-03-18 22:52:00 +02:00
2015-08-02 22:20:51 +03:00
reg = 0 ;
pci_read_config_dword ( pdev , PCI_CFG_HFS_1 , & reg ) ;
2016-02-07 22:46:51 +02:00
trace_mei_pci_cfg_read ( dev - > dev , " PCI_CFG_HFS_1 " , PCI_CFG_HFS_1 , reg ) ;
2015-08-02 22:20:51 +03:00
hw - > d0i3_supported =
( ( reg & PCI_CFG_HFS_1_D0I3_MSK ) = = PCI_CFG_HFS_1_D0I3_MSK ) ;
2015-08-02 22:20:56 +03:00
hw - > pg_state = MEI_PG_OFF ;
if ( hw - > d0i3_supported ) {
reg = mei_me_d0i3c_read ( dev ) ;
if ( reg & H_D0I3C_I3 )
hw - > pg_state = MEI_PG_ON ;
}
2013-01-08 23:07:31 +02:00
}
2014-03-18 22:51:59 +02:00
/**
* mei_me_pg_state - translate internal pg state
* to the mei power gating state
*
2014-09-29 16:31:50 +03:00
* @ dev : mei device
*
* Return : MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
2014-03-18 22:51:59 +02:00
*/
static inline enum mei_pg_state mei_me_pg_state ( struct mei_device * dev )
{
2014-03-18 22:52:00 +02:00
struct mei_me_hw * hw = to_me_hw ( dev ) ;
2014-09-29 16:31:37 +03:00
2014-03-18 22:52:00 +02:00
return hw - > pg_state ;
2014-03-18 22:51:59 +02:00
}
2016-12-04 15:22:59 +02:00
/**
 * me_intr_src - extract the interrupt source bits from an hcsr value
 *
 * @hcsr: supplied hcsr register value
 *
 * Return: non-zero if an interrupt is pending, 0 otherwise
 */
static inline u32 me_intr_src(u32 hcsr)
{
	return hcsr & H_CSR_IS_MASK;
}
/**
 * me_intr_disable - disables mei device interrupts
 *	using supplied hcsr register value.
 *
 * @dev: the device structure
 * @hcsr: supplied hcsr register value
 */
static inline void me_intr_disable(struct mei_device *dev, u32 hcsr)
{
	mei_hcsr_set(dev, hcsr & ~H_CSR_IE_MASK);
}
/**
 * me_intr_clear - acknowledge pending interrupts
 *	using supplied hcsr register value.
 *
 * Writing back an hcsr value with H_IS bits set clears them
 * (write-one-to-clear); skip the write when nothing is pending.
 *
 * @dev: the device structure
 * @hcsr: supplied hcsr register value
 */
static inline void me_intr_clear(struct mei_device *dev, u32 hcsr)
{
	if (me_intr_src(hcsr))
		mei_hcsr_write(dev, hcsr);
}
2011-05-15 13:43:43 +03:00
/**
 * mei_me_intr_clear - clear and stop interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_clear(struct mei_device *dev)
{
	me_intr_clear(dev, mei_hcsr_read(dev));
}
/**
2013-02-06 14:06:41 +02:00
* mei_me_intr_enable - enables mei device interrupts
2011-05-15 13:43:43 +03:00
*
* @ dev : the device structure
*/
2013-02-06 14:06:41 +02:00
static void mei_me_intr_enable ( struct mei_device * dev )
2011-05-15 13:43:43 +03:00
{
2015-02-10 10:39:32 +02:00
u32 hcsr = mei_hcsr_read ( dev ) ;
2014-09-29 16:31:37 +03:00
2015-08-02 22:20:52 +03:00
hcsr | = H_CSR_IE_MASK ;
2015-02-10 10:39:32 +02:00
mei_hcsr_set ( dev , hcsr ) ;
2011-05-15 13:43:43 +03:00
}
/**
 * mei_me_intr_disable - disables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_disable(struct mei_device *dev)
{
	me_intr_disable(dev, mei_hcsr_read(dev));
}
2016-12-04 15:22:58 +02:00
/**
* mei_me_synchronize_irq - wait for pending IRQ handlers
*
* @ dev : the device structure
*/
static void mei_me_synchronize_irq ( struct mei_device * dev )
{
struct pci_dev * pdev = to_pci_dev ( dev - > dev ) ;
synchronize_irq ( pdev - > irq ) ;
}
2013-03-10 13:56:07 +02:00
/**
* mei_me_hw_reset_release - release device from the reset
*
* @ dev : the device structure
*/
static void mei_me_hw_reset_release ( struct mei_device * dev )
{
2015-02-10 10:39:32 +02:00
u32 hcsr = mei_hcsr_read ( dev ) ;
2013-03-10 13:56:07 +02:00
hcsr | = H_IG ;
hcsr & = ~ H_RST ;
2015-02-10 10:39:32 +02:00
mei_hcsr_set ( dev , hcsr ) ;
2014-05-12 12:19:39 +03:00
/* complete this write before we set host ready on another CPU */
mmiowb ( ) ;
2013-03-10 13:56:07 +02:00
}
2013-01-08 23:07:27 +02:00
2013-01-08 23:07:29 +02:00
/**
2013-02-06 14:06:41 +02:00
* mei_me_host_set_ready - enable device
2013-01-08 23:07:29 +02:00
*
2014-09-29 16:31:50 +03:00
* @ dev : mei device
2013-01-08 23:07:29 +02:00
*/
2013-02-06 14:06:41 +02:00
static void mei_me_host_set_ready ( struct mei_device * dev )
2013-01-08 23:07:29 +02:00
{
2015-02-10 10:39:32 +02:00
u32 hcsr = mei_hcsr_read ( dev ) ;
2014-09-29 16:31:37 +03:00
2015-08-02 22:20:52 +03:00
hcsr | = H_CSR_IE_MASK | H_IG | H_RDY ;
2015-02-10 10:39:32 +02:00
mei_hcsr_set ( dev , hcsr ) ;
2013-01-08 23:07:29 +02:00
}
2014-09-29 16:31:50 +03:00
2013-01-08 23:07:29 +02:00
/**
2013-02-06 14:06:41 +02:00
* mei_me_host_is_ready - check whether the host has turned ready
2013-01-08 23:07:29 +02:00
*
2014-09-29 16:31:49 +03:00
* @ dev : mei device
* Return : bool
2013-01-08 23:07:29 +02:00
*/
2013-02-06 14:06:41 +02:00
static bool mei_me_host_is_ready ( struct mei_device * dev )
2013-01-08 23:07:29 +02:00
{
2015-02-10 10:39:32 +02:00
u32 hcsr = mei_hcsr_read ( dev ) ;
2014-09-29 16:31:37 +03:00
2014-11-12 23:42:14 +02:00
return ( hcsr & H_RDY ) = = H_RDY ;
2013-01-08 23:07:29 +02:00
}
/**
2013-02-06 14:06:41 +02:00
* mei_me_hw_is_ready - check whether the me ( hw ) has turned ready
2013-01-08 23:07:29 +02:00
*
2014-09-29 16:31:49 +03:00
* @ dev : mei device
* Return : bool
2013-01-08 23:07:29 +02:00
*/
2013-02-06 14:06:41 +02:00
static bool mei_me_hw_is_ready ( struct mei_device * dev )
2013-01-08 23:07:29 +02:00
{
2015-02-10 10:39:32 +02:00
u32 mecsr = mei_me_mecsr_read ( dev ) ;
2014-09-29 16:31:37 +03:00
2014-11-12 23:42:14 +02:00
return ( mecsr & ME_RDY_HRA ) = = ME_RDY_HRA ;
2013-01-08 23:07:29 +02:00
}
2012-12-25 19:06:06 +02:00
2017-02-02 11:26:54 +02:00
/**
 * mei_me_hw_is_resetting - check whether the me (hw) is in reset
 *
 * @dev: mei device
 * Return: bool
 */
static bool mei_me_hw_is_resetting(struct mei_device *dev)
{
	return (mei_me_mecsr_read(dev) & ME_RST_HRA) == ME_RST_HRA;
}
2014-09-29 16:31:50 +03:00
/**
* mei_me_hw_ready_wait - wait until the me ( hw ) has turned ready
* or timeout is reached
*
* @ dev : mei device
* Return : 0 on success , error otherwise
*/
2013-03-11 18:27:03 +02:00
static int mei_me_hw_ready_wait ( struct mei_device * dev )
{
mutex_unlock ( & dev - > device_lock ) ;
2014-08-12 20:16:03 +03:00
wait_event_timeout ( dev - > wait_hw_ready ,
2013-07-17 15:13:17 +03:00
dev - > recvd_hw_ready ,
2014-01-14 23:10:10 +02:00
mei_secs_to_jiffies ( MEI_HW_READY_TIMEOUT ) ) ;
2013-03-11 18:27:03 +02:00
mutex_lock ( & dev - > device_lock ) ;
2014-08-12 20:16:03 +03:00
if ( ! dev - > recvd_hw_ready ) {
2014-09-29 16:31:42 +03:00
dev_err ( dev - > dev , " wait hw ready failed \n " ) ;
2014-08-12 20:16:03 +03:00
return - ETIME ;
2013-03-11 18:27:03 +02:00
}
2015-01-25 23:45:28 +02:00
mei_me_hw_reset_release ( dev ) ;
2013-03-11 18:27:03 +02:00
dev - > recvd_hw_ready = false ;
return 0 ;
}
2014-09-29 16:31:50 +03:00
/**
* mei_me_hw_start - hw start routine
*
* @ dev : mei device
* Return : 0 on success , error otherwise
*/
2013-03-11 18:27:03 +02:00
static int mei_me_hw_start ( struct mei_device * dev )
{
int ret = mei_me_hw_ready_wait ( dev ) ;
2014-09-29 16:31:37 +03:00
2013-03-11 18:27:03 +02:00
if ( ret )
return ret ;
2014-09-29 16:31:42 +03:00
dev_dbg ( dev - > dev , " hw is ready \n " ) ;
2013-03-11 18:27:03 +02:00
mei_me_host_set_ready ( dev ) ;
return ret ;
}
2011-05-15 13:43:43 +03:00
/**
2012-06-25 23:46:28 +03:00
* mei_hbuf_filled_slots - gets number of device filled buffer slots
2011-05-15 13:43:43 +03:00
*
2013-01-17 19:54:15 +01:00
* @ dev : the device structure
2011-05-15 13:43:43 +03:00
*
2014-09-29 16:31:49 +03:00
* Return : number of filled slots
2011-05-15 13:43:43 +03:00
*/
2012-06-25 23:46:28 +03:00
static unsigned char mei_hbuf_filled_slots ( struct mei_device * dev )
2011-05-15 13:43:43 +03:00
{
2014-11-12 23:42:14 +02:00
u32 hcsr ;
2011-05-15 13:43:43 +03:00
char read_ptr , write_ptr ;
2015-02-10 10:39:32 +02:00
hcsr = mei_hcsr_read ( dev ) ;
2012-06-25 23:46:28 +03:00
2014-11-12 23:42:14 +02:00
read_ptr = ( char ) ( ( hcsr & H_CBRP ) > > 8 ) ;
write_ptr = ( char ) ( ( hcsr & H_CBWP ) > > 16 ) ;
2011-05-15 13:43:43 +03:00
return ( unsigned char ) ( write_ptr - read_ptr ) ;
}
/**
2013-04-05 01:05:05 +09:00
* mei_me_hbuf_is_empty - checks if host buffer is empty .
2011-05-15 13:43:43 +03:00
*
* @ dev : the device structure
*
2014-09-29 16:31:49 +03:00
* Return : true if empty , false - otherwise .
2011-05-15 13:43:43 +03:00
*/
2013-02-06 14:06:41 +02:00
static bool mei_me_hbuf_is_empty ( struct mei_device * dev )
2011-05-15 13:43:43 +03:00
{
2012-06-25 23:46:28 +03:00
return mei_hbuf_filled_slots ( dev ) = = 0 ;
2011-05-15 13:43:43 +03:00
}
/**
2013-02-06 14:06:41 +02:00
* mei_me_hbuf_empty_slots - counts write empty slots .
2011-05-15 13:43:43 +03:00
*
* @ dev : the device structure
*
2014-09-29 16:31:49 +03:00
* Return : - EOVERFLOW if overflow , otherwise empty slots count
2011-05-15 13:43:43 +03:00
*/
2013-02-06 14:06:41 +02:00
static int mei_me_hbuf_empty_slots ( struct mei_device * dev )
2011-05-15 13:43:43 +03:00
{
2018-07-23 13:21:23 +03:00
struct mei_me_hw * hw = to_me_hw ( dev ) ;
2012-06-25 23:46:27 +03:00
unsigned char filled_slots , empty_slots ;
2011-05-15 13:43:43 +03:00
2012-06-25 23:46:28 +03:00
filled_slots = mei_hbuf_filled_slots ( dev ) ;
2018-07-23 13:21:23 +03:00
empty_slots = hw - > hbuf_depth - filled_slots ;
2011-05-15 13:43:43 +03:00
/* check for overflow */
2018-07-23 13:21:23 +03:00
if ( filled_slots > hw - > hbuf_depth )
2011-05-15 13:43:43 +03:00
return - EOVERFLOW ;
return empty_slots ;
}
2014-09-29 16:31:50 +03:00
/**
2018-07-23 13:21:23 +03:00
* mei_me_hbuf_depth - returns depth of the hw buffer .
2014-09-29 16:31:50 +03:00
*
* @ dev : the device structure
*
2018-07-23 13:21:23 +03:00
* Return : size of hw buffer in slots
2014-09-29 16:31:50 +03:00
*/
2018-07-23 13:21:23 +03:00
static u32 mei_me_hbuf_depth ( const struct mei_device * dev )
2013-02-06 14:06:41 +02:00
{
2018-07-23 13:21:23 +03:00
struct mei_me_hw * hw = to_me_hw ( dev ) ;
return hw - > hbuf_depth ;
2013-02-06 14:06:41 +02:00
}
2011-05-15 13:43:43 +03:00
/**
2016-11-11 03:00:08 +02:00
* mei_me_hbuf_write - writes a message to host hw buffer .
2011-05-15 13:43:43 +03:00
*
* @ dev : the device structure
2018-07-31 09:35:33 +03:00
* @ hdr : header of message
* @ hdr_len : header length in bytes : must be multiplication of a slot ( 4 bytes )
* @ data : payload
* @ data_len : payload length in bytes
2011-05-15 13:43:43 +03:00
*
2018-07-31 09:35:33 +03:00
* Return : 0 if success , < 0 - otherwise .
2011-05-15 13:43:43 +03:00
*/
2016-11-11 03:00:08 +02:00
static int mei_me_hbuf_write ( struct mei_device * dev ,
2018-07-31 09:35:33 +03:00
const void * hdr , size_t hdr_len ,
const void * data , size_t data_len )
2011-05-15 13:43:43 +03:00
{
2013-03-11 18:27:02 +02:00
unsigned long rem ;
2018-07-12 17:10:09 +03:00
unsigned long i ;
2018-07-31 09:35:33 +03:00
const u32 * reg_buf ;
2013-03-11 18:27:02 +02:00
u32 dw_cnt ;
2012-06-19 09:13:35 +03:00
int empty_slots ;
2011-05-15 13:43:43 +03:00
2018-07-31 09:35:33 +03:00
if ( WARN_ON ( ! hdr | | ! data | | hdr_len & 0x3 ) )
return - EINVAL ;
dev_dbg ( dev - > dev , MEI_HDR_FMT , MEI_HDR_PRM ( ( struct mei_msg_hdr * ) hdr ) ) ;
2011-05-15 13:43:43 +03:00
2012-06-25 23:46:28 +03:00
empty_slots = mei_hbuf_empty_slots ( dev ) ;
2014-09-29 16:31:42 +03:00
dev_dbg ( dev - > dev , " empty slots = %hu. \n " , empty_slots ) ;
2011-05-15 13:43:43 +03:00
2018-07-12 17:10:08 +03:00
if ( empty_slots < 0 )
return - EOVERFLOW ;
2018-07-31 09:35:33 +03:00
dw_cnt = mei_data2slots ( hdr_len + data_len ) ;
2018-07-12 17:10:08 +03:00
if ( dw_cnt > ( u32 ) empty_slots )
2014-02-19 17:35:48 +02:00
return - EMSGSIZE ;
2011-05-15 13:43:43 +03:00
2018-07-31 09:35:33 +03:00
reg_buf = hdr ;
for ( i = 0 ; i < hdr_len / MEI_SLOT_SIZE ; i + + )
mei_me_hcbww_write ( dev , reg_buf [ i ] ) ;
2011-05-15 13:43:43 +03:00
2018-07-31 09:35:33 +03:00
reg_buf = data ;
for ( i = 0 ; i < data_len / MEI_SLOT_SIZE ; i + + )
2015-02-10 10:39:32 +02:00
mei_me_hcbww_write ( dev , reg_buf [ i ] ) ;
2011-05-15 13:43:43 +03:00
2018-07-31 09:35:33 +03:00
rem = data_len & 0x3 ;
2012-06-19 09:13:35 +03:00
if ( rem > 0 ) {
u32 reg = 0 ;
2014-09-29 16:31:37 +03:00
2018-07-31 09:35:33 +03:00
memcpy ( & reg , ( const u8 * ) data + data_len - rem , rem ) ;
2015-02-10 10:39:32 +02:00
mei_me_hcbww_write ( dev , reg ) ;
2011-05-15 13:43:43 +03:00
}
2017-02-02 11:26:53 +02:00
mei_hcsr_set_hig ( dev ) ;
2013-02-06 14:06:41 +02:00
if ( ! mei_me_hw_is_ready ( dev ) )
2012-03-14 14:39:42 +02:00
return - EIO ;
2011-05-15 13:43:43 +03:00
2012-03-14 14:39:42 +02:00
return 0 ;
2011-05-15 13:43:43 +03:00
}
/**
2013-02-06 14:06:41 +02:00
* mei_me_count_full_read_slots - counts read full slots .
2011-05-15 13:43:43 +03:00
*
* @ dev : the device structure
*
2014-09-29 16:31:49 +03:00
* Return : - EOVERFLOW if overflow , otherwise filled slots count
2011-05-15 13:43:43 +03:00
*/
2013-02-06 14:06:41 +02:00
static int mei_me_count_full_read_slots ( struct mei_device * dev )
2011-05-15 13:43:43 +03:00
{
2014-11-12 23:42:14 +02:00
u32 me_csr ;
2011-05-15 13:43:43 +03:00
char read_ptr , write_ptr ;
unsigned char buffer_depth , filled_slots ;
2015-02-10 10:39:32 +02:00
me_csr = mei_me_mecsr_read ( dev ) ;
2014-11-12 23:42:14 +02:00
buffer_depth = ( unsigned char ) ( ( me_csr & ME_CBD_HRA ) > > 24 ) ;
read_ptr = ( char ) ( ( me_csr & ME_CBRP_HRA ) > > 8 ) ;
write_ptr = ( char ) ( ( me_csr & ME_CBWP_HRA ) > > 16 ) ;
2011-05-15 13:43:43 +03:00
filled_slots = ( unsigned char ) ( write_ptr - read_ptr ) ;
/* check for overflow */
if ( filled_slots > buffer_depth )
return - EOVERFLOW ;
2014-09-29 16:31:42 +03:00
dev_dbg ( dev - > dev , " filled_slots =%08x \n " , filled_slots ) ;
2011-05-15 13:43:43 +03:00
return ( int ) filled_slots ;
}
/**
2013-02-06 14:06:41 +02:00
* mei_me_read_slots - reads a message from mei device .
2011-05-15 13:43:43 +03:00
*
* @ dev : the device structure
* @ buffer : message buffer will be written
* @ buffer_length : message size will be read
2014-09-29 16:31:50 +03:00
*
* Return : always 0
2011-05-15 13:43:43 +03:00
*/
2013-02-06 14:06:41 +02:00
static int mei_me_read_slots ( struct mei_device * dev , unsigned char * buffer ,
2018-07-23 13:21:22 +03:00
unsigned long buffer_length )
2011-05-15 13:43:43 +03:00
{
2012-02-09 19:25:54 +02:00
u32 * reg_buf = ( u32 * ) buffer ;
2011-05-15 13:43:43 +03:00
2018-07-23 13:21:22 +03:00
for ( ; buffer_length > = MEI_SLOT_SIZE ; buffer_length - = MEI_SLOT_SIZE )
2013-02-06 14:06:41 +02:00
* reg_buf + + = mei_me_mecbrw_read ( dev ) ;
2011-05-15 13:43:43 +03:00
if ( buffer_length > 0 ) {
2013-02-06 14:06:41 +02:00
u32 reg = mei_me_mecbrw_read ( dev ) ;
2014-09-29 16:31:37 +03:00
2012-02-09 19:25:54 +02:00
memcpy ( reg_buf , & reg , buffer_length ) ;
2011-05-15 13:43:43 +03:00
}
2017-02-02 11:26:53 +02:00
mei_hcsr_set_hig ( dev ) ;
2013-02-06 14:06:41 +02:00
return 0 ;
2011-05-15 13:43:43 +03:00
}
2014-03-18 22:51:57 +02:00
/**
2015-02-10 10:39:34 +02:00
* mei_me_pg_set - write pg enter register
2014-03-18 22:51:57 +02:00
*
* @ dev : the device structure
*/
2015-02-10 10:39:34 +02:00
static void mei_me_pg_set ( struct mei_device * dev )
2014-03-18 22:51:57 +02:00
{
struct mei_me_hw * hw = to_me_hw ( dev ) ;
2015-02-10 10:39:33 +02:00
u32 reg ;
reg = mei_me_reg_read ( hw , H_HPG_CSR ) ;
trace_mei_reg_read ( dev - > dev , " H_HPG_CSR " , H_HPG_CSR , reg ) ;
2014-09-29 16:31:37 +03:00
2014-03-18 22:51:57 +02:00
reg | = H_HPG_CSR_PGI ;
2015-02-10 10:39:33 +02:00
trace_mei_reg_write ( dev - > dev , " H_HPG_CSR " , H_HPG_CSR , reg ) ;
2014-03-18 22:51:57 +02:00
mei_me_reg_write ( hw , H_HPG_CSR , reg ) ;
}
/**
2015-02-10 10:39:34 +02:00
* mei_me_pg_unset - write pg exit register
2014-03-18 22:51:57 +02:00
*
* @ dev : the device structure
*/
2015-02-10 10:39:34 +02:00
static void mei_me_pg_unset ( struct mei_device * dev )
2014-03-18 22:51:57 +02:00
{
struct mei_me_hw * hw = to_me_hw ( dev ) ;
2015-02-10 10:39:33 +02:00
u32 reg ;
reg = mei_me_reg_read ( hw , H_HPG_CSR ) ;
trace_mei_reg_read ( dev - > dev , " H_HPG_CSR " , H_HPG_CSR , reg ) ;
2014-03-18 22:51:57 +02:00
WARN ( ! ( reg & H_HPG_CSR_PGI ) , " PGI is not set \n " ) ;
reg | = H_HPG_CSR_PGIHEXR ;
2015-02-10 10:39:33 +02:00
trace_mei_reg_write ( dev - > dev , " H_HPG_CSR " , H_HPG_CSR , reg ) ;
2014-03-18 22:51:57 +02:00
mei_me_reg_write ( hw , H_HPG_CSR , reg ) ;
}
2014-03-18 22:52:00 +02:00
/**
2015-08-02 22:20:54 +03:00
* mei_me_pg_legacy_enter_sync - perform legacy pg entry procedure
2014-03-18 22:52:00 +02:00
*
* @ dev : the device structure
*
2014-09-29 16:31:49 +03:00
* Return : 0 on success an error code otherwise
2014-03-18 22:52:00 +02:00
*/
2015-08-02 22:20:54 +03:00
static int mei_me_pg_legacy_enter_sync ( struct mei_device * dev )
2014-03-18 22:52:00 +02:00
{
struct mei_me_hw * hw = to_me_hw ( dev ) ;
unsigned long timeout = mei_secs_to_jiffies ( MEI_PGI_TIMEOUT ) ;
int ret ;
dev - > pg_event = MEI_PG_EVENT_WAIT ;
ret = mei_hbm_pg ( dev , MEI_PG_ISOLATION_ENTRY_REQ_CMD ) ;
if ( ret )
return ret ;
mutex_unlock ( & dev - > device_lock ) ;
wait_event_timeout ( dev - > wait_pg ,
dev - > pg_event = = MEI_PG_EVENT_RECEIVED , timeout ) ;
mutex_lock ( & dev - > device_lock ) ;
if ( dev - > pg_event = = MEI_PG_EVENT_RECEIVED ) {
2015-02-10 10:39:34 +02:00
mei_me_pg_set ( dev ) ;
2014-03-18 22:52:00 +02:00
ret = 0 ;
} else {
ret = - ETIME ;
}
dev - > pg_event = MEI_PG_EVENT_IDLE ;
hw - > pg_state = MEI_PG_ON ;
return ret ;
}
/**
2015-08-02 22:20:54 +03:00
* mei_me_pg_legacy_exit_sync - perform legacy pg exit procedure
2014-03-18 22:52:00 +02:00
*
* @ dev : the device structure
*
2014-09-29 16:31:49 +03:00
* Return : 0 on success an error code otherwise
2014-03-18 22:52:00 +02:00
*/
2015-08-02 22:20:54 +03:00
static int mei_me_pg_legacy_exit_sync ( struct mei_device * dev )
2014-03-18 22:52:00 +02:00
{
struct mei_me_hw * hw = to_me_hw ( dev ) ;
unsigned long timeout = mei_secs_to_jiffies ( MEI_PGI_TIMEOUT ) ;
int ret ;
if ( dev - > pg_event = = MEI_PG_EVENT_RECEIVED )
goto reply ;
dev - > pg_event = MEI_PG_EVENT_WAIT ;
2015-02-10 10:39:34 +02:00
mei_me_pg_unset ( dev ) ;
2014-03-18 22:52:00 +02:00
mutex_unlock ( & dev - > device_lock ) ;
wait_event_timeout ( dev - > wait_pg ,
dev - > pg_event = = MEI_PG_EVENT_RECEIVED , timeout ) ;
mutex_lock ( & dev - > device_lock ) ;
reply :
2015-06-13 08:51:17 +03:00
if ( dev - > pg_event ! = MEI_PG_EVENT_RECEIVED ) {
ret = - ETIME ;
goto out ;
}
dev - > pg_event = MEI_PG_EVENT_INTR_WAIT ;
ret = mei_hbm_pg ( dev , MEI_PG_ISOLATION_EXIT_RES_CMD ) ;
if ( ret )
return ret ;
mutex_unlock ( & dev - > device_lock ) ;
wait_event_timeout ( dev - > wait_pg ,
dev - > pg_event = = MEI_PG_EVENT_INTR_RECEIVED , timeout ) ;
mutex_lock ( & dev - > device_lock ) ;
if ( dev - > pg_event = = MEI_PG_EVENT_INTR_RECEIVED )
ret = 0 ;
2014-03-18 22:52:00 +02:00
else
ret = - ETIME ;
2015-06-13 08:51:17 +03:00
out :
2014-03-18 22:52:00 +02:00
dev - > pg_event = MEI_PG_EVENT_IDLE ;
hw - > pg_state = MEI_PG_OFF ;
return ret ;
}
2015-06-13 08:51:17 +03:00
/**
* mei_me_pg_in_transition - is device now in pg transition
*
* @ dev : the device structure
*
* Return : true if in pg transition , false otherwise
*/
static bool mei_me_pg_in_transition ( struct mei_device * dev )
{
return dev - > pg_event > = MEI_PG_EVENT_WAIT & &
dev - > pg_event < = MEI_PG_EVENT_INTR_WAIT ;
}
2014-03-18 22:51:58 +02:00
/**
* mei_me_pg_is_enabled - detect if PG is supported by HW
*
* @ dev : the device structure
*
2014-09-29 16:31:49 +03:00
* Return : true is pg supported , false otherwise
2014-03-18 22:51:58 +02:00
*/
static bool mei_me_pg_is_enabled ( struct mei_device * dev )
{
2015-08-02 22:20:54 +03:00
struct mei_me_hw * hw = to_me_hw ( dev ) ;
2015-02-10 10:39:32 +02:00
u32 reg = mei_me_mecsr_read ( dev ) ;
2014-03-18 22:51:58 +02:00
2015-08-02 22:20:54 +03:00
if ( hw - > d0i3_supported )
return true ;
2014-03-18 22:51:58 +02:00
if ( ( reg & ME_PGIC_HRA ) = = 0 )
goto notsupported ;
2014-08-21 14:29:21 +03:00
if ( ! dev - > hbm_f_pg_supported )
2014-03-18 22:51:58 +02:00
goto notsupported ;
return true ;
notsupported :
2015-08-02 22:20:54 +03:00
dev_dbg ( dev - > dev , " pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d \n " ,
hw - > d0i3_supported ,
2014-03-18 22:51:58 +02:00
! ! ( reg & ME_PGIC_HRA ) ,
dev - > version . major_version ,
dev - > version . minor_version ,
HBM_MAJOR_VERSION_PGI ,
HBM_MINOR_VERSION_PGI ) ;
return false ;
}
2015-06-13 08:51:17 +03:00
/**
2015-08-02 22:20:54 +03:00
* mei_me_d0i3_set - write d0i3 register bit on mei device .
2015-06-13 08:51:17 +03:00
*
* @ dev : the device structure
2015-08-02 22:20:54 +03:00
* @ intr : ask for interrupt
*
* Return : D0I3C register value
2015-06-13 08:51:17 +03:00
*/
2015-08-02 22:20:54 +03:00
static u32 mei_me_d0i3_set ( struct mei_device * dev , bool intr )
{
u32 reg = mei_me_d0i3c_read ( dev ) ;
reg | = H_D0I3C_I3 ;
if ( intr )
reg | = H_D0I3C_IR ;
else
reg & = ~ H_D0I3C_IR ;
mei_me_d0i3c_write ( dev , reg ) ;
/* read it to ensure HW consistency */
reg = mei_me_d0i3c_read ( dev ) ;
return reg ;
}
/**
 * mei_me_d0i3_unset - clean d0i3 register bit on mei device.
 *
 * @dev: the device structure
 *
 * Return: D0I3C register value
 */
static u32 mei_me_d0i3_unset(struct mei_device *dev)
{
	u32 reg = mei_me_d0i3c_read(dev);

	reg &= ~H_D0I3C_I3;
	reg |= H_D0I3C_IR;
	mei_me_d0i3c_write(dev, reg);

	/* read it back to ensure HW consistency */
	return mei_me_d0i3c_read(dev);
}
/**
* mei_me_d0i3_enter_sync - perform d0i3 entry procedure
*
* @ dev : the device structure
*
* Return : 0 on success an error code otherwise
*/
static int mei_me_d0i3_enter_sync ( struct mei_device * dev )
{
struct mei_me_hw * hw = to_me_hw ( dev ) ;
unsigned long d0i3_timeout = mei_secs_to_jiffies ( MEI_D0I3_TIMEOUT ) ;
unsigned long pgi_timeout = mei_secs_to_jiffies ( MEI_PGI_TIMEOUT ) ;
int ret ;
u32 reg ;
reg = mei_me_d0i3c_read ( dev ) ;
if ( reg & H_D0I3C_I3 ) {
/* we are in d0i3, nothing to do */
dev_dbg ( dev - > dev , " d0i3 set not needed \n " ) ;
ret = 0 ;
goto on ;
}
/* PGI entry procedure */
dev - > pg_event = MEI_PG_EVENT_WAIT ;
ret = mei_hbm_pg ( dev , MEI_PG_ISOLATION_ENTRY_REQ_CMD ) ;
if ( ret )
/* FIXME: should we reset here? */
goto out ;
mutex_unlock ( & dev - > device_lock ) ;
wait_event_timeout ( dev - > wait_pg ,
dev - > pg_event = = MEI_PG_EVENT_RECEIVED , pgi_timeout ) ;
mutex_lock ( & dev - > device_lock ) ;
if ( dev - > pg_event ! = MEI_PG_EVENT_RECEIVED ) {
ret = - ETIME ;
goto out ;
}
/* end PGI entry procedure */
dev - > pg_event = MEI_PG_EVENT_INTR_WAIT ;
reg = mei_me_d0i3_set ( dev , true ) ;
if ( ! ( reg & H_D0I3C_CIP ) ) {
dev_dbg ( dev - > dev , " d0i3 enter wait not needed \n " ) ;
ret = 0 ;
goto on ;
}
mutex_unlock ( & dev - > device_lock ) ;
wait_event_timeout ( dev - > wait_pg ,
dev - > pg_event = = MEI_PG_EVENT_INTR_RECEIVED , d0i3_timeout ) ;
mutex_lock ( & dev - > device_lock ) ;
if ( dev - > pg_event ! = MEI_PG_EVENT_INTR_RECEIVED ) {
reg = mei_me_d0i3c_read ( dev ) ;
if ( ! ( reg & H_D0I3C_I3 ) ) {
ret = - ETIME ;
goto out ;
}
}
ret = 0 ;
on :
hw - > pg_state = MEI_PG_ON ;
out :
dev - > pg_event = MEI_PG_EVENT_IDLE ;
dev_dbg ( dev - > dev , " d0i3 enter ret = %d \n " , ret ) ;
return ret ;
}
/**
* mei_me_d0i3_enter - perform d0i3 entry procedure
* no hbm PG handshake
* no waiting for confirmation ; runs with interrupts
* disabled
*
* @ dev : the device structure
*
* Return : 0 on success an error code otherwise
*/
static int mei_me_d0i3_enter ( struct mei_device * dev )
{
struct mei_me_hw * hw = to_me_hw ( dev ) ;
u32 reg ;
reg = mei_me_d0i3c_read ( dev ) ;
if ( reg & H_D0I3C_I3 ) {
/* we are in d0i3, nothing to do */
dev_dbg ( dev - > dev , " already d0i3 : set not needed \n " ) ;
goto on ;
}
mei_me_d0i3_set ( dev , false ) ;
on :
hw - > pg_state = MEI_PG_ON ;
dev - > pg_event = MEI_PG_EVENT_IDLE ;
dev_dbg ( dev - > dev , " d0i3 enter \n " ) ;
return 0 ;
}
/**
* mei_me_d0i3_exit_sync - perform d0i3 exit procedure
*
* @ dev : the device structure
*
* Return : 0 on success an error code otherwise
*/
static int mei_me_d0i3_exit_sync ( struct mei_device * dev )
{
struct mei_me_hw * hw = to_me_hw ( dev ) ;
unsigned long timeout = mei_secs_to_jiffies ( MEI_D0I3_TIMEOUT ) ;
int ret ;
u32 reg ;
dev - > pg_event = MEI_PG_EVENT_INTR_WAIT ;
reg = mei_me_d0i3c_read ( dev ) ;
if ( ! ( reg & H_D0I3C_I3 ) ) {
/* we are not in d0i3, nothing to do */
dev_dbg ( dev - > dev , " d0i3 exit not needed \n " ) ;
ret = 0 ;
goto off ;
}
reg = mei_me_d0i3_unset ( dev ) ;
if ( ! ( reg & H_D0I3C_CIP ) ) {
dev_dbg ( dev - > dev , " d0i3 exit wait not needed \n " ) ;
ret = 0 ;
goto off ;
}
mutex_unlock ( & dev - > device_lock ) ;
wait_event_timeout ( dev - > wait_pg ,
dev - > pg_event = = MEI_PG_EVENT_INTR_RECEIVED , timeout ) ;
mutex_lock ( & dev - > device_lock ) ;
if ( dev - > pg_event ! = MEI_PG_EVENT_INTR_RECEIVED ) {
reg = mei_me_d0i3c_read ( dev ) ;
if ( reg & H_D0I3C_I3 ) {
ret = - ETIME ;
goto out ;
}
}
ret = 0 ;
off :
hw - > pg_state = MEI_PG_OFF ;
out :
dev - > pg_event = MEI_PG_EVENT_IDLE ;
dev_dbg ( dev - > dev , " d0i3 exit ret = %d \n " , ret ) ;
return ret ;
}
/**
* mei_me_pg_legacy_intr - perform legacy pg processing
* in interrupt thread handler
*
* @ dev : the device structure
*/
static void mei_me_pg_legacy_intr ( struct mei_device * dev )
2015-06-13 08:51:17 +03:00
{
struct mei_me_hw * hw = to_me_hw ( dev ) ;
if ( dev - > pg_event ! = MEI_PG_EVENT_INTR_WAIT )
return ;
dev - > pg_event = MEI_PG_EVENT_INTR_RECEIVED ;
hw - > pg_state = MEI_PG_OFF ;
if ( waitqueue_active ( & dev - > wait_pg ) )
wake_up ( & dev - > wait_pg ) ;
}
/**
 * mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler
 *
 * @dev: the device structure
 * @intr_source: interrupt source
 */
static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	/*
	 * A D0i3 interrupt while a transition is pending means the
	 * requested enter/exit completed: toggle pg_state accordingly
	 * and wake whoever is blocked in the *_sync routines.
	 */
	if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
	    (intr_source & H_D0I3C_IS)) {
		dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
		if (hw->pg_state == MEI_PG_ON) {
			hw->pg_state = MEI_PG_OFF;
			if (dev->hbm_state != MEI_HBM_IDLE) {
				/*
				 * force H_RDY because it could be
				 * wiped off during PG
				 */
				dev_dbg(dev->dev, "d0i3 set host ready\n");
				mei_me_host_set_ready(dev);
			}
		} else {
			hw->pg_state = MEI_PG_ON;
		}

		wake_up(&dev->wait_pg);
	}

	if (hw->pg_state == MEI_PG_ON && (intr_source & H_IS)) {
		/*
		 * HW sent some data and we are in D0i3, so
		 * we got here because of HW initiated exit from D0i3.
		 * Start runtime pm resume sequence to exit low power state.
		 */
		dev_dbg(dev->dev, "d0i3 want resume\n");
		mei_hbm_pg_resume(dev);
	}
}
/**
* mei_me_pg_intr - perform pg processing in interrupt thread handler
*
* @ dev : the device structure
2016-12-04 15:22:59 +02:00
* @ intr_source : interrupt source
2015-08-02 22:20:54 +03:00
*/
2016-12-04 15:22:59 +02:00
static void mei_me_pg_intr ( struct mei_device * dev , u32 intr_source )
2015-08-02 22:20:54 +03:00
{
struct mei_me_hw * hw = to_me_hw ( dev ) ;
if ( hw - > d0i3_supported )
2016-12-04 15:22:59 +02:00
mei_me_d0i3_intr ( dev , intr_source ) ;
2015-08-02 22:20:54 +03:00
else
mei_me_pg_legacy_intr ( dev ) ;
}
/**
* mei_me_pg_enter_sync - perform runtime pm entry procedure
*
* @ dev : the device structure
*
* Return : 0 on success an error code otherwise
*/
int mei_me_pg_enter_sync ( struct mei_device * dev )
{
struct mei_me_hw * hw = to_me_hw ( dev ) ;
if ( hw - > d0i3_supported )
return mei_me_d0i3_enter_sync ( dev ) ;
else
return mei_me_pg_legacy_enter_sync ( dev ) ;
}
/**
* mei_me_pg_exit_sync - perform runtime pm exit procedure
*
* @ dev : the device structure
*
* Return : 0 on success an error code otherwise
*/
int mei_me_pg_exit_sync ( struct mei_device * dev )
{
struct mei_me_hw * hw = to_me_hw ( dev ) ;
if ( hw - > d0i3_supported )
return mei_me_d0i3_exit_sync ( dev ) ;
else
return mei_me_pg_legacy_exit_sync ( dev ) ;
}
/**
 * mei_me_hw_reset - resets fw via mei csr register.
 *
 * @dev: the device structure
 * @intr_enable: if interrupt should be enabled after reset.
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	int ret;
	u32 hcsr;

	/*
	 * When interrupts will be kept on, make sure the device is
	 * brought out of D0i3 before the reset sequence is started.
	 */
	if (intr_enable) {
		mei_me_intr_enable(dev);
		if (hw->d0i3_supported) {
			ret = mei_me_d0i3_exit_sync(dev);
			if (ret)
				return ret;
		}
	}

	/* mark the device active for runtime-PM bookkeeping */
	pm_runtime_set_active(dev->dev);

	hcsr = mei_hcsr_read(dev);
	/* H_RST may be found lit before reset is started,
	 * for example if preceding reset flow hasn't completed.
	 * In that case asserting H_RST will be ignored, therefore
	 * we need to clean H_RST bit to start a successful reset sequence.
	 */
	if ((hcsr & H_RST) == H_RST) {
		dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
		hcsr &= ~H_RST;
		mei_hcsr_set(dev, hcsr);
		hcsr = mei_hcsr_read(dev);
	}

	/* assert reset, interrupt generate, and ack all interrupt sources */
	hcsr |= H_RST | H_IG | H_CSR_IS_MASK;

	if (!intr_enable)
		hcsr &= ~H_CSR_IE_MASK;

	dev->recvd_hw_ready = false;
	mei_hcsr_write(dev, hcsr);

	/*
	 * Host reads the H_CSR once to ensure that the
	 * posted write to H_CSR completes.
	 */
	hcsr = mei_hcsr_read(dev);

	if ((hcsr & H_RST) == 0)
		dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);

	if ((hcsr & H_RDY) == H_RDY)
		dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);

	/*
	 * With interrupts off nobody will release the reset for us,
	 * so do it here and drop back into D0i3 when supported.
	 */
	if (!intr_enable) {
		mei_me_hw_reset_release(dev);
		if (hw->d0i3_supported) {
			ret = mei_me_d0i3_enter(dev);
			if (ret)
				return ret;
		}
	}
	return 0;
}
2013-02-06 14:06:42 +02:00
/**
* mei_me_irq_quick_handler - The ISR of the MEI device
*
* @ irq : The irq number
* @ dev_id : pointer to the device structure
*
2014-09-29 16:31:49 +03:00
* Return : irqreturn_t
2013-02-06 14:06:42 +02:00
*/
irqreturn_t mei_me_irq_quick_handler ( int irq , void * dev_id )
{
2015-08-02 22:20:52 +03:00
struct mei_device * dev = ( struct mei_device * ) dev_id ;
u32 hcsr ;
2013-02-06 14:06:42 +02:00
2015-08-02 22:20:52 +03:00
hcsr = mei_hcsr_read ( dev ) ;
2016-12-04 15:22:59 +02:00
if ( ! me_intr_src ( hcsr ) )
2013-02-06 14:06:42 +02:00
return IRQ_NONE ;
2016-12-04 15:22:59 +02:00
dev_dbg ( dev - > dev , " interrupt source 0x%08X \n " , me_intr_src ( hcsr ) ) ;
2013-02-06 14:06:42 +02:00
2016-12-04 15:22:59 +02:00
/* disable interrupts on device */
me_intr_disable ( dev , hcsr ) ;
2013-02-06 14:06:42 +02:00
return IRQ_WAKE_THREAD ;
}
/**
 * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
 * processing.
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: irqreturn_t
 *
 */
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct list_head cmpl_list;
	s32 slots;
	u32 hcsr;
	int rets = 0;

	dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");
	/* initialize our complete list */
	mutex_lock(&dev->device_lock);

	/* acknowledge the interrupt sources latched in H_CSR */
	hcsr = mei_hcsr_read(dev);
	me_intr_clear(dev, hcsr);

	INIT_LIST_HEAD(&cmpl_list);

	/* check if ME wants a reset */
	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
		dev_warn(dev->dev, "FW not ready: resetting.\n");
		schedule_work(&dev->reset_work);
		goto end;
	}

	/* NOTE(review): presumably re-arms interrupt generate (H_IG) while a
	 * FW reset is in flight - confirm against mei_hcsr_set_hig() */
	if (mei_me_hw_is_resetting(dev))
		mei_hcsr_set_hig(dev);

	/* let the power-gating state machine consume its interrupt bits */
	mei_me_pg_intr(dev, me_intr_src(hcsr));

	/* check if we need to start the dev */
	if (!mei_host_is_ready(dev)) {
		if (mei_hw_is_ready(dev)) {
			dev_dbg(dev->dev, "we need to start the dev.\n");
			dev->recvd_hw_ready = true;
			wake_up(&dev->wait_hw_ready);
		} else {
			dev_dbg(dev->dev, "Spurious Interrupt\n");
		}
		goto end;
	}
	/* check slots available for reading */
	slots = mei_count_full_read_slots(dev);
	while (slots > 0) {
		dev_dbg(dev->dev, "slots to read = %08x\n", slots);
		rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
		/* There is a race between ME write and interrupt delivery:
		 * Not all data is always available immediately after the
		 * interrupt, so try to read again on the next interrupt.
		 */
		if (rets == -ENODATA)
			break;

		/* any other error outside of a shutdown/reset triggers reset */
		if (rets &&
		    (dev->dev_state != MEI_DEV_RESETTING &&
		     dev->dev_state != MEI_DEV_POWER_DOWN)) {
			dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
				rets);
			schedule_work(&dev->reset_work);
			goto end;
		}
	}

	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

	/*
	 * During PG handshake only allowed write is the replay to the
	 * PG exit message, so block calling write function
	 * if the pg event is in PG handshake
	 */
	if (dev->pg_event != MEI_PG_EVENT_WAIT &&
	    dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		rets = mei_irq_write_handler(dev, &cmpl_list);
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	}

	/* complete the requests queued by the read/write handlers */
	mei_irq_compl_handler(dev, &cmpl_list);

end:
	dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
	/* re-enable the interrupts disabled by the quick handler */
	mei_me_intr_enable(dev);
	mutex_unlock(&dev->device_lock);
	return IRQ_HANDLED;
}
/* mei_me_hw_ops - binds the generic MEI layer to the ME hardware accessors */
static const struct mei_hw_ops mei_me_hw_ops = {

	.fw_status = mei_me_fw_status,
	.pg_state = mei_me_pg_state,

	.host_is_ready = mei_me_host_is_ready,

	.hw_is_ready = mei_me_hw_is_ready,
	.hw_reset = mei_me_hw_reset,
	.hw_config = mei_me_hw_config,
	.hw_start = mei_me_hw_start,

	/* power gating */
	.pg_in_transition = mei_me_pg_in_transition,
	.pg_is_enabled = mei_me_pg_is_enabled,

	/* interrupt management */
	.intr_clear = mei_me_intr_clear,
	.intr_enable = mei_me_intr_enable,
	.intr_disable = mei_me_intr_disable,
	.synchronize_irq = mei_me_synchronize_irq,

	/* host (write) buffer */
	.hbuf_free_slots = mei_me_hbuf_empty_slots,
	.hbuf_is_ready = mei_me_hbuf_is_empty,
	.hbuf_depth = mei_me_hbuf_depth,

	.write = mei_me_hbuf_write,

	/* read buffer */
	.rdbuf_full_slots = mei_me_count_full_read_slots,
	.read_hdr = mei_me_mecbrw_read,
	.read = mei_me_read_slots
};
2014-05-13 01:30:54 +03:00
static bool mei_me_fw_type_nm ( struct pci_dev * pdev )
{
u32 reg ;
2014-09-29 16:31:37 +03:00
2014-05-13 01:30:54 +03:00
pci_read_config_dword ( pdev , PCI_CFG_HFS_2 , & reg ) ;
2016-02-07 22:46:51 +02:00
trace_mei_pci_cfg_read ( & pdev - > dev , " PCI_CFG_HFS_2 " , PCI_CFG_HFS_2 , reg ) ;
2014-05-13 01:30:54 +03:00
/* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
return ( reg & 0x600 ) = = 0x200 ;
}
# define MEI_CFG_FW_NM \
. quirk_probe = mei_me_fw_type_nm
static bool mei_me_fw_type_sps ( struct pci_dev * pdev )
{
u32 reg ;
2016-07-20 10:24:02 +03:00
unsigned int devfn ;
/*
* Read ME FW Status register to check for SPS Firmware
* The SPS FW is only signaled in pci function 0
*/
devfn = PCI_DEVFN ( PCI_SLOT ( pdev - > devfn ) , 0 ) ;
pci_bus_read_config_dword ( pdev - > bus , devfn , PCI_CFG_HFS_1 , & reg ) ;
2016-02-07 22:46:51 +02:00
trace_mei_pci_cfg_read ( & pdev - > dev , " PCI_CFG_HFS_1 " , PCI_CFG_HFS_1 , reg ) ;
2014-05-13 01:30:54 +03:00
/* if bits [19:16] = 15, running SPS Firmware */
return ( reg & 0xf0000 ) = = 0xf0000 ;
}
# define MEI_CFG_FW_SPS \
. quirk_probe = mei_me_fw_type_sps
/* ICH legacy: no firmware status registers exposed in PCI config space */
#define MEI_CFG_ICH_HFS                         \
	.fw_status.count = 0

/* ICH10: a single firmware status register (HFS_1) */
#define MEI_CFG_ICH10_HFS                       \
	.fw_status.count = 1,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1

/* PCH: two firmware status registers (HFS_1, HFS_2) */
#define MEI_CFG_PCH_HFS                         \
	.fw_status.count = 2,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2

/* PCH8 and newer: all six firmware status registers */
#define MEI_CFG_PCH8_HFS                        \
	.fw_status.count = 6,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2,   \
	.fw_status.status[2] = PCI_CFG_HFS_3,   \
	.fw_status.status[3] = PCI_CFG_HFS_4,   \
	.fw_status.status[4] = PCI_CFG_HFS_5,   \
	.fw_status.status[5] = PCI_CFG_HFS_6

/* DMA ring sizes: 128K host and device buffers, one page of control data */
#define MEI_CFG_DMA_128 \
	.dma_size[DMA_DSCR_HOST] = SZ_128K, \
	.dma_size[DMA_DSCR_DEVICE] = SZ_128K, \
	.dma_size[DMA_DSCR_CTRL] = PAGE_SIZE
/* ICH Legacy devices */
static const struct mei_cfg mei_me_ich_cfg = {
	MEI_CFG_ICH_HFS,
};

/* ICH devices */
static const struct mei_cfg mei_me_ich10_cfg = {
	MEI_CFG_ICH10_HFS,
};

/* PCH devices */
static const struct mei_cfg mei_me_pch_cfg = {
	MEI_CFG_PCH_HFS,
};

/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
static const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_NM,
};

/* PCH8 Lynx Point and newer devices */
static const struct mei_cfg mei_me_pch8_cfg = {
	MEI_CFG_PCH8_HFS,
};

/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
static const struct mei_cfg mei_me_pch8_sps_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_SPS,
};

/* Cannon Lake and newer devices */
static const struct mei_cfg mei_me_pch12_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_DMA_128,
};
/*
 * mei_cfg_list - A list of platform platform specific configurations.
 * Note: has to be synchronized with enum mei_cfg_idx.
 */
static const struct mei_cfg *const mei_cfg_list[] = {
	[MEI_ME_UNDEF_CFG] = NULL,
	[MEI_ME_ICH_CFG] = &mei_me_ich_cfg,
	[MEI_ME_ICH10_CFG] = &mei_me_ich10_cfg,
	[MEI_ME_PCH_CFG] = &mei_me_pch_cfg,
	[MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
	[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
	[MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
	[MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
};
/**
 * mei_me_get_cfg - return the per-generation device configuration
 *
 * @idx: index into mei_cfg_list, taken from the pci_device_id driver_data
 *
 * Return: the matching struct mei_cfg pointer, or NULL for an
 *         out-of-range or undefined index
 */
const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
{
	BUILD_BUG_ON(ARRAY_SIZE(mei_cfg_list) != MEI_ME_NUM_CFG);

	if (idx >= MEI_ME_NUM_CFG)
		return NULL;

	return mei_cfg_list[idx];
}
/* fix: dropped the stray ';' after the closing brace - an empty
 * file-scope declaration is not valid ISO C (gcc -pedantic warns) */
2013-02-06 14:06:40 +02:00
/**
2013-04-05 01:05:05 +09:00
* mei_me_dev_init - allocates and initializes the mei device structure
2013-02-06 14:06:40 +02:00
*
* @ pdev : The pci device structure
2014-05-13 01:30:53 +03:00
* @ cfg : per device generation config
2013-02-06 14:06:40 +02:00
*
2017-01-26 17:16:26 +02:00
* Return : The mei_device pointer on success , NULL on failure .
2013-02-06 14:06:40 +02:00
*/
2014-05-13 01:30:53 +03:00
struct mei_device * mei_me_dev_init ( struct pci_dev * pdev ,
const struct mei_cfg * cfg )
2013-02-06 14:06:40 +02:00
{
struct mei_device * dev ;
2014-09-29 16:31:45 +03:00
struct mei_me_hw * hw ;
2018-11-22 13:11:36 +02:00
int i ;
2013-02-06 14:06:40 +02:00
2017-01-26 17:16:26 +02:00
dev = devm_kzalloc ( & pdev - > dev , sizeof ( struct mei_device ) +
sizeof ( struct mei_me_hw ) , GFP_KERNEL ) ;
2013-02-06 14:06:40 +02:00
if ( ! dev )
return NULL ;
2018-11-22 13:11:36 +02:00
2014-09-29 16:31:45 +03:00
hw = to_me_hw ( dev ) ;
2013-02-06 14:06:40 +02:00
2018-11-22 13:11:36 +02:00
for ( i = 0 ; i < DMA_DSCR_NUM ; i + + )
dev - > dr_dscr [ i ] . size = cfg - > dma_size [ i ] ;
2014-09-29 16:31:41 +03:00
mei_device_init ( dev , & pdev - > dev , & mei_me_hw_ops ) ;
2014-09-29 16:31:45 +03:00
hw - > cfg = cfg ;
2018-11-22 13:11:36 +02:00
2013-02-06 14:06:40 +02:00
return dev ;
}
2013-02-06 14:06:42 +02:00