2019-03-12 00:10:41 +02:00
// SPDX-License-Identifier: GPL-2.0
2013-02-06 14:06:39 +02:00
/*
 * Copyright (c) 2003 - 2020, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */
2019-03-12 00:10:41 +02:00
2013-02-06 14:06:39 +02:00
# include <linux/module.h>
# include <linux/kernel.h>
# include <linux/device.h>
# include <linux/errno.h>
# include <linux/types.h>
# include <linux/pci.h>
# include <linux/sched.h>
# include <linux/interrupt.h>
2016-01-07 16:46:13 +01:00
# include <linux/pm_domain.h>
2014-03-18 22:52:02 +02:00
# include <linux/pm_runtime.h>
2013-02-06 14:06:39 +02:00
# include <linux/mei.h>
# include "mei_dev.h"
# include "client.h"
2014-03-11 14:49:23 +02:00
# include "hw-me-regs.h"
# include "hw-me.h"
2013-02-06 14:06:39 +02:00
/* mei_me_pci_tbl - PCI Device ID Table */
static const struct pci_device_id mei_me_pci_tbl[] = {
	/* ICH family (82946GZ .. ICH9M) */
	{MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82G35, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82G965, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, MEI_ME_ICH_CFG)},

	/* ICH10 */
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, MEI_ME_ICH10_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, MEI_ME_ICH10_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, MEI_ME_ICH10_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, MEI_ME_ICH10_CFG)},

	/* PCH generations 6/7 (Ibex Peak, Cougar Point, Panther Point) */
	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH6_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH6_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, MEI_ME_PCH_CPT_PBG_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, MEI_ME_PCH_CPT_PBG_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH7_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH7_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH7_CFG)},

	/* PCH8: _SPS_CFG entries filter out server (SPS) firmware parts */
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},

	/* Cannon Point / Comet Point: secondary (_3) functions use PCH8 cfg */
	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_3, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_V, MEI_ME_PCH12_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H, MEI_ME_PCH12_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},

	/* PCH15 parts (Tiger Point, Jasper Point, Elkhart Lake) */
	{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_JSP_N, MEI_ME_PCH15_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH15_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_CDF, MEI_ME_PCH8_CFG)},

	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl);
2013-02-06 14:06:39 +02:00
2014-12-04 22:43:07 +01:00
#ifdef CONFIG_PM
/*
 * With runtime PM enabled the driver installs a PM domain to override
 * the PCI bus runtime callbacks; definitions are further down this file.
 */
static inline void mei_me_set_pm_domain(struct mei_device *dev);
static inline void mei_me_unset_pm_domain(struct mei_device *dev);
#else
/* Without CONFIG_PM there is no PM domain to install or remove. */
static inline void mei_me_set_pm_domain(struct mei_device *dev) {}
static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */
2019-11-07 00:38:41 +02:00
/**
 * mei_me_read_fws - read a firmware status register via PCI config space
 *
 * @dev: mei device
 * @where: offset of the register in the PCI configuration space
 * @val: filled with the register value on success
 *
 * Return: 0 on success, a negative error code otherwise
 */
static int mei_me_read_fws(const struct mei_device *dev, int where, u32 *val)
{
	return pci_read_config_dword(to_pci_dev(dev->dev), where, val);
}
2013-02-06 14:06:39 +02:00
/**
2014-09-29 16:31:50 +03:00
* mei_me_quirk_probe - probe for devices that doesn ' t valid ME interface
2013-04-05 01:05:05 +09:00
*
2013-02-06 14:06:39 +02:00
* @ pdev : PCI device structure
2014-05-13 01:30:54 +03:00
* @ cfg : per generation config
2013-02-06 14:06:39 +02:00
*
2014-09-29 16:31:49 +03:00
* Return : true if ME Interface is valid , false otherwise
2013-02-06 14:06:39 +02:00
*/
2013-03-27 16:58:29 +02:00
static bool mei_me_quirk_probe ( struct pci_dev * pdev ,
2014-05-13 01:30:54 +03:00
const struct mei_cfg * cfg )
2013-02-06 14:06:39 +02:00
{
2014-05-13 01:30:54 +03:00
if ( cfg - > quirk_probe & & cfg - > quirk_probe ( pdev ) ) {
dev_info ( & pdev - > dev , " Device doesn't have valid ME Interface \n " ) ;
return false ;
2014-03-25 21:25:18 +02:00
}
2013-02-06 14:06:39 +02:00
return true ;
}
2014-05-13 01:30:54 +03:00
2013-02-06 14:06:39 +02:00
/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: matching entry in mei_me_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct mei_cfg *cfg;
	struct mei_device *dev;
	struct mei_me_hw *hw;
	unsigned int irqflags;
	int err;

	/* translate the ID-table cookie into a per-generation config */
	cfg = mei_me_get_cfg(ent->driver_data);
	if (!cfg)
		return -ENODEV;

	/* some platforms expose the PCI function without a usable ME */
	if (!mei_me_quirk_probe(pdev, cfg))
		return -ENODEV;

	/* enable pci dev; managed (pcim_*), so no explicit undo on errors */
	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions and mapping IO device memory for mei driver */
	err = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto end;
	}

	/*
	 * Prefer a 64-bit DMA mask, fall back to 32-bit.
	 * err is 0 here (from the successful iomap above), so it ends up
	 * non-zero only if the 32-bit fallback fails as well.
	 */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto end;
	}

	/* allocates and initializes the mei dev structure */
	dev = mei_me_dev_init(&pdev->dev, cfg);
	if (!dev) {
		err = -ENOMEM;
		goto end;
	}
	hw = to_me_hw(dev);
	hw->mem_addr = pcim_iomap_table(pdev)[0];
	/* firmware status registers are read through PCI config space */
	hw->read_fws = mei_me_read_fws;

	pci_enable_msi(pdev);
	hw->irq = pdev->irq;

	/* request and enable interrupt */
	irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;

	err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			irqflags, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
		       pdev->irq);
		goto end;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto stop;

	pci_set_drvdata(pdev, dev);

	/*
	 * MEI requires to resume from runtime suspend mode
	 * in order to perform link reset flow upon system suspend.
	 */
	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);

	/*
	 * ME maps runtime suspend/resume to D0i states,
	 * hence we need to go around native PCI runtime service which
	 * eventually brings the device into D3cold/hot state,
	 * but the mei device cannot wake up from D3 unlike from D0i3.
	 * To get around the PCI device native runtime pm,
	 * ME uses runtime pm domain handlers which take precedence
	 * over the driver's pm handlers.
	 */
	mei_me_set_pm_domain(dev);

	/* drop the probe-time usage count; balanced in mei_me_remove() */
	if (mei_pg_is_enabled(dev)) {
		pm_runtime_put_noidle(&pdev->dev);
		if (hw->d0i3_supported)
			pm_runtime_allow(&pdev->dev);
	}

	dev_dbg(&pdev->dev, "initialization successful.\n");

	return 0;

/* error unwind: each label releases what was acquired before the failure */
stop:
	mei_stop(dev);
release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}
2017-03-20 15:04:02 +02:00
/**
 * mei_me_shutdown - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_shutdown is called from the reboot notifier
 * it's a simplified version of remove so we go down
 * faster.
 */
static void mei_me_shutdown(struct pci_dev *pdev)
{
	struct mei_device *dev;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	dev_dbg(&pdev->dev, "shutdown\n");
	/* quiesce the device first, then tear down PM domain and irq */
	mei_stop(dev);

	mei_me_unset_pm_domain(dev);

	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
}
2013-02-06 14:06:39 +02:00
/**
 * mei_me_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_me_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	/* balance the pm_runtime_put_noidle() done in probe */
	if (mei_pg_is_enabled(dev))
		pm_runtime_get_noresume(&pdev->dev);

	dev_dbg(&pdev->dev, "stop\n");
	mei_stop(dev);

	mei_me_unset_pm_domain(dev);

	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);

	/* unregister from the mei core last, after hw is quiesced */
	mei_deregister(dev);
}
2017-01-26 17:16:26 +02:00
2014-02-18 14:31:08 +02:00
# ifdef CONFIG_PM_SLEEP
2013-03-27 16:58:29 +02:00
/* system sleep: stop the device and release irq/MSI resources */
static int mei_me_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);

	if (!dev)
		return -ENODEV;

	dev_dbg(&pdev->dev, "suspend\n");

	mei_stop(dev);

	mei_disable_interrupts(dev);

	/* irq and MSI are re-acquired in mei_me_pci_resume() */
	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return 0;
}
2013-03-27 16:58:29 +02:00
/* system resume: re-acquire MSI/irq and restart the device link */
static int mei_me_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	unsigned int irqflags;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;

	/* request and enable interrupt */
	err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			irqflags, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
				pdev->irq);
		return err;
	}

	/* re-establish the link with the firmware */
	err = mei_restart(dev);
	if (err)
		return err;

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return 0;
}
2014-03-18 22:52:02 +02:00
# endif /* CONFIG_PM_SLEEP */
2014-12-04 22:43:07 +01:00
# ifdef CONFIG_PM
2014-03-18 22:52:02 +02:00
static int mei_me_pm_runtime_idle ( struct device * device )
{
struct mei_device * dev ;
2019-07-23 20:46:27 +08:00
dev_dbg ( device , " rpm: me: runtime_idle \n " ) ;
2014-03-18 22:52:02 +02:00
2019-07-23 20:46:27 +08:00
dev = dev_get_drvdata ( device ) ;
2014-03-18 22:52:02 +02:00
if ( ! dev )
return - ENODEV ;
if ( mei_write_is_idle ( dev ) )
2014-07-17 10:53:36 +03:00
pm_runtime_autosuspend ( device ) ;
2014-03-18 22:52:02 +02:00
return - EBUSY ;
}
/* Runtime PM suspend: enter power gating (D0i) if the device is idle */
static int mei_me_pm_runtime_suspend(struct device *device)
{
	struct mei_device *dev;
	int ret;

	dev_dbg(device, "rpm: me: runtime suspend\n");

	dev = dev_get_drvdata(device);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	/* -EAGAIN tells the PM core to retry later; not an error */
	if (mei_write_is_idle(dev))
		ret = mei_me_pg_enter_sync(dev);
	else
		ret = -EAGAIN;

	mutex_unlock(&dev->device_lock);

	dev_dbg(device, "rpm: me: runtime suspend ret=%d\n", ret);

	/* any real failure to power gate requires a full device reset */
	if (ret && ret != -EAGAIN)
		schedule_work(&dev->reset_work);

	return ret;
}
/* Runtime PM resume: exit power gating (D0i) back to full operation */
static int mei_me_pm_runtime_resume(struct device *device)
{
	struct mei_device *dev;
	int ret;

	dev_dbg(device, "rpm: me: runtime resume\n");

	dev = dev_get_drvdata(device);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	ret = mei_me_pg_exit_sync(dev);

	mutex_unlock(&dev->device_lock);

	dev_dbg(device, "rpm: me: runtime resume ret = %d\n", ret);

	/* failure to exit power gating requires a full device reset */
	if (ret)
		schedule_work(&dev->reset_work);

	return ret;
}
2014-03-18 22:52:05 +02:00
/**
 * mei_me_set_pm_domain - fill and set pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_set_pm_domain(struct mei_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pdev->dev.bus && pdev->dev.bus->pm) {
		/* copy the PCI bus pm ops, then override just the runtime
		 * callbacks with the MEI D0i-aware handlers
		 */
		dev->pg_domain.ops = *pdev->dev.bus->pm;

		dev->pg_domain.ops.runtime_suspend = mei_me_pm_runtime_suspend;
		dev->pg_domain.ops.runtime_resume = mei_me_pm_runtime_resume;
		dev->pg_domain.ops.runtime_idle = mei_me_pm_runtime_idle;

		dev_pm_domain_set(&pdev->dev, &dev->pg_domain);
	}
}
/**
 * mei_me_unset_pm_domain - clean pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_unset_pm_domain(struct mei_device *dev)
{
	/* stop using pm callbacks if any */
	dev_pm_domain_set(dev->dev, NULL);
}
2014-03-18 22:52:02 +02:00
/* system sleep and runtime PM callbacks wired into one ops table */
static const struct dev_pm_ops mei_me_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend,
				mei_me_pci_resume)
	SET_RUNTIME_PM_OPS(
		mei_me_pm_runtime_suspend,
		mei_me_pm_runtime_resume,
		mei_me_pm_runtime_idle)
};

#define MEI_ME_PM_OPS	(&mei_me_pm_ops)
#else
/* no PM support compiled in: register the driver without pm ops */
#define MEI_ME_PM_OPS	NULL
#endif /* CONFIG_PM */
2013-02-06 14:06:39 +02:00
/*
 * PCI driver structure
 */
static struct pci_driver mei_me_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_me_pci_tbl,
	.probe = mei_me_probe,
	.remove = mei_me_remove,
	.shutdown = mei_me_shutdown,
	.driver.pm = MEI_ME_PM_OPS,
	/* probe asynchronously: device init may be slow at boot */
	.driver.probe_type = PROBE_PREFER_ASYNCHRONOUS,
};
2013-03-27 16:58:29 +02:00
module_pci_driver(mei_me_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");