/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2013-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hw-txe.h"

static const struct pci_device_id mei_txe_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, 0x0F18)}, /* Baytrail */
        {PCI_VDEVICE(INTEL, 0x2298)}, /* Cherrytrail */

        {0, }
};
MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl);

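/*
 * When CONFIG_PM is disabled the PM domain helpers below become empty stubs,
 * so the probe and remove paths can call them unconditionally.
 */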
#ifdef CONFIG_PM
static inline void mei_txe_set_pm_domain(struct mei_device *dev);
static inline void mei_txe_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_txe_set_pm_domain(struct mei_device *dev) {}
static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */
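
/**
 * mei_txe_pci_iounmap - unmap the device memory BARs mapped with pci_iomap()
 *
 * @pdev: PCI device structure
 * @hw: txe hardware structure
 */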
static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw)
{
        int i;

        for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
                if (hw->mem_addr[i]) {
                        pci_iounmap(pdev, hw->mem_addr[i]);
                        hw->mem_addr[i] = NULL;
                }
        }
}

/**
 * mei_txe_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_txe_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct mei_device *dev;
        struct mei_txe_hw *hw;
        int err;
        int i;

        /* enable pci dev */
        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "failed to enable pci device.\n");
                goto end;
        }
        /* set PCI host mastering */
        pci_set_master(pdev);
        /* pci request regions for mei driver */
        err = pci_request_regions(pdev, KBUILD_MODNAME);
        if (err) {
                dev_err(&pdev->dev, "failed to get pci regions.\n");
                goto disable_device;
        }

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
        if (err) {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "No suitable DMA available.\n");
                        goto release_regions;
                }
        }

        /* allocates and initializes the mei dev structure */
        dev = mei_txe_dev_init(pdev);
        if (!dev) {
                err = -ENOMEM;
                goto release_regions;
        }
        hw = to_txe_hw(dev);

        /* mapping IO device memory */
        for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
                hw->mem_addr[i] = pci_iomap(pdev, i, 0);
                if (!hw->mem_addr[i]) {
                        dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
                        err = -ENOMEM;
                        goto free_device;
                }
        }
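
        /*
         * Interrupt setup: MSI is tried first. With MSI only the threaded
         * handler is installed (IRQF_ONESHOT); if MSI could not be enabled
         * the interrupt is requested as a shared legacy line with a quick
         * handler in front of the threaded one.
         */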
        pci_enable_msi(pdev);

        /* clear spurious interrupts */
        mei_clear_interrupts(dev);

        /* request and enable interrupt */
        if (pci_dev_msi_enabled(pdev))
                err = request_threaded_irq(pdev->irq,
                        NULL,
                        mei_txe_irq_thread_handler,
                        IRQF_ONESHOT, KBUILD_MODNAME, dev);
        else
                err = request_threaded_irq(pdev->irq,
                        mei_txe_irq_quick_handler,
                        mei_txe_irq_thread_handler,
                        IRQF_SHARED, KBUILD_MODNAME, dev);
        if (err) {
                dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
                        pdev->irq);
                goto free_device;
        }

        if (mei_start(dev)) {
                dev_err(&pdev->dev, "init hw failure.\n");
                err = -ENODEV;
                goto release_irq;
        }

        pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
        pm_runtime_use_autosuspend(&pdev->dev);

        err = mei_register(dev, &pdev->dev);
        if (err)
                goto stop;

        pci_set_drvdata(pdev, dev);

        /*
         * For non-wakeable hardware the runtime PM framework
         * can't be used at the PCI device level.
         * Use PM domain runtime callbacks instead.
         */
        if (!pci_dev_run_wake(pdev))
                mei_txe_set_pm_domain(dev);

        pm_runtime_put_noidle(&pdev->dev);

        return 0;

stop:
        mei_stop(dev);

release_irq:
        mei_cancel_work(dev);

        /* disable interrupts */
        mei_disable_interrupts(dev);

        free_irq(pdev->irq, dev);
        pci_disable_msi(pdev);

free_device:
        mei_txe_pci_iounmap(pdev, hw);

        kfree(dev);
release_regions:
        pci_release_regions(pdev);
disable_device:
        pci_disable_device(pdev);
end:
        dev_err(&pdev->dev, "initialization failed.\n");

        return err;
}

/**
 * mei_txe_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_txe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_txe_remove(struct pci_dev *pdev)
{
        struct mei_device *dev;
        struct mei_txe_hw *hw;

        dev = pci_get_drvdata(pdev);
        if (!dev) {
                dev_err(&pdev->dev, "mei: dev =NULL\n");
                return;
        }

        pm_runtime_get_noresume(&pdev->dev);

        hw = to_txe_hw(dev);

        mei_stop(dev);

        if (!pci_dev_run_wake(pdev))
                mei_txe_unset_pm_domain(dev);

        /* disable interrupts */
        mei_disable_interrupts(dev);
        free_irq(pdev->irq, dev);
        pci_disable_msi(pdev);

        pci_set_drvdata(pdev, NULL);

        mei_txe_pci_iounmap(pdev, hw);

        mei_deregister(dev);

        kfree(dev);

        pci_release_regions(pdev);
        pci_disable_device(pdev);
}

#ifdef CONFIG_PM_SLEEP
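/**
 * mei_txe_pci_suspend - system sleep suspend callback: stop the device
 *    and release its interrupt
 *
 * @device: device pointer
 *
 * Return: 0 on success, -ENODEV if the driver data is missing
 */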
static int mei_txe_pci_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev = pci_get_drvdata(pdev);

        if (!dev)
                return -ENODEV;

        dev_dbg(&pdev->dev, "suspend\n");

        mei_stop(dev);

        mei_disable_interrupts(dev);

        free_irq(pdev->irq, dev);
        pci_disable_msi(pdev);

        return 0;
}
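
/**
 * mei_txe_pci_resume - system sleep resume callback: re-request the
 *    interrupt and restart the device
 *
 * @device: device pointer
 *
 * Return: 0 on success, a negative error code otherwise
 */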
static int mei_txe_pci_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev;
        int err;

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return -ENODEV;

        pci_enable_msi(pdev);

        mei_clear_interrupts(dev);

        /* request and enable interrupt */
        if (pci_dev_msi_enabled(pdev))
                err = request_threaded_irq(pdev->irq,
                        NULL,
                        mei_txe_irq_thread_handler,
                        IRQF_ONESHOT, KBUILD_MODNAME, dev);
        else
                err = request_threaded_irq(pdev->irq,
                        mei_txe_irq_quick_handler,
                        mei_txe_irq_thread_handler,
                        IRQF_SHARED, KBUILD_MODNAME, dev);
        if (err) {
                dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
                        pdev->irq);
                return err;
        }

        err = mei_restart(dev);

        return err;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
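/**
 * mei_txe_pm_runtime_idle - runtime idle callback
 *
 * @device: device pointer
 *
 * Return: always -EBUSY; when the write queues are idle an autosuspend
 *    is requested via pm_runtime_autosuspend() instead
 */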
static int mei_txe_pm_runtime_idle(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev;

        dev_dbg(&pdev->dev, "rpm: txe: runtime_idle\n");

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return -ENODEV;
        if (mei_write_is_idle(dev))
                pm_runtime_autosuspend(device);

        return -EBUSY;
}
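
/**
 * mei_txe_pm_runtime_suspend - runtime suspend callback
 *
 * @device: device pointer
 *
 * Return: 0 on success, -EAGAIN if the write queues are not idle,
 *    otherwise the error returned by mei_txe_aliveness_set_sync()
 */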
static int mei_txe_pm_runtime_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev;
        int ret;

        dev_dbg(&pdev->dev, "rpm: txe: runtime suspend\n");

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return -ENODEV;

        mutex_lock(&dev->device_lock);

        if (mei_write_is_idle(dev))
                ret = mei_txe_aliveness_set_sync(dev, 0);
        else
                ret = -EAGAIN;

        /*
         * If everything went well we are about to enter the PCI low
         * power state (D3), therefore we need to disable the
         * interrupts towards the host.
         * However, if the device is not wakeable, we do not enter
         * the D-low state and we need to keep the interrupts kicking.
         */
        if (!ret && pci_dev_run_wake(pdev))
                mei_disable_interrupts(dev);

        dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret);

        mutex_unlock(&dev->device_lock);

        if (ret && ret != -EAGAIN)
                schedule_work(&dev->reset_work);

        return ret;
}
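
/**
 * mei_txe_pm_runtime_resume - runtime resume callback: re-enable
 *    interrupts and restore aliveness
 *
 * @device: device pointer
 *
 * Return: 0 on success, a negative error code otherwise
 */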
static int mei_txe_pm_runtime_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev;
        int ret;

        dev_dbg(&pdev->dev, "rpm: txe: runtime resume\n");

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return -ENODEV;

        mutex_lock(&dev->device_lock);

        mei_enable_interrupts(dev);

        ret = mei_txe_aliveness_set_sync(dev, 1);

        mutex_unlock(&dev->device_lock);

        dev_dbg(&pdev->dev, "rpm: txe: runtime resume ret = %d\n", ret);

        if (ret)
                schedule_work(&dev->reset_work);

        return ret;
}

/**
 * mei_txe_set_pm_domain - fill and set pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_txe_set_pm_domain(struct mei_device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev->dev);

        if (pdev->dev.bus && pdev->dev.bus->pm) {
                dev->pg_domain.ops = *pdev->dev.bus->pm;

                dev->pg_domain.ops.runtime_suspend = mei_txe_pm_runtime_suspend;
                dev->pg_domain.ops.runtime_resume = mei_txe_pm_runtime_resume;
                dev->pg_domain.ops.runtime_idle = mei_txe_pm_runtime_idle;

                dev_pm_domain_set(&pdev->dev, &dev->pg_domain);
        }
}

/**
 * mei_txe_unset_pm_domain - clean pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_txe_unset_pm_domain(struct mei_device *dev)
{
        /* stop using pm callbacks if any */
        dev_pm_domain_set(dev->dev, NULL);
}

static const struct dev_pm_ops mei_txe_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(mei_txe_pci_suspend,
                                mei_txe_pci_resume)
        SET_RUNTIME_PM_OPS(
                mei_txe_pm_runtime_suspend,
                mei_txe_pm_runtime_resume,
                mei_txe_pm_runtime_idle)
};

#define MEI_TXE_PM_OPS  (&mei_txe_pm_ops)
#else
#define MEI_TXE_PM_OPS  NULL
#endif /* CONFIG_PM */

/*
 *  PCI driver structure
 */
static struct pci_driver mei_txe_driver = {
        .name = KBUILD_MODNAME,
        .id_table = mei_txe_pci_tbl,
        .probe = mei_txe_probe,
        .remove = mei_txe_remove,
        .shutdown = mei_txe_remove,
        .driver.pm = MEI_TXE_PM_OPS,
};

module_pci_driver(mei_txe_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Trusted Execution Environment Interface");
MODULE_LICENSE("GPL v2");