/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/dma-mapping.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

#define MSIX_VECTORS			2

struct ccp_msix {
	u32 vector;
	char name[16];
};

struct ccp_pci {
	int msix_count;
	struct ccp_msix msix[MSIX_VECTORS];
};

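/*
 * Enable MSI-X and request between 1 and MSIX_VECTORS interrupt vectors,
 * wiring each vector to the version-specific interrupt handler. On any
 * request_irq() failure, unwind the vectors already requested.
 */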
static int ccp_get_msix_irqs(struct ccp_device *ccp)
{
	struct ccp_pci *ccp_pci = ccp->dev_specific;
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct msix_entry msix_entry[MSIX_VECTORS];
	unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1;
	int v, ret;

	for (v = 0; v < ARRAY_SIZE(msix_entry); v++)
		msix_entry[v].entry = v;

	ret = pci_enable_msix_range(pdev, msix_entry, 1, v);
	if (ret < 0)
		return ret;

	ccp_pci->msix_count = ret;
	for (v = 0; v < ccp_pci->msix_count; v++) {
		/* Set the interrupt names and request the irqs */
		snprintf(ccp_pci->msix[v].name, name_len, "%s-%u",
			 ccp->name, v);
		ccp_pci->msix[v].vector = msix_entry[v].vector;
		ret = request_irq(ccp_pci->msix[v].vector,
				  ccp->vdata->perform->irqhandler,
				  0, ccp_pci->msix[v].name, dev);
		if (ret) {
			dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n",
				   ret);
			goto e_irq;
		}
	}

	return 0;

e_irq:
	while (v--)
		free_irq(ccp_pci->msix[v].vector, dev);

	pci_disable_msix(pdev);

	ccp_pci->msix_count = 0;

	return ret;
}

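/* Fall back to a single MSI interrupt, using the same version-specific handler */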
static int ccp_get_msi_irq(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	ret = pci_enable_msi(pdev);
	if (ret)
		return ret;

	ccp->irq = pdev->irq;
	ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0,
			  ccp->name, dev);
	if (ret) {
		dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
		goto e_msi;
	}

	return 0;

e_msi:
	pci_disable_msi(pdev);

	return ret;
}

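/* Set up device interrupts, preferring MSI-X and falling back to MSI */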
static int ccp_get_irqs(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	int ret;

	ret = ccp_get_msix_irqs(ccp);
	if (!ret)
		return 0;

	/* Couldn't get MSI-X vectors, try MSI */
	dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
	ret = ccp_get_msi_irq(ccp);
	if (!ret)
		return 0;

	/* Couldn't get MSI interrupt */
	dev_notice(dev, "could not enable MSI (%d)\n", ret);

	return ret;
}

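/* Release whichever interrupt mode (MSI-X or MSI) was enabled */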
static void ccp_free_irqs(struct ccp_device *ccp)
{
	struct ccp_pci *ccp_pci = ccp->dev_specific;
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = to_pci_dev(dev);

	if (ccp_pci->msix_count) {
		while (ccp_pci->msix_count--)
			free_irq(ccp_pci->msix[ccp_pci->msix_count].vector,
				 dev);
		pci_disable_msix(pdev);
	} else if (ccp->irq) {
		free_irq(ccp->irq, dev);
		pci_disable_msi(pdev);
	}
	ccp->irq = 0;
}

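/*
 * Verify that the BAR named in the version data is a memory resource
 * large enough to hold the register block at the version's offset.
 */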
static int ccp_find_mmio_area(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	resource_size_t io_len;
	unsigned long io_flags;

	io_flags = pci_resource_flags(pdev, ccp->vdata->bar);
	io_len = pci_resource_len(pdev, ccp->vdata->bar);
	if ((io_flags & IORESOURCE_MEM) &&
	    (io_len >= (ccp->vdata->offset + 0x800)))
		return ccp->vdata->bar;

	return -EIO;
}

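/*
 * Probe: allocate and fill in the ccp_device, claim and map the PCI
 * resources, set the DMA mask (48-bit, falling back to 32-bit), then
 * hand off to the version-specific init routine.
 */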
static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ccp_device *ccp;
	struct ccp_pci *ccp_pci;
	struct device *dev = &pdev->dev;
	unsigned int bar;
	int ret;

	ret = -ENOMEM;
	ccp = ccp_alloc_struct(dev);
	if (!ccp)
		goto e_err;

	ccp_pci = devm_kzalloc(dev, sizeof(*ccp_pci), GFP_KERNEL);
	if (!ccp_pci)
		goto e_err;

	ccp->dev_specific = ccp_pci;
	ccp->vdata = (struct ccp_vdata *)id->driver_data;
	if (!ccp->vdata || !ccp->vdata->version) {
		ret = -ENODEV;
		dev_err(dev, "missing driver data\n");
		goto e_err;
	}
	ccp->get_irq = ccp_get_irqs;
	ccp->free_irq = ccp_free_irqs;

	ret = pci_request_regions(pdev, "ccp");
	if (ret) {
		dev_err(dev, "pci_request_regions failed (%d)\n", ret);
		goto e_err;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(dev, "pci_enable_device failed (%d)\n", ret);
		goto e_regions;
	}

	pci_set_master(pdev);

	ret = ccp_find_mmio_area(ccp);
	if (ret < 0)
		goto e_device;
	bar = ret;

	ret = -EIO;
	ccp->io_map = pci_iomap(pdev, bar, 0);
	if (!ccp->io_map) {
		dev_err(dev, "pci_iomap failed\n");
		goto e_device;
	}
	ccp->io_regs = ccp->io_map + ccp->vdata->offset;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
				ret);
			goto e_iomap;
		}
	}

	dev_set_drvdata(dev, ccp);

	if (ccp->vdata->setup)
		ccp->vdata->setup(ccp);

	ret = ccp->vdata->perform->init(ccp);
	if (ret)
		goto e_iomap;

	dev_notice(dev, "enabled\n");

	return 0;

e_iomap:
	pci_iounmap(pdev, ccp->io_map);

e_device:
	pci_disable_device(pdev);

e_regions:
	pci_release_regions(pdev);

e_err:
	dev_notice(dev, "initialization failed\n");
	return ret;
}

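/* Remove: version-specific teardown, then release the PCI resources */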
static void ccp_pci_remove(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);

	if (!ccp)
		return;

	ccp->vdata->perform->destroy(ccp);

	pci_iounmap(pdev, ccp->io_map);

	pci_disable_device(pdev);

	pci_release_regions(pdev);

	dev_notice(dev, "disabled\n");
}

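/*
 * Legacy PCI PM hooks: suspend flags the device and waits for every
 * command-queue kthread to park; resume clears the flags and wakes them.
 */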
#ifdef CONFIG_PM
static int ccp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 1;

	/* Wake all the queue kthreads to prepare for suspend */
	for (i = 0; i < ccp->cmd_q_count; i++)
		wake_up_process(ccp->cmd_q[i].kthread);

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* Wait for all queue kthreads to say they're done */
	while (!ccp_queues_suspended(ccp))
		wait_event_interruptible(ccp->suspend_queue,
					 ccp_queues_suspended(ccp));

	return 0;
}

static int ccp_pci_resume(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 0;

	/* Wake up all the kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		ccp->cmd_q[i].suspended = 0;
		wake_up_process(ccp->cmd_q[i].kthread);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return 0;
}
#endif

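/* Supported devices; driver_data selects the per-version ccp_vdata */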
static const struct pci_device_id ccp_pci_table[] = {
	{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 },
	{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&ccpv5a },
	{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&ccpv5b },
	/* Last entry must be zero */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ccp_pci_table);

static struct pci_driver ccp_pci_driver = {
	.name = "ccp",
	.id_table = ccp_pci_table,
	.probe = ccp_pci_probe,
	.remove = ccp_pci_remove,
#ifdef CONFIG_PM
	.suspend = ccp_pci_suspend,
	.resume = ccp_pci_resume,
#endif
};

int ccp_pci_init(void)
{
	return pci_register_driver(&ccp_pci_driver);
}

void ccp_pci_exit(void)
{
	pci_unregister_driver(&ccp_pci_driver);
}