// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2020 IBM Corporation
 *
 * Author: Ashley Lai <ashleydlai@gmail.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 */
# include <linux/dma-mapping.h>
# include <linux/dmapool.h>
# include <linux/slab.h>
# include <asm/vio.h>
# include <asm/irq.h>
# include <linux/types.h>
# include <linux/list.h>
# include <linux/spinlock.h>
# include <linux/interrupt.h>
# include <linux/wait.h>
# include <asm/prom.h>
# include "tpm.h"
# include "tpm_ibmvtpm.h"
/* Name used for the driver, the chip, and the IRQ registration. */
static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";

/*
 * VIO device IDs this driver binds to: the classic vTPM 1.2 node and the
 * vTPM 2.0 node ("IBM,vtpm20"); terminated by an empty entry.
 */
static const struct vio_device_id tpm_ibmvtpm_device_table[] = {
	{ "IBM,vtpm", "IBM,vtpm" },
	{ "IBM,vtpm", "IBM,vtpm20" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);
/**
 * ibmvtpm_send_crq_word() - Send a CRQ request
 * @vdev:	vio device struct
 * @w1:	pre-constructed first word of tpm crq (second word is reserved)
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_send_crq_word(struct vio_dev *vdev, u64 w1)
{
	/* The second CRQ word is reserved and always passed as 0. */
	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, 0);
}
/**
2018-10-26 21:40:43 +03:00
* ibmvtpm_send_crq ( ) - Send a CRQ request
2016-11-23 12:04:14 +02:00
*
2012-08-22 16:17:43 -05:00
* @ vdev : vio device struct
2017-02-24 20:35:16 +01:00
* @ valid : Valid field
* @ msg : Type field
* @ len : Length field
* @ data : Data field
*
* The ibmvtpm crq is defined as follows :
*
* Byte | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7
* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
* Word0 | Valid | Type | Length | Data
* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
* Word1 | Reserved
* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
*
* Which matches the following structure ( on bigendian host ) :
*
* struct ibmvtpm_crq {
* u8 valid ;
* u8 msg ;
* __be16 len ;
* __be32 data ;
* __be64 reserved ;
* } __attribute__ ( ( packed , aligned ( 8 ) ) ) ;
*
* However , the value is passed in a register so just compute the numeric value
* to load into the register avoiding byteswap altogether . Endian only affects
* memory loads and stores - registers are internally represented the same .
2012-08-22 16:17:43 -05:00
*
2016-11-23 12:04:14 +02:00
* Return :
2017-02-24 20:35:16 +01:00
* 0 ( H_SUCCESS ) - Success
2012-08-22 16:17:43 -05:00
* Non - zero - Failure
*/
2017-02-24 20:35:16 +01:00
static int ibmvtpm_send_crq ( struct vio_dev * vdev ,
u8 valid , u8 msg , u16 len , u32 data )
2012-08-22 16:17:43 -05:00
{
2017-02-24 20:35:16 +01:00
u64 w1 = ( ( u64 ) valid < < 56 ) | ( ( u64 ) msg < < 48 ) | ( ( u64 ) len < < 32 ) |
( u64 ) data ;
return ibmvtpm_send_crq_word ( vdev , w1 ) ;
2012-08-22 16:17:43 -05:00
}
/**
 * tpm_ibmvtpm_recv - Receive data after send
 *
 * @chip:	tpm chip struct
 * @buf:	buffer to read
 * @count:	size of buffer
 *
 * Return:
 *	Number of bytes read
 */
static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	u16 len;

	/* No RTCE buffer yet: init handshake has not completed. */
	if (!ibmvtpm->rtce_buf) {
		dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
		return 0;
	}

	/* res_len was set by the interrupt path when the response arrived. */
	len = ibmvtpm->res_len;

	if (count < len) {
		dev_err(ibmvtpm->dev,
			"Invalid size in recv: count=%zd, crq_size=%d\n",
			count, len);
		return -EIO;
	}

	/* Copy the response out, then scrub the shared buffer for reuse. */
	spin_lock(&ibmvtpm->rtce_lock);
	memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
	memset(ibmvtpm->rtce_buf, 0, len);
	ibmvtpm->res_len = 0;
	spin_unlock(&ibmvtpm->rtce_lock);
	return len;
}
2020-03-19 23:27:58 -04:00
/**
* ibmvtpm_crq_send_init - Send a CRQ initialize message
* @ ibmvtpm : vtpm device struct
*
* Return :
* 0 on success .
* Non - zero on failure .
*/
static int ibmvtpm_crq_send_init ( struct ibmvtpm_dev * ibmvtpm )
{
int rc ;
rc = ibmvtpm_send_crq_word ( ibmvtpm - > vdev , INIT_CRQ_CMD ) ;
if ( rc ! = H_SUCCESS )
dev_err ( ibmvtpm - > dev ,
" %s failed rc=%d \n " , __func__ , rc ) ;
return rc ;
}
/**
 * tpm_ibmvtpm_resume - Resume from suspend
 *
 * @dev:	device struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int tpm_ibmvtpm_resume(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	int rc = 0;

	/*
	 * Re-enable the CRQ; the hypervisor may report in-progress/busy,
	 * so retry with a 100ms backoff (rc == 0 skips the first sleep).
	 */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_ENABLE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc) {
		dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
		return rc;
	}

	rc = vio_enable_interrupts(ibmvtpm->vdev);
	if (rc) {
		dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
		return rc;
	}

	/* Restart the CRQ init handshake now that the queue is back up. */
	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		dev_err(dev, "Error send_init rc=%d\n", rc);

	return rc;
}
2012-08-22 16:17:43 -05:00
/**
 * tpm_ibmvtpm_send() - Send a TPM command
 * @chip:	tpm chip struct
 * @buf:	buffer contains data to send
 * @count:	size of buffer
 *
 * Return:
 *	0 on success (including when the H_SEND_CRQ hcall fails - the
 *	failure is logged and the processing flag cleared instead),
 *	-errno on error before the command is handed to the hypervisor
 */
static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	bool retry = true;
	int rc, sig;

	/* No RTCE buffer yet: init handshake has not completed. */
	if (!ibmvtpm->rtce_buf) {
		dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
		return 0;
	}

	if (count > ibmvtpm->rtce_size) {
		dev_err(ibmvtpm->dev,
			"Invalid size in send: count=%zd, rtce_size=%d\n",
			count, ibmvtpm->rtce_size);
		return -EIO;
	}

	if (ibmvtpm->tpm_processing_cmd) {
		dev_info(ibmvtpm->dev,
			 "Need to wait for TPM to finish\n");
		/* wait for previous command to finish */
		sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
		if (sig)
			return -EINTR;
	}

	spin_lock(&ibmvtpm->rtce_lock);
	ibmvtpm->res_len = 0;
	memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);

	/*
	 * set the processing flag before the Hcall, since we may get the
	 * result (interrupt) before even being able to check rc.
	 */
	ibmvtpm->tpm_processing_cmd = 1;

again:
	rc = ibmvtpm_send_crq(ibmvtpm->vdev,
			IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND,
			count, ibmvtpm->rtce_dma_handle);
	if (rc != H_SUCCESS) {
		/*
		 * H_CLOSED can be returned after LPM resume.  Call
		 * tpm_ibmvtpm_resume() to re-enable the CRQ then retry
		 * ibmvtpm_send_crq() once before failing.
		 *
		 * NOTE(review): tpm_ibmvtpm_resume() can msleep() in its
		 * H_ENABLE_CRQ retry loop, and we are still holding the
		 * rtce_lock spinlock here - verify this path cannot sleep
		 * in atomic context.
		 */
		if (rc == H_CLOSED && retry) {
			tpm_ibmvtpm_resume(ibmvtpm->dev);
			retry = false;
			goto again;
		}
		dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
		/* Command never reached the TPM; clear the busy flag. */
		ibmvtpm->tpm_processing_cmd = 0;
	}

	spin_unlock(&ibmvtpm->rtce_lock);
	return 0;
}
/* The vTPM CRQ protocol offers no way to cancel an in-flight command. */
static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
{
}
static u8 tpm_ibmvtpm_status ( struct tpm_chip * chip )
{
2021-08-12 22:45:48 +03:00
struct ibmvtpm_dev * ibmvtpm = dev_get_drvdata ( & chip - > dev ) ;
return ibmvtpm - > tpm_processing_cmd ;
2012-08-22 16:17:43 -05:00
}
/**
* ibmvtpm_crq_get_rtce_size - Send a CRQ request to get rtce size
2016-11-23 12:04:14 +02:00
*
2012-08-22 16:17:43 -05:00
* @ ibmvtpm : vtpm device struct
*
2016-11-23 12:04:14 +02:00
* Return :
* 0 on success .
* Non - zero on failure .
2012-08-22 16:17:43 -05:00
*/
static int ibmvtpm_crq_get_rtce_size ( struct ibmvtpm_dev * ibmvtpm )
{
int rc ;
2017-02-24 20:35:16 +01:00
rc = ibmvtpm_send_crq ( ibmvtpm - > vdev ,
IBMVTPM_VALID_CMD , VTPM_GET_RTCE_BUFFER_SIZE , 0 , 0 ) ;
2012-08-22 16:17:43 -05:00
if ( rc ! = H_SUCCESS )
dev_err ( ibmvtpm - > dev ,
" ibmvtpm_crq_get_rtce_size failed rc=%d \n " , rc ) ;
return rc ;
}
/**
* ibmvtpm_crq_get_version - Send a CRQ request to get vtpm version
* - Note that this is vtpm version and not tpm version
2016-11-23 12:04:14 +02:00
*
2012-08-22 16:17:43 -05:00
* @ ibmvtpm : vtpm device struct
*
2016-11-23 12:04:14 +02:00
* Return :
* 0 on success .
* Non - zero on failure .
2012-08-22 16:17:43 -05:00
*/
static int ibmvtpm_crq_get_version ( struct ibmvtpm_dev * ibmvtpm )
{
int rc ;
2017-02-24 20:35:16 +01:00
rc = ibmvtpm_send_crq ( ibmvtpm - > vdev ,
IBMVTPM_VALID_CMD , VTPM_GET_VERSION , 0 , 0 ) ;
2012-08-22 16:17:43 -05:00
if ( rc ! = H_SUCCESS )
dev_err ( ibmvtpm - > dev ,
" ibmvtpm_crq_get_version failed rc=%d \n " , rc ) ;
return rc ;
}
/**
* ibmvtpm_crq_send_init_complete - Send a CRQ initialize complete message
* @ ibmvtpm : vtpm device struct
*
2016-11-23 12:04:14 +02:00
* Return :
* 0 on success .
* Non - zero on failure .
2012-08-22 16:17:43 -05:00
*/
static int ibmvtpm_crq_send_init_complete ( struct ibmvtpm_dev * ibmvtpm )
{
int rc ;
2017-02-24 20:35:16 +01:00
rc = ibmvtpm_send_crq_word ( ibmvtpm - > vdev , INIT_CRQ_COMP_CMD ) ;
2012-08-22 16:17:43 -05:00
if ( rc ! = H_SUCCESS )
dev_err ( ibmvtpm - > dev ,
" ibmvtpm_crq_send_init_complete failed rc=%d \n " , rc ) ;
return rc ;
}
/**
 * tpm_ibmvtpm_remove - ibm vtpm remove entry point
 * @vdev:	vio device struct
 */
static void tpm_ibmvtpm_remove(struct vio_dev *vdev)
{
	struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	int rc = 0;

	/* Unregister the chip first so no new commands come in. */
	tpm_chip_unregister(chip);

	free_irq(vdev->irq, ibmvtpm);

	/* Free the CRQ; retry with 100ms backoff while the hypervisor is busy. */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle,
			 CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)ibmvtpm->crq_queue.crq_addr);

	/* RTCE buffer only exists once the init handshake succeeded. */
	if (ibmvtpm->rtce_buf) {
		dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
				 ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
		kfree(ibmvtpm->rtce_buf);
	}

	kfree(ibmvtpm);
	/* For tpm_ibmvtpm_get_desired_dma */
	dev_set_drvdata(&vdev->dev, NULL);
}
/**
* tpm_ibmvtpm_get_desired_dma - Get DMA size needed by this driver
* @ vdev : vio device struct
*
2016-11-23 12:04:14 +02:00
* Return :
* Number of bytes the driver needs to DMA map .
2012-08-22 16:17:43 -05:00
*/
static unsigned long tpm_ibmvtpm_get_desired_dma ( struct vio_dev * vdev )
{
2016-03-31 22:57:00 +02:00
struct tpm_chip * chip = dev_get_drvdata ( & vdev - > dev ) ;
2017-03-15 01:28:07 -04:00
struct ibmvtpm_dev * ibmvtpm ;
2014-11-30 15:01:28 +01:00
2016-11-23 12:04:14 +02:00
/*
* ibmvtpm initializes at probe time , so the data we are
* asking for may not be set yet . Estimate that 4 K required
* for TCE - mapped buffer in addition to CRQ .
*/
2017-03-15 01:28:07 -04:00
if ( chip )
ibmvtpm = dev_get_drvdata ( & chip - > dev ) ;
else
2014-11-30 15:01:28 +01:00
return CRQ_RES_BUF_SIZE + PAGE_SIZE ;
2012-08-22 16:17:43 -05:00
return CRQ_RES_BUF_SIZE + ibmvtpm - > rtce_size ;
}
/**
* tpm_ibmvtpm_suspend - Suspend
* @ dev : device struct
*
2016-11-23 12:04:14 +02:00
* Return : Always 0.
2012-08-22 16:17:43 -05:00
*/
static int tpm_ibmvtpm_suspend ( struct device * dev )
{
2016-03-31 22:57:00 +02:00
struct tpm_chip * chip = dev_get_drvdata ( dev ) ;
struct ibmvtpm_dev * ibmvtpm = dev_get_drvdata ( & chip - > dev ) ;
2012-08-22 16:17:43 -05:00
int rc = 0 ;
2017-02-24 20:35:16 +01:00
rc = ibmvtpm_send_crq ( ibmvtpm - > vdev ,
IBMVTPM_VALID_CMD , VTPM_PREPARE_TO_SUSPEND , 0 , 0 ) ;
2012-08-22 16:17:43 -05:00
if ( rc ! = H_SUCCESS )
dev_err ( ibmvtpm - > dev ,
" tpm_ibmvtpm_suspend failed rc=%d \n " , rc ) ;
return rc ;
}
/**
 * ibmvtpm_reset_crq - Reset CRQ
 *
 * @ibmvtpm:	ibm vtpm struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
{
	int rc = 0;

	/* Free the stale CRQ; retry with 100ms backoff while busy. */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Start from a clean queue before re-registering it. */
	memset(ibmvtpm->crq_queue.crq_addr, 0, CRQ_RES_BUF_SIZE);
	ibmvtpm->crq_queue.index = 0;

	return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address,
				  ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
}
2013-01-22 13:52:35 -06:00
/* A zero status means no command is in flight, i.e. it was "canceled". */
static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
{
	if (status == 0)
		return true;

	return false;
}
2013-11-26 13:30:43 -07:00
/* TPM core callbacks for the vTPM transport. */
static const struct tpm_class_ops tpm_ibmvtpm = {
	.recv = tpm_ibmvtpm_recv,
	.send = tpm_ibmvtpm_send,
	.cancel = tpm_ibmvtpm_cancel,
	.status = tpm_ibmvtpm_status,
	/* Command is complete when the busy bit of status reads back as 0. */
	.req_complete_mask = 1,
	.req_complete_val = 0,
	.req_canceled = tpm_ibmvtpm_req_canceled,
};
/* Power-management hooks (partition suspend/resume, LPM). */
static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
	.suspend = tpm_ibmvtpm_suspend,
	.resume = tpm_ibmvtpm_resume,
};
/**
 * ibmvtpm_crq_get_next - Get next responded crq
 *
 * @ibmvtpm:	vtpm device struct
 *
 * Return: vtpm crq pointer or NULL.
 */
static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
{
	struct ibmvtpm_crq_queue *crq_q = &ibmvtpm->crq_queue;
	struct ibmvtpm_crq *crq = &crq_q->crq_addr[crq_q->index];

	if (crq->valid & VTPM_MSG_RES) {
		/* Consume this slot; the ring index wraps at num_entry. */
		if (++crq_q->index == crq_q->num_entry)
			crq_q->index = 0;
		/*
		 * Read barrier: order the valid-flag check before reads of
		 * the rest of the CRQ entry written by the hypervisor.
		 */
		smp_rmb();
	} else
		crq = NULL;
	return crq;
}
/**
 * ibmvtpm_crq_process - Process responded crq
 *
 * @crq:	crq to be processed
 * @ibmvtpm:	vtpm device struct
 *
 * Runs from the interrupt handler, hence the GFP_ATOMIC allocation below.
 */
static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
				struct ibmvtpm_dev *ibmvtpm)
{
	int rc = 0;

	switch (crq->valid) {
	case VALID_INIT_CRQ:
		/* CRQ init handshake responses. */
		switch (crq->msg) {
		case INIT_CRQ_RES:
			dev_info(ibmvtpm->dev, "CRQ initialized\n");
			rc = ibmvtpm_crq_send_init_complete(ibmvtpm);
			if (rc)
				dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc);
			return;
		case INIT_CRQ_COMP_RES:
			dev_info(ibmvtpm->dev,
				 "CRQ initialization completed\n");
			return;
		default:
			dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg);
			return;
		}
	case IBMVTPM_VALID_CMD:
		/* Responses to commands this driver issued. */
		switch (crq->msg) {
		case VTPM_GET_RTCE_BUFFER_SIZE_RES:
			/* CRQ fields are big-endian in memory; swap on read. */
			if (be16_to_cpu(crq->len) <= 0) {
				dev_err(ibmvtpm->dev, "Invalid rtce size\n");
				return;
			}
			ibmvtpm->rtce_size = be16_to_cpu(crq->len);
			ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
						    GFP_ATOMIC);
			if (!ibmvtpm->rtce_buf) {
				dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
				return;
			}

			ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
				ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
				DMA_BIDIRECTIONAL);

			if (dma_mapping_error(ibmvtpm->dev,
					      ibmvtpm->rtce_dma_handle)) {
				kfree(ibmvtpm->rtce_buf);
				ibmvtpm->rtce_buf = NULL;
				dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n");
			}

			return;
		case VTPM_GET_VERSION_RES:
			ibmvtpm->vtpm_version = be32_to_cpu(crq->data);
			return;
		case VTPM_TPM_COMMAND_RES:
			/* len of the data in rtce buffer */
			ibmvtpm->res_len = be16_to_cpu(crq->len);
			/* Clear busy, then wake any sender waiting in send(). */
			ibmvtpm->tpm_processing_cmd = 0;
			wake_up_interruptible(&ibmvtpm->wq);
			return;
		default:
			return;
		}
	}
	return;
}
/**
 * ibmvtpm_interrupt -	Interrupt handler
 *
 * @irq:		irq number to handle
 * @vtpm_instance:	vtpm that received interrupt
 *
 * Returns:
 *	IRQ_HANDLED
 **/
static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
{
	struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *)vtpm_instance;
	struct ibmvtpm_crq *crq;

	/* while loop is needed for initial setup (get version and
	 * get rtce_size). There should be only one tpm request at any
	 * given time.
	 */
	while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
		ibmvtpm_crq_process(crq, ibmvtpm);
		/* Wake probe(), which waits on crq_queue.wq for the rtce buf. */
		wake_up_interruptible(&ibmvtpm->crq_queue.wq);
		/* Hand the slot back to the hypervisor... */
		crq->valid = 0;
		/* ...and make the clear visible before any further CRQ use. */
		smp_wmb();
	}

	return IRQ_HANDLED;
}
/**
 * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
 *
 * @vio_dev:	vio device struct
 * @id:		vio device id struct
 *
 * Allocates the chip and CRQ, registers the CRQ with the hypervisor,
 * runs the init/version/rtce-size handshake, then registers the chip.
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
			     const struct vio_device_id *id)
{
	struct ibmvtpm_dev *ibmvtpm;
	struct device *dev = &vio_dev->dev;
	struct ibmvtpm_crq_queue *crq_q;
	struct tpm_chip *chip;
	int rc = -ENOMEM, rc1;

	/* Device-managed chip: freed automatically on probe failure. */
	chip = tpmm_chip_alloc(dev, &tpm_ibmvtpm);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
	if (!ibmvtpm) {
		dev_err(dev, "kzalloc for ibmvtpm failed\n");
		goto cleanup;
	}

	ibmvtpm->dev = dev;
	ibmvtpm->vdev = vio_dev;

	crq_q = &ibmvtpm->crq_queue;
	crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
	if (!crq_q->crq_addr) {
		dev_err(dev, "Unable to allocate memory for crq_addr\n");
		goto cleanup;
	}

	crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
	init_waitqueue_head(&crq_q->wq);
	ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
						 CRQ_RES_BUF_SIZE,
						 DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
		dev_err(dev, "dma mapping failed\n");
		goto cleanup;
	}

	rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
				ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
	/* H_RESOURCE means a stale CRQ exists (e.g. kexec); reset it. */
	if (rc == H_RESOURCE)
		rc = ibmvtpm_reset_crq(ibmvtpm);
	if (rc) {
		dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
		goto reg_crq_cleanup;
	}

	rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
			 tpm_ibmvtpm_driver_name, ibmvtpm);
	if (rc) {
		dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
		goto init_irq_cleanup;
	}

	rc = vio_enable_interrupts(vio_dev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto init_irq_cleanup;
	}

	init_waitqueue_head(&ibmvtpm->wq);

	crq_q->index = 0;

	dev_set_drvdata(&chip->dev, ibmvtpm);

	spin_lock_init(&ibmvtpm->rtce_lock);

	/* Handshake: init, then query version and RTCE buffer size. */
	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_version(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	/* The interrupt path allocates rtce_buf and wakes crq_queue.wq. */
	if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
				ibmvtpm->rtce_buf != NULL,
				HZ)) {
		rc = -ENODEV;
		dev_err(dev, "CRQ response timed out\n");
		goto init_irq_cleanup;
	}

	/* The "IBM,vtpm20" compat node advertises a TPM 2.0 device. */
	if (!strcmp(id->compat, "IBM,vtpm20"))
		chip->flags |= TPM_CHIP_FLAG_TPM2;

	rc = tpm_get_timeouts(chip);
	if (rc)
		goto init_irq_cleanup;

	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
		rc = tpm2_get_cc_attrs_tbl(chip);
		if (rc)
			goto init_irq_cleanup;
	}

	return tpm_chip_register(chip);
init_irq_cleanup:
	do {
		rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
	} while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
reg_crq_cleanup:
	dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE,
			 DMA_BIDIRECTIONAL);
cleanup:
	if (ibmvtpm) {
		if (crq_q->crq_addr)
			free_page((unsigned long)crq_q->crq_addr);
		kfree(ibmvtpm);
	}

	return rc;
}
/* VIO bus glue: entry points used when a matching vtpm node is found. */
static struct vio_driver ibmvtpm_driver = {
	.id_table	 = tpm_ibmvtpm_device_table,
	.probe		 = tpm_ibmvtpm_probe,
	.remove		 = tpm_ibmvtpm_remove,
	.get_desired_dma = tpm_ibmvtpm_get_desired_dma,
	.name		 = tpm_ibmvtpm_driver_name,
	.pm		 = &tpm_ibmvtpm_pm_ops,
};
/**
 * ibmvtpm_module_init - Initialize ibm vtpm module.
 *
 * Registers with the VIO bus, which will call tpm_ibmvtpm_probe() for
 * each matching device node.
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int __init ibmvtpm_module_init(void)
{
	return vio_register_driver(&ibmvtpm_driver);
}
/**
 * ibmvtpm_module_exit - Tear down ibm vtpm module.
 *
 * Unregistering triggers tpm_ibmvtpm_remove() for every bound device.
 */
static void __exit ibmvtpm_module_exit(void)
{
	vio_unregister_driver(&ibmvtpm_driver);
}
module_init(ibmvtpm_module_init);
module_exit(ibmvtpm_module_exit);

MODULE_AUTHOR("adlai@us.ibm.com");
MODULE_DESCRIPTION("IBM vTPM Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");