/* Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/slab.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/qcom_scm.h>
#include <linux/dma-mapping.h>

#include "qcom_scm.h"

#define QCOM_SCM_FLAG_COLDBOOT_CPU0	0x00
#define QCOM_SCM_FLAG_COLDBOOT_CPU1	0x01
#define QCOM_SCM_FLAG_COLDBOOT_CPU2	0x08
#define QCOM_SCM_FLAG_COLDBOOT_CPU3	0x20

#define QCOM_SCM_FLAG_WARMBOOT_CPU0	0x04
#define QCOM_SCM_FLAG_WARMBOOT_CPU1	0x02
#define QCOM_SCM_FLAG_WARMBOOT_CPU2	0x10
#define QCOM_SCM_FLAG_WARMBOOT_CPU3	0x40

struct qcom_scm_entry {
	int flag;
	void *entry;
};

static struct qcom_scm_entry qcom_scm_wb[] = {
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
};

static DEFINE_MUTEX(qcom_scm_lock);

/**
 * struct qcom_scm_command - one SCM command buffer
 * @len: total available memory for command and response
 * @buf_offset: start of command buffer
 * @resp_hdr_offset: start of response buffer
 * @id: command to be executed
 * @buf: buffer returned from qcom_scm_get_command_buffer()
 *
 * An SCM command is laid out in memory as follows:
 *
 *	------------------- <--- struct qcom_scm_command
 *	| command header  |
 *	------------------- <--- qcom_scm_get_command_buffer()
 *	| command buffer  |
 *	------------------- <--- struct qcom_scm_response and
 *	| response header |      qcom_scm_command_to_response()
 *	------------------- <--- qcom_scm_get_response_buffer()
 *	| response buffer |
 *	-------------------
 *
 * There can be arbitrary padding between the headers and buffers so
 * you should always use the appropriate qcom_scm_get_*_buffer() routines
 * to access the buffers in a safe manner.
 */
struct qcom_scm_command {
	__le32 len;
	__le32 buf_offset;
	__le32 resp_hdr_offset;
	__le32 id;
	__le32 buf[0];
};
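
/*
 * Illustrative example (not used by the code): for a command with an
 * 8-byte payload and a 4-byte response payload, qcom_scm_call() below
 * would fill in
 *
 *	buf_offset      = sizeof(struct qcom_scm_command)           = 16
 *	resp_hdr_offset = buf_offset + 8                            = 24
 *	len             = 24 + sizeof(struct qcom_scm_response) + 4 = 40
 *
 * Nothing else should compute these offsets by hand.
 */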

/**
 * struct qcom_scm_response - one SCM response buffer
 * @len: total available memory for response
 * @buf_offset: start of response data relative to start of qcom_scm_response
 * @is_complete: indicates if the command has finished processing
 */
struct qcom_scm_response {
	__le32 len;
	__le32 buf_offset;
	__le32 is_complete;
};

/**
 * qcom_scm_command_to_response() - Get a pointer to a qcom_scm_response
 * @cmd: command
 *
 * Returns a pointer to a response for a command.
 */
static inline struct qcom_scm_response *qcom_scm_command_to_response(
		const struct qcom_scm_command *cmd)
{
	return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
}

/**
 * qcom_scm_get_command_buffer() - Get a pointer to a command buffer
 * @cmd: command
 *
 * Returns a pointer to the command buffer of a command.
 */
static inline void *qcom_scm_get_command_buffer(const struct qcom_scm_command *cmd)
{
	return (void *)cmd->buf;
}

/**
 * qcom_scm_get_response_buffer() - Get a pointer to a response buffer
 * @rsp: response
 *
 * Returns a pointer to a response buffer of a response.
 */
static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response *rsp)
{
	return (void *)rsp + le32_to_cpu(rsp->buf_offset);
}
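
/**
 * smc() - Issue an SMC instruction to jump into the secure world
 * @cmd_addr: physical address of a prepared struct qcom_scm_command
 *
 * The call is retried for as long as the secure world returns
 * QCOM_SCM_INTERRUPTED, which indicates the call was preempted before
 * completing.
 */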
static u32 smc(u32 cmd_addr)
{
	int context_id;
	register u32 r0 asm("r0") = 1;
	register u32 r1 asm("r1") = (u32)&context_id;
	register u32 r2 asm("r2") = cmd_addr;

	do {
		asm volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r0")
			__asmeq("%2", "r1")
			__asmeq("%3", "r2")
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
			"smc	#0	@ switch to secure world\n"
			: "=r" (r0)
			: "r" (r0), "r" (r1), "r" (r2)
			: "r3");
	} while (r0 == QCOM_SCM_INTERRUPTED);

	return r0;
}

/**
 * qcom_scm_call() - Send an SCM command
 * @dev: device making the SCM call, used for DMA mapping
 * @svc_id: service identifier
 * @cmd_id: command identifier
 * @cmd_buf: command buffer
 * @cmd_len: length of the command buffer
 * @resp_buf: response buffer
 * @resp_len: length of the response buffer
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 *
 * A note on cache maintenance:
 * Note that any buffers that are expected to be accessed by the secure world
 * must be flushed before invoking qcom_scm_call and invalidated in the cache
 * immediately after qcom_scm_call returns. Cache maintenance on the command
 * and response buffers is taken care of by qcom_scm_call; however, callers are
 * responsible for any other cached buffers passed over to the secure world.
 */
static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
			 const void *cmd_buf, size_t cmd_len, void *resp_buf,
			 size_t resp_len)
{
	int ret;
	struct qcom_scm_command *cmd;
	struct qcom_scm_response *rsp;
	size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len;
	dma_addr_t cmd_phys;

	cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->len = cpu_to_le32(alloc_len);
	cmd->buf_offset = cpu_to_le32(sizeof(*cmd));
	cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len);

	cmd->id = cpu_to_le32((svc_id << 10) | cmd_id);
	if (cmd_buf)
		memcpy(qcom_scm_get_command_buffer(cmd), cmd_buf, cmd_len);

	rsp = qcom_scm_command_to_response(cmd);

	cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, cmd_phys)) {
		kfree(cmd);
		return -ENOMEM;
	}

	mutex_lock(&qcom_scm_lock);
	ret = smc(cmd_phys);
	if (ret < 0)
		ret = qcom_scm_remap_error(ret);
	mutex_unlock(&qcom_scm_lock);
	if (ret)
		goto out;

	do {
		dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len,
					sizeof(*rsp), DMA_FROM_DEVICE);
	} while (!rsp->is_complete);

	if (resp_buf) {
		dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len +
					le32_to_cpu(rsp->buf_offset),
					resp_len, DMA_FROM_DEVICE);
		memcpy(resp_buf, qcom_scm_get_response_buffer(rsp),
		       resp_len);
	}
out:
	dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE);
	kfree(cmd);
	return ret;
}
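
/*
 * Usage sketch (illustrative only): callers marshal little-endian
 * arguments into the command buffer and read results back out of the
 * response buffer, e.g.
 *
 *	__le32 arg = cpu_to_le32(val);
 *	__le32 out = 0;
 *
 *	ret = qcom_scm_call(dev, svc_id, cmd_id, &arg, sizeof(arg),
 *			    &out, sizeof(out));
 *
 * where svc_id, cmd_id and val are defined by the firmware interface;
 * see __qcom_scm_is_call_available() below for a concrete example.
 */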

#define SCM_CLASS_REGISTER	(0x2 << 8)
#define SCM_MASK_IRQS		BIT(5)
#define SCM_ATOMIC(svc, cmd, n)	(((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \
				SCM_CLASS_REGISTER | \
				SCM_MASK_IRQS | \
				((n) & 0xf))
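
/*
 * SCM_ATOMIC() builds the call ID for a register-based ("atomic") call:
 * the service and command IDs are packed into bits [31:12], the
 * register-call class into bits [9:8], the mask-interrupts flag into
 * bit 5 and the argument count into bits [3:0].
 */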

/**
 * qcom_scm_call_atomic1() - Send an atomic SCM command with one argument
 * @svc: service identifier
 * @cmd: command identifier
 * @arg1: first argument
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptible, atomic and SMP safe.
 */
static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
{
	int context_id;
	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
	register u32 r1 asm("r1") = (u32)&context_id;
	register u32 r2 asm("r2") = arg1;

	asm volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r0")
			__asmeq("%2", "r1")
			__asmeq("%3", "r2")
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
			"smc	#0	@ switch to secure world\n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2)
		: "r3");
	return r0;
}

/**
 * qcom_scm_call_atomic2() - Send an atomic SCM command with two arguments
 * @svc: service identifier
 * @cmd: command identifier
 * @arg1: first argument
 * @arg2: second argument
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptible, atomic and SMP safe.
 */
static s32 qcom_scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
{
	int context_id;
	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 2);
	register u32 r1 asm("r1") = (u32)&context_id;
	register u32 r2 asm("r2") = arg1;
	register u32 r3 asm("r3") = arg2;

	asm volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r0")
			__asmeq("%2", "r1")
			__asmeq("%3", "r2")
			__asmeq("%4", "r3")
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
			"smc	#0	@ switch to secure world\n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2), "r" (r3));
	return r0;
}
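
/**
 * qcom_scm_get_version() - Query the installed SCM interface version
 *
 * The version is cached after the first successful query, so only the
 * first caller pays for a round trip to the secure world.
 */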
u32 qcom_scm_get_version(void)
{
	int context_id;
	static u32 version = -1;
	register u32 r0 asm("r0");
	register u32 r1 asm("r1");

	if (version != -1)
		return version;

	mutex_lock(&qcom_scm_lock);

	r0 = 0x1 << 8;
	r1 = (u32)&context_id;
	do {
		asm volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r1")
			__asmeq("%2", "r0")
			__asmeq("%3", "r1")
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
			"smc	#0	@ switch to secure world\n"
			: "=r" (r0), "=r" (r1)
			: "r" (r0), "r" (r1)
			: "r2", "r3");
	} while (r0 == QCOM_SCM_INTERRUPTED);

	version = r1;
	mutex_unlock(&qcom_scm_lock);

	return version;
}
EXPORT_SYMBOL(qcom_scm_get_version);

/**
 * __qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the cold boot address of the cpus. Any cpu outside the supported
 * range will be removed from the cpu present mask.
 */
int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
	int flags = 0;
	int cpu;
	int scm_cb_flags[] = {
		QCOM_SCM_FLAG_COLDBOOT_CPU0,
		QCOM_SCM_FLAG_COLDBOOT_CPU1,
		QCOM_SCM_FLAG_COLDBOOT_CPU2,
		QCOM_SCM_FLAG_COLDBOOT_CPU3,
	};

	if (!cpus || cpumask_empty(cpus))
		return -EINVAL;

	for_each_cpu(cpu, cpus) {
		if (cpu < ARRAY_SIZE(scm_cb_flags))
			flags |= scm_cb_flags[cpu];
		else
			set_cpu_present(cpu, false);
	}

	return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
				     flags, virt_to_phys(entry));
}

/**
 * __qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
 * @dev: device making the SCM call
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
				  const cpumask_t *cpus)
{
	int ret;
	int flags = 0;
	int cpu;
	struct {
		__le32 flags;
		__le32 addr;
	} cmd;

	/*
	 * Reassign only if we are switching from hotplug entry point
	 * to cpuidle entry point or vice versa.
	 */
	for_each_cpu(cpu, cpus) {
		if (entry == qcom_scm_wb[cpu].entry)
			continue;
		flags |= qcom_scm_wb[cpu].flag;
	}

	/* No change in entry function */
	if (!flags)
		return 0;

	cmd.addr = cpu_to_le32(virt_to_phys(entry));
	cmd.flags = cpu_to_le32(flags);
	ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
			    &cmd, sizeof(cmd), NULL, 0);
	if (!ret) {
		for_each_cpu(cpu, cpus)
			qcom_scm_wb[cpu].entry = entry;
	}

	return ret;
}

/**
 * __qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an end point to power down the cpu. If there was a pending
 * interrupt, the control returns from this function; otherwise, the cpu
 * jumps to the warm boot entry point set for this cpu upon reset.
 */
void __qcom_scm_cpu_power_down(u32 flags)
{
	qcom_scm_call_atomic1(QCOM_SCM_SVC_BOOT, QCOM_SCM_CMD_TERMINATE_PC,
			flags & QCOM_SCM_FLUSH_FLAG_MASK);
}
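
/*
 * Ask the secure world whether it implements a given service/command
 * pair. A negative return means the query itself failed; otherwise the
 * firmware's answer is returned as-is, with nonzero conventionally
 * meaning the call is available.
 */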
int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
{
	int ret;
	__le32 svc_cmd = cpu_to_le32((svc_id << 10) | cmd_id);
	__le32 ret_val = 0;

	ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD,
			    &svc_cmd, sizeof(svc_cmd), &ret_val,
			    sizeof(ret_val));
	if (ret)
		return ret;

	return le32_to_cpu(ret_val);
}
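
/*
 * Forward a batch of HDCP requests to the secure world. At most
 * QCOM_SCM_HDCP_MAX_REQ_CNT requests fit in one SCM call; larger
 * batches are rejected with -ERANGE.
 */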
int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
			u32 req_cnt, u32 *resp)
{
	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	return qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP,
		req, req_cnt * sizeof(*req), resp, sizeof(*resp));
}