/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
# include <linux/slab.h>
# include <linux/io.h>
# include <linux/module.h>
# include <linux/mutex.h>
# include <linux/errno.h>
# include <linux/err.h>
2015-02-26 15:49:09 -06:00
# include <linux/qcom_scm.h>
2010-08-27 10:01:23 -07:00
2014-08-04 18:31:43 -07:00
# include <asm/outercache.h>
2010-08-27 10:01:23 -07:00
# include <asm/cacheflush.h>
2015-02-04 16:30:46 -06:00
# define QCOM_SCM_ENOMEM -5
# define QCOM_SCM_EOPNOTSUPP -4
# define QCOM_SCM_EINVAL_ADDR -3
# define QCOM_SCM_EINVAL_ARG -2
# define QCOM_SCM_ERROR -1
# define QCOM_SCM_INTERRUPTED 1
2010-08-27 10:01:23 -07:00
2015-03-02 16:30:28 -07:00
/*
 * Per-cpu cold/warm boot selector flags for the boot-address SCM command.
 * Note the bit assignments are not sequential per cpu index.
 */
#define QCOM_SCM_FLAG_COLDBOOT_CPU0	0x00
#define QCOM_SCM_FLAG_COLDBOOT_CPU1	0x01
#define QCOM_SCM_FLAG_COLDBOOT_CPU2	0x08
#define QCOM_SCM_FLAG_COLDBOOT_CPU3	0x20

#define QCOM_SCM_FLAG_WARMBOOT_CPU0	0x04
#define QCOM_SCM_FLAG_WARMBOOT_CPU1	0x02
#define QCOM_SCM_FLAG_WARMBOOT_CPU2	0x10
#define QCOM_SCM_FLAG_WARMBOOT_CPU3	0x40

/* Pairs a boot-selector flag with the entry point currently set for it. */
struct qcom_scm_entry {
	int flag;
	void *entry;
};

/*
 * Per-cpu cache of the warm-boot entry point last programmed into the
 * secure world; used by qcom_scm_set_warm_boot_addr() to skip redundant
 * SCM calls.  Indexed by cpu number.
 */
static struct qcom_scm_entry qcom_scm_wb[] = {
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
};
2015-02-04 16:30:46 -06:00
static DEFINE_MUTEX ( qcom_scm_lock ) ;
2010-08-27 10:01:23 -07:00
/**
 * struct qcom_scm_command - one SCM command buffer
 * @len: total available memory for command and response
 * @buf_offset: start of command buffer
 * @resp_hdr_offset: start of response buffer
 * @id: command to be executed
 * @buf: buffer returned from qcom_scm_get_command_buffer()
 *
 * An SCM command is laid out in memory as follows:
 *
 *	------------------- <--- struct qcom_scm_command
 *	| command header  |
 *	------------------- <--- qcom_scm_get_command_buffer()
 *	| command buffer  |
 *	------------------- <--- struct qcom_scm_response and
 *	| response header |      qcom_scm_command_to_response()
 *	------------------- <--- qcom_scm_get_response_buffer()
 *	| response buffer |
 *	-------------------
 *
 * There can be arbitrary padding between the headers and buffers so
 * you should always use the appropriate qcom_scm_get_*_buffer() routines
 * to access the buffers in a safe manner.
 */
struct qcom_scm_command {
	/* All fields are little-endian as expected by the secure world. */
	__le32 len;
	__le32 buf_offset;
	__le32 resp_hdr_offset;
	__le32 id;
	__le32 buf[0];	/* start of variable-length payload */
};
/**
 * struct qcom_scm_response - one SCM response buffer
 * @len: total available memory for response
 * @buf_offset: start of response data relative to start of qcom_scm_response
 * @is_complete: indicates if the command has finished processing
 */
struct qcom_scm_response {
	/* All fields are little-endian as written by the secure world. */
	__le32 len;
	__le32 buf_offset;
	__le32 is_complete;
};
/**
2015-02-04 16:30:46 -06:00
* alloc_qcom_scm_command ( ) - Allocate an SCM command
2010-08-27 10:01:23 -07:00
* @ cmd_size : size of the command buffer
* @ resp_size : size of the response buffer
*
* Allocate an SCM command , including enough room for the command
* and response headers as well as the command and response buffers .
*
2015-02-04 16:30:46 -06:00
* Returns a valid & qcom_scm_command on success or % NULL if the allocation fails .
2010-08-27 10:01:23 -07:00
*/
2015-02-04 16:30:46 -06:00
static struct qcom_scm_command * alloc_qcom_scm_command ( size_t cmd_size , size_t resp_size )
2010-08-27 10:01:23 -07:00
{
2015-02-04 16:30:46 -06:00
struct qcom_scm_command * cmd ;
size_t len = sizeof ( * cmd ) + sizeof ( struct qcom_scm_response ) + cmd_size +
2010-08-27 10:01:23 -07:00
resp_size ;
2015-01-21 11:21:15 -08:00
u32 offset ;
2010-08-27 10:01:23 -07:00
cmd = kzalloc ( PAGE_ALIGN ( len ) , GFP_KERNEL ) ;
if ( cmd ) {
2015-01-21 11:21:15 -08:00
cmd - > len = cpu_to_le32 ( len ) ;
2015-02-04 16:30:46 -06:00
offset = offsetof ( struct qcom_scm_command , buf ) ;
2015-01-21 11:21:15 -08:00
cmd - > buf_offset = cpu_to_le32 ( offset ) ;
cmd - > resp_hdr_offset = cpu_to_le32 ( offset + cmd_size ) ;
2010-08-27 10:01:23 -07:00
}
return cmd ;
}
/**
 * free_qcom_scm_command() - Free an SCM command
 * @cmd: command to free
 *
 * Free an SCM command.
 */
static inline void free_qcom_scm_command(struct qcom_scm_command *cmd)
{
	kfree(cmd);
}
/**
 * qcom_scm_command_to_response() - Get a pointer to a qcom_scm_response
 * @cmd: command
 *
 * Returns a pointer to a response for a command.
 */
static inline struct qcom_scm_response *qcom_scm_command_to_response(
		const struct qcom_scm_command *cmd)
{
	/* resp_hdr_offset is a little-endian byte offset from @cmd. */
	return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
}
/**
 * qcom_scm_get_command_buffer() - Get a pointer to a command buffer
 * @cmd: command
 *
 * Returns a pointer to the command buffer of a command.
 */
static inline void *qcom_scm_get_command_buffer(const struct qcom_scm_command *cmd)
{
	/* Payload begins at the flexible buf member, right after the header. */
	return (void *)cmd->buf;
}
/**
 * qcom_scm_get_response_buffer() - Get a pointer to a response buffer
 * @rsp: response
 *
 * Returns a pointer to a response buffer of a response.
 */
static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response *rsp)
{
	/* buf_offset is a little-endian byte offset from @rsp. */
	return (void *)rsp + le32_to_cpu(rsp->buf_offset);
}
2015-02-04 16:30:46 -06:00
static int qcom_scm_remap_error ( int err )
2010-08-27 10:01:23 -07:00
{
2015-02-04 16:30:46 -06:00
pr_err ( " qcom_scm_call failed with error code %d \n " , err ) ;
2010-08-27 10:01:23 -07:00
switch ( err ) {
2015-02-04 16:30:46 -06:00
case QCOM_SCM_ERROR :
2010-08-27 10:01:23 -07:00
return - EIO ;
2015-02-04 16:30:46 -06:00
case QCOM_SCM_EINVAL_ADDR :
case QCOM_SCM_EINVAL_ARG :
2010-08-27 10:01:23 -07:00
return - EINVAL ;
2015-02-04 16:30:46 -06:00
case QCOM_SCM_EOPNOTSUPP :
2010-08-27 10:01:23 -07:00
return - EOPNOTSUPP ;
2015-02-04 16:30:46 -06:00
case QCOM_SCM_ENOMEM :
2010-08-27 10:01:23 -07:00
return - ENOMEM ;
}
return - EINVAL ;
}
/*
 * smc() - trap into the secure world with the physical address of a
 * command buffer in r2.  Retries while the secure world reports
 * QCOM_SCM_INTERRUPTED.  Returns the secure world's r0 result.
 */
static u32 smc(u32 cmd_addr)
{
	int context_id;
	register u32 r0 asm("r0") = 1;			/* "slow" SCM call type */
	register u32 r1 asm("r1") = (u32)&context_id;	/* context cookie for secure side */
	register u32 r2 asm("r2") = cmd_addr;		/* physical addr of command */

	do {
		asm volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r0")
			__asmeq("%2", "r1")
			__asmeq("%3", "r2")
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
			"smc	#0	@ switch to secure world\n"
			: "=r" (r0)
			: "r" (r0), "r" (r1), "r" (r2)
			: "r3");
		/* Secure call was preempted; re-issue until it completes. */
	} while (r0 == QCOM_SCM_INTERRUPTED);

	return r0;
}
2015-02-04 16:30:46 -06:00
/*
 * __qcom_scm_call() - issue one prepared SCM command.
 *
 * Flushes the command buffer to memory, traps into the secure world via
 * smc() and maps any negative SCM status to a standard errno.  Callers
 * serialize via qcom_scm_lock (see qcom_scm_call()).
 */
static int __qcom_scm_call(const struct qcom_scm_command *cmd)
{
	int ret;
	u32 cmd_addr = virt_to_phys(cmd);

	/*
	 * Flush the command buffer so that the secure world sees
	 * the correct data.
	 *
	 * NOTE(review): cmd->len is __le32 but is used directly as a byte
	 * count here — fine on little-endian; confirm if BE ever matters.
	 */
	__cpuc_flush_dcache_area((void *)cmd, cmd->len);
	outer_flush_range(cmd_addr, cmd_addr + cmd->len);

	ret = smc(cmd_addr);
	if (ret < 0)
		ret = qcom_scm_remap_error(ret);

	return ret;
}
2015-02-04 16:30:46 -06:00
/*
 * qcom_scm_inv_range() - invalidate [start, end) from the outer cache and
 * the inner data cache, so subsequent reads observe secure-world writes.
 * The range is first rounded out to cache-line boundaries.
 */
static void qcom_scm_inv_range(unsigned long start, unsigned long end)
{
	u32 cacheline_size, ctr;

	/* CTR bits [19:16] (DminLine) give log2 of the smallest D-cache
	 * line in words, so 4 << field yields the line size in bytes. */
	asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
	cacheline_size = 4 << ((ctr >> 16) & 0xf);

	start = round_down(start, cacheline_size);
	end = round_up(end, cacheline_size);
	outer_inv_range(start, end);
	while (start < end) {
		/* DCIMVAC: invalidate data cache line by MVA to PoC. */
		asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)
		     : "memory");
		start += cacheline_size;
	}
	/* Ensure all invalidations complete before returning. */
	dsb();
	isb();
}
2010-08-27 10:01:23 -07:00
/**
 * qcom_scm_call() - Send an SCM command
 * @svc_id: service identifier
 * @cmd_id: command identifier
 * @cmd_buf: command buffer
 * @cmd_len: length of the command buffer
 * @resp_buf: response buffer
 * @resp_len: length of the response buffer
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 *
 * A note on cache maintenance:
 * Note that any buffers that are expected to be accessed by the secure world
 * must be flushed before invoking qcom_scm_call and invalidated in the cache
 * immediately after qcom_scm_call returns. Cache maintenance on the command
 * and response buffers is taken care of by qcom_scm_call; however, callers are
 * responsible for any other cached buffers passed over to the secure world.
 */
static int qcom_scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf,
			size_t cmd_len, void *resp_buf, size_t resp_len)
{
	int ret;
	struct qcom_scm_command *cmd;
	struct qcom_scm_response *rsp;
	unsigned long start, end;

	cmd = alloc_qcom_scm_command(cmd_len, resp_len);
	if (!cmd)
		return -ENOMEM;

	/* Service and command ids are packed into a single word. */
	cmd->id = cpu_to_le32((svc_id << 10) | cmd_id);
	if (cmd_buf)
		memcpy(qcom_scm_get_command_buffer(cmd), cmd_buf, cmd_len);

	/* Only one non-atomic SCM call may be in flight at a time. */
	mutex_lock(&qcom_scm_lock);
	ret = __qcom_scm_call(cmd);
	mutex_unlock(&qcom_scm_lock);
	if (ret)
		goto out;

	rsp = qcom_scm_command_to_response(cmd);
	start = (unsigned long)rsp;

	/*
	 * Re-invalidate the response header each iteration so we observe
	 * the secure world setting is_complete.
	 */
	do {
		qcom_scm_inv_range(start, start + sizeof(*rsp));
	} while (!rsp->is_complete);

	/* Command done: invalidate the full response area before copying. */
	end = (unsigned long)qcom_scm_get_response_buffer(rsp) + resp_len;
	qcom_scm_inv_range(start, end);

	if (resp_buf)
		memcpy(resp_buf, qcom_scm_get_response_buffer(rsp), resp_len);
out:
	free_qcom_scm_command(cmd);
	return ret;
}
2015-03-02 16:30:30 -07:00
#define SCM_CLASS_REGISTER	(0x2 << 8)
#define SCM_MASK_IRQS		BIT(5)
/*
 * Pack service/command ids, call class, irq-mask bit and argument count
 * into the single word passed in r0 for a register-based atomic call.
 * Fix: the @n argument is now parenthesized — previously "(n & 0xf)"
 * would mis-bind for an expression argument such as "a|b", because &
 * binds tighter than |.
 */
#define SCM_ATOMIC(svc, cmd, n)	(((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \
				SCM_CLASS_REGISTER | \
				SCM_MASK_IRQS | \
				((n) & 0xf))

/**
 * qcom_scm_call_atomic1() - Send an atomic SCM command with one argument
 * @svc: service identifier
 * @cmd: command identifier
 * @arg1: first argument
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptable, atomic and SMP safe.
 */
static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
{
	int context_id;
	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
	register u32 r1 asm("r1") = (u32)&context_id;
	register u32 r2 asm("r2") = arg1;

	asm volatile(
		__asmeq("%0", "r0")
		__asmeq("%1", "r0")
		__asmeq("%2", "r1")
		__asmeq("%3", "r2")
#ifdef REQUIRES_SEC
		".arch_extension sec\n"
#endif
		"smc	#0	@ switch to secure world\n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2)
		: "r3");
	return r0;
}
2015-02-04 16:30:46 -06:00
/*
 * qcom_scm_get_version() - query the SCM interface version.
 *
 * The result is cached in a static variable after the first successful
 * query, so only the first call pays for the secure-world round trip.
 */
u32 qcom_scm_get_version(void)
{
	int context_id;
	static u32 version = -1;	/* -1 == not yet queried */
	register u32 r0 asm("r0");
	register u32 r1 asm("r1");

	/* NOTE(review): this unlocked fast-path read races with the locked
	 * update below; looks benign (word-sized store) — confirm. */
	if (version != -1)
		return version;

	mutex_lock(&qcom_scm_lock);

	r0 = 0x1 << 8;			/* "get version" call id */
	r1 = (u32)&context_id;
	do {
		asm volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r1")
			__asmeq("%2", "r0")
			__asmeq("%3", "r1")
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
			"smc	#0	@ switch to secure world\n"
			: "=r" (r0), "=r" (r1)
			: "r" (r0), "r" (r1)
			: "r2", "r3");
		/* Retry while the secure world reports it was interrupted. */
	} while (r0 == QCOM_SCM_INTERRUPTED);

	version = r1;	/* version is returned in r1 */
	mutex_unlock(&qcom_scm_lock);

	return version;
}
EXPORT_SYMBOL(qcom_scm_get_version);
2015-02-04 15:46:04 -06:00
2015-02-04 16:30:46 -06:00
# define QCOM_SCM_SVC_BOOT 0x1
# define QCOM_SCM_BOOT_ADDR 0x1
2015-02-04 15:46:04 -06:00
/*
* Set the cold / warm boot address for one of the CPU cores .
*/
2015-03-02 16:30:28 -07:00
static int qcom_scm_set_boot_addr ( u32 addr , int flags )
2015-02-04 15:46:04 -06:00
{
struct {
__le32 flags ;
__le32 addr ;
} cmd ;
cmd . addr = cpu_to_le32 ( addr ) ;
cmd . flags = cpu_to_le32 ( flags ) ;
2015-02-04 16:30:46 -06:00
return qcom_scm_call ( QCOM_SCM_SVC_BOOT , QCOM_SCM_BOOT_ADDR ,
2015-02-04 15:46:04 -06:00
& cmd , sizeof ( cmd ) , NULL , 0 ) ;
}
2015-03-02 16:30:28 -07:00
/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the cold boot address of the cpus. Any cpu outside the supported
 * range would be removed from the cpu present mask.
 */
int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
	int flags = 0;
	int cpu;
	/* static const: the table never changes and need not live on the
	 * stack of every call. */
	static const int scm_cb_flags[] = {
		QCOM_SCM_FLAG_COLDBOOT_CPU0,
		QCOM_SCM_FLAG_COLDBOOT_CPU1,
		QCOM_SCM_FLAG_COLDBOOT_CPU2,
		QCOM_SCM_FLAG_COLDBOOT_CPU3,
	};

	/* Simplified from (!cpus || (cpus && cpumask_empty(cpus))): the
	 * second test of cpus was redundant. */
	if (!cpus || cpumask_empty(cpus))
		return -EINVAL;

	for_each_cpu(cpu, cpus) {
		if (cpu < ARRAY_SIZE(scm_cb_flags))
			flags |= scm_cb_flags[cpu];
		else
			/* Unsupported cpu: drop it from the present mask. */
			set_cpu_present(cpu, false);
	}

	return qcom_scm_set_boot_addr(virt_to_phys(entry), flags);
}
EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
2015-03-02 16:30:29 -07:00
/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
{
	int ret;
	int flags = 0;
	int cpu;

	/*
	 * Reassign only if we are switching from hotplug entry point
	 * to cpuidle entry point or vice versa.
	 *
	 * NOTE(review): unlike the cold-boot path, cpu is not bounds-checked
	 * against ARRAY_SIZE(qcom_scm_wb) here — confirm callers never pass
	 * a mask containing cpu >= 4.
	 */
	for_each_cpu(cpu, cpus) {
		if (entry == qcom_scm_wb[cpu].entry)
			continue;
		flags |= qcom_scm_wb[cpu].flag;
	}

	/* No change in entry function */
	if (!flags)
		return 0;

	ret = qcom_scm_set_boot_addr(virt_to_phys(entry), flags);
	if (!ret) {
		/* Record the new entry only after the SCM accepted it. */
		for_each_cpu(cpu, cpus)
			qcom_scm_wb[cpu].entry = entry;
	}

	return ret;
}
EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
2015-03-02 16:30:30 -07:00
#define QCOM_SCM_CMD_TERMINATE_PC	0x2
#define QCOM_SCM_FLUSH_FLAG_MASK	0x3
/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags - Flags to flush cache
 *
 * This is an end point to power down cpu. If there was a pending interrupt,
 * the control would return from this function, otherwise, the cpu jumps to the
 * warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	/* Atomic call: usable from the cpuidle/hotplug path.  Only the
	 * cache-flush bits of @flags are forwarded. */
	qcom_scm_call_atomic1(QCOM_SCM_SVC_BOOT, QCOM_SCM_CMD_TERMINATE_PC,
			flags & QCOM_SCM_FLUSH_FLAG_MASK);
}
EXPORT_SYMBOL(qcom_scm_cpu_power_down);