/*
 * PowerNV OPAL asynchronous completion interfaces
 *
 * Copyright 2013-2017 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/gfp.h>
#include <linux/of.h>

#include <asm/machdep.h>
#include <asm/opal.h>

enum opal_async_token_state {
	ASYNC_TOKEN_UNALLOCATED = 0,
	ASYNC_TOKEN_ALLOCATED,
	ASYNC_TOKEN_DISPATCHED,
	ASYNC_TOKEN_ABANDONED,
	ASYNC_TOKEN_COMPLETED
};

struct opal_async_token {
	enum opal_async_token_state state;
	struct opal_msg response;
};
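
/*
 * Token state transitions, as implemented below:
 *
 *	UNALLOCATED -> ALLOCATED	__opal_async_get_token()
 *	ALLOCATED -> DISPATCHED		opal_async_wait_response_interruptible()
 *	ALLOCATED -> UNALLOCATED	__opal_async_release_token()
 *	COMPLETED -> UNALLOCATED	__opal_async_release_token()
 *	DISPATCHED -> ABANDONED		__opal_async_release_token()
 *	ALLOCATED/DISPATCHED/ABANDONED -> COMPLETED	opal_async_comp_event()
 *
 * An ABANDONED token is released back to UNALLOCATED by
 * opal_async_comp_event() itself once the response finally arrives.
 */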

static DECLARE_WAIT_QUEUE_HEAD(opal_async_wait);
static DEFINE_SPINLOCK(opal_async_comp_lock);
static struct semaphore opal_async_sem;
static unsigned int opal_max_async_tokens;
static struct opal_async_token *opal_async_tokens;

static int __opal_async_get_token(void)
{
	unsigned long flags;
	int i, token = -EBUSY;

	spin_lock_irqsave(&opal_async_comp_lock, flags);

	for (i = 0; i < opal_max_async_tokens; i++) {
		if (opal_async_tokens[i].state == ASYNC_TOKEN_UNALLOCATED) {
			opal_async_tokens[i].state = ASYNC_TOKEN_ALLOCATED;
			token = i;
			break;
		}
	}

	spin_unlock_irqrestore(&opal_async_comp_lock, flags);
	return token;
}

/*
 * Note: If the returned token is used in an opal call and opal returns
 * OPAL_ASYNC_COMPLETION you MUST call one of opal_async_wait_response() or
 * opal_async_wait_response_interruptible() at least once before calling
 * another opal_async_* function.
 */
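
/*
 * Typical caller flow, as a rough sketch (opal_do_something_async() below is
 * a hypothetical stand-in for whichever OPAL call the token is passed to):
 *
 *	token = opal_async_get_token_interruptible();
 *	if (token < 0)
 *		return token;
 *
 *	rc = opal_do_something_async(..., token);
 *	if (rc == OPAL_ASYNC_COMPLETION)
 *		rc = opal_async_wait_response(token, &msg);
 *
 *	opal_async_release_token(token);
 */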
int opal_async_get_token_interruptible(void)
{
	int token;

	/* Wait until a token is available */
	if (down_interruptible(&opal_async_sem))
		return -ERESTARTSYS;

	token = __opal_async_get_token();
	if (token < 0)
		up(&opal_async_sem);

	return token;
}
EXPORT_SYMBOL_GPL(opal_async_get_token_interruptible);

static int __opal_async_release_token(int token)
{
	unsigned long flags;
	int rc;

	if (token < 0 || token >= opal_max_async_tokens) {
		pr_err("%s: Passed token is out of range, token %d\n",
				__func__, token);
		return -EINVAL;
	}

	spin_lock_irqsave(&opal_async_comp_lock, flags);
	switch (opal_async_tokens[token].state) {
	case ASYNC_TOKEN_COMPLETED:
	case ASYNC_TOKEN_ALLOCATED:
		opal_async_tokens[token].state = ASYNC_TOKEN_UNALLOCATED;
		rc = 0;
		break;
	/*
	 * DISPATCHED and ABANDONED tokens must wait for OPAL to respond.
	 * Mark a DISPATCHED token as ABANDONED so that the response handling
	 * code knows no one cares and that it can free it then.
	 */
	case ASYNC_TOKEN_DISPATCHED:
		opal_async_tokens[token].state = ASYNC_TOKEN_ABANDONED;
		/* Fall through */
	default:
		rc = 1;
	}
	spin_unlock_irqrestore(&opal_async_comp_lock, flags);

	return rc;
}

int opal_async_release_token(int token)
{
	int ret;

	ret = __opal_async_release_token(token);
	if (!ret)
		up(&opal_async_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(opal_async_release_token);

int opal_async_wait_response(uint64_t token, struct opal_msg *msg)
{
	if (token >= opal_max_async_tokens) {
		pr_err("%s: Invalid token passed\n", __func__);
		return -EINVAL;
	}

	if (!msg) {
		pr_err("%s: Invalid message pointer passed\n", __func__);
		return -EINVAL;
	}

	/*
	 * There is no need to mark the token as dispatched, wait_event()
	 * will block until the token completes.
	 *
	 * Wakeup the poller before we wait for events to speed things
	 * up on platforms or simulators where the interrupts aren't
	 * functional.
	 */
	opal_wake_poller();
	wait_event(opal_async_wait, opal_async_tokens[token].state
			== ASYNC_TOKEN_COMPLETED);
	memcpy(msg, &opal_async_tokens[token].response, sizeof(*msg));

	return 0;
}
EXPORT_SYMBOL_GPL(opal_async_wait_response);

int opal_async_wait_response_interruptible(uint64_t token, struct opal_msg *msg)
{
	unsigned long flags;
	int ret;

	if (token >= opal_max_async_tokens) {
		pr_err("%s: Invalid token passed\n", __func__);
		return -EINVAL;
	}

	if (!msg) {
		pr_err("%s: Invalid message pointer passed\n", __func__);
		return -EINVAL;
	}

	/*
	 * The first time this gets called we mark the token as DISPATCHED
	 * so that if wait_event_interruptible() returns not zero and the
	 * caller frees the token, we know not to actually free the token
	 * until the response comes.
	 *
	 * Only change if the token is ALLOCATED - it may have been
	 * completed even before the caller gets around to calling this
	 * the first time.
	 *
	 * There is also a dirty great comment at the token allocation
	 * function that if the opal call returns OPAL_ASYNC_COMPLETION to
	 * the caller then the caller *must* call this or the not
	 * interruptible version before doing anything else with the
	 * token.
	 */
	if (opal_async_tokens[token].state == ASYNC_TOKEN_ALLOCATED) {
		spin_lock_irqsave(&opal_async_comp_lock, flags);
		if (opal_async_tokens[token].state == ASYNC_TOKEN_ALLOCATED)
			opal_async_tokens[token].state = ASYNC_TOKEN_DISPATCHED;
		spin_unlock_irqrestore(&opal_async_comp_lock, flags);
	}

	/*
	 * Wakeup the poller before we wait for events to speed things
	 * up on platforms or simulators where the interrupts aren't
	 * functional.
	 */
	opal_wake_poller();
	ret = wait_event_interruptible(opal_async_wait,
			opal_async_tokens[token].state ==
			ASYNC_TOKEN_COMPLETED);
	if (!ret)
		memcpy(msg, &opal_async_tokens[token].response, sizeof(*msg));

	return ret;
}
EXPORT_SYMBOL_GPL(opal_async_wait_response_interruptible);

/* Called from interrupt context */
static int opal_async_comp_event(struct notifier_block *nb,
		unsigned long msg_type, void *msg)
{
	struct opal_msg *comp_msg = msg;
	enum opal_async_token_state state;
	unsigned long flags;
	uint64_t token;

	if (msg_type != OPAL_MSG_ASYNC_COMP)
		return 0;

	token = be64_to_cpu(comp_msg->params[0]);
	spin_lock_irqsave(&opal_async_comp_lock, flags);
	state = opal_async_tokens[token].state;
	opal_async_tokens[token].state = ASYNC_TOKEN_COMPLETED;
	spin_unlock_irqrestore(&opal_async_comp_lock, flags);

	if (state == ASYNC_TOKEN_ABANDONED) {
		/* Free the token, no one else will */
		opal_async_release_token(token);
		return 0;
	}

	memcpy(&opal_async_tokens[token].response, comp_msg, sizeof(*comp_msg));
	wake_up(&opal_async_wait);

	return 0;
}

static struct notifier_block opal_async_comp_nb = {
	.notifier_call	= opal_async_comp_event,
	.next		= NULL,
	.priority	= 0,
};
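
/*
 * The number of available tokens is read from the "opal-msg-async-num"
 * property of the /ibm,opal device tree node, e.g. (value illustrative only):
 *
 *	ibm,opal {
 *		opal-msg-async-num = <8>;
 *	};
 */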

int __init opal_async_comp_init(void)
{
	struct device_node *opal_node;
	const __be32 *async;
	int err;

	opal_node = of_find_node_by_path("/ibm,opal");
	if (!opal_node) {
		pr_err("%s: Opal node not found\n", __func__);
		err = -ENOENT;
		goto out;
	}

	async = of_get_property(opal_node, "opal-msg-async-num", NULL);
	if (!async) {
		pr_err("%s: %pOF has no opal-msg-async-num\n",
				__func__, opal_node);
		err = -ENOENT;
		goto out_opal_node;
	}

	opal_max_async_tokens = be32_to_cpup(async);
	opal_async_tokens = kcalloc(opal_max_async_tokens,
			sizeof(*opal_async_tokens), GFP_KERNEL);
	if (!opal_async_tokens) {
		err = -ENOMEM;
		goto out_opal_node;
	}

	err = opal_message_notifier_register(OPAL_MSG_ASYNC_COMP,
			&opal_async_comp_nb);
	if (err) {
		pr_err("%s: Can't register OPAL event notifier (%d)\n",
				__func__, err);
		kfree(opal_async_tokens);
		goto out_opal_node;
	}

	sema_init(&opal_async_sem, opal_max_async_tokens);

out_opal_node:
	of_node_put(opal_node);
out:
	return err;
}