/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) sn2-based functions.
 *
 * Architecture specific implementation of common functions.
 *
 */
2008-07-29 22:34:06 -07:00
# include <linux/delay.h>
2008-07-29 22:34:05 -07:00
# include <asm/uncached.h>
2008-07-29 22:34:16 -07:00
# include <asm/sn/mspec.h>
2008-07-29 22:34:05 -07:00
# include <asm/sn/sn_sal.h>
# include "xpc.h"
2008-07-29 22:34:13 -07:00
/*
* Define the number of u64s required to represent all the C - brick nasids
* as a bitmap . The cross - partition kernel modules deal only with
* C - brick nasids , thus the need for bitmaps which don ' t account for
* odd - numbered ( non C - brick ) nasids .
*/
# define XPC_MAX_PHYSNODES_SN2 (MAX_NUMALINK_NODES / 2)
# define XP_NASID_MASK_BYTES_SN2 ((XPC_MAX_PHYSNODES_SN2 + 7) / 8)
# define XP_NASID_MASK_WORDS_SN2 ((XPC_MAX_PHYSNODES_SN2 + 63) / 64)
/*
* Memory for XPC ' s amo variables is allocated by the MSPEC driver . These
* pages are located in the lowest granule . The lowest granule uses 4 k pages
* for cached references and an alternate TLB handler to never provide a
* cacheable mapping for the entire region . This will prevent speculative
* reading of cached copies of our lines from being issued which will cause
* a PI FSB Protocol error to be generated by the SHUB . For XPC , we need 64
* amo variables ( based on XP_MAX_NPARTITIONS_SN2 ) to identify the senders of
* NOTIFY IRQs , 128 amo variables ( based on XP_NASID_MASK_WORDS_SN2 ) to identify
* the senders of ACTIVATE IRQs , 1 amo variable to identify which remote
* partitions ( i . e . , XPCs ) consider themselves currently engaged with the
* local XPC and 1 amo variable to request partition deactivation .
*/
# define XPC_NOTIFY_IRQ_AMOS_SN2 0
# define XPC_ACTIVATE_IRQ_AMOS_SN2 (XPC_NOTIFY_IRQ_AMOS_SN2 + \
XP_MAX_NPARTITIONS_SN2 )
# define XPC_ENGAGED_PARTITIONS_AMO_SN2 (XPC_ACTIVATE_IRQ_AMOS_SN2 + \
XP_NASID_MASK_WORDS_SN2 )
# define XPC_DEACTIVATE_REQUEST_AMO_SN2 (XPC_ENGAGED_PARTITIONS_AMO_SN2 + 1)
/*
* Buffer used to store a local copy of portions of a remote partition ' s
* reserved page ( either its header and part_nasids mask , or its vars ) .
*/
static void * xpc_remote_copy_buffer_base_sn2 ;
2008-07-29 22:34:18 -07:00
static char * xpc_remote_copy_buffer_sn2 ;
2008-07-29 22:34:13 -07:00
2008-07-29 22:34:13 -07:00
static struct xpc_vars_sn2 * xpc_vars_sn2 ;
static struct xpc_vars_part_sn2 * xpc_vars_part_sn2 ;
2008-07-29 22:34:05 -07:00
2008-07-29 22:34:18 -07:00
/*
 * Per-partition setup hook for sn2. There is no sn2-specific partition
 * state to initialize, so this always succeeds.
 */
static int
xpc_setup_partitions_sn_sn2(void)
{
	return 0;	/* nothing needs to be done */
}
2009-04-02 16:59:10 -07:00
/*
 * Per-partition teardown hook for sn2; intentionally a no-op since
 * xpc_setup_partitions_sn_sn2() allocates nothing.
 */
static void
xpc_teardown_partitions_sn_sn2(void)
{
	/* nothing needs to be done */
}
2008-07-29 22:34:09 -07:00
/* SH_IPI_ACCESS shub register value on startup */
2008-07-29 22:34:13 -07:00
static u64 xpc_sh1_IPI_access_sn2 ;
static u64 xpc_sh2_IPI_access0_sn2 ;
static u64 xpc_sh2_IPI_access1_sn2 ;
static u64 xpc_sh2_IPI_access2_sn2 ;
static u64 xpc_sh2_IPI_access3_sn2 ;
2008-07-29 22:34:09 -07:00
/*
* Change protections to allow IPI operations .
*/
static void
xpc_allow_IPI_ops_sn2 ( void )
{
int node ;
int nasid ;
2008-07-29 22:34:14 -07:00
/* !!! The following should get moved into SAL. */
2008-07-29 22:34:09 -07:00
if ( is_shub2 ( ) ) {
2008-07-29 22:34:13 -07:00
xpc_sh2_IPI_access0_sn2 =
2008-07-29 22:34:09 -07:00
( u64 ) HUB_L ( ( u64 * ) LOCAL_MMR_ADDR ( SH2_IPI_ACCESS0 ) ) ;
2008-07-29 22:34:13 -07:00
xpc_sh2_IPI_access1_sn2 =
2008-07-29 22:34:09 -07:00
( u64 ) HUB_L ( ( u64 * ) LOCAL_MMR_ADDR ( SH2_IPI_ACCESS1 ) ) ;
2008-07-29 22:34:13 -07:00
xpc_sh2_IPI_access2_sn2 =
2008-07-29 22:34:09 -07:00
( u64 ) HUB_L ( ( u64 * ) LOCAL_MMR_ADDR ( SH2_IPI_ACCESS2 ) ) ;
2008-07-29 22:34:13 -07:00
xpc_sh2_IPI_access3_sn2 =
2008-07-29 22:34:09 -07:00
( u64 ) HUB_L ( ( u64 * ) LOCAL_MMR_ADDR ( SH2_IPI_ACCESS3 ) ) ;
for_each_online_node ( node ) {
nasid = cnodeid_to_nasid ( node ) ;
HUB_S ( ( u64 * ) GLOBAL_MMR_ADDR ( nasid , SH2_IPI_ACCESS0 ) ,
- 1UL ) ;
HUB_S ( ( u64 * ) GLOBAL_MMR_ADDR ( nasid , SH2_IPI_ACCESS1 ) ,
- 1UL ) ;
HUB_S ( ( u64 * ) GLOBAL_MMR_ADDR ( nasid , SH2_IPI_ACCESS2 ) ,
- 1UL ) ;
HUB_S ( ( u64 * ) GLOBAL_MMR_ADDR ( nasid , SH2_IPI_ACCESS3 ) ,
- 1UL ) ;
}
} else {
2008-07-29 22:34:13 -07:00
xpc_sh1_IPI_access_sn2 =
2008-07-29 22:34:09 -07:00
( u64 ) HUB_L ( ( u64 * ) LOCAL_MMR_ADDR ( SH1_IPI_ACCESS ) ) ;
for_each_online_node ( node ) {
nasid = cnodeid_to_nasid ( node ) ;
HUB_S ( ( u64 * ) GLOBAL_MMR_ADDR ( nasid , SH1_IPI_ACCESS ) ,
- 1UL ) ;
}
}
}
/*
* Restrict protections to disallow IPI operations .
*/
static void
xpc_disallow_IPI_ops_sn2 ( void )
{
int node ;
int nasid ;
2008-07-29 22:34:14 -07:00
/* !!! The following should get moved into SAL. */
2008-07-29 22:34:09 -07:00
if ( is_shub2 ( ) ) {
for_each_online_node ( node ) {
nasid = cnodeid_to_nasid ( node ) ;
HUB_S ( ( u64 * ) GLOBAL_MMR_ADDR ( nasid , SH2_IPI_ACCESS0 ) ,
2008-07-29 22:34:13 -07:00
xpc_sh2_IPI_access0_sn2 ) ;
2008-07-29 22:34:09 -07:00
HUB_S ( ( u64 * ) GLOBAL_MMR_ADDR ( nasid , SH2_IPI_ACCESS1 ) ,
2008-07-29 22:34:13 -07:00
xpc_sh2_IPI_access1_sn2 ) ;
2008-07-29 22:34:09 -07:00
HUB_S ( ( u64 * ) GLOBAL_MMR_ADDR ( nasid , SH2_IPI_ACCESS2 ) ,
2008-07-29 22:34:13 -07:00
xpc_sh2_IPI_access2_sn2 ) ;
2008-07-29 22:34:09 -07:00
HUB_S ( ( u64 * ) GLOBAL_MMR_ADDR ( nasid , SH2_IPI_ACCESS3 ) ,
2008-07-29 22:34:13 -07:00
xpc_sh2_IPI_access3_sn2 ) ;
2008-07-29 22:34:09 -07:00
}
} else {
for_each_online_node ( node ) {
nasid = cnodeid_to_nasid ( node ) ;
HUB_S ( ( u64 * ) GLOBAL_MMR_ADDR ( nasid , SH1_IPI_ACCESS ) ,
2008-07-29 22:34:13 -07:00
xpc_sh1_IPI_access_sn2 ) ;
2008-07-29 22:34:09 -07:00
}
}
}
2008-07-29 22:34:07 -07:00
/*
2008-07-29 22:34:10 -07:00
* The following set of functions are used for the sending and receiving of
* IRQs ( also known as IPIs ) . There are two flavors of IRQs , one that is
* associated with partition activity ( SGI_XPC_ACTIVATE ) and the other that
* is associated with channel activity ( SGI_XPC_NOTIFY ) .
2008-07-29 22:34:07 -07:00
*/
static u64
2008-07-29 22:34:11 -07:00
xpc_receive_IRQ_amo_sn2 ( struct amo * amo )
2008-07-29 22:34:07 -07:00
{
return FETCHOP_LOAD_OP ( TO_AMO ( ( u64 ) & amo - > variable ) , FETCHOP_CLEAR ) ;
}
static enum xp_retval
2008-07-29 22:34:11 -07:00
xpc_send_IRQ_sn2 ( struct amo * amo , u64 flag , int nasid , int phys_cpuid ,
int vector )
2008-07-29 22:34:07 -07:00
{
int ret = 0 ;
unsigned long irq_flags ;
local_irq_save ( irq_flags ) ;
FETCHOP_STORE_OP ( TO_AMO ( ( u64 ) & amo - > variable ) , FETCHOP_OR , flag ) ;
sn_send_IPI_phys ( nasid , phys_cpuid , vector , 0 ) ;
/*
* We must always use the nofault function regardless of whether we
* are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor . If we
* didn ' t , we ' d never know that the other partition is down and would
2008-07-29 22:34:11 -07:00
* keep sending IRQs and amos to it until the heartbeat times out .
2008-07-29 22:34:07 -07:00
*/
ret = xp_nofault_PIOR ( ( u64 * ) GLOBAL_MMR_ADDR ( NASID_GET ( & amo - > variable ) ,
xp_nofault_PIOR_target ) ) ;
local_irq_restore ( irq_flags ) ;
2008-07-29 22:34:16 -07:00
return ( ret = = 0 ) ? xpSuccess : xpPioReadError ;
2008-07-29 22:34:07 -07:00
}
2008-07-29 22:34:11 -07:00
static struct amo *
2008-07-29 22:34:10 -07:00
xpc_init_IRQ_amo_sn2 ( int index )
2008-07-29 22:34:07 -07:00
{
2008-07-29 22:34:13 -07:00
struct amo * amo = xpc_vars_sn2 - > amos_page + index ;
2008-07-29 22:34:07 -07:00
2008-07-29 22:34:11 -07:00
( void ) xpc_receive_IRQ_amo_sn2 ( amo ) ; /* clear amo variable */
2008-07-29 22:34:07 -07:00
return amo ;
}
/*
2008-07-29 22:34:10 -07:00
* Functions associated with SGI_XPC_ACTIVATE IRQ .
2008-07-29 22:34:07 -07:00
*/
2008-07-29 22:34:09 -07:00
/*
* Notify the heartbeat check thread that an activate IRQ has been received .
*/
static irqreturn_t
xpc_handle_activate_IRQ_sn2 ( int irq , void * dev_id )
{
2008-07-29 22:34:18 -07:00
unsigned long irq_flags ;
spin_lock_irqsave ( & xpc_activate_IRQ_rcvd_lock , irq_flags ) ;
xpc_activate_IRQ_rcvd + + ;
spin_unlock_irqrestore ( & xpc_activate_IRQ_rcvd_lock , irq_flags ) ;
2008-07-29 22:34:09 -07:00
wake_up_interruptible ( & xpc_activate_IRQ_wq ) ;
return IRQ_HANDLED ;
}
2008-07-29 22:34:07 -07:00
/*
2008-07-29 22:34:11 -07:00
* Flag the appropriate amo variable and send an IRQ to the specified node .
2008-07-29 22:34:07 -07:00
*/
static void
2008-07-29 22:34:16 -07:00
xpc_send_activate_IRQ_sn2 ( unsigned long amos_page_pa , int from_nasid ,
int to_nasid , int to_phys_cpuid )
2008-07-29 22:34:07 -07:00
{
2008-07-29 22:34:11 -07:00
struct amo * amos = ( struct amo * ) __va ( amos_page_pa +
2008-07-29 22:34:13 -07:00
( XPC_ACTIVATE_IRQ_AMOS_SN2 *
2008-07-29 22:34:11 -07:00
sizeof ( struct amo ) ) ) ;
2008-07-29 22:34:07 -07:00
2008-07-29 22:34:14 -07:00
( void ) xpc_send_IRQ_sn2 ( & amos [ BIT_WORD ( from_nasid / 2 ) ] ,
BIT_MASK ( from_nasid / 2 ) , to_nasid ,
2008-07-29 22:34:07 -07:00
to_phys_cpuid , SGI_XPC_ACTIVATE ) ;
}
static void
2008-07-29 22:34:10 -07:00
xpc_send_local_activate_IRQ_sn2 ( int from_nasid )
2008-07-29 22:34:07 -07:00
{
2008-07-29 22:34:18 -07:00
unsigned long irq_flags ;
2008-07-29 22:34:13 -07:00
struct amo * amos = ( struct amo * ) __va ( xpc_vars_sn2 - > amos_page_pa +
2008-07-29 22:34:13 -07:00
( XPC_ACTIVATE_IRQ_AMOS_SN2 *
2008-07-29 22:34:11 -07:00
sizeof ( struct amo ) ) ) ;
2008-07-29 22:34:07 -07:00
/* fake the sending and receipt of an activate IRQ from remote nasid */
2008-07-29 22:34:14 -07:00
FETCHOP_STORE_OP ( TO_AMO ( ( u64 ) & amos [ BIT_WORD ( from_nasid / 2 ) ] . variable ) ,
FETCHOP_OR , BIT_MASK ( from_nasid / 2 ) ) ;
2008-07-29 22:34:18 -07:00
spin_lock_irqsave ( & xpc_activate_IRQ_rcvd_lock , irq_flags ) ;
xpc_activate_IRQ_rcvd + + ;
spin_unlock_irqrestore ( & xpc_activate_IRQ_rcvd_lock , irq_flags ) ;
2008-07-29 22:34:09 -07:00
wake_up_interruptible ( & xpc_activate_IRQ_wq ) ;
2008-07-29 22:34:07 -07:00
}
2008-07-29 22:34:09 -07:00
/*
2008-07-29 22:34:10 -07:00
* Functions associated with SGI_XPC_NOTIFY IRQ .
2008-07-29 22:34:09 -07:00
*/
2008-07-29 22:34:07 -07:00
2008-07-29 22:34:09 -07:00
/*
2008-07-29 22:34:10 -07:00
* Check to see if any chctl flags were sent from the specified partition .
2008-07-29 22:34:09 -07:00
*/
2008-07-29 22:34:07 -07:00
static void
2008-07-29 22:34:10 -07:00
xpc_check_for_sent_chctl_flags_sn2 ( struct xpc_partition * part )
2008-07-29 22:34:07 -07:00
{
2008-07-29 22:34:10 -07:00
union xpc_channel_ctl_flags chctl ;
2008-07-29 22:34:09 -07:00
unsigned long irq_flags ;
2008-07-29 22:34:07 -07:00
2008-07-29 22:34:10 -07:00
chctl . all_flags = xpc_receive_IRQ_amo_sn2 ( part - > sn . sn2 .
local_chctl_amo_va ) ;
if ( chctl . all_flags = = 0 )
2008-07-29 22:34:09 -07:00
return ;
2008-07-29 22:34:10 -07:00
spin_lock_irqsave ( & part - > chctl_lock , irq_flags ) ;
part - > chctl . all_flags | = chctl . all_flags ;
spin_unlock_irqrestore ( & part - > chctl_lock , irq_flags ) ;
2008-07-29 22:34:09 -07:00
2008-07-29 22:34:10 -07:00
dev_dbg ( xpc_chan , " received notify IRQ from partid=%d, chctl.all_flags= "
" 0x%lx \n " , XPC_PARTID ( part ) , chctl . all_flags ) ;
2008-07-29 22:34:09 -07:00
xpc_wakeup_channel_mgr ( part ) ;
2008-07-29 22:34:07 -07:00
}
2008-07-29 22:34:09 -07:00
/*
* Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
* partition actually sent it . Since SGI_XPC_NOTIFY IRQs may be shared by more
2008-07-29 22:34:11 -07:00
* than one partition , we use an amo structure per partition to indicate
2008-07-29 22:34:10 -07:00
* whether a partition has sent an IRQ or not . If it has , then wake up the
2008-07-29 22:34:09 -07:00
* associated kthread to handle it .
*
2008-07-29 22:34:10 -07:00
* All SGI_XPC_NOTIFY IRQs received by XPC are the result of IRQs sent by XPC
2008-07-29 22:34:09 -07:00
* running on other partitions .
*
* Noteworthy Arguments :
*
* irq - Interrupt ReQuest number . NOT USED .
*
2008-07-29 22:34:10 -07:00
* dev_id - partid of IRQ ' s potential sender .
2008-07-29 22:34:09 -07:00
*/
static irqreturn_t
xpc_handle_notify_IRQ_sn2 ( int irq , void * dev_id )
2008-07-29 22:34:07 -07:00
{
2008-07-29 22:34:09 -07:00
short partid = ( short ) ( u64 ) dev_id ;
struct xpc_partition * part = & xpc_partitions [ partid ] ;
2008-07-29 22:34:16 -07:00
DBUG_ON ( partid < 0 | | partid > = XP_MAX_NPARTITIONS_SN2 ) ;
2008-07-29 22:34:09 -07:00
if ( xpc_part_ref ( part ) ) {
2008-07-29 22:34:10 -07:00
xpc_check_for_sent_chctl_flags_sn2 ( part ) ;
2008-07-29 22:34:09 -07:00
xpc_part_deref ( part ) ;
}
return IRQ_HANDLED ;
2008-07-29 22:34:07 -07:00
}
/*
2008-07-29 22:34:10 -07:00
* Check to see if xpc_handle_notify_IRQ_sn2 ( ) dropped any IRQs on the floor
* because the write to their associated amo variable completed after the IRQ
2008-07-29 22:34:09 -07:00
* was received .
2008-07-29 22:34:07 -07:00
*/
2008-07-29 22:34:09 -07:00
static void
2008-07-29 22:34:10 -07:00
xpc_check_for_dropped_notify_IRQ_sn2 ( struct xpc_partition * part )
2008-07-29 22:34:09 -07:00
{
struct xpc_partition_sn2 * part_sn2 = & part - > sn . sn2 ;
if ( xpc_part_ref ( part ) ) {
2008-07-29 22:34:10 -07:00
xpc_check_for_sent_chctl_flags_sn2 ( part ) ;
2008-07-29 22:34:09 -07:00
part_sn2 - > dropped_notify_IRQ_timer . expires = jiffies +
2008-07-29 22:34:10 -07:00
XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL ;
2008-07-29 22:34:09 -07:00
add_timer ( & part_sn2 - > dropped_notify_IRQ_timer ) ;
xpc_part_deref ( part ) ;
}
}
2008-07-29 22:34:07 -07:00
/*
2008-07-29 22:34:10 -07:00
* Send a notify IRQ to the remote partition that is associated with the
2008-07-29 22:34:07 -07:00
* specified channel .
*/
static void
2008-07-29 22:34:10 -07:00
xpc_send_notify_IRQ_sn2 ( struct xpc_channel * ch , u8 chctl_flag ,
char * chctl_flag_string , unsigned long * irq_flags )
2008-07-29 22:34:07 -07:00
{
struct xpc_partition * part = & xpc_partitions [ ch - > partid ] ;
2008-07-29 22:34:09 -07:00
struct xpc_partition_sn2 * part_sn2 = & part - > sn . sn2 ;
2008-07-29 22:34:10 -07:00
union xpc_channel_ctl_flags chctl = { 0 } ;
2008-07-29 22:34:07 -07:00
enum xp_retval ret ;
2008-07-29 22:34:18 -07:00
if ( likely ( part - > act_state ! = XPC_P_AS_DEACTIVATING ) ) {
2008-07-29 22:34:10 -07:00
chctl . flags [ ch - > number ] = chctl_flag ;
ret = xpc_send_IRQ_sn2 ( part_sn2 - > remote_chctl_amo_va ,
chctl . all_flags ,
part_sn2 - > notify_IRQ_nasid ,
part_sn2 - > notify_IRQ_phys_cpuid ,
2008-07-29 22:34:07 -07:00
SGI_XPC_NOTIFY ) ;
dev_dbg ( xpc_chan , " %s sent to partid=%d, channel=%d, ret=%d \n " ,
2008-07-29 22:34:10 -07:00
chctl_flag_string , ch - > partid , ch - > number , ret ) ;
2008-07-29 22:34:07 -07:00
if ( unlikely ( ret ! = xpSuccess ) ) {
if ( irq_flags ! = NULL )
spin_unlock_irqrestore ( & ch - > lock , * irq_flags ) ;
XPC_DEACTIVATE_PARTITION ( part , ret ) ;
if ( irq_flags ! = NULL )
spin_lock_irqsave ( & ch - > lock , * irq_flags ) ;
}
}
}
2008-07-29 22:34:10 -07:00
# define XPC_SEND_NOTIFY_IRQ_SN2(_ch, _ipi_f, _irq_f) \
xpc_send_notify_IRQ_sn2 ( _ch , _ipi_f , # _ipi_f , _irq_f )
2008-07-29 22:34:07 -07:00
/*
* Make it look like the remote partition , which is associated with the
2008-07-29 22:34:10 -07:00
* specified channel , sent us a notify IRQ . This faked IRQ will be handled
* by xpc_check_for_dropped_notify_IRQ_sn2 ( ) .
2008-07-29 22:34:07 -07:00
*/
static void
2008-07-29 22:34:10 -07:00
xpc_send_local_notify_IRQ_sn2 ( struct xpc_channel * ch , u8 chctl_flag ,
char * chctl_flag_string )
2008-07-29 22:34:07 -07:00
{
struct xpc_partition * part = & xpc_partitions [ ch - > partid ] ;
2008-07-29 22:34:10 -07:00
union xpc_channel_ctl_flags chctl = { 0 } ;
2008-07-29 22:34:07 -07:00
2008-07-29 22:34:10 -07:00
chctl . flags [ ch - > number ] = chctl_flag ;
FETCHOP_STORE_OP ( TO_AMO ( ( u64 ) & part - > sn . sn2 . local_chctl_amo_va - >
variable ) , FETCHOP_OR , chctl . all_flags ) ;
2008-07-29 22:34:07 -07:00
dev_dbg ( xpc_chan , " %s sent local from partid=%d, channel=%d \n " ,
2008-07-29 22:34:10 -07:00
chctl_flag_string , ch - > partid , ch - > number ) ;
2008-07-29 22:34:07 -07:00
}
2008-07-29 22:34:10 -07:00
# define XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(_ch, _ipi_f) \
xpc_send_local_notify_IRQ_sn2 ( _ch , _ipi_f , # _ipi_f )
2008-07-29 22:34:07 -07:00
static void
2008-07-29 22:34:10 -07:00
xpc_send_chctl_closerequest_sn2 ( struct xpc_channel * ch ,
unsigned long * irq_flags )
2008-07-29 22:34:07 -07:00
{
2008-07-29 22:34:18 -07:00
struct xpc_openclose_args * args = ch - > sn . sn2 . local_openclose_args ;
2008-07-29 22:34:07 -07:00
args - > reason = ch - > reason ;
2008-07-29 22:34:10 -07:00
XPC_SEND_NOTIFY_IRQ_SN2 ( ch , XPC_CHCTL_CLOSEREQUEST , irq_flags ) ;
2008-07-29 22:34:07 -07:00
}
static void
2008-07-29 22:34:10 -07:00
xpc_send_chctl_closereply_sn2 ( struct xpc_channel * ch , unsigned long * irq_flags )
2008-07-29 22:34:07 -07:00
{
2008-07-29 22:34:10 -07:00
XPC_SEND_NOTIFY_IRQ_SN2 ( ch , XPC_CHCTL_CLOSEREPLY , irq_flags ) ;
2008-07-29 22:34:07 -07:00
}
static void
2008-07-29 22:34:10 -07:00
xpc_send_chctl_openrequest_sn2 ( struct xpc_channel * ch , unsigned long * irq_flags )
2008-07-29 22:34:07 -07:00
{
2008-07-29 22:34:18 -07:00
struct xpc_openclose_args * args = ch - > sn . sn2 . local_openclose_args ;
2008-07-29 22:34:07 -07:00
2008-07-29 22:34:19 -07:00
args - > entry_size = ch - > entry_size ;
2008-07-29 22:34:07 -07:00
args - > local_nentries = ch - > local_nentries ;
2008-07-29 22:34:10 -07:00
XPC_SEND_NOTIFY_IRQ_SN2 ( ch , XPC_CHCTL_OPENREQUEST , irq_flags ) ;
2008-07-29 22:34:07 -07:00
}
static void
2008-07-29 22:34:10 -07:00
xpc_send_chctl_openreply_sn2 ( struct xpc_channel * ch , unsigned long * irq_flags )
2008-07-29 22:34:07 -07:00
{
2008-07-29 22:34:18 -07:00
struct xpc_openclose_args * args = ch - > sn . sn2 . local_openclose_args ;
2008-07-29 22:34:07 -07:00
args - > remote_nentries = ch - > remote_nentries ;
args - > local_nentries = ch - > local_nentries ;
2008-07-29 22:34:18 -07:00
args - > local_msgqueue_pa = xp_pa ( ch - > sn . sn2 . local_msgqueue ) ;
2008-07-29 22:34:10 -07:00
XPC_SEND_NOTIFY_IRQ_SN2 ( ch , XPC_CHCTL_OPENREPLY , irq_flags ) ;
2008-07-29 22:34:07 -07:00
}
static void
2008-07-29 22:34:10 -07:00
xpc_send_chctl_msgrequest_sn2 ( struct xpc_channel * ch )
2008-07-29 22:34:07 -07:00
{
2008-07-29 22:34:10 -07:00
XPC_SEND_NOTIFY_IRQ_SN2 ( ch , XPC_CHCTL_MSGREQUEST , NULL ) ;
2008-07-29 22:34:07 -07:00
}
static void
2008-07-29 22:34:10 -07:00
xpc_send_chctl_local_msgrequest_sn2 ( struct xpc_channel * ch )
2008-07-29 22:34:07 -07:00
{
2008-07-29 22:34:10 -07:00
XPC_SEND_LOCAL_NOTIFY_IRQ_SN2 ( ch , XPC_CHCTL_MSGREQUEST ) ;
2008-07-29 22:34:07 -07:00
}
2009-04-02 16:59:10 -07:00
static enum xp_retval
2008-07-29 22:34:18 -07:00
xpc_save_remote_msgqueue_pa_sn2 ( struct xpc_channel * ch ,
unsigned long msgqueue_pa )
{
ch - > sn . sn2 . remote_msgqueue_pa = msgqueue_pa ;
2009-04-02 16:59:10 -07:00
return xpSuccess ;
2008-07-29 22:34:18 -07:00
}
2008-07-29 22:34:07 -07:00
/*
* This next set of functions are used to keep track of when a partition is
* potentially engaged in accessing memory belonging to another partition .
*/
static void
2008-07-29 22:34:09 -07:00
xpc_indicate_partition_engaged_sn2 ( struct xpc_partition * part )
2008-07-29 22:34:07 -07:00
{
unsigned long irq_flags ;
2008-07-29 22:34:11 -07:00
struct amo * amo = ( struct amo * ) __va ( part - > sn . sn2 . remote_amos_page_pa +
2008-07-29 22:34:13 -07:00
( XPC_ENGAGED_PARTITIONS_AMO_SN2 *
2008-07-29 22:34:11 -07:00
sizeof ( struct amo ) ) ) ;
2008-07-29 22:34:07 -07:00
local_irq_save ( irq_flags ) ;
2008-07-29 22:34:11 -07:00
/* set bit corresponding to our partid in remote partition's amo */
2008-07-29 22:34:07 -07:00
FETCHOP_STORE_OP ( TO_AMO ( ( u64 ) & amo - > variable ) , FETCHOP_OR ,
2008-07-29 22:34:14 -07:00
BIT ( sn_partition_id ) ) ;
2008-07-29 22:34:07 -07:00
/*
* We must always use the nofault function regardless of whether we
* are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor . If we
* didn ' t , we ' d never know that the other partition is down and would
2008-07-29 22:34:11 -07:00
* keep sending IRQs and amos to it until the heartbeat times out .
2008-07-29 22:34:07 -07:00
*/
( void ) xp_nofault_PIOR ( ( u64 * ) GLOBAL_MMR_ADDR ( NASID_GET ( & amo - >
variable ) ,
xp_nofault_PIOR_target ) ) ;
local_irq_restore ( irq_flags ) ;
}
static void
2008-07-29 22:34:09 -07:00
xpc_indicate_partition_disengaged_sn2 ( struct xpc_partition * part )
2008-07-29 22:34:07 -07:00
{
2008-07-29 22:34:09 -07:00
struct xpc_partition_sn2 * part_sn2 = & part - > sn . sn2 ;
2008-07-29 22:34:07 -07:00
unsigned long irq_flags ;
2008-07-29 22:34:11 -07:00
struct amo * amo = ( struct amo * ) __va ( part_sn2 - > remote_amos_page_pa +
2008-07-29 22:34:13 -07:00
( XPC_ENGAGED_PARTITIONS_AMO_SN2 *
2008-07-29 22:34:11 -07:00
sizeof ( struct amo ) ) ) ;
2008-07-29 22:34:07 -07:00
local_irq_save ( irq_flags ) ;
2008-07-29 22:34:11 -07:00
/* clear bit corresponding to our partid in remote partition's amo */
2008-07-29 22:34:07 -07:00
FETCHOP_STORE_OP ( TO_AMO ( ( u64 ) & amo - > variable ) , FETCHOP_AND ,
2008-07-29 22:34:14 -07:00
~ BIT ( sn_partition_id ) ) ;
2008-07-29 22:34:07 -07:00
/*
* We must always use the nofault function regardless of whether we
* are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor . If we
* didn ' t , we ' d never know that the other partition is down and would
2008-07-29 22:34:11 -07:00
* keep sending IRQs and amos to it until the heartbeat times out .
2008-07-29 22:34:07 -07:00
*/
( void ) xp_nofault_PIOR ( ( u64 * ) GLOBAL_MMR_ADDR ( NASID_GET ( & amo - >
variable ) ,
xp_nofault_PIOR_target ) ) ;
local_irq_restore ( irq_flags ) ;
/*
2008-07-29 22:34:09 -07:00
* Send activate IRQ to get other side to see that we ' ve cleared our
2008-07-29 22:34:11 -07:00
* bit in their engaged partitions amo .
2008-07-29 22:34:07 -07:00
*/
2008-07-29 22:34:10 -07:00
xpc_send_activate_IRQ_sn2 ( part_sn2 - > remote_amos_page_pa ,
2008-07-29 22:34:09 -07:00
cnodeid_to_nasid ( 0 ) ,
part_sn2 - > activate_IRQ_nasid ,
part_sn2 - > activate_IRQ_phys_cpuid ) ;
2008-07-29 22:34:07 -07:00
}
2008-07-29 22:34:18 -07:00
static void
xpc_assume_partition_disengaged_sn2 ( short partid )
{
struct amo * amo = xpc_vars_sn2 - > amos_page +
XPC_ENGAGED_PARTITIONS_AMO_SN2 ;
/* clear bit(s) based on partid mask in our partition's amo */
FETCHOP_STORE_OP ( TO_AMO ( ( u64 ) & amo - > variable ) , FETCHOP_AND ,
~ BIT ( partid ) ) ;
}
2008-07-29 22:34:09 -07:00
static int
xpc_partition_engaged_sn2 ( short partid )
2008-07-29 22:34:07 -07:00
{
2008-07-29 22:34:13 -07:00
struct amo * amo = xpc_vars_sn2 - > amos_page +
XPC_ENGAGED_PARTITIONS_AMO_SN2 ;
2008-07-29 22:34:07 -07:00
2008-07-29 22:34:11 -07:00
/* our partition's amo variable ANDed with partid mask */
2008-07-29 22:34:07 -07:00
return ( FETCHOP_LOAD_OP ( TO_AMO ( ( u64 ) & amo - > variable ) , FETCHOP_LOAD ) &
2008-07-29 22:34:14 -07:00
BIT ( partid ) ) ! = 0 ;
2008-07-29 22:34:07 -07:00
}
2008-07-29 22:34:09 -07:00
static int
xpc_any_partition_engaged_sn2 ( void )
2008-07-29 22:34:07 -07:00
{
2008-07-29 22:34:13 -07:00
struct amo * amo = xpc_vars_sn2 - > amos_page +
XPC_ENGAGED_PARTITIONS_AMO_SN2 ;
2008-07-29 22:34:07 -07:00
2008-07-29 22:34:11 -07:00
/* our partition's amo variable */
2008-07-29 22:34:09 -07:00
return FETCHOP_LOAD_OP ( TO_AMO ( ( u64 ) & amo - > variable ) , FETCHOP_LOAD ) ! = 0 ;
2008-07-29 22:34:07 -07:00
}
2008-07-29 22:34:09 -07:00
/* original protection values for each node */
static u64 xpc_prot_vec_sn2 [ MAX_NUMNODES ] ;
/*
2008-07-29 22:34:11 -07:00
* Change protections to allow amo operations on non - Shub 1.1 systems .
2008-07-29 22:34:09 -07:00
*/
static enum xp_retval
2008-07-29 22:34:11 -07:00
xpc_allow_amo_ops_sn2 ( struct amo * amos_page )
2008-07-29 22:34:09 -07:00
{
2008-11-05 17:27:22 -06:00
enum xp_retval ret = xpSuccess ;
2008-07-29 22:34:09 -07:00
/*
* On SHUB 1.1 , we cannot call sn_change_memprotect ( ) since the BIST
* collides with memory operations . On those systems we call
2008-07-29 22:34:11 -07:00
* xpc_allow_amo_ops_shub_wars_1_1_sn2 ( ) instead .
2008-07-29 22:34:09 -07:00
*/
2008-11-05 17:27:22 -06:00
if ( ! enable_shub_wars_1_1 ( ) )
ret = xp_expand_memprotect ( ia64_tpa ( ( u64 ) amos_page ) , PAGE_SIZE ) ;
return ret ;
2008-07-29 22:34:09 -07:00
}
/*
2008-07-29 22:34:11 -07:00
* Change protections to allow amo operations on Shub 1.1 systems .
2008-07-29 22:34:09 -07:00
*/
static void
2008-07-29 22:34:11 -07:00
xpc_allow_amo_ops_shub_wars_1_1_sn2 ( void )
2008-07-29 22:34:09 -07:00
{
int node ;
int nasid ;
if ( ! enable_shub_wars_1_1 ( ) )
return ;
for_each_online_node ( node ) {
nasid = cnodeid_to_nasid ( node ) ;
/* save current protection values */
xpc_prot_vec_sn2 [ node ] =
( u64 ) HUB_L ( ( u64 * ) GLOBAL_MMR_ADDR ( nasid ,
SH1_MD_DQLP_MMR_DIR_PRIVEC0 ) ) ;
/* open up everything */
HUB_S ( ( u64 * ) GLOBAL_MMR_ADDR ( nasid ,
SH1_MD_DQLP_MMR_DIR_PRIVEC0 ) ,
- 1UL ) ;
HUB_S ( ( u64 * ) GLOBAL_MMR_ADDR ( nasid ,
SH1_MD_DQRP_MMR_DIR_PRIVEC0 ) ,
- 1UL ) ;
}
}
2008-07-29 22:34:16 -07:00
static enum xp_retval
2008-07-29 22:34:16 -07:00
xpc_get_partition_rsvd_page_pa_sn2 ( void * buf , u64 * cookie , unsigned long * rp_pa ,
2008-07-29 22:34:16 -07:00
size_t * len )
{
s64 status ;
enum xp_retval ret ;
2008-07-29 22:34:16 -07:00
status = sn_partition_reserved_page_pa ( ( u64 ) buf , cookie , rp_pa , len ) ;
2008-07-29 22:34:16 -07:00
if ( status = = SALRET_OK )
ret = xpSuccess ;
else if ( status = = SALRET_MORE_PASSES )
ret = xpNeedMoreInfo ;
else
ret = xpSalError ;
return ret ;
}
2008-07-29 22:34:18 -07:00
static int
xpc_setup_rsvd_page_sn_sn2 ( struct xpc_rsvd_page * rp )
2008-07-29 22:34:05 -07:00
{
2008-07-29 22:34:11 -07:00
struct amo * amos_page ;
2008-07-29 22:34:05 -07:00
int i ;
int ret ;
2008-07-29 22:34:13 -07:00
xpc_vars_sn2 = XPC_RP_VARS ( rp ) ;
2008-07-29 22:34:05 -07:00
2008-07-29 22:34:16 -07:00
rp - > sn . vars_pa = xp_pa ( xpc_vars_sn2 ) ;
2008-07-29 22:34:05 -07:00
2008-07-29 22:34:06 -07:00
/* vars_part array follows immediately after vars */
2008-07-29 22:34:13 -07:00
xpc_vars_part_sn2 = ( struct xpc_vars_part_sn2 * ) ( ( u8 * ) XPC_RP_VARS ( rp ) +
XPC_RP_VARS_SIZE ) ;
2008-07-29 22:34:06 -07:00
2008-07-29 22:34:05 -07:00
/*
2008-07-29 22:34:13 -07:00
* Before clearing xpc_vars_sn2 , see if a page of amos had been
* previously allocated . If not we ' ll need to allocate one and set
* permissions so that cross - partition amos are allowed .
2008-07-29 22:34:05 -07:00
*
2008-07-29 22:34:11 -07:00
* The allocated amo page needs MCA reporting to remain disabled after
2008-07-29 22:34:05 -07:00
* XPC has unloaded . To make this work , we keep a copy of the pointer
2008-07-29 22:34:13 -07:00
* to this page ( i . e . , amos_page ) in the struct xpc_vars_sn2 structure ,
2008-07-29 22:34:05 -07:00
* which is pointed to by the reserved page , and re - use that saved copy
2008-07-29 22:34:11 -07:00
* on subsequent loads of XPC . This amo page is never freed , and its
2008-07-29 22:34:05 -07:00
* memory protections are never restricted .
*/
2008-07-29 22:34:13 -07:00
amos_page = xpc_vars_sn2 - > amos_page ;
2008-07-29 22:34:05 -07:00
if ( amos_page = = NULL ) {
2008-07-29 22:34:11 -07:00
amos_page = ( struct amo * ) TO_AMO ( uncached_alloc_page ( 0 , 1 ) ) ;
2008-07-29 22:34:05 -07:00
if ( amos_page = = NULL ) {
2008-07-29 22:34:11 -07:00
dev_err ( xpc_part , " can't allocate page of amos \n " ) ;
2008-07-29 22:34:18 -07:00
return - ENOMEM ;
2008-07-29 22:34:05 -07:00
}
/*
2008-07-29 22:34:11 -07:00
* Open up amo - R / W to cpu . This is done on Shub 1.1 systems
* when xpc_allow_amo_ops_shub_wars_1_1_sn2 ( ) is called .
2008-07-29 22:34:05 -07:00
*/
2008-07-29 22:34:11 -07:00
ret = xpc_allow_amo_ops_sn2 ( amos_page ) ;
2008-07-29 22:34:09 -07:00
if ( ret ! = xpSuccess ) {
2008-07-29 22:34:11 -07:00
dev_err ( xpc_part , " can't allow amo operations \n " ) ;
2008-07-29 22:34:09 -07:00
uncached_free_page ( __IA64_UNCACHED_OFFSET |
TO_PHYS ( ( u64 ) amos_page ) , 1 ) ;
2008-07-29 22:34:18 -07:00
return - EPERM ;
2008-07-29 22:34:05 -07:00
}
}
2008-07-29 22:34:13 -07:00
/* clear xpc_vars_sn2 */
memset ( xpc_vars_sn2 , 0 , sizeof ( struct xpc_vars_sn2 ) ) ;
2008-07-29 22:34:05 -07:00
2008-07-29 22:34:13 -07:00
xpc_vars_sn2 - > version = XPC_V_VERSION ;
xpc_vars_sn2 - > activate_IRQ_nasid = cpuid_to_nasid ( 0 ) ;
xpc_vars_sn2 - > activate_IRQ_phys_cpuid = cpu_physical_id ( 0 ) ;
2008-07-29 22:34:16 -07:00
xpc_vars_sn2 - > vars_part_pa = xp_pa ( xpc_vars_part_sn2 ) ;
2008-07-29 22:34:13 -07:00
xpc_vars_sn2 - > amos_page_pa = ia64_tpa ( ( u64 ) amos_page ) ;
xpc_vars_sn2 - > amos_page = amos_page ; /* save for next load of XPC */
2008-07-29 22:34:05 -07:00
2008-07-29 22:34:13 -07:00
/* clear xpc_vars_part_sn2 */
memset ( ( u64 * ) xpc_vars_part_sn2 , 0 , sizeof ( struct xpc_vars_part_sn2 ) *
2008-07-29 22:34:16 -07:00
XP_MAX_NPARTITIONS_SN2 ) ;
2008-07-29 22:34:05 -07:00
2008-07-29 22:34:11 -07:00
/* initialize the activate IRQ related amo variables */
2008-07-29 22:34:14 -07:00
for ( i = 0 ; i < xpc_nasid_mask_nlongs ; i + + )
2008-07-29 22:34:13 -07:00
( void ) xpc_init_IRQ_amo_sn2 ( XPC_ACTIVATE_IRQ_AMOS_SN2 + i ) ;
2008-07-29 22:34:05 -07:00
2008-07-29 22:34:11 -07:00
/* initialize the engaged remote partitions related amo variables */
2008-07-29 22:34:13 -07:00
( void ) xpc_init_IRQ_amo_sn2 ( XPC_ENGAGED_PARTITIONS_AMO_SN2 ) ;
( void ) xpc_init_IRQ_amo_sn2 ( XPC_DEACTIVATE_REQUEST_AMO_SN2 ) ;
2008-07-29 22:34:07 -07:00
2008-07-29 22:34:18 -07:00
return 0 ;
2008-07-29 22:34:07 -07:00
}
static void
xpc_increment_heartbeat_sn2 ( void )
{
2008-07-29 22:34:13 -07:00
xpc_vars_sn2 - > heartbeat + + ;
2008-07-29 22:34:07 -07:00
}
static void
xpc_offline_heartbeat_sn2 ( void )
{
xpc_increment_heartbeat_sn2 ( ) ;
2008-07-29 22:34:13 -07:00
xpc_vars_sn2 - > heartbeat_offline = 1 ;
2008-07-29 22:34:07 -07:00
}
static void
xpc_online_heartbeat_sn2 ( void )
{
xpc_increment_heartbeat_sn2 ( ) ;
2008-07-29 22:34:13 -07:00
xpc_vars_sn2 - > heartbeat_offline = 0 ;
2008-07-29 22:34:07 -07:00
}
static void
xpc_heartbeat_init_sn2 ( void )
{
2008-07-29 22:34:13 -07:00
DBUG_ON ( xpc_vars_sn2 = = NULL ) ;
2008-07-29 22:34:07 -07:00
2008-07-29 22:34:13 -07:00
bitmap_zero ( xpc_vars_sn2 - > heartbeating_to_mask , XP_MAX_NPARTITIONS_SN2 ) ;
xpc_heartbeating_to_mask = & xpc_vars_sn2 - > heartbeating_to_mask [ 0 ] ;
2008-07-29 22:34:07 -07:00
xpc_online_heartbeat_sn2 ( ) ;
}
/* Shut down heartbeating by marking our heartbeat offline. */
static void
xpc_heartbeat_exit_sn2(void)
{
	xpc_offline_heartbeat_sn2();
}
2008-07-29 22:34:17 -07:00
static enum xp_retval
xpc_get_remote_heartbeat_sn2 ( struct xpc_partition * part )
2008-07-29 22:34:07 -07:00
{
struct xpc_vars_sn2 * remote_vars ;
enum xp_retval ret ;
2008-07-29 22:34:13 -07:00
remote_vars = ( struct xpc_vars_sn2 * ) xpc_remote_copy_buffer_sn2 ;
2008-07-29 22:34:07 -07:00
2008-07-29 22:34:17 -07:00
/* pull the remote vars structure that contains the heartbeat */
ret = xp_remote_memcpy ( xp_pa ( remote_vars ) ,
part - > sn . sn2 . remote_vars_pa ,
XPC_RP_VARS_SIZE ) ;
if ( ret ! = xpSuccess )
return ret ;
2008-07-29 22:34:07 -07:00
2008-07-29 22:34:17 -07:00
dev_dbg ( xpc_part , " partid=%d, heartbeat=%ld, last_heartbeat=%ld, "
" heartbeat_offline=%ld, HB_mask[0]=0x%lx \n " , XPC_PARTID ( part ) ,
remote_vars - > heartbeat , part - > last_heartbeat ,
remote_vars - > heartbeat_offline ,
remote_vars - > heartbeating_to_mask [ 0 ] ) ;
if ( ( remote_vars - > heartbeat = = part - > last_heartbeat & &
remote_vars - > heartbeat_offline = = 0 ) | |
! xpc_hb_allowed ( sn_partition_id ,
& remote_vars - > heartbeating_to_mask ) ) {
ret = xpNoHeartbeat ;
} else {
2008-07-29 22:34:07 -07:00
part - > last_heartbeat = remote_vars - > heartbeat ;
}
2008-07-29 22:34:17 -07:00
return ret ;
2008-07-29 22:34:07 -07:00
}
/*
 * Get a copy of the remote partition's XPC variables from the reserved page.
 *
 * remote_vars points to a buffer that is cacheline aligned for BTE copies and
 * assumed to be of size XPC_RP_VARS_SIZE.
 *
 * Returns xpVarsNotSet if the remote hasn't published a vars address yet,
 * xpBadVersion on a major-version mismatch, the remote copy error on a
 * failed transfer, or xpSuccess.
 */
static enum xp_retval
xpc_get_remote_vars_sn2(unsigned long remote_vars_pa,
			struct xpc_vars_sn2 *remote_vars)
{
	enum xp_retval ret;

	if (remote_vars_pa == 0)
		return xpVarsNotSet;

	/* pull over the cross partition variables */
	ret = xp_remote_memcpy(xp_pa(remote_vars), remote_vars_pa,
			       XPC_RP_VARS_SIZE);
	if (ret != xpSuccess)
		return ret;

	/* only the major version number must agree for interoperability */
	if (XPC_VERSION_MAJOR(remote_vars->version) !=
	    XPC_VERSION_MAJOR(XPC_V_VERSION)) {
		return xpBadVersion;
	}

	return xpSuccess;
}
2008-07-29 22:34:07 -07:00
/*
 * Request activation of the partition associated with the given nasid by
 * sending ourselves a local activate IRQ; the activate IRQ machinery does
 * the real work.  remote_rp and remote_rp_pa are unused here — presumably
 * the signature is dictated by the arch-independent ops table (verify
 * against the callers in xpc_main).
 */
static void
xpc_request_partition_activation_sn2(struct xpc_rsvd_page *remote_rp,
				     unsigned long remote_rp_pa, int nasid)
{
	xpc_send_local_activate_IRQ_sn2(nasid);
}
/*
 * Request reactivation of a known partition by re-issuing its activate IRQ
 * locally, using the activate nasid recorded for that partition.
 */
static void
xpc_request_partition_reactivation_sn2(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_sn2(part->sn.sn2.activate_IRQ_nasid);
}
/*
 * Ask a remote partition to deactivate: set the bit for our partid in the
 * remote partition's XPC_DEACTIVATE_REQUEST_AMO_SN2 amo variable, then send
 * it an activate IRQ so it notices the request.
 */
static void
xpc_request_partition_deactivation_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	unsigned long irq_flags;
	/* address of the deactivate-request amo within the remote amos page */
	struct amo *amo = (struct amo *)__va(part_sn2->remote_amos_page_pa +
					     (XPC_DEACTIVATE_REQUEST_AMO_SN2 *
					      sizeof(struct amo)));

	local_irq_save(irq_flags);

	/* set bit corresponding to our partid in remote partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
			 BIT(sn_partition_id));
	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IRQs and amos to it until the heartbeat times out.
	 */
	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
							       variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);

	/*
	 * Send activate IRQ to get other side to see that we've set our
	 * bit in their deactivate request amo.
	 */
	xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
				  cnodeid_to_nasid(0),
				  part_sn2->activate_IRQ_nasid,
				  part_sn2->activate_IRQ_phys_cpuid);
}
/*
 * Withdraw a previously issued deactivation request by clearing the bit
 * for our partid in the remote partition's deactivate-request amo.
 */
static void
xpc_cancel_partition_deactivation_request_sn2(struct xpc_partition *part)
{
	unsigned long irq_flags;
	/* address of the deactivate-request amo within the remote amos page */
	struct amo *amo = (struct amo *)__va(part->sn.sn2.remote_amos_page_pa +
					     (XPC_DEACTIVATE_REQUEST_AMO_SN2 *
					      sizeof(struct amo)));

	local_irq_save(irq_flags);

	/* clear bit corresponding to our partid in remote partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
			 ~BIT(sn_partition_id));
	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IRQs and amos to it until the heartbeat times out.
	 */
	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
							       variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);
}
static int
xpc_partition_deactivation_requested_sn2 ( short partid )
{
2008-07-29 22:34:13 -07:00
struct amo * amo = xpc_vars_sn2 - > amos_page +
XPC_DEACTIVATE_REQUEST_AMO_SN2 ;
2008-07-29 22:34:09 -07:00
2008-07-29 22:34:11 -07:00
/* our partition's amo variable ANDed with partid mask */
2008-07-29 22:34:09 -07:00
return ( FETCHOP_LOAD_OP ( TO_AMO ( ( u64 ) & amo - > variable ) , FETCHOP_LOAD ) &
2008-07-29 22:34:14 -07:00
BIT ( partid ) ) ! = 0 ;
2008-07-29 22:34:07 -07:00
}
/*
 * Update the remote partition's info: record the reserved-page details and
 * copy the activation-related fields out of its freshly pulled XPC
 * variables, logging each value as it is stored.
 */
static void
xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version,
			      unsigned long *remote_rp_ts_jiffies,
			      unsigned long remote_rp_pa,
			      unsigned long remote_vars_pa,
			      struct xpc_vars_sn2 *remote_vars)
{
	struct xpc_partition_sn2 *sn2 = &part->sn.sn2;

	part->remote_rp_version = remote_rp_version;
	dev_dbg(xpc_part, "remote_rp_version = 0x%016x\n",
		part->remote_rp_version);

	part->remote_rp_ts_jiffies = *remote_rp_ts_jiffies;
	dev_dbg(xpc_part, "remote_rp_ts_jiffies = 0x%016lx\n",
		part->remote_rp_ts_jiffies);

	part->remote_rp_pa = remote_rp_pa;
	dev_dbg(xpc_part, "remote_rp_pa = 0x%016lx\n", part->remote_rp_pa);

	sn2->remote_vars_pa = remote_vars_pa;
	dev_dbg(xpc_part, "remote_vars_pa = 0x%016lx\n", sn2->remote_vars_pa);

	/* one behind the remote counter so the next check sees an advance */
	part->last_heartbeat = remote_vars->heartbeat - 1;
	dev_dbg(xpc_part, "last_heartbeat = 0x%016lx\n",
		part->last_heartbeat);

	sn2->remote_vars_part_pa = remote_vars->vars_part_pa;
	dev_dbg(xpc_part, "remote_vars_part_pa = 0x%016lx\n",
		sn2->remote_vars_part_pa);

	sn2->activate_IRQ_nasid = remote_vars->activate_IRQ_nasid;
	dev_dbg(xpc_part, "activate_IRQ_nasid = 0x%x\n",
		sn2->activate_IRQ_nasid);

	sn2->activate_IRQ_phys_cpuid = remote_vars->activate_IRQ_phys_cpuid;
	dev_dbg(xpc_part, "activate_IRQ_phys_cpuid = 0x%x\n",
		sn2->activate_IRQ_phys_cpuid);

	sn2->remote_amos_page_pa = remote_vars->amos_page_pa;
	dev_dbg(xpc_part, "remote_amos_page_pa = 0x%lx\n",
		sn2->remote_amos_page_pa);

	sn2->remote_vars_version = remote_vars->version;
	dev_dbg(xpc_part, "remote_vars_version = 0x%x\n",
		sn2->remote_vars_version);
}
/*
 * Prior code has determined the nasid which generated an activate IRQ.
 * Inspect that nasid to determine if its partition needs to be activated
 * or deactivated.
 *
 * A partition is considered "awaiting activation" if our partition
 * flags indicate it is not active and it has a heartbeat.  A
 * partition is considered "awaiting deactivation" if our partition
 * flags indicate it is active but it has no heartbeat or it is not
 * sending its heartbeat to us.
 *
 * To determine the heartbeat, the remote nasid must have a properly
 * initialized reserved page.
 */
static void
xpc_identify_activate_IRQ_req_sn2(int nasid)
{
	struct xpc_rsvd_page *remote_rp;
	struct xpc_vars_sn2 *remote_vars;
	unsigned long remote_rp_pa;
	unsigned long remote_vars_pa;
	int remote_rp_version;
	int reactivate = 0;
	unsigned long remote_rp_ts_jiffies = 0;
	short partid;
	struct xpc_partition *part;
	struct xpc_partition_sn2 *part_sn2;
	enum xp_retval ret;

	/* pull over the reserved page structure */

	remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer_sn2;

	ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
	if (ret != xpSuccess) {
		dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
			 "which sent interrupt, reason=%d\n", nasid, ret);
		return;
	}

	/* capture everything we need before the copy buffer gets reused */
	remote_vars_pa = remote_rp->sn.vars_pa;
	remote_rp_version = remote_rp->version;
	remote_rp_ts_jiffies = remote_rp->ts_jiffies;

	partid = remote_rp->SAL_partid;
	part = &xpc_partitions[partid];
	part_sn2 = &part->sn.sn2;

	/* pull over the cross partition variables */

	/* NOTE: this reuses (and overwrites) the same copy buffer as above */
	remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer_sn2;

	ret = xpc_get_remote_vars_sn2(remote_vars_pa, remote_vars);
	if (ret != xpSuccess) {
		dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
			 "which sent interrupt, reason=%d\n", nasid, ret);
		XPC_DEACTIVATE_PARTITION(part, ret);
		return;
	}

	part->activate_IRQ_rcvd++;

	dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
		"%ld:0x%lx\n", (int)nasid, (int)partid, part->activate_IRQ_rcvd,
		remote_vars->heartbeat, remote_vars->heartbeating_to_mask[0]);

	/* an inactive, fully disengaged partition is a candidate to activate */
	if (xpc_partition_disengaged(part) &&
	    part->act_state == XPC_P_AS_INACTIVE) {

		xpc_update_partition_info_sn2(part, remote_rp_version,
					      &remote_rp_ts_jiffies,
					      remote_rp_pa, remote_vars_pa,
					      remote_vars);

		if (xpc_partition_deactivation_requested_sn2(partid)) {
			/*
			 * Other side is waiting on us to deactivate even though
			 * we already have.
			 */
			return;
		}

		xpc_activate_partition(part);
		return;
	}

	DBUG_ON(part->remote_rp_version == 0);
	DBUG_ON(part_sn2->remote_vars_version == 0);

	if (remote_rp_ts_jiffies != part->remote_rp_ts_jiffies) {

		/* the other side rebooted */

		DBUG_ON(xpc_partition_engaged_sn2(partid));
		DBUG_ON(xpc_partition_deactivation_requested_sn2(partid));

		xpc_update_partition_info_sn2(part, remote_rp_version,
					      &remote_rp_ts_jiffies,
					      remote_rp_pa, remote_vars_pa,
					      remote_vars);
		reactivate = 1;
	}

	if (part->disengage_timeout > 0 && !xpc_partition_disengaged(part)) {
		/* still waiting on other side to disengage from us */
		return;
	}

	/* reboot takes precedence over a pending deactivation request */
	if (reactivate)
		XPC_DEACTIVATE_PARTITION(part, xpReactivating);
	else if (xpc_partition_deactivation_requested_sn2(partid))
		XPC_DEACTIVATE_PARTITION(part, xpOtherGoingDown);
}
/*
 * Loop through the activation amo variables and process any bits
 * which are set.  Each bit indicates a nasid sending a partition
 * activation or deactivation request.
 *
 * Return # of IRQs detected.
 */
int
xpc_identify_activate_IRQ_sender_sn2(void)
{
	int l;			/* index into the array of amo variables */
	int b;			/* bit position within one amo's mask */
	unsigned long nasid_mask_long;
	u64 nasid;		/* remote nasid */
	int n_IRQs_detected = 0;
	struct amo *act_amos;

	act_amos = xpc_vars_sn2->amos_page + XPC_ACTIVATE_IRQ_AMOS_SN2;

	/* scan through activate amo variables looking for non-zero entries */
	for (l = 0; l < xpc_nasid_mask_nlongs; l++) {

		if (xpc_exiting)
			break;

		/* read-and-clear this amo's pending-sender mask */
		nasid_mask_long = xpc_receive_IRQ_amo_sn2(&act_amos[l]);

		b = find_first_bit(&nasid_mask_long, BITS_PER_LONG);
		if (b >= BITS_PER_LONG) {
			/* no IRQs from nasids in this amo variable */
			continue;
		}

		dev_dbg(xpc_part, "amo[%d] gave back 0x%lx\n", l,
			nasid_mask_long);

		/*
		 * If this nasid has been added to the machine since
		 * our partition was reset, this will retain the
		 * remote nasid in our reserved pages machine mask.
		 * This is used in the event of module reload.
		 */
		xpc_mach_nasids[l] |= nasid_mask_long;

		/* locate the nasid(s) which sent interrupts */

		do {
			n_IRQs_detected++;
			/* *2: the bitmaps track only even (C-brick) nasids */
			nasid = (l * BITS_PER_LONG + b) * 2;
			dev_dbg(xpc_part, "interrupt from nasid %ld\n", nasid);
			xpc_identify_activate_IRQ_req_sn2(nasid);
			b = find_next_bit(&nasid_mask_long, BITS_PER_LONG,
					  b + 1);
		} while (b < BITS_PER_LONG);
	}
	return n_IRQs_detected;
}
static void
2008-07-29 22:34:18 -07:00
xpc_process_activate_IRQ_rcvd_sn2 ( void )
2008-07-29 22:34:07 -07:00
{
2008-07-29 22:34:18 -07:00
unsigned long irq_flags ;
int n_IRQs_expected ;
2008-07-29 22:34:07 -07:00
int n_IRQs_detected ;
2008-07-29 22:34:18 -07:00
spin_lock_irqsave ( & xpc_activate_IRQ_rcvd_lock , irq_flags ) ;
n_IRQs_expected = xpc_activate_IRQ_rcvd ;
xpc_activate_IRQ_rcvd = 0 ;
spin_unlock_irqrestore ( & xpc_activate_IRQ_rcvd_lock , irq_flags ) ;
2008-07-29 22:34:09 -07:00
n_IRQs_detected = xpc_identify_activate_IRQ_sender_sn2 ( ) ;
2008-07-29 22:34:07 -07:00
if ( n_IRQs_detected < n_IRQs_expected ) {
2008-07-29 22:34:11 -07:00
/* retry once to help avoid missing amo */
2008-07-29 22:34:09 -07:00
( void ) xpc_identify_activate_IRQ_sender_sn2 ( ) ;
2008-07-29 22:34:07 -07:00
}
}
2008-07-29 22:34:11 -07:00
/*
 * Setup the channel structures that are sn2 specific.
 *
 * Allocates the GET/PUT value arrays and open/close args, registers the
 * shared NOTIFY IRQ handler, starts the dropped-notify-IRQ timer, wires
 * each channel's sn2 fields to its slots in the per-partition arrays, and
 * finally publishes our per-partition variables by writing MAGIC1 (the
 * magic write must come last — it tells the remote side everything above
 * is ready).  On failure, resources are unwound via the goto chain in
 * reverse order of acquisition.
 *
 * Returns xpSuccess, or xpNoMemory / xpLackOfResources on failure.
 */
static enum xp_retval
xpc_setup_ch_structures_sn_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	struct xpc_channel_sn2 *ch_sn2;
	enum xp_retval retval;
	int ret;
	int cpuid;
	int ch_number;
	struct timer_list *timer;
	short partid = XPC_PARTID(part);

	/* allocate all the required GET/PUT values */

	part_sn2->local_GPs =
	    xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, GFP_KERNEL,
					  &part_sn2->local_GPs_base);
	if (part_sn2->local_GPs == NULL) {
		dev_err(xpc_chan, "can't get memory for local get/put "
			"values\n");
		return xpNoMemory;
	}

	part_sn2->remote_GPs =
	    xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, GFP_KERNEL,
					  &part_sn2->remote_GPs_base);
	if (part_sn2->remote_GPs == NULL) {
		dev_err(xpc_chan, "can't get memory for remote get/put "
			"values\n");
		retval = xpNoMemory;
		goto out_1;
	}

	/* filled in later from the remote side's published vars */
	part_sn2->remote_GPs_pa = 0;

	/* allocate all the required open and close args */

	part_sn2->local_openclose_args =
	    xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE,
					  GFP_KERNEL, &part_sn2->
					  local_openclose_args_base);
	if (part_sn2->local_openclose_args == NULL) {
		dev_err(xpc_chan, "can't get memory for local connect args\n");
		retval = xpNoMemory;
		goto out_2;
	}

	part_sn2->remote_openclose_args_pa = 0;

	part_sn2->local_chctl_amo_va = xpc_init_IRQ_amo_sn2(partid);

	part_sn2->notify_IRQ_nasid = 0;
	part_sn2->notify_IRQ_phys_cpuid = 0;
	part_sn2->remote_chctl_amo_va = NULL;

	sprintf(part_sn2->notify_IRQ_owner, "xpc%02d", partid);
	/* SGI_XPC_NOTIFY is shared across partitions; partid disambiguates */
	ret = request_irq(SGI_XPC_NOTIFY, xpc_handle_notify_IRQ_sn2,
			  IRQF_SHARED, part_sn2->notify_IRQ_owner,
			  (void *)(u64)partid);
	if (ret != 0) {
		dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
			"errno=%d\n", -ret);
		retval = xpLackOfResources;
		goto out_3;
	}

	/* Setup a timer to check for dropped notify IRQs */
	timer = &part_sn2->dropped_notify_IRQ_timer;
	init_timer(timer);
	timer->function =
	    (void (*)(unsigned long))xpc_check_for_dropped_notify_IRQ_sn2;
	timer->data = (unsigned long)part;
	timer->expires = jiffies + XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
	add_timer(timer);

	/* point each channel at its slots in the per-partition arrays */
	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch_sn2 = &part->channels[ch_number].sn.sn2;

		ch_sn2->local_GP = &part_sn2->local_GPs[ch_number];
		ch_sn2->local_openclose_args =
		    &part_sn2->local_openclose_args[ch_number];

		mutex_init(&ch_sn2->msg_to_pull_mutex);
	}

	/*
	 * Setup the per partition specific variables required by the
	 * remote partition to establish channel connections with us.
	 *
	 * The setting of the magic # indicates that these per partition
	 * specific variables are ready to be used.
	 */
	xpc_vars_part_sn2[partid].GPs_pa = xp_pa(part_sn2->local_GPs);
	xpc_vars_part_sn2[partid].openclose_args_pa =
	    xp_pa(part_sn2->local_openclose_args);
	xpc_vars_part_sn2[partid].chctl_amo_pa =
	    xp_pa(part_sn2->local_chctl_amo_va);
	cpuid = raw_smp_processor_id();	/* any CPU in this partition will do */
	xpc_vars_part_sn2[partid].notify_IRQ_nasid = cpuid_to_nasid(cpuid);
	xpc_vars_part_sn2[partid].notify_IRQ_phys_cpuid =
	    cpu_physical_id(cpuid);
	xpc_vars_part_sn2[partid].nchannels = part->nchannels;
	xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC1_SN2;

	return xpSuccess;

	/* setup of ch structures failed */
out_3:
	kfree(part_sn2->local_openclose_args_base);
	part_sn2->local_openclose_args = NULL;
out_2:
	kfree(part_sn2->remote_GPs_base);
	part_sn2->remote_GPs = NULL;
out_1:
	kfree(part_sn2->local_GPs_base);
	part_sn2->local_GPs = NULL;
	return retval;
}
/*
 * Teardown the channel structures that are sn2 specific.
 *
 * Clears the magic first so the remote side stops using our per-partition
 * variables, then stops the timer, releases the IRQ, and frees the arrays
 * allocated by xpc_setup_ch_structures_sn_sn2().
 */
static void
xpc_teardown_ch_structures_sn_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	short partid = XPC_PARTID(part);

	/*
	 * Indicate that the variables specific to the remote partition are no
	 * longer available for its use.
	 */
	xpc_vars_part_sn2[partid].magic = 0;

	/* in case we've still got outstanding timers registered... */
	del_timer_sync(&part_sn2->dropped_notify_IRQ_timer);
	free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid);

	/* free the *_base pointers — the aligned pointers are derived views */
	kfree(part_sn2->local_openclose_args_base);
	part_sn2->local_openclose_args = NULL;
	kfree(part_sn2->remote_GPs_base);
	part_sn2->remote_GPs = NULL;
	kfree(part_sn2->local_GPs_base);
	part_sn2->local_GPs = NULL;
	part_sn2->local_chctl_amo_va = NULL;
}
/*
* Create a wrapper that hides the underlying mechanism for pulling a cacheline
* ( or multiple cachelines ) from a remote partition .
*
2008-07-29 22:34:16 -07:00
* src_pa must be a cacheline aligned physical address on the remote partition .
2008-07-29 22:34:06 -07:00
* dst must be a cacheline aligned virtual address on this partition .
* cnt must be cacheline sized
*/
2008-07-29 22:34:14 -07:00
/* ??? Replace this function by call to xp_remote_memcpy() or bte_copy()? */
2008-07-29 22:34:06 -07:00
static enum xp_retval
xpc_pull_remote_cachelines_sn2 ( struct xpc_partition * part , void * dst ,
2008-07-29 22:34:16 -07:00
const unsigned long src_pa , size_t cnt )
2008-07-29 22:34:06 -07:00
{
enum xp_retval ret ;
2008-07-29 22:34:16 -07:00
DBUG_ON ( src_pa ! = L1_CACHE_ALIGN ( src_pa ) ) ;
DBUG_ON ( ( unsigned long ) dst ! = L1_CACHE_ALIGN ( ( unsigned long ) dst ) ) ;
2008-07-29 22:34:06 -07:00
DBUG_ON ( cnt ! = L1_CACHE_ALIGN ( cnt ) ) ;
2008-07-29 22:34:18 -07:00
if ( part - > act_state = = XPC_P_AS_DEACTIVATING )
2008-07-29 22:34:06 -07:00
return part - > reason ;
2008-07-29 22:34:16 -07:00
ret = xp_remote_memcpy ( xp_pa ( dst ) , src_pa , cnt ) ;
2008-07-29 22:34:06 -07:00
if ( ret ! = xpSuccess ) {
dev_dbg ( xpc_chan , " xp_remote_memcpy() from partition %d failed, "
" ret=%d \n " , XPC_PARTID ( part ) , ret ) ;
}
return ret ;
}
/*
 * Pull the remote per partition specific variables from the specified
 * partition.
 *
 * Handshake via the magic field: the remote writes MAGIC1 when its entry
 * is published, and we write MAGIC2 into our own entry once we've pulled
 * theirs.  Returns xpRetry until both sides have pulled (i.e. until the
 * remote's entry shows MAGIC2), xpBadMagic / xpInvalidAddress on corrupt
 * data, or xpSuccess.
 */
static enum xp_retval
xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	/* double-sized stack buffer so we can cacheline-align within it */
	u8 buffer[L1_CACHE_BYTES * 2];
	struct xpc_vars_part_sn2 *pulled_entry_cacheline =
	    (struct xpc_vars_part_sn2 *)L1_CACHE_ALIGN((u64)buffer);
	struct xpc_vars_part_sn2 *pulled_entry;
	unsigned long remote_entry_cacheline_pa;
	unsigned long remote_entry_pa;
	short partid = XPC_PARTID(part);
	enum xp_retval ret;

	/* pull the cacheline that contains the variables we're interested in */

	DBUG_ON(part_sn2->remote_vars_part_pa !=
		L1_CACHE_ALIGN(part_sn2->remote_vars_part_pa));
	DBUG_ON(sizeof(struct xpc_vars_part_sn2) != L1_CACHE_BYTES / 2);

	/* our entry in the remote side's vars_part array is indexed by our id */
	remote_entry_pa = part_sn2->remote_vars_part_pa +
	    sn_partition_id * sizeof(struct xpc_vars_part_sn2);

	remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));

	/* locate our entry within the pulled cacheline */
	pulled_entry = (struct xpc_vars_part_sn2 *)((u64)pulled_entry_cacheline
						    + (remote_entry_pa &
						       (L1_CACHE_BYTES - 1)));

	ret = xpc_pull_remote_cachelines_sn2(part, pulled_entry_cacheline,
					     remote_entry_cacheline_pa,
					     L1_CACHE_BYTES);
	if (ret != xpSuccess) {
		dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
			"partition %d, ret=%d\n", partid, ret);
		return ret;
	}

	/* see if they've been set up yet */

	if (pulled_entry->magic != XPC_VP_MAGIC1_SN2 &&
	    pulled_entry->magic != XPC_VP_MAGIC2_SN2) {

		if (pulled_entry->magic != 0) {
			dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
				"partition %d has bad magic value (=0x%lx)\n",
				partid, sn_partition_id, pulled_entry->magic);
			return xpBadMagic;
		}

		/* they've not been initialized yet */
		return xpRetry;
	}

	/* only import the variables the first time through (MAGIC1 set) */
	if (xpc_vars_part_sn2[partid].magic == XPC_VP_MAGIC1_SN2) {

		/* validate the variables */

		if (pulled_entry->GPs_pa == 0 ||
		    pulled_entry->openclose_args_pa == 0 ||
		    pulled_entry->chctl_amo_pa == 0) {

			dev_err(xpc_chan, "partition %d's XPC vars_part for "
				"partition %d are not valid\n", partid,
				sn_partition_id);
			return xpInvalidAddress;
		}

		/* the variables we imported look to be valid */

		part_sn2->remote_GPs_pa = pulled_entry->GPs_pa;
		part_sn2->remote_openclose_args_pa =
		    pulled_entry->openclose_args_pa;
		part_sn2->remote_chctl_amo_va =
		    (struct amo *)__va(pulled_entry->chctl_amo_pa);
		part_sn2->notify_IRQ_nasid = pulled_entry->notify_IRQ_nasid;
		part_sn2->notify_IRQ_phys_cpuid =
		    pulled_entry->notify_IRQ_phys_cpuid;

		/* both sides operate with the smaller channel count */
		if (part->nchannels > pulled_entry->nchannels)
			part->nchannels = pulled_entry->nchannels;

		/* let the other side know that we've pulled their variables */

		xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC2_SN2;
	}

	/* remote still at MAGIC1 means it hasn't pulled our variables yet */
	if (pulled_entry->magic == XPC_VP_MAGIC1_SN2)
		return xpRetry;

	return xpSuccess;
}
/*
 * Establish first contact with the remote partition.  This involves pulling
 * the XPC per partition variables from the remote partition and waiting for
 * the remote partition to pull ours.
 *
 * Returns xpSuccess once both pulls have completed, or the failure reason
 * (after marking the partition for deactivation where appropriate).
 */
static enum xp_retval
xpc_make_first_contact_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	enum xp_retval ret;

	/*
	 * Register the remote partition's amos with SAL so it can handle
	 * and cleanup errors within that address range should the remote
	 * partition go down.  We don't unregister this range because it is
	 * difficult to tell when outstanding writes to the remote partition
	 * are finished and thus when it is safe to unregister.  This should
	 * not result in wasted space in the SAL xp_addr_region table because
	 * we should get the same page for remote_amos_page_pa after module
	 * reloads and system reboots.
	 */
	if (sn_register_xp_addr_region(part_sn2->remote_amos_page_pa,
				       PAGE_SIZE, 1) < 0) {
		dev_warn(xpc_part, "xpc_activating(%d) failed to register "
			 "xp_addr region\n", XPC_PARTID(part));

		ret = xpPhysAddrRegFailed;
		XPC_DEACTIVATE_PARTITION(part, ret);
		return ret;
	}

	/*
	 * Send activate IRQ to get other side to activate if they've not
	 * already begun to do so.
	 */
	xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
				  cnodeid_to_nasid(0),
				  part_sn2->activate_IRQ_nasid,
				  part_sn2->activate_IRQ_phys_cpuid);

	/* poll until the MAGIC1/MAGIC2 handshake completes or hard-fails */
	while ((ret = xpc_pull_remote_vars_part_sn2(part)) != xpSuccess) {
		if (ret != xpRetry) {
			XPC_DEACTIVATE_PARTITION(part, ret);
			return ret;
		}

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}
/*
 * Get the chctl flags and pull the openclose args and/or remote GPs as needed.
 *
 * Atomically consumes the partition's pending channel-control flags.  If a
 * required remote pull fails, the partition is marked for deactivation and
 * zero is returned so the caller skips further flag processing.
 */
static u64
xpc_get_chctl_all_flags_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;
	enum xp_retval ret;

	/*
	 * See if there are any chctl flags to be handled.
	 */

	/* snapshot and clear the pending flags under the lock */
	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	/* open/close flags require fresh openclose args from the remote */
	if (xpc_any_openclose_chctl_flags_set(&chctl)) {
		ret = xpc_pull_remote_cachelines_sn2(part, part->
						     remote_openclose_args,
						     part_sn2->
						     remote_openclose_args_pa,
						     XPC_OPENCLOSE_ARGS_SIZE);
		if (ret != xpSuccess) {
			XPC_DEACTIVATE_PARTITION(part, ret);

			dev_dbg(xpc_chan, "failed to pull openclose args from "
				"partition %d, ret=%d\n", XPC_PARTID(part),
				ret);

			/* don't bother processing chctl flags anymore */
			chctl.all_flags = 0;
		}
	}

	/* message flags require fresh GET/PUT values from the remote */
	if (xpc_any_msg_chctl_flags_set(&chctl)) {
		ret = xpc_pull_remote_cachelines_sn2(part, part_sn2->remote_GPs,
						     part_sn2->remote_GPs_pa,
						     XPC_GP_SIZE);
		if (ret != xpSuccess) {
			XPC_DEACTIVATE_PARTITION(part, ret);

			dev_dbg(xpc_chan, "failed to pull GPs from partition "
				"%d, ret=%d\n", XPC_PARTID(part), ret);

			/* don't bother processing chctl flags anymore */
			chctl.all_flags = 0;
		}
	}

	return chctl.all_flags;
}
2008-07-29 22:34:11 -07:00
/*
 * Allocate the local message queue and the notify queue.
 *
 * If memory is tight, progressively retries with fewer entries; a reduced
 * entry count is written back to ch->local_nentries under the channel lock.
 * Returns xpSuccess or xpNoMemory.
 */
static enum xp_retval
xpc_allocate_local_msgqueue_sn2(struct xpc_channel *ch)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	unsigned long irq_flags;
	int nentries;
	size_t nbytes;

	for (nentries = ch->local_nentries; nentries > 0; nentries--) {

		nbytes = nentries * ch->entry_size;
		ch_sn2->local_msgqueue =
		    xpc_kzalloc_cacheline_aligned(nbytes, GFP_KERNEL,
						  &ch_sn2->local_msgqueue_base);
		if (ch_sn2->local_msgqueue == NULL)
			continue;

		nbytes = nentries * sizeof(struct xpc_notify_sn2);
		ch_sn2->notify_queue = kzalloc(nbytes, GFP_KERNEL);
		if (ch_sn2->notify_queue == NULL) {
			/* both queues must fit at the same entry count */
			kfree(ch_sn2->local_msgqueue_base);
			ch_sn2->local_msgqueue = NULL;
			continue;
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->local_nentries) {
			dev_dbg(xpc_chan, "nentries=%d local_nentries=%d, "
				"partid=%d, channel=%d\n", nentries,
				ch->local_nentries, ch->partid, ch->number);

			ch->local_nentries = nentries;
		}
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	dev_dbg(xpc_chan, "can't get memory for local message queue and notify "
		"queue, partid=%d, channel=%d\n", ch->partid, ch->number);
	return xpNoMemory;
}
/*
* Allocate the cached remote message queue .
*/
static enum xp_retval
xpc_allocate_remote_msgqueue_sn2 ( struct xpc_channel * ch )
{
2008-07-29 22:34:18 -07:00
struct xpc_channel_sn2 * ch_sn2 = & ch - > sn . sn2 ;
2008-07-29 22:34:11 -07:00
unsigned long irq_flags ;
int nentries ;
size_t nbytes ;
DBUG_ON ( ch - > remote_nentries < = 0 ) ;
for ( nentries = ch - > remote_nentries ; nentries > 0 ; nentries - - ) {
2008-07-29 22:34:19 -07:00
nbytes = nentries * ch - > entry_size ;
2008-07-29 22:34:18 -07:00
ch_sn2 - > remote_msgqueue =
xpc_kzalloc_cacheline_aligned ( nbytes , GFP_KERNEL , & ch_sn2 - >
remote_msgqueue_base ) ;
if ( ch_sn2 - > remote_msgqueue = = NULL )
2008-07-29 22:34:11 -07:00
continue ;
spin_lock_irqsave ( & ch - > lock , irq_flags ) ;
if ( nentries < ch - > remote_nentries ) {
dev_dbg ( xpc_chan , " nentries=%d remote_nentries=%d, "
" partid=%d, channel=%d \n " , nentries ,
ch - > remote_nentries , ch - > partid , ch - > number ) ;
ch - > remote_nentries = nentries ;
}
spin_unlock_irqrestore ( & ch - > lock , irq_flags ) ;
return xpSuccess ;
}
dev_dbg ( xpc_chan , " can't get memory for cached remote message queue, "
" partid=%d, channel=%d \n " , ch - > partid , ch - > number ) ;
return xpNoMemory ;
}
/*
* Allocate message queues and other stuff associated with a channel .
*
* Note : Assumes all of the channel sizes are filled in .
*/
static enum xp_retval
2008-07-29 22:34:18 -07:00
xpc_setup_msg_structures_sn2 ( struct xpc_channel * ch )
2008-07-29 22:34:11 -07:00
{
2008-07-29 22:34:18 -07:00
struct xpc_channel_sn2 * ch_sn2 = & ch - > sn . sn2 ;
2008-07-29 22:34:11 -07:00
enum xp_retval ret ;
DBUG_ON ( ch - > flags & XPC_C_SETUP ) ;
ret = xpc_allocate_local_msgqueue_sn2 ( ch ) ;
if ( ret = = xpSuccess ) {
ret = xpc_allocate_remote_msgqueue_sn2 ( ch ) ;
if ( ret ! = xpSuccess ) {
2008-07-29 22:34:18 -07:00
kfree ( ch_sn2 - > local_msgqueue_base ) ;
ch_sn2 - > local_msgqueue = NULL ;
kfree ( ch_sn2 - > notify_queue ) ;
ch_sn2 - > notify_queue = NULL ;
2008-07-29 22:34:11 -07:00
}
}
return ret ;
}
/*
* Free up message queues and other stuff that were allocated for the specified
* channel .
*/
static void
2008-07-29 22:34:18 -07:00
xpc_teardown_msg_structures_sn2 ( struct xpc_channel * ch )
2008-07-29 22:34:11 -07:00
{
struct xpc_channel_sn2 * ch_sn2 = & ch - > sn . sn2 ;
DBUG_ON ( ! spin_is_locked ( & ch - > lock ) ) ;
2008-07-29 22:34:18 -07:00
ch_sn2 - > remote_msgqueue_pa = 0 ;
2008-07-29 22:34:11 -07:00
ch_sn2 - > local_GP - > get = 0 ;
ch_sn2 - > local_GP - > put = 0 ;
ch_sn2 - > remote_GP . get = 0 ;
ch_sn2 - > remote_GP . put = 0 ;
ch_sn2 - > w_local_GP . get = 0 ;
ch_sn2 - > w_local_GP . put = 0 ;
ch_sn2 - > w_remote_GP . get = 0 ;
ch_sn2 - > w_remote_GP . put = 0 ;
ch_sn2 - > next_msg_to_pull = 0 ;
if ( ch - > flags & XPC_C_SETUP ) {
dev_dbg ( xpc_chan , " ch->flags=0x%x, partid=%d, channel=%d \n " ,
ch - > flags , ch - > partid , ch - > number ) ;
2008-07-29 22:34:18 -07:00
kfree ( ch_sn2 - > local_msgqueue_base ) ;
ch_sn2 - > local_msgqueue = NULL ;
kfree ( ch_sn2 - > remote_msgqueue_base ) ;
ch_sn2 - > remote_msgqueue = NULL ;
kfree ( ch_sn2 - > notify_queue ) ;
ch_sn2 - > notify_queue = NULL ;
2008-07-29 22:34:11 -07:00
}
}
2008-07-29 22:34:09 -07:00
/*
 * Notify those who wanted to be notified upon delivery of their message.
 * Walks the notify queue entries from the previously seen remote GET value
 * up to (but not including) 'put', invoking each entry's callback with
 * 'reason' once we win the cmpxchg race to claim it.
 */
static void
xpc_notify_senders_sn2(struct xpc_channel *ch, enum xp_retval reason, s64 put)
{
	struct xpc_notify_sn2 *notify;
	u8 notify_type;
	s64 get = ch->sn.sn2.w_remote_GP.get - 1;

	/* stop early once there is no one left waiting to be notified */
	while (++get < put && atomic_read(&ch->n_to_notify) > 0) {

		notify = &ch->sn.sn2.notify_queue[get % ch->local_nentries];

		/*
		 * See if the notify entry indicates it was associated with
		 * a message who's sender wants to be notified. It is possible
		 * that it is, but someone else is doing or has done the
		 * notification.
		 */
		notify_type = notify->type;
		if (notify_type == 0 ||
		    cmpxchg(&notify->type, notify_type, 0) != notify_type) {
			continue;
		}

		DBUG_ON(notify_type != XPC_N_CALL);

		atomic_dec(&ch->n_to_notify);

		if (notify->func != NULL) {
			dev_dbg(xpc_chan, "notify->func() called, notify=0x%p "
				"msg_number=%ld partid=%d channel=%d\n",
				(void *)notify, get, ch->partid, ch->number);

			notify->func(reason, ch->partid, ch->number,
				     notify->key);

			dev_dbg(xpc_chan, "notify->func() returned, notify=0x%p "
				"msg_number=%ld partid=%d channel=%d\n",
				(void *)notify, get, ch->partid, ch->number);
		}
	}
}
static void
xpc_notify_senders_of_disconnect_sn2 ( struct xpc_channel * ch )
{
xpc_notify_senders_sn2 ( ch , ch - > reason , ch - > sn . sn2 . w_local_GP . put ) ;
}
/*
 * Clear some of the msg flags in the local message queue.
 * Entries between the old working remote GET and the newly pulled remote
 * GET have been consumed by the other side and can be recycled by
 * xpc_allocate_msg_sn2().
 */
static inline void
xpc_clear_local_msgqueue_flags_sn2(struct xpc_channel *ch)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg_sn2 *msg;
	s64 get;

	get = ch_sn2->w_remote_GP.get;
	do {
		msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->local_msgqueue +
					     (get % ch->local_nentries) *
					     ch->entry_size);
		/* only READY entries should be sitting in this range */
		DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
		msg->flags = 0;
	} while (++get < ch_sn2->remote_GP.get);
}
/*
 * Clear some of the msg flags in the remote message queue.
 * Entries delivered and marked DONE a full queue-length ago are about to
 * be overwritten by newly pulled messages, so reset their flags first.
 */
static inline void
xpc_clear_remote_msgqueue_flags_sn2(struct xpc_channel *ch)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg_sn2 *msg;
	s64 put;

	/* flags are zeroed when the buffer is allocated */
	if (ch_sn2->remote_GP.put < ch->remote_nentries)
		return;

	put = max(ch_sn2->w_remote_GP.put, ch->remote_nentries);
	do {
		msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue +
					     (put % ch->remote_nentries) *
					     ch->entry_size);
		/* the slot being reused must have completed a full cycle */
		DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
		DBUG_ON(!(msg->flags & XPC_M_SN2_DONE));
		DBUG_ON(msg->number != put - ch->remote_nentries);
		msg->flags = 0;
	} while (++put < ch_sn2->remote_GP.put);
}
2008-07-29 22:34:19 -07:00
static int
xpc_n_of_deliverable_payloads_sn2 ( struct xpc_channel * ch )
{
return ch - > sn . sn2 . w_remote_GP . put - ch - > sn . sn2 . w_local_GP . get ;
}
2008-07-29 22:34:09 -07:00
/*
 * Process msg chctl flags for one channel: fold the freshly pulled remote
 * GET/PUT values into the working GPs, notify waiting senders of delivered
 * messages, and wake kthreads to deliver newly arrived ones.
 */
static void
xpc_process_msg_chctl_flags_sn2(struct xpc_partition *part, int ch_number)
{
	struct xpc_channel *ch = &part->channels[ch_number];
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	int npayloads_sent;

	/* snapshot the GP values just pulled from the remote partition */
	ch_sn2->remote_GP = part->sn.sn2.remote_GPs[ch_number];

	/* See what, if anything, has changed for each connected channel */

	xpc_msgqueue_ref(ch);

	if (ch_sn2->w_remote_GP.get == ch_sn2->remote_GP.get &&
	    ch_sn2->w_remote_GP.put == ch_sn2->remote_GP.put) {
		/* nothing changed since GPs were last pulled */
		xpc_msgqueue_deref(ch);
		return;
	}

	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return;
	}

	/*
	 * First check to see if messages recently sent by us have been
	 * received by the other side. (The remote GET value will have
	 * changed since we last looked at it.)
	 */

	if (ch_sn2->w_remote_GP.get != ch_sn2->remote_GP.get) {

		/*
		 * We need to notify any senders that want to be notified
		 * that their sent messages have been received by their
		 * intended recipients. We need to do this before updating
		 * w_remote_GP.get so that we don't allocate the same message
		 * queue entries prematurely (see xpc_allocate_msg()).
		 */
		if (atomic_read(&ch->n_to_notify) > 0) {
			/*
			 * Notify senders that messages sent have been
			 * received and delivered by the other side.
			 */
			xpc_notify_senders_sn2(ch, xpMsgDelivered,
					       ch_sn2->remote_GP.get);
		}

		/*
		 * Clear msg->flags in previously sent messages, so that
		 * they're ready for xpc_allocate_msg().
		 */
		xpc_clear_local_msgqueue_flags_sn2(ch);

		ch_sn2->w_remote_GP.get = ch_sn2->remote_GP.get;

		dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, "
			"channel=%d\n", ch_sn2->w_remote_GP.get, ch->partid,
			ch->number);

		/*
		 * If anyone was waiting for message queue entries to become
		 * available, wake them up.
		 */
		if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
			wake_up(&ch->msg_allocate_wq);
	}

	/*
	 * Now check for newly sent messages by the other side. (The remote
	 * PUT value will have changed since we last looked at it.)
	 */

	if (ch_sn2->w_remote_GP.put != ch_sn2->remote_GP.put) {
		/*
		 * Clear msg->flags in previously received messages, so that
		 * they're ready for xpc_get_deliverable_payload_sn2().
		 */
		xpc_clear_remote_msgqueue_flags_sn2(ch);

		smp_wmb(); /* ensure flags have been cleared before bte_copy */
		ch_sn2->w_remote_GP.put = ch_sn2->remote_GP.put;

		dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, "
			"channel=%d\n", ch_sn2->w_remote_GP.put, ch->partid,
			ch->number);

		npayloads_sent = xpc_n_of_deliverable_payloads_sn2(ch);
		if (npayloads_sent > 0) {
			dev_dbg(xpc_chan, "msgs waiting to be copied and "
				"delivered=%d, partid=%d, channel=%d\n",
				npayloads_sent, ch->partid, ch->number);

			/* only wake kthreads once the connected callout ran */
			if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)
				xpc_activate_kthreads(ch, npayloads_sent);
		}
	}

	xpc_msgqueue_deref(ch);
}
2008-07-29 22:34:19 -07:00
/*
 * Pull the remote message referenced by 'get' into the local cached copy of
 * the remote message queue, pulling any earlier not-yet-pulled messages
 * along with it.  Returns a pointer into the cached copy, or NULL if we
 * were interrupted by a signal or the pull failed (which deactivates the
 * partition).
 */
static struct xpc_msg_sn2 *
xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	unsigned long remote_msg_pa;
	struct xpc_msg_sn2 *msg;
	u32 msg_index;
	u32 nmsgs;
	u64 msg_offset;
	enum xp_retval ret;

	/* serialize pullers; only one at a time advances next_msg_to_pull */
	if (mutex_lock_interruptible(&ch_sn2->msg_to_pull_mutex) != 0) {
		/* we were interrupted by a signal */
		return NULL;
	}

	while (get >= ch_sn2->next_msg_to_pull) {

		/* pull as many messages as are ready and able to be pulled */

		msg_index = ch_sn2->next_msg_to_pull % ch->remote_nentries;

		DBUG_ON(ch_sn2->next_msg_to_pull >= ch_sn2->w_remote_GP.put);
		nmsgs = ch_sn2->w_remote_GP.put - ch_sn2->next_msg_to_pull;
		if (msg_index + nmsgs > ch->remote_nentries) {
			/* ignore the ones that wrap the msg queue for now */
			nmsgs = ch->remote_nentries - msg_index;
		}

		msg_offset = msg_index * ch->entry_size;
		msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue +
		    msg_offset);
		remote_msg_pa = ch_sn2->remote_msgqueue_pa + msg_offset;

		ret = xpc_pull_remote_cachelines_sn2(part, msg, remote_msg_pa,
						     nmsgs * ch->entry_size);
		if (ret != xpSuccess) {

			dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
				" msg %ld from partition %d, channel=%d, "
				"ret=%d\n", nmsgs, ch_sn2->next_msg_to_pull,
				ch->partid, ch->number, ret);

			XPC_DEACTIVATE_PARTITION(part, ret);

			mutex_unlock(&ch_sn2->msg_to_pull_mutex);
			return NULL;
		}

		ch_sn2->next_msg_to_pull += nmsgs;
	}

	mutex_unlock(&ch_sn2->msg_to_pull_mutex);

	/* return the message we were looking for */
	msg_offset = (get % ch->remote_nentries) * ch->entry_size;
	msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue + msg_offset);

	return msg;
}
/*
 * Get the next deliverable message's payload.
 * Claims the next slot lock-free via cmpxchg on w_local_GP.get, then pulls
 * the message from the remote partition.  Returns NULL if the channel is
 * disconnecting, nothing is waiting, or the pull failed.
 */
static void *
xpc_get_deliverable_payload_sn2(struct xpc_channel *ch)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg_sn2 *msg;
	void *payload = NULL;
	s64 get;

	do {
		if (ch->flags & XPC_C_DISCONNECTING)
			break;

		get = ch_sn2->w_local_GP.get;
		smp_rmb();	/* guarantee that .get loads before .put */
		if (get == ch_sn2->w_remote_GP.put)
			break;	/* nothing new has arrived */

		/* There are messages waiting to be pulled and delivered.
		 * We need to try to secure one for ourselves. We'll do this
		 * by trying to increment w_local_GP.get and hope that no one
		 * else beats us to it. If they do, we'll we'll simply have
		 * to try again for the next one.
		 */

		if (cmpxchg(&ch_sn2->w_local_GP.get, get, get + 1) == get) {
			/* we got the entry referenced by get */

			dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, "
				"partid=%d, channel=%d\n", get + 1,
				ch->partid, ch->number);

			/* pull the message from the remote partition */

			msg = xpc_pull_remote_msg_sn2(ch, get);

			if (msg != NULL) {
				DBUG_ON(msg->number != get);
				DBUG_ON(msg->flags & XPC_M_SN2_DONE);
				DBUG_ON(!(msg->flags & XPC_M_SN2_READY));

				payload = &msg->payload;
			}
			break;
		}

	} while (1);

	return payload;
}
2008-07-29 22:34:07 -07:00
/*
 * Now we actually send the messages that are ready to be sent by advancing
 * the local message queue's Put value and then send a chctl msgrequest to the
 * recipient partition.
 */
static void
xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg_sn2 *msg;
	s64 put = initial_put + 1;
	int send_msgrequest = 0;

	while (1) {

		/* advance 'put' past every consecutive READY entry */
		while (1) {
			if (put == ch_sn2->w_local_GP.put)
				break;

			msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->
						     local_msgqueue + (put %
						     ch->local_nentries) *
						     ch->entry_size);

			if (!(msg->flags & XPC_M_SN2_READY))
				break;

			put++;
		}

		if (put == initial_put) {
			/* nothing's changed */
			break;
		}

		if (cmpxchg_rel(&ch_sn2->local_GP->put, initial_put, put) !=
		    initial_put) {
			/* someone else beat us to it */
			DBUG_ON(ch_sn2->local_GP->put < initial_put);
			break;
		}

		/* we just set the new value of local_GP->put */

		dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, "
			"channel=%d\n", put, ch->partid, ch->number);

		send_msgrequest = 1;

		/*
		 * We need to ensure that the message referenced by
		 * local_GP->put is not XPC_M_SN2_READY or that local_GP->put
		 * equals w_local_GP.put, so we'll go have a look.
		 */
		initial_put = put;
	}

	if (send_msgrequest)
		xpc_send_chctl_msgrequest_sn2(ch);
}
/*
 * Allocate an entry for a message from the message queue associated with the
 * specified channel.  May block (unless XPC_NOWAIT) until the remote side
 * consumes an entry; the claimed entry is returned via *address_of_msg.
 */
static enum xp_retval
xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags,
		     struct xpc_msg_sn2 **address_of_msg)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg_sn2 *msg;
	enum xp_retval ret;
	s64 put;

	/*
	 * Get the next available message entry from the local message queue.
	 * If none are available, we'll make sure that we grab the latest
	 * GP values.
	 */
	ret = xpTimeout;

	while (1) {

		put = ch_sn2->w_local_GP.put;
		smp_rmb();	/* guarantee that .put loads before .get */
		if (put - ch_sn2->w_remote_GP.get < ch->local_nentries) {

			/* There are available message entries. We need to try
			 * to secure one for ourselves. We'll do this by trying
			 * to increment w_local_GP.put as long as someone else
			 * doesn't beat us to it. If they do, we'll have to
			 * try again.
			 */
			if (cmpxchg(&ch_sn2->w_local_GP.put, put, put + 1) ==
			    put) {
				/* we got the entry referenced by put */
				break;
			}
			continue;	/* try again */
		}

		/*
		 * There aren't any available msg entries at this time.
		 *
		 * In waiting for a message entry to become available,
		 * we set a timeout in case the other side is not sending
		 * completion interrupts. This lets us fake a notify IRQ
		 * that will cause the notify IRQ handler to fetch the latest
		 * GP values as if an interrupt was sent by the other side.
		 */
		if (ret == xpTimeout)
			xpc_send_chctl_local_msgrequest_sn2(ch);

		if (flags & XPC_NOWAIT)
			return xpNoWait;

		ret = xpc_allocate_msg_wait(ch);
		if (ret != xpInterrupted && ret != xpTimeout)
			return ret;
	}

	/* get the message's address and initialize it */
	msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->local_msgqueue +
				     (put % ch->local_nentries) *
				     ch->entry_size);

	DBUG_ON(msg->flags != 0);
	msg->number = put;

	dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, "
		"msg_number=%ld, partid=%d, channel=%d\n", put + 1,
		(void *)msg, msg->number, ch->partid, ch->number);

	*address_of_msg = msg;
	return xpSuccess;
}
/*
* Common code that does the actual sending of the message by advancing the
2008-07-29 22:34:10 -07:00
* local message queue ' s Put value and sends a chctl msgrequest to the
* partition the message is being sent to .
2008-07-29 22:34:07 -07:00
*/
static enum xp_retval
2008-07-29 22:34:19 -07:00
xpc_send_payload_sn2 ( struct xpc_channel * ch , u32 flags , void * payload ,
u16 payload_size , u8 notify_type , xpc_notify_func func ,
void * key )
2008-07-29 22:34:07 -07:00
{
enum xp_retval ret = xpSuccess ;
2008-07-29 22:34:18 -07:00
struct xpc_channel_sn2 * ch_sn2 = & ch - > sn . sn2 ;
2008-07-29 22:34:19 -07:00
struct xpc_msg_sn2 * msg = msg ;
struct xpc_notify_sn2 * notify = notify ;
2008-07-29 22:34:08 -07:00
s64 msg_number ;
s64 put ;
2008-07-29 22:34:07 -07:00
DBUG_ON ( notify_type = = XPC_N_CALL & & func = = NULL ) ;
2008-07-29 22:34:08 -07:00
2008-07-29 22:34:19 -07:00
if ( XPC_MSG_SIZE ( payload_size ) > ch - > entry_size )
2008-07-29 22:34:08 -07:00
return xpPayloadTooBig ;
xpc_msgqueue_ref ( ch ) ;
2008-07-29 22:34:07 -07:00
if ( ch - > flags & XPC_C_DISCONNECTING ) {
2008-07-29 22:34:08 -07:00
ret = ch - > reason ;
goto out_1 ;
}
if ( ! ( ch - > flags & XPC_C_CONNECTED ) ) {
ret = xpNotConnected ;
goto out_1 ;
2008-07-29 22:34:07 -07:00
}
2008-07-29 22:34:08 -07:00
ret = xpc_allocate_msg_sn2 ( ch , flags , & msg ) ;
if ( ret ! = xpSuccess )
goto out_1 ;
msg_number = msg - > number ;
2008-07-29 22:34:07 -07:00
if ( notify_type ! = 0 ) {
/*
* Tell the remote side to send an ACK interrupt when the
* message has been delivered .
*/
2008-07-29 22:34:19 -07:00
msg - > flags | = XPC_M_SN2_INTERRUPT ;
2008-07-29 22:34:07 -07:00
atomic_inc ( & ch - > n_to_notify ) ;
2008-07-29 22:34:18 -07:00
notify = & ch_sn2 - > notify_queue [ msg_number % ch - > local_nentries ] ;
2008-07-29 22:34:07 -07:00
notify - > func = func ;
notify - > key = key ;
notify - > type = notify_type ;
2008-07-29 22:34:14 -07:00
/* ??? Is a mb() needed here? */
2008-07-29 22:34:07 -07:00
if ( ch - > flags & XPC_C_DISCONNECTING ) {
/*
* An error occurred between our last error check and
* this one . We will try to clear the type field from
* the notify entry . If we succeed then
* xpc_disconnect_channel ( ) didn ' t already process
* the notify entry .
*/
if ( cmpxchg ( & notify - > type , notify_type , 0 ) = =
notify_type ) {
atomic_dec ( & ch - > n_to_notify ) ;
ret = ch - > reason ;
}
2008-07-29 22:34:08 -07:00
goto out_1 ;
2008-07-29 22:34:07 -07:00
}
}
2008-07-29 22:34:08 -07:00
memcpy ( & msg - > payload , payload , payload_size ) ;
2008-07-29 22:34:19 -07:00
msg - > flags | = XPC_M_SN2_READY ;
2008-07-29 22:34:07 -07:00
/*
* The preceding store of msg - > flags must occur before the following
2008-07-29 22:34:09 -07:00
* load of local_GP - > put .
2008-07-29 22:34:07 -07:00
*/
2009-01-29 14:25:06 -08:00
smp_mb ( ) ;
2008-07-29 22:34:07 -07:00
/* see if the message is next in line to be sent, if so send it */
2008-07-29 22:34:18 -07:00
put = ch_sn2 - > local_GP - > put ;
2008-07-29 22:34:07 -07:00
if ( put = = msg_number )
xpc_send_msgs_sn2 ( ch , put ) ;
2008-07-29 22:34:08 -07:00
out_1 :
2008-07-29 22:34:07 -07:00
xpc_msgqueue_deref ( ch ) ;
return ret ;
}
/*
 * Now we actually acknowledge the messages that have been delivered and ack'd
 * by advancing the cached remote message queue's Get value and if requested
 * send a chctl msgrequest to the message sender's partition.
 *
 * If a message has XPC_M_SN2_INTERRUPT set, send an interrupt to the partition
 * that sent the message.
 */
static void
xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg_sn2 *msg;
	s64 get = initial_get + 1;
	int send_msgrequest = 0;

	while (1) {

		/* advance 'get' past every consecutive DONE entry */
		while (1) {
			if (get == ch_sn2->w_local_GP.get)
				break;

			msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->
						     remote_msgqueue + (get %
						     ch->remote_nentries) *
						     ch->entry_size);

			if (!(msg->flags & XPC_M_SN2_DONE))
				break;

			msg_flags |= msg->flags;
			get++;
		}

		if (get == initial_get) {
			/* nothing's changed */
			break;
		}

		if (cmpxchg_rel(&ch_sn2->local_GP->get, initial_get, get) !=
		    initial_get) {
			/* someone else beat us to it */
			DBUG_ON(ch_sn2->local_GP->get <= initial_get);
			break;
		}

		/* we just set the new value of local_GP->get */

		dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, "
			"channel=%d\n", get, ch->partid, ch->number);

		send_msgrequest = (msg_flags & XPC_M_SN2_INTERRUPT);

		/*
		 * We need to ensure that the message referenced by
		 * local_GP->get is not XPC_M_SN2_DONE or that local_GP->get
		 * equals w_local_GP.get, so we'll go have a look.
		 */
		initial_get = get;
	}

	if (send_msgrequest)
		xpc_send_chctl_msgrequest_sn2(ch);
}
/*
 * Acknowledge receipt of a delivered payload: mark the underlying message
 * DONE and, if it is next in line, advance local_GP->get (which may send a
 * msgrequest back to the sending partition).
 */
static void
xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
{
	struct xpc_msg_sn2 *msg;
	s64 msg_number;
	s64 get;

	/* recover the full message entry from its embedded payload */
	msg = container_of(payload, struct xpc_msg_sn2, payload);
	msg_number = msg->number;

	dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n",
		(void *)msg, msg_number, ch->partid, ch->number);

	/* the message must live at the slot its number maps to */
	DBUG_ON((((u64)msg - (u64)ch->sn.sn2.remote_msgqueue) / ch->entry_size) !=
		msg_number % ch->remote_nentries);
	DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
	DBUG_ON(msg->flags & XPC_M_SN2_DONE);

	msg->flags |= XPC_M_SN2_DONE;

	/*
	 * The preceding store of msg->flags must occur before the following
	 * load of local_GP->get.
	 */
	smp_mb();

	/*
	 * See if this message is next in line to be acknowledged as having
	 * been delivered.
	 */
	get = ch->sn.sn2.local_GP->get;
	if (get == msg_number)
		xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
}
2008-07-29 22:34:09 -07:00
int
2008-07-29 22:34:05 -07:00
xpc_init_sn2 ( void )
{
2008-07-29 22:34:09 -07:00
int ret ;
2008-07-29 22:34:13 -07:00
size_t buf_size ;
2008-07-29 22:34:09 -07:00
2008-07-29 22:34:18 -07:00
xpc_setup_partitions_sn = xpc_setup_partitions_sn_sn2 ;
2009-04-02 16:59:10 -07:00
xpc_teardown_partitions_sn = xpc_teardown_partitions_sn_sn2 ;
2008-07-29 22:34:16 -07:00
xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_sn2 ;
2008-07-29 22:34:18 -07:00
xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_sn2 ;
2008-07-29 22:34:07 -07:00
xpc_increment_heartbeat = xpc_increment_heartbeat_sn2 ;
xpc_offline_heartbeat = xpc_offline_heartbeat_sn2 ;
xpc_online_heartbeat = xpc_online_heartbeat_sn2 ;
xpc_heartbeat_init = xpc_heartbeat_init_sn2 ;
xpc_heartbeat_exit = xpc_heartbeat_exit_sn2 ;
2008-07-29 22:34:17 -07:00
xpc_get_remote_heartbeat = xpc_get_remote_heartbeat_sn2 ;
2008-07-29 22:34:07 -07:00
2008-07-29 22:34:09 -07:00
xpc_request_partition_activation = xpc_request_partition_activation_sn2 ;
xpc_request_partition_reactivation =
xpc_request_partition_reactivation_sn2 ;
xpc_request_partition_deactivation =
xpc_request_partition_deactivation_sn2 ;
xpc_cancel_partition_deactivation_request =
xpc_cancel_partition_deactivation_request_sn2 ;
2008-07-29 22:34:09 -07:00
xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2 ;
2008-07-29 22:34:18 -07:00
xpc_setup_ch_structures_sn = xpc_setup_ch_structures_sn_sn2 ;
xpc_teardown_ch_structures_sn = xpc_teardown_ch_structures_sn_sn2 ;
2008-07-29 22:34:06 -07:00
xpc_make_first_contact = xpc_make_first_contact_sn2 ;
2008-07-29 22:34:18 -07:00
2008-07-29 22:34:10 -07:00
xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_sn2 ;
2008-07-29 22:34:18 -07:00
xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_sn2 ;
xpc_send_chctl_closereply = xpc_send_chctl_closereply_sn2 ;
xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_sn2 ;
xpc_send_chctl_openreply = xpc_send_chctl_openreply_sn2 ;
xpc_save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_sn2 ;
xpc_setup_msg_structures = xpc_setup_msg_structures_sn2 ;
xpc_teardown_msg_structures = xpc_teardown_msg_structures_sn2 ;
2008-07-29 22:34:09 -07:00
xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2 ;
2008-07-29 22:34:10 -07:00
xpc_process_msg_chctl_flags = xpc_process_msg_chctl_flags_sn2 ;
2008-07-29 22:34:19 -07:00
xpc_n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_sn2 ;
xpc_get_deliverable_payload = xpc_get_deliverable_payload_sn2 ;
2008-07-29 22:34:07 -07:00
2008-07-29 22:34:09 -07:00
xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_sn2 ;
xpc_indicate_partition_disengaged =
xpc_indicate_partition_disengaged_sn2 ;
2008-07-29 22:34:18 -07:00
xpc_partition_engaged = xpc_partition_engaged_sn2 ;
xpc_any_partition_engaged = xpc_any_partition_engaged_sn2 ;
2008-07-29 22:34:09 -07:00
xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_sn2 ;
2008-07-29 22:34:19 -07:00
xpc_send_payload = xpc_send_payload_sn2 ;
xpc_received_payload = xpc_received_payload_sn2 ;
if ( offsetof ( struct xpc_msg_sn2 , payload ) > XPC_MSG_HDR_MAX_SIZE ) {
dev_err ( xpc_part , " header portion of struct xpc_msg_sn2 is "
" larger than %d \n " , XPC_MSG_HDR_MAX_SIZE ) ;
return - E2BIG ;
}
2008-07-29 22:34:09 -07:00
2008-07-29 22:34:13 -07:00
buf_size = max ( XPC_RP_VARS_SIZE ,
XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES_SN2 ) ;
xpc_remote_copy_buffer_sn2 = xpc_kmalloc_cacheline_aligned ( buf_size ,
GFP_KERNEL ,
& xpc_remote_copy_buffer_base_sn2 ) ;
if ( xpc_remote_copy_buffer_sn2 = = NULL ) {
dev_err ( xpc_part , " can't get memory for remote copy buffer \n " ) ;
return - ENOMEM ;
}
2008-07-29 22:34:11 -07:00
/* open up protections for IPI and [potentially] amo operations */
2008-07-29 22:34:09 -07:00
xpc_allow_IPI_ops_sn2 ( ) ;
2008-07-29 22:34:11 -07:00
xpc_allow_amo_ops_shub_wars_1_1_sn2 ( ) ;
2008-07-29 22:34:09 -07:00
/*
* This is safe to do before the xpc_hb_checker thread has started
* because the handler releases a wait queue . If an interrupt is
* received before the thread is waiting , it will not go to sleep ,
* but rather immediately process the interrupt .
*/
ret = request_irq ( SGI_XPC_ACTIVATE , xpc_handle_activate_IRQ_sn2 , 0 ,
" xpc hb " , NULL ) ;
if ( ret ! = 0 ) {
dev_err ( xpc_part , " can't register ACTIVATE IRQ handler, "
" errno=%d \n " , - ret ) ;
xpc_disallow_IPI_ops_sn2 ( ) ;
2008-07-29 22:34:13 -07:00
kfree ( xpc_remote_copy_buffer_base_sn2 ) ;
2008-07-29 22:34:09 -07:00
}
return ret ;
2008-07-29 22:34:05 -07:00
}
void
xpc_exit_sn2 ( void )
{
2008-07-29 22:34:09 -07:00
free_irq ( SGI_XPC_ACTIVATE , NULL ) ;
xpc_disallow_IPI_ops_sn2 ( ) ;
2008-07-29 22:34:13 -07:00
kfree ( xpc_remote_copy_buffer_base_sn2 ) ;
2008-07-29 22:34:05 -07:00
}