/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008-2009 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) uv-based functions.
 *
 *     Architecture specific implementation of common functions.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
#include <asm/uv/uv_irq.h>
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#endif
#include "../sgi-gru/gru.h"
#include "../sgi-gru/grukservices.h"
#include "xpc.h"

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
struct uv_IO_APIC_route_entry {
	__u64	vector		:  8,
		delivery_mode	:  3,
		dest_mode	:  1,
		delivery_status	:  1,
		polarity	:  1,
		__reserved_1	:  1,
		trigger		:  1,
		mask		:  1,
		__reserved_2	: 15,
		dest		: 32;
};
#endif

static struct xpc_heartbeat_uv *xpc_heartbeat_uv;

#define XPC_ACTIVATE_MSG_SIZE_UV	(1 * GRU_CACHE_LINE_BYTES)
#define XPC_ACTIVATE_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_ACTIVATE_MSG_SIZE_UV)
#define XPC_ACTIVATE_IRQ_NAME		"xpc_activate"

#define XPC_NOTIFY_MSG_SIZE_UV		(2 * GRU_CACHE_LINE_BYTES)
#define XPC_NOTIFY_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME		"xpc_notify"

static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;

static int
xpc_setup_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex);
		spin_lock_init(&part_uv->flags_lock);
		part_uv->remote_act_state = XPC_P_AS_INACTIVE;
	}

	return 0;
}

static void
xpc_teardown_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;
	unsigned long irq_flags;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		if (part_uv->cached_activate_gru_mq_desc != NULL) {
			mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			kfree(part_uv->cached_activate_gru_mq_desc);
			part_uv->cached_activate_gru_mq_desc = NULL;
			mutex_unlock(&part_uv->
				     cached_activate_gru_mq_desc_mutex);
		}
	}
}

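/*
 * Set up the IRQ that is to be raised when a message arrives on the given
 * GRU message queue.  On x86_64 a UV IRQ is allocated via uv_setup_irq();
 * on ia64 one of the fixed SGI_XPC_* vectors is programmed, together with
 * the destination CPU, directly into the watchlist MMR.
 */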
static int
xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
{
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
			       UV_AFFINITY_CPU);
	if (mq->irq < 0) {
		dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
			-mq->irq);
		return mq->irq;
	}

	mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_ACTIVATE;
	else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_NOTIFY;
	else
		return -EINVAL;

	mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
#else
#error not a supported configuration
#endif

	return 0;
}

static void
xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
{
#if defined CONFIG_X86_64
	uv_teardown_irq(mq->irq);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode;
	unsigned long mmr_value;

	mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
	mmr_value = 1UL << 16;

	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
#else
#error not a supported configuration
#endif
}

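/*
 * Register the message queue's memory with the BIOS/SAL watchlist so that
 * an interrupt is generated whenever a GRU message queue operation targets
 * this queue.
 */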
static int
xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;

#if defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address),
					 mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
			"ret=%d\n", ret);
		return ret;
	}
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	ret = sn_mq_watchlist_alloc(mq->mmr_blade, (void *)uv_gpa(mq->address),
				    mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
			ret);
		return -EBUSY;
	}
#else
#error not a supported configuration
#endif

	mq->watchlist_num = ret;
	return 0;
}

static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;

#if defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
	BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
	BUG_ON(ret != SALRET_OK);
#else
#error not a supported configuration
#endif
}

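/*
 * Allocate and initialize a GRU message queue: pages allocated on the
 * target cpu's node, watchlist registration, irq setup, creation of the
 * GRU mq descriptor and opening of the memory protections so that other
 * partitions can write into it.
 */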
static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
		     irq_handler_t irq_handler)
{
	enum xp_retval xp_ret;
	int ret;
	int nid;
	int pg_order;
	struct page *page;
	struct xpc_gru_mq_uv *mq;
	struct uv_IO_APIC_route_entry *mmr_value;

	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
	if (mq == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a xpc_gru_mq_uv structure\n");
		ret = -ENOMEM;
		goto out_0;
	}

	mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
				  GFP_KERNEL);
	if (mq->gru_mq_desc == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a gru_message_queue_desc structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	pg_order = get_order(mq_size);
	mq->order = pg_order + PAGE_SHIFT;
	mq_size = 1UL << mq->order;

	mq->mmr_blade = uv_cpu_to_blade_id(cpu);

	nid = cpu_to_node(cpu);
	page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				      pg_order);
	if (page == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
		ret = -ENOMEM;
		goto out_2;
	}
	mq->address = page_address(page);

	/* enable generation of irq when GRU mq operation occurs to this mq */
	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
	if (ret != 0)
		goto out_3;

	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
	if (ret != 0)
		goto out_4;

	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
	if (ret != 0) {
		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
			mq->irq, -ret);
		goto out_5;
	}

	mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
	ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
				       nid, mmr_value->vector, mmr_value->dest);
	if (ret != 0) {
		dev_err(xpc_part, "gru_create_message_queue() returned "
			"error=%d\n", ret);
		ret = -EINVAL;
		goto out_6;
	}

	/* allow other partitions to access this GRU mq */
	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
	if (xp_ret != xpSuccess) {
		ret = -EACCES;
		goto out_6;
	}

	return mq;

	/* something went wrong */
out_6:
	free_irq(mq->irq, NULL);
out_5:
	xpc_release_gru_mq_irq_uv(mq);
out_4:
	xpc_gru_mq_watchlist_free_uv(mq);
out_3:
	free_pages((unsigned long)mq->address, pg_order);
out_2:
	kfree(mq->gru_mq_desc);
out_1:
	kfree(mq);
out_0:
	return ERR_PTR(ret);
}

static void
xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
{
	unsigned int mq_size;
	int pg_order;
	int ret;

	/* disallow other partitions to access GRU mq */
	mq_size = 1UL << mq->order;
	ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
	BUG_ON(ret != xpSuccess);

	/* unregister irq handler and release mq irq/vector mapping */
	free_irq(mq->irq, NULL);
	xpc_release_gru_mq_irq_uv(mq);

	/* disable generation of irq when GRU mq op occurs to this mq */
	xpc_gru_mq_watchlist_free_uv(mq);

	pg_order = mq->order - PAGE_SHIFT;
	free_pages((unsigned long)mq->address, pg_order);

	kfree(mq);
}

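/*
 * Send a single message into a (possibly remote) GRU message queue, retrying
 * indefinitely on the transient MQE_QUEUE_FULL and MQE_CONGESTION errors.
 */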
static enum xp_retval
xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg,
		 size_t msg_size)
{
	enum xp_retval xp_ret;
	int ret;

	while (1) {
		ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size);
		if (ret == MQE_OK) {
			xp_ret = xpSuccess;
			break;
		}

		if (ret == MQE_QUEUE_FULL) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_QUEUE_FULL\n");
			/* !!! handle QLimit reached; delay & try again */
			/* ??? Do we add a limit to the number of retries? */
			(void)msleep_interruptible(10);
		} else if (ret == MQE_CONGESTION) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_CONGESTION\n");
			/* !!! handle LB Overflow; simply try again */
			/* ??? Do we add a limit to the number of retries? */
		} else {
			/* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
			dev_err(xpc_chan, "gru_send_message_gpa() returned "
				"error=%d\n", ret);
			xp_ret = xpGruSendMqError;
			break;
		}
	}
	return xp_ret;
}

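/*
 * Act on the activate/reactivate/deactivate requests (act_state_req) that
 * were recorded by the activate message handlers before they woke the
 * heartbeat checker.
 */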
static void
xpc_process_activate_IRQ_rcvd_uv(void)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	u8 act_state_req;

	DBUG_ON(xpc_activate_IRQ_rcvd == 0);

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part = &xpc_partitions[partid];

		if (part->sn.uv.act_state_req == 0)
			continue;

		xpc_activate_IRQ_rcvd--;
		BUG_ON(xpc_activate_IRQ_rcvd < 0);

		act_state_req = part->sn.uv.act_state_req;
		part->sn.uv.act_state_req = 0;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else if (part->act_state == XPC_P_AS_DEACTIVATING)
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
			XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);

		} else {
			BUG();
		}

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (xpc_activate_IRQ_rcvd == 0)
			break;
	}
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
}

static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
			      struct xpc_activate_mq_msghdr_uv *msg_hdr,
			      int *wakeup_hb_checker)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_openclose_args *args;

	part_uv->remote_act_state = msg_hdr->act_state;

	switch (msg_hdr->type) {
	case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
		/* syncing of remote_act_state was just done above */
		break;

	case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_activate_req_uv *msg;

		/*
		 * ??? Do we deal here with ts_jiffies being different
		 * ??? if act_state != XPC_P_AS_INACTIVE instead of
		 * ??? below?
		 */
		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_activate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
		part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
		part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
		part_uv->heartbeat_gpa = msg->heartbeat_gpa;

		if (msg->activate_gru_mq_desc_gpa !=
		    part_uv->activate_gru_mq_desc_gpa) {
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			part_uv->activate_gru_mq_desc_gpa =
			    msg->activate_gru_mq_desc_gpa;
		}
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_deactivate_req_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_deactivate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = msg->reason;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closerequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->reason = msg->reason;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_closereply_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closereply_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openrequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->entry_size = msg->entry_size;
		args->local_nentries = msg->local_nentries;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_openreply_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openreply_uv, hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->remote_nentries = msg->remote_nentries;
		args->local_nentries = msg->local_nentries;
		args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
		struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_opencomplete_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags &= ~XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	default:
		dev_err(xpc_part, "received unknown activate_mq msg type=%d "
			"from partition=%d\n", msg_hdr->type, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadMsgType;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}

	if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
	    part->remote_rp_ts_jiffies != 0) {
		/*
		 * ??? Does what we do here need to be sensitive to
		 * ??? act_state or remote_act_state?
		 */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
	}
}

static irqreturn_t
xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr;
	short partid;
	struct xpc_partition *part;
	int wakeup_hb_checker = 0;
	int part_referenced;

	while (1) {
		msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc);
		if (msg_hdr == NULL)
			break;

		partid = msg_hdr->partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
				"received invalid partid=0x%x in message\n",
				partid);
		} else {
			part = &xpc_partitions[partid];

			part_referenced = xpc_part_ref(part);
			xpc_handle_activate_mq_msg_uv(part, msg_hdr,
						      &wakeup_hb_checker);
			if (part_referenced)
				xpc_part_deref(part);
		}

		gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr);
	}

	if (wakeup_hb_checker)
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	return IRQ_HANDLED;
}

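/*
 * Copy a remote partition's GRU message queue descriptor into a local buffer
 * so that messages can subsequently be sent directly into that partition's mq.
 */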
static enum xp_retval
xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc,
				unsigned long gru_mq_desc_gpa)
{
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa,
			       sizeof(struct gru_message_queue_desc));
	if (ret == xpSuccess)
		gru_mq_desc->mq = NULL;

	return ret;
}

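/*
 * Send a message to the remote partition's activate mq, (re)caching the
 * remote mq's GRU descriptor first if our cached copy has been invalidated.
 */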
static enum xp_retval
xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
			 int msg_type)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct gru_message_queue_desc *gru_mq_desc;
	unsigned long irq_flags;
	enum xp_retval ret;

	DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);

	msg_hdr->type = msg_type;
	msg_hdr->partid = xp_partition_id;
	msg_hdr->act_state = part->act_state;
	msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;

	mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
again:
	if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) {
		gru_mq_desc = part_uv->cached_activate_gru_mq_desc;
		if (gru_mq_desc == NULL) {
			gru_mq_desc = kmalloc(sizeof(struct
					      gru_message_queue_desc),
					      GFP_KERNEL);
			if (gru_mq_desc == NULL) {
				ret = xpNoMemory;
				goto done;
			}
			part_uv->cached_activate_gru_mq_desc = gru_mq_desc;
		}

		ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
						      part_uv->
						      activate_gru_mq_desc_gpa);
		if (ret != xpSuccess)
			goto done;

		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
	}

	/* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
	ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg,
			       msg_size);
	if (ret != xpSuccess) {
		smp_rmb();	/* ensure a fresh copy of part_uv->flags */
		if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV))
			goto again;
	}
done:
	mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex);
	return ret;
}

static void
xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
			      size_t msg_size, int msg_type)
{
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess))
		XPC_DEACTIVATE_PARTITION(part, ret);
}

static void
xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
			    void *msg, size_t msg_size, int msg_type)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess)) {
		if (irq_flags != NULL)
			spin_unlock_irqrestore(&ch->lock, *irq_flags);

		XPC_DEACTIVATE_PARTITION(part, ret);

		if (irq_flags != NULL)
			spin_lock_irqsave(&ch->lock, *irq_flags);
	}
}

static void
xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;

	/*
	 * !!! Make our side think that the remote partition sent an activate
	 * !!! mq message our way by doing what the activate IRQ handler would
	 * !!! do had one really been sent.
	 */

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	if (part_uv->act_state_req == 0)
		xpc_activate_IRQ_rcvd++;
	part_uv->act_state_req = act_state_req;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	wake_up_interruptible(&xpc_activate_IRQ_wq);
}

static enum xp_retval
xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
				  size_t *len)
{
	s64 status;
	enum xp_retval ret;

#if defined CONFIG_X86_64
	status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
					  (u64 *)len);
	if (status == BIOS_STATUS_SUCCESS)
		ret = xpSuccess;
	else if (status == BIOS_STATUS_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpBiosError;

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
	if (status == SALRET_OK)
		ret = xpSuccess;
	else if (status == SALRET_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpSalError;

#else
#error not a supported configuration
#endif

	return ret;
}

static int
xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp)
{
	xpc_heartbeat_uv =
	    &xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat;
	rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv);
	rp->sn.uv.activate_gru_mq_desc_gpa =
	    uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
	return 0;
}

static void
xpc_allow_hb_uv(short partid)
{
}

static void
xpc_disallow_hb_uv(short partid)
{
}

static void
xpc_disallow_all_hbs_uv(void)
{
}

static void
xpc_increment_heartbeat_uv(void)
{
	xpc_heartbeat_uv->value++;
}

static void
xpc_offline_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 1;
}

static void
xpc_online_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_init_uv(void)
{
	xpc_heartbeat_uv->value = 1;
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_exit_uv(void)
{
	xpc_offline_heartbeat_uv();
}

static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat),
			       part_uv->heartbeat_gpa,
			       sizeof(struct xpc_heartbeat_uv));
	if (ret != xpSuccess)
		return ret;

	if (part_uv->cached_heartbeat.value == part->last_heartbeat &&
	    !part_uv->cached_heartbeat.offline) {

		ret = xpNoHeartbeat;
	} else {
		part->last_heartbeat = part_uv->cached_heartbeat.value;
	}

	return ret;
}

static void
xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
				    unsigned long remote_rp_gpa, int nasid)
{
	short partid = remote_rp->SAL_partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_activate_mq_msg_activate_req_uv msg;

	part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
	part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
	part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa;
	part->sn.uv.activate_gru_mq_desc_gpa =
	    remote_rp->sn.uv.activate_gru_mq_desc_gpa;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
		msg.rp_gpa = uv_gpa(xpc_rsvd_page);
		msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa;
		msg.activate_gru_mq_desc_gpa =
		    xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					      XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
	}

	if (part->act_state == XPC_P_AS_INACTIVE)
		xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_reactivation_uv(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_deactivation_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_deactivate_req_uv msg;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
	    part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {

		msg.reason = part->reason;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					      XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
	}
}

static void
xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}

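/*
 * The next few functions implement the simple spinlock-protected FIFOs used
 * for a channel's free send msg_slot list and its received message list.
 */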
static void
xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
{
	head->first = NULL;
	head->last = NULL;
	spin_lock_init(&head->lock);
	head->n_entries = 0;
}

static void *
xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
{
	unsigned long irq_flags;
	struct xpc_fifo_entry_uv *first;

	spin_lock_irqsave(&head->lock, irq_flags);
	first = head->first;
	if (head->first != NULL) {
		head->first = first->next;
		if (head->first == NULL)
			head->last = NULL;

		head->n_entries--;
		BUG_ON(head->n_entries < 0);

		first->next = NULL;
	}
	spin_unlock_irqrestore(&head->lock, irq_flags);
	return first;
}

static void
xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
		      struct xpc_fifo_entry_uv *last)
{
	unsigned long irq_flags;

	last->next = NULL;
	spin_lock_irqsave(&head->lock, irq_flags);
	if (head->last != NULL)
		head->last->next = last;
	else
		head->first = last;
	head->last = last;
	head->n_entries++;
	spin_unlock_irqrestore(&head->lock, irq_flags);
}

static int
xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
{
	return head->n_entries;
}

/*
 * Setup the channel structures that are uv specific.
 */
static enum xp_retval
xpc_setup_ch_structures_uv(struct xpc_partition *part)
{
	struct xpc_channel_uv *ch_uv;
	int ch_number;

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch_uv = &part->channels[ch_number].sn.uv;

		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
	}

	return xpSuccess;
}

/*
 * Teardown the channel structures that are uv specific.
 */
static void
xpc_teardown_ch_structures_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}

static enum xp_retval
xpc_make_first_contact_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	/*
	 * We send a sync msg to get the remote partition's remote_act_state
	 * updated to our current act_state which at this point should
	 * be XPC_P_AS_ACTIVATING.
	 */
	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);

	while (part->sn.uv.remote_act_state != XPC_P_AS_ACTIVATING) {

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}

static u64
xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
	return chctl.all_flags;
}

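/*
 * Allocate the array of send msg_slots for a channel, reducing
 * ch->local_nentries if the full amount can't be kzalloc'd.
 */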
static enum xp_retval
xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_send_msg_slot_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
		nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
		ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->send_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = &ch_uv->send_msg_slots[entry];

			msg_slot->msg_slot_number = entry;
			xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
					      &msg_slot->next);
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->local_nentries)
			ch->local_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		return xpSuccess;
	}

	return xpNoMemory;
}

static enum xp_retval
xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
		nbytes = nentries * ch->entry_size;
		ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->recv_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = ch_uv->recv_msg_slots +
			    entry * ch->entry_size;

			msg_slot->hdr.msg_slot_number = entry;
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->remote_nentries)
			ch->remote_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		return xpSuccess;
	}

	return xpNoMemory;
}

/*
 * Allocate msg_slots associated with the channel.
 */
static enum xp_retval
xpc_setup_msg_structures_uv(struct xpc_channel *ch)
{
	enum xp_retval ret;
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch->flags & XPC_C_SETUP);

	ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct
						   gru_message_queue_desc),
						   GFP_KERNEL);
	if (ch_uv->cached_notify_gru_mq_desc == NULL)
		return xpNoMemory;

	ret = xpc_allocate_send_msg_slot_uv(ch);
	if (ret == xpSuccess) {

		ret = xpc_allocate_recv_msg_slot_uv(ch);
		if (ret != xpSuccess) {
			kfree(ch_uv->send_msg_slots);
			xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		}
	}
	return ret;
}

/*
 * Free up msg_slots and clear other stuff that were setup for the specified
 * channel.
 */
static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(!spin_is_locked(&ch->lock));

	kfree(ch_uv->cached_notify_gru_mq_desc);
	ch_uv->cached_notify_gru_mq_desc = NULL;

	if (ch->flags & XPC_C_SETUP) {
		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		kfree(ch_uv->send_msg_slots);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
		kfree(ch_uv->recv_msg_slots);
	}
}

static void
xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closerequest_uv msg;

	msg.ch_number = ch->number;
	msg.reason = ch->reason;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
}

static void
xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closereply_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
}

static void
xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openrequest_uv msg;

	msg.ch_number = ch->number;
	msg.entry_size = ch->entry_size;
	msg.local_nentries = ch->local_nentries;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
}

static void
xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openreply_uv msg;

	msg.ch_number = ch->number;
	msg.local_nentries = ch->local_nentries;
	msg.remote_nentries = ch->remote_nentries;
	msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc);
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
}

static void
xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_opencomplete_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV);
}

static void
xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	xpc_wakeup_channel_mgr(part);
}

static enum xp_retval
xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
			       unsigned long gru_mq_desc_gpa)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL);
	return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc,
					       gru_mq_desc_gpa);
}

static void
xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
}

static void
xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
}

static void
xpc_assume_partition_disengaged_uv(short partid)
{
	struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
	unsigned long irq_flags;

	spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
	part_uv->flags &= ~XPC_P_ENGAGED_UV;
	spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
}

static int
xpc_partition_engaged_uv(short partid)
{
	return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
}

static int
xpc_any_partition_engaged_uv(void)
{
	struct xpc_partition_uv *part_uv;
	short partid;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
			return 1;
	}
	return 0;
}

static enum xp_retval
xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags,
			 struct xpc_send_msg_slot_uv **address_of_msg_slot)
{
	enum xp_retval ret;
	struct xpc_send_msg_slot_uv *msg_slot;
	struct xpc_fifo_entry_uv *entry;

	while (1) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
		if (entry != NULL)
			break;

		if (flags & XPC_NOWAIT)
			return xpNoWait;

		ret = xpc_allocate_msg_wait(ch);
		if (ret != xpInterrupted && ret != xpTimeout)
			return ret;
	}

	msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
	*address_of_msg_slot = msg_slot;
	return xpSuccess;
}

static void
xpc_free_msg_slot_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot)
{
	xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);

	/* wakeup anyone waiting for a free msg slot */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);
}

static void
xpc_notify_sender_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot,
		     enum xp_retval reason)
{
	xpc_notify_func func = msg_slot->func;

	if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) {

		atomic_dec(&ch->n_to_notify);

		dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);

		func(reason, ch->partid, ch->number, msg_slot->key);

		dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);
	}
}

static void
xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry = msg->hdr.msg_slot_number % ch->local_nentries;

	msg_slot = &ch->sn.uv.send_msg_slots[entry];

	BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
	msg_slot->msg_slot_number += ch->local_nentries;

	if (msg_slot->func != NULL)
		xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);

	xpc_free_msg_slot_uv(ch, msg_slot);
}

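/*
 * Handle one message received on the notify mq: either an ACK for a message
 * we sent earlier (hdr.size == 0) or a new payload to be queued for delivery.
 */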
static void
xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_channel *ch;
	struct xpc_channel_uv *ch_uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int ch_number = msg->hdr.ch_number;

	if (unlikely(ch_number >= part->nchannels)) {
		dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
			"channel number=0x%x in message from partid=%d\n",
			ch_number, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadChannelNumber;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		wake_up_interruptible(&xpc_activate_IRQ_wq);
		return;
	}

	ch = &part->channels[ch_number];
	xpc_msgqueue_ref(ch);

	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return;
	}

	/* see if we're really dealing with an ACK for a previously sent msg */
	if (msg->hdr.size == 0) {
		xpc_handle_notify_mq_ack_uv(ch, msg);
		xpc_msgqueue_deref(ch);
		return;
	}

	/* we're dealing with a normal message sent via the notify_mq */
	ch_uv = &ch->sn.uv;

	msg_slot = ch_uv->recv_msg_slots +
	    (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;

	BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number);
	BUG_ON(msg_slot->hdr.size != 0);

	memcpy(msg_slot, msg, msg->hdr.size);

	xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);

	if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
		/*
		 * If there is an existing idle kthread get it to deliver
		 * the payload, otherwise we'll have to get the channel mgr
		 * for this partition to create a kthread to do the delivery.
		 */
		if (atomic_read(&ch->kthreads_idle) > 0)
			wake_up_nr(&ch->idle_wq, 1);
		else
			xpc_send_chctl_local_msgrequest_uv(part, ch->number);
	}
	xpc_msgqueue_deref(ch);
}

static irqreturn_t
xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_notify_mq_msg_uv *msg;
	short partid;
	struct xpc_partition *part;

	while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) !=
	       NULL) {

		partid = msg->hdr.partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received "
				"invalid partid=0x%x in message\n", partid);
		} else {
			part = &xpc_partitions[partid];

			if (xpc_part_ref(part)) {
				xpc_handle_notify_mq_msg_uv(part, msg);
				xpc_part_deref(part);
			}
		}

		gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg);
	}

	return IRQ_HANDLED;
}

static int
xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)
{
	return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);
}

static void
xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
{
	struct xpc_channel *ch = &part->channels[ch_number];
	int ndeliverable_payloads;

	xpc_msgqueue_ref(ch);

	ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);

	if (ndeliverable_payloads > 0 &&
	    (ch->flags & XPC_C_CONNECTED) &&
	    (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {

		xpc_activate_kthreads(ch, ndeliverable_payloads);
	}

	xpc_msgqueue_deref(ch);
}

static enum xp_retval
xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
		    u16 payload_size, u8 notify_type, xpc_notify_func func,
		    void *key)
{
	enum xp_retval ret = xpSuccess;
	struct xpc_send_msg_slot_uv *msg_slot = NULL;
	struct xpc_notify_mq_msg_uv *msg;
	u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
	size_t msg_size;

	DBUG_ON(notify_type != XPC_N_CALL);

	msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
	if (msg_size > ch->entry_size)
		return xpPayloadTooBig;

	xpc_msgqueue_ref(ch);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		goto out_1;
	}
	if (!(ch->flags & XPC_C_CONNECTED)) {
		ret = xpNotConnected;
		goto out_1;
	}

	ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
	if (ret != xpSuccess)
		goto out_1;

	if (func != NULL) {
		atomic_inc(&ch->n_to_notify);

		msg_slot->key = key;
		smp_wmb(); /* a non-NULL func must hit memory after the key */
		msg_slot->func = func;

		if (ch->flags & XPC_C_DISCONNECTING) {
			ret = ch->reason;
			goto out_2;
		}
	}

	msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
	msg->hdr.partid = xp_partition_id;
	msg->hdr.ch_number = ch->number;
	msg->hdr.size = msg_size;
	msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
	memcpy(&msg->payload, payload, payload_size);

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       msg_size);
	if (ret == xpSuccess)
		goto out_1;

	XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
out_2:
	if (func != NULL) {
		/*
		 * Try to NULL the msg_slot's func field. If we fail, then
		 * xpc_notify_senders_of_disconnect_uv() beat us to it, in
		 * which case we need to pretend we succeeded to send the
		 * message since the user will get a callout for the
		 * disconnect error by xpc_notify_senders_of_disconnect_uv(),
		 * and an error returned here as well would only confuse them.
		 * Additionally, since in this case the channel is being
		 * disconnected we don't need to put the msg_slot back on the
		 * free list.
		 */
		if (cmpxchg(&msg_slot->func, func, NULL) != func) {
			ret = xpSuccess;
			goto out_1;
		}

		msg_slot->key = NULL;
		atomic_dec(&ch->n_to_notify);
	}
	xpc_free_msg_slot_uv(ch, msg_slot);
out_1:
	xpc_msgqueue_deref(ch);
	return ret;
}

/*
 * Tell the callers of xpc_send_notify() that the status of their payloads
 * is unknown because the channel is now disconnecting.
 *
 * We don't worry about putting these msg_slots on the free list since the
 * msg_slots themselves are about to be kfree'd.
 */
static void
xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry;

	DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));

	for (entry = 0; entry < ch->local_nentries; entry++) {

		if (atomic_read(&ch->n_to_notify) == 0)
			break;

		msg_slot = &ch->sn.uv.send_msg_slots[entry];
		if (msg_slot->func != NULL)
			xpc_notify_sender_uv(ch, msg_slot, ch->reason);
	}
}

/*
 * Get the next deliverable message's payload.
 */
static void *
xpc_get_deliverable_payload_uv(struct xpc_channel *ch)
{
	struct xpc_fifo_entry_uv *entry;
	struct xpc_notify_mq_msg_uv *msg;
	void *payload = NULL;

	if (!(ch->flags & XPC_C_DISCONNECTING)) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list);
		if (entry != NULL) {
			msg = container_of(entry, struct xpc_notify_mq_msg_uv,
					   hdr.u.next);
			payload = &msg->payload;
		}
	}
	return payload;
}

static void
xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
{
	struct xpc_notify_mq_msg_uv *msg;
	enum xp_retval ret;

	msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload);

	/* return an ACK to the sender of this message */

	msg->hdr.partid = xp_partition_id;
	msg->hdr.size = 0;	/* size of zero indicates this is an ACK */

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       sizeof(struct xpc_notify_mq_msghdr_uv));
	if (ret != xpSuccess)
		XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);

	msg->hdr.msg_slot_number += ch->remote_nentries;
}

static struct xpc_arch_operations xpc_arch_ops_uv = {
	.setup_partitions = xpc_setup_partitions_uv,
	.teardown_partitions = xpc_teardown_partitions_uv,
	.process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
	.get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv,
	.setup_rsvd_page = xpc_setup_rsvd_page_uv,

	.allow_hb = xpc_allow_hb_uv,
	.disallow_hb = xpc_disallow_hb_uv,
	.disallow_all_hbs = xpc_disallow_all_hbs_uv,
	.increment_heartbeat = xpc_increment_heartbeat_uv,
	.offline_heartbeat = xpc_offline_heartbeat_uv,
	.online_heartbeat = xpc_online_heartbeat_uv,
	.heartbeat_init = xpc_heartbeat_init_uv,
	.heartbeat_exit = xpc_heartbeat_exit_uv,
	.get_remote_heartbeat = xpc_get_remote_heartbeat_uv,

	.request_partition_activation =
		xpc_request_partition_activation_uv,
	.request_partition_reactivation =
		xpc_request_partition_reactivation_uv,
	.request_partition_deactivation =
		xpc_request_partition_deactivation_uv,
	.cancel_partition_deactivation_request =
		xpc_cancel_partition_deactivation_request_uv,

	.setup_ch_structures = xpc_setup_ch_structures_uv,
	.teardown_ch_structures = xpc_teardown_ch_structures_uv,

	.make_first_contact = xpc_make_first_contact_uv,

	.get_chctl_all_flags = xpc_get_chctl_all_flags_uv,
	.send_chctl_closerequest = xpc_send_chctl_closerequest_uv,
	.send_chctl_closereply = xpc_send_chctl_closereply_uv,
	.send_chctl_openrequest = xpc_send_chctl_openrequest_uv,
	.send_chctl_openreply = xpc_send_chctl_openreply_uv,
	.send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv,
	.process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv,

	.save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv,

	.setup_msg_structures = xpc_setup_msg_structures_uv,
	.teardown_msg_structures = xpc_teardown_msg_structures_uv,

	.indicate_partition_engaged = xpc_indicate_partition_engaged_uv,
	.indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv,
	.assume_partition_disengaged = xpc_assume_partition_disengaged_uv,
	.partition_engaged = xpc_partition_engaged_uv,
	.any_partition_engaged = xpc_any_partition_engaged_uv,

	.n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv,
	.send_payload = xpc_send_payload_uv,
	.get_deliverable_payload = xpc_get_deliverable_payload_uv,
	.received_payload = xpc_received_payload_uv,
	.notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
};

int
xpc_init_uv(void)
{
	xpc_arch_ops = xpc_arch_ops_uv;

	if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
		dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
			XPC_MSG_HDR_MAX_SIZE);
		return -E2BIG;
	}

	xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
						  XPC_ACTIVATE_IRQ_NAME,
						  xpc_handle_activate_IRQ_uv);
	if (IS_ERR(xpc_activate_mq_uv))
		return PTR_ERR(xpc_activate_mq_uv);

	xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
						XPC_NOTIFY_IRQ_NAME,
						xpc_handle_notify_IRQ_uv);
	if (IS_ERR(xpc_notify_mq_uv)) {
		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
		return PTR_ERR(xpc_notify_mq_uv);
	}

	return 0;
}

void
xpc_exit_uv(void)
{
	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
}