/*
 *	SGI UltraViolet TLB flush routines.
 *
 *	(c) 2008 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>

static struct bau_control **uv_bau_table_bases __read_mostly;
static int uv_bau_retry_limit __read_mostly;

/* position of pnode (which is nasid>>1): */
static int uv_nshift __read_mostly;

/* base pnode in this partition */
static int uv_partition_base_pnode __read_mostly;

static unsigned long uv_mmask __read_mostly;

static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);

/*
 * Determine the first node on a blade.
 */
static int __init blade_to_first_node(int blade)
{
	int node, b;

	for_each_online_node(node) {
		b = uv_node_to_blade_id(node);
		if (blade == b)
			return node;
	}
	return -1; /* shouldn't happen */
}

/*
 * Determine the apicid of the first cpu on a blade.
 */
static int __init blade_to_first_apicid(int blade)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (blade == uv_cpu_to_blade_id(cpu))
			return per_cpu(x86_cpu_to_apicid, cpu);
	return -1;
}

/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
static void uv_reply_to_message(int resource,
				struct bau_payload_queue_entry *msg,
				struct bau_msg_status *msp)
{
	unsigned long dw;

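	/*
	 * The write of these two bits to the ALIAS mmr below clears both
	 * the Pending bit and the Timeout bit for this resource, as
	 * described in the comment above.
	 */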
	dw = (1 << (resource + UV_SW_ACK_NPENDING)) | (1 << resource);
	msg->replied_to = 1;
	msg->sw_ack_vector = 0;
	if (msp)
		msp->seen_by.bits = 0;
	uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
}

/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpu's may come here at the same time for this message.
 */
static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
				   int msg_slot, int sw_ack_slot)
{
	unsigned long this_cpu_mask;
	struct bau_msg_status *msp;
	int cpu;

	msp = __get_cpu_var(bau_control).msg_statuses + msg_slot;
	cpu = uv_blade_processor_id();
	msg->number_of_cpus =
		uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
	this_cpu_mask = 1UL << cpu;
	if (msp->seen_by.bits & this_cpu_mask)
		return;
	atomic_or_long(&msp->seen_by.bits, this_cpu_mask);

	if (msg->replied_to == 1)
		return;

	if (msg->address == TLB_FLUSH_ALL) {
		local_flush_tlb();
		__get_cpu_var(ptcstats).alltlb++;
	} else {
		__flush_tlb_one(msg->address);
		__get_cpu_var(ptcstats).onetlb++;
	}

	__get_cpu_var(ptcstats).requestee++;

	atomic_inc_short(&msg->acknowledge_count);
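	/*
	 * If every cpu on this blade has now seen the message, this cpu was
	 * the last; free the s/w ack resource, which also sends the reply
	 * to the requesting cpu.
	 */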
	if (msg->number_of_cpus == msg->acknowledge_count)
		uv_reply_to_message(sw_ack_slot, msg, msp);
}

/*
 * Examine the payload queue on one distribution node to see
 * which messages have not been seen, and which cpu(s) have not seen them.
 *
 * Returns the number of cpu's that have not responded.
 */
static int uv_examine_destination(struct bau_control *bau_tablesp, int sender)
{
	struct bau_payload_queue_entry *msg;
	struct bau_msg_status *msp;
	int count = 0;
	int i;
	int j;

	for (msg = bau_tablesp->va_queue_first, i = 0; i < DEST_Q_SIZE;
	     msg++, i++) {
		if ((msg->sending_cpu == sender) && (!msg->replied_to)) {
			msp = bau_tablesp->msg_statuses + i;
			printk(KERN_DEBUG
			       "blade %d: address:%#lx %d of %d, not cpu(s): ",
			       i, msg->address, msg->acknowledge_count,
			       msg->number_of_cpus);
			for (j = 0; j < msg->number_of_cpus; j++) {
				if (!((1L << j) & msp->seen_by.bits)) {
					count++;
					printk("%d ", j);
				}
			}
			printk("\n");
		}
	}
	return count;
}

/*
 * Examine the payload queue on all the distribution nodes to see
 * which messages have not been seen, and which cpu(s) have not seen them.
 *
 * Returns the number of cpu's that have not responded.
 */
static int uv_examine_destinations(struct bau_target_nodemask *distribution)
{
	int sender;
	int i;
	int count = 0;

	sender = smp_processor_id();
	for (i = 0; i < sizeof(struct bau_target_nodemask) * BITSPERBYTE; i++) {
		if (!bau_node_isset(i, distribution))
			continue;
		count += uv_examine_destination(uv_bau_table_bases[i], sender);
	}
	return count;
}

/*
 * wait for completion of a broadcast message
 *
 * return COMPLETE, RETRY or GIVEUP
 */
static int uv_wait_completion(struct bau_desc *bau_desc,
			      unsigned long mmr_offset, int right_shift)
{
	int exams = 0;
	long destination_timeouts = 0;
	long source_timeouts = 0;
	unsigned long descriptor_status;
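
	/*
	 * Spin until the hardware-maintained status for this cpu's
	 * descriptor (the UV_ACT_STATUS_MASK bits at right_shift in the
	 * activation status mmr) returns to DESC_STATUS_IDLE.
	 */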
	while ((descriptor_status = (((unsigned long)
			uv_read_local_mmr(mmr_offset) >>
			right_shift) & UV_ACT_STATUS_MASK)) !=
			DESC_STATUS_IDLE) {
		if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
			source_timeouts++;
			if (source_timeouts > SOURCE_TIMEOUT_LIMIT)
				source_timeouts = 0;
			__get_cpu_var(ptcstats).s_retry++;
			return FLUSH_RETRY;
		}
		/*
		 * spin here looking for progress at the destinations
		 */
		if (descriptor_status == DESC_STATUS_DESTINATION_TIMEOUT) {
			destination_timeouts++;
			if (destination_timeouts > DESTINATION_TIMEOUT_LIMIT) {
				/*
				 * returns number of cpus not responding
				 */
				if (uv_examine_destinations
				    (&bau_desc->distribution) == 0) {
					__get_cpu_var(ptcstats).d_retry++;
					return FLUSH_RETRY;
				}
				exams++;
				if (exams >= uv_bau_retry_limit) {
					printk(KERN_DEBUG
					       "uv_flush_tlb_others");
					printk("giving up on cpu %d\n",
					       smp_processor_id());
					return FLUSH_GIVEUP;
				}
				/*
				 * delays can hang the simulator
				   udelay(1000);
				 */
				destination_timeouts = 0;
			}
		}
		cpu_relax();
	}
	return FLUSH_COMPLETE;
}

/**
 * uv_flush_send_and_wait
 *
 * Send a broadcast and wait for a broadcast message to complete.
 *
 * The flush_mask contains the cpus the broadcast was sent to.
 *
 * Returns NULL if all remote flushing was done. The mask is zeroed.
 * Returns @flush_mask if some remote flushing remains to be done. The
 * mask will have some bits still set.
 */
const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
					     struct bau_desc *bau_desc,
					     struct cpumask *flush_mask)
{
	int completion_status = 0;
	int right_shift;
	int tries = 0;
	int pnode;
	int bit;
	unsigned long mmr_offset;
	unsigned long index;
	cycles_t time1;
	cycles_t time2;
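
	/*
	 * Each activation status mmr holds UV_CPUS_PER_ACT_STATUS status
	 * fields of UV_ACT_STATUS_SIZE bits each; pick the register and
	 * the bit offset that correspond to this cpu's descriptor.
	 */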
	if (cpu < UV_CPUS_PER_ACT_STATUS) {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
		right_shift = cpu * UV_ACT_STATUS_SIZE;
	} else {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
		right_shift =
		    ((cpu - UV_CPUS_PER_ACT_STATUS) * UV_ACT_STATUS_SIZE);
	}
	time1 = get_cycles();
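	/*
	 * Writing this cpu's descriptor index, with the PUSH bit set, to
	 * the activation control mmr starts the broadcast; resend as long
	 * as uv_wait_completion() asks for a retry.
	 */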
	do {
		tries++;
		index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
			cpu;
		uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
		completion_status = uv_wait_completion(bau_desc, mmr_offset,
						       right_shift);
	} while (completion_status == FLUSH_RETRY);
	time2 = get_cycles();
	__get_cpu_var(ptcstats).sflush += (time2 - time1);
	if (tries > 1)
		__get_cpu_var(ptcstats).retriesok++;

	if (completion_status == FLUSH_GIVEUP) {
		/*
		 * Cause the caller to do an IPI-style TLB shootdown on
		 * the cpu's, all of which are still in the mask.
		 */
		__get_cpu_var(ptcstats).ptc_i++;
		return flush_mask;
	}

	/*
	 * Success, so clear the remote cpu's from the mask so we don't
	 * use the IPI method of shootdown on them.
	 */
	for_each_cpu(bit, flush_mask) {
		pnode = uv_cpu_to_pnode(bit);
		if (pnode == this_pnode)
			continue;
		cpumask_clear_cpu(bit, flush_mask);
	}
	if (!cpumask_empty(flush_mask))
		return flush_mask;
	return NULL;
}

static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);

/**
 * uv_flush_tlb_others - globally purge translation cache of a virtual
 * address or all TLB's
 * @cpumask: mask of all cpu's in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
 * @cpu: the current cpu
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLB's on specified processors.
 *
 * The caller has derived the cpumask from the mm_struct. This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a nodemask of the nodes containing
 * the cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done. The returned pointer is valid till preemption is re-enabled.
 */
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
					  struct mm_struct *mm,
					  unsigned long va, unsigned int cpu)
{
	struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask);
	int i;
	int bit;
	int pnode;
	int uv_cpu;
	int this_pnode;
	int locals = 0;
	struct bau_desc *bau_desc;

	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));

	uv_cpu = uv_blade_processor_id();
	this_pnode = uv_hub_info->pnode;
	bau_desc = __get_cpu_var(bau_control).descriptor_base;
	bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu;

	bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);

	i = 0;
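	/*
	 * Build the distribution field as a bitmap of the target pnodes,
	 * expressed relative to the first pnode in this partition
	 * (uv_partition_base_pnode); cpus on the local pnode are only
	 * counted.
	 */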
	for_each_cpu(bit, flush_mask) {
		pnode = uv_cpu_to_pnode(bit);
		BUG_ON(pnode > (UV_DISTRIBUTION_SIZE - 1));
		if (pnode == this_pnode) {
			locals++;
			continue;
		}
		bau_node_set(pnode - uv_partition_base_pnode,
			     &bau_desc->distribution);
		i++;
	}
	if (i == 0) {
		/*
		 * no off_node flushing; return status for local node
		 */
		if (locals)
			return flush_mask;
		else
			return NULL;
	}
	__get_cpu_var(ptcstats).requestor++;
	__get_cpu_var(ptcstats).ntargeted += i;

	bau_desc->payload.address = va;
	bau_desc->payload.sending_cpu = cpu;

	return uv_flush_send_and_wait(uv_cpu, this_pnode, bau_desc, flush_mask);
}

/*
 * The BAU message interrupt comes here. (registered by set_intr_gate)
 * See entry_64.S
 *
 * We received a broadcast assist message.
 *
 * Interrupts may have been disabled; this interrupt could represent
 * the receipt of several messages.
 *
 * All cores/threads on this node get this interrupt.
 * The last one to see it does the s/w ack.
 * (the resource will not be freed until noninterruptable cpus see this
 *  interrupt; hardware will timeout the s/w ack and reply ERROR)
 */
void uv_bau_message_interrupt(struct pt_regs *regs)
{
	struct bau_payload_queue_entry *va_queue_first;
	struct bau_payload_queue_entry *va_queue_last;
	struct bau_payload_queue_entry *msg;
	struct pt_regs *old_regs = set_irq_regs(regs);
	cycles_t time1;
	cycles_t time2;
	int msg_slot;
	int sw_ack_slot;
	int fw;
	int count = 0;
	unsigned long local_pnode;

	ack_APIC_irq();
	exit_idle();
	irq_enter();

	time1 = get_cycles();

	local_pnode = uv_blade_to_pnode(uv_numa_blade_id());

	va_queue_first = __get_cpu_var(bau_control).va_queue_first;
	va_queue_last = __get_cpu_var(bau_control).va_queue_last;

	msg = __get_cpu_var(bau_control).bau_msg_head;
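	/*
	 * Walk the circular payload queue from the saved head; an entry
	 * with a non-zero s/w ack vector is a message that still holds its
	 * ack resources. Stop at the first entry whose vector is zero.
	 */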
	while (msg->sw_ack_vector) {
		count++;
		fw = msg->sw_ack_vector;
		msg_slot = msg - va_queue_first;
		sw_ack_slot = ffs(fw) - 1;

		uv_bau_process_message(msg, msg_slot, sw_ack_slot);

		msg++;
		if (msg > va_queue_last)
			msg = va_queue_first;
		__get_cpu_var(bau_control).bau_msg_head = msg;
	}
	if (!count)
		__get_cpu_var(ptcstats).nomsg++;
	else if (count > 1)
		__get_cpu_var(ptcstats).multmsg++;

	time2 = get_cycles();
	__get_cpu_var(ptcstats).dflush += (time2 - time1);

	irq_exit();
	set_irq_regs(old_regs);
}

/*
 * uv_enable_timeouts
 *
 * Each target blade (i.e. blades that have cpu's) needs to have
 * shootdown message timeouts enabled. The timeout does not cause
 * an interrupt, but causes an error message to be returned to
 * the sender.
 */
static void uv_enable_timeouts(void)
{
	int blade;
	int nblades;
	int pnode;
	unsigned long mmr_image;

	nblades = uv_num_possible_blades();

	for (blade = 0; blade < nblades; blade++) {
		if (!uv_blade_nr_possible_cpus(blade))
			continue;

		pnode = uv_blade_to_pnode(blade);
		mmr_image =
		    uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
		/*
		 * Set the timeout period and then lock it in, in three
		 * steps; captures and locks in the period.
		 *
		 * To program the period, the SOFT_ACK_MODE must be off.
		 */
		mmr_image &= ~((unsigned long)1 <<
			       UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
		/*
		 * Set the 4-bit period.
		 */
		mmr_image &= ~((unsigned long)0xf <<
			       UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
		mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
			      UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
		/*
		 * Subsequent reversals of the timebase bit (3) cause an
		 * immediate timeout of one or all INTD resources as
		 * indicated in bits 2:0 (7 causes all of them to timeout).
		 */
		mmr_image |= ((unsigned long)1 <<
			      UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
	}
}

static void *uv_ptc_seq_start(struct seq_file *file, loff_t *offset)
{
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void *uv_ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void uv_ptc_seq_stop(struct seq_file *file, void *data)
{
}

/*
 * Display the statistics thru /proc
 * data points to the cpu number
 */
static int uv_ptc_seq_show(struct seq_file *file, void *data)
{
	struct ptc_stats *stat;
	int cpu;

	cpu = *(loff_t *)data;

	if (!cpu) {
		seq_printf(file,
		"# cpu requestor requestee one all sretry dretry ptc_i ");
		seq_printf(file,
		"sw_ack sflush dflush sok dnomsg dmult starget\n");
	}
	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
		stat = &per_cpu(ptcstats, cpu);
		seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld ",
			   cpu, stat->requestor,
			   stat->requestee, stat->onetlb, stat->alltlb,
			   stat->s_retry, stat->d_retry, stat->ptc_i);
		seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n",
			   uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
					UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
			   stat->sflush, stat->dflush,
			   stat->retriesok, stat->nomsg,
			   stat->multmsg, stat->ntargeted);
	}

	return 0;
}

/*
 *  0: display meaning of the statistics
 * >0: retry limit
 */
static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
				 size_t count, loff_t *data)
{
	long newmode;
	char optstr[64];

	if (count == 0 || count > sizeof(optstr))
		return -EINVAL;
	if (copy_from_user(optstr, user, count))
		return -EFAULT;
	optstr[count - 1] = '\0';
	if (strict_strtoul(optstr, 10, &newmode) < 0) {
		printk(KERN_DEBUG "%s is invalid\n", optstr);
		return -EINVAL;
	}

	if (newmode == 0) {
		printk(KERN_DEBUG "# cpu: cpu number\n");
		printk(KERN_DEBUG
		"requestor: times this cpu was the flush requestor\n");
		printk(KERN_DEBUG
		"requestee: times this cpu was requested to flush its TLBs\n");
		printk(KERN_DEBUG
		"one: times requested to flush a single address\n");
		printk(KERN_DEBUG
		"all: times requested to flush all TLB's\n");
		printk(KERN_DEBUG
		"sretry: number of retries of source-side timeouts\n");
		printk(KERN_DEBUG
		"dretry: number of retries of destination-side timeouts\n");
		printk(KERN_DEBUG
		"ptc_i: times UV fell through to IPI-style flushes\n");
		printk(KERN_DEBUG
		"sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
		printk(KERN_DEBUG
		"sflush_us: cycles spent in uv_flush_tlb_others()\n");
		printk(KERN_DEBUG
		"dflush_us: cycles spent in handling flush requests\n");
		printk(KERN_DEBUG "sok: successes on retry\n");
		printk(KERN_DEBUG "dnomsg: interrupts with no message\n");
		printk(KERN_DEBUG
		"dmult: interrupts with multiple messages\n");
		printk(KERN_DEBUG "starget: nodes targeted\n");
	} else {
		uv_bau_retry_limit = newmode;
		printk(KERN_DEBUG "timeout retry limit:%d\n",
		       uv_bau_retry_limit);
	}

	return count;
}

static const struct seq_operations uv_ptc_seq_ops = {
	.start		= uv_ptc_seq_start,
	.next		= uv_ptc_seq_next,
	.stop		= uv_ptc_seq_stop,
	.show		= uv_ptc_seq_show
};

static int uv_ptc_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &uv_ptc_seq_ops);
}

static const struct file_operations proc_uv_ptc_operations = {
	.open		= uv_ptc_proc_open,
	.read		= seq_read,
	.write		= uv_ptc_proc_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init uv_ptc_init(void)
{
	struct proc_dir_entry *proc_uv_ptc;

	if (!is_uv_system())
		return 0;

	proc_uv_ptc = create_proc_entry(UV_PTC_BASENAME, 0444, NULL);
	if (!proc_uv_ptc) {
		printk(KERN_ERR "unable to create %s proc entry\n",
		       UV_PTC_BASENAME);
		return -EINVAL;
	}
	proc_uv_ptc->proc_fops = &proc_uv_ptc_operations;
	return 0;
}

/*
 * begin the initialization of the per-blade control structures
 */
static struct bau_control * __init uv_table_bases_init(int blade, int node)
{
	int i;
	struct bau_msg_status *msp;
	struct bau_control *bau_tabp;

	bau_tabp =
	    kmalloc_node(sizeof(struct bau_control), GFP_KERNEL, node);
	BUG_ON(!bau_tabp);

	bau_tabp->msg_statuses =
	    kmalloc_node(sizeof(struct bau_msg_status) *
			 DEST_Q_SIZE, GFP_KERNEL, node);
	BUG_ON(!bau_tabp->msg_statuses);

	for (i = 0, msp = bau_tabp->msg_statuses; i < DEST_Q_SIZE; i++, msp++)
		bau_cpubits_clear(&msp->seen_by, (int)
				  uv_blade_nr_possible_cpus(blade));

	uv_bau_table_bases[blade] = bau_tabp;

	return bau_tabp;
}

/*
 * finish the initialization of the per-blade control structures
 */
static void __init
uv_table_bases_finish(int blade,
		      struct bau_control *bau_tablesp,
		      struct bau_desc *adp)
{
	struct bau_control *bcp;
	int cpu;

	for_each_present_cpu(cpu) {
		if (blade != uv_cpu_to_blade_id(cpu))
			continue;

		bcp = (struct bau_control *)&per_cpu(bau_control, cpu);

		bcp->bau_msg_head = bau_tablesp->va_queue_first;
		bcp->va_queue_first = bau_tablesp->va_queue_first;
		bcp->va_queue_last = bau_tablesp->va_queue_last;
		bcp->msg_statuses = bau_tablesp->msg_statuses;
		bcp->descriptor_base = adp;
	}
}

/*
 * initialize the sending side's sending buffers
 */
static struct bau_desc * __init
uv_activation_descriptor_init(int node, int pnode)
{
	int i;
	unsigned long pa;
	unsigned long m;
	unsigned long n;
	unsigned long mmr_image;
	struct bau_desc *adp;
	struct bau_desc *ad2;

	adp = (struct bau_desc *)kmalloc_node(16384, GFP_KERNEL, node);
	BUG_ON(!adp);

	pa = uv_gpa(adp);		/* need the real nasid */
	n = pa >> uv_nshift;
	m = pa & uv_mmask;

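	/*
	 * Point the hub's descriptor base register at this block: the pnode
	 * portion of the global address (n) in the upper field and the
	 * node-relative offset (m) in the lower field. The register is
	 * only rewritten if it already holds a non-zero value.
	 */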
	mmr_image = uv_read_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE);
	if (mmr_image) {
		uv_write_global_mmr64(pnode, (unsigned long)
				      UVH_LB_BAU_SB_DESCRIPTOR_BASE,
				      (n << UV_DESC_BASE_PNODE_SHIFT | m));
	}

	for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) {
		memset(ad2, 0, sizeof(struct bau_desc));
		ad2->header.sw_ack_flag = 1;
		/*
		 * base_dest_nodeid is the first node in the partition, so
		 * the bit map will indicate partition-relative node numbers.
		 * note that base_dest_nodeid is actually a nasid.
		 */
		ad2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
		ad2->header.command = UV_NET_ENDPOINT_INTD;
		ad2->header.int_both = 1;
		/*
		 * all others need to be set to zero:
		 *   fairness chaining multilevel count replied_to
		 */
	}
	return adp;
}

/*
 * initialize the destination side's receiving buffers
 */
static struct bau_payload_queue_entry * __init
uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
{
	struct bau_payload_queue_entry *pqp;
	unsigned long pa;
	int pn;
	char *cp;

	pqp = (struct bau_payload_queue_entry *) kmalloc_node(
		(DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
		GFP_KERNEL, node);
	BUG_ON(!pqp);

	cp = (char *)pqp + 31;
	pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
	bau_tablesp->va_queue_first = pqp;
	/*
	 * need the pnode of where the memory was really allocated
	 */
	pa = uv_gpa(pqp);
	pn = pa >> uv_nshift;
	uv_write_global_mmr64(pnode,
			      UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
			      ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
			      uv_physnodeaddr(pqp));
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
			      uv_physnodeaddr(pqp));
	bau_tablesp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
			      (unsigned long)
			      uv_physnodeaddr(bau_tablesp->va_queue_last));
	memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);

	return pqp;
}

/*
 * Initialization of each UV blade's structures
 */
static int __init uv_init_blade(int blade)
{
	int node;
	int pnode;
	unsigned long pa;
	unsigned long apicid;
	struct bau_desc *adp;
	struct bau_payload_queue_entry *pqp;
	struct bau_control *bau_tablesp;

	node = blade_to_first_node(blade);
	bau_tablesp = uv_table_bases_init(blade, node);
	pnode = uv_blade_to_pnode(blade);
	adp = uv_activation_descriptor_init(node, pnode);
	pqp = uv_payload_queue_init(node, pnode, bau_tablesp);
	uv_table_bases_finish(blade, bau_tablesp, adp);
	/*
	 * the below initialization can't be in firmware because the
	 * messaging IRQ will be determined by the OS
	 */
	apicid = blade_to_first_apicid(blade);
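	/*
	 * Deliver BAU interrupts on this blade as vector UV_BAU_MESSAGE to
	 * the first cpu on the blade: the apicid goes in the upper 32 bits
	 * of UVH_BAU_DATA_CONFIG and the vector in the low byte.
	 */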
	pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
	if ((pa & 0xff) != UV_BAU_MESSAGE) {
		uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
				      ((apicid << 32) | UV_BAU_MESSAGE));
	}
	return 0;
}

/*
 * Initialization of BAU-related structures
 */
static int __init uv_bau_init(void)
{
	int blade;
	int nblades;
	int cur_cpu;

	if (!is_uv_system())
		return 0;

	for_each_possible_cpu(cur_cpu)
		alloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
				       GFP_KERNEL, cpu_to_node(cur_cpu));

	uv_bau_retry_limit = 1;
	uv_nshift = uv_hub_info->n_val;
	uv_mmask = (1UL << uv_hub_info->n_val) - 1;
	nblades = uv_num_possible_blades();

	uv_bau_table_bases = (struct bau_control **)
	    kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
	BUG_ON(!uv_bau_table_bases);

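	/*
	 * Find the lowest pnode among the blades that have cpus; the
	 * descriptor distribution bits are expressed relative to this
	 * base pnode.
	 */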
	uv_partition_base_pnode = 0x7fffffff;
	for (blade = 0; blade < nblades; blade++)
		if (uv_blade_nr_possible_cpus(blade) &&
			(uv_blade_to_pnode(blade) < uv_partition_base_pnode))
			uv_partition_base_pnode = uv_blade_to_pnode(blade);

	for (blade = 0; blade < nblades; blade++)
		if (uv_blade_nr_possible_cpus(blade))
			uv_init_blade(blade);

	alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
	uv_enable_timeouts();

	return 0;
}
__initcall(uv_bau_init);
__initcall(uv_ptc_init);