/*
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 * Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com)
 * Copyright (C) 2000 - 2001 by Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002 Ralf Baechle
 * Copyright (C) 2000, 2001 Broadcom Corporation
 */
# ifndef __ASM_SMP_H
# define __ASM_SMP_H
# include <linux/bitops.h>
# include <linux/linkage.h>
2009-06-19 14:05:26 +01:00
# include <linux/smp.h>
2005-04-16 15:20:36 -07:00
# include <linux/threads.h>
# include <linux/cpumask.h>
2007-11-19 12:23:51 +00:00
2011-07-26 16:09:06 -07:00
# include <linux/atomic.h>
2007-11-19 12:23:51 +00:00
# include <asm/smp-ops.h>
2005-04-16 15:20:36 -07:00
2007-03-02 20:42:04 +00:00
extern int smp_num_siblings ;
extern cpumask_t cpu_sibling_map [ ] ;
2014-06-26 11:41:26 +08:00
extern cpumask_t cpu_core_map [ ] ;
2016-07-13 14:12:52 +01:00
extern cpumask_t cpu_foreign_map [ ] ;
2007-03-02 20:42:04 +00:00
2017-12-12 09:57:47 +00:00
static inline int raw_smp_processor_id ( void )
{
# if defined(__VDSO__)
extern int vdso_smp_processor_id ( void )
__compiletime_error ( " VDSO should not call smp_processor_id() " ) ;
return vdso_smp_processor_id ( ) ;
# else
return current_thread_info ( ) - > cpu ;
# endif
}
# define raw_smp_processor_id raw_smp_processor_id
2005-04-16 15:20:36 -07:00
/*
 * Map from cpu id to sequential logical cpu number.  This will only
 * not be idempotent when cpus failed to come on-line.
 */
2017-09-28 12:34:04 -05:00
extern int __cpu_number_map [ CONFIG_MIPS_NR_CPU_NR_MAP ] ;
2005-04-16 15:20:36 -07:00
# define cpu_number_map(cpu) __cpu_number_map[cpu]
/* The reverse map from sequential logical cpu number to cpu id. */
extern int __cpu_logical_map [ NR_CPUS ] ;
# define cpu_logical_map(cpu) __cpu_logical_map[cpu]
# define NO_PROC_ID (-1)
2013-01-22 12:59:30 +01:00
# define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */
2005-04-16 15:20:36 -07:00
# define SMP_CALL_FUNCTION 0x2
2008-12-11 15:33:32 -08:00
/* Octeon - Tell another core to flush its icache */
# define SMP_ICACHE_FLUSH 0x4
2016-12-02 09:58:28 +01:00
# define SMP_ASK_C0COUNT 0x8
2008-12-11 15:33:32 -08:00
2014-02-14 16:30:52 +00:00
/* Mask of CPUs which are currently definitely operating coherently */
extern cpumask_t cpu_coherent_mask ;
2017-07-12 14:37:25 -07:00
extern asmlinkage void smp_bootstrap ( void ) ;
2005-04-16 15:20:36 -07:00
2016-07-13 14:12:45 +01:00
extern void calculate_cpu_foreign_map ( void ) ;
2005-04-16 15:20:36 -07:00
/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything.  Worst case is that we lose a reschedule ...
 */
static inline void smp_send_reschedule ( int cpu )
{
2017-07-19 09:21:03 +01:00
extern const struct plat_smp_ops * mp_ops ; /* private */
2007-11-19 12:23:51 +00:00
mp_ops - > send_ipi_single ( cpu , SMP_RESCHEDULE_YOURSELF ) ;
2005-04-16 15:20:36 -07:00
}
2009-06-23 10:00:31 +01:00
#ifdef CONFIG_HOTPLUG_CPU
/* Generic CPU hotplug hooks, delegated to the platform's SMP ops. */
static inline int __cpu_disable(void)
{
	extern const struct plat_smp_ops *mp_ops;	/* private */

	return mp_ops->cpu_disable();
}

static inline void __cpu_die(unsigned int cpu)
{
	extern const struct plat_smp_ops *mp_ops;	/* private */

	mp_ops->cpu_die(cpu);
}

/* Idle loop entered by an offlined CPU; implemented by platform code. */
extern void play_dead(void);
#endif
2018-09-11 14:49:21 -07:00
#ifdef CONFIG_KEXEC
/* Hand this (non-boot) CPU over to the platform's kexec shutdown path. */
static inline void kexec_nonboot_cpu(void)
{
	extern const struct plat_smp_ops *mp_ops;	/* private */

	/*
	 * Note: no 'return' here — returning an expression from a void
	 * function is an ISO C constraint violation (C99 6.8.6.4).
	 */
	mp_ops->kexec_nonboot_cpu();
}

/* Expose the platform's kexec_nonboot_cpu handler as an opaque pointer. */
static inline void *kexec_nonboot_cpu_func(void)
{
	extern const struct plat_smp_ops *mp_ops;	/* private */

	return mp_ops->kexec_nonboot_cpu;
}
#endif
2016-09-20 09:47:26 +01:00
/*
 * This function will set up the necessary IPIs for Linux to communicate
 * with the CPUs in mask.
 * Return 0 on success.
 */
int mips_smp_ipi_allocate ( const struct cpumask * mask ) ;
/*
 * This function will free up IPIs allocated with mips_smp_ipi_allocate to the
 * CPUs in mask, which must be a subset of the IPIs that have been configured.
 * Return 0 on success.
 */
int mips_smp_ipi_free ( const struct cpumask * mask ) ;
2009-09-25 15:35:28 +01:00
static inline void arch_send_call_function_single_ipi ( int cpu )
{
2017-07-19 09:21:03 +01:00
extern const struct plat_smp_ops * mp_ops ; /* private */
2009-09-25 15:35:28 +01:00
2020-04-23 19:44:21 -04:00
mp_ops - > send_ipi_single ( cpu , SMP_CALL_FUNCTION ) ;
2009-09-25 15:35:28 +01:00
}
static inline void arch_send_call_function_ipi_mask ( const struct cpumask * mask )
{
2017-07-19 09:21:03 +01:00
extern const struct plat_smp_ops * mp_ops ; /* private */
2009-09-25 15:35:28 +01:00
mp_ops - > send_ipi_mask ( mask , SMP_CALL_FUNCTION ) ;
}
2008-06-17 10:45:23 +02:00
2005-04-16 15:20:36 -07:00
# endif /* __ASM_SMP_H */