/*
 *  linux/arch/arm/vfp/vfpmodule.c
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/init.h>

#include <asm/cputype.h>
#include <asm/thread_notify.h>
#include <asm/vfp.h>

#include "vfpinstr.h"
#include "vfp.h"

/*
 * Our undef handlers (in entry.S)
 */
void vfp_testing_entry(void);
void vfp_support_entry(void);
void vfp_null_entry(void);

void (*vfp_vector)(void) = vfp_null_entry;

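/*
 * Per-CPU pointer to the VFP state most recently loaded into that CPU's
 * hardware registers (the lazy save/restore owner); NULL when no thread's
 * state is currently held in hardware.
 */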
union vfp_state *last_VFP_context[NR_CPUS];

/*
 * Dual-use variable.
 * Used in startup: set to non-zero if VFP checks fail
 * After startup, holds VFP architecture
 */
unsigned int VFP_arch;

/*
 * Per-thread VFP initialization.
 */
static void vfp_thread_flush(struct thread_info *thread)
{
	union vfp_state *vfp = &thread->vfpstate;
	unsigned int cpu;

	memset(vfp, 0, sizeof(union vfp_state));

	vfp->hard.fpexc = FPEXC_EN;
	vfp->hard.fpscr = FPSCR_ROUND_NEAREST;

	/*
	 * Disable VFP to ensure we initialize it first.  We must ensure
	 * that the modification of last_VFP_context[] and hardware disable
	 * are done for the same CPU and without preemption.
	 */
	cpu = get_cpu();
	if (last_VFP_context[cpu] == vfp)
		last_VFP_context[cpu] = NULL;
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	put_cpu();
}

static void vfp_thread_exit(struct thread_info *thread)
{
	/* release case: Per-thread VFP cleanup. */
	union vfp_state *vfp = &thread->vfpstate;
	unsigned int cpu = get_cpu();

	if (last_VFP_context[cpu] == vfp)
		last_VFP_context[cpu] = NULL;
	put_cpu();
}

/*
 * When this function is called with the following 'cmd's, the following
 * is true while this function is being run:
 *  THREAD_NOTIFY_SWITCH:
 *   - the previously running thread will not be scheduled onto another CPU.
 *   - the next thread to be run (v) will not be running on another CPU.
 *   - thread->cpu is the local CPU number
 *   - not preemptible as we're called in the middle of a thread switch
 *  THREAD_NOTIFY_FLUSH:
 *   - the thread (v) will be running on the local CPU, so
 *	v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *	but may change at any time.
 *   - we could be preempted if tree preempt rcu is enabled, so
 *	it is unsafe to use thread->cpu.
 *  THREAD_NOTIFY_EXIT
 *   - the thread (v) will be running on the local CPU, so
 *	v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *	but may change at any time.
 *   - we could be preempted if tree preempt rcu is enabled, so
 *	it is unsafe to use thread->cpu.
 */
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	struct thread_info *thread = v;

	if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
		u32 fpexc = fmrx(FPEXC);

#ifdef CONFIG_SMP
		unsigned int cpu = thread->cpu;

		/*
		 * On SMP, if VFP is enabled, save the old state in
		 * case the thread migrates to a different CPU. The
		 * restoring is done lazily.
		 */
		if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
			vfp_save_state(last_VFP_context[cpu], fpexc);
			last_VFP_context[cpu]->hard.cpu = cpu;
		}
		/*
		 * Thread migration, just force the reloading of the
		 * state on the new CPU in case the VFP registers
		 * contain stale data.
		 */
		if (thread->vfpstate.hard.cpu != cpu)
			last_VFP_context[cpu] = NULL;
#endif

		/*
		 * Always disable VFP so we can lazily save/restore the
		 * old state.
		 */
		fmxr(FPEXC, fpexc & ~FPEXC_EN);
		return NOTIFY_DONE;
	}

	if (cmd == THREAD_NOTIFY_FLUSH)
		vfp_thread_flush(thread);
	else
		vfp_thread_exit(thread);

	return NOTIFY_DONE;
}

static struct notifier_block vfp_notifier_block = {
	.notifier_call	= vfp_notifier,
};

/*
 * Raise a SIGFPE for the current process.
 * sicode describes the signal being raised.
 */
static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
{
	siginfo_t info;

	memset(&info, 0, sizeof(info));

	info.si_signo = SIGFPE;
	info.si_code = sicode;
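	/* The saved PC points past the VFP instruction that bounced. */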
	info.si_addr = (void __user *)(instruction_pointer(regs) - 4);

	/*
	 * This is the same as NWFPE, because it's not clear what
	 * this is used for
	 */
	current->thread.error_code = 0;
	current->thread.trap_no = 6;

	send_sig_info(SIGFPE, &info, current);
}

static void vfp_panic(char *reason, u32 inst)
{
	int i;

	printk(KERN_ERR "VFP: Error: %s\n", reason);
	printk(KERN_ERR "VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
		fmrx(FPEXC), fmrx(FPSCR), inst);
	for (i = 0; i < 32; i += 2)
		printk(KERN_ERR "VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
		       i, vfp_get_float(i), i + 1, vfp_get_float(i + 1));
}

/*
 * Process bitmask of exception conditions.
 */
static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
{
	int si_code = 0;

	pr_debug("VFP: raising exceptions %08x\n", exceptions);

	if (exceptions == VFP_EXCEPTION_ERROR) {
		vfp_panic("unhandled bounce", inst);
		vfp_raise_sigfpe(0, regs);
		return;
	}

	/*
	 * If any of the status flags are set, update the FPSCR.
	 * Comparison instructions always return at least one of
	 * these flags set.
	 */
	if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V))
		fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V);

	fpscr |= exceptions;

	fmxr(FPSCR, fpscr);

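	/*
	 * RAISE() maps a cumulative exception status bit to a SIGFPE
	 * si_code when the corresponding trap-enable bit is also set,
	 * e.g. FPSCR_DZC with FPSCR_DZE enabled yields FPE_FLTDIV.
	 */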
#define RAISE(stat,en,sig)				\
	if (exceptions & stat && fpscr & en)		\
		si_code = sig;

	/*
	 * These are arranged in priority order, least to highest.
	 */
	RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV);
	RAISE(FPSCR_IXC, FPSCR_IXE, FPE_FLTRES);
	RAISE(FPSCR_UFC, FPSCR_UFE, FPE_FLTUND);
	RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
	RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);

	if (si_code)
		vfp_raise_sigfpe(si_code, regs);
}

/*
 * Emulate a VFP instruction.
 */
static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
{
	u32 exceptions = VFP_EXCEPTION_ERROR;

	pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr);

	if (INST_CPRTDO(inst)) {
		if (!INST_CPRT(inst)) {
			/*
			 * CPDO
			 */
			if (vfp_single(inst)) {
				exceptions = vfp_single_cpdo(inst, fpscr);
			} else {
				exceptions = vfp_double_cpdo(inst, fpscr);
			}
		} else {
			/*
			 * A CPRT instruction can not appear in FPINST2, nor
			 * can it cause an exception.  Therefore, we do not
			 * have to emulate it.
			 */
		}
	} else {
		/*
		 * A CPDT instruction can not appear in FPINST2, nor can
		 * it cause an exception.  Therefore, we do not have to
		 * emulate it.
		 */
	}
	return exceptions & ~VFP_NAN_FLAG;
}

/*
 * Package up a bounce condition.
 */
void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
{
	u32 fpscr, orig_fpscr, fpsid, exceptions;

	pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);

	/*
	 * At this point, FPEXC can have the following configuration:
	 *
	 *  EX DEX IXE
	 *  0   1   x   - synchronous exception
	 *  1   x   0   - asynchronous exception
	 *  1   x   1   - synchronous on VFP subarch 1 and asynchronous on later
	 *  0   0   1   - synchronous on VFP9 (non-standard subarch 1
	 *                implementation), undefined otherwise
	 *
	 * Clear various bits and enable access to the VFP so we can
	 * handle the bounce.
	 */
	fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK));

	fpsid = fmrx(FPSID);
	orig_fpscr = fpscr = fmrx(FPSCR);

	/*
	 * Check for the special VFP subarch 1 and FPSCR.IXE bit case
	 */
	if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT)
	    && (fpscr & FPSCR_IXE)) {
		/*
		 * Synchronous exception, emulate the trigger instruction
		 */
		goto emulate;
	}

	if (fpexc & FPEXC_EX) {
#ifndef CONFIG_CPU_FEROCEON
		/*
		 * Asynchronous exception. The instruction is read from FPINST
		 * and the interrupted instruction has to be restarted.
		 */
		trigger = fmrx(FPINST);
		regs->ARM_pc -= 4;
#endif
	} else if (!(fpexc & FPEXC_DEX)) {
		/*
		 * Illegal combination of bits. It can be caused by an
		 * unallocated VFP instruction but with FPSCR.IXE set and not
		 * on VFP subarch 1.
		 */
		vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
		goto exit;
	}

	/*
	 * Modify fpscr to indicate the number of iterations remaining.
	 * If FPEXC.EX is 0, FPEXC.DEX is 1 and the FPEXC.VV bit indicates
	 * whether FPEXC.VECITR or FPSCR.LEN is used.
	 */
	if (fpexc & (FPEXC_EX | FPEXC_VV)) {
		u32 len;

		len = fpexc + (1 << FPEXC_LENGTH_BIT);
		fpscr &= ~FPSCR_LENGTH_MASK;
		fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT);
	}

	/*
	 * Handle the first FP instruction.  We used to take note of the
	 * FPEXC bounce reason, but this appears to be unreliable.
	 * Emulate the bounced instruction instead.
	 */
	exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);

	/*
	 * If there isn't a second FP instruction, exit now. Note that
	 * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
	 */
	if (fpexc ^ (FPEXC_EX | FPEXC_FP2V))
		goto exit;

	/*
	 * The barrier() here prevents fpinst2 being read
	 * before the condition above.
	 */
	barrier();
	trigger = fmrx(FPINST2);

emulate:
	exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
exit:
	preempt_enable();
}

static void vfp_enable(void *unused)
{
	u32 access = get_copro_access();

	/*
	 * Enable full access to VFP (cp10 and cp11)
	 */
	set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
}

#ifdef CONFIG_PM
#include <linux/sysdev.h>

static int vfp_pm_suspend(struct sys_device *dev, pm_message_t state)
{
	struct thread_info *ti = current_thread_info();
	u32 fpexc = fmrx(FPEXC);

	/* if vfp is on, then save state for resumption */
	if (fpexc & FPEXC_EN) {
		printk(KERN_DEBUG "%s: saving vfp state\n", __func__);
		vfp_save_state(&ti->vfpstate, fpexc);

		/* disable, just in case */
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	}

	/* clear any information we had about last context state */
	memset(last_VFP_context, 0, sizeof(last_VFP_context));

	return 0;
}

static int vfp_pm_resume(struct sys_device *dev)
{
	/* ensure we have access to the vfp */
	vfp_enable(NULL);

	/* and disable it to ensure the next usage restores the state */
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);

	return 0;
}

static struct sysdev_class vfp_pm_sysclass = {
	.name		= "vfp",
	.suspend	= vfp_pm_suspend,
	.resume		= vfp_pm_resume,
};

static struct sys_device vfp_pm_sysdev = {
	.cls	= &vfp_pm_sysclass,
};

static void vfp_pm_init(void)
{
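	/*
	 * Register the class and device so the suspend/resume callbacks
	 * above are invoked across a system sleep transition.
	 */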
	sysdev_class_register(&vfp_pm_sysclass);
	sysdev_register(&vfp_pm_sysdev);
}

#else
static inline void vfp_pm_init(void) { }
#endif /* CONFIG_PM */

void vfp_sync_hwstate(struct thread_info *thread)
{
	unsigned int cpu = get_cpu();

	/*
	 * If the thread we're interested in is the current owner of the
	 * hardware VFP state, then we need to save its state.
	 */
	if (last_VFP_context[cpu] == &thread->vfpstate) {
		u32 fpexc = fmrx(FPEXC);

		/*
		 * Save the last VFP state on this CPU.
		 */
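		/*
		 * The store instructions used by vfp_save_state() only work
		 * with the VFP enabled, so enable it for the save and put
		 * the original FPEXC value back afterwards.
		 */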
		fmxr(FPEXC, fpexc | FPEXC_EN);
		vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
		fmxr(FPEXC, fpexc);
	}

	put_cpu();
}

void vfp_flush_hwstate(struct thread_info *thread)
{
	unsigned int cpu = get_cpu();

	/*
	 * If the thread we're interested in is the current owner of the
	 * hardware VFP state, then we need to discard that state.
	 */
	if (last_VFP_context[cpu] == &thread->vfpstate) {
		u32 fpexc = fmrx(FPEXC);

		fmxr(FPEXC, fpexc & ~FPEXC_EN);

		/*
		 * Set the context to NULL to force a reload the next time
		 * the thread uses the VFP.
		 */
		last_VFP_context[cpu] = NULL;
	}

#ifdef CONFIG_SMP
	/*
	 * For SMP we still have to take care of the case where the thread
	 * migrates to another CPU and then back to the original CPU on which
	 * the last VFP user is still the same thread. Mark the thread VFP
	 * state as belonging to a non-existent CPU so that the saved one will
	 * be reloaded in the above case.
	 */
	thread->vfpstate.hard.cpu = NR_CPUS;
#endif
	put_cpu();
}

/*
 * VFP hardware can lose all context when a CPU goes offline.
 * Safely clear our held state when a CPU has been killed, and
 * re-enable access to VFP when the CPU comes back online.
 *
 * Both CPU_DYING and CPU_STARTING are called on the CPU which
 * is being offlined/onlined.
 */
static int vfp_hotplug(struct notifier_block *b, unsigned long action,
	void *hcpu)
{
	if (action == CPU_DYING || action == CPU_DYING_FROZEN) {
		unsigned int cpu = (long)hcpu;
		last_VFP_context[cpu] = NULL;
	} else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		vfp_enable(NULL);
	return NOTIFY_OK;
}

/*
 * VFP support code initialisation.
 */
static int __init vfp_init(void)
{
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();

	if (cpu_arch >= CPU_ARCH_ARMv6)
		vfp_enable(NULL);

	/*
	 * First check that there is a VFP that we can use.
	 * The handler is already setup to just log calls, so
	 * we just need to read the VFPSID register.
	 */
	vfp_vector = vfp_testing_entry;
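	/*
	 * The barriers keep the compiler from reordering the FPSID read
	 * relative to the vfp_vector updates around it.
	 */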
	barrier();
	vfpsid = fmrx(FPSID);
	barrier();
	vfp_vector = vfp_null_entry;

	printk(KERN_INFO "VFP support v0.3: ");
	if (VFP_arch)
		printk("not present\n");
	else if (vfpsid & FPSID_NODOUBLE) {
		printk("no double precision support\n");
	} else {
		hotcpu_notifier(vfp_hotplug, 0);

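		/*
		 * vfp_enable() already ran on the boot CPU above (ARMv6+);
		 * enable coprocessor access on the other online CPUs too.
		 */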
		smp_call_function(vfp_enable, NULL, 1);

		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
			(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
			(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
			(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
			(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
			(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);

		vfp_vector = vfp_support_entry;

		thread_register_notifier(&vfp_notifier_block);
		vfp_pm_init();

		/*
		 * We detected VFP, and the support code is
		 * in place; report VFP support to userspace.
		 */
		elf_hwcap |= HWCAP_VFP;
#ifdef CONFIG_VFPv3
		if (VFP_arch >= 2) {
			elf_hwcap |= HWCAP_VFPv3;

			/*
			 * Check for VFPv3 D16. CPUs in this configuration
			 * only have 16 x 64bit registers.
			 */
			if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
				elf_hwcap |= HWCAP_VFPv3D16;
		}
#endif
#ifdef CONFIG_NEON
		/*
		 * Check for the presence of the Advanced SIMD
		 * load/store instructions, integer and single
		 * precision floating point operations. Only check
		 * for NEON if the hardware has the MVFR registers.
		 */
		if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
			if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
				elf_hwcap |= HWCAP_NEON;
		}
#endif
	}
	return 0;
}

late_initcall(vfp_init);