/*
 *  linux/arch/arm/vfp/vfpmodule.c
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/init.h>

#include <asm/thread_notify.h>
#include <asm/vfp.h>

#include "vfpinstr.h"
#include "vfp.h"

/*
 * Our undef handlers (in entry.S)
 */
void vfp_testing_entry(void);
void vfp_support_entry(void);
void vfp_null_entry(void);
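
/*
 * The ARM undefined-instruction handler in entry.S dispatches VFP
 * traps through vfp_vector below; vfp_init() repoints it at run time
 * to select the testing, null or full support entry.
 */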
void (*vfp_vector)(void) = vfp_null_entry;
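
/*
 * Per-CPU pointer to the VFP state that currently occupies that CPU's
 * registers, i.e. the owner of the hardware state under the lazy
 * save/restore scheme; a NULL entry forces a reload on next use.
 */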
union vfp_state *last_VFP_context[NR_CPUS];

/*
 * Dual-purpose variable.
 * Used in startup: set to non-zero if VFP checks fail.
 * After startup, holds the VFP architecture version.
 */
unsigned int VFP_arch;
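
/*
 * Thread notifier: called on context switch (THREAD_NOTIFY_SWITCH) and
 * for the thread flush/release cases.  The switch hook runs from the
 * context-switch path with preemption disabled, which is why the
 * per-CPU last_VFP_context entries can be updated without further
 * locking.
 */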
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	struct thread_info *thread = v;
	union vfp_state *vfp;
	__u32 cpu = thread->cpu;

	if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
		u32 fpexc = fmrx(FPEXC);

#ifdef CONFIG_SMP
		/*
		 * On SMP, if VFP is enabled, save the old state in
		 * case the thread migrates to a different CPU. The
		 * restoring is done lazily.
		 */
		if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
			vfp_save_state(last_VFP_context[cpu], fpexc);
			last_VFP_context[cpu]->hard.cpu = cpu;
		}
		/*
		 * Thread migration, just force the reloading of the
		 * state on the new CPU in case the VFP registers
		 * contain stale data.
		 */
		if (thread->vfpstate.hard.cpu != cpu)
			last_VFP_context[cpu] = NULL;
#endif

		/*
		 * Always disable VFP so we can lazily save/restore the
		 * old state.
		 */
		fmxr(FPEXC, fpexc & ~FPEXC_EN);
		return NOTIFY_DONE;
	}

	vfp = &thread->vfpstate;
	if (cmd == THREAD_NOTIFY_FLUSH) {
		/*
		 * Per-thread VFP initialisation.
		 */
		memset(vfp, 0, sizeof(union vfp_state));

		vfp->hard.fpexc = FPEXC_EN;
		vfp->hard.fpscr = FPSCR_ROUND_NEAREST;

		/*
		 * Disable VFP to ensure we initialise it first.
		 */
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	}

	/* flush and release case: Per-thread VFP cleanup. */
	if (last_VFP_context[cpu] == vfp)
		last_VFP_context[cpu] = NULL;

	return NOTIFY_DONE;
}

static struct notifier_block vfp_notifier_block = {
	.notifier_call = vfp_notifier,
};
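
/*
 * The notifier is registered from vfp_init() only once a VFP unit has
 * actually been detected, so these hooks never run on VFP-less
 * systems.
 */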

/*
 * Raise a SIGFPE for the current process.
 * sicode describes the signal being raised.
 */
void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
{
	siginfo_t info;

	memset(&info, 0, sizeof(info));

	info.si_signo = SIGFPE;
	info.si_code = sicode;
	/* address of the trapping instruction */
	info.si_addr = (void __user *)(instruction_pointer(regs) - 4);

	/*
	 * This is the same as NWFPE, because it's not clear what
	 * this is used for
	 */
	current->thread.error_code = 0;
	current->thread.trap_no = 6;

	send_sig_info(SIGFPE, &info, current);
}

static void vfp_panic(char *reason, u32 inst)
{
	int i;

	printk(KERN_ERR "VFP: Error: %s\n", reason);
	printk(KERN_ERR "VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
		fmrx(FPEXC), fmrx(FPSCR), inst);
	for (i = 0; i < 32; i += 2)
		printk(KERN_ERR "VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
		       i, vfp_get_float(i), i + 1, vfp_get_float(i + 1));
}

/*
 * Process bitmask of exception conditions.
 */
static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
{
	int si_code = 0;

	pr_debug("VFP: raising exceptions %08x\n", exceptions);

	if (exceptions == VFP_EXCEPTION_ERROR) {
		vfp_panic("unhandled bounce", inst);
		vfp_raise_sigfpe(0, regs);
		return;
	}

	/*
	 * Update the FPSCR with the additional exception flags.
	 * Comparison instructions always return at least one of
	 * these flags set.
	 */
	fpscr |= exceptions;

	fmxr(FPSCR, fpscr);

#define RAISE(stat, en, sig)				\
	if (exceptions & stat && fpscr & en)		\
		si_code = sig;
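
	/*
	 * Note that RAISE expands to a bare if-statement: each invocation
	 * below is a complete statement on its own, so the usual
	 * do { } while (0) wrapper is unnecessary for this local helper.
	 */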

	/*
	 * These are arranged in priority order, least to highest.
	 */
	RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV);
	RAISE(FPSCR_IXC, FPSCR_IXE, FPE_FLTRES);
	RAISE(FPSCR_UFC, FPSCR_UFE, FPE_FLTUND);
	RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
	RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);

	if (si_code)
		vfp_raise_sigfpe(si_code, regs);
}
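
/*
 * Illustration only (not kernel code): a minimal userspace sketch that
 * exercises this path, assuming a glibc userland where feenableexcept()
 * maps FE_DIVBYZERO onto the FPSCR.DZE enable bit.  The divide bounces
 * into VFP_bounce(), emulation sets FPSCR_DZC, and
 * vfp_raise_exceptions() above delivers SIGFPE with si_code FPE_FLTDIV:
 *
 *	#define _GNU_SOURCE
 *	#include <fenv.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	static void handler(int sig, siginfo_t *si, void *uc)
 *	{
 *		// si_code comes from the RAISE() table above
 *		printf("SIGFPE, si_code=%d\n", si->si_code);
 *		exit(0);
 *	}
 *
 *	int main(void)
 *	{
 *		struct sigaction sa = { .sa_sigaction = handler,
 *					.sa_flags = SA_SIGINFO };
 *		volatile double zero = 0.0;
 *
 *		sigaction(SIGFPE, &sa, NULL);
 *		if (feenableexcept(FE_DIVBYZERO) < 0)
 *			return 1;	// trapping unsupported here
 *		return (int)(1.0 / zero);
 *	}
 */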

/*
 * Emulate a VFP instruction.
 */
static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
{
	u32 exceptions = VFP_EXCEPTION_ERROR;

	pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr);

	if (INST_CPRTDO(inst)) {
		if (!INST_CPRT(inst)) {
			/*
			 * CPDO
			 */
			if (vfp_single(inst)) {
				exceptions = vfp_single_cpdo(inst, fpscr);
			} else {
				exceptions = vfp_double_cpdo(inst, fpscr);
			}
		} else {
			/*
			 * A CPRT instruction cannot appear in FPINST2, nor
			 * can it cause an exception.  Therefore, we do not
			 * have to emulate it.
			 */
		}
	} else {
		/*
		 * A CPDT instruction cannot appear in FPINST2, nor can
		 * it cause an exception.  Therefore, we do not have to
		 * emulate it.
		 */
	}

	/* strip the emulation-internal NaN marker before returning */
	return exceptions & ~VFP_NAN_FLAG;
}

/*
 * Package up a bounce condition.
 */
void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
{
	u32 fpscr, orig_fpscr, fpsid, exceptions;

	pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);

	/*
	 * At this point, FPEXC can have the following configuration:
	 *
	 *  EX DEX IXE
	 *  0   1   x   - synchronous exception
	 *  1   x   0   - asynchronous exception
	 *  1   x   1   - synchronous on VFP subarch 1 and asynchronous on later
	 *                subarchitectures
	 *  0   0   1   - synchronous on VFP9 (non-standard subarch 1
	 *                implementation), undefined otherwise
	 *
	 * Clear various bits and enable access to the VFP so we can
	 * handle the bounce.
	 */
	fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK));

	fpsid = fmrx(FPSID);
	orig_fpscr = fpscr = fmrx(FPSCR);

	/*
	 * Check for the special VFP subarch 1 and FPSCR.IXE bit case
	 */
	if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT)
	    && (fpscr & FPSCR_IXE)) {
		/*
		 * Synchronous exception, emulate the trigger instruction
		 */
		goto emulate;
	}

	if (fpexc & FPEXC_EX) {
		/*
		 * Asynchronous exception. The instruction is read from FPINST
		 * and the interrupted instruction has to be restarted.
		 */
		trigger = fmrx(FPINST);
		regs->ARM_pc -= 4;
	} else if (!(fpexc & FPEXC_DEX)) {
		/*
		 * Illegal combination of bits. It can be caused by an
		 * unallocated VFP instruction but with FPSCR.IXE set and not
		 * on VFP subarch 1.
		 */
		vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
		return;
	}

	/*
	 * Modify fpscr to indicate the number of iterations remaining.
	 * If FPEXC.EX is 0, FPEXC.DEX is 1 and the FPEXC.VV bit indicates
	 * whether FPEXC.VECITR or FPSCR.LEN is used.
	 */
	if (fpexc & (FPEXC_EX | FPEXC_VV)) {
		u32 len;

		len = fpexc + (1 << FPEXC_LENGTH_BIT);

		fpscr &= ~FPSCR_LENGTH_MASK;
		fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT);
	}

	/*
	 * Handle the first FP instruction.  We used to take note of the
	 * FPEXC bounce reason, but this appears to be unreliable.
	 * Emulate the bounced instruction instead.
	 */
	exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);

	/*
	 * If there isn't a second FP instruction, exit now. Note that
	 * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
	 */
	if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
		return;

	/*
	 * The barrier() here prevents fpinst2 being read
	 * before the condition above.
	 */
	barrier();
	trigger = fmrx(FPINST2);

 emulate:
	exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
}
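
/*
 * Worked example: on an implementation that reports exceptions
 * asynchronously, a VFP divide-by-zero arrives here with FPEXC.EX set.
 * The exceptional instruction is re-read from FPINST (the interrupted
 * one is restarted via the PC adjustment above), its emulation yields
 * FPSCR_DZC, and vfp_raise_exceptions() turns that into SIGFPE with
 * si_code FPE_FLTDIV when FPSCR.DZE is set.
 */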

static void vfp_enable(void *unused)
{
	u32 access = get_copro_access();

	/*
	 * Enable full access to VFP (cp10 and cp11)
	 */
	set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
}
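
/*
 * vfp_enable() must run on every CPU: vfp_init() calls it directly on
 * the boot CPU and via smp_call_function() for the others, so that
 * cp10/cp11 access is granted everywhere before VFP is used.
 */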

/*
 * VFP support code initialisation.
 */
static int __init vfp_init(void)
{
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();

	if (cpu_arch >= CPU_ARCH_ARMv6)
		vfp_enable(NULL);

	/*
	 * First check that there is a VFP that we can use.
	 * The handler is already set up to just log calls, so
	 * we just need to read the VFPSID register.
	 */
	vfp_vector = vfp_testing_entry;
	barrier();
	/* read FPSID while the testing vector is installed */
	vfpsid = fmrx(FPSID);
	barrier();
	vfp_vector = vfp_null_entry;

	printk(KERN_INFO "VFP support v0.3: ");
	if (VFP_arch)
		printk("not present\n");
	else if (vfpsid & FPSID_NODOUBLE) {
		printk("no double precision support\n");
	} else {
		smp_call_function(vfp_enable, NULL, 1);

		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
			(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
			(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
			(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
			(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
			(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);

		vfp_vector = vfp_support_entry;

		thread_register_notifier(&vfp_notifier_block);

		/*
		 * We detected VFP, and the support code is
		 * in place; report VFP support to userspace.
		 */
		elf_hwcap |= HWCAP_VFP;
	}
	return 0;
}

late_initcall(vfp_init);
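
/*
 * Note: presumably a late_initcall so that the secondary CPUs are
 * already online by the time smp_call_function() is used above to
 * enable VFP access on them.
 */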