/*
 * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
 * Copyright (C) 2005 Mips Technologies, Inc
 */
# include <linux/cpu.h>
# include <linux/cpumask.h>
# include <linux/delay.h>
# include <linux/kernel.h>
# include <linux/init.h>
# include <linux/sched.h>
# include <linux/security.h>
# include <linux/types.h>
# include <asm/uaccess.h>
/*
 * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
 */
cpumask_t mt_fpu_cpumask;

/* Command-line override for the emulation threshold; -1 means "use default". */
static int fpaff_threshold = -1;

/* FP emulation count above which a task gets bound to FPU-capable CPUs. */
unsigned long mt_fpemul_threshold;
/*
 * Replacement functions for the sys_sched_setaffinity() and
 * sys_sched_getaffinity() system calls, so that we can integrate
 * FPU affinity with the user's requested processor affinity.
 * This code is 98% identical with the sys_sched_setaffinity()
 * and sys_sched_getaffinity() system calls, and should be
 * updated when kernel/sched.c changes.
 */
/*
 * find_process_by_pid - find a process with a matching PID value.
 * used in sys_sched_set/getaffinity() in kernel/sched.c, so
 * cloned here.
 *
 * A pid of 0 means "the calling task".  Caller must hold
 * tasklist_lock (or otherwise pin the task) across use of the result.
 */
static inline struct task_struct *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_vpid(pid) : current;
}
/*
 * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
 *
 * Mirrors sys_sched_setaffinity(), but additionally records the
 * user-requested mask in thread.user_cpus_allowed and, for tasks
 * flagged TIF_FPUBOUND, restricts the effective mask to the
 * FPU-capable CPUs in mt_fpu_cpumask.
 *
 * Returns 0 on success, or -EINVAL/-EFAULT/-ESRCH/-EPERM (or the
 * error from the security hook / set_cpus_allowed_ptr) on failure.
 */
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	cpumask_t new_mask;
	cpumask_t effective_mask;
	int retval;
	struct task_struct *p;
	struct thread_info *ti;
	uid_t euid;

	if (len < sizeof(new_mask))
		return -EINVAL;

	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
		return -EFAULT;

	get_online_cpus();
	read_lock(&tasklist_lock);

	p = find_process_by_pid(pid);
	if (!p) {
		read_unlock(&tasklist_lock);
		put_online_cpus();
		return -ESRCH;
	}

	/*
	 * It is not safe to call set_cpus_allowed with the
	 * tasklist_lock held.  We will bump the task_struct's
	 * usage count and drop tasklist_lock before invoking
	 * set_cpus_allowed.
	 */
	get_task_struct(p);

	euid = current_euid();
	retval = -EPERM;
	if (euid != p->cred->euid && euid != p->cred->uid &&
	    !capable(CAP_SYS_NICE)) {
		read_unlock(&tasklist_lock);
		goto out_unlock;
	}

	retval = security_task_setscheduler(p, 0, NULL);
	if (retval) {
		/*
		 * BUGFIX: the original jumped to out_unlock here with
		 * tasklist_lock still read-held, leaking the lock.
		 * Drop it before bailing out, matching the -EPERM path.
		 */
		read_unlock(&tasklist_lock);
		goto out_unlock;
	}

	/* Record new user-specified CPU set for future reference */
	p->thread.user_cpus_allowed = new_mask;

	/* Unlock the task list */
	read_unlock(&tasklist_lock);

	/* Compute new global allowed CPU set if necessary */
	ti = task_thread_info(p);
	if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
	    cpus_intersects(new_mask, mt_fpu_cpumask)) {
		/* Keep an FPU-bound task on FPU-capable CPUs only. */
		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
		retval = set_cpus_allowed_ptr(p, &effective_mask);
	} else {
		/* No overlap with FPU CPUs: drop the binding entirely. */
		clear_ti_thread_flag(ti, TIF_FPUBOUND);
		retval = set_cpus_allowed_ptr(p, &new_mask);
	}

out_unlock:
	put_task_struct(p);
	put_online_cpus();
	return retval;
}
/*
 * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
 *
 * Reports the user-specified mask recorded by
 * mipsmt_sys_sched_setaffinity() (not the possibly narrower
 * effective mask), intersected with the possible-CPU map.
 *
 * Returns the number of mask bytes copied out on success, or
 * -EINVAL/-ESRCH/-EFAULT (or the security-hook error) on failure.
 */
asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	unsigned int real_len;
	cpumask_t mask;
	int retval;
	struct task_struct *p;

	real_len = sizeof(mask);
	if (len < real_len)
		return -EINVAL;

	get_online_cpus();
	read_lock(&tasklist_lock);

	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	/* Report only the user-requested CPUs that actually exist. */
	cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);

out_unlock:
	read_unlock(&tasklist_lock);
	put_online_cpus();

	if (retval)
		return retval;
	if (copy_to_user(user_mask_ptr, &mask, real_len))
		return -EFAULT;
	return real_len;
}
/*
 * Parse the "fpaff=<n>" kernel command-line option into
 * fpaff_threshold; a negative/absent value keeps the computed default.
 */
static int __init fpaff_thresh(char *str)
{
	get_option(&str, &fpaff_threshold);
	return 1;	/* option consumed */
}
__setup("fpaff=", fpaff_thresh);
/*
* FPU Use Factor empirically derived from experiments on 34 K
*/
2008-09-09 21:33:36 +02:00
# define FPUSEFACTOR 2000
2007-07-03 14:37:43 +01:00
static __init int mt_fp_affinity_init ( void )
{
if ( fpaff_threshold > = 0 ) {
mt_fpemul_threshold = fpaff_threshold ;
} else {
mt_fpemul_threshold =
( FPUSEFACTOR * ( loops_per_jiffy / ( 500000 / HZ ) ) ) / HZ ;
}
printk ( KERN_DEBUG " FPU Affinity set after %ld emulations \n " ,
mt_fpemul_threshold ) ;
return 0 ;
}
arch_initcall ( mt_fp_affinity_init ) ;