/*
 * General MIPS MT support routines, usable in AP/SP and SMVP.
 * Copyright (C) 2005 Mips Technologies, Inc
 */
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/types.h>
#include <asm/uaccess.h>
/*
 * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
 */
cpumask_t mt_fpu_cpumask;

/* Threshold from the "fpaff=" boot parameter; -1 means "not supplied". */
static int fpaff_threshold = -1;

/*
 * Emulation-count threshold for FPU affinity; computed from
 * fpaff_threshold or loops_per_jiffy in mt_fp_affinity_init().
 */
unsigned long mt_fpemul_threshold;
/*
 * Replacement functions for the sys_sched_setaffinity() and
 * sys_sched_getaffinity() system calls, so that we can integrate
 * FPU affinity with the user's requested processor affinity.
 * This code is 98% identical with the sys_sched_setaffinity()
 * and sys_sched_getaffinity() system calls, and should be
 * updated when kernel/sched/core.c changes.
 */
/*
 * find_process_by_pid - find a process with a matching PID value.
 * used in sys_sched_set/getaffinity() in kernel/sched/core.c, so
 * cloned here.
 *
 * A pid of 0 refers to the calling task, matching syscall convention.
 * Caller must hold rcu_read_lock() for the find_task_by_vpid() case.
 */
static inline struct task_struct *find_process_by_pid(pid_t pid)
{
	if (!pid)
		return current;

	return find_task_by_vpid(pid);
}
2010-05-29 03:19:57 +01:00
/*
* check the target process has a UID that matches the current process ' s
*/
static bool check_same_owner ( struct task_struct * p )
{
const struct cred * cred = current_cred ( ) , * pcred ;
bool match ;
rcu_read_lock ( ) ;
pcred = __task_cred ( p ) ;
2012-12-10 15:56:44 +01:00
match = ( uid_eq ( cred - > euid , pcred - > euid ) | |
uid_eq ( cred - > euid , pcred - > uid ) ) ;
2010-05-29 03:19:57 +01:00
rcu_read_unlock ( ) ;
return match ;
}
2007-07-03 14:37:43 +01:00
/*
* mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
*/
asmlinkage long mipsmt_sys_sched_setaffinity ( pid_t pid , unsigned int len ,
unsigned long __user * user_mask_ptr )
{
2010-05-29 03:19:57 +01:00
cpumask_var_t cpus_allowed , new_mask , effective_mask ;
2007-07-25 16:19:33 +01:00
struct thread_info * ti ;
2010-05-29 03:19:57 +01:00
struct task_struct * p ;
int retval ;
2007-07-03 14:37:43 +01:00
if ( len < sizeof ( new_mask ) )
return - EINVAL ;
if ( copy_from_user ( & new_mask , user_mask_ptr , sizeof ( new_mask ) ) )
return - EFAULT ;
2008-01-25 21:08:02 +01:00
get_online_cpus ( ) ;
2010-05-29 03:19:57 +01:00
rcu_read_lock ( ) ;
2007-07-03 14:37:43 +01:00
p = find_process_by_pid ( pid ) ;
if ( ! p ) {
2010-05-29 03:19:57 +01:00
rcu_read_unlock ( ) ;
2008-01-25 21:08:02 +01:00
put_online_cpus ( ) ;
2007-07-03 14:37:43 +01:00
return - ESRCH ;
}
2010-05-29 03:19:57 +01:00
/* Prevent p going away */
2007-07-03 14:37:43 +01:00
get_task_struct ( p ) ;
2010-05-29 03:19:57 +01:00
rcu_read_unlock ( ) ;
2007-07-03 14:37:43 +01:00
2010-05-29 03:19:57 +01:00
if ( ! alloc_cpumask_var ( & cpus_allowed , GFP_KERNEL ) ) {
retval = - ENOMEM ;
goto out_put_task ;
}
if ( ! alloc_cpumask_var ( & new_mask , GFP_KERNEL ) ) {
retval = - ENOMEM ;
goto out_free_cpus_allowed ;
}
if ( ! alloc_cpumask_var ( & effective_mask , GFP_KERNEL ) ) {
retval = - ENOMEM ;
goto out_free_new_mask ;
}
2007-07-03 14:37:43 +01:00
retval = - EPERM ;
2010-05-29 03:19:57 +01:00
if ( ! check_same_owner ( p ) & & ! capable ( CAP_SYS_NICE ) )
2007-07-03 14:37:43 +01:00
goto out_unlock ;
2010-10-24 22:23:50 +01:00
retval = security_task_setscheduler ( p ) ;
2007-07-03 14:37:43 +01:00
if ( retval )
goto out_unlock ;
/* Record new user-specified CPU set for future reference */
2010-05-29 03:19:57 +01:00
cpumask_copy ( & p - > thread . user_cpus_allowed , new_mask ) ;
2007-07-03 14:37:43 +01:00
2010-05-29 03:19:57 +01:00
again :
2007-07-03 14:37:43 +01:00
/* Compute new global allowed CPU set if necessary */
2007-07-25 16:19:33 +01:00
ti = task_thread_info ( p ) ;
if ( test_ti_thread_flag ( ti , TIF_FPUBOUND ) & &
2015-03-05 10:49:17 +10:30
cpumask_intersects ( new_mask , & mt_fpu_cpumask ) ) {
cpumask_and ( effective_mask , new_mask , & mt_fpu_cpumask ) ;
2010-05-29 03:19:57 +01:00
retval = set_cpus_allowed_ptr ( p , effective_mask ) ;
2007-07-03 14:37:43 +01:00
} else {
2010-05-29 03:19:57 +01:00
cpumask_copy ( effective_mask , new_mask ) ;
2007-07-25 16:19:33 +01:00
clear_ti_thread_flag ( ti , TIF_FPUBOUND ) ;
2010-05-29 03:19:57 +01:00
retval = set_cpus_allowed_ptr ( p , new_mask ) ;
2007-07-03 14:37:43 +01:00
}
2010-05-29 03:19:57 +01:00
if ( ! retval ) {
cpuset_cpus_allowed ( p , cpus_allowed ) ;
if ( ! cpumask_subset ( effective_mask , cpus_allowed ) ) {
/*
* We must have raced with a concurrent cpuset
* update . Just reset the cpus_allowed to the
* cpuset ' s cpus_allowed
*/
cpumask_copy ( new_mask , cpus_allowed ) ;
goto again ;
}
}
2007-07-03 14:37:43 +01:00
out_unlock :
2010-05-29 03:19:57 +01:00
free_cpumask_var ( effective_mask ) ;
out_free_new_mask :
free_cpumask_var ( new_mask ) ;
out_free_cpus_allowed :
free_cpumask_var ( cpus_allowed ) ;
out_put_task :
2007-07-03 14:37:43 +01:00
put_task_struct ( p ) ;
2008-01-25 21:08:02 +01:00
put_online_cpus ( ) ;
2007-07-03 14:37:43 +01:00
return retval ;
}
/*
 * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
 *
 * Reports the user-specified affinity (user_cpus_allowed) rather than
 * the possibly FPU-restricted effective mask, limited to possible CPUs.
 * Returns the mask size copied out on success, a negative errno on
 * failure.
 */
asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	cpumask_t mask;
	struct task_struct *p;
	unsigned int real_len = sizeof(mask);
	int retval = -ESRCH;

	if (len < real_len)
		return -EINVAL;

	get_online_cpus();
	read_lock(&tasklist_lock);

	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);

out_unlock:
	read_unlock(&tasklist_lock);
	put_online_cpus();

	if (retval)
		return retval;
	if (copy_to_user(user_mask_ptr, &mask, real_len))
		return -EFAULT;

	return real_len;
}
/*
 * Parse the "fpaff=" kernel command-line parameter into fpaff_threshold.
 * Always returns 1 to mark the option as consumed.
 */
static int __init fpaff_thresh(char *str)
{
	get_option(&str, &fpaff_threshold);
	return 1;
}
__setup("fpaff=", fpaff_thresh);
/*
 * FPU Use Factor empirically derived from experiments on 34K
 */
#define FPUSEFACTOR 2000
/*
 * Initialize mt_fpemul_threshold: use the "fpaff=" boot parameter when
 * one was given, otherwise derive a default from loops_per_jiffy
 * (scaled by FPUSEFACTOR). Runs at arch_initcall time.
 */
static __init int mt_fp_affinity_init(void)
{
	mt_fpemul_threshold = (fpaff_threshold >= 0)
		? fpaff_threshold
		: (FPUSEFACTOR * (loops_per_jiffy / (500000 / HZ))) / HZ;

	printk(KERN_DEBUG "FPU Affinity set after %ld emulations\n",
	       mt_fpemul_threshold);

	return 0;
}
arch_initcall(mt_fp_affinity_init);