/*
 * User interface for Resource Allocation in Resource Director Technology (RDT)
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * More information about RDT can be found in the Intel(R) x86 Architecture
 * Software Developer Manual.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/task_work.h>

#include <uapi/linux/magic.h>

#include <asm/intel_rdt_sched.h>
#include "intel_rdt.h"
DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
static struct kernfs_root *rdt_root;
struct rdtgroup rdtgroup_default;
LIST_HEAD(rdt_all_groups);

/* Kernel fs node for "info" directory under root */
static struct kernfs_node *kn_info;

/* Kernel fs node for "mon_groups" directory under root */
static struct kernfs_node *kn_mongrp;

/* Kernel fs node for "mon_data" directory under root */
static struct kernfs_node *kn_mondata;

static struct seq_buf last_cmd_status;
static char last_cmd_status_buf[512];

struct dentry *debugfs_resctrl;

void rdt_last_cmd_clear(void)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_clear(&last_cmd_status);
}

void rdt_last_cmd_puts(const char *s)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_puts(&last_cmd_status, s);
}

void rdt_last_cmd_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_vprintf(&last_cmd_status, fmt, ap);
	va_end(ap);
}
/*
 * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
 * we can keep a bitmap of free CLOSIDs in a single integer.
 *
 * Using a global CLOSID across all resources has some advantages and
 * some drawbacks:
 * + We can simply set "current->closid" to assign a task to a resource
 *   group.
 * + Context switch code can avoid extra memory references deciding which
 *   CLOSID to load into the PQR_ASSOC MSR
 * - We give up some options in configuring resource groups across multi-socket
 *   systems.
 * - Our choices on how to configure each resource become progressively more
 *   limited as the number of resources grows.
 */
static int closid_free_map;
static int closid_free_map_len;

int closids_supported(void)
{
	return closid_free_map_len;
}
static void closid_init(void)
{
	struct rdt_resource *r;
	int rdt_min_closid = 32;

	/* Compute rdt_min_closid across all resources */
	for_each_alloc_enabled_rdt_resource(r)
		rdt_min_closid = min(rdt_min_closid, r->num_closid);

	closid_free_map = BIT_MASK(rdt_min_closid) - 1;

	/* CLOSID 0 is always reserved for the default group */
	closid_free_map &= ~1;
	closid_free_map_len = rdt_min_closid;
}
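
/*
 * Illustrative: if the most constrained alloc-enabled resource supports
 * four CLOSIDs, closid_init() computes BIT_MASK(4) - 1 = 0xf and then
 * clears bit 0, leaving closid_free_map = 0xe, i.e. CLOSIDs 1-3 are
 * free for newly created resource groups.
 */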
static int closid_alloc(void)
{
	u32 closid = ffs(closid_free_map);

	if (closid == 0)
		return -ENOSPC;
	closid--;
	closid_free_map &= ~(1 << closid);

	return closid;
}

void closid_free(int closid)
{
	closid_free_map |= 1 << closid;
}
/**
 * closid_allocated - test if provided closid is in use
 * @closid: closid to be tested
 *
 * Return: true if @closid is currently associated with a resource group,
 * false if @closid is free
 */
static bool closid_allocated(unsigned int closid)
{
	return (closid_free_map & (1 << closid)) == 0;
}
/**
 * rdtgroup_mode_by_closid - Return mode of resource group with closid
 * @closid: closid of the resource group
 *
 * Each resource group is associated with a @closid. Here the mode
 * of a resource group can be queried by searching for it using its closid.
 *
 * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
 */
enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
{
	struct rdtgroup *rdtgrp;

	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (rdtgrp->closid == closid)
			return rdtgrp->mode;
	}

	return RDT_NUM_MODES;
}
static const char * const rdt_mode_str[] = {
	[RDT_MODE_SHAREABLE]		= "shareable",
	[RDT_MODE_EXCLUSIVE]		= "exclusive",
	[RDT_MODE_PSEUDO_LOCKSETUP]	= "pseudo-locksetup",
	[RDT_MODE_PSEUDO_LOCKED]	= "pseudo-locked",
};

/**
 * rdtgroup_mode_str - Return the string representation of mode
 * @mode: the resource group mode as &enum rdtgroup_mode
 *
 * Return: string representation of valid mode, "unknown" otherwise
 */
static const char *rdtgroup_mode_str(enum rdtgrp_mode mode)
{
	if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES)
		return "unknown";

	return rdt_mode_str[mode];
}
/* set uid and gid of rdtgroup dirs and files to that of the creator */
static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
{
	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
				.ia_uid = current_fsuid(),
				.ia_gid = current_fsgid(), };

	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
		return 0;

	return kernfs_setattr(kn, &iattr);
}

static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
{
	struct kernfs_node *kn;
	int ret;

	kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
				  0, rft->kf_ops, rft, NULL, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	return 0;
}
static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	struct rftype *rft = of->kn->priv;

	if (rft->seq_show)
		return rft->seq_show(of, m, arg);
	return 0;
}

static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
				   size_t nbytes, loff_t off)
{
	struct rftype *rft = of->kn->priv;

	if (rft->write)
		return rft->write(of, buf, nbytes, off);

	return -EINVAL;
}

static struct kernfs_ops rdtgroup_kf_single_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= rdtgroup_file_write,
	.seq_show		= rdtgroup_seqfile_show,
};

static struct kernfs_ops kf_mondata_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.seq_show		= rdtgroup_mondata_show,
};

static bool is_cpu_list(struct kernfs_open_file *of)
{
	struct rftype *rft = of->kn->priv;

	return rft->flags & RFTYPE_FLAGS_CPUS_LIST;
}
static int rdtgroup_cpus_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);

	if (rdtgrp) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
			seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
				   cpumask_pr_args(&rdtgrp->plr->d->cpu_mask));
		else
			seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
				   cpumask_pr_args(&rdtgrp->cpu_mask));
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);

	return ret;
}
/*
 * This is safe against intel_rdt_sched_in() called from __switch_to()
 * because __switch_to() is executed with interrupts disabled. A local call
 * from update_closid_rmid() is protected against __switch_to() because
 * preemption is disabled.
 */
static void update_cpu_closid_rmid(void *info)
{
	struct rdtgroup *r = info;

	if (r) {
		this_cpu_write(pqr_state.default_closid, r->closid);
		this_cpu_write(pqr_state.default_rmid, r->mon.rmid);
	}

	/*
	 * We cannot unconditionally write the MSR because the current
	 * executing task might have its own closid selected. Just reuse
	 * the context switch code.
	 */
	intel_rdt_sched_in();
}
/*
 * Update the PQR_ASSOC MSR on all cpus in @cpu_mask.
 *
 * Per task closids/rmids must have been set up before calling this function.
 */
static void
update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
{
	int cpu = get_cpu();

	if (cpumask_test_cpu(cpu, cpu_mask))
		update_cpu_closid_rmid(r);
	smp_call_function_many(cpu_mask, update_cpu_closid_rmid, r, 1);
	put_cpu();
}
static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			  cpumask_var_t tmpmask)
{
	struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp;
	struct list_head *head;

	/* Check whether cpus belong to parent ctrl group */
	cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
	if (cpumask_weight(tmpmask)) {
		rdt_last_cmd_puts("can only add CPUs to mongroup that belong to parent\n");
		return -EINVAL;
	}

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (cpumask_weight(tmpmask)) {
		/* Give any dropped cpus to parent rdtgroup */
		cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, prgrp);
	}

	/*
	 * If we added cpus, remove them from previous group that owned them
	 * and update per-cpu rmid
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (cpumask_weight(tmpmask)) {
		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
			if (crgrp == rdtgrp)
				continue;
			cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask,
				       tmpmask);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	return 0;
}

static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m)
{
	struct rdtgroup *crgrp;

	cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m);
	/* update the child mon group masks as well */
	list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list)
		cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask);
}
static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			   cpumask_var_t tmpmask, cpumask_var_t tmpmask1)
{
	struct rdtgroup *r, *crgrp;
	struct list_head *head;

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (cpumask_weight(tmpmask)) {
		/* Can't drop from default group */
		if (rdtgrp == &rdtgroup_default) {
			rdt_last_cmd_puts("Can't drop CPUs from default group\n");
			return -EINVAL;
		}

		/* Give any dropped cpus to rdtgroup_default */
		cpumask_or(&rdtgroup_default.cpu_mask,
			   &rdtgroup_default.cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, &rdtgroup_default);
	}

	/*
	 * If we added cpus, remove them from previous group and
	 * the prev group's child groups that owned them
	 * and update per-cpu closid/rmid.
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (cpumask_weight(tmpmask)) {
		list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
			if (r == rdtgrp)
				continue;
			cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
			if (cpumask_weight(tmpmask1))
				cpumask_rdtgrp_clear(r, tmpmask1);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	/*
	 * Clear child mon group masks since there is a new parent mask
	 * now and update the rmid for the cpus the child lost.
	 */
	head = &rdtgrp->mon.crdtgrp_list;
	list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
		cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask);
		update_closid_rmid(tmpmask, rdtgrp);
		cpumask_clear(&crgrp->cpu_mask);
	}

	return 0;
}
static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	cpumask_var_t tmpmask, newmask, tmpmask1;
	struct rdtgroup *rdtgrp;
	int ret;

	if (!buf)
		return -EINVAL;

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		return -ENOMEM;
	}
	if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		free_cpumask_var(newmask);
		return -ENOMEM;
	}

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	rdt_last_cmd_clear();
	if (!rdtgrp) {
		ret = -ENOENT;
		rdt_last_cmd_puts("directory was removed\n");
		goto unlock;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		ret = -EINVAL;
		rdt_last_cmd_puts("pseudo-locking in progress\n");
		goto unlock;
	}

	if (is_cpu_list(of))
		ret = cpulist_parse(buf, newmask);
	else
		ret = cpumask_parse(buf, newmask);

	if (ret) {
		rdt_last_cmd_puts("bad cpu list/mask\n");
		goto unlock;
	}

	/* check that user didn't specify any offline cpus */
	cpumask_andnot(tmpmask, newmask, cpu_online_mask);
	if (cpumask_weight(tmpmask)) {
		ret = -EINVAL;
		rdt_last_cmd_puts("can only assign online cpus\n");
		goto unlock;
	}

	if (rdtgrp->type == RDTCTRL_GROUP)
		ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1);
	else if (rdtgrp->type == RDTMON_GROUP)
		ret = cpus_mon_write(rdtgrp, newmask, tmpmask);
	else
		ret = -EINVAL;

unlock:
	rdtgroup_kn_unlock(of->kn);
	free_cpumask_var(tmpmask);
	free_cpumask_var(newmask);
	free_cpumask_var(tmpmask1);

	return ret ?: nbytes;
}
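
/*
 * Example (user space, illustrative) of assigning CPUs to a resource
 * group through the two files backed by rdtgroup_cpus_write():
 *
 *	# echo 5-7 > /sys/fs/resctrl/grp1/cpus_list	(CPU list format)
 *	# echo e0 > /sys/fs/resctrl/grp1/cpus		(hex bitmask format)
 *
 * Both commands assign CPUs 5, 6 and 7; CPUs dropped from a control
 * group are returned to the default group.
 */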
struct task_move_callback {
	struct callback_head	work;
	struct rdtgroup		*rdtgrp;
};

static void move_myself(struct callback_head *head)
{
	struct task_move_callback *callback;
	struct rdtgroup *rdtgrp;

	callback = container_of(head, struct task_move_callback, work);
	rdtgrp = callback->rdtgrp;

	/*
	 * If resource group was deleted before this task work callback
	 * was invoked, then assign the task to root group and free the
	 * resource group.
	 */
	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
	    (rdtgrp->flags & RDT_DELETED)) {
		current->closid = 0;
		current->rmid = 0;
		kfree(rdtgrp);
	}

	preempt_disable();
	/* update PQR_ASSOC MSR to make resource group go into effect */
	intel_rdt_sched_in();
	preempt_enable();

	kfree(callback);
}
static int __rdtgroup_move_task(struct task_struct *tsk,
				struct rdtgroup *rdtgrp)
{
	struct task_move_callback *callback;
	int ret;

	callback = kzalloc(sizeof(*callback), GFP_KERNEL);
	if (!callback)
		return -ENOMEM;
	callback->work.func = move_myself;
	callback->rdtgrp = rdtgrp;

	/*
	 * Take a refcount, so rdtgrp cannot be freed before the
	 * callback has been invoked.
	 */
	atomic_inc(&rdtgrp->waitcount);
	ret = task_work_add(tsk, &callback->work, true);
	if (ret) {
		/*
		 * Task is exiting. Drop the refcount and free the callback.
		 * No need to check the refcount as the group cannot be
		 * deleted before the write function unlocks rdtgroup_mutex.
		 */
		atomic_dec(&rdtgrp->waitcount);
		kfree(callback);
		rdt_last_cmd_puts("task exited\n");
	} else {
		/*
		 * For ctrl_mon groups move both closid and rmid.
		 * For monitor groups, can move the tasks only from
		 * their parent CTRL group.
		 */
		if (rdtgrp->type == RDTCTRL_GROUP) {
			tsk->closid = rdtgrp->closid;
			tsk->rmid = rdtgrp->mon.rmid;
		} else if (rdtgrp->type == RDTMON_GROUP) {
			if (rdtgrp->mon.parent->closid == tsk->closid) {
				tsk->rmid = rdtgrp->mon.rmid;
			} else {
				rdt_last_cmd_puts("Can't move task to different control group\n");
				ret = -EINVAL;
			}
		}
	}
	return ret;
}
/**
 * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
 * @r: Resource group
 *
 * Return: 1 if tasks have been assigned to @r, 0 otherwise
 */
int rdtgroup_tasks_assigned(struct rdtgroup *r)
{
	struct task_struct *p, *t;
	int ret = 0;

	lockdep_assert_held(&rdtgroup_mutex);

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) ||
		    (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}
static int rdtgroup_task_write_permission(struct task_struct *task,
					  struct kernfs_open_file *of)
{
	const struct cred *tcred = get_task_cred(task);
	const struct cred *cred = current_cred();
	int ret = 0;

	/*
	 * Even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid)) {
		rdt_last_cmd_printf("No permission to move task %d\n", task->pid);
		ret = -EPERM;
	}

	put_cred(tcred);
	return ret;
}

static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
			      struct kernfs_open_file *of)
{
	struct task_struct *tsk;
	int ret;

	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			rcu_read_unlock();
			rdt_last_cmd_printf("No task %d\n", pid);
			return -ESRCH;
		}
	} else {
		tsk = current;
	}

	get_task_struct(tsk);
	rcu_read_unlock();

	ret = rdtgroup_task_write_permission(tsk, of);
	if (!ret)
		ret = __rdtgroup_move_task(tsk, rdtgrp);

	put_task_struct(tsk);
	return ret;
}
static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;
	pid_t pid;

	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
		return -EINVAL;
	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		ret = -EINVAL;
		rdt_last_cmd_puts("pseudo-locking in progress\n");
		goto unlock;
	}

	ret = rdtgroup_move_task(pid, rdtgrp, of);

unlock:
	rdtgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}

static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
{
	struct task_struct *p, *t;

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) ||
		    (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid))
			seq_printf(s, "%d\n", t->pid);
	}
	rcu_read_unlock();
}

static int rdtgroup_tasks_show(struct kernfs_open_file *of,
			       struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp)
		show_rdt_tasks(rdtgrp, s);
	else
		ret = -ENOENT;
	rdtgroup_kn_unlock(of->kn);

	return ret;
}
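
/*
 * Example (user space, illustrative) of moving a task into a resource
 * group through the "tasks" file backed by rdtgroup_tasks_write():
 *
 *	# echo 1234 > /sys/fs/resctrl/grp1/tasks
 *
 * Writing "0" moves the calling task itself; reading the file lists
 * the PIDs of all tasks currently in the group.
 */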
static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
				    struct seq_file *seq, void *v)
{
	int len;

	mutex_lock(&rdtgroup_mutex);
	len = seq_buf_used(&last_cmd_status);
	if (len)
		seq_printf(seq, "%.*s", len, last_cmd_status_buf);
	else
		seq_puts(seq, "ok\n");
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}
static int rdt_num_closids_show(struct kernfs_open_file *of,
				struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%d\n", r->num_closid);
	return 0;
}

static int rdt_default_ctrl_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%x\n", r->default_ctrl);
	return 0;
}

static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
	return 0;
}

static int rdt_shareable_bits_show(struct kernfs_open_file *of,
				   struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%x\n", r->cache.shareable_bits);
	return 0;
}
/**
 * rdt_bit_usage_show - Display current usage of resources
 *
 * A domain is a shared resource that can now be allocated differently. Here
 * we display the current regions of the domain as an annotated bitmask.
 * For each domain of this resource its allocation bitmask
 * is annotated as below to indicate the current usage of the corresponding bit:
 *   0 - currently unused
 *   X - currently available for sharing and used by software and hardware
 *   H - currently used by hardware only but available for software use
 *   S - currently used and shareable by software only
 *   E - currently used exclusively by one resource group
 *   P - currently pseudo-locked by one resource group
 */
static int rdt_bit_usage_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	u32 sw_shareable = 0, hw_shareable = 0;
	u32 exclusive = 0, pseudo_locked = 0;
	struct rdt_domain *dom;
	int i, hwb, swb, excl, psl;
	enum rdtgrp_mode mode;
	bool sep = false;
	u32 *ctrl;

	mutex_lock(&rdtgroup_mutex);
	hw_shareable = r->cache.shareable_bits;
	list_for_each_entry(dom, &r->domains, list) {
		if (sep)
			seq_putc(seq, ';');
		ctrl = dom->ctrl_val;
		sw_shareable = 0;
		exclusive = 0;
		seq_printf(seq, "%d=", dom->id);
		for (i = 0; i < closids_supported(); i++, ctrl++) {
			if (!closid_allocated(i))
				continue;
			mode = rdtgroup_mode_by_closid(i);
			switch (mode) {
			case RDT_MODE_SHAREABLE:
				sw_shareable |= *ctrl;
				break;
			case RDT_MODE_EXCLUSIVE:
				exclusive |= *ctrl;
				break;
			case RDT_MODE_PSEUDO_LOCKSETUP:
				/*
				 * RDT_MODE_PSEUDO_LOCKSETUP is possible
				 * here but not included since the CBM
				 * associated with this CLOSID in this mode
				 * is not initialized and no task or cpu can be
				 * assigned this CLOSID.
				 */
				break;
			case RDT_MODE_PSEUDO_LOCKED:
			case RDT_NUM_MODES:
				WARN(1,
				     "invalid mode for closid %d\n", i);
				break;
			}
		}
		for (i = r->cache.cbm_len - 1; i >= 0; i--) {
			pseudo_locked = dom->plr ? dom->plr->cbm : 0;
			hwb = test_bit(i, (unsigned long *)&hw_shareable);
			swb = test_bit(i, (unsigned long *)&sw_shareable);
			excl = test_bit(i, (unsigned long *)&exclusive);
			psl = test_bit(i, (unsigned long *)&pseudo_locked);
			if (hwb && swb)
				seq_putc(seq, 'X');
			else if (hwb && !swb)
				seq_putc(seq, 'H');
			else if (!hwb && swb)
				seq_putc(seq, 'S');
			else if (excl)
				seq_putc(seq, 'E');
			else if (psl)
				seq_putc(seq, 'P');
			else /* Unused bits remain */
				seq_putc(seq, '0');
		}
		sep = true;
	}
	seq_putc(seq, '\n');
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}
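
/*
 * Example "bit_usage" output (illustrative) for a resource with two
 * domains and an 8-bit CBM, read from /sys/fs/resctrl/info/L3/bit_usage:
 *
 *	0=SSSSSSSS;1=SSSSSS00
 *
 * Domain 1 has its two least significant bits currently unused; every
 * other bit is in use and shareable by software.
 */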
static int rdt_min_bw_show(struct kernfs_open_file *of,
			   struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->membw.min_bw);
	return 0;
}

static int rdt_num_rmids_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%d\n", r->num_rmid);

	return 0;
}

static int rdt_mon_features_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	struct mon_evt *mevt;

	list_for_each_entry(mevt, &r->evt_list, list)
		seq_printf(seq, "%s\n", mevt->name);

	return 0;
}
static int rdt_bw_gran_show(struct kernfs_open_file *of,
			    struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->membw.bw_gran);
	return 0;
}

static int rdt_delay_linear_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->membw.delay_linear);
	return 0;
}

static int max_threshold_occ_show(struct kernfs_open_file *of,
				  struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", intel_cqm_threshold * r->mon_scale);

	return 0;
}

static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
				       char *buf, size_t nbytes, loff_t off)
{
	struct rdt_resource *r = of->kn->parent->priv;
	unsigned int bytes;
	int ret;

	ret = kstrtouint(buf, 0, &bytes);
	if (ret)
		return ret;

	if (bytes > (boot_cpu_data.x86_cache_size * 1024))
		return -EINVAL;

	intel_cqm_threshold = bytes / r->mon_scale;

	return nbytes;
}
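
/*
 * Illustrative (user space): tuning the occupancy limit below which an
 * RMID is considered free for re-use, via the file backed by
 * max_threshold_occ_write():
 *
 *	# echo 131072 > /sys/fs/resctrl/info/L3_MON/max_threshold_occupancy
 *
 * The value is given in bytes and converted to occupancy counter units
 * by dividing by the resource's mon_scale.
 */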
/*
 * rdtgroup_mode_show - Display mode of this resource group
 */
static int rdtgroup_mode_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode));

	rdtgroup_kn_unlock(of->kn);
	return 0;
}
/**
 * rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
 * @r: Resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Checks if provided @cbm intended to be used for @closid on domain
 * @d overlaps with any other closids or other hardware usage associated
 * with this domain. If @exclusive is true then only overlaps with
 * resource groups in exclusive mode will be considered. If @exclusive
 * is false then overlaps with any resource group or hardware entities
 * will be considered.
 *
 * Return: false if CBM does not overlap, true if it does.
 */
bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
			   u32 _cbm, int closid, bool exclusive)
{
	unsigned long *cbm = (unsigned long *)&_cbm;
	unsigned long *ctrl_b;
	enum rdtgrp_mode mode;
	u32 *ctrl;
	int i;

	/* Check for any overlap with regions used by hardware directly */
	if (!exclusive) {
		if (bitmap_intersects(cbm,
				      (unsigned long *)&r->cache.shareable_bits,
				      r->cache.cbm_len))
			return true;
	}

	/* Check for overlap with other resource groups */
	ctrl = d->ctrl_val;
	for (i = 0; i < closids_supported(); i++, ctrl++) {
		ctrl_b = (unsigned long *)ctrl;
		mode = rdtgroup_mode_by_closid(i);
		if (closid_allocated(i) && i != closid &&
		    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
			if (bitmap_intersects(cbm, ctrl_b, r->cache.cbm_len)) {
				if (exclusive) {
					if (mode == RDT_MODE_EXCLUSIVE)
						return true;
					continue;
				}
				return true;
			}
		}
	}

	return false;
}
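
/*
 * Illustrative: with an 8-bit CBM, the masks 0x0f and 0x3c intersect in
 * bits 2 and 3. If 0x3c belongs to another allocated CLOSID,
 * rdtgroup_cbm_overlaps() reports true for 0x0f when @exclusive is
 * false, and also when @exclusive is true if that other group is in
 * exclusive mode.
 */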
/**
 * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
 *
 * An exclusive resource group implies that there should be no sharing of
 * its allocated resources. At the time this group is considered to be
 * exclusive this test can determine if its current schemata supports this
 * setting by testing for overlap with all other resource groups.
 *
 * Return: true if resource group can be exclusive, false if there is overlap
 * with allocations of other resource groups and thus this resource group
 * cannot be exclusive.
 */
static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
{
	int closid = rdtgrp->closid;
	struct rdt_resource *r;
	bool has_cache = false;
	struct rdt_domain *d;

	for_each_alloc_enabled_rdt_resource(r) {
		if (r->rid == RDT_RESOURCE_MBA)
			continue;
		has_cache = true;
		list_for_each_entry(d, &r->domains, list) {
			if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
						  rdtgrp->closid, false)) {
				rdt_last_cmd_puts("schemata overlaps\n");
				return false;
			}
		}
	}

	if (!has_cache) {
		rdt_last_cmd_puts("cannot be exclusive without CAT/CDP\n");
		return false;
	}

	return true;
}
/**
 * rdtgroup_mode_write - Modify the resource group's mode
 *
 */
static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	enum rdtgrp_mode mode;
	int ret = 0;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	rdt_last_cmd_clear();

	mode = rdtgrp->mode;

	if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) ||
	    (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) ||
	    (!strcmp(buf, "pseudo-locksetup") &&
	     mode == RDT_MODE_PSEUDO_LOCKSETUP) ||
	    (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED))
		goto out;

	if (mode == RDT_MODE_PSEUDO_LOCKED) {
		rdt_last_cmd_printf("cannot change pseudo-locked group\n");
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(buf, "shareable")) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			ret = rdtgroup_locksetup_exit(rdtgrp);
			if (ret)
				goto out;
		}
		rdtgrp->mode = RDT_MODE_SHAREABLE;
	} else if (!strcmp(buf, "exclusive")) {
		if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
			ret = -EINVAL;
			goto out;
		}
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			ret = rdtgroup_locksetup_exit(rdtgrp);
			if (ret)
				goto out;
		}
		rdtgrp->mode = RDT_MODE_EXCLUSIVE;
	} else if (!strcmp(buf, "pseudo-locksetup")) {
		ret = rdtgroup_locksetup_enter(rdtgrp);
		if (ret)
			goto out;
		rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
	} else {
		rdt_last_cmd_printf("unknown/unsupported mode\n");
		ret = -EINVAL;
	}

out:
	rdtgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}
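
/*
 * Example (user space, illustrative) of changing a group's mode via the
 * file backed by rdtgroup_mode_write():
 *
 *	# echo exclusive > /sys/fs/resctrl/grp1/mode
 *	# cat /sys/fs/resctrl/grp1/mode
 *	exclusive
 */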
/**
 * rdtgroup_cbm_to_size - Translate CBM to size in bytes
 * @r: RDT resource to which @d belongs.
 * @d: RDT domain instance.
 * @cbm: bitmask for which the size should be computed.
 *
 * The bitmask provided associated with the RDT domain instance @d will be
 * translated into how many bytes it represents. The size in bytes is
 * computed by first dividing the total cache size by the CBM length to
 * determine how many bytes each bit in the bitmask represents. The result
 * is multiplied with the number of bits set in the bitmask.
 */
unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
				  struct rdt_domain *d, u32 cbm)
{
	struct cpu_cacheinfo *ci;
	unsigned int size = 0;
	int num_b, i;

	num_b = bitmap_weight((unsigned long *)&cbm, r->cache.cbm_len);
	ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
	for (i = 0; i < ci->num_leaves; i++) {
		if (ci->info_list[i].level == r->cache_level) {
			size = ci->info_list[i].size / r->cache.cbm_len * num_b;
			break;
		}
	}

	return size;
}
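
/*
 * Worked example (illustrative): for a 32 MB L3 cache with a 20-bit
 * CBM, each bit represents 33554432 / 20 = 1677721 bytes. A CBM of 0xf
 * (four bits set) therefore translates to 4 * 1677721, roughly 6.7 MB.
 */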
/**
 * rdtgroup_size_show - Display size in bytes of allocated regions
 *
 * The "size" file mirrors the layout of the "schemata" file, printing the
 * size in bytes of each region instead of the capacity bitmask.
 *
 */
static int rdtgroup_size_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	struct rdt_domain *d;
	unsigned int size;
	bool sep;
	u32 ctrl;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		seq_printf(s, "%*s:", max_name_width, rdtgrp->plr->r->name);
		size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
					    rdtgrp->plr->d,
					    rdtgrp->plr->cbm);
		seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
		goto out;
	}

	for_each_alloc_enabled_rdt_resource(r) {
		sep = false;
		seq_printf(s, "%*s:", max_name_width, r->name);
		list_for_each_entry(d, &r->domains, list) {
			if (sep)
				seq_putc(s, ';');
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				size = 0;
			} else {
				ctrl = (!is_mba_sc(r) ?
						d->ctrl_val[rdtgrp->closid] :
						d->mbps_val[rdtgrp->closid]);
				if (r->rid == RDT_RESOURCE_MBA)
					size = ctrl;
				else
					size = rdtgroup_cbm_to_size(r, d, ctrl);
			}
			seq_printf(s, "%d=%u", d->id, size);
			sep = true;
		}
		seq_putc(s, '\n');
	}

out:
	rdtgroup_kn_unlock(of->kn);

	return 0;
}
/* rdtgroup information files for one cache resource. */
static struct rftype res_common_files[] = {
	{
		.name		= "last_cmd_status",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_last_cmd_status_show,
		.fflags		= RF_TOP_INFO,
	},
	{
		.name		= "num_closids",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_num_closids_show,
		.fflags		= RF_CTRL_INFO,
	},
	{
		.name		= "mon_features",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_mon_features_show,
		.fflags		= RF_MON_INFO,
	},
	{
		.name		= "num_rmids",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_num_rmids_show,
		.fflags		= RF_MON_INFO,
	},
	{
		.name		= "cbm_mask",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_default_ctrl_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "min_cbm_bits",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_min_cbm_bits_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "shareable_bits",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_shareable_bits_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "bit_usage",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_bit_usage_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "min_bandwidth",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_min_bw_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "bandwidth_gran",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_bw_gran_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "delay_linear",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_delay_linear_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "max_threshold_occupancy",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= max_threshold_occ_write,
		.seq_show	= max_threshold_occ_show,
		.fflags		= RF_MON_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "cpus",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_cpus_write,
		.seq_show	= rdtgroup_cpus_show,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "cpus_list",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_cpus_write,
		.seq_show	= rdtgroup_cpus_show,
		.flags		= RFTYPE_FLAGS_CPUS_LIST,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "tasks",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_tasks_write,
		.seq_show	= rdtgroup_tasks_show,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "schemata",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_schemata_write,
		.seq_show	= rdtgroup_schemata_show,
		.fflags		= RF_CTRL_BASE,
	},
	{
		.name		= "mode",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_mode_write,
		.seq_show	= rdtgroup_mode_show,
		.fflags		= RF_CTRL_BASE,
	},
	{
		.name		= "size",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdtgroup_size_show,
		.fflags		= RF_CTRL_BASE,
	},
};
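
/*
 * With the fflags above a mounted resctrl filesystem exposes, for
 * example, "num_closids", "cbm_mask" and "bit_usage" under
 * /sys/fs/resctrl/info/L3, "num_rmids" and "mon_features" under
 * /sys/fs/resctrl/info/L3_MON, and "cpus", "tasks", "schemata", "mode"
 * and "size" inside each resource group directory.
 */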
static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
{
	struct rftype *rfts, *rft;
	int ret, len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	lockdep_assert_held(&rdtgroup_mutex);

	for (rft = rfts; rft < rfts + len; rft++) {
		if ((fflags & rft->fflags) == rft->fflags) {
			ret = rdtgroup_add_file(kn, rft);
			if (ret)
				goto error;
		}
	}

	return 0;
error:
	pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
	while (--rft >= rfts) {
		if ((fflags & rft->fflags) == rft->fflags)
			kernfs_remove_by_name(kn, rft->name);
	}
	return ret;
}
/**
 * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file
 * @r: The resource group with which the file is associated.
 * @name: Name of the file
 *
 * The permissions of named resctrl file, directory, or link are modified
 * to not allow read, write, or execute by any user.
 *
 * WARNING: This function is intended to communicate to the user that the
 * resctrl file has been locked down - that it is not relevant to the
 * particular state the system finds itself in. It should not be relied
 * on to protect from user access because after the file's permissions
 * are restricted the user can still change the permissions using chmod
 * from the command line.
 *
 * Return: 0 on success, <0 on failure.
 */
int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name)
{
	struct iattr iattr = {.ia_valid = ATTR_MODE,};
	struct kernfs_node *kn;
	int ret = 0;

	kn = kernfs_find_and_get_ns(r->kn, name, NULL);
	if (!kn)
		return -ENOENT;

	switch (kernfs_type(kn)) {
	case KERNFS_DIR:
		iattr.ia_mode = S_IFDIR;
		break;
	case KERNFS_FILE:
		iattr.ia_mode = S_IFREG;
		break;
	case KERNFS_LINK:
		iattr.ia_mode = S_IFLNK;
		break;
	}

	ret = kernfs_setattr(kn, &iattr);
	kernfs_put(kn);
	return ret;
}
/**
 * rdtgroup_kn_mode_restore - Restore user access to named resctrl file
 * @r: The resource group with which the file is associated.
 * @name: Name of the file
 * @mask: Mask of permissions that should be restored
 *
 * Restore the permissions of the named file. If @name is a directory the
 * permissions of its parent will be used.
 *
 * Return: 0 on success, <0 on failure.
 */
int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
			     umode_t mask)
{
	struct iattr iattr = {.ia_valid = ATTR_MODE,};
	struct kernfs_node *kn, *parent;
	struct rftype *rfts, *rft;
	int ret, len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	for (rft = rfts; rft < rfts + len; rft++) {
		if (!strcmp(rft->name, name))
			iattr.ia_mode = rft->mode & mask;
	}

	kn = kernfs_find_and_get_ns(r->kn, name, NULL);
	if (!kn)
		return -ENOENT;

	switch (kernfs_type(kn)) {
	case KERNFS_DIR:
		parent = kernfs_get_parent(kn);
		if (parent) {
			iattr.ia_mode |= parent->mode;
			kernfs_put(parent);
		}
		iattr.ia_mode |= S_IFDIR;
		break;
	case KERNFS_FILE:
		iattr.ia_mode |= S_IFREG;
		break;
	case KERNFS_LINK:
		iattr.ia_mode |= S_IFLNK;
		break;
	}

	ret = kernfs_setattr(kn, &iattr);
	kernfs_put(kn);
	return ret;
}
static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
				      unsigned long fflags)
{
	struct kernfs_node *kn_subdir;
	int ret;

	kn_subdir = kernfs_create_dir(kn_info, name,
				      kn_info->mode, r);
	if (IS_ERR(kn_subdir))
		return PTR_ERR(kn_subdir);

	kernfs_get(kn_subdir);
	ret = rdtgroup_kn_set_ugid(kn_subdir);
	if (ret)
		return ret;

	ret = rdtgroup_add_files(kn_subdir, fflags);
	if (!ret)
		kernfs_activate(kn_subdir);

	return ret;
}
static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
{
	struct rdt_resource *r;
	unsigned long fflags;
	char name[32];
	int ret;

	/* create the directory */
	kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
	if (IS_ERR(kn_info))
		return PTR_ERR(kn_info);
	kernfs_get(kn_info);

	ret = rdtgroup_add_files(kn_info, RF_TOP_INFO);
	if (ret)
		goto out_destroy;

	for_each_alloc_enabled_rdt_resource(r) {
		fflags = r->fflags | RF_CTRL_INFO;
		ret = rdtgroup_mkdir_info_resdir(r, r->name, fflags);
		if (ret)
			goto out_destroy;
	}

	for_each_mon_enabled_rdt_resource(r) {
		fflags = r->fflags | RF_MON_INFO;
		sprintf(name, "%s_MON", r->name);
		ret = rdtgroup_mkdir_info_resdir(r, name, fflags);
		if (ret)
			goto out_destroy;
	}

	/*
	 * This extra ref will be put in kernfs_remove() and guarantees
	 * that @rdtgrp->kn is always accessible.
	 */
	kernfs_get(kn_info);

	ret = rdtgroup_kn_set_ugid(kn_info);
	if (ret)
		goto out_destroy;

	kernfs_activate(kn_info);

	return 0;

out_destroy:
	kernfs_remove(kn_info);
	return ret;
}
static int
mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
		    char *name, struct kernfs_node **dest_kn)
{
	struct kernfs_node *kn;
	int ret;

	/* create the directory */
	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	if (dest_kn)
		*dest_kn = kn;

	/*
	 * This extra ref will be put in kernfs_remove() and guarantees
	 * that @rdtgrp->kn is always accessible.
	 */
	kernfs_get(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	kernfs_activate(kn);

	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}
static void l3_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrl(IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
}

static void l2_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrl(IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
}

static inline bool is_mba_linear(void)
{
	return rdt_resources_all[RDT_RESOURCE_MBA].membw.delay_linear;
}
static int set_cache_qos_cfg(int level, bool enable)
{
	void (*update)(void *arg);
	struct rdt_resource *r_l;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int cpu;

	/* Validate the level before allocating, so nothing can leak */
	if (level == RDT_RESOURCE_L3)
		update = l3_qos_cfg_update;
	else if (level == RDT_RESOURCE_L2)
		update = l2_qos_cfg_update;
	else
		return -EINVAL;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	r_l = &rdt_resources_all[level];
	list_for_each_entry(d, &r_l->domains, list) {
		/* Pick one CPU from each domain instance to update MSR */
		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
	}
	cpu = get_cpu();
	/* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		update(&enable);
	/* Update QOS_CFG MSR on all other cpus in cpu_mask. */
	smp_call_function_many(cpu_mask, update, &enable, 1);
	put_cpu();

	free_cpumask_var(cpu_mask);

	return 0;
}
/*
 * Enable or disable the MBA software controller
 * which helps user specify bandwidth in MBps.
 * MBA software controller is supported only if
 * MBM is supported and MBA is in linear scale.
 */
static int set_mba_sc(bool mba_sc)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA];
	struct rdt_domain *d;

	if (!is_mbm_enabled() || !is_mba_linear() ||
	    mba_sc == is_mba_sc(r))
		return -EINVAL;

	r->membw.mba_sc = mba_sc;
	list_for_each_entry(d, &r->domains, list)
		setup_default_ctrlval(r, d->ctrl_val, d->mbps_val);

	return 0;
}
static int cdp_enable(int level, int data_type, int code_type)
{
	struct rdt_resource *r_ldata = &rdt_resources_all[data_type];
	struct rdt_resource *r_lcode = &rdt_resources_all[code_type];
	struct rdt_resource *r_l = &rdt_resources_all[level];
	int ret;

	if (!r_l->alloc_capable || !r_ldata->alloc_capable ||
	    !r_lcode->alloc_capable)
		return -EINVAL;

	ret = set_cache_qos_cfg(level, true);
	if (!ret) {
		r_l->alloc_enabled = false;
		r_ldata->alloc_enabled = true;
		r_lcode->alloc_enabled = true;
	}
	return ret;
}
static int cdpl3_enable(void)
{
	return cdp_enable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA,
			  RDT_RESOURCE_L3CODE);
}

static int cdpl2_enable(void)
{
	return cdp_enable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA,
			  RDT_RESOURCE_L2CODE);
}

static void cdp_disable(int level, int data_type, int code_type)
{
	struct rdt_resource *r = &rdt_resources_all[level];

	r->alloc_enabled = r->alloc_capable;

	if (rdt_resources_all[data_type].alloc_enabled) {
		rdt_resources_all[data_type].alloc_enabled = false;
		rdt_resources_all[code_type].alloc_enabled = false;
		set_cache_qos_cfg(level, false);
	}
}

static void cdpl3_disable(void)
{
	cdp_disable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, RDT_RESOURCE_L3CODE);
}

static void cdpl2_disable(void)
{
	cdp_disable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, RDT_RESOURCE_L2CODE);
}

static void cdp_disable_all(void)
{
	if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
		cdpl3_disable();
	if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
		cdpl2_disable();
}
static int parse_rdtgroupfs_options(char *data)
{
	char *token, *o = data;
	int ret = 0;

	while ((token = strsep(&o, ",")) != NULL) {
		if (!*token) {
			ret = -EINVAL;
			goto out;
		}

		if (!strcmp(token, "cdp")) {
			ret = cdpl3_enable();
			if (ret)
				goto out;
		} else if (!strcmp(token, "cdpl2")) {
			ret = cdpl2_enable();
			if (ret)
				goto out;
		} else if (!strcmp(token, "mba_MBps")) {
			ret = set_mba_sc(true);
			if (ret)
				goto out;
		} else {
			ret = -EINVAL;
			goto out;
		}
	}

	return 0;

out:
	pr_err("Invalid mount option \"%s\"\n", token);

	return ret;
}
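
/*
 * Example (illustrative) mount invocations whose option strings are
 * handled by parse_rdtgroupfs_options():
 *
 *	# mount -t resctrl resctrl /sys/fs/resctrl
 *	# mount -t resctrl -o cdp,mba_MBps resctrl /sys/fs/resctrl
 *
 * "cdp" enables L3 code/data prioritization, "cdpl2" the L2 variant,
 * and "mba_MBps" the MBA software controller.
 */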
/*
 * We don't allow rdtgroup directories to be created anywhere
 * except the root directory. Thus when looking for the rdtgroup
 * structure for a kernfs node we are either looking at a directory,
 * in which case the rdtgroup structure is pointed at by the "priv"
 * field, otherwise we have a file, and need only look to the parent
 * to find the rdtgroup.
 */
static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
{
	if (kernfs_type(kn) == KERNFS_DIR) {
		/*
		 * All the resource directories use "kn->priv"
		 * to point to the "struct rdtgroup" for the
		 * resource. "info" and its subdirectories don't
		 * have rdtgroup structures, so return NULL here.
		 */
		if (kn == kn_info || kn->parent == kn_info)
			return NULL;
		else
			return kn->priv;
	} else {
		return kn->parent->priv;
	}
}

struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return NULL;

	atomic_inc(&rdtgrp->waitcount);
	kernfs_break_active_protection(kn);

	mutex_lock(&rdtgroup_mutex);

	/* Was this group deleted while we waited? */
	if (rdtgrp->flags & RDT_DELETED)
		return NULL;

	return rdtgrp;
}

void rdtgroup_kn_unlock(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return;

	mutex_unlock(&rdtgroup_mutex);

	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
	    (rdtgrp->flags & RDT_DELETED)) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
			rdtgroup_pseudo_lock_remove(rdtgrp);
		kernfs_unbreak_active_protection(kn);
		kernfs_put(rdtgrp->kn);
		kfree(rdtgrp);
	} else {
		kernfs_unbreak_active_protection(kn);
	}
}
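
/*
 * Illustrative usage sketch for the pair of helpers above, mirroring the
 * pattern the mkdir/rmdir handlers further down follow:
 *
 *	rdtgrp = rdtgroup_kn_lock_live(kn);
 *	if (!rdtgrp) {
 *		ret = -EPERM;
 *		goto out;
 *	}
 *	... operate on the group, knowing it stays live ...
 *	rdtgroup_kn_unlock(kn);
 */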

static int mkdir_mondata_all(struct kernfs_node *parent_kn,
			     struct rdtgroup *prgrp,
			     struct kernfs_node **mon_data_kn);

static struct dentry *rdt_mount(struct file_system_type *fs_type,
				int flags, const char *unused_dev_name,
				void *data)
{
	struct rdt_domain *dom;
	struct rdt_resource *r;
	struct dentry *dentry;
	int ret;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);
	/*
	 * resctrl file system can only be mounted once.
	 */
	if (static_branch_unlikely(&rdt_enable_key)) {
		dentry = ERR_PTR(-EBUSY);
		goto out;
	}

	ret = parse_rdtgroupfs_options(data);
	if (ret) {
		dentry = ERR_PTR(ret);
		goto out_cdp;
	}

	closid_init();

	ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
	if (ret) {
		dentry = ERR_PTR(ret);
		goto out_cdp;
	}

	if (rdt_mon_capable) {
		ret = mongroup_create_dir(rdtgroup_default.kn,
					  NULL, "mon_groups",
					  &kn_mongrp);
		if (ret) {
			dentry = ERR_PTR(ret);
			goto out_info;
		}
		kernfs_get(kn_mongrp);

		ret = mkdir_mondata_all(rdtgroup_default.kn,
					&rdtgroup_default, &kn_mondata);
		if (ret) {
			dentry = ERR_PTR(ret);
			goto out_mongrp;
		}
		kernfs_get(kn_mondata);
		rdtgroup_default.mon.mon_data_kn = kn_mondata;
	}

	ret = rdt_pseudo_lock_init();
	if (ret) {
		dentry = ERR_PTR(ret);
		goto out_mondata;
	}

	dentry = kernfs_mount(fs_type, flags, rdt_root,
			      RDTGROUP_SUPER_MAGIC, NULL);
	if (IS_ERR(dentry))
		goto out_psl;

	if (rdt_alloc_capable)
		static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
	if (rdt_mon_capable)
		static_branch_enable_cpuslocked(&rdt_mon_enable_key);

	if (rdt_alloc_capable || rdt_mon_capable)
		static_branch_enable_cpuslocked(&rdt_enable_key);

	if (is_mbm_enabled()) {
		r = &rdt_resources_all[RDT_RESOURCE_L3];
		list_for_each_entry(dom, &r->domains, list)
			mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL);
	}

	goto out;

out_psl:
	rdt_pseudo_lock_release();
out_mondata:
	if (rdt_mon_capable)
		kernfs_remove(kn_mondata);
out_mongrp:
	if (rdt_mon_capable)
		kernfs_remove(kn_mongrp);
out_info:
	kernfs_remove(kn_info);
out_cdp:
	cdp_disable_all();
out:
	rdt_last_cmd_clear();
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();

	return dentry;
}

static int reset_all_ctrls(struct rdt_resource *r)
{
	struct msr_param msr_param;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int i, cpu;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	msr_param.res = r;
	msr_param.low = 0;
	msr_param.high = r->num_closid;

	/*
	 * Disable resource control for this resource by setting all
	 * CBMs in all domains to the maximum mask value. Pick one CPU
	 * from each domain to update the MSRs below.
	 */
	list_for_each_entry(d, &r->domains, list) {
		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);

		for (i = 0; i < r->num_closid; i++)
			d->ctrl_val[i] = r->default_ctrl;
	}
	cpu = get_cpu();
	/* Update CBM on this cpu if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		rdt_ctrl_update(&msr_param);
	/* Update CBM on all other cpus in cpu_mask. */
	smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
	put_cpu();

	free_cpumask_var(cpu_mask);

	return 0;
}
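
/*
 * Illustrative effect of reset_all_ctrls(), assuming an L3 cache
 * resource with a 20 bit CBM whose default_ctrl was enumerated as the
 * all-ones mask 0xfffff: every CLOSID in every domain is written back
 * to the full-cache mask, which effectively neutralizes allocation.
 */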

static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (rdt_alloc_capable &&
	       (r->type == RDTCTRL_GROUP) && (t->closid == r->closid));
}

static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (rdt_mon_capable &&
	       (r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid));
}

/*
 * Move tasks from one to the other group. If @from is NULL, then all tasks
 * in the system are moved unconditionally (used for teardown).
 *
 * If @mask is not NULL the cpus on which moved tasks are running are set
 * in that mask so the update smp function call is restricted to affected
 * cpus.
 */
static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
				 struct cpumask *mask)
{
	struct task_struct *p, *t;

	read_lock(&tasklist_lock);
	for_each_process_thread(p, t) {
		if (!from || is_closid_match(t, from) ||
		    is_rmid_match(t, from)) {
			t->closid = to->closid;
			t->rmid = to->mon.rmid;

#ifdef CONFIG_SMP
			/*
			 * This is safe on x86 w/o barriers as the ordering
			 * of writing to task_cpu() and t->on_cpu is
			 * reverse to the reading here. The detection is
			 * inaccurate as tasks might move or schedule
			 * before the smp function call takes place. In
			 * such a case the function call is pointless, but
			 * there is no other side effect.
			 */
			if (mask && t->on_cpu)
				cpumask_set_cpu(task_cpu(t), mask);
#endif
		}
	}
	read_unlock(&tasklist_lock);
}

static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
{
	struct rdtgroup *sentry, *stmp;
	struct list_head *head;

	head = &rdtgrp->mon.crdtgrp_list;
	list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
		free_rmid(sentry->mon.rmid);
		list_del(&sentry->mon.crdtgrp_list);
		kfree(sentry);
	}
}

/*
 * Forcibly remove all subdirectories under root.
 */
static void rmdir_all_sub(void)
{
	struct rdtgroup *rdtgrp, *tmp;

	/* Move all tasks to the default resource group */
	rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);

	list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
		/* Free any child rmids */
		free_all_child_rdtgrp(rdtgrp);

		/* Remove each rdtgroup other than root */
		if (rdtgrp == &rdtgroup_default)
			continue;

		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
			rdtgroup_pseudo_lock_remove(rdtgrp);

		/*
		 * Give any CPUs back to the default group. We cannot copy
		 * cpu_online_mask because a CPU might have executed the
		 * offline callback already, but is still marked online.
		 */
		cpumask_or(&rdtgroup_default.cpu_mask,
			   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);

		free_rmid(rdtgrp->mon.rmid);

		kernfs_remove(rdtgrp->kn);
		list_del(&rdtgrp->rdtgroup_list);
		kfree(rdtgrp);
	}
	/* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
	update_closid_rmid(cpu_online_mask, &rdtgroup_default);

	kernfs_remove(kn_info);
	kernfs_remove(kn_mongrp);
	kernfs_remove(kn_mondata);
}

static void rdt_kill_sb(struct super_block *sb)
{
	struct rdt_resource *r;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);

	set_mba_sc(false);

	/* Put everything back to default values. */
	for_each_alloc_enabled_rdt_resource(r)
		reset_all_ctrls(r);
	cdp_disable_all();
	rmdir_all_sub();
	rdt_pseudo_lock_release();
	rdtgroup_default.mode = RDT_MODE_SHAREABLE;
	static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
	static_branch_disable_cpuslocked(&rdt_mon_enable_key);
	static_branch_disable_cpuslocked(&rdt_enable_key);
	kernfs_kill_sb(sb);
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
}

static struct file_system_type rdt_fs_type = {
	.name	 = "resctrl",
	.mount	 = rdt_mount,
	.kill_sb = rdt_kill_sb,
};

static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
		       void *priv)
{
	struct kernfs_node *kn;
	int ret = 0;

	kn = __kernfs_create_file(parent_kn, name, 0444,
				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0,
				  &kf_mondata_ops, priv, NULL, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	return ret;
}

/*
 * Remove all subdirectories of mon_data of ctrl_mon groups
 * and monitor groups with given domain id.
 */
void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, unsigned int dom_id)
{
	struct rdtgroup *prgrp, *crgrp;
	char name[32];

	if (!r->mon_enabled)
		return;

	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
		sprintf(name, "mon_%s_%02d", r->name, dom_id);
		kernfs_remove_by_name(prgrp->mon.mon_data_kn, name);

		list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
			kernfs_remove_by_name(crgrp->mon.mon_data_kn, name);
	}
}

static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
				struct rdt_domain *d,
				struct rdt_resource *r, struct rdtgroup *prgrp)
{
	union mon_data_bits priv;
	struct kernfs_node *kn;
	struct mon_evt *mevt;
	struct rmid_read rr;
	char name[32];
	int ret;

	sprintf(name, "mon_%s_%02d", r->name, d->id);
	/* create the directory */
	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	/*
	 * This extra ref will be put in kernfs_remove() and guarantees
	 * that kn is always accessible.
	 */
	kernfs_get(kn);
	ret = rdtgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	if (WARN_ON(list_empty(&r->evt_list))) {
		ret = -EPERM;
		goto out_destroy;
	}

	priv.u.rid = r->rid;
	priv.u.domid = d->id;
	list_for_each_entry(mevt, &r->evt_list, list) {
		priv.u.evtid = mevt->evtid;
		ret = mon_addfile(kn, mevt->name, priv.priv);
		if (ret)
			goto out_destroy;

		if (is_mbm_event(mevt->evtid))
			mon_event_read(&rr, d, prgrp, mevt->evtid, true);
	}
	kernfs_activate(kn);
	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}

/*
 * Add all subdirectories of mon_data for "ctrl_mon" groups
 * and "monitor" groups with given domain id.
 */
void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
				    struct rdt_domain *d)
{
	struct kernfs_node *parent_kn;
	struct rdtgroup *prgrp, *crgrp;
	struct list_head *head;

	if (!r->mon_enabled)
		return;

	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
		parent_kn = prgrp->mon.mon_data_kn;
		mkdir_mondata_subdir(parent_kn, d, r, prgrp);

		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
			parent_kn = crgrp->mon.mon_data_kn;
			mkdir_mondata_subdir(parent_kn, d, r, crgrp);
		}
	}
}

static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
				       struct rdt_resource *r,
				       struct rdtgroup *prgrp)
{
	struct rdt_domain *dom;
	int ret;

	list_for_each_entry(dom, &r->domains, list) {
		ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * This creates a directory mon_data which contains the monitored data.
 *
 * mon_data has one directory for each domain, named in the format
 * mon_<domain_name>_<domain_id>. For example, mon_data with an L3
 * domain looks as below:
 * ./mon_data:
 * mon_L3_00
 * mon_L3_01
 * mon_L3_02
 * ...
 *
 * Each domain directory has one file per event:
 * ./mon_L3_00/:
 * llc_occupancy
 *
 */
static int mkdir_mondata_all(struct kernfs_node *parent_kn,
			     struct rdtgroup *prgrp,
			     struct kernfs_node **dest_kn)
{
	struct rdt_resource *r;
	struct kernfs_node *kn;
	int ret;

	/*
	 * Create the mon_data directory first.
	 */
	ret = mongroup_create_dir(parent_kn, NULL, "mon_data", &kn);
	if (ret)
		return ret;

	if (dest_kn)
		*dest_kn = kn;

	/*
	 * Create the subdirectories for each domain. Note that all events
	 * in a domain like L3 are grouped into a resource whose domain is L3
	 */
	for_each_mon_enabled_rdt_resource(r) {
		ret = mkdir_mondata_subdir_alldom(kn, r, prgrp);
		if (ret)
			goto out_destroy;
	}

	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}

/**
 * cbm_ensure_valid - Enforce validity on provided CBM
 * @_val:	Candidate CBM
 * @r:		RDT resource to which the CBM belongs
 *
 * The provided CBM represents all cache portions available for use. This
 * may be represented by a bitmap that does not consist of contiguous ones
 * and thus be an invalid CBM.
 * Here the provided CBM is forced to be a valid CBM by only considering
 * the first set of contiguous bits as valid and clearing all remaining
 * bits.
 * The intention here is to provide a valid default CBM with which a new
 * resource group is initialized. The user can follow this with a
 * modification to the CBM if the default does not satisfy the
 * requirements.
 */
static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
{
	/*
	 * Convert the u32 _val to an unsigned long required by all the bit
	 * operations within this function. No more than 32 bits of this
	 * converted value can be accessed because all bit operations are
	 * additionally provided with cbm_len that is initialized during
	 * hardware enumeration using five bits from the EAX register and
	 * thus never can exceed 32 bits.
	 */
	unsigned long *val = (unsigned long *)_val;
	unsigned int cbm_len = r->cache.cbm_len;
	unsigned long first_bit, zero_bit;

	if (*val == 0)
		return;

	first_bit = find_first_bit(val, cbm_len);
	zero_bit = find_next_zero_bit(val, cbm_len, first_bit);

	/* Clear any remaining bits to ensure contiguous region */
	bitmap_clear(val, zero_bit, cbm_len - zero_bit);
}
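
/*
 * Worked example (illustrative): with cbm_len = 11 and *_val = 0x605
 * (binary 110 0000 0101), first_bit = 0 and zero_bit = 1, so
 * bitmap_clear() wipes bits 1..10 and the result is 0x1: only the
 * first contiguous run of set bits survives.
 */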

/**
 * rdtgroup_init_alloc - Initialize the new RDT group's allocations
 *
 * A new RDT group is being created on an allocation capable (CAT)
 * supporting system. Set this group up to start off with all usable
 * allocations. That is, all shareable and unused bits.
 *
 * All-zero CBM is invalid. If there are no more shareable bits available
 * on any domain then the entire allocation will fail.
 */
static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
{
	u32 used_b = 0, unused_b = 0;
	u32 closid = rdtgrp->closid;
	struct rdt_resource *r;
	enum rdtgrp_mode mode;
	struct rdt_domain *d;
	int i, ret;
	u32 *ctrl;

	for_each_alloc_enabled_rdt_resource(r) {
		/*
		 * Only initialize default allocations for CBM cache
		 * resources
		 */
		if (r->rid == RDT_RESOURCE_MBA)
			continue;
		list_for_each_entry(d, &r->domains, list) {
			d->have_new_ctrl = false;
			d->new_ctrl = r->cache.shareable_bits;
			used_b = r->cache.shareable_bits;
			ctrl = d->ctrl_val;
			for (i = 0; i < closids_supported(); i++, ctrl++) {
				if (closid_allocated(i) && i != closid) {
					mode = rdtgroup_mode_by_closid(i);
					if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
						break;
					used_b |= *ctrl;
					if (mode == RDT_MODE_SHAREABLE)
						d->new_ctrl |= *ctrl;
				}
			}
			if (d->plr && d->plr->cbm > 0)
				used_b |= d->plr->cbm;
			unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
			unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
			d->new_ctrl |= unused_b;
			/*
			 * Force the initial CBM to be valid, user can
			 * modify the CBM based on system availability.
			 */
			cbm_ensure_valid(&d->new_ctrl, r);
			if (bitmap_weight((unsigned long *) &d->new_ctrl,
					  r->cache.cbm_len) <
			    r->cache.min_cbm_bits) {
				rdt_last_cmd_printf("no space on %s:%d\n",
						    r->name, d->id);
				return -ENOSPC;
			}
			d->have_new_ctrl = true;
		}
	}

	for_each_alloc_enabled_rdt_resource(r) {
		/*
		 * Only initialize default allocations for CBM cache
		 * resources
		 */
		if (r->rid == RDT_RESOURCE_MBA)
			continue;
		ret = update_domains(r, rdtgrp->closid);
		if (ret < 0) {
			rdt_last_cmd_puts("failed to initialize allocations\n");
			return ret;
		}
		rdtgrp->mode = RDT_MODE_SHAREABLE;
	}

	return 0;
}
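
/*
 * Illustrative arithmetic for the unused-bits computation above:
 * assuming cbm_len = 8 and used_b = 0x3c, BIT_MASK(8) - 1 = 0xff, so
 * unused_b = 0x3c ^ 0xff = 0xc3. Those bits are OR'ed into new_ctrl
 * and cbm_ensure_valid() then trims the result down to one contiguous
 * run of bits.
 */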

static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
			     struct kernfs_node *prgrp_kn,
			     const char *name, umode_t mode,
			     enum rdt_group_type rtype, struct rdtgroup **r)
{
	struct rdtgroup *prdtgrp, *rdtgrp;
	struct kernfs_node *kn;
	uint files = 0;
	int ret;

	prdtgrp = rdtgroup_kn_lock_live(prgrp_kn);
	rdt_last_cmd_clear();
	if (!prdtgrp) {
		ret = -ENODEV;
		rdt_last_cmd_puts("directory was removed\n");
		goto out_unlock;
	}

	if (rtype == RDTMON_GROUP &&
	    (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
	     prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
		ret = -EINVAL;
		rdt_last_cmd_puts("pseudo-locking in progress\n");
		goto out_unlock;
	}

	/* allocate the rdtgroup. */
	rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
	if (!rdtgrp) {
		ret = -ENOSPC;
		rdt_last_cmd_puts("kernel out of memory\n");
		goto out_unlock;
	}
	*r = rdtgrp;
	rdtgrp->mon.parent = prdtgrp;
	rdtgrp->type = rtype;
	INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list);

	/* kernfs creates the directory for rdtgrp */
	kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp);
	if (IS_ERR(kn)) {
		ret = PTR_ERR(kn);
		rdt_last_cmd_puts("kernfs create error\n");
		goto out_free_rgrp;
	}
	rdtgrp->kn = kn;

	/*
	 * kernfs_remove() will drop the reference count on "kn" which
	 * will free it. But we still need it to stick around for the
	 * rdtgroup_kn_unlock(kn) call below. Take one extra reference
	 * here, which will be dropped inside rdtgroup_kn_unlock().
	 */
	kernfs_get(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		rdt_last_cmd_puts("kernfs perm error\n");
		goto out_destroy;
	}

	files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype);
	ret = rdtgroup_add_files(kn, files);
	if (ret) {
		rdt_last_cmd_puts("kernfs fill error\n");
		goto out_destroy;
	}

	if (rdt_mon_capable) {
		ret = alloc_rmid();
		if (ret < 0) {
			rdt_last_cmd_puts("out of RMIDs\n");
			goto out_destroy;
		}
		rdtgrp->mon.rmid = ret;

		ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn);
		if (ret) {
			rdt_last_cmd_puts("kernfs subdir error\n");
			goto out_idfree;
		}
	}
	kernfs_activate(kn);

	/*
	 * The caller unlocks the prgrp_kn upon success.
	 */
	return 0;

out_idfree:
	free_rmid(rdtgrp->mon.rmid);
out_destroy:
	kernfs_remove(rdtgrp->kn);
out_free_rgrp:
	kfree(rdtgrp);
out_unlock:
	rdtgroup_kn_unlock(prgrp_kn);
	return ret;
}

static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
{
	kernfs_remove(rgrp->kn);
	free_rmid(rgrp->mon.rmid);
	kfree(rgrp);
}

/*
 * Create a monitor group under "mon_groups" directory of a control
 * and monitor group (ctrl_mon). This is a resource group
 * to monitor a subset of tasks and cpus in its parent ctrl_mon group.
 */
static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
			      struct kernfs_node *prgrp_kn,
			      const char *name,
			      umode_t mode)
{
	struct rdtgroup *rdtgrp, *prgrp;
	int ret;

	ret = mkdir_rdt_prepare(parent_kn, prgrp_kn, name, mode, RDTMON_GROUP,
				&rdtgrp);
	if (ret)
		return ret;

	prgrp = rdtgrp->mon.parent;
	rdtgrp->closid = prgrp->closid;

	/*
	 * Add the rdtgrp to the list of rdtgrps the parent
	 * ctrl_mon group has to track.
	 */
	list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list);

	rdtgroup_kn_unlock(prgrp_kn);
	return ret;
}

/*
 * These are rdtgroups created under the root directory. Can be used
 * to allocate and monitor resources.
 */
static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
				   struct kernfs_node *prgrp_kn,
				   const char *name, umode_t mode)
{
	struct rdtgroup *rdtgrp;
	struct kernfs_node *kn;
	u32 closid;
	int ret;

	ret = mkdir_rdt_prepare(parent_kn, prgrp_kn, name, mode, RDTCTRL_GROUP,
				&rdtgrp);
	if (ret)
		return ret;

	kn = rdtgrp->kn;
	ret = closid_alloc();
	if (ret < 0) {
		rdt_last_cmd_puts("out of CLOSIDs\n");
		goto out_common_fail;
	}
	closid = ret;
	ret = 0;

	rdtgrp->closid = closid;
	ret = rdtgroup_init_alloc(rdtgrp);
	if (ret < 0)
		goto out_id_free;

	list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);

	if (rdt_mon_capable) {
		/*
		 * Create an empty mon_groups directory to hold the subset
		 * of tasks and cpus to monitor.
		 */
		ret = mongroup_create_dir(kn, NULL, "mon_groups", NULL);
		if (ret) {
			rdt_last_cmd_puts("kernfs subdir error\n");
			goto out_del_list;
		}
	}

	goto out_unlock;

out_del_list:
	list_del(&rdtgrp->rdtgroup_list);
out_id_free:
	closid_free(closid);
out_common_fail:
	mkdir_rdt_prepare_clean(rdtgrp);
out_unlock:
	rdtgroup_kn_unlock(prgrp_kn);
	return ret;
}

/*
 * We allow creating mon groups only within a directory called "mon_groups"
 * which is present in every ctrl_mon group. Check if this is a valid
 * "mon_groups" directory.
 *
 * 1. The directory should be named "mon_groups".
 * 2. The mon group itself should "not" be named "mon_groups".
 *    This makes sure "mon_groups" directory always has a ctrl_mon group
 *    as parent.
 */
static bool is_mon_groups(struct kernfs_node *kn, const char *name)
{
	return (!strcmp(kn->name, "mon_groups") &&
		strcmp(name, "mon_groups"));
}
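
/*
 * For example (group names here are purely illustrative):
 * "mkdir /sys/fs/resctrl/g1/mon_groups/m1" satisfies both checks above,
 * while "mkdir /sys/fs/resctrl/g1/mon_groups/mon_groups" fails the
 * second one, so a "mon_groups" directory is always the direct child
 * of a ctrl_mon group.
 */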

static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
			  umode_t mode)
{
	/* Do not accept '\n' to avoid unparsable situation. */
	if (strchr(name, '\n'))
		return -EINVAL;

	/*
	 * If the parent directory is the root directory and RDT
	 * allocation is supported, add a control and monitoring
	 * subdirectory
	 */
	if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn)
		return rdtgroup_mkdir_ctrl_mon(parent_kn, parent_kn, name, mode);

	/*
	 * If RDT monitoring is supported and the parent directory is a valid
	 * "mon_groups" directory, add a monitoring subdirectory.
	 */
	if (rdt_mon_capable && is_mon_groups(parent_kn, name))
		return rdtgroup_mkdir_mon(parent_kn, parent_kn->parent, name, mode);

	return -EPERM;
}

static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
			      cpumask_var_t tmpmask)
{
	struct rdtgroup *prdtgrp = rdtgrp->mon.parent;
	int cpu;

	/* Give any tasks back to the parent group */
	rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask);

	/* Update per cpu rmid of the moved CPUs first */
	for_each_cpu(cpu, &rdtgrp->cpu_mask)
		per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid;
	/*
	 * Update the MSR on moved CPUs and CPUs which have moved
	 * task running on them.
	 */
	cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
	update_closid_rmid(tmpmask, NULL);

	rdtgrp->flags = RDT_DELETED;
	free_rmid(rdtgrp->mon.rmid);

	/*
	 * Remove the rdtgrp from the parent ctrl_mon group's list
	 */
	WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
	list_del(&rdtgrp->mon.crdtgrp_list);

	/*
	 * one extra hold on this, will drop when we kfree(rdtgrp)
	 * in rdtgroup_kn_unlock()
	 */
	kernfs_get(kn);
	kernfs_remove(rdtgrp->kn);

	return 0;
}

static int rdtgroup_ctrl_remove(struct kernfs_node *kn,
				struct rdtgroup *rdtgrp)
{
	rdtgrp->flags = RDT_DELETED;
	list_del(&rdtgrp->rdtgroup_list);

	/*
	 * one extra hold on this, will drop when we kfree(rdtgrp)
	 * in rdtgroup_kn_unlock()
	 */
	kernfs_get(kn);
	kernfs_remove(rdtgrp->kn);
	return 0;
}

static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
			       cpumask_var_t tmpmask)
{
	int cpu;

	/* Give any tasks back to the default group */
	rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);

	/* Give any CPUs back to the default group */
	cpumask_or(&rdtgroup_default.cpu_mask,
		   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);

	/* Update per cpu closid and rmid of the moved CPUs first */
	for_each_cpu(cpu, &rdtgrp->cpu_mask) {
		per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid;
		per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid;
	}

	/*
	 * Update the MSR on moved CPUs and CPUs which have moved
	 * task running on them.
	 */
	cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
	update_closid_rmid(tmpmask, NULL);

	closid_free(rdtgrp->closid);
	free_rmid(rdtgrp->mon.rmid);

	/*
	 * Free all the child monitor group rmids.
	 */
	free_all_child_rdtgrp(rdtgrp);

	rdtgroup_ctrl_remove(kn, rdtgrp);

	return 0;
}

static int rdtgroup_rmdir(struct kernfs_node *kn)
{
	struct kernfs_node *parent_kn = kn->parent;
	struct rdtgroup *rdtgrp;
	cpumask_var_t tmpmask;
	int ret = 0;

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;

	rdtgrp = rdtgroup_kn_lock_live(kn);
	if (!rdtgrp) {
		ret = -EPERM;
		goto out;
	}

	/*
	 * If the rdtgroup is a ctrl_mon group and parent directory
	 * is the root directory, remove the ctrl_mon group.
	 *
	 * If the rdtgroup is a mon group and parent directory
	 * is a valid "mon_groups" directory, remove the mon group.
	 */
	if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			ret = rdtgroup_ctrl_remove(kn, rdtgrp);
		} else {
			ret = rdtgroup_rmdir_ctrl(kn, rdtgrp, tmpmask);
		}
	} else if (rdtgrp->type == RDTMON_GROUP &&
		   is_mon_groups(parent_kn, kn->name)) {
		ret = rdtgroup_rmdir_mon(kn, rdtgrp, tmpmask);
	} else {
		ret = -EPERM;
	}

out:
	rdtgroup_kn_unlock(kn);
	free_cpumask_var(tmpmask);
	return ret;
}
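
/*
 * Illustrative mapping of the removal paths above (group names are
 * hypothetical): "rmdir /sys/fs/resctrl/g1" takes the ctrl_mon branch,
 * "rmdir /sys/fs/resctrl/g1/mon_groups/m1" takes the mon group branch,
 * and removing anything else, such as a directory under "info", fails
 * with -EPERM.
 */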

static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
{
	if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
		seq_puts(seq, ",cdp");
	return 0;
}

static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
	.mkdir		= rdtgroup_mkdir,
	.rmdir		= rdtgroup_rmdir,
	.show_options	= rdtgroup_show_options,
};

static int __init rdtgroup_setup_root(void)
{
	int ret;

	rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
				      KERNFS_ROOT_CREATE_DEACTIVATED |
				      KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
				      &rdtgroup_default);
	if (IS_ERR(rdt_root))
		return PTR_ERR(rdt_root);

	mutex_lock(&rdtgroup_mutex);

	rdtgroup_default.closid = 0;
	rdtgroup_default.mon.rmid = 0;
	rdtgroup_default.type = RDTCTRL_GROUP;
	INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list);

	list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);

	ret = rdtgroup_add_files(rdt_root->kn, RF_CTRL_BASE);
	if (ret) {
		kernfs_destroy_root(rdt_root);
		goto out;
	}

	rdtgroup_default.kn = rdt_root->kn;
	kernfs_activate(rdtgroup_default.kn);

out:
	mutex_unlock(&rdtgroup_mutex);

	return ret;
}

/*
 * rdtgroup_init - rdtgroup initialization
 *
 * Setup resctrl file system including set up root, create mount point,
 * register rdtgroup filesystem, and initialize files under root directory.
 *
 * Return: 0 on success or -errno
 */
int __init rdtgroup_init(void)
{
	int ret = 0;

	seq_buf_init(&last_cmd_status, last_cmd_status_buf,
		     sizeof(last_cmd_status_buf));

	ret = rdtgroup_setup_root();
	if (ret)
		return ret;

	ret = sysfs_create_mount_point(fs_kobj, "resctrl");
	if (ret)
		goto cleanup_root;

	ret = register_filesystem(&rdt_fs_type);
	if (ret)
		goto cleanup_mountpoint;

	/*
	 * Adding the resctrl debugfs directory here may not be ideal since
	 * it would let the resctrl debugfs directory appear on the debugfs
	 * filesystem before the resctrl filesystem is mounted.
	 * It may also be ok since that would enable debugging of RDT before
	 * resctrl is mounted.
	 * The reason why the debugfs directory is created here and not in
	 * rdt_mount() is because rdt_mount() takes rdtgroup_mutex and
	 * during the debugfs directory creation also &sb->s_type->i_mutex_key
	 * (the lockdep class of inode->i_rwsem). Other filesystem
	 * interactions (eg. SyS_getdents) have the lock ordering:
	 * &sb->s_type->i_mutex_key --> &mm->mmap_sem
	 * During mmap(), called with &mm->mmap_sem, the rdtgroup_mutex
	 * is taken, thus creating dependency:
	 * &mm->mmap_sem --> rdtgroup_mutex for the latter that can cause
	 * issues considering the other two lock dependencies.
	 * By creating the debugfs directory here we avoid a dependency
	 * that may cause deadlock (even though file operations cannot
	 * occur until the filesystem is mounted, but I do not know how to
	 * tell lockdep that).
	 */
	debugfs_resctrl = debugfs_create_dir("resctrl", NULL);

	return 0;

cleanup_mountpoint:
	sysfs_remove_mount_point(fs_kobj, "resctrl");
cleanup_root:
	kernfs_destroy_root(rdt_root);

	return ret;
}

void __exit rdtgroup_exit(void)
{
	debugfs_remove_recursive(debugfs_resctrl);
	unregister_filesystem(&rdt_fs_type);
	sysfs_remove_mount_point(fs_kobj, "resctrl");
	kernfs_destroy_root(rdt_root);
}