#ifndef _ASM_X86_INTEL_RDT_H
#define _ASM_X86_INTEL_RDT_H

#include <linux/sched.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>

#define IA32_L3_QOS_CFG		0xc81
#define IA32_L3_CBM_BASE	0xc90
#define IA32_L2_CBM_BASE	0xd10
#define IA32_MBA_THRTL_BASE	0xd50

#define L3_QOS_CDP_ENABLE	0x01ULL

/**
 * struct rdtgroup - store rdtgroup's data in the resctrl file system.
 * @kn:			kernfs node
 * @rdtgroup_list:	linked list for all rdtgroups
 * @closid:		closid for this rdtgroup
 * @cpu_mask:		CPUs assigned to this rdtgroup
 * @flags:		status bits
 * @waitcount:		how many cpus expect to find this
 *			group when they acquire rdtgroup_mutex
 */
struct rdtgroup {
	struct kernfs_node	*kn;
	struct list_head	rdtgroup_list;
	int			closid;
	struct cpumask		cpu_mask;
	int			flags;
	atomic_t		waitcount;
};

/* rdtgroup.flags */
#define RDT_DELETED		1

/* rftype.flags */
#define RFTYPE_FLAGS_CPUS_LIST	1

/* List of all resource groups */
extern struct list_head rdt_all_groups;

extern int max_name_width, max_data_width;

int __init rdtgroup_init(void);

/**
 * struct rftype - describe each file in the resctrl file system
 * @name:	File name
 * @mode:	Access mode
 * @kf_ops:	File operations
 * @flags:	File specific RFTYPE_FLAGS_* flags
 * @seq_show:	Show content of the file
 * @write:	Write to the file
 */
struct rftype {
	char			*name;
	umode_t			mode;
	struct kernfs_ops	*kf_ops;
	unsigned long		flags;

	int (*seq_show)(struct kernfs_open_file *of,
			struct seq_file *sf, void *v);
	/*
	 * write() is the generic write callback which maps directly to
	 * kernfs write operation and overrides all other operations.
	 * Maximum write size is determined by ->max_write_len.
	 */
	ssize_t (*write)(struct kernfs_open_file *of,
			 char *buf, size_t nbytes, loff_t off);
};
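
/*
 * Usage sketch (illustrative only, all names below are hypothetical and the
 * kernfs ops pointer is a placeholder): a resctrl file would typically be
 * described by a struct rftype entry wiring the seq_show/write callbacks:
 *
 *	static struct rftype example_file = {
 *		.name		= "example",
 *		.mode		= 0644,
 *		.kf_ops		= &example_kf_single_ops,
 *		.seq_show	= example_seq_show,
 *		.write		= example_write,
 *	};
 */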

/**
 * struct rdt_domain - group of cpus sharing an RDT resource
 * @list:	all instances of this resource
 * @id:		unique id for this instance
 * @cpu_mask:	which cpus share this resource
 * @ctrl_val:	array of cache or mem ctrl values (indexed by CLOSID)
 * @new_ctrl:	new ctrl value to be loaded
 * @have_new_ctrl: did user provide new_ctrl for this domain
 */
struct rdt_domain {
	struct list_head	list;
	int			id;
	struct cpumask		cpu_mask;
	u32			*ctrl_val;
	u32			new_ctrl;
	bool			have_new_ctrl;
};

/**
 * struct msr_param - set a range of MSRs from a domain
 * @res:	The resource to use
 * @low:	Beginning index from base MSR
 * @high:	End index
 */
struct msr_param {
	struct rdt_resource	*res;
	int			low;
	int			high;
};

/**
 * struct rdt_cache - Cache allocation related data
 * @cbm_len:		Length of the cache bit mask
 * @min_cbm_bits:	Minimum number of consecutive bits to be set
 * @cbm_idx_mult:	Multiplier of CBM index
 * @cbm_idx_offset:	Offset of CBM index. The index of a cache bit mask
 *			(CBM) MSR is computed as:
 *			closid * cbm_idx_mult + cbm_idx_offset
 */
struct rdt_cache {
	unsigned int	cbm_len;
	unsigned int	min_cbm_bits;
	unsigned int	cbm_idx_mult;
	unsigned int	cbm_idx_offset;
};
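
/*
 * Minimal sketch (illustrative helper, the name is hypothetical): compute the
 * control MSR index for a CLOSID using the formula documented above for
 * @cbm_idx_offset.
 */
static inline unsigned int rdt_cbm_idx(struct rdt_cache *c, unsigned int closid)
{
	return closid * c->cbm_idx_mult + c->cbm_idx_offset;
}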

/**
 * struct rdt_membw - Memory bandwidth allocation related data
 * @max_delay:		Max throttle delay. Delay is the hardware
 *			representation for memory bandwidth.
 * @min_bw:		Minimum memory bandwidth percentage user can request
 * @bw_gran:		Granularity at which the memory bandwidth is allocated
 * @delay_linear:	True if memory B/W delay is in linear scale
 * @mb_map:		Mapping of memory B/W percentage to memory B/W delay
 */
struct rdt_membw {
	u32	max_delay;
	u32	min_bw;
	u32	bw_gran;
	u32	delay_linear;
	u32	*mb_map;
};

/**
 * struct rdt_resource - attributes of an RDT resource
 * @alloc_enabled:	Is allocation enabled on this machine
 * @alloc_capable:	Is allocation available on this machine
 * @name:		Name to use in "schemata" file
 * @num_closid:		Number of CLOSIDs available
 * @cache_level:	Which cache level defines scope of this resource
 * @default_ctrl:	Specifies default cache cbm or memory B/W percent.
 * @msr_base:		Base MSR address for CBMs
 * @msr_update:		Function pointer to update QOS MSRs
 * @data_width:		Character width of data when displaying
 * @domains:		All domains for this resource
 * @cache:		Cache allocation related data
 * @membw:		Memory bandwidth allocation related data
 * @info_files:		resctrl info files for the resource
 * @nr_info_files:	Number of info files
 * @format_str:		Per resource format string to show domain value
 * @parse_ctrlval:	Per resource function pointer to parse control values
 */
struct rdt_resource {
	bool			alloc_enabled;
	bool			alloc_capable;
	char			*name;
	int			num_closid;
	int			cache_level;
	u32			default_ctrl;
	unsigned int		msr_base;
	void (*msr_update)	(struct rdt_domain *d, struct msr_param *m,
				 struct rdt_resource *r);
	int			data_width;
	struct list_head	domains;
	struct rdt_cache	cache;
	struct rdt_membw	membw;
	struct rftype		*info_files;
	int			nr_info_files;
	const char		*format_str;
	int (*parse_ctrlval)	(char *buf, struct rdt_resource *r,
				 struct rdt_domain *d);
};
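
/*
 * Sketch of a cache @msr_update callback (illustrative only; assumes
 * <asm/msr.h> provides wrmsrl() and treats @high of struct msr_param as an
 * exclusive bound): write a domain's per-CLOSID control values into the
 * hardware MSRs, using the rdt_cbm_idx() sketch above.
 */
static inline void example_cat_wrmsr(struct rdt_domain *d, struct msr_param *m,
				     struct rdt_resource *r)
{
	int i;

	/* One control MSR per CLOSID, starting at the resource's base MSR. */
	for (i = m->low; i < m->high; i++)
		wrmsrl(r->msr_base + rdt_cbm_idx(&r->cache, i), d->ctrl_val[i]);
}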

void rdt_get_cache_infofile(struct rdt_resource *r);
void rdt_get_mba_infofile(struct rdt_resource *r);
int parse_cbm(char *buf, struct rdt_resource *r, struct rdt_domain *d);
int parse_bw(char *buf, struct rdt_resource *r, struct rdt_domain *d);

extern struct mutex rdtgroup_mutex;

extern struct rdt_resource rdt_resources_all[];
extern struct rdtgroup rdtgroup_default;
DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);

int __init rdtgroup_init(void);

enum {
	RDT_RESOURCE_L3,
	RDT_RESOURCE_L3DATA,
	RDT_RESOURCE_L3CODE,
	RDT_RESOURCE_L2,
	RDT_RESOURCE_MBA,

	/* Must be the last */
	RDT_NUM_RESOURCES,
};

#define for_each_alloc_capable_rdt_resource(r)				       \
	for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
	     r++)							       \
		if (r->alloc_capable)

#define for_each_alloc_enabled_rdt_resource(r)				       \
	for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
	     r++)							       \
		if (r->alloc_enabled)
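
/*
 * Usage sketch (illustrative helper, not referenced elsewhere): the iterators
 * above expand to plain for-loops, e.g. to count the resources that currently
 * have allocation enabled:
 */
static inline int rdt_num_alloc_enabled_resources(void)
{
	struct rdt_resource *r;
	int n = 0;

	for_each_alloc_enabled_rdt_resource(r)
		n++;

	return n;
}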

/* CPUID.(EAX=10H, ECX=ResID=1).EAX */
union cpuid_0x10_1_eax {
	struct {
		unsigned int cbm_len:5;
	} split;
	unsigned int full;
};

/* CPUID.(EAX=10H, ECX=ResID=3).EAX */
union cpuid_0x10_3_eax {
	struct {
		unsigned int max_delay:12;
	} split;
	unsigned int full;
};

/* CPUID.(EAX=10H, ECX=ResID).EDX */
union cpuid_0x10_x_edx {
	struct {
		unsigned int cos_max:16;
	} split;
	unsigned int full;
};
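
/*
 * Sketch (illustrative; assumes cpuid_count() from <asm/processor.h> is
 * available): how the CPUID leaf 0x10 output maps onto the unions above.
 * Both the CBM length and the maximum COS are reported in minus-one notation.
 */
static inline void example_read_l3_alloc_info(unsigned int *cbm_len,
					      unsigned int *num_closid)
{
	union cpuid_0x10_1_eax eax;
	union cpuid_0x10_x_edx edx;
	unsigned int ebx, ecx;

	cpuid_count(0x00000010, 1, &eax.full, &ebx, &ecx, &edx.full);
	*cbm_len = eax.split.cbm_len + 1;
	*num_closid = edx.split.cos_max + 1;
}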

DECLARE_PER_CPU_READ_MOSTLY(int, cpu_closid);

void rdt_ctrl_update(void *arg);
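
/*
 * Call-site sketch for rdt_ctrl_update() (illustrative; assumes @arg is a
 * struct msr_param *, as struct msr_param above suggests, and that
 * <linux/smp.h> is available):
 *
 *	struct msr_param msr_param = {
 *		.res  = r,
 *		.low  = closid,
 *		.high = closid + 1,
 *	};
 *
 *	smp_call_function_many(&d->cpu_mask, rdt_ctrl_update, &msr_param, 1);
 */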

struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);
void rdtgroup_kn_unlock(struct kernfs_node *kn);
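
/*
 * Typical usage pattern for the pair above (sketch; assumes a NULL return
 * means the group is no longer live, e.g. it has been deleted):
 *
 *	rdtgrp = rdtgroup_kn_lock_live(of->kn);
 *	if (rdtgrp)
 *		... operate on rdtgrp under rdtgroup_mutex ...
 *	rdtgroup_kn_unlock(of->kn);
 */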

ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off);
int rdtgroup_schemata_show(struct kernfs_open_file *of,
			   struct seq_file *s, void *v);

#endif /* _ASM_X86_INTEL_RDT_H */