// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	if (engine->group)
		return sysfs_emit(buf, "%d\n", engine->group->id);
	else
		return sysfs_emit(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine = confdev_to_engine(dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

static void idxd_conf_engine_release(struct device *dev)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	kfree(engine);
}

struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_engine_release,
	.groups = idxd_engine_attribute_groups,
};

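/*
 * Example usage (paths assume the first DSA device, following the idxd
 * sysfs ABI naming): assign engine 0 to group 0 with
 *
 *	echo 0 > /sys/bus/dsa/devices/dsa0/engine0.0/group_id
 *
 * and write -1 to detach the engine from its current group.
 */
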
/* Group attributes */
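/*
 * Recompute the device's free read buffer count: the hardware maximum
 * minus the sum of every group's reservation.
 */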
static void idxd_set_free_rdbufs(struct idxd_device *idxd)
{
	int i, rdbufs;

	for (i = 0, rdbufs = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = idxd->groups[i];

		rdbufs += g->rdbufs_reserved;
	}

	idxd->nr_rdbufs = idxd->max_rdbufs - rdbufs;
}

static ssize_t group_read_buffers_reserved_show(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->rdbufs_reserved);
}

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n");
	return group_read_buffers_reserved_show(dev, attr, buf);
}

static ssize_t group_read_buffers_reserved_store(struct device *dev,
						 struct device_attribute *attr,
						 const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_rdbufs)
		return -EINVAL;

	if (val > idxd->nr_rdbufs + group->rdbufs_reserved)
		return -EINVAL;

	group->rdbufs_reserved = val;
	idxd_set_free_rdbufs(idxd);
	return count;
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n");
	return group_read_buffers_reserved_store(dev, attr, buf, count);
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static struct device_attribute dev_attr_group_read_buffers_reserved =
		__ATTR(read_buffers_reserved, 0644, group_read_buffers_reserved_show,
		       group_read_buffers_reserved_store);

static ssize_t group_read_buffers_allowed_show(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->rdbufs_allowed);
}

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n");
	return group_read_buffers_allowed_show(dev, attr, buf);
}

static ssize_t group_read_buffers_allowed_store(struct device *dev,
						struct device_attribute *attr,
						const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->rdbufs_reserved + idxd->nr_rdbufs)
		return -EINVAL;

	group->rdbufs_allowed = val;
	return count;
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n");
	return group_read_buffers_allowed_store(dev, attr, buf, count);
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static struct device_attribute dev_attr_group_read_buffers_allowed =
		__ATTR(read_buffers_allowed, 0644, group_read_buffers_allowed_show,
		       group_read_buffers_allowed_store);

static ssize_t group_use_read_buffer_limit_show(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->use_rdbuf_limit);
}

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n");
	return group_use_read_buffer_limit_show(dev, attr, buf);
}

static ssize_t group_use_read_buffer_limit_store(struct device *dev,
						 struct device_attribute *attr,
						 const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->rdbuf_limit == 0)
		return -EPERM;

	group->use_rdbuf_limit = !!val;
	return count;
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n");
	return group_use_read_buffer_limit_store(dev, attr, buf, count);
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

static struct device_attribute dev_attr_group_use_read_buffer_limit =
		__ATTR(use_read_buffer_limit, 0644, group_use_read_buffer_limit_show,
		       group_use_read_buffer_limit_store);

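/*
 * A worked example of how the read-buffer knobs above interact (all
 * numbers hypothetical): on a device with max_rdbufs = 96, reserving 24
 * read buffers for one group leaves nr_rdbufs = 72 free. Another
 * group's read_buffers_allowed must then fall between 4 * num_engines
 * and its own rdbufs_reserved + 72, and use_read_buffer_limit can only
 * be set once the device-level read_buffer_limit is nonzero.
 */
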
static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id);
	}

	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);
	}

	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static ssize_t group_desc_progress_limit_show(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->desc_progress_limit);
}

static ssize_t group_desc_progress_limit_store(struct device *dev,
					       struct device_attribute *attr,
					       const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	int val, rc;

	rc = kstrtoint(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (val & ~GENMASK(1, 0))
		return -EINVAL;

	group->desc_progress_limit = val;
	return count;
}

static struct device_attribute dev_attr_group_desc_progress_limit =
		__ATTR(desc_progress_limit, 0644, group_desc_progress_limit_show,
		       group_desc_progress_limit_store);

static ssize_t group_batch_progress_limit_show(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->batch_progress_limit);
}

static ssize_t group_batch_progress_limit_store(struct device *dev,
						struct device_attribute *attr,
						const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	int val, rc;

	rc = kstrtoint(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (val & ~GENMASK(1, 0))
		return -EINVAL;

	group->batch_progress_limit = val;
	return count;
}

static struct device_attribute dev_attr_group_batch_progress_limit =
		__ATTR(batch_progress_limit, 0644, group_batch_progress_limit_show,
		       group_batch_progress_limit_store);

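/*
 * Both progress limits accept only values 0-3; anything with bits set
 * outside GENMASK(1, 0) is rejected. The meaning of the encoding is an
 * assumption from the device spec: 0 selects the maximum amount of
 * work-in-progress and each increment halves it.
 */
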
static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_use_read_buffer_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_read_buffers_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_read_buffers_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	&dev_attr_group_desc_progress_limit.attr,
	&dev_attr_group_batch_progress_limit.attr,
	NULL,
};

static bool idxd_group_attr_progress_limit_invisible(struct attribute *attr,
						     struct idxd_device *idxd)
{
	return (attr == &dev_attr_group_desc_progress_limit.attr ||
		attr == &dev_attr_group_batch_progress_limit.attr) &&
		!idxd->hw.group_cap.progress_limit;
}

static bool idxd_group_attr_read_buffers_invisible(struct attribute *attr,
						   struct idxd_device *idxd)
{
	/*
	 * Intel IAA does not support Read Buffer allocation control,
	 * make these attributes invisible.
	 */
	return (attr == &dev_attr_group_use_token_limit.attr ||
		attr == &dev_attr_group_use_read_buffer_limit.attr ||
		attr == &dev_attr_group_tokens_allowed.attr ||
		attr == &dev_attr_group_read_buffers_allowed.attr ||
		attr == &dev_attr_group_tokens_reserved.attr ||
		attr == &dev_attr_group_read_buffers_reserved.attr) &&
		idxd->data->type == IDXD_TYPE_IAX;
}

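/*
 * sysfs calls .is_visible once per attribute when the group is
 * registered: returning 0 hides attributes the hardware cannot back,
 * while returning attr->mode keeps them visible with their declared
 * permissions.
 */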
static umode_t idxd_group_attr_visible(struct kobject *kobj,
				       struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;

	if (idxd_group_attr_progress_limit_invisible(attr, idxd))
		return 0;

	if (idxd_group_attr_read_buffers_invisible(attr, idxd))
		return 0;

	return attr->mode;
}

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
	.is_visible = idxd_group_attr_visible,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

static void idxd_conf_group_release(struct device *dev)
{
	struct idxd_group *group = confdev_to_group(dev);

	kfree(group);
}

struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_group_release,
	.groups = idxd_group_attribute_groups,
};

/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	}

	return sysfs_emit(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq->group)
		return sysfs_emit(buf, "%u\n", wq->group->id);
	else
		return sysfs_emit(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else if (sysfs_streq(buf, "shared")) {
		clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->size);
}

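/* Sum the WQ sizes already claimed across the device. */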
static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_block_on_fault_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}

static ssize_t wq_block_on_fault_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool bof;
	int rc;

	if (!idxd->hw.gen_cap.block_on_fault)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	rc = kstrtobool(buf, &bof);
	if (rc < 0)
		return rc;

	if (bof)
		set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	else
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);

	return count;
}

static struct device_attribute dev_attr_wq_block_on_fault =
		__ATTR(block_on_fault, 0644, wq_block_on_fault_show,
		       wq_block_on_fault_store);

static ssize_t wq_threshold_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->threshold);
}

static ssize_t wq_threshold_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	unsigned int val;
	int rc;

	rc = kstrtouint(buf, 0, &val);
	if (rc < 0)
		return -EINVAL;

	if (val > wq->size || val <= 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
		return -EINVAL;

	wq->threshold = val;

	return count;
}

static struct device_attribute dev_attr_wq_threshold =
		__ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]);
	}

	return -EINVAL;
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	char *input, *pos;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	/*
	 * This is temporarily placed here until we have SVM support for
	 * dmaengine.
	 */
	if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
		return -EOPNOTSUPP;

	input = kstrndup(buf, count, GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	pos = strim(input);
	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	sprintf(wq->name, "%s", pos);
	kfree(input);
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	int minor = -1;

	mutex_lock(&wq->wq_lock);
	if (wq->idxd_cdev)
		minor = wq->idxd_cdev->minor;
	mutex_unlock(&wq->wq_lock);

	if (minor == -1)
		return -ENXIO;
	return sysfs_emit(buf, "%d\n", minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

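/*
 * Shared parse helper for the max_transfer_size and max_batch_size
 * stores: reject zero and round the value up to the next power of two.
 */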
static int __get_sysfs_u64(const char *buf, u64 *val)
{
	int rc;

	rc = kstrtou64(buf, 0, val);
	if (rc < 0)
		return -EINVAL;

	if (*val == 0)
		return -EINVAL;

	*val = roundup_pow_of_two(*val);
	return 0;
}

static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
					 char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);
}

static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 xfer_size;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &xfer_size);
	if (rc < 0)
		return rc;

	if (xfer_size > idxd->max_xfer_bytes)
		return -EINVAL;

	wq->max_xfer_bytes = xfer_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);

static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->max_batch_size);
}

static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 batch_size;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &batch_size);
	if (rc < 0)
		return rc;

	if (batch_size > idxd->max_batch_size)
		return -EINVAL;

	idxd_wq_set_max_batch_size(idxd->data->type, wq, (u32)batch_size);

	return count;
}

static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);

static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_ATS_DISABLE, &wq->flags));
}

static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool ats_dis;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (!idxd->hw.wq_cap.wq_ats_support)
		return -EOPNOTSUPP;

	rc = kstrtobool(buf, &ats_dis);
	if (rc < 0)
		return rc;

	if (ats_dis)
		set_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
	else
		clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);

	return count;
}

static struct device_attribute dev_attr_wq_ats_disable =
		__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);

static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u32 occup, offset;

	if (!idxd->hw.wq_cap.occupancy)
		return -EOPNOTSUPP;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX);
	occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK;

	return sysfs_emit(buf, "%u\n", occup);
}

static struct device_attribute dev_attr_wq_occupancy =
		__ATTR(occupancy, 0444, wq_occupancy_show, NULL);

static ssize_t wq_enqcmds_retries_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq_dedicated(wq))
		return -EOPNOTSUPP;

	return sysfs_emit(buf, "%u\n", wq->enqcmds_retries);
}

static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	int rc;
	unsigned int retries;

	if (wq_dedicated(wq))
		return -EOPNOTSUPP;

	rc = kstrtouint(buf, 10, &retries);
	if (rc < 0)
		return rc;

	if (retries > IDXD_ENQCMDS_MAX_RETRIES)
		retries = IDXD_ENQCMDS_MAX_RETRIES;

	wq->enqcmds_retries = retries;
	return count;
}

static struct device_attribute dev_attr_wq_enqcmds_retries =
		__ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store);

static ssize_t wq_op_config_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, wq->opcap_bmap);
}

static int idxd_verify_supported_opcap(struct idxd_device *idxd, unsigned long *opmask)
{
	int bit;

	/*
	 * The OPCAP is defined as 256 bits that represents each operation the device
	 * supports per bit. Iterate through all the bits and check if the input mask
	 * is set for bits that are not set in the OPCAP for the device. If no OPCAP
	 * bit is set and input mask has the bit set, then return error.
	 */
	for_each_set_bit(bit, opmask, IDXD_MAX_OPCAP_BITS) {
		if (!test_bit(bit, idxd->opcap_bmap))
			return -EINVAL;
	}

	return 0;
}

static ssize_t wq_op_config_store(struct device *dev, struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	unsigned long *opmask;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	opmask = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
	if (!opmask)
		return -ENOMEM;

	rc = bitmap_parse(buf, count, opmask, IDXD_MAX_OPCAP_BITS);
	if (rc < 0)
		goto err;

	rc = idxd_verify_supported_opcap(idxd, opmask);
	if (rc < 0)
		goto err;

	bitmap_copy(wq->opcap_bmap, opmask, IDXD_MAX_OPCAP_BITS);

	bitmap_free(opmask);
	return count;

err:
	bitmap_free(opmask);
	return rc;
}

static struct device_attribute dev_attr_wq_op_config =
		__ATTR(op_config, 0644, wq_op_config_show, wq_op_config_store);

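/*
 * Example (values hypothetical): op_config is printed with "%*pb" and
 * parsed with bitmap_parse(), i.e. comma-separated 32-bit hex words,
 * most significant word first. On a WQ whose device OPCAP allows it,
 *
 *	echo 0,0,0,0,0,0,0,ff > /sys/bus/dsa/devices/dsa0/wq0.0/op_config
 *
 * would restrict the WQ to operations 0-7.
 */
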
static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_block_on_fault.attr,
	&dev_attr_wq_threshold.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	&dev_attr_wq_ats_disable.attr,
	&dev_attr_wq_occupancy.attr,
	&dev_attr_wq_enqcmds_retries.attr,
	&dev_attr_wq_op_config.attr,
	NULL,
};

static bool idxd_wq_attr_op_config_invisible(struct attribute *attr,
					     struct idxd_device *idxd)
{
	return attr == &dev_attr_wq_op_config.attr &&
	       !idxd->hw.wq_cap.op_config;
}

static bool idxd_wq_attr_max_batch_size_invisible(struct attribute *attr,
						  struct idxd_device *idxd)
{
	/* Intel IAA does not support batch processing, make it invisible */
	return attr == &dev_attr_wq_max_batch_size.attr &&
	       idxd->data->type == IDXD_TYPE_IAX;
}

static umode_t idxd_wq_attr_visible(struct kobject *kobj,
				    struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;

	if (idxd_wq_attr_op_config_invisible(attr, idxd))
		return 0;

	if (idxd_wq_attr_max_batch_size_invisible(attr, idxd))
		return 0;

	return attr->mode;
}

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
	.is_visible = idxd_wq_attr_visible,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

static void idxd_conf_wq_release(struct device *dev)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	bitmap_free(wq->opcap_bmap);
	kfree(wq->wqcfg);
	kfree(wq);
}

struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_wq_release,
	.groups = idxd_wq_attribute_groups,
};

/* IDXD device attribs */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, idxd->opcap_bmap);
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int count = 0, i;

	spin_lock(&idxd->dev_lock);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock(&idxd->dev_lock);

	return sysfs_emit(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t pasid_enabled_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sysfs_emit(buf, "halted\n");
	}

	return sysfs_emit(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int i, out = 0;

	spin_lock(&idxd->dev_lock);
	for (i = 0; i < 4; i++)
		out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock(&idxd->dev_lock);
	out--;
	out += sysfs_emit_at(buf, out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_read_buffers_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_rdbufs);
}

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see max_read_buffers.\n");
	return max_read_buffers_show(dev, attr, buf);
}

static DEVICE_ATTR_RO(max_tokens);	/* deprecated */
static DEVICE_ATTR_RO(max_read_buffers);

static ssize_t read_buffer_limit_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->rdbuf_limit);
}

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffer_limit.\n");
	return read_buffer_limit_show(dev, attr, buf);
}

static ssize_t read_buffer_limit_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.rdbuf_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_rdbufs)
		return -EINVAL;

	idxd->rdbuf_limit = val;
	return count;
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffer_limit.\n");
	return read_buffer_limit_store(dev, attr, buf, count);
}

static DEVICE_ATTR_RW(token_limit);	/* deprecated */
static DEVICE_ATTR_RW(read_buffer_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static ssize_t cmd_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
}

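/* Writing any value clears the saved status of the last device command. */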
static ssize_t cmd_status_store(struct device *dev, struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	idxd->cmd_status = 0;

	return count;
}
static DEVICE_ATTR_RW(cmd_status);

static bool idxd_device_attr_max_batch_size_invisible(struct attribute *attr,
						      struct idxd_device *idxd)
{
	/* Intel IAA does not support batch processing, make it invisible */
	return attr == &dev_attr_max_batch_size.attr &&
	       idxd->data->type == IDXD_TYPE_IAX;
}

static bool idxd_device_attr_read_buffers_invisible(struct attribute *attr,
						    struct idxd_device *idxd)
{
	/*
	 * Intel IAA does not support Read Buffer allocation control,
	 * make these attributes invisible.
	 */
	return (attr == &dev_attr_max_tokens.attr ||
		attr == &dev_attr_max_read_buffers.attr ||
		attr == &dev_attr_token_limit.attr ||
		attr == &dev_attr_read_buffer_limit.attr) &&
		idxd->data->type == IDXD_TYPE_IAX;
}

static umode_t idxd_device_attr_visible(struct kobject *kobj,
					struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct idxd_device *idxd = confdev_to_idxd(dev);

	if (idxd_device_attr_max_batch_size_invisible(attr, idxd))
		return 0;

	if (idxd_device_attr_read_buffers_invisible(attr, idxd))
		return 0;

	return attr->mode;
}

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_pasid_enabled.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_max_read_buffers.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_read_buffer_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
	.is_visible = idxd_device_attr_visible,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

static void idxd_conf_device_release(struct device *dev)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	kfree(idxd->groups);
	bitmap_free(idxd->wq_enable_map);
	kfree(idxd->wqs);
	kfree(idxd->engines);
	ida_free(&idxd_ida, idxd->id);
	bitmap_free(idxd->opcap_bmap);
	kfree(idxd);
}

struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

struct device_type iax_device_type = {
	.name = "iax",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

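/*
 * With these device types registered, a configured device appears under
 * /sys/bus/dsa/devices/ roughly as follows (example names for a first
 * DSA device):
 *
 *	dsa0/           device attributes (state, op_cap, errors, ...)
 *	dsa0/group0.0/  group attributes
 *	dsa0/engine0.0/ engine attributes
 *	dsa0/wq0.0/     work queue attributes
 */
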
static int idxd_register_engine_devices(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	int i, j, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		rc = device_add(engine_confdev(engine));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
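	/*
	 * Entries at index i and above failed device_add() or were never
	 * added, so they only need their initial reference dropped with
	 * put_device(); entries below i were fully added and must be torn
	 * down with device_unregister().
	 */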
	j = i - 1;
	for (; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		put_device(engine_confdev(engine));
	}

	while (j--) {
		engine = idxd->engines[j];
		device_unregister(engine_confdev(engine));
	}
	return rc;
}

static int idxd_register_group_devices(struct idxd_device *idxd)
{
	struct idxd_group *group;
	int i, j, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		rc = device_add(group_confdev(group));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i - 1;
	for (; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
	}

	while (j--) {
		group = idxd->groups[j];
		device_unregister(group_confdev(group));
	}
	return rc;
}

static int idxd_register_wq_devices(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	int i, rc, j;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		rc = device_add(wq_confdev(wq));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i - 1;
	for (; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		put_device(wq_confdev(wq));
	}

	while (j--) {
		wq = idxd->wqs[j];
		device_unregister(wq_confdev(wq));
	}
	return rc;
}

int idxd_register_devices(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	rc = device_add(idxd_confdev(idxd));
	if (rc < 0)
		return rc;

	rc = idxd_register_wq_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
		goto err_wq;
	}

	rc = idxd_register_engine_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
		goto err_engine;
	}

	rc = idxd_register_group_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Group device registering failed: %d\n", rc);
		goto err_group;
	}

	return 0;

err_group:
	for (i = 0; i < idxd->max_engines; i++)
		device_unregister(engine_confdev(idxd->engines[i]));
err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		device_unregister(wq_confdev(idxd->wqs[i]));
err_wq:
	device_del(idxd_confdev(idxd));
	return rc;
}

void idxd_unregister_devices(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		device_unregister(wq_confdev(wq));
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		device_unregister(engine_confdev(engine));
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		device_unregister(group_confdev(group));
	}
}

int idxd_register_bus_type(void)
{
	return bus_register(&dsa_bus_type);
}

void idxd_unregister_bus_type(void)
{
	bus_unregister(&dsa_bus_type);
}