// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/sched/task.h>
#include <linux/intel-svm.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"
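
/*
 * Per accelerator-type character device context: the device node name
 * prefix, the allocated char device region, and an IDA for minor
 * number management.
 */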
struct idxd_cdev_context {
	const char *name;
	dev_t devt;
	struct ida minor_ida;
};

/*
 * ictx is an array based off of accelerator types. enum idxd_type
 * is used as index
 */
static struct idxd_cdev_context ictx[IDXD_TYPE_MAX] = {
	{ .name = "dsa" },
	{ .name = "iax" }
};
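
/*
 * Per-open-file user context: the wq this file handle submits to, plus
 * the bound SVA handle and PASID when shared virtual addressing is in
 * use.
 */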
struct idxd_user_context {
	struct idxd_wq *wq;
	struct task_struct *task;
	unsigned int pasid;
	unsigned int flags;
	struct iommu_sva *sva;
};

static void idxd_cdev_dev_release(struct device *dev)
{
	struct idxd_cdev *idxd_cdev = dev_to_cdev(dev);
	struct idxd_cdev_context *cdev_ctx;
	struct idxd_wq *wq = idxd_cdev->wq;

	cdev_ctx = &ictx[wq->idxd->data->type];
	ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
	kfree(idxd_cdev);
}

static struct device_type idxd_cdev_device_type = {
	.name = "idxd_cdev",
	.release = idxd_cdev_dev_release,
};

static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode)
{
	struct cdev *cdev = inode->i_cdev;

	return container_of(cdev, struct idxd_cdev, cdev);
}

static inline struct idxd_wq *inode_wq(struct inode *inode)
{
	struct idxd_cdev *idxd_cdev = inode_idxd_cdev(inode);

	return idxd_cdev->wq;
}
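
/*
 * Open allocates a per-file context, refuses a second opener on a
 * dedicated wq, and, when user PASID support is enabled, binds the
 * current mm to the device via SVA so the wq can operate on user
 * virtual addresses.
 */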
static int idxd_cdev_open(struct inode *inode, struct file *filp)
{
	struct idxd_user_context *ctx;
	struct idxd_device *idxd;
	struct idxd_wq *wq;
	struct device *dev;
	int rc = 0;
	struct iommu_sva *sva;
	unsigned int pasid;

	wq = inode_wq(inode);
	idxd = wq->idxd;
	dev = &idxd->pdev->dev;

	dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq));

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&wq->wq_lock);

	if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) {
		rc = -EBUSY;
		goto failed;
	}

	ctx->wq = wq;
	filp->private_data = ctx;

	if (device_user_pasid_enabled(idxd)) {
		sva = iommu_sva_bind_device(dev, current->mm, NULL);
		if (IS_ERR(sva)) {
			rc = PTR_ERR(sva);
			dev_err(dev, "pasid allocation failed: %d\n", rc);
			goto failed;
		}

		pasid = iommu_sva_get_pasid(sva);
		if (pasid == IOMMU_PASID_INVALID) {
			iommu_sva_unbind_device(sva);
			rc = -EINVAL;
			goto failed;
		}

		ctx->sva = sva;
		ctx->pasid = pasid;

		if (wq_dedicated(wq)) {
			rc = idxd_wq_set_pasid(wq, pasid);
			if (rc < 0) {
				iommu_sva_unbind_device(sva);
				dev_err(dev, "wq set pasid failed: %d\n", rc);
				goto failed;
			}
		}
	}

	idxd_wq_get(wq);
	mutex_unlock(&wq->wq_lock);
	return 0;

 failed:
	mutex_unlock(&wq->wq_lock);
	kfree(ctx);
	return rc;
}
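
/*
 * Release drains any descriptors still in flight for this context
 * (by PASID on a shared wq, by disabling or draining the wq
 * otherwise), unbinds the SVA handle, and drops the wq reference
 * taken at open.
 */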
static int idxd_cdev_release(struct inode *node, struct file *filep)
{
	struct idxd_user_context *ctx = filep->private_data;
	struct idxd_wq *wq = ctx->wq;
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc;

	dev_dbg(dev, "%s called\n", __func__);
	filep->private_data = NULL;

	/* Wait for in-flight operations to complete. */
	if (wq_shared(wq)) {
		idxd_device_drain_pasid(idxd, ctx->pasid);
	} else {
		if (device_user_pasid_enabled(idxd)) {
			/* The wq disable in the disable pasid function will drain the wq */
			rc = idxd_wq_disable_pasid(wq);
			if (rc < 0)
				dev_err(dev, "wq disable pasid failed.\n");
		} else {
			idxd_wq_drain(wq);
		}
	}

	if (ctx->sva)
		iommu_sva_unbind_device(ctx->sva);
	kfree(ctx);
	mutex_lock(&wq->wq_lock);
	idxd_wq_put(wq);
	mutex_unlock(&wq->wq_lock);
	return 0;
}
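
/*
 * The wq portal is a single page; reject any mapping request larger
 * than PAGE_SIZE.
 */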
static int check_vma(struct idxd_wq *wq, struct vm_area_struct *vma,
		     const char *func)
{
	struct device *dev = &wq->idxd->pdev->dev;

	if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
		dev_info_ratelimited(dev,
				     "%s: %s: mapping too large: %lu\n",
				     current->comm, func,
				     vma->vm_end - vma->vm_start);
		return -EINVAL;
	}

	return 0;
}
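
/*
 * mmap exposes the wq's limited portal to userspace as a single
 * uncached page, through which descriptors are submitted directly to
 * the device.
 */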
static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct idxd_user_context *ctx = filp->private_data;
	struct idxd_wq *wq = ctx->wq;
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	phys_addr_t base = pci_resource_start(pdev, IDXD_WQ_BAR);
	unsigned long pfn;
	int rc;

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	rc = check_vma(wq, vma, __func__);
	if (rc < 0)
		return rc;

	vma->vm_flags |= VM_DONTCOPY;
	pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
				IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_private_data = ctx;

	return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
			vma->vm_page_prot);
}
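
/*
 * poll reports the fd as readable when the device has logged a valid
 * software error, so userspace can be notified of wq errors.
 */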
static __poll_t idxd_cdev_poll(struct file *filp,
			       struct poll_table_struct *wait)
{
	struct idxd_user_context *ctx = filp->private_data;
	struct idxd_wq *wq = ctx->wq;
	struct idxd_device *idxd = wq->idxd;
	__poll_t out = 0;

	poll_wait(filp, &wq->err_queue, wait);
	spin_lock(&idxd->dev_lock);
	if (idxd->sw_err.valid)
		out = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&idxd->dev_lock);

	return out;
}

static const struct file_operations idxd_cdev_fops = {
	.owner = THIS_MODULE,
	.open = idxd_cdev_open,
	.release = idxd_cdev_release,
	.mmap = idxd_cdev_mmap,
	.poll = idxd_cdev_poll,
};
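
/*
 * Illustrative userspace usage of these file operations (a sketch,
 * not part of this driver; the device node name depends on how the
 * wq was configured):
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *
 *	int fd = open("/dev/dsa/wq0.0", O_RDWR);
 *	void *portal = mmap(NULL, 4096, PROT_WRITE,
 *			    MAP_SHARED | MAP_POPULATE, fd, 0);
 *
 * Descriptors are then written to the mapped portal (e.g. via
 * movdir64b for a dedicated wq or enqcmd for a shared wq), and poll()
 * on the fd reports device errors.
 */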

int idxd_cdev_get_major(struct idxd_device *idxd)
{
	return MAJOR(ictx[idxd->data->type].devt);
}
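
/*
 * Allocate a minor number and register a per-wq character device under
 * the dsa bus, named <prefix>/wq<device id>.<wq id>.
 */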
int idxd_wq_add_cdev(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct idxd_cdev *idxd_cdev;
	struct cdev *cdev;
	struct device *dev;
	struct idxd_cdev_context *cdev_ctx;
	int rc, minor;

	idxd_cdev = kzalloc(sizeof(*idxd_cdev), GFP_KERNEL);
	if (!idxd_cdev)
		return -ENOMEM;

	idxd_cdev->idxd_dev.type = IDXD_DEV_CDEV;
	idxd_cdev->wq = wq;
	cdev = &idxd_cdev->cdev;
	dev = cdev_dev(idxd_cdev);
	cdev_ctx = &ictx[wq->idxd->data->type];
	minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
	if (minor < 0) {
		kfree(idxd_cdev);
		return minor;
	}
	idxd_cdev->minor = minor;

	device_initialize(dev);
	dev->parent = wq_confdev(wq);
	dev->bus = &dsa_bus_type;
	dev->type = &idxd_cdev_device_type;
	dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);

	rc = dev_set_name(dev, "%s/wq%u.%u", idxd->data->name_prefix, idxd->id, wq->id);
	if (rc < 0)
		goto err;

	wq->idxd_cdev = idxd_cdev;
	cdev_init(cdev, &idxd_cdev_fops);
	rc = cdev_device_add(cdev, dev);
	if (rc) {
		dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
		goto err;
	}

	return 0;

 err:
	put_device(dev);
	wq->idxd_cdev = NULL;
	return rc;
}

void idxd_wq_del_cdev(struct idxd_wq *wq)
{
	struct idxd_cdev *idxd_cdev;

	idxd_cdev = wq->idxd_cdev;
	wq->idxd_cdev = NULL;
	cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev));
	put_device(cdev_dev(idxd_cdev));
}
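
/*
 * "user" driver probe: mark the wq as a user-type wq, enable it, and
 * expose it to userspace through the character device. Errors are
 * reflected back to the configuration interface via cmd_status.
 */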
static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
{
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
	struct idxd_device *idxd = wq->idxd;
	int rc;

	if (idxd->state != IDXD_DEV_ENABLED)
		return -ENXIO;

	mutex_lock(&wq->wq_lock);
	wq->type = IDXD_WQT_USER;
	rc = drv_enable_wq(wq);
	if (rc < 0)
		goto err;

	rc = idxd_wq_add_cdev(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_CDEV_ERR;
		goto err_cdev;
	}

	idxd->cmd_status = 0;
	mutex_unlock(&wq->wq_lock);
	return 0;

 err_cdev:
	drv_disable_wq(wq);
 err:
	wq->type = IDXD_WQT_NONE;
	mutex_unlock(&wq->wq_lock);
	return rc;
}

static void idxd_user_drv_remove(struct idxd_dev *idxd_dev)
{
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);

	mutex_lock(&wq->wq_lock);
	idxd_wq_del_cdev(wq);
	drv_disable_wq(wq);
	wq->type = IDXD_WQT_NONE;
	mutex_unlock(&wq->wq_lock);
}

static enum idxd_dev_type dev_types[] = {
	IDXD_DEV_WQ,
	IDXD_DEV_NONE,
};

struct idxd_device_driver idxd_user_drv = {
	.probe = idxd_user_drv_probe,
	.remove = idxd_user_drv_remove,
	.name = "user",
	.type = dev_types,
};
EXPORT_SYMBOL_GPL(idxd_user_drv);
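
/*
 * Module init/exit helpers: allocate (and on failure or exit, release)
 * one char device region per accelerator type in ictx.
 */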
int idxd_cdev_register(void)
{
	int rc, i;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		ida_init(&ictx[i].minor_ida);
		rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK,
					 ictx[i].name);
		if (rc)
			goto err_free_chrdev_region;
	}

	return 0;

err_free_chrdev_region:
	for (i--; i >= 0; i--)
		unregister_chrdev_region(ictx[i].devt, MINORMASK);
	return rc;
}

void idxd_cdev_remove(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		unregister_chrdev_region(ictx[i].devt, MINORMASK);
		ida_destroy(&ictx[i].minor_ida);
	}
}