// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 IBM Corp.
 */

#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/sched/task.h>

#include <asm/cputable.h>
#include <asm/mmu.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"

static DEFINE_SPINLOCK(adapter_idr_lock);
static DEFINE_IDR(cxl_adapter_idr);

uint cxl_verbose;
module_param_named(verbose, cxl_verbose, uint, 0600);
MODULE_PARM_DESC(verbose, "Enable verbose dmesg output");

const struct cxl_backend_ops *cxl_ops;
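
/*
 * Ask the AFU to invalidate all of its SLB entries (SLBIA) and poll the
 * CXL_SLBIA_An register until the operation completes or CXL_TIMEOUT expires.
 */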
int cxl_afu_slbia(struct cxl_afu *afu)
{
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

        pr_devel("cxl_afu_slbia issuing SLBIA command\n");
        cxl_p2n_write(afu, CXL_SLBIA_An, CXL_TLB_SLB_IQ_ALL);
        while (cxl_p2n_read(afu, CXL_SLBIA_An) & CXL_TLB_SLB_P) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&afu->dev, "WARNING: CXL AFU SLBIA timed out!\n");
                        return -EBUSY;
                }
                /* If the adapter has gone down, we can assume that we
                 * will PERST it and that will invalidate everything.
                 */
                if (!cxl_ops->link_ok(afu->adapter, afu))
                        return -EIO;
                cpu_relax();
        }
        return 0;
}

static inline void _cxl_slbia(struct cxl_context *ctx, struct mm_struct *mm)
{
        unsigned long flags;

        if (ctx->mm != mm)
                return;

        pr_devel("%s matched mm - card: %i afu: %i pe: %i\n", __func__,
                 ctx->afu->adapter->adapter_num, ctx->afu->slice, ctx->pe);

        spin_lock_irqsave(&ctx->sste_lock, flags);
        trace_cxl_slbia(ctx);
        memset(ctx->sstp, 0, ctx->sst_size);
        spin_unlock_irqrestore(&ctx->sste_lock, flags);
        mb();
        cxl_afu_slbia(ctx->afu);
}
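
/*
 * Registered below as the cxl_slbia hook in cxl_calls: walk every adapter,
 * AFU and attached context, and clear the segment tables of contexts that
 * belong to the given mm.
 */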
static inline void cxl_slbia_core(struct mm_struct *mm)
{
        struct cxl *adapter;
        struct cxl_afu *afu;
        struct cxl_context *ctx;
        int card, slice, id;

        pr_devel("%s called\n", __func__);

        spin_lock(&adapter_idr_lock);
        idr_for_each_entry(&cxl_adapter_idr, adapter, card) {
                /* XXX: Make this lookup faster with link from mm to ctx */
                spin_lock(&adapter->afu_list_lock);
                for (slice = 0; slice < adapter->slices; slice++) {
                        afu = adapter->afu[slice];
                        if (!afu || !afu->enabled)
                                continue;
                        rcu_read_lock();
                        idr_for_each_entry(&afu->contexts_idr, ctx, id)
                                _cxl_slbia(ctx, mm);
                        rcu_read_unlock();
                }
                spin_unlock(&adapter->afu_list_lock);
        }
        spin_unlock(&adapter_idr_lock);
}

static struct cxl_calls cxl_calls = {
        .cxl_slbia = cxl_slbia_core,
        .owner = THIS_MODULE,
};
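
/*
 * Allocate a zeroed one-page segment table for the context and precompute
 * the SSTP0/SSTP1 register values, which are stashed in the context for
 * later use.
 */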
int cxl_alloc_sst(struct cxl_context *ctx)
{
        unsigned long vsid;
        u64 ea_mask, size, sstp0, sstp1;

        sstp0 = 0;
        sstp1 = 0;

        ctx->sst_size = PAGE_SIZE;
        ctx->sst_lru = 0;
        ctx->sstp = (struct cxl_sste *)get_zeroed_page(GFP_KERNEL);
        if (!ctx->sstp) {
                pr_err("cxl_alloc_sst: Unable to allocate segment table\n");
                return -ENOMEM;
        }
        pr_devel("SSTP allocated at 0x%p\n", ctx->sstp);

        vsid = get_kernel_vsid((u64)ctx->sstp, mmu_kernel_ssize) << 12;

        sstp0 |= (u64)mmu_kernel_ssize << CXL_SSTP0_An_B_SHIFT;
        sstp0 |= (SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp) << 50;

        size = (((u64)ctx->sst_size >> 8) - 1) << CXL_SSTP0_An_SegTableSize_SHIFT;
        if (unlikely(size & ~CXL_SSTP0_An_SegTableSize_MASK)) {
                WARN(1, "Impossible segment table size\n");
                return -EINVAL;
        }
        sstp0 |= size;

        if (mmu_kernel_ssize == MMU_SEGSIZE_256M)
                ea_mask = 0xfffff00ULL;
        else
                ea_mask = 0xffffffff00ULL;

        sstp0 |= vsid >> (50 - 14);	/* Top 14 bits of VSID */
        sstp1 |= (vsid << (64 - (50 - 14))) & ~ea_mask;
        sstp1 |= (u64)ctx->sstp & ea_mask;
        sstp1 |= CXL_SSTP1_An_V;

        pr_devel("Looked up %#llx: slbfee. %#llx (ssize: %x, vsid: %#lx), copied to SSTP0: %#llx, SSTP1: %#llx\n",
                 (u64)ctx->sstp, (u64)ctx->sstp & ESID_MASK, mmu_kernel_ssize, vsid, sstp0, sstp1);

        /* Store calculated sstp hardware pointers for use later */
        ctx->sstp0 = sstp0;
        ctx->sstp1 = sstp1;

        return 0;
}

/* print buffer content as integers when debugging */
void cxl_dump_debug_buffer(void *buf, size_t buf_len)
{
#ifdef DEBUG
        int i, *ptr;

        /*
         * We want to regroup up to 4 integers per line, which means they
         * need to be in the same pr_devel() statement
         */
        ptr = (int *)buf;
        for (i = 0; i * 4 < buf_len; i += 4) {
                if ((i + 3) * 4 < buf_len)
                        pr_devel("%.8x %.8x %.8x %.8x\n", ptr[i], ptr[i + 1],
                                 ptr[i + 2], ptr[i + 3]);
                else if ((i + 2) * 4 < buf_len)
                        pr_devel("%.8x %.8x %.8x\n", ptr[i], ptr[i + 1],
                                 ptr[i + 2]);
                else if ((i + 1) * 4 < buf_len)
                        pr_devel("%.8x %.8x\n", ptr[i], ptr[i + 1]);
                else
                        pr_devel("%.8x\n", ptr[i]);
        }
#endif /* DEBUG */
}

/* Find a CXL adapter by its number and increase its refcount */
struct cxl *get_cxl_adapter(int num)
{
        struct cxl *adapter;

        spin_lock(&adapter_idr_lock);
        if ((adapter = idr_find(&cxl_adapter_idr, num)))
                get_device(&adapter->dev);
        spin_unlock(&adapter_idr_lock);

        return adapter;
}
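
/* Assign the adapter a unique number from the IDR, under adapter_idr_lock */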
static int cxl_alloc_adapter_nr(struct cxl *adapter)
{
        int i;

        idr_preload(GFP_KERNEL);
        spin_lock(&adapter_idr_lock);
        i = idr_alloc(&cxl_adapter_idr, adapter, 0, 0, GFP_NOWAIT);
        spin_unlock(&adapter_idr_lock);
        idr_preload_end();
        if (i < 0)
                return i;

        adapter->adapter_num = i;

        return 0;
}

void cxl_remove_adapter_nr(struct cxl *adapter)
{
        idr_remove(&cxl_adapter_idr, adapter->adapter_num);
}
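
/* Allocate a struct cxl, assign it an adapter number and name its device ("card%i") */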
struct cxl *cxl_alloc_adapter(void)
{
        struct cxl *adapter;

        if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL)))
                return NULL;

        spin_lock_init(&adapter->afu_list_lock);

        if (cxl_alloc_adapter_nr(adapter))
                goto err1;

        if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num))
                goto err2;

        /* start with context lock taken */
        atomic_set(&adapter->contexts_num, -1);

        return adapter;
err2:
        cxl_remove_adapter_nr(adapter);
err1:
        kfree(adapter);
        return NULL;
}
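
/* Allocate and initialise a struct cxl_afu for the given slice of the adapter */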
struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
{
        struct cxl_afu *afu;

        if (!(afu = kzalloc(sizeof(struct cxl_afu), GFP_KERNEL)))
                return NULL;

        afu->adapter = adapter;
        afu->dev.parent = &adapter->dev;
        afu->dev.release = cxl_ops->release_afu;
        afu->slice = slice;
        idr_init(&afu->contexts_idr);
        mutex_init(&afu->contexts_lock);
        spin_lock_init(&afu->afu_cntl_lock);
        atomic_set(&afu->configured_state, -1);
        afu->prefault_mode = CXL_PREFAULT_NONE;
        afu->irqs_max = afu->adapter->user_irqs;

        return afu;
}
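
/* Prefer AFU directed mode, fall back to dedicated process mode */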
int cxl_afu_select_best_mode(struct cxl_afu *afu)
{
        if (afu->modes_supported & CXL_MODE_DIRECTED)
                return cxl_ops->afu_activate_mode(afu, CXL_MODE_DIRECTED);

        if (afu->modes_supported & CXL_MODE_DEDICATED)
                return cxl_ops->afu_activate_mode(afu, CXL_MODE_DEDICATED);

        dev_warn(&afu->dev, "No supported programming modes available\n");
        /* We don't fail this so the user can inspect sysfs */
        return 0;
}
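
/*
 * contexts_num counts the active contexts on the adapter; a value of -1
 * means the adapter context lock is held and no new contexts may be attached.
 */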
int cxl_adapter_context_get(struct cxl *adapter)
{
        int rc;

        rc = atomic_inc_unless_negative(&adapter->contexts_num);
        return rc ? 0 : -EBUSY;
}

void cxl_adapter_context_put(struct cxl *adapter)
{
        atomic_dec_if_positive(&adapter->contexts_num);
}

int cxl_adapter_context_lock(struct cxl *adapter)
{
        int rc;

        /* no active contexts -> contexts_num == 0 */
        rc = atomic_cmpxchg(&adapter->contexts_num, 0, -1);

        return rc ? -EBUSY : 0;
}

void cxl_adapter_context_unlock(struct cxl *adapter)
{
        int val = atomic_cmpxchg(&adapter->contexts_num, -1, 0);

        /*
         * contexts lock taken -> contexts_num == -1
         * If not true then show a warning and force reset the lock.
         * This will happen when context_unlock was requested without
         * doing a context_lock.
         */
        if (val != -1) {
                atomic_set(&adapter->contexts_num, 0);
                WARN(1, "Adapter context unlocked with %d active contexts",
                     val);
        }
}

static int __init init_cxl(void)
{
        int rc = 0;

        if (!tlbie_capable)
                return -EINVAL;

        if ((rc = cxl_file_init()))
                return rc;

        cxl_debugfs_init();

        /*
         * We don't register the callback on P9. The SLB callback is only
         * used for the PSL8 MMU and CX4.
         */
        if (cxl_is_power8()) {
                rc = register_cxl_calls(&cxl_calls);
                if (rc)
                        goto err;
        }

        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                cxl_ops = &cxl_native_ops;
                rc = pci_register_driver(&cxl_pci_driver);
        }
#ifdef CONFIG_PPC_PSERIES
        else {
                cxl_ops = &cxl_guest_ops;
                rc = platform_driver_register(&cxl_of_driver);
        }
#endif
        if (rc)
                goto err1;

        return 0;
err1:
        if (cxl_is_power8())
                unregister_cxl_calls(&cxl_calls);
err:
        cxl_debugfs_exit();
        cxl_file_exit();

        return rc;
}

static void exit_cxl(void)
{
        if (cpu_has_feature(CPU_FTR_HVMODE))
                pci_unregister_driver(&cxl_pci_driver);
#ifdef CONFIG_PPC_PSERIES
        else
                platform_driver_unregister(&cxl_of_driver);
#endif

        cxl_debugfs_exit();
        cxl_file_exit();
        if (cxl_is_power8())
                unregister_cxl_calls(&cxl_calls);
        idr_destroy(&cxl_adapter_idr);
}

module_init(init_cxl);
module_exit(exit_cxl);

MODULE_DESCRIPTION("IBM Coherent Accelerator");
MODULE_AUTHOR("Ian Munsie <imunsie@au1.ibm.com>");
MODULE_LICENSE("GPL");