// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 IBM Corp.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/sched/mm.h>
#include <linux/mmu_context.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>

#include "cxl.h"

/*
 * Allocates space for a CXL context.
 */
struct cxl_context *cxl_context_alloc(void)
{
	return kzalloc(sizeof(struct cxl_context), GFP_KERNEL);
}

/*
 * Initialises a CXL context.
 */
int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
{
	int i;

	ctx->afu = afu;
	ctx->master = master;

	ctx->pid = NULL; /* Set in start work ioctl */
	mutex_init(&ctx->mapping_lock);
	ctx->mapping = NULL;

	ctx->tidr = 0;
	ctx->assign_tidr = false;

	if (cxl_is_power8()) {
		spin_lock_init(&ctx->sste_lock);

		/*
		 * Allocate the segment table before we put it in the IDR so that we
		 * can always access it when dereferenced from IDR. For the same
		 * reason, the segment table is only destroyed after the context is
		 * removed from the IDR. Access to this in the IOCTL is protected by
		 * Linux filesystem semantics (can't IOCTL until open is complete).
		 */
		i = cxl_alloc_sst(ctx);
		if (i)
			return i;
	}

	INIT_WORK(&ctx->fault_work, cxl_handle_fault);

	init_waitqueue_head(&ctx->wq);
	spin_lock_init(&ctx->lock);

	ctx->irq_bitmap = NULL;
	ctx->pending_irq = false;
	ctx->pending_fault = false;
	ctx->pending_afu_err = false;

	INIT_LIST_HEAD(&ctx->irq_names);

	/*
	 * When we have to destroy all contexts in cxl_context_detach_all() we
	 * end up with afu_release_irqs() called from inside an
	 * idr_for_each_entry(). Hence we need to make sure that anything
	 * dereferenced from this IDR is ok before we allocate the IDR here.
	 * This clears out the IRQ ranges to ensure this.
	 */
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		ctx->irqs.range[i] = 0;

	mutex_init(&ctx->status_mutex);

	ctx->status = OPENED;

	/*
	 * Allocating the IDR! We had better make sure everything that
	 * dereferences from it is set up.
	 */
	mutex_lock(&afu->contexts_lock);
	idr_preload(GFP_KERNEL);
	i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
		      ctx->afu->num_procs, GFP_NOWAIT);
	idr_preload_end();
	mutex_unlock(&afu->contexts_lock);
	if (i < 0)
		return i;

	ctx->pe = i;
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		ctx->elem = &ctx->afu->native->spa[i];
		ctx->external_pe = ctx->pe;
	} else {
		ctx->external_pe = -1; /* assigned when attaching */
	}
	ctx->pe_inserted = false;

	/*
	 * Take a ref on the afu so that it stays alive at least until
	 * this context is reclaimed inside reclaim_ctx().
	 */
	cxl_afu_get(afu);

	return 0;
}
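
/*
 * Minimal usage sketch (not the driver's actual caller code, which lives
 * in the file and kernel API layers; afu and rc are assumed to be in
 * scope): allocate a context, initialise it against an AFU, and undo the
 * allocation on failure.
 *
 *	struct cxl_context *ctx = cxl_context_alloc();
 *
 *	if (!ctx)
 *		return -ENOMEM;
 *	rc = cxl_context_init(ctx, afu, false);
 *	if (rc) {
 *		kfree(ctx);
 *		return rc;
 *	}
 *
 * Teardown mirrors this: cxl_context_detach() stops the context in
 * hardware and cxl_context_free() drops it from the IDR and reclaims the
 * memory via RCU (both below).
 */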

void cxl_context_set_mapping(struct cxl_context *ctx,
			     struct address_space *mapping)
{
	mutex_lock(&ctx->mapping_lock);
	ctx->mapping = mapping;
	mutex_unlock(&ctx->mapping_lock);
}
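
/*
 * Note: cxl_context_init() deliberately leaves ctx->mapping NULL; the
 * address_space is attached later through cxl_context_set_mapping(),
 * presumably by whichever consumer (character device or kernel API user)
 * ends up owning the file mapping.
 */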
static vm_fault_t cxl_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct cxl_context *ctx = vma->vm_file->private_data;
	u64 area, offset;
	vm_fault_t ret;

	offset = vmf->pgoff << PAGE_SHIFT;

	pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
			__func__, ctx->pe, vmf->address, offset);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		area = ctx->afu->psn_phys;
		if (offset >= ctx->afu->adapter->ps_size)
			return VM_FAULT_SIGBUS;
	} else {
		area = ctx->psn_phys;
		if (offset >= ctx->psn_size)
			return VM_FAULT_SIGBUS;
	}

	mutex_lock(&ctx->status_mutex);

	if (ctx->status != STARTED) {
		mutex_unlock(&ctx->status_mutex);
		pr_devel("%s: Context not started, failing problem state access\n", __func__);
		if (ctx->mmio_err_ff) {
			/*
			 * Back the fault with a cached page filled with 0xFF
			 * instead of raising SIGBUS, so reads see all-ones.
			 */
			if (!ctx->ff_page) {
				ctx->ff_page = alloc_page(GFP_USER);
				if (!ctx->ff_page)
					return VM_FAULT_OOM;
				memset(page_address(ctx->ff_page), 0xff, PAGE_SIZE);
			}
			get_page(ctx->ff_page);
			vmf->page = ctx->ff_page;
			vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
			return 0;
		}
		return VM_FAULT_SIGBUS;
	}

	ret = vmf_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);

	mutex_unlock(&ctx->status_mutex);

	return ret;
}

static const struct vm_operations_struct cxl_mmap_vmops = {
	.fault = cxl_mmap_fault,
};
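
/*
 * cxl_context_iomap() below installs cxl_mmap_vmops on a VM_IO | VM_PFNMAP
 * vma, so the problem state area is never populated up front: each page is
 * inserted on demand by cxl_mmap_fault() via vmf_insert_pfn().
 */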

/*
 * Map a per-context mmio space into the given vma.
 */
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
{
	u64 start = vma->vm_pgoff << PAGE_SHIFT;
	u64 len = vma->vm_end - vma->vm_start;

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		if (start + len > ctx->afu->adapter->ps_size)
			return -EINVAL;

		if (cxl_is_power9()) {
			/*
			 * Make sure there is a valid problem state
			 * area space for this AFU.
			 */
			if (ctx->master && !ctx->afu->psa) {
				pr_devel("AFU doesn't support mmio space\n");
				return -EINVAL;
			}

			/* Can't mmap until the AFU is enabled */
			if (!ctx->afu->enabled)
				return -EBUSY;
		}
	} else {
		if (start + len > ctx->psn_size)
			return -EINVAL;

		/* Make sure there is a valid per process space for this AFU */
		if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
			pr_devel("AFU doesn't support mmio space\n");
			return -EINVAL;
		}

		/* Can't mmap until the AFU is enabled */
		if (!ctx->afu->enabled)
			return -EBUSY;
	}

	pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
		 ctx->psn_phys, ctx->pe, ctx->master);

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &cxl_mmap_vmops;
	return 0;
}

/*
 * Detach a context from the hardware. This disables interrupts and doesn't
 * return until all outstanding interrupts for this context have completed.
 * The hardware should no longer access *ctx after this has returned.
 */
int __detach_context(struct cxl_context *ctx)
{
	enum cxl_context_status status;

	mutex_lock(&ctx->status_mutex);
	status = ctx->status;
	ctx->status = CLOSED;
	mutex_unlock(&ctx->status_mutex);
	if (status != STARTED)
		return -EBUSY;

	/* Only warn if we detached while the link was OK.
	 * If detach fails when hw is down, we don't care.
	 */
	WARN_ON(cxl_ops->detach_process(ctx) &&
		cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
	flush_work(&ctx->fault_work); /* Only needed for dedicated process */

	/*
	 * Wait until no further interrupts are presented by the PSL
	 * for this context.
	 */
	if (cxl_ops->irq_wait)
		cxl_ops->irq_wait(ctx);

	/* release the reference to the group leader and mm handling pid */
	put_pid(ctx->pid);

	cxl_ctx_put();

	/* Decrease the attached context count on the adapter */
	cxl_adapter_context_put(ctx->afu->adapter);

	/* Decrease the mm count on the context */
	cxl_context_mm_count_put(ctx);
	if (ctx->mm)
		mm_context_remove_copro(ctx->mm);
	ctx->mm = NULL;

	return 0;
}

/*
 * Detach the given context from the AFU. This doesn't actually free the
 * context but it should stop the context running in hardware (i.e. prevent
 * this context from generating any further interrupts so that it can be
 * freed).
 */
void cxl_context_detach(struct cxl_context *ctx)
{
	int rc;

	rc = __detach_context(ctx);
	if (rc)
		return;

	afu_release_irqs(ctx, ctx);
	wake_up_all(&ctx->wq);
}

/*
 * Detach all contexts on the given AFU.
 */
void cxl_context_detach_all(struct cxl_afu *afu)
{
	struct cxl_context *ctx;
	int tmp;

	mutex_lock(&afu->contexts_lock);
	idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
		/*
		 * Anything done in here needs to be set up before the IDR is
		 * created and torn down after the IDR is removed.
		 */
		cxl_context_detach(ctx);

		/*
		 * We are force detaching - remove any active PSA mappings so
		 * userspace cannot interfere with the card if it comes back.
		 * Easiest way to exercise this is to unbind and rebind the
		 * driver via sysfs while it is in use.
		 */
		mutex_lock(&ctx->mapping_lock);
		if (ctx->mapping)
			unmap_mapping_range(ctx->mapping, 0, 0, 1);
		mutex_unlock(&ctx->mapping_lock);
	}
	mutex_unlock(&afu->contexts_lock);
}

static void reclaim_ctx(struct rcu_head *rcu)
{
	struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);

	if (cxl_is_power8())
		free_page((u64)ctx->sstp);
	if (ctx->ff_page)
		__free_page(ctx->ff_page);
	ctx->sstp = NULL;

	kfree(ctx->irq_bitmap);

	/* Drop ref to the afu device taken during cxl_context_init */
	cxl_afu_put(ctx->afu);

	kfree(ctx);
}

void cxl_context_free(struct cxl_context *ctx)
{
	if (ctx->kernelapi && ctx->mapping)
		cxl_release_mapping(ctx);
	mutex_lock(&ctx->afu->contexts_lock);
	idr_remove(&ctx->afu->contexts_idr, ctx->pe);
	mutex_unlock(&ctx->afu->contexts_lock);
	call_rcu(&ctx->rcu, reclaim_ctx);
}
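
/*
 * The two helpers below pin and release the mm associated with a context.
 * cxl_context_mm_count_put() is called from __detach_context() above; the
 * matching cxl_context_mm_count_get() is expected to be called from the
 * attach paths elsewhere in the driver once ctx->mm has been set.
 */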
void cxl_context_mm_count_get(struct cxl_context *ctx)
{
	if (ctx->mm)
		mmgrab(ctx->mm);
}

void cxl_context_mm_count_put(struct cxl_context *ctx)
{
	if (ctx->mm)
		mmdrop(ctx->mm);
}