/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>
#include "cxl.h"

/*
 * Allocates space for a CXL context.
 */
struct cxl_context *cxl_context_alloc(void)
{
	return kzalloc(sizeof(struct cxl_context), GFP_KERNEL);
}
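
/*
 * Note: the context returned above is zero-initialised (kzalloc) and is not
 * usable until cxl_context_init() below has run; it is eventually released
 * through cxl_context_free().
 */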

/*
 * Initialises a CXL context.
 */
int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
		     struct address_space *mapping)
{
	int i;

	spin_lock_init(&ctx->sste_lock);
	ctx->afu = afu;
	ctx->master = master;
	ctx->pid = NULL; /* Set in start work ioctl */

	mutex_init(&ctx->mapping_lock);
	ctx->mapping = mapping;

	/*
	 * Allocate the segment table before we put it in the IDR so that we
	 * can always access it when dereferenced from IDR. For the same
	 * reason, the segment table is only destroyed after the context is
	 * removed from the IDR. Access to this in the IOCTL is protected by
	 * Linux filesystem semantics (can't IOCTL until open is complete).
	 */
	i = cxl_alloc_sst(ctx);
	if (i)
		return i;

	INIT_WORK(&ctx->fault_work, cxl_handle_fault);

	init_waitqueue_head(&ctx->wq);
	spin_lock_init(&ctx->lock);

	ctx->irq_bitmap = NULL;
	ctx->pending_irq = false;
	ctx->pending_fault = false;
	ctx->pending_afu_err = false;

	/*
	 * When we have to destroy all contexts in cxl_context_detach_all() we
	 * end up with afu_release_irqs() called from inside an
	 * idr_for_each_entry(). Hence we need to make sure that anything
	 * dereferenced from this IDR is ok before we allocate the IDR here.
	 * This clears out the IRQ ranges to ensure this.
	 */
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		ctx->irqs.range[i] = 0;

	mutex_init(&ctx->status_mutex);

	ctx->status = OPENED;

	/*
	 * Allocating IDR! We had better make sure everything dereferenced
	 * from it is set up first.
	 */
	mutex_lock(&afu->contexts_lock);
	idr_preload(GFP_KERNEL);
	i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
		      ctx->afu->num_procs, GFP_NOWAIT);
	idr_preload_end();
	mutex_unlock(&afu->contexts_lock);
	if (i < 0)
		return i;

	ctx->pe = i;
	ctx->elem = &ctx->afu->spa[i];
	ctx->pe_inserted = false;

	return 0;
}

static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct cxl_context *ctx = vma->vm_file->private_data;
	unsigned long address = (unsigned long)vmf->virtual_address;
	u64 area, offset;

	offset = vmf->pgoff << PAGE_SHIFT;

	pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
			__func__, ctx->pe, address, offset);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		area = ctx->afu->psn_phys;
		if (offset > ctx->afu->adapter->ps_size)
			return VM_FAULT_SIGBUS;
	} else {
		area = ctx->psn_phys;
		if (offset > ctx->psn_size)
			return VM_FAULT_SIGBUS;
	}

	mutex_lock(&ctx->status_mutex);

	if (ctx->status != STARTED) {
		mutex_unlock(&ctx->status_mutex);
		pr_devel("%s: Context not started, failing problem state access\n", __func__);
		return VM_FAULT_SIGBUS;
	}

	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);

	mutex_unlock(&ctx->status_mutex);

	return VM_FAULT_NOPAGE;
}
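
/*
 * Only a fault handler is provided: problem state pages are PFN-inserted on
 * demand by cxl_mmap_fault() above rather than being mapped up front.
 */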
static const struct vm_operations_struct cxl_mmap_vmops = {
	.fault = cxl_mmap_fault,
};

/*
 * Map a per-context mmio space into the given vma.
 */
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
{
	u64 len = vma->vm_end - vma->vm_start;
	len = min(len, ctx->psn_size);

	if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
		/* make sure there is a valid per process space for this AFU */
		if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
			pr_devel("AFU doesn't support mmio space\n");
			return -EINVAL;
		}

		/* Can't mmap until the AFU is enabled */
		if (!ctx->afu->enabled)
			return -EBUSY;
	}

	pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
		 ctx->psn_phys, ctx->pe, ctx->master);

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &cxl_mmap_vmops;

	return 0;
}

/*
 * Detach a context from the hardware. This disables interrupts and doesn't
 * return until all outstanding interrupts for this context have completed.
 * The hardware should no longer access *ctx after this has returned.
 */
static void __detach_context(struct cxl_context *ctx)
{
	enum cxl_context_status status;

	mutex_lock(&ctx->status_mutex);
	status = ctx->status;
	ctx->status = CLOSED;
	mutex_unlock(&ctx->status_mutex);
	if (status != STARTED)
		return;

	WARN_ON(cxl_detach_process(ctx));
	afu_release_irqs(ctx, ctx);
	flush_work(&ctx->fault_work); /* Only needed for dedicated process */
	wake_up_all(&ctx->wq);
}

/*
 * Detach the given context from the AFU. This doesn't actually free the
 * context but it should stop the context running in hardware
 * (ie. prevent this context from generating any further interrupts
 * so that it can be freed).
 */
void cxl_context_detach(struct cxl_context *ctx)
{
	__detach_context(ctx);
}

/*
 * Detach all contexts on the given AFU.
 */
void cxl_context_detach_all(struct cxl_afu *afu)
{
	struct cxl_context *ctx;
	int tmp;

	mutex_lock(&afu->contexts_lock);
	idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
		/*
		 * Anything done in here needs to be set up before the IDR is
		 * created and torn down after the IDR is removed.
		 */
		__detach_context(ctx);

		/*
		 * We are force detaching - remove any active PSA mappings so
		 * userspace cannot interfere with the card if it comes back.
		 * Easiest way to exercise this is to unbind and rebind the
		 * driver via sysfs while it is in use.
		 */
		mutex_lock(&ctx->mapping_lock);
		if (ctx->mapping)
			unmap_mapping_range(ctx->mapping, 0, 0, 1);
		mutex_unlock(&ctx->mapping_lock);
	}
	mutex_unlock(&afu->contexts_lock);
}

static void reclaim_ctx(struct rcu_head *rcu)
{
	struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);

	free_page((u64)ctx->sstp);
	ctx->sstp = NULL;

	put_pid(ctx->pid);
	kfree(ctx);
}

void cxl_context_free(struct cxl_context *ctx)
{
	mutex_lock(&ctx->afu->contexts_lock);
	idr_remove(&ctx->afu->contexts_idr, ctx->pe);
	mutex_unlock(&ctx->afu->contexts_lock);
	call_rcu(&ctx->rcu, reclaim_ctx);
}
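
/*
 * Illustrative lifecycle sketch (not part of the driver): a caller, such as
 * the char-device file operations built on top of this file, would be
 * expected to pair these helpers roughly as follows. The names afu, master
 * and mapping stand for values the caller already holds; error handling and
 * the mmap path (cxl_context_iomap()) are omitted for brevity.
 *
 *	struct cxl_context *ctx;
 *
 *	ctx = cxl_context_alloc();                    allocate zeroed context
 *	cxl_context_init(ctx, afu, master, mapping);  add it to the AFU's IDR
 *	...                                           context is started later
 *	cxl_context_detach(ctx);                      stop it in hardware
 *	cxl_context_free(ctx);                        drop from IDR, RCU free
 */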