// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include "trace.h"
#include "ocxl_internal.h"
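
/*
 * Allocate a context for the given AFU and reserve a PASID for it in
 * the AFU's range. The new context starts in the OPENED state and
 * holds a reference on the AFU until ocxl_context_free() drops it.
 */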
int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu,
		struct address_space *mapping)
{
	int pasid;
	struct ocxl_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->afu = afu;
	mutex_lock(&afu->contexts_lock);
	pasid = idr_alloc(&afu->contexts_idr, ctx, afu->pasid_base,
			afu->pasid_base + afu->pasid_max, GFP_KERNEL);
	if (pasid < 0) {
		mutex_unlock(&afu->contexts_lock);
		kfree(ctx);
		return pasid;
	}
	afu->pasid_count++;
	mutex_unlock(&afu->contexts_lock);

	ctx->pasid = pasid;
	ctx->status = OPENED;
	mutex_init(&ctx->status_mutex);
	ctx->mapping = mapping;
	mutex_init(&ctx->mapping_lock);
	init_waitqueue_head(&ctx->events_wq);
	mutex_init(&ctx->xsl_error_lock);
	mutex_init(&ctx->irq_lock);
	idr_init(&ctx->irq_idr);
	ctx->tidr = 0;

	/*
	 * Keep a reference on the AFU to make sure it's valid for the
	 * duration of the life of the context
	 */
	ocxl_afu_get(afu);
	*context = ctx;
	return 0;
}
EXPORT_SYMBOL_GPL(ocxl_context_alloc);
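
/*
 * A context normally goes through alloc -> attach -> mmap/use ->
 * detach -> free. A minimal caller sketch (illustrative only, error
 * handling omitted):
 *
 *	struct ocxl_context *ctx;
 *
 *	ocxl_context_alloc(&ctx, afu, mapping);
 *	ocxl_context_attach(ctx, amr, current->mm);
 *	// use the AFU; mappings fault in through ocxl_context_mmap()
 *	ocxl_context_detach(ctx);
 *	ocxl_context_free(ctx);
 */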

/*
 * Callback for when a translation fault triggers an error
 * data:	a pointer to the context which triggered the fault
 * addr:	the address that triggered the error
 * dsisr:	the value of the PPC64 dsisr register
 */
static void xsl_fault_error(void *data, u64 addr, u64 dsisr)
{
	struct ocxl_context *ctx = (struct ocxl_context *) data;

	mutex_lock(&ctx->xsl_error_lock);
	ctx->xsl_error.addr = addr;
	ctx->xsl_error.dsisr = dsisr;
	ctx->xsl_error.count++;
	mutex_unlock(&ctx->xsl_error_lock);

	wake_up_all(&ctx->events_wq);
}
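
/*
 * Attach the context to a process: add a Process Element for this
 * PASID (and the mm's context id, when an mm is provided) to the
 * link's SPA and move the context from OPENED to ATTACHED.
 * xsl_fault_error() is registered as the callback for translation
 * fault errors.
 */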
int ocxl_context_attach(struct ocxl_context *ctx, u64 amr, struct mm_struct *mm)
{
	int rc;
	unsigned long pidr = 0;
	struct pci_dev *dev;

	// Locks both status & tidr
	mutex_lock(&ctx->status_mutex);
	if (ctx->status != OPENED) {
		rc = -EIO;
		goto out;
	}

	if (mm)
		pidr = mm->context.id;

	dev = to_pci_dev(ctx->afu->fn->dev.parent);
	rc = ocxl_link_add_pe(ctx->afu->fn->link, ctx->pasid, pidr, ctx->tidr,
			amr, pci_dev_id(dev), mm, xsl_fault_error, ctx);
	if (rc)
		goto out;

	ctx->status = ATTACHED;
out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(ocxl_context_attach);
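
/*
 * Map one page of an AFU interrupt trigger into userspace. The mmap
 * offset is translated to an interrupt id; faulting on an offset with
 * no allocated interrupt raises SIGBUS.
 */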
static vm_fault_t map_afu_irq(struct vm_area_struct *vma, unsigned long address,
		u64 offset, struct ocxl_context *ctx)
{
	u64 trigger_addr;
	int irq_id = ocxl_irq_offset_to_id(ctx, offset);

	trigger_addr = ocxl_afu_irq_get_addr(ctx, irq_id);
	if (!trigger_addr)
		return VM_FAULT_SIGBUS;

	return vmf_insert_pfn(vma, address, trigger_addr >> PAGE_SHIFT);
}
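
/*
 * Map one page of the context's per-process MMIO area. The context
 * must be ATTACHED: its slot in the AFU MMIO range is derived from
 * the PASID's offset within the AFU's PASID base.
 */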
static vm_fault_t map_pp_mmio(struct vm_area_struct *vma, unsigned long address,
		u64 offset, struct ocxl_context *ctx)
{
	u64 pp_mmio_addr;
	int pasid_off;
	vm_fault_t ret;

	if (offset >= ctx->afu->config.pp_mmio_stride)
		return VM_FAULT_SIGBUS;

	mutex_lock(&ctx->status_mutex);
	if (ctx->status != ATTACHED) {
		mutex_unlock(&ctx->status_mutex);
		pr_debug("%s: Context not attached, failing mmio mmap\n",
			__func__);
		return VM_FAULT_SIGBUS;
	}

	pasid_off = ctx->pasid - ctx->afu->pasid_base;
	pp_mmio_addr = ctx->afu->pp_mmio_start +
		pasid_off * ctx->afu->config.pp_mmio_stride +
		offset;

	ret = vmf_insert_pfn(vma, address, pp_mmio_addr >> PAGE_SHIFT);
	mutex_unlock(&ctx->status_mutex);
	return ret;
}
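
/*
 * Fault handler for context mappings: offsets below irq_base_offset
 * are per-process MMIO, anything above maps an AFU interrupt trigger
 * page.
 */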
static vm_fault_t ocxl_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ocxl_context *ctx = vma->vm_file->private_data;
	u64 offset;
	vm_fault_t ret;

	offset = vmf->pgoff << PAGE_SHIFT;
	pr_debug("%s: pasid %d address 0x%lx offset 0x%llx\n", __func__,
		ctx->pasid, vmf->address, offset);

	if (offset < ctx->afu->irq_base_offset)
		ret = map_pp_mmio(vma, vmf->address, offset, ctx);
	else
		ret = map_afu_irq(vma, vmf->address, offset, ctx);
	return ret;
}

static const struct vm_operations_struct ocxl_vmops = {
	.fault = ocxl_mmap_fault,
};
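
/*
 * Validate an mmap request for an AFU interrupt trigger page before
 * it is accepted in ocxl_context_mmap().
 */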
static int check_mmap_afu_irq(struct ocxl_context *ctx,
			struct vm_area_struct *vma)
{
	int irq_id = ocxl_irq_offset_to_id(ctx, vma->vm_pgoff << PAGE_SHIFT);

	/* only one page */
	if (vma_pages(vma) != 1)
		return -EINVAL;

	/* check offset validity */
	if (!ocxl_afu_irq_get_addr(ctx, irq_id))
		return -EINVAL;

	/*
	 * trigger page should only be accessible in write mode.
	 *
	 * It's a bit theoretical, as a page mmaped with only
	 * PROT_WRITE is currently readable, but it doesn't hurt.
	 */
	if ((vma->vm_flags & VM_READ) || (vma->vm_flags & VM_EXEC) ||
		!(vma->vm_flags & VM_WRITE))
		return -EINVAL;
	vma->vm_flags &= ~(VM_MAYREAD | VM_MAYEXEC);
	return 0;
}
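
/*
 * Validate that an mmap request for the per-process MMIO area stays
 * within the AFU's pp_mmio_stride.
 */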
static int check_mmap_mmio(struct ocxl_context *ctx,
			struct vm_area_struct *vma)
{
	if ((vma_pages(vma) + vma->vm_pgoff) >
		(ctx->afu->config.pp_mmio_stride >> PAGE_SHIFT))
		return -EINVAL;
	return 0;
}
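
/*
 * mmap handler for a context: validate the requested range, then
 * install ocxl_vmops so pages are filled in on fault.
 */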
int ocxl_context_mmap(struct ocxl_context *ctx, struct vm_area_struct *vma)
{
	int rc;

	if ((vma->vm_pgoff << PAGE_SHIFT) < ctx->afu->irq_base_offset)
		rc = check_mmap_mmio(ctx, vma);
	else
		rc = check_mmap_afu_irq(ctx, vma);
	if (rc)
		return rc;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &ocxl_vmops;
	return 0;
}
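
/*
 * Detach the context: ask the AFU to terminate the PASID, then remove
 * its Process Element from the SPA. May return -EBUSY if the AFU
 * didn't terminate the PASID in time, in which case the caller must
 * not free the context (see comment below).
 */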
int ocxl_context_detach(struct ocxl_context *ctx)
{
	struct pci_dev *dev;
	int afu_control_pos;
	enum ocxl_context_status status;
	int rc;

	mutex_lock(&ctx->status_mutex);
	status = ctx->status;
	ctx->status = CLOSED;
	mutex_unlock(&ctx->status_mutex);
	if (status != ATTACHED)
		return 0;

	dev = to_pci_dev(ctx->afu->fn->dev.parent);
	afu_control_pos = ctx->afu->config.dvsec_afu_control_pos;

	mutex_lock(&ctx->afu->afu_control_lock);
	rc = ocxl_config_terminate_pasid(dev, afu_control_pos, ctx->pasid);
	mutex_unlock(&ctx->afu->afu_control_lock);
	trace_ocxl_terminate_pasid(ctx->pasid, rc);
	if (rc) {
		/*
		 * If we timeout waiting for the AFU to terminate the
		 * pasid, then it's dangerous to clean up the Process
		 * Element entry in the SPA, as it may be referenced
		 * in the future by the AFU. In which case, we would
		 * checkstop because of an invalid PE access (FIR
		 * register 2, bit 42). So leave the PE
		 * defined. Caller shouldn't free the context so that
		 * PASID remains allocated.
		 *
		 * A link reset will be required to cleanup the AFU
		 * and the SPA.
		 */
		if (rc == -EBUSY)
			return rc;
	}
	rc = ocxl_link_remove_pe(ctx->afu->fn->link, ctx->pasid);
	if (rc) {
		dev_warn(&dev->dev,
			"Couldn't remove PE entry cleanly: %d\n", rc);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ocxl_context_detach);
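
/*
 * Force-detach every context of an AFU and tear down their mmio
 * mappings, e.g. when the driver is unbound while the card is in use.
 */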
void ocxl_context_detach_all(struct ocxl_afu *afu)
{
	struct ocxl_context *ctx;
	int tmp;

	mutex_lock(&afu->contexts_lock);
	idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
		ocxl_context_detach(ctx);
		/*
		 * We are force detaching - remove any active mmio
		 * mappings so userspace cannot interfere with the
		 * card if it comes back. Easiest way to exercise
		 * this is to unbind and rebind the driver via sysfs
		 * while it is in use.
		 */
		mutex_lock(&ctx->mapping_lock);
		if (ctx->mapping)
			unmap_mapping_range(ctx->mapping, 0, 0, 1);
		mutex_unlock(&ctx->mapping_lock);
	}
	mutex_unlock(&afu->contexts_lock);
}
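
/*
 * Free a context: release its PASID, free its AFU interrupts and drop
 * the AFU reference. Normally called after a successful
 * ocxl_context_detach().
 */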
void ocxl_context_free(struct ocxl_context *ctx)
{
	mutex_lock(&ctx->afu->contexts_lock);
	ctx->afu->pasid_count--;
	idr_remove(&ctx->afu->contexts_idr, ctx->pasid);
	mutex_unlock(&ctx->afu->contexts_lock);

	ocxl_afu_irq_free_all(ctx);
	idr_destroy(&ctx->irq_idr);

	/* reference to the AFU taken in ocxl_context_alloc() */
	ocxl_afu_put(ctx->afu);
	kfree(ctx);
}
EXPORT_SYMBOL_GPL(ocxl_context_free);