// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/interrupt.h>
#include <asm/pnv-ocxl.h>
#include "ocxl_internal.h"
#include "trace.h"
struct afu_irq {
int id ;
int hw_irq ;
unsigned int virq ;
char * name ;
u64 trigger_page ;
2019-03-27 16:31:35 +11:00
irqreturn_t ( * handler ) ( void * private ) ;
void ( * free_private ) ( void * private ) ;
void * private ;
2018-01-23 12:31:42 +01:00
} ;
2019-03-27 16:31:34 +11:00
int ocxl_irq_offset_to_id ( struct ocxl_context * ctx , u64 offset )
2018-01-23 12:31:42 +01:00
{
return ( offset - ctx - > afu - > irq_base_offset ) > > PAGE_SHIFT ;
}
2019-03-27 16:31:34 +11:00
u64 ocxl_irq_id_to_offset ( struct ocxl_context * ctx , int irq_id )
2018-01-23 12:31:42 +01:00
{
2019-03-27 16:31:34 +11:00
return ctx - > afu - > irq_base_offset + ( irq_id < < PAGE_SHIFT ) ;
2018-01-23 12:31:42 +01:00
}
2019-03-27 16:31:35 +11:00
int ocxl_irq_set_handler ( struct ocxl_context * ctx , int irq_id ,
irqreturn_t ( * handler ) ( void * private ) ,
void ( * free_private ) ( void * private ) ,
void * private )
{
struct afu_irq * irq ;
int rc ;
mutex_lock ( & ctx - > irq_lock ) ;
irq = idr_find ( & ctx - > irq_idr , irq_id ) ;
if ( ! irq ) {
rc = - EINVAL ;
goto unlock ;
}
irq - > handler = handler ;
irq - > private = private ;
irq - > free_private = free_private ;
rc = 0 ;
// Fall through to unlock
unlock :
mutex_unlock ( & ctx - > irq_lock ) ;
return rc ;
}
EXPORT_SYMBOL_GPL ( ocxl_irq_set_handler ) ;
2018-01-23 12:31:42 +01:00
static irqreturn_t afu_irq_handler ( int virq , void * data )
{
struct afu_irq * irq = ( struct afu_irq * ) data ;
2018-01-23 12:31:44 +01:00
trace_ocxl_afu_irq_receive ( virq ) ;
2019-03-27 16:31:35 +11:00
if ( irq - > handler )
return irq - > handler ( irq - > private ) ;
return IRQ_HANDLED ; // Just drop it on the ground
2018-01-23 12:31:42 +01:00
}
static int setup_afu_irq ( struct ocxl_context * ctx , struct afu_irq * irq )
{
int rc ;
irq - > virq = irq_create_mapping ( NULL , irq - > hw_irq ) ;
if ( ! irq - > virq ) {
pr_err ( " irq_create_mapping failed \n " ) ;
return - ENOMEM ;
}
pr_debug ( " hw_irq %d mapped to virq %u \n " , irq - > hw_irq , irq - > virq ) ;
irq - > name = kasprintf ( GFP_KERNEL , " ocxl-afu-%u " , irq - > virq ) ;
if ( ! irq - > name ) {
irq_dispose_mapping ( irq - > virq ) ;
return - ENOMEM ;
}
rc = request_irq ( irq - > virq , afu_irq_handler , 0 , irq - > name , irq ) ;
if ( rc ) {
kfree ( irq - > name ) ;
irq - > name = NULL ;
irq_dispose_mapping ( irq - > virq ) ;
pr_err ( " request_irq failed: %d \n " , rc ) ;
return rc ;
}
return 0 ;
}
static void release_afu_irq ( struct afu_irq * irq )
{
free_irq ( irq - > virq , irq ) ;
irq_dispose_mapping ( irq - > virq ) ;
kfree ( irq - > name ) ;
}
2019-03-27 16:31:34 +11:00
int ocxl_afu_irq_alloc ( struct ocxl_context * ctx , int * irq_id )
2018-01-23 12:31:42 +01:00
{
struct afu_irq * irq ;
int rc ;
irq = kzalloc ( sizeof ( struct afu_irq ) , GFP_KERNEL ) ;
if ( ! irq )
return - ENOMEM ;
/*
* We limit the number of afu irqs per context and per link to
* avoid a single process or user depleting the pool of IPIs
*/
mutex_lock ( & ctx - > irq_lock ) ;
irq - > id = idr_alloc ( & ctx - > irq_idr , irq , 0 , MAX_IRQ_PER_CONTEXT ,
GFP_KERNEL ) ;
if ( irq - > id < 0 ) {
rc = - ENOSPC ;
goto err_unlock ;
}
rc = ocxl_link_irq_alloc ( ctx - > afu - > fn - > link , & irq - > hw_irq ,
& irq - > trigger_page ) ;
if ( rc )
goto err_idr ;
rc = setup_afu_irq ( ctx , irq ) ;
if ( rc )
goto err_alloc ;
2019-03-27 16:31:34 +11:00
trace_ocxl_afu_irq_alloc ( ctx - > pasid , irq - > id , irq - > virq , irq - > hw_irq ) ;
2018-01-23 12:31:42 +01:00
mutex_unlock ( & ctx - > irq_lock ) ;
2019-03-27 16:31:34 +11:00
* irq_id = irq - > id ;
2018-01-23 12:31:42 +01:00
return 0 ;
err_alloc :
ocxl_link_free_irq ( ctx - > afu - > fn - > link , irq - > hw_irq ) ;
err_idr :
idr_remove ( & ctx - > irq_idr , irq - > id ) ;
err_unlock :
mutex_unlock ( & ctx - > irq_lock ) ;
kfree ( irq ) ;
return rc ;
}
2019-03-27 16:31:35 +11:00
EXPORT_SYMBOL_GPL ( ocxl_afu_irq_alloc ) ;
2018-01-23 12:31:42 +01:00
static void afu_irq_free ( struct afu_irq * irq , struct ocxl_context * ctx )
{
2018-01-23 12:31:44 +01:00
trace_ocxl_afu_irq_free ( ctx - > pasid , irq - > id ) ;
2018-01-23 12:31:42 +01:00
if ( ctx - > mapping )
unmap_mapping_range ( ctx - > mapping ,
2019-03-27 16:31:34 +11:00
ocxl_irq_id_to_offset ( ctx , irq - > id ) ,
2018-01-23 12:31:42 +01:00
1 < < PAGE_SHIFT , 1 ) ;
release_afu_irq ( irq ) ;
2019-03-27 16:31:35 +11:00
if ( irq - > free_private )
irq - > free_private ( irq - > private ) ;
2018-01-23 12:31:42 +01:00
ocxl_link_free_irq ( ctx - > afu - > fn - > link , irq - > hw_irq ) ;
kfree ( irq ) ;
}
2019-03-27 16:31:34 +11:00
int ocxl_afu_irq_free ( struct ocxl_context * ctx , int irq_id )
2018-01-23 12:31:42 +01:00
{
struct afu_irq * irq ;
mutex_lock ( & ctx - > irq_lock ) ;
2019-03-27 16:31:34 +11:00
irq = idr_find ( & ctx - > irq_idr , irq_id ) ;
2018-01-23 12:31:42 +01:00
if ( ! irq ) {
mutex_unlock ( & ctx - > irq_lock ) ;
return - EINVAL ;
}
idr_remove ( & ctx - > irq_idr , irq - > id ) ;
afu_irq_free ( irq , ctx ) ;
mutex_unlock ( & ctx - > irq_lock ) ;
return 0 ;
}
2019-03-27 16:31:35 +11:00
EXPORT_SYMBOL_GPL ( ocxl_afu_irq_free ) ;
2018-01-23 12:31:42 +01:00
void ocxl_afu_irq_free_all ( struct ocxl_context * ctx )
{
struct afu_irq * irq ;
int id ;
mutex_lock ( & ctx - > irq_lock ) ;
idr_for_each_entry ( & ctx - > irq_idr , irq , id )
afu_irq_free ( irq , ctx ) ;
mutex_unlock ( & ctx - > irq_lock ) ;
}
2019-03-27 16:31:34 +11:00
u64 ocxl_afu_irq_get_addr ( struct ocxl_context * ctx , int irq_id )
2018-01-23 12:31:42 +01:00
{
struct afu_irq * irq ;
u64 addr = 0 ;
mutex_lock ( & ctx - > irq_lock ) ;
2019-03-27 16:31:34 +11:00
irq = idr_find ( & ctx - > irq_idr , irq_id ) ;
2018-01-23 12:31:42 +01:00
if ( irq )
addr = irq - > trigger_page ;
mutex_unlock ( & ctx - > irq_lock ) ;
return addr ;
}
2019-03-27 16:31:35 +11:00
EXPORT_SYMBOL_GPL ( ocxl_afu_irq_get_addr ) ;