// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * Module Name:
 *  dpcsup.c
 *
 * Abstract: All DPC processing routines for the cyclone board occur here.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>

#include "aacraid.h"

/**
 *	aac_response_normal	-	Handle command replies
 *	@q: Queue to read from
 *
 *	This DPC routine will be run when the adapter interrupts us to let us
 *	know there is a response on our normal priority queue. We will pull off
 *	all the QEs there are and wake up all the waiters before exiting. We
 *	will take a spinlock out on the queue before operating on it.
 */
unsigned int aac_response_normal(struct aac_queue *q)
{
	struct aac_dev *dev = q->dev;
	struct aac_entry *entry;
	struct hw_fib *hwfib;
	struct fib *fib;
	int consumed = 0;
	unsigned long flags, mflags;

	spin_lock_irqsave(q->lock, flags);
	/*
	 *	Keep pulling response QEs off the response queue and waking
	 *	up the waiters until there are no more QEs. We then return
	 *	back to the system. If no response was requested we just
	 *	deallocate the Fib here and continue.
	 */
	while (aac_consumer_get(dev, q, &entry))
	{
		int fast;
		u32 index = le32_to_cpu(entry->addr);
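		/*
		 * Bit 0 of the completion entry flags a fast response;
		 * the remaining bits (scaled down by four) index the
		 * host fib table.
		 */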
		fast = index & 0x01;
		fib = &dev->fibs[index >> 2];
		hwfib = fib->hw_fib_va;

		aac_consumer_free(dev, q, HostNormRespQueue);
		/*
		 *	Remove this fib from the Outstanding I/O queue.
		 *	But only if it has not already been timed out.
		 *
		 *	If the fib has been timed out already, then just
		 *	continue. The caller has already been notified that
		 *	the fib timed out.
		 */
		atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);
		if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			spin_unlock_irqrestore(q->lock, flags);
			aac_fib_complete(fib);
			aac_fib_free(fib);
			spin_lock_irqsave(q->lock, flags);
			continue;
		}
		spin_unlock_irqrestore(q->lock, flags);
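		/*
		 * The queue lock stays dropped while the fib is finished
		 * off below; the completion and callback paths must not
		 * run under it.
		 */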
		if (fast) {
			/*
			 *	Doctor the fib
			 */
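			/*
			 * A fast response carries no status payload, so
			 * fake ST_OK in the data area and mark the fib as
			 * already processed by the adapter.
			 */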
			*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
			hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
			fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
		}
		FIB_COUNTER_INCREMENT(aac_config.FibRecved);

		if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
		{
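			/*
			 * NuFileSystem responses may carry extra flags in
			 * the upper 16 bits of the status word; anything
			 * there is normalized to ST_OK.
			 */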
			__le32 *pstatus = (__le32 *)hwfib->data;

			if (*pstatus & cpu_to_le32(0xffff0000))
				*pstatus = cpu_to_le32(ST_OK);
		}
		if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
		{
			if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
				FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
			else
				FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
			/*
			 *	NOTE:  we cannot touch the fib after this
			 *	call, because it may have been deallocated.
			 */
			fib->callback(fib->callback_data, fib);
		} else {
			unsigned long flagv;

			spin_lock_irqsave(&fib->event_lock, flagv);
			if (!fib->done) {
				fib->done = 1;
				complete(&fib->event_wait);
			}
			spin_unlock_irqrestore(&fib->event_lock, flagv);

			spin_lock_irqsave(&dev->manage_lock, mflags);
			dev->management_fib_count--;
			spin_unlock_irqrestore(&dev->manage_lock, mflags);

			FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
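			/*
			 * done == 2 means the waiter has abandoned this
			 * fib (its wait was interrupted or timed out), so
			 * this path now owns it and must complete and
			 * free it.
			 */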
			if (fib->done == 2) {
				spin_lock_irqsave(&fib->event_lock, flagv);
				fib->done = 0;
				spin_unlock_irqrestore(&fib->event_lock, flagv);
				aac_fib_complete(fib);
				aac_fib_free(fib);
			}
		}
		consumed++;
		spin_lock_irqsave(q->lock, flags);
	}

	if (consumed > aac_config.peak_fibs)
		aac_config.peak_fibs = consumed;
	if (consumed == 0)
		aac_config.zero_fibs++;

	spin_unlock_irqrestore(q->lock, flags);
	return 0;
}

/**
 *	aac_command_normal	-	handle commands
 *	@q: queue to process
 *
 *	This DPC routine will be queued when the adapter interrupts us to
 *	let us know there is a command on our normal priority queue. We will
 *	pull off all the QEs there are and wake up all the waiters before
 *	exiting. We will take a spinlock out on the queue before operating
 *	on it.
 */
unsigned int aac_command_normal(struct aac_queue *q)
{
	struct aac_dev *dev = q->dev;
	struct aac_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(q->lock, flags);

	/*
	 *	Keep pulling response QEs off the response queue and waking
	 *	up the waiters until there are no more QEs. We then return
	 *	back to the system.
	 */
	while (aac_consumer_get(dev, q, &entry))
	{
		struct fib fibctx;
		struct hw_fib *hw_fib;
		u32 index;
		struct fib *fib = &fibctx;

		index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
		hw_fib = &dev->aif_base_va[index];

		/*
		 *	Allocate a FIB at all costs. For non queued stuff
		 *	we can just use the stack so we are happy. We need
		 *	a fib object in order to manage the linked lists.
		 */
		if (dev->aif_thread)
			if ((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL)
				fib = &fibctx;

		memset(fib, 0, sizeof(struct fib));
		INIT_LIST_HEAD(&fib->fiblink);
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;

		if (dev->aif_thread && fib != &fibctx) {
			list_add_tail(&fib->fiblink, &q->cmdq);
			aac_consumer_free(dev, q, HostNormCmdQueue);
			wake_up_interruptible(&q->cmdready);
		} else {
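			/*
			 * Either there is no aif thread, or the kmalloc
			 * above failed and we fell back to the on-stack
			 * fib: complete the command inline and hand the
			 * hw_fib straight back to the adapter.
			 */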
			aac_consumer_free(dev, q, HostNormCmdQueue);
			spin_unlock_irqrestore(q->lock, flags);
			/*
			 *	Set the status of this FIB
			 */
			*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
			aac_fib_adapter_complete(fib, sizeof(u32));
			spin_lock_irqsave(q->lock, flags);
		}
	}
	spin_unlock_irqrestore(q->lock, flags);
	return 0;
}

/*
 *	aac_aif_callback
 *	@context: the context set in the fib - here it is the fib itself
 *	@fibptr: pointer to the fib
 *
 *	Handles the AIFs - new method (SRC)
 *
 */
static void aac_aif_callback(void *context, struct fib *fibptr)
{
	struct fib *fibctx;
	struct aac_dev *dev;
	struct aac_aifcmd *cmd;
	int status;

	fibctx = (struct fib *)context;
	BUG_ON(fibptr == NULL);
	dev = fibptr->dev;
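	/*
	 * If the adapter reports that no more AIF data is available,
	 * or this is SA firmware (whose events arrive by a different
	 * path), retire the fib instead of resubmitting the request.
	 */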
	if ((fibptr->hw_fib_va->header.XferState &
	    cpu_to_le32(NoMoreAifDataAvailable)) ||
	    dev->sa_firmware) {
		aac_fib_complete(fibptr);
		aac_fib_free(fibptr);
		return;
	}
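	/*
	 * Hand the received AIF to the normal interrupt path for
	 * processing, then re-arm: send a fresh AifRequest with this
	 * routine as the callback so the next event lands here too.
	 */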
	aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va);

	aac_fib_init(fibctx);
	cmd = (struct aac_aifcmd *)fib_data(fibctx);
	cmd->command = cpu_to_le32(AifReqEvent);

	status = aac_fib_send(AifRequest,
		fibctx,
		sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
		FsaNormal,
		0, 1,
		(fib_callback)aac_aif_callback, fibctx);
}

/**
 *	aac_intr_normal	-	Handle command replies
 *	@dev: Device
 *	@index: completion reference
 *	@isAif: whether this is an AIF (1 = common, 2 = new/SRC)
 *	@isFastResponse: whether this is a fast response
 *	@aif_fib: pointer to the AIF hw_fib, if one was supplied
 *
 *	This DPC routine will be run when the adapter interrupts us to let us
 *	know there is a response on our normal priority queue. We will pull off
 *	all the QEs there are and wake up all the waiters before exiting.
 */
unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
			int isFastResponse, struct hw_fib *aif_fib)
{
	unsigned long mflags;

	dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));

	if (isAif == 1) {	/* AIF - common */
		struct hw_fib *hw_fib;
		struct fib *fib;
		struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
		unsigned long flags;

		/*
		 *	Allocate a FIB. For non queued stuff we can just use
		 *	the stack so we are happy. We need a fib object in order
		 *	to manage the linked lists.
		 */
		if ((!dev->aif_thread)
		 || (!(fib = kzalloc(sizeof(struct fib), GFP_ATOMIC))))
			return 1;
		if (!(hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC))) {
			kfree(fib);
			return 1;
		}
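		/*
		 * Capture the AIF payload: SA firmware passes only an
		 * event type in @index; otherwise copy the hw_fib either
		 * from the one supplied or straight out of the mapped
		 * adapter memory at offset @index.
		 */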
		if (dev->sa_firmware) {
			fib->hbacmd_size = index;	/* store event type */
		} else if (aif_fib != NULL) {
			memcpy(hw_fib, aif_fib, sizeof(struct hw_fib));
		} else {
			memcpy(hw_fib, (struct hw_fib *)
				(((uintptr_t)(dev->regs.sa)) + index),
				sizeof(struct hw_fib));
		}
		INIT_LIST_HEAD(&fib->fiblink);
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;

		spin_lock_irqsave(q->lock, flags);
		list_add_tail(&fib->fiblink, &q->cmdq);
		wake_up_interruptible(&q->cmdready);
		spin_unlock_irqrestore(q->lock, flags);
		return 1;
	} else if (isAif == 2) {	/* AIF - new (SRC) */
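		/*
		 * Prime the SRC-style AIF mechanism: allocate a fib and
		 * send the first AifRequest; aac_aif_callback then keeps
		 * the chain alive by resubmitting after each event.
		 */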
		struct fib *fibctx;
		struct aac_aifcmd *cmd;

		fibctx = aac_fib_alloc(dev);
		if (!fibctx)
			return 1;
		aac_fib_init(fibctx);

		cmd = (struct aac_aifcmd *)fib_data(fibctx);
		cmd->command = cpu_to_le32(AifReqEvent);

		return aac_fib_send(AifRequest,
			fibctx,
			sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
			FsaNormal,
			0, 1,
			(fib_callback)aac_aif_callback, fibctx);
} else {
2011-03-17 12:10:32 +03:00
struct fib * fib = & dev - > fibs [ index ] ;
2017-02-03 02:53:29 +03:00
int start_callback = 0 ;
2005-10-24 21:52:22 +04:00

		/*
		 *	Remove this fib from the Outstanding I/O queue.
		 *	But only if it has not already been timed out.
		 *
		 *	If the fib has been timed out already, then just
		 *	continue. The caller has already been notified that
		 *	the fib timed out.
		 */
		atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);
		if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			aac_fib_complete(fib);
			aac_fib_free(fib);
			return 0;
		}

		FIB_COUNTER_INCREMENT(aac_config.FibRecved);
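		/*
		 * Native (HBA) fibs have no hw_fib header to doctor;
		 * a fast response is simply flagged on the fib.
		 */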
		if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
			if (isFastResponse)
				fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;

			if (fib->callback) {
				start_callback = 1;
			} else {
				unsigned long flagv;
				int completed = 0;

				dprintk((KERN_INFO "event_wait up\n"));
				spin_lock_irqsave(&fib->event_lock, flagv);
				if (fib->done == 2) {
					fib->done = 1;
					completed = 1;
				} else {
					fib->done = 1;
					complete(&fib->event_wait);
				}
				spin_unlock_irqrestore(&fib->event_lock, flagv);

				spin_lock_irqsave(&dev->manage_lock, mflags);
				dev->management_fib_count--;
				spin_unlock_irqrestore(&dev->manage_lock,
					mflags);

				FIB_COUNTER_INCREMENT(aac_config.NativeRecved);
				if (completed)
					aac_fib_complete(fib);
			}
		} else {
			struct hw_fib *hwfib = fib->hw_fib_va;

			if (isFastResponse) {
				/* Doctor the fib */
				*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
				hwfib->header.XferState |=
					cpu_to_le32(AdapterProcessed);
				fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
			}
			if (hwfib->header.Command ==
				cpu_to_le16(NuFileSystem)) {
				__le32 *pstatus = (__le32 *)hwfib->data;

				if (*pstatus & cpu_to_le32(0xffff0000))
					*pstatus = cpu_to_le32(ST_OK);
			}
			if (hwfib->header.XferState &
				cpu_to_le32(NoResponseExpected | Async)) {
				if (hwfib->header.XferState & cpu_to_le32(
					NoResponseExpected))
					FIB_COUNTER_INCREMENT(
						aac_config.NoResponseRecved);
				else
					FIB_COUNTER_INCREMENT(
						aac_config.AsyncRecved);
				start_callback = 1;
			} else {
				unsigned long flagv;
				int completed = 0;

				dprintk((KERN_INFO "event_wait up\n"));
				spin_lock_irqsave(&fib->event_lock, flagv);
				if (fib->done == 2) {
					fib->done = 1;
					completed = 1;
				} else {
					fib->done = 1;
					complete(&fib->event_wait);
				}
				spin_unlock_irqrestore(&fib->event_lock, flagv);

				spin_lock_irqsave(&dev->manage_lock, mflags);
				dev->management_fib_count--;
				spin_unlock_irqrestore(&dev->manage_lock,
					mflags);

				FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
				if (completed)
					aac_fib_complete(fib);
			}
		}
		if (start_callback) {
			/*
			 *	NOTE:  we cannot touch the fib after this
			 *	call, because it may have been deallocated.
			 */
			if (likely(fib->callback && fib->callback_data)) {
				fib->callback(fib->callback_data, fib);
			} else {
				aac_fib_complete(fib);
				aac_fib_free(fib);
			}
		}
		return 0;
	}
}