2005-04-17 02:20:36 +04:00
/*
* Adaptec AAC series RAID controller driver
2008-10-27 18:16:36 +03:00
* ( c ) Copyright 2001 Red Hat Inc .
2005-04-17 02:20:36 +04:00
*
* based on the old aacraid driver that is . .
* Adaptec aacraid device driver for Linux .
*
2011-03-17 12:10:32 +03:00
* Copyright ( c ) 2000 - 2010 Adaptec , Inc .
* 2010 PMC - Sierra , Inc . ( aacraid @ pmc - sierra . com )
2005-04-17 02:20:36 +04:00
*
* This program is free software ; you can redistribute it and / or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation ; either version 2 , or ( at your option )
* any later version .
*
* This program is distributed in the hope that it will be useful ,
* but WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the
* GNU General Public License for more details .
*
* You should have received a copy of the GNU General Public License
* along with this program ; see the file COPYING . If not , write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Module Name :
* commsup . c
*
* Abstract : Contain all routines that are required for FSA host / adapter
2005-05-17 05:28:42 +04:00
* communication .
2005-04-17 02:20:36 +04:00
*
*/
# include <linux/kernel.h>
# include <linux/init.h>
# include <linux/types.h>
# include <linux/sched.h>
# include <linux/pci.h>
# include <linux/spinlock.h>
# include <linux/slab.h>
# include <linux/completion.h>
# include <linux/blkdev.h>
2005-12-01 07:47:05 +03:00
# include <linux/delay.h>
2006-02-14 20:45:06 +03:00
# include <linux/kthread.h>
2006-09-25 02:45:29 +04:00
# include <linux/interrupt.h>
2008-04-19 06:21:05 +04:00
# include <linux/semaphore.h>
2006-08-03 19:03:30 +04:00
# include <scsi/scsi.h>
2005-05-17 05:28:42 +04:00
# include <scsi/scsi_host.h>
2005-09-27 00:04:56 +04:00
# include <scsi/scsi_device.h>
2006-08-03 19:03:30 +04:00
# include <scsi/scsi_cmnd.h>
2005-04-17 02:20:36 +04:00
# include "aacraid.h"
/**
* fib_map_alloc - allocate the fib objects
* @ dev : Adapter to allocate for
*
* Allocate and map the shared PCI space for the FIB blocks used to
* talk to the Adaptec firmware .
*/
2008-01-16 18:39:06 +03:00
2005-04-17 02:20:36 +04:00
static int fib_map_alloc ( struct aac_dev * dev )
{
2005-05-17 05:28:42 +04:00
dprintk ( ( KERN_INFO
" allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p) \n " ,
dev - > pdev , dev - > max_fib_size , dev - > scsi_host_ptr - > can_queue ,
AAC_NUM_MGT_FIB , & dev - > hw_fib_pa ) ) ;
2011-03-17 12:10:32 +03:00
dev - > hw_fib_va = pci_alloc_consistent ( dev - > pdev ,
( dev - > max_fib_size + sizeof ( struct aac_fib_xporthdr ) )
* ( dev - > scsi_host_ptr - > can_queue + AAC_NUM_MGT_FIB ) + ( ALIGN32 - 1 ) ,
& dev - > hw_fib_pa ) ;
if ( dev - > hw_fib_va = = NULL )
2005-04-17 02:20:36 +04:00
return - ENOMEM ;
return 0 ;
}
/**
2006-02-01 20:30:55 +03:00
* aac_fib_map_free - free the fib objects
2005-04-17 02:20:36 +04:00
* @ dev : Adapter to free
*
* Free the PCI mappings and the memory allocated for FIB blocks
* on this adapter .
*/
2006-02-01 20:30:55 +03:00
void aac_fib_map_free ( struct aac_dev * dev )
2005-04-17 02:20:36 +04:00
{
2007-07-17 19:15:08 +04:00
pci_free_consistent ( dev - > pdev ,
dev - > max_fib_size * ( dev - > scsi_host_ptr - > can_queue + AAC_NUM_MGT_FIB ) ,
dev - > hw_fib_va , dev - > hw_fib_pa ) ;
dev - > hw_fib_va = NULL ;
dev - > hw_fib_pa = 0 ;
2005-04-17 02:20:36 +04:00
}
/**
2006-02-01 20:30:55 +03:00
* aac_fib_setup - setup the fibs
2005-04-17 02:20:36 +04:00
* @ dev : Adapter to set up
*
tree-wide: fix comment/printk typos
"gadget", "through", "command", "maintain", "maintain", "controller", "address",
"between", "initiali[zs]e", "instead", "function", "select", "already",
"equal", "access", "management", "hierarchy", "registration", "interest",
"relative", "memory", "offset", "already",
Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
2010-11-01 22:38:34 +03:00
* Allocate the PCI space for the fibs , map it and then initialise the
2005-04-17 02:20:36 +04:00
* fib area , the unmapped fib data and also the free list
*/
2006-02-01 20:30:55 +03:00
int aac_fib_setup ( struct aac_dev * dev )
2005-04-17 02:20:36 +04:00
{
struct fib * fibptr ;
2007-03-15 20:26:22 +03:00
struct hw_fib * hw_fib ;
2005-04-17 02:20:36 +04:00
dma_addr_t hw_fib_pa ;
int i ;
2005-05-17 05:28:42 +04:00
while ( ( ( i = fib_map_alloc ( dev ) ) = = - ENOMEM )
& & ( dev - > scsi_host_ptr - > can_queue > ( 64 - AAC_NUM_MGT_FIB ) ) ) {
dev - > init - > MaxIoCommands = cpu_to_le32 ( ( dev - > scsi_host_ptr - > can_queue + AAC_NUM_MGT_FIB ) > > 1 ) ;
dev - > scsi_host_ptr - > can_queue = le32_to_cpu ( dev - > init - > MaxIoCommands ) - AAC_NUM_MGT_FIB ;
}
if ( i < 0 )
2005-04-17 02:20:36 +04:00
return - ENOMEM ;
2008-01-16 18:39:06 +03:00
2011-03-17 12:10:32 +03:00
/* 32 byte alignment for PMC */
hw_fib_pa = ( dev - > hw_fib_pa + ( ALIGN32 - 1 ) ) & ~ ( ALIGN32 - 1 ) ;
dev - > hw_fib_va = ( struct hw_fib * ) ( ( unsigned char * ) dev - > hw_fib_va +
( hw_fib_pa - dev - > hw_fib_pa ) ) ;
dev - > hw_fib_pa = hw_fib_pa ;
memset ( dev - > hw_fib_va , 0 ,
( dev - > max_fib_size + sizeof ( struct aac_fib_xporthdr ) ) *
( dev - > scsi_host_ptr - > can_queue + AAC_NUM_MGT_FIB ) ) ;
/* add Xport header */
dev - > hw_fib_va = ( struct hw_fib * ) ( ( unsigned char * ) dev - > hw_fib_va +
sizeof ( struct aac_fib_xporthdr ) ) ;
dev - > hw_fib_pa + = sizeof ( struct aac_fib_xporthdr ) ;
2007-03-15 20:26:22 +03:00
hw_fib = dev - > hw_fib_va ;
2005-04-17 02:20:36 +04:00
hw_fib_pa = dev - > hw_fib_pa ;
/*
* Initialise the fibs
*/
2008-01-16 18:39:06 +03:00
for ( i = 0 , fibptr = & dev - > fibs [ i ] ;
i < ( dev - > scsi_host_ptr - > can_queue + AAC_NUM_MGT_FIB ) ;
i + + , fibptr + + )
2005-04-17 02:20:36 +04:00
{
2012-07-14 16:48:51 +04:00
fibptr - > flags = 0 ;
2005-04-17 02:20:36 +04:00
fibptr - > dev = dev ;
2007-03-15 20:26:22 +03:00
fibptr - > hw_fib_va = hw_fib ;
fibptr - > data = ( void * ) fibptr - > hw_fib_va - > data ;
2005-04-17 02:20:36 +04:00
fibptr - > next = fibptr + 1 ; /* Forward chain the fibs */
2010-09-07 18:32:47 +04:00
sema_init ( & fibptr - > event_wait , 0 ) ;
2005-04-17 02:20:36 +04:00
spin_lock_init ( & fibptr - > event_lock ) ;
2007-03-15 20:26:22 +03:00
hw_fib - > header . XferState = cpu_to_le32 ( 0xffffffff ) ;
hw_fib - > header . SenderSize = cpu_to_le16 ( dev - > max_fib_size ) ;
2005-04-17 02:20:36 +04:00
fibptr - > hw_fib_pa = hw_fib_pa ;
2011-03-17 12:10:32 +03:00
hw_fib = ( struct hw_fib * ) ( ( unsigned char * ) hw_fib +
dev - > max_fib_size + sizeof ( struct aac_fib_xporthdr ) ) ;
hw_fib_pa = hw_fib_pa +
dev - > max_fib_size + sizeof ( struct aac_fib_xporthdr ) ;
2005-04-17 02:20:36 +04:00
}
/*
* Add the fib chain to the free list
*/
2005-05-17 05:28:42 +04:00
dev - > fibs [ dev - > scsi_host_ptr - > can_queue + AAC_NUM_MGT_FIB - 1 ] . next = NULL ;
2005-04-17 02:20:36 +04:00
/*
* Enable this to debug out of queue space
*/
dev - > free_fib = & dev - > fibs [ 0 ] ;
return 0 ;
}
/**
2006-02-01 20:30:55 +03:00
* aac_fib_alloc - allocate a fib
2005-04-17 02:20:36 +04:00
* @ dev : Adapter to allocate the fib for
*
* Allocate a fib from the adapter fib pool . If the pool is empty we
2005-05-17 05:28:42 +04:00
* return NULL .
2005-04-17 02:20:36 +04:00
*/
2008-01-16 18:39:06 +03:00
2006-02-01 20:30:55 +03:00
struct fib * aac_fib_alloc ( struct aac_dev * dev )
2005-04-17 02:20:36 +04:00
{
struct fib * fibptr ;
unsigned long flags ;
spin_lock_irqsave ( & dev - > fib_lock , flags ) ;
2008-01-16 18:39:06 +03:00
fibptr = dev - > free_fib ;
2005-05-17 05:28:42 +04:00
if ( ! fibptr ) {
spin_unlock_irqrestore ( & dev - > fib_lock , flags ) ;
return fibptr ;
}
2005-04-17 02:20:36 +04:00
dev - > free_fib = fibptr - > next ;
spin_unlock_irqrestore ( & dev - > fib_lock , flags ) ;
/*
* Set the proper node type code and node byte size
*/
fibptr - > type = FSAFS_NTC_FIB_CONTEXT ;
fibptr - > size = sizeof ( struct fib ) ;
/*
* Null out fields that depend on being zero at the start of
* each I / O
*/
2007-03-15 20:26:22 +03:00
fibptr - > hw_fib_va - > header . XferState = 0 ;
2008-01-09 00:26:43 +03:00
fibptr - > flags = 0 ;
2005-04-17 02:20:36 +04:00
fibptr - > callback = NULL ;
fibptr - > callback_data = NULL ;
return fibptr ;
}
/**
2006-02-01 20:30:55 +03:00
* aac_fib_free - free a fib
2005-04-17 02:20:36 +04:00
* @ fibptr : fib to free up
*
* Frees up a fib and places it on the appropriate queue
*/
2008-01-16 18:39:06 +03:00
2006-02-01 20:30:55 +03:00
void aac_fib_free ( struct fib * fibptr )
2005-04-17 02:20:36 +04:00
{
2015-03-26 17:41:30 +03:00
unsigned long flags ;
2009-12-21 16:09:27 +03:00
2015-03-26 17:41:30 +03:00
if ( fibptr - > done = = 2 )
2009-12-21 16:09:27 +03:00
return ;
2005-04-17 02:20:36 +04:00
spin_lock_irqsave ( & fibptr - > dev - > fib_lock , flags ) ;
2007-03-15 20:27:45 +03:00
if ( unlikely ( fibptr - > flags & FIB_CONTEXT_FLAG_TIMED_OUT ) )
2005-04-17 02:20:36 +04:00
aac_config . fib_timeouts + + ;
2007-03-15 20:27:45 +03:00
if ( fibptr - > hw_fib_va - > header . XferState ! = 0 ) {
printk ( KERN_WARNING " aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x \n " ,
( void * ) fibptr ,
le32_to_cpu ( fibptr - > hw_fib_va - > header . XferState ) ) ;
}
fibptr - > next = fibptr - > dev - > free_fib ;
fibptr - > dev - > free_fib = fibptr ;
2005-04-17 02:20:36 +04:00
spin_unlock_irqrestore ( & fibptr - > dev - > fib_lock , flags ) ;
}
/**
2006-02-01 20:30:55 +03:00
* aac_fib_init - initialise a fib
2005-04-17 02:20:36 +04:00
* @ fibptr : The fib to initialize
2008-01-16 18:39:06 +03:00
*
2005-04-17 02:20:36 +04:00
* Set up the generic fib fields ready for use
*/
2008-01-16 18:39:06 +03:00
2006-02-01 20:30:55 +03:00
void aac_fib_init ( struct fib * fibptr )
2005-04-17 02:20:36 +04:00
{
2007-03-15 20:26:22 +03:00
struct hw_fib * hw_fib = fibptr - > hw_fib_va ;
2005-04-17 02:20:36 +04:00
2012-07-14 16:48:51 +04:00
memset ( & hw_fib - > header , 0 , sizeof ( struct aac_fibhdr ) ) ;
2005-04-17 02:20:36 +04:00
hw_fib - > header . StructType = FIB_MAGIC ;
2005-05-17 05:28:42 +04:00
hw_fib - > header . Size = cpu_to_le16 ( fibptr - > dev - > max_fib_size ) ;
hw_fib - > header . XferState = cpu_to_le32 ( HostOwned | FibInitialized | FibEmpty | FastResponseCapable ) ;
2012-07-14 16:48:51 +04:00
hw_fib - > header . u . ReceiverFibAddress = cpu_to_le32 ( fibptr - > hw_fib_pa ) ;
2005-05-17 05:28:42 +04:00
hw_fib - > header . SenderSize = cpu_to_le16 ( fibptr - > dev - > max_fib_size ) ;
2005-04-17 02:20:36 +04:00
}
/**
* fib_deallocate - deallocate a fib
* @ fibptr : fib to deallocate
*
* Will deallocate and return to the free pool the FIB pointed to by the
* caller .
*/
2008-01-16 18:39:06 +03:00
2005-04-26 06:45:58 +04:00
static void fib_dealloc ( struct fib * fibptr )
2005-04-17 02:20:36 +04:00
{
2007-03-15 20:26:22 +03:00
struct hw_fib * hw_fib = fibptr - > hw_fib_va ;
2008-01-16 18:39:06 +03:00
hw_fib - > header . XferState = 0 ;
2005-04-17 02:20:36 +04:00
}
/*
 *	Communication primitives define and support the queuing method we use
 *	to support host to adapter communication. All queue accesses happen
 *	through these routines and are the only routines which have a
 *	knowledge of how these queues are implemented.
 */
2008-01-16 18:39:06 +03:00
2005-04-17 02:20:36 +04:00
/**
* aac_get_entry - get a queue entry
* @ dev : Adapter
* @ qid : Queue Number
* @ entry : Entry return
* @ index : Index return
* @ nonotify : notification control
*
* With a priority the routine returns a queue entry if the queue has free entries . If the queue
* is full ( no free entries ) than no entry is returned and the function returns 0 otherwise 1 is
* returned .
*/
2008-01-16 18:39:06 +03:00
2005-04-17 02:20:36 +04:00
static int aac_get_entry(struct aac_dev *dev, u32 qid, struct aac_entry **entry, u32 *index, unsigned long *nonotify)
{
	struct aac_queue *q;
	unsigned long idx;

	/*
	 *	All of the queues wrap when they reach the end, so we check
	 *	to see if they have reached the end and if they have we just
	 *	set the index back to zero. This is a wrap. You could or off
	 *	the high bits in all updates but this is a bit faster I think.
	 */
	q = &dev->queues->queue[qid];

	idx = *index = le32_to_cpu(*(q->headers.producer));
	/* Interrupt Moderation, only interrupt for first two entries */
	if (idx != le32_to_cpu(*(q->headers.consumer))) {
		if (--idx == 0) {
			if (qid == AdapNormCmdQueue)
				idx = ADAP_NORM_CMD_ENTRIES;
			else
				idx = ADAP_NORM_RESP_ENTRIES;
		}
		if (idx != le32_to_cpu(*(q->headers.consumer)))
			*nonotify = 1;
	}

	if (qid == AdapNormCmdQueue) {
		if (*index >= ADAP_NORM_CMD_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	} else {
		if (*index >= ADAP_NORM_RESP_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	}

	/* Queue is full */
	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
				qid, atomic_read(&q->numpending));
		return 0;
	} else {
		*entry = q->base + *index;
		return 1;
	}
}
2005-04-17 02:20:36 +04:00
/**
* aac_queue_get - get the next free QE
* @ dev : Adapter
* @ index : Returned index
* @ priority : Priority of fib
* @ fib : Fib to associate with the queue entry
* @ wait : Wait if queue full
* @ fibptr : Driver fib object to go with fib
* @ nonotify : Don ' t notify the adapter
*
* Gets the next free QE off the requested priorty adapter command
* queue and associates the Fib with the QE . The QE represented by
* index is ready to insert on the queue when this routine returns
* success .
*/
2007-01-24 01:59:20 +03:00
int aac_queue_get(struct aac_dev *dev, u32 *index, u32 qid, struct hw_fib *hw_fib, int wait, struct fib *fibptr, unsigned long *nonotify)
{
	struct aac_entry *entry = NULL;
	int map = 0;

	if (qid == AdapNormCmdQueue) {
		/* if no entries wait for some if caller wants to */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			printk(KERN_ERR "GetEntries failed\n");
		}
		/*
		 *	Setup queue entry with a command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		map = 1;
	} else {
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			/* if no entries wait for some if caller wants to */
		}
		/*
		 *	Setup queue entry with command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		entry->addr = hw_fib->header.SenderFibAddress;
		/* Restore adapters pointer to the FIB */
		hw_fib->header.u.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
		map = 0;
	}
	/*
	 *	If MapFib is true than we need to map the Fib and put pointers
	 *	in the queue entry.
	 */
	if (map)
		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
	return 0;
}
/*
 *	Define the highest level of host to adapter communication routines.
 *	These routines will support host to adapter FS communication. These
 *	routines have no knowledge of the communication method used. This level
 *	sends and receives FIBs. This level has no knowledge of how these FIBs
 *	get passed back and forth.
 */
/**
2006-02-01 20:30:55 +03:00
* aac_fib_send - send a fib to the adapter
2005-04-17 02:20:36 +04:00
* @ command : Command to send
* @ fibptr : The fib
* @ size : Size of fib data area
* @ priority : Priority of Fib
* @ wait : Async / sync select
* @ reply : True if a reply is wanted
* @ callback : Called with reply
* @ callback_data : Passed to callback
*
* Sends the requested FIB to the adapter and optionally will wait for a
* response FIB . If the caller does not wish to wait for a response than
* an event to wait on must be supplied . This event will be set when a
* response FIB is received from the adapter .
*/
2008-01-16 18:39:06 +03:00
2006-02-01 20:30:55 +03:00
int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
		int priority, int wait, int reply, fib_callback callback,
		void *callback_data)
{
	struct aac_dev *dev = fibptr->dev;
	struct hw_fib *hw_fib = fibptr->hw_fib_va;
	unsigned long flags = 0;
	unsigned long mflags = 0;
	unsigned long sflags = 0;

	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
		return -EBUSY;
	/*
	 *	There are 5 cases with the wait and response requested flags.
	 *	The only invalid cases are if the caller requests to wait and
	 *	does not request a response and if the caller does not want a
	 *	response and the Fib is not allocated from pool. If a response
	 *	is not requested the Fib will just be deallocated by the DPC
	 *	routine when the response comes back from the adapter. No
	 *	further processing will be done besides deleting the Fib. We
	 *	will have a debug mode where the adapter can notify the host
	 *	it had a problem and the host can log that fact.
	 */
	fibptr->flags = 0;
	if (wait && !reply) {
		return -EINVAL;
	} else if (!wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
	} else if (!wait && !reply) {
		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
	} else if (wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
	}
	/*
	 *	Map the fib into 32bits by using the fib number
	 */
	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
	hw_fib->header.Handle = (u32)(fibptr - dev->fibs) + 1;
	/*
	 *	Set FIB state to indicate where it came from and if we want a
	 *	response from the adapter. Also load the command from the
	 *	caller.
	 *
	 *	Map the hw fib pointer as a 32bit value
	 */
	hw_fib->header.Command = cpu_to_le16(command);
	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
	/*
	 *	Set the size of the Fib we want to send to the adapter
	 */
	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
		return -EMSGSIZE;
	}
	/*
	 *	Get a queue entry connect the FIB to it and send an notify
	 *	the adapter a command is ready.
	 */
	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);

	/*
	 *	Fill in the Callback and CallbackContext if we are not
	 *	going to wait.
	 */
	if (!wait) {
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
		fibptr->flags = FIB_CONTEXT_FLAG;
	}

	fibptr->done = 0;

	FIB_COUNTER_INCREMENT(aac_config.FibsSent);

	dprintk((KERN_DEBUG "Fib contents:.\n"));
	dprintk((KERN_DEBUG "  Command =               %d.\n", le32_to_cpu(hw_fib->header.Command)));
	dprintk((KERN_DEBUG "  SubCommand =            %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
	dprintk((KERN_DEBUG "  XferState  =            %x.\n", le32_to_cpu(hw_fib->header.XferState)));
	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n", fibptr->hw_fib_va));
	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n", (ulong)fibptr->hw_fib_pa));
	dprintk((KERN_DEBUG "  fib being sent=%p\n", fibptr));

	if (!dev->queues)
		return -EBUSY;

	if (wait) {
		/* Waiting sends consume a management fib slot; fail fast if
		 * none are available. */
		spin_lock_irqsave(&dev->manage_lock, mflags);
		if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
			printk(KERN_INFO "No management Fibs Available:%d\n",
						dev->management_fib_count);
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
			return -EBUSY;
		}
		dev->management_fib_count++;
		spin_unlock_irqrestore(&dev->manage_lock, mflags);
		spin_lock_irqsave(&fibptr->event_lock, flags);
	}

	if (dev->sync_mode) {
		if (wait)
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
		spin_lock_irqsave(&dev->sync_lock, sflags);
		if (dev->sync_fib) {
			/* A sync fib is already in flight; queue this one. */
			list_add_tail(&fibptr->fiblink, &dev->sync_fib_list);
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
		} else {
			dev->sync_fib = fibptr;
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
			aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
				(u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0,
				NULL, NULL, NULL, NULL, NULL);
		}
		if (wait) {
			fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
			if (down_interruptible(&fibptr->event_wait)) {
				fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
				return -EFAULT;
			}
			return 0;
		}
		return -EINPROGRESS;
	}

	if (aac_adapter_deliver(fibptr) != 0) {
		printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
		if (wait) {
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			spin_lock_irqsave(&dev->manage_lock, mflags);
			dev->management_fib_count--;
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
		}
		return -EBUSY;
	}


	/*
	 *	If the caller wanted us to wait for response wait now.
	 */

	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		/* Only set for first known interruptable command */
		if (wait < 0) {
			/* * VERY * Dangerous to time out a command, the
			 * assumption is made that we have no hope of
			 * functioning because an interrupt routing or other
			 * hardware failure has occurred.
			 */
			unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */
			while (down_trylock(&fibptr->event_wait)) {
				int blink;
				if (time_is_before_eq_jiffies(timeout)) {
					struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
					atomic_dec(&q->numpending);
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
						  "Usually a result of a PCI interrupt routing problem;\n"
						  "update mother board BIOS or consider utilizing one of\n"
						  "the SAFE mode kernel options (acpi, apic etc)\n");
					}
					return -ETIMEDOUT;
				}
				if ((blink = aac_adapter_check_health(dev)) > 0) {
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
						  "Usually a result of a serious unrecoverable hardware problem\n",
						  blink);
					}
					return -EFAULT;
				}
				/* We used to udelay() here but that absorbed
				 * a CPU when a timeout occured. Not very
				 * useful. */
				cpu_relax();
			}
		} else if (down_interruptible(&fibptr->event_wait)) {
			/* Do nothing ... satisfy
			 * down_interruptible must_check */
		}

		spin_lock_irqsave(&fibptr->event_lock, flags);
		if (fibptr->done == 0) {
			fibptr->done = 2; /* Tell interrupt we aborted */
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			return -ERESTARTSYS;
		}
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		BUG_ON(fibptr->done == 0);

		if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
			return -ETIMEDOUT;
		return 0;
	}
	/*
	 *	If the user does not want a response than return success otherwise
	 *	return pending
	 */
	if (reply)
		return -EINPROGRESS;
	else
		return 0;
}
2008-01-16 18:39:06 +03:00
/**
2005-04-17 02:20:36 +04:00
* aac_consumer_get - get the top of the queue
* @ dev : Adapter
* @ q : Queue
* @ entry : Return entry
*
* Will return a pointer to the entry on the top of the queue requested that
2008-01-16 18:39:06 +03:00
* we are a consumer of , and return the address of the queue entry . It does
* not change the state of the queue .
2005-04-17 02:20:36 +04:00
*/
int aac_consumer_get(struct aac_dev *dev, struct aac_queue *q, struct aac_entry **entry)
{
	u32 index;
	int status;

	/* Producer == consumer means the queue is empty. */
	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
		status = 0;
	} else {
		/*
		 *	The consumer index must be wrapped if we have reached
		 *	the end of the queue, else we just use the entry
		 *	pointed to by the header index
		 */
		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
			index = 0;
		else
			index = le32_to_cpu(*q->headers.consumer);
		*entry = q->base + index;
		status = 1;
	}
	return (status);
}
/**
* aac_consumer_free - free consumer entry
* @ dev : Adapter
* @ q : Queue
* @ qid : Queue ident
*
* Frees up the current top of the queue we are a consumer of . If the
* queue was full notify the producer that the queue is no longer full .
*/
void aac_consumer_free(struct aac_dev *dev, struct aac_queue *q, u32 qid)
{
	int wasfull = 0;
	u32 notify;

	/* Remember whether the queue was full before we consume, so we can
	 * tell the producer it has room again afterwards. */
	if ((le32_to_cpu(*q->headers.producer) + 1) == le32_to_cpu(*q->headers.consumer))
		wasfull = 1;

	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
		*q->headers.consumer = cpu_to_le32(1);
	else
		le32_add_cpu(q->headers.consumer, 1);

	if (wasfull) {
		switch (qid) {

		case HostNormCmdQueue:
			notify = HostNormCmdNotFull;
			break;
		case HostNormRespQueue:
			notify = HostNormRespNotFull;
			break;
		default:
			BUG();
			return;
		}
		aac_adapter_notify(dev, notify);
	}
}
2005-04-17 02:20:36 +04:00
/**
2006-02-01 20:30:55 +03:00
* aac_fib_adapter_complete - complete adapter issued fib
2005-04-17 02:20:36 +04:00
* @ fibptr : fib to complete
* @ size : size of fib
*
* Will do all necessary work to complete a FIB that was sent from
* the adapter .
*/
2006-02-01 20:30:55 +03:00
int aac_fib_adapter_complete ( struct fib * fibptr , unsigned short size )
2005-04-17 02:20:36 +04:00
{
2007-03-15 20:26:22 +03:00
struct hw_fib * hw_fib = fibptr - > hw_fib_va ;
2005-04-17 02:20:36 +04:00
struct aac_dev * dev = fibptr - > dev ;
2005-09-20 23:57:11 +04:00
struct aac_queue * q ;
2005-04-17 02:20:36 +04:00
unsigned long nointr = 0 ;
2005-09-20 23:57:11 +04:00
unsigned long qflags ;
2012-07-14 16:48:51 +04:00
if ( dev - > comm_interface = = AAC_COMM_MESSAGE_TYPE1 | |
dev - > comm_interface = = AAC_COMM_MESSAGE_TYPE2 ) {
2011-03-17 12:10:32 +03:00
kfree ( hw_fib ) ;
return 0 ;
}
2005-09-20 23:57:11 +04:00
if ( hw_fib - > header . XferState = = 0 ) {
2007-01-24 01:59:20 +03:00
if ( dev - > comm_interface = = AAC_COMM_MESSAGE )
2011-03-17 12:10:32 +03:00
kfree ( hw_fib ) ;
2008-01-16 18:39:06 +03:00
return 0 ;
2005-09-20 23:57:11 +04:00
}
2005-04-17 02:20:36 +04:00
/*
* If we plan to do anything check the structure type first .
2008-01-16 18:39:06 +03:00
*/
2012-07-14 16:48:51 +04:00
if ( hw_fib - > header . StructType ! = FIB_MAGIC & &
hw_fib - > header . StructType ! = FIB_MAGIC2 & &
hw_fib - > header . StructType ! = FIB_MAGIC2_64 ) {
2007-01-24 01:59:20 +03:00
if ( dev - > comm_interface = = AAC_COMM_MESSAGE )
2011-03-17 12:10:32 +03:00
kfree ( hw_fib ) ;
2008-01-16 18:39:06 +03:00
return - EINVAL ;
2005-04-17 02:20:36 +04:00
}
/*
* This block handles the case where the adapter had sent us a
* command and we have finished processing the command . We
2008-01-16 18:39:06 +03:00
* call completeFib when we are done processing the command
* and want to send a response back to the adapter . This will
2005-04-17 02:20:36 +04:00
* send the completed cdb to the adapter .
*/
if ( hw_fib - > header . XferState & cpu_to_le32 ( SentFromAdapter ) ) {
2007-01-24 01:59:20 +03:00
if ( dev - > comm_interface = = AAC_COMM_MESSAGE ) {
2005-10-24 21:52:22 +04:00
kfree ( hw_fib ) ;
} else {
2008-01-16 18:39:06 +03:00
u32 index ;
hw_fib - > header . XferState | = cpu_to_le32 ( HostProcessed ) ;
2005-10-24 21:52:22 +04:00
if ( size ) {
size + = sizeof ( struct aac_fibhdr ) ;
2008-01-16 18:39:06 +03:00
if ( size > le16_to_cpu ( hw_fib - > header . SenderSize ) )
2005-10-24 21:52:22 +04:00
return - EMSGSIZE ;
hw_fib - > header . Size = cpu_to_le16 ( size ) ;
}
q = & dev - > queues - > queue [ AdapNormRespQueue ] ;
spin_lock_irqsave ( q - > lock , qflags ) ;
aac_queue_get ( dev , & index , AdapNormRespQueue , hw_fib , 1 , NULL , & nointr ) ;
* ( q - > headers . producer ) = cpu_to_le32 ( index + 1 ) ;
spin_unlock_irqrestore ( q - > lock , qflags ) ;
if ( ! ( nointr & ( int ) aac_config . irq_mod ) )
aac_adapter_notify ( dev , AdapNormRespQueue ) ;
2005-04-17 02:20:36 +04:00
}
2008-01-16 18:39:06 +03:00
} else {
printk ( KERN_WARNING " aac_fib_adapter_complete: "
" Unknown xferstate detected. \n " ) ;
BUG ( ) ;
2005-04-17 02:20:36 +04:00
}
return 0 ;
}
/**
2006-02-01 20:30:55 +03:00
* aac_fib_complete - fib completion handler
2005-04-17 02:20:36 +04:00
* @ fib : FIB to complete
*
* Will do all necessary work to complete a FIB .
*/
2008-01-16 18:39:06 +03:00
2006-02-01 20:30:55 +03:00
int aac_fib_complete ( struct fib * fibptr )
2005-04-17 02:20:36 +04:00
{
2007-03-15 20:26:22 +03:00
struct hw_fib * hw_fib = fibptr - > hw_fib_va ;
2005-04-17 02:20:36 +04:00
/*
* Check for a fib which has already been completed
*/
if ( hw_fib - > header . XferState = = 0 )
2008-01-16 18:39:06 +03:00
return 0 ;
2005-04-17 02:20:36 +04:00
/*
* If we plan to do anything check the structure type first .
2008-01-16 18:39:06 +03:00
*/
2005-04-17 02:20:36 +04:00
2012-07-14 16:48:51 +04:00
if ( hw_fib - > header . StructType ! = FIB_MAGIC & &
hw_fib - > header . StructType ! = FIB_MAGIC2 & &
hw_fib - > header . StructType ! = FIB_MAGIC2_64 )
2008-01-16 18:39:06 +03:00
return - EINVAL ;
2005-04-17 02:20:36 +04:00
/*
2008-01-16 18:39:06 +03:00
* This block completes a cdb which orginated on the host and we
2005-04-17 02:20:36 +04:00
* just need to deallocate the cdb or reinit it . At this point the
* command is complete that we had sent to the adapter and this
* cdb could be reused .
*/
2009-12-21 16:09:27 +03:00
2005-04-17 02:20:36 +04:00
if ( ( hw_fib - > header . XferState & cpu_to_le32 ( SentFromHost ) ) & &
( hw_fib - > header . XferState & cpu_to_le32 ( AdapterProcessed ) ) )
{
fib_dealloc ( fibptr ) ;
}
else if ( hw_fib - > header . XferState & cpu_to_le32 ( SentFromHost ) )
{
/*
* This handles the case when the host has aborted the I / O
* to the adapter because the adapter is not responding
*/
fib_dealloc ( fibptr ) ;
} else if ( hw_fib - > header . XferState & cpu_to_le32 ( HostOwned ) ) {
fib_dealloc ( fibptr ) ;
} else {
BUG ( ) ;
2008-01-16 18:39:06 +03:00
}
2005-04-17 02:20:36 +04:00
return 0 ;
}
/**
* aac_printf - handle printf from firmware
* @ dev : Adapter
* @ val : Message info
*
* Print a message passed to us by the controller firmware on the
* Adaptec board
*/
void aac_printf(struct aac_dev *dev, u32 val)
{
	char *buf = dev->printfbuf;

	if (dev->printf_enabled) {
		/* val packs the message length (low 16) and level (high 16). */
		int len = val & 0xffff;
		int level = (val >> 16) & 0xffff;

		/*
		 * The printfbuf is 256 bytes; its size is hard-coded in
		 * port.c with no named constant, hence the literals here.
		 * Clamp the length and force NUL termination.
		 */
		if (len > 255)
			len = 255;
		if (buf[len] != 0)
			buf[len] = 0;

		if (level == LOG_AAC_HIGH_ERROR)
			printk(KERN_WARNING "%s:%s", dev->name, buf);
		else
			printk(KERN_INFO "%s:%s", dev->name, buf);
	}

	/* Always wipe the buffer so stale text never bleeds into the next message. */
	memset(buf, 0, 256);
}
2005-09-27 00:04:56 +04:00
/**
* aac_handle_aif - Handle a message from the firmware
* @ dev : Which adapter this fib is from
* @ fibptr : Pointer to fibptr from adapter
*
* This routine handles a driver notify fib from the adapter and
* dispatches it to the appropriate routine for handling .
*/
2015-03-26 17:41:25 +03:00
# define AIF_SNIFF_TIMEOUT (500*HZ)
2005-09-27 00:04:56 +04:00
static void aac_handle_aif(struct aac_dev *dev, struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;
	struct aac_aifcmd *aifcmd = (struct aac_aifcmd *)hw_fib->data;
	u32 channel, id, lun, container;
	struct scsi_device *device;
	enum {
		NOTHING,
		DELETE,
		ADD,
		CHANGE
	} device_config_needed = NOTHING;

	/* Sniff for container changes */

	if (!dev || !dev->fsa_dev)
		return;
	/* (u32)-1 is the "not yet decoded" sentinel for all four address parts */
	container = channel = id = lun = (u32)-1;

	/*
	 *	We have set this up to try and minimize the number of
	 * re-configures that take place. As a result of this when
	 * certain AIF's come in we will set a flag waiting for another
	 * type of AIF before setting the re-config flag.
	 */
	switch (le32_to_cpu(aifcmd->command)) {
	case AifCmdDriverNotify:
		switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
		/*
		 *	Morph or Expand complete
		 */
		case AifDenMorphComplete:
		case AifDenVolumeExtendComplete:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;

			/*
			 *	Find the scsi_device associated with the SCSI
			 * address. Make sure we have the right array, and if
			 * so set the flag to initiate a new re-config once we
			 * see an AifEnConfigChange AIF come through.
			 */

			if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
				device = scsi_device_lookup(dev->scsi_host_ptr,
					CONTAINER_TO_CHANNEL(container),
					CONTAINER_TO_ID(container),
					CONTAINER_TO_LUN(container));
				if (device) {
					dev->fsa_dev[container].config_needed = CHANGE;
					dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
					dev->fsa_dev[container].config_waiting_stamp = jiffies;
					scsi_device_put(device);
				}
			}
		}

		/*
		 *	If we are waiting on something and this happens to be
		 * that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdEventNotify:
		switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
		case AifEnBatteryEvent:
			/* NOTE(review): data[1] == 3 appears to mean "battery
			 * good enough to protect cache" -- confirm against
			 * firmware documentation. */
			dev->cache_protected =
				(((__le32 *)aifcmd->data)[1] == cpu_to_le32(3));
			break;
		/*
		 *	Add an Array.
		 */
		case AifEnAddContainer:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = ADD;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;
		/*
		 *	Delete an Array.
		 */
		case AifEnDeleteContainer:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = DELETE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;
		/*
		 *	Container change detected. If we currently are not
		 * waiting on something else, setup to wait on a Config Change.
		 */
		case AifEnContainerChange:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			/* Skip if already waiting on another AIF and the wait
			 * has not yet timed out. */
			if (dev->fsa_dev[container].config_waiting_on &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				break;
			dev->fsa_dev[container].config_needed = CHANGE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		case AifEnConfigChange:
			break;

		case AifEnAddJBOD:
		case AifEnDeleteJBOD:
			/*
			 * data[1] packs the JBOD address: bits 31-28 must be
			 * zero, 27-24 channel, 23-16 lun, 15-0 target id.
			 * container is reset to the (u32)-1 sentinel in every
			 * exit path so the generic container sniffing below
			 * is not confused by the packed value.
			 */
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if ((container >> 28)) {
				container = (u32)-1;
				break;
			}
			channel = (container >> 24) & 0xF;
			if (channel >= dev->maximum_num_channels) {
				container = (u32)-1;
				break;
			}
			id = container & 0xFFFF;
			if (id >= dev->maximum_num_physicals) {
				container = (u32)-1;
				break;
			}
			lun = (container >> 16) & 0xFF;
			container = (u32)-1;
			channel = aac_phys_to_logical(channel);
			device_config_needed =
			  (((__le32 *)aifcmd->data)[0] ==
			    cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
			/* For an add, drop any stale device at the same
			 * address before it is re-exposed. */
			if (device_config_needed == ADD) {
				device = scsi_device_lookup(dev->scsi_host_ptr,
					channel,
					id,
					lun);
				if (device) {
					scsi_remove_device(device);
					scsi_device_put(device);
				}
			}
			break;

		case AifEnEnclosureManagement:
			/*
			 * If in JBOD mode, automatic exposure of new
			 * physical target to be suppressed until configured.
			 */
			if (dev->jbod)
				break;
			switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
			case EM_DRIVE_INSERTION:
			case EM_DRIVE_REMOVAL:
			case EM_SES_DRIVE_INSERTION:
			case EM_SES_DRIVE_REMOVAL:
				/* data[2] uses the same packed-address layout
				 * as the JBOD events above. */
				container = le32_to_cpu(
					((__le32 *)aifcmd->data)[2]);
				if ((container >> 28)) {
					container = (u32)-1;
					break;
				}
				channel = (container >> 24) & 0xF;
				if (channel >= dev->maximum_num_channels) {
					container = (u32)-1;
					break;
				}
				id = container & 0xFFFF;
				lun = (container >> 16) & 0xFF;
				container = (u32)-1;
				if (id >= dev->maximum_num_physicals) {
					/* legacy dev_t ? */
					if ((0x2000 <= id) || lun || channel ||
					  ((channel = (id >> 7) & 0x3F) >=
					  dev->maximum_num_channels))
						break;
					lun = (id >> 4) & 7;
					id &= 0xF;
				}
				channel = aac_phys_to_logical(channel);
				device_config_needed =
				  ((((__le32 *)aifcmd->data)[3]
				    == cpu_to_le32(EM_DRIVE_INSERTION)) ||
				    (((__le32 *)aifcmd->data)[3]
				    == cpu_to_le32(EM_SES_DRIVE_INSERTION))) ?
				  ADD : DELETE;
				break;
			}
			break;
		}

		/*
		 *	If we are waiting on something and this happens to be
		 * that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdJobProgress:
		/*
		 *	These are job progress AIF's. When a Clear is being
		 * done on a container it is initially created then hidden from
		 * the OS. When the clear completes we don't get a config
		 * change so we monitor the job status complete on a clear then
		 * wait for a container change.
		 */

		/* Zero job finished (progress == total, or explicit success):
		 * queue an ADD for every container pending a config change. */
		if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
		    (((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] ||
		     ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = ADD;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		/* Zero job just started (progress == 0, status running):
		 * queue a DELETE instead, hiding the container while cleared. */
		if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
		    ((__le32 *)aifcmd->data)[6] == 0 &&
		    ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = DELETE;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		break;
	}

	/*
	 * Drain all pending per-container reconfigurations.  For container
	 * (array) channels we loop back via retry_next until every container
	 * with a pending, un-timed-out config_needed has been serviced.
	 */
	container = 0;
retry_next:
	if (device_config_needed == NOTHING)
	for (; container < dev->maximum_num_containers; ++container) {
		if ((dev->fsa_dev[container].config_waiting_on == 0) &&
			(dev->fsa_dev[container].config_needed != NOTHING) &&
			time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
			device_config_needed =
				dev->fsa_dev[container].config_needed;
			dev->fsa_dev[container].config_needed = NOTHING;
			channel = CONTAINER_TO_CHANNEL(container);
			id = CONTAINER_TO_ID(container);
			lun = CONTAINER_TO_LUN(container);
			break;
		}
	}
	if (device_config_needed == NOTHING)
		return;

	/*
	 *	If we decided that a re-configuration needs to be done,
	 * schedule it here on the way out the door, please close the door
	 * behind you.
	 */

	/*
	 *	Find the scsi_device associated with the SCSI address,
	 * and mark it as changed, invalidating the cache. This deals
	 * with changes to existing device IDs.
	 */

	if (!dev || !dev->scsi_host_ptr)
		return;
	/*
	 * force reload of disk info via aac_probe_container
	 */
	if ((channel == CONTAINER_CHANNEL) &&
	  (device_config_needed != NOTHING)) {
		if (dev->fsa_dev[container].valid == 1)
			dev->fsa_dev[container].valid = 2;
		aac_probe_container(dev, container);
	}
	device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun);
	if (device) {
		switch (device_config_needed) {
		case DELETE:
#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
			scsi_remove_device(device);
#else
			if (scsi_device_online(device)) {
				scsi_device_set_state(device, SDEV_OFFLINE);
				sdev_printk(KERN_INFO, device,
					"Device offlined - %s\n",
					(channel == CONTAINER_CHANNEL) ?
						"array deleted" :
						"enclosure services event");
			}
#endif
			break;
		case ADD:
			if (!scsi_device_online(device)) {
				sdev_printk(KERN_INFO, device,
					"Device online - %s\n",
					(channel == CONTAINER_CHANNEL) ?
						"array created" :
						"enclosure services event");
				scsi_device_set_state(device, SDEV_RUNNING);
			}
			/* FALLTHRU */
		case CHANGE:
			if ((channel == CONTAINER_CHANNEL)
			 && (!dev->fsa_dev[container].valid)) {
#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
				scsi_remove_device(device);
#else
				if (!scsi_device_online(device))
					break;
				scsi_device_set_state(device, SDEV_OFFLINE);
				sdev_printk(KERN_INFO, device,
					"Device offlined - %s\n",
					"array failed");
#endif
				break;
			}
			scsi_rescan_device(&device->sdev_gendev);

		default:
			break;
		}
		scsi_device_put(device);
		device_config_needed = NOTHING;
	}
	/* The lookup above failed: a newly created device must be added. */
	if (device_config_needed == ADD)
		scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
	if (channel == CONTAINER_CHANNEL) {
		container++;
		device_config_needed = NOTHING;
		goto retry_next;
	}
}
2007-06-12 17:33:54 +04:00
/*
 * _aac_reset_adapter - tear down and re-initialize a (presumed dead) adapter
 * @aac:	adapter to reset
 * @forced:	non-zero to skip the firmware health check before restart
 *
 * Frees all communication resources, re-runs the card-type specific init
 * sequence, and completes every command the firmware still owned with
 * TASK SET FULL so the midlayer retries them.  Returns 0 on success or a
 * negative errno.
 */
static int _aac_reset_adapter(struct aac_dev *aac, int forced)
{
	int index, quirks;
	int retval, i;
	struct Scsi_Host *host;
	struct scsi_device *dev;
	struct scsi_cmnd *command;
	struct scsi_cmnd *command_list;
	int jafo = 0;	/* set when called from outside the aacraid thread */
	int cpu;

	/*
	 * Assumptions:
	 *	- host is locked, unless called by the aacraid thread.
	 *	  (a matter of convenience, due to legacy issues surrounding
	 *	  eh_host_adapter_reset).
	 *	- in_reset is asserted, so no new i/o is getting to the
	 *	  card.
	 *	- The card is dead, or will be very shortly ;-/ so no new
	 *	  commands are completing in the interrupt service.
	 */
	host = aac->scsi_host_ptr;
	scsi_block_requests(host);
	aac_adapter_disable_int(aac);
	/* If not running on the aacraid thread itself, stop that thread;
	 * it will be restarted after re-init.  Drop the host lock first
	 * because kthread_stop() can sleep. */
	if (aac->thread->pid != current->pid) {
		spin_unlock_irq(host->host_lock);
		kthread_stop(aac->thread);
		jafo = 1;
	}

	/*
	 *	If a positive health, means in a known DEAD PANIC
	 * state and the adapter could be reset to `try again'.
	 */
	retval = aac_adapter_restart(aac, forced ? 0 : aac_adapter_check_health(aac));

	if (retval)
		goto out;

	/*
	 *	Loop through the fibs, close the synchronous FIBS
	 */
	for (retval = 1, index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
		struct fib *fib = &aac->fibs[index];
		if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
		  (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) {
			unsigned long flagv;
			spin_lock_irqsave(&fib->event_lock, flagv);
			up(&fib->event_wait);
			spin_unlock_irqrestore(&fib->event_lock, flagv);
			schedule();
			retval = 0;
		}
	}
	/* Give some extra time for ioctls to complete. */
	if (retval == 0)
		ssleep(2);
	index = aac->cardtype;
	/*
	 * Re-initialize the adapter, first free resources, then carefully
	 * apply the initialization sequence to come back again. Only risk
	 * is a change in Firmware dropping cache, it is assumed the caller
	 * will ensure that i/o is queisced and the card is flushed in that
	 * case.
	 */
	aac_fib_map_free(aac);
	pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
	aac->comm_addr = NULL;
	aac->comm_phys = 0;
	kfree(aac->queues);
	aac->queues = NULL;
	/* Undo per-vector affinity hints and release IRQs.  PMC S6/S7/S8/S9
	 * parts may use MSI-X with multiple vectors; everything else uses a
	 * single legacy/MSI interrupt. */
	cpu = cpumask_first(cpu_online_mask);
	if (aac->pdev->device == PMC_DEVICE_S6 ||
	    aac->pdev->device == PMC_DEVICE_S7 ||
	    aac->pdev->device == PMC_DEVICE_S8 ||
	    aac->pdev->device == PMC_DEVICE_S9) {
		if (aac->max_msix > 1) {
			for (i = 0; i < aac->max_msix; i++) {
				if (irq_set_affinity_hint(
				    aac->msixentry[i].vector,
				    NULL)) {
					printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
						aac->name,
						aac->id,
						cpu);
				}
				cpu = cpumask_next(cpu,
						cpu_online_mask);
				free_irq(aac->msixentry[i].vector,
					 &(aac->aac_msix[i]));
			}
			pci_disable_msix(aac->pdev);
		} else {
			free_irq(aac->pdev->irq, &(aac->aac_msix[0]));
		}
	} else {
		free_irq(aac->pdev->irq, aac);
	}
	if (aac->msi)
		pci_disable_msi(aac->pdev);
	kfree(aac->fsa_dev);
	aac->fsa_dev = NULL;
	/* 31-bit quirk cards must be re-initialized under a 31-bit DMA mask;
	 * the mask is widened back to 32 bits after init succeeds. */
	quirks = aac_get_driver_ident(index)->quirks;
	if (quirks & AAC_QUIRK_31BIT) {
		if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(31)))) ||
		  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(31)))))
			goto out;
	} else {
		if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32)))) ||
		  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(32)))))
			goto out;
	}
	if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
		goto out;
	if (quirks & AAC_QUIRK_31BIT)
		if ((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32))))
			goto out;
	/* Restart the command thread we stopped above. */
	if (jafo) {
		aac->thread = kthread_run(aac_command_thread, aac, "%s",
					  aac->name);
		if (IS_ERR(aac->thread)) {
			retval = PTR_ERR(aac->thread);
			goto out;
		}
	}
	(void)aac_get_adapter_info(aac);
	if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
		host->sg_tablesize = 34;
		host->max_sectors = (host->sg_tablesize * 8) + 112;
	}
	if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
		host->sg_tablesize = 17;
		host->max_sectors = (host->sg_tablesize * 8) + 112;
	}
	aac_get_config_status(aac, 1);
	aac_get_containers(aac);
	/*
	 * This is where the assumption that the Adapter is quiesced
	 * is important.
	 */
	/* Collect every command the firmware still owned into a singly
	 * linked list threaded through SCp.buffer, then complete each with
	 * TASK SET FULL so the midlayer requeues it. */
	command_list = NULL;
	__shost_for_each_device(dev, host) {
		unsigned long flags;
		spin_lock_irqsave(&dev->list_lock, flags);
		list_for_each_entry(command, &dev->cmd_list, list)
			if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
				command->SCp.buffer = (struct scatterlist *)command_list;
				command_list = command;
			}
		spin_unlock_irqrestore(&dev->list_lock, flags);
	}
	while ((command = command_list)) {
		command_list = (struct scsi_cmnd *)command->SCp.buffer;
		command->SCp.buffer = NULL;
		command->result = DID_OK << 16
		  | COMMAND_COMPLETE << 8
		  | SAM_STAT_TASK_SET_FULL;
		command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
		command->scsi_done(command);
	}
	retval = 0;

out:
	aac->in_reset = 0;
	scsi_unblock_requests(host);
	/* Re-take the host lock the caller expects to still hold. */
	if (jafo) {
		spin_lock_irq(host->host_lock);
	}
	return retval;
}
/*
 * aac_reset_adapter - serialized entry point for adapter reset
 * @aac:	adapter to reset
 * @forced:	0 = polite (drain I/O, shutdown first); higher values skip
 *		progressively more of the quiesce steps
 *
 * Claims the in_reset flag under fib_lock, optionally drains outstanding
 * firmware-owned commands for up to 60 seconds, then performs the reset
 * under the host lock.  Returns 0 on success, -EBUSY if a reset is already
 * in progress, or the error from _aac_reset_adapter().
 */
int aac_reset_adapter(struct aac_dev *aac, int forced)
{
	unsigned long flagv = 0;
	int retval;
	struct Scsi_Host *host;

	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
		return -EBUSY;

	if (aac->in_reset) {
		spin_unlock_irqrestore(&aac->fib_lock, flagv);
		return -EBUSY;
	}
	aac->in_reset = 1;
	spin_unlock_irqrestore(&aac->fib_lock, flagv);

	/*
	 * Wait for all commands to complete to this specific
	 * target (block maximum 60 seconds). Although not necessary,
	 * it does make us a good storage citizen.
	 */
	host = aac->scsi_host_ptr;
	scsi_block_requests(host);
	if (forced < 2) for (retval = 60; retval; --retval) {
		struct scsi_device *dev;
		struct scsi_cmnd *command;
		int active = 0;

		__shost_for_each_device(dev, host) {
			spin_lock_irqsave(&dev->list_lock, flagv);
			list_for_each_entry(command, &dev->cmd_list, list) {
				if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
					active++;
					break;
				}
			}
			spin_unlock_irqrestore(&dev->list_lock, flagv);
			if (active)
				break;

		}
		/*
		 * We can exit If all the commands are complete
		 */
		if (active == 0)
			break;
		ssleep(1);
	}

	/* Quiesce build, flush cache, write through mode */
	if (forced < 2)
		aac_send_shutdown(aac);
	spin_lock_irqsave(host->host_lock, flagv);
	/* aac_check_reset values other than 0/1 imply "force" when the
	 * caller did not ask for a forced reset explicitly. */
	retval = _aac_reset_adapter(aac, forced ? forced : ((aac_check_reset != 0) && (aac_check_reset != 1)));
	spin_unlock_irqrestore(host->host_lock, flagv);

	if ((forced < 2) && (retval == -ENODEV)) {
		/* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
		struct fib *fibctx = aac_fib_alloc(aac);
		if (fibctx) {
			struct aac_pause *cmd;
			int status;

			aac_fib_init(fibctx);

			cmd = (struct aac_pause *)fib_data(fibctx);

			cmd->command = cpu_to_le32(VM_ContainerConfig);
			cmd->type = cpu_to_le32(CT_PAUSE_IO);
			cmd->timeout = cpu_to_le32(1);
			cmd->min = cpu_to_le32(1);
			cmd->noRescan = cpu_to_le32(1);
			cmd->count = cpu_to_le32(0);

			status = aac_fib_send(ContainerCommand,
			  fibctx,
			  sizeof(struct aac_pause),
			  FsaNormal,
			  -2 /* Timeout silently */, 1,
			  NULL, NULL);

			if (status >= 0)
				aac_fib_complete(fibctx);
			/* FIB should be freed only after getting
			 * the response from the F/W */
			if (status != -ERESTARTSYS)
				aac_fib_free(fibctx);
		}
	}

	return retval;
}
/*
 * aac_check_health - poll adapter health and fan out a firmware-panic AIF
 * @aac:	adapter to check
 *
 * Returns 0 when the adapter is healthy (or a check/reset is already in
 * progress); otherwise the BlinkLED code from the firmware, or the result
 * of the reset attempt.  On an unhealthy adapter a synthetic
 * AifExeFirmwarePanic event is queued to every registered AIF listener
 * before any reset is attempted.
 */
int aac_check_health(struct aac_dev *aac)
{
	int BlinkLED;
	unsigned long time_now, flagv = 0;
	struct list_head *entry;
	struct Scsi_Host *host;

	/* Extending the scope of fib_lock slightly to protect aac->in_reset */
	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
		return 0;

	if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
		spin_unlock_irqrestore(&aac->fib_lock, flagv);
		return 0; /* OK */
	}

	aac->in_reset = 1;

	/* Fake up an AIF:
	 *	aac_aifcmd.command = AifCmdEventNotify = 1
	 *	aac_aifcmd.seqnum = 0xFFFFFFFF
	 *	aac_aifcmd.data[0] = AifEnExpEvent = 23
	 *	aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
	 *	aac.aifcmd.data[2] = AifHighPriority = 3
	 *	aac.aifcmd.data[3] = BlinkLED
	 */

	time_now = jiffies/HZ;
	entry = aac->fib_list.next;

	/*
	 * For each Context that is on the
	 * fibctxList, make a copy of the
	 * fib, and then set the event to wake up the
	 * thread that is waiting for it.
	 */
	while (entry != &aac->fib_list) {
		/*
		 * Extract the fibctx
		 */
		struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
		struct hw_fib *hw_fib;
		struct fib *fib;
		/*
		 * Check if the queue is getting
		 * backlogged
		 */
		if (fibctx->count > 20) {
			/*
			 * It's *not* jiffies folks,
			 * but jiffies / HZ, so do not
			 * panic ...
			 */
			u32 time_last = fibctx->jiffies;
			/*
			 * Has it been > 2 minutes
			 * since the last read off
			 * the queue?
			 */
			if ((time_now - time_last) > aif_timeout) {
				/* Listener stopped reading: drop its context
				 * rather than queue more events to it. */
				entry = entry->next;
				aac_close_fib_context(aac, fibctx);
				continue;
			}
		}
		/*
		 * Warning: no sleep allowed while
		 * holding spinlock
		 */
		hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC);
		fib = kzalloc(sizeof(struct fib), GFP_ATOMIC);
		if (fib && hw_fib) {
			struct aac_aifcmd *aif;

			fib->hw_fib_va = hw_fib;
			fib->dev = aac;
			aac_fib_init(fib);
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof(struct fib);
			fib->data = hw_fib->data;
			aif = (struct aac_aifcmd *)hw_fib->data;
			aif->command = cpu_to_le32(AifCmdEventNotify);
			aif->seqnum = cpu_to_le32(0xFFFFFFFF);
			((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent);
			((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic);
			((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority);
			((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED);

			/*
			 * Put the FIB onto the
			 * fibctx's fibs
			 */
			list_add_tail(&fib->fiblink, &fibctx->fib_list);
			fibctx->count++;
			/*
			 * Set the event to wake up the
			 * thread that will waiting.
			 */
			up(&fibctx->wait_sem);
		} else {
			printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
			kfree(fib);
			kfree(hw_fib);
		}
		entry = entry->next;
	}

	spin_unlock_irqrestore(&aac->fib_lock, flagv);

	if (BlinkLED < 0) {
		printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
		goto out;
	}

	printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);

	/* Honor the aac_check_reset module policy: 0 never resets, and 1
	 * respects the firmware's IGNORE_RESET option. */
	if (!aac_check_reset || ((aac_check_reset == 1) &&
		(aac->supplement_adapter_info.SupportedOptions2 &
			AAC_OPTION_IGNORE_RESET)))
		goto out;
	host = aac->scsi_host_ptr;
	/* Take the host lock only when not on the aacraid thread, matching
	 * the locking contract of _aac_reset_adapter(). */
	if (aac->thread->pid != current->pid)
		spin_lock_irqsave(host->host_lock, flagv);
	BlinkLED = _aac_reset_adapter(aac, aac_check_reset != 1);
	if (aac->thread->pid != current->pid)
		spin_unlock_irqrestore(host->host_lock, flagv);
	return BlinkLED;

out:
	aac->in_reset = 0;
	return BlinkLED;
}
2005-04-17 02:20:36 +04:00
/**
* aac_command_thread - command processing thread
* @ dev : Adapter to monitor
*
* Waits on the commandready event in it ' s queue . When the event gets set
* it will pull FIBs off it ' s queue . It will continue to pull FIBs off
* until the queue is empty . When the queue is empty it will wait for
* more FIBs .
*/
2008-01-16 18:39:06 +03:00
2006-02-14 20:45:06 +03:00
int aac_command_thread(void *data)
{
	struct aac_dev *dev = data;
	struct hw_fib *hw_fib, *hw_newfib;
	struct fib *fib, *newfib;
	struct aac_fib_context *fibctx;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	/* Background-activity schedule: next host-time sync and next health check */
	unsigned long next_jiffies = jiffies + HZ;
	unsigned long next_check_jiffies = next_jiffies;
	long difference = HZ;

	/*
	 *	We can only have one thread per adapter for AIF's.
	 */
	if (dev->aif_thread)
		return -EINVAL;

	/*
	 *	Let the DPC know it has a place to send the AIF's to.
	 */
	dev->aif_thread = 1;
	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	dprintk ((KERN_INFO "aac_command_thread start\n"));
	while (1) {
		spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
		/* Drain every queued host-normal command FIB before sleeping */
		while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
			struct list_head *entry;
			struct aac_aifcmd *aifcmd;

			set_current_state(TASK_RUNNING);

			entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
			list_del(entry);

			/* Drop the queue lock while processing this FIB */
			spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
			fib = list_entry(entry, struct fib, fiblink);
			/*
			 *	We will process the FIB here or pass it to a
			 *	worker thread that is TBD. We Really can't
			 *	do anything at this point since we don't have
			 *	anything defined for this thread to do.
			 */
			/* Re-initialize the fib wrapper around the adapter-supplied hw_fib */
			hw_fib = fib->hw_fib_va;
			memset(fib, 0, sizeof(struct fib));
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof(struct fib);
			fib->hw_fib_va = hw_fib;
			fib->data = hw_fib->data;
			fib->dev = dev;
			/*
			 *	We only handle AifRequest fibs from the adapter.
			 */
			aifcmd = (struct aac_aifcmd *) hw_fib->data;
			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
				/* Handle Driver Notify Events */
				aac_handle_aif(dev, fib);
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			} else {
				/* The u32 here is important and intended. We are using
				   32bit wrapping time to fit the adapter field */

				u32 time_now, time_last;
				unsigned long flagv;
				unsigned num;
				struct hw_fib **hw_fib_pool, **hw_fib_p;
				struct fib **fib_pool, **fib_p;

				/* Sniff events */
				if ((aifcmd->command ==
				     cpu_to_le32(AifCmdEventNotify)) ||
				    (aifcmd->command ==
				     cpu_to_le32(AifCmdJobProgress))) {
					aac_handle_aif(dev, fib);
				}

				time_now = jiffies/HZ;

				/*
				 * Warning: no sleep allowed while
				 * holding spinlock. We take the estimate
				 * and pre-allocate a set of fibs outside the
				 * lock.
				 */
				num = le32_to_cpu(dev->init->AdapterFibsSize)
				    / sizeof(struct hw_fib); /* some extra */
				spin_lock_irqsave(&dev->fib_lock, flagv);
				/* Count registered fib contexts to size the pool */
				entry = dev->fib_list.next;
				while (entry != &dev->fib_list) {
					entry = entry->next;
					++num;
				}
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
				hw_fib_pool = NULL;
				fib_pool = NULL;
				if (num
				 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
				 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
					hw_fib_p = hw_fib_pool;
					fib_p = fib_pool;
					while (hw_fib_p < &hw_fib_pool[num]) {
						/* Stop at the first allocation failure; both
						 * pools keep matched counts of valid entries */
						if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
							--hw_fib_p;
							break;
						}
						if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
							kfree(*(--hw_fib_p));
							break;
						}
					}
					/* num becomes the count of usable pool entries */
					if ((num = hw_fib_p - hw_fib_pool) == 0) {
						kfree(fib_pool);
						fib_pool = NULL;
						kfree(hw_fib_pool);
						hw_fib_pool = NULL;
					}
				} else {
					/* fib_pool alloc failed (or num == 0); hw_fib_pool
					 * may be non-NULL here and must be released */
					kfree(hw_fib_pool);
					hw_fib_pool = NULL;
				}
				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				/*
				 * For each Context that is on the
				 * fibctxList, make a copy of the
				 * fib, and then set the event to wake up the
				 * thread that is waiting for it.
				 */
				hw_fib_p = hw_fib_pool;
				fib_p = fib_pool;
				while (entry != &dev->fib_list) {
					/*
					 * Extract the fibctx
					 */
					fibctx = list_entry(entry, struct aac_fib_context, next);
					/*
					 * Check if the queue is getting
					 * backlogged
					 */
					if (fibctx->count > 20)
					{
						/*
						 * It's *not* jiffies folks,
						 * but jiffies / HZ so do not
						 * panic ...
						 */
						time_last = fibctx->jiffies;
						/*
						 * Has it been > 2 minutes
						 * since the last read off
						 * the queue?
						 */
						if ((time_now - time_last) > aif_timeout) {
							/* Reader went away; tear the context down */
							entry = entry->next;
							aac_close_fib_context(dev, fibctx);
							continue;
						}
					}
					/*
					 * Warning: no sleep allowed while
					 * holding spinlock
					 */
					if (hw_fib_p < &hw_fib_pool[num]) {
						/* Take one pre-allocated pair; NULL the slot so
						 * the cleanup loop below won't double-free it */
						hw_newfib = *hw_fib_p;
						*(hw_fib_p++) = NULL;
						newfib = *fib_p;
						*(fib_p++) = NULL;
						/*
						 * Make the copy of the FIB
						 */
						memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
						memcpy(newfib, fib, sizeof(struct fib));
						newfib->hw_fib_va = hw_newfib;
						/*
						 * Put the FIB onto the
						 * fibctx's fibs
						 */
						list_add_tail(&newfib->fiblink, &fibctx->fib_list);
						fibctx->count++;
						/*
						 * Set the event to wake up the
						 * thread that is waiting.
						 */
						up(&fibctx->wait_sem);
					} else {
						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
					}
					entry = entry->next;
				}
				/*
				 *	Set the status of this FIB
				 */
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				aac_fib_adapter_complete(fib, sizeof(u32));
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
				/* Free up the remaining resources */
				hw_fib_p = hw_fib_pool;
				fib_p = fib_pool;
				while (hw_fib_p < &hw_fib_pool[num]) {
					/* kfree(NULL) is a no-op for slots handed out above */
					kfree(*hw_fib_p);
					kfree(*fib_p);
					++fib_p;
					++hw_fib_p;
				}
				kfree(hw_fib_pool);
				kfree(fib_pool);
			}
			/* The hw_fib was returned to the adapter via
			 * aac_fib_adapter_complete() above; only the wrapper is freed */
			kfree(fib);
			spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
		}
		/*
		 *	There are no more AIF's
		 */
		spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);

		/*
		 *	Background activity
		 */
		/* Periodic health check (every check_interval seconds) */
		if ((time_before(next_check_jiffies,next_jiffies))
		 && ((difference = next_check_jiffies - jiffies) <= 0)) {
			next_check_jiffies = next_jiffies;
			if (aac_check_health(dev) == 0) {
				difference = ((long)(unsigned)check_interval)
					   * HZ;
				next_check_jiffies = jiffies + difference;
			} else if (!dev->queues)
				break;
		}
		/* Periodic host-time sync to the adapter (every update_interval seconds) */
		if (!time_before(next_check_jiffies,next_jiffies)
		 && ((difference = next_jiffies - jiffies) <= 0)) {
			struct timeval now;
			int ret;

			/* Don't even try to talk to adapter if its sick */
			ret = aac_check_health(dev);
			if (!ret && !dev->queues)
				break;
			next_check_jiffies = jiffies
					   + ((long)(unsigned)check_interval)
					   * HZ;
			do_gettimeofday(&now);

			/* Synchronize our watches */
			if (((1000000 - (1000000 / HZ)) > now.tv_usec)
			 && (now.tv_usec > (1000000 / HZ)))
				/* Not close enough to a second boundary: sleep
				 * until the next boundary, then send the time */
				difference = (((1000000 - now.tv_usec) * HZ)
				  + 500000) / 1000000;
			else if (ret == 0) {
				struct fib *fibptr;

				if ((fibptr = aac_fib_alloc(dev))) {
					int status;
					__le32 *info;

					aac_fib_init(fibptr);

					info = (__le32 *) fib_data(fibptr);
					/* Round to the nearest second */
					if (now.tv_usec > 500000)
						++now.tv_sec;

					*info = cpu_to_le32(now.tv_sec);

					status = aac_fib_send(SendHostTime,
						fibptr,
						sizeof(*info),
						FsaNormal,
						1, 1,
						NULL,
						NULL);
					/* Do not set XferState to zero unless
					 * receives a response from F/W */
					if (status >= 0)
						aac_fib_complete(fibptr);
					/* FIB should be freed only after
					 * getting the response from the F/W */
					if (status != -ERESTARTSYS)
						aac_fib_free(fibptr);
				}
				difference = (long)(unsigned)update_interval*HZ;
			} else {
				/* retry shortly */
				difference = 10 * HZ;
			}
			next_jiffies = jiffies + difference;
			if (time_before(next_check_jiffies,next_jiffies))
				difference = next_check_jiffies - jiffies;
		}
		if (difference <= 0)
			difference = 1;
		set_current_state(TASK_INTERRUPTIBLE);
		/* Sleep until the next scheduled background task or a queue wakeup */
		schedule_timeout(difference);

		if (kthread_should_stop())
			break;
	}
	if (dev->queues)
		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	dev->aif_thread = 0;
	return 0;
}