2019-05-19 16:51:48 +03:00
// SPDX-License-Identifier: GPL-2.0-or-later
2005-04-17 02:20:36 +04:00
/*
* Adaptec AAC series RAID controller driver
2008-10-27 18:16:36 +03:00
* ( c ) Copyright 2001 Red Hat Inc .
2005-04-17 02:20:36 +04:00
*
* based on the old aacraid driver that is . .
* Adaptec aacraid device driver for Linux .
*
2011-03-17 12:10:32 +03:00
* Copyright ( c ) 2000 - 2010 Adaptec , Inc .
2017-02-03 02:53:36 +03:00
* 2010 - 2015 PMC - Sierra , Inc . ( aacraid @ pmc - sierra . com )
* 2016 - 2017 Microsemi Corp . ( aacraid @ microsemi . com )
2005-04-17 02:20:36 +04:00
*
* Module Name :
* commctrl . c
*
* Abstract : Contains all routines for control of the AFA comm layer
*/
# include <linux/kernel.h>
# include <linux/init.h>
# include <linux/types.h>
# include <linux/pci.h>
# include <linux/spinlock.h>
# include <linux/slab.h>
# include <linux/completion.h>
# include <linux/dma-mapping.h>
# include <linux/blkdev.h>
2020-10-30 19:44:19 +03:00
# include <linux/compat.h>
2006-08-03 19:02:24 +04:00
# include <linux/delay.h> /* ssleep prototype */
2006-03-27 21:44:26 +04:00
# include <linux/kthread.h>
2016-12-24 22:46:01 +03:00
# include <linux/uaccess.h>
2008-05-28 23:32:55 +04:00
# include <scsi/scsi_host.h>
2005-04-17 02:20:36 +04:00
# include "aacraid.h"
2020-07-13 10:59:39 +03:00
# define AAC_DEBUG_PREAMBLE KERN_INFO
# define AAC_DEBUG_POSTAMBLE
2005-04-17 02:20:36 +04:00
/**
 * ioctl_send_fib	-	send a FIB from userspace
 * @dev:	adapter being processed
 * @arg:	arguments to the ioctl call
 *
 * This routine sends a fib to the adapter on behalf of a user level
 * program. The FIB header is copied in first so its Size field can be
 * validated before the full copy; oversized (but <= 2K) FIBs get a
 * temporary coherent buffer swapped into the fib for the duration of
 * the request.
 */
static int ioctl_send_fib(struct aac_dev *dev, void __user *arg)
{
	struct hw_fib *kfib;
	struct fib *fibptr;
	struct hw_fib *hw_fib = (struct hw_fib *)0;
	dma_addr_t hw_fib_pa = (dma_addr_t)0LL;
	unsigned int size, osize;
	int retval;

	if (dev->in_reset)
		return -EBUSY;

	fibptr = aac_fib_alloc(dev);
	if (fibptr == NULL)
		return -ENOMEM;

	kfib = fibptr->hw_fib_va;
	/*
	 * First copy in the header so that we can check the size field.
	 */
	if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
		aac_fib_free(fibptr);
		return -EFAULT;
	}
	/*
	 * Since we copy based on the fib header size, make sure that we
	 * will not overrun the buffer when we copy the memory. Return
	 * an error if we would.
	 */
	osize = size = le16_to_cpu(kfib->header.Size) +
		sizeof(struct aac_fibhdr);
	if (size < le16_to_cpu(kfib->header.SenderSize))
		size = le16_to_cpu(kfib->header.SenderSize);
	if (size > dev->max_fib_size) {
		dma_addr_t daddr;

		/* hard upper bound on a user-supplied FIB */
		if (size > 2048) {
			retval = -EINVAL;
			goto cleanup;
		}

		kfib = dma_alloc_coherent(&dev->pdev->dev, size, &daddr,
					  GFP_KERNEL);
		if (!kfib) {
			retval = -ENOMEM;
			goto cleanup;
		}

		/* Highjack the hw_fib: remember the original so it can be
		 * restored (and the temporary freed) in cleanup below. */
		hw_fib = fibptr->hw_fib_va;
		hw_fib_pa = fibptr->hw_fib_pa;
		fibptr->hw_fib_va = kfib;
		fibptr->hw_fib_pa = daddr;
		memset(((char *)kfib) + dev->max_fib_size, 0,
		       size - dev->max_fib_size);
		memcpy(kfib, hw_fib, dev->max_fib_size);
	}

	if (copy_from_user(kfib, arg, size)) {
		retval = -EFAULT;
		goto cleanup;
	}

	/* Sanity check the second copy: the header must not have grown
	 * between the two copy_from_user() calls (TOCTOU guard). */
	if ((osize != le16_to_cpu(kfib->header.Size) +
		sizeof(struct aac_fibhdr))
		|| (size < le16_to_cpu(kfib->header.SenderSize))) {
		retval = -EINVAL;
		goto cleanup;
	}

	if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
		aac_adapter_interrupt(dev);
		/*
		 * Since we didn't really send a fib, zero out the state to
		 * allow cleanup code not to assert.
		 */
		kfib->header.XferState = 0;
	} else {
		retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr,
				le16_to_cpu(kfib->header.Size), FsaNormal,
				1, 1, NULL, NULL);
		if (retval)
			goto cleanup;

		if (aac_fib_complete(fibptr) != 0) {
			retval = -EINVAL;
			goto cleanup;
		}
	}
	/*
	 * Make sure that the size returned by the adapter (which includes
	 * the header) is less than or equal to the size of a fib, so we
	 * don't corrupt application data. Then copy that size to the user
	 * buffer. (Don't try to add the header information again, since it
	 * was already included by the adapter.)
	 */
	retval = 0;
	if (copy_to_user(arg, (void *)kfib, size))
		retval = -EFAULT;
cleanup:
	if (hw_fib) {
		/* free the temporary buffer and restore the original fib */
		dma_free_coherent(&dev->pdev->dev, size, kfib,
				  fibptr->hw_fib_pa);
		fibptr->hw_fib_pa = hw_fib_pa;
		fibptr->hw_fib_va = hw_fib;
	}
	if (retval != -ERESTARTSYS)
		aac_fib_free(fibptr);
	return retval;
}
/**
 * open_getadapter_fib	-	Get the next fib
 * @dev:	adapter being processed
 * @arg:	arguments to the open call
 *
 * This routine will get the next Fib, if available, from the
 * AdapterFibContext passed in from the user. It allocates and registers
 * a new aac_fib_context on the adapter's fib_list and hands its opaque
 * unique id back to userspace.
 */
static int open_getadapter_fib(struct aac_dev *dev, void __user *arg)
{
	struct aac_fib_context *fibctx;
	int status;

	fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
	if (fibctx == NULL) {
		status = -ENOMEM;
	} else {
		unsigned long flags;
		struct list_head *entry;
		struct aac_fib_context *context;

		fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
		fibctx->size = sizeof(struct aac_fib_context);
		/*
		 * Yes yes, I know this could be an index, but we have a
		 * better guarantee of uniqueness for the locked loop below.
		 * Without the aid of a persistent history, this also helps
		 * reduce the chance that the opaque context would be reused.
		 */
		fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
		/*
		 * Initialize the completion used to wait for the next AIF.
		 */
		init_completion(&fibctx->completion);
		fibctx->wait = 0;
		/*
		 * Initialize the fibs and set the count of fibs on
		 * the list to 0.
		 */
		fibctx->count = 0;
		INIT_LIST_HEAD(&fibctx->fib_list);
		fibctx->jiffies = jiffies/HZ;
		/*
		 * Now add this context onto the adapter's
		 * AdapterFibContext list.
		 */
		spin_lock_irqsave(&dev->fib_lock, flags);
		/* Ensure that we have a unique identifier; a collision
		 * bumps the id and restarts the scan from the head. */
		entry = dev->fib_list.next;
		while (entry != &dev->fib_list) {
			context = list_entry(entry, struct aac_fib_context, next);
			if (context->unique == fibctx->unique) {
				/* Not unique (32 bits) */
				fibctx->unique++;
				entry = dev->fib_list.next;
			} else {
				entry = entry->next;
			}
		}
		list_add_tail(&fibctx->next, &dev->fib_list);
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(arg, &fibctx->unique,
						sizeof(fibctx->unique))) {
			status = -EFAULT;
		} else {
			status = 0;
		}
	}
	return status;
}
2020-10-30 19:44:19 +03:00
/* 32-bit layout of struct fib_ioctl, used to translate the argument of
 * FSACTL_GET_NEXT_ADAPTER_FIB when issued through the compat syscall path. */
struct compat_fib_ioctl {
	u32 fibctx;		/* opaque context id from open_getadapter_fib() */
	s32 wait;		/* nonzero: block until an AIF is available */
	compat_uptr_t fib;	/* 32-bit user pointer to the fib buffer */
};
2005-04-17 02:20:36 +04:00
/**
 * next_getadapter_fib	-	get the next fib
 * @dev:	adapter to use
 * @arg:	ioctl argument (struct fib_ioctl, or its compat layout)
 *
 * This routine will get the next Fib, if available, from the
 * AdapterFibContext passed in from the user. If none is queued and
 * f.wait is set, the caller sleeps interruptibly until one arrives.
 */
static int next_getadapter_fib(struct aac_dev *dev, void __user *arg)
{
	struct fib_ioctl f;
	struct fib *fib;
	struct aac_fib_context *fibctx;
	int status;
	struct list_head *entry;
	unsigned long flags;

	if (in_compat_syscall()) {
		/* translate the 32-bit layout into the native one */
		struct compat_fib_ioctl cf;

		if (copy_from_user(&cf, arg, sizeof(struct compat_fib_ioctl)))
			return -EFAULT;

		f.fibctx = cf.fibctx;
		f.wait = cf.wait;
		f.fib = compat_ptr(cf.fib);
	} else {
		if (copy_from_user(&f, arg, sizeof(struct fib_ioctl)))
			return -EFAULT;
	}
	/*
	 * Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 * Search the list of AdapterFibContext addresses on the adapter
	 * to be sure this is a valid address
	 */
	spin_lock_irqsave(&dev->fib_lock, flags);
	entry = dev->fib_list.next;
	fibctx = NULL;

	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 * Extract the AdapterFibContext from the Input parameters.
		 */
		if (fibctx->unique == f.fibctx) /* We found a winner */
			break;
		entry = entry->next;
		fibctx = NULL;
	}
	if (!fibctx) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk((KERN_INFO "Fib Context not found\n"));
		return -EINVAL;
	}

	if ((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
		 (fibctx->size != sizeof(struct aac_fib_context))) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk((KERN_INFO "Fib Context corrupt?\n"));
		return -EINVAL;
	}
	status = 0;
	/*
	 * If there are no fibs to send back, then either wait or return
	 * -EAGAIN. NOTE: fib_lock is held on entry to this label and is
	 * dropped on every path out of it.
	 */
return_fib:
	if (!list_empty(&fibctx->fib_list)) {
		/*
		 * Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);

		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) {
			kfree(fib->hw_fib_va);
			kfree(fib);
			return -EFAULT;
		}
		/*
		 * Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
		status = 0;
	} else {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		/* If someone killed the AIF aacraid thread, restart it */
		status = !dev->aif_thread;
		if (status && !dev->in_reset && dev->queues && dev->fsa_dev) {
			/* Be paranoid, be very paranoid! */
			kthread_stop(dev->thread);
			ssleep(1);
			dev->aif_thread = 0;
			dev->thread = kthread_run(aac_command_thread, dev,
						  "%s", dev->name);
			ssleep(1);
		}
		if (f.wait) {
			if (wait_for_completion_interruptible(&fibctx->completion) < 0) {
				status = -ERESTARTSYS;
			} else {
				/* Lock again and retry */
				spin_lock_irqsave(&dev->fib_lock, flags);
				goto return_fib;
			}
		} else {
			status = -EAGAIN;
		}
	}
	/* record last-touched time so stale contexts can be aged out */
	fibctx->jiffies = jiffies/HZ;
	return status;
}
int aac_close_fib_context ( struct aac_dev * dev , struct aac_fib_context * fibctx )
{
struct fib * fib ;
/*
* First free any FIBs that have not been consumed .
*/
while ( ! list_empty ( & fibctx - > fib_list ) ) {
struct list_head * entry ;
/*
* Pull the next fib from the fibs
*/
entry = fibctx - > fib_list . next ;
list_del ( entry ) ;
fib = list_entry ( entry , struct fib , fiblink ) ;
fibctx - > count - - ;
/*
* Free the space occupied by this copy of the fib .
*/
2007-03-15 20:26:22 +03:00
kfree ( fib - > hw_fib_va ) ;
2005-04-17 02:20:36 +04:00
kfree ( fib ) ;
}
/*
* Remove the Context from the AdapterFibContext List
*/
list_del ( & fibctx - > next ) ;
/*
* Invalidate context
*/
fibctx - > type = 0 ;
/*
* Free the space occupied by the Context
*/
kfree ( fibctx ) ;
return 0 ;
}
/**
* close_getadapter_fib - close down user fib context
* @ dev : adapter
* @ arg : ioctl arguments
*
* This routine will close down the fibctx passed in from the user .
*/
2008-01-16 18:39:06 +03:00
2005-04-17 02:20:36 +04:00
static int close_getadapter_fib ( struct aac_dev * dev , void __user * arg )
{
struct aac_fib_context * fibctx ;
int status ;
unsigned long flags ;
struct list_head * entry ;
/*
* Verify that the HANDLE passed in was a valid AdapterFibContext
*
* Search the list of AdapterFibContext addresses on the adapter
* to be sure this is a valid address
*/
entry = dev - > fib_list . next ;
fibctx = NULL ;
while ( entry ! = & dev - > fib_list ) {
fibctx = list_entry ( entry , struct aac_fib_context , next ) ;
/*
* Extract the fibctx from the input parameters
*/
2007-10-29 08:11:28 +03:00
if ( fibctx - > unique = = ( u32 ) ( uintptr_t ) arg ) /* We found a winner */
2005-04-17 02:20:36 +04:00
break ;
entry = entry - > next ;
fibctx = NULL ;
}
if ( ! fibctx )
return 0 ; /* Already gone */
if ( ( fibctx - > type ! = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT ) | |
( fibctx - > size ! = sizeof ( struct aac_fib_context ) ) )
return - EINVAL ;
spin_lock_irqsave ( & dev - > fib_lock , flags ) ;
status = aac_close_fib_context ( dev , fibctx ) ;
spin_unlock_irqrestore ( & dev - > fib_lock , flags ) ;
return status ;
}
/**
 * check_revision	-	report the driver revision
 * @dev:	adapter
 * @arg:	ioctl arguments
 *
 * This routine returns the driver version, parsed out of the
 * "major.minor.patch" aac_driver_version string.
 * Under Linux, there have been no version incompatibilities, so this is
 * simple!
 */
static int check_revision(struct aac_dev *dev, void __user *arg)
{
	struct revision response;
	char *driver_version = aac_driver_version;
	u32 version;

	response.compat = 1;
	/* pack major.minor.patch as <major><minor><patch> bytes; 0x0400
	 * marks the minor field as present */
	version = (simple_strtol(driver_version,
				&driver_version, 10) << 24) | 0x00000400;
	version += simple_strtol(driver_version + 1, &driver_version, 10) << 16;
	version += simple_strtol(driver_version + 1, NULL, 10);
	response.version = cpu_to_le32(version);
#	ifdef AAC_DRIVER_BUILD
		response.build = cpu_to_le32(AAC_DRIVER_BUILD);
#	else
		response.build = cpu_to_le32(9999);
#	endif

	if (copy_to_user(arg, &response, sizeof(response)))
		return -EFAULT;
	return 0;
}
2005-05-17 05:28:42 +04:00
2005-04-17 02:20:36 +04:00
/**
2021-03-03 17:46:10 +03:00
* aac_send_raw_srb ( )
2020-07-13 10:59:39 +03:00
* @ dev : adapter is being processed
* @ arg : arguments to the send call
2005-04-17 02:20:36 +04:00
*/
2005-04-26 06:45:58 +04:00
static int aac_send_raw_srb ( struct aac_dev * dev , void __user * arg )
2005-04-17 02:20:36 +04:00
{
struct fib * srbfib ;
int status ;
2005-04-27 17:05:51 +04:00
struct aac_srb * srbcmd = NULL ;
2017-02-03 02:53:29 +03:00
struct aac_hba_cmd_req * hbacmd = NULL ;
2005-04-27 17:05:51 +04:00
struct user_aac_srb * user_srbcmd = NULL ;
struct user_aac_srb __user * user_srb = arg ;
2005-04-17 02:20:36 +04:00
struct aac_srb_reply __user * user_reply ;
2017-02-03 02:53:29 +03:00
u32 chn ;
2005-04-17 02:20:36 +04:00
u32 fibsize = 0 ;
u32 flags = 0 ;
s32 rcode = 0 ;
u32 data_dir ;
2017-02-03 02:53:29 +03:00
void __user * sg_user [ HBA_MAX_SG_EMBEDDED ] ;
void * sg_list [ HBA_MAX_SG_EMBEDDED ] ;
u32 sg_count [ HBA_MAX_SG_EMBEDDED ] ;
2008-01-28 23:16:52 +03:00
u32 sg_indx = 0 ;
2005-04-17 02:20:36 +04:00
u32 byte_count = 0 ;
2007-03-15 20:27:32 +03:00
u32 actual_fibsize64 , actual_fibsize = 0 ;
2005-04-17 02:20:36 +04:00
int i ;
2017-02-03 02:53:29 +03:00
int is_native_device ;
u64 address ;
2005-04-17 02:20:36 +04:00
2007-03-15 20:27:21 +03:00
if ( dev - > in_reset ) {
dprintk ( ( KERN_DEBUG " aacraid: send raw srb -EBUSY \n " ) ) ;
return - EBUSY ;
}
2005-04-17 02:20:36 +04:00
if ( ! capable ( CAP_SYS_ADMIN ) ) {
2008-01-16 18:39:06 +03:00
dprintk ( ( KERN_DEBUG " aacraid: No permission to send raw srb \n " ) ) ;
2005-04-17 02:20:36 +04:00
return - EPERM ;
}
/*
2007-03-15 20:27:32 +03:00
* Allocate and initialize a Fib then setup a SRB command
2005-04-17 02:20:36 +04:00
*/
2006-02-01 20:30:55 +03:00
if ( ! ( srbfib = aac_fib_alloc ( dev ) ) ) {
2005-06-18 00:38:04 +04:00
return - ENOMEM ;
2005-04-17 02:20:36 +04:00
}
2005-05-17 05:28:42 +04:00
memset ( sg_list , 0 , sizeof ( sg_list ) ) ; /* cleanup may take issue */
2005-04-17 02:20:36 +04:00
if ( copy_from_user ( & fibsize , & user_srb - > count , sizeof ( u32 ) ) ) {
2008-01-16 18:39:06 +03:00
dprintk ( ( KERN_DEBUG " aacraid: Could not copy data size from user \n " ) ) ;
2005-04-17 02:20:36 +04:00
rcode = - EFAULT ;
goto cleanup ;
}
2013-10-31 12:31:02 +04:00
if ( ( fibsize < ( sizeof ( struct user_aac_srb ) - sizeof ( struct user_sgentry ) ) ) | |
( fibsize > ( dev - > max_fib_size - sizeof ( struct aac_fibhdr ) ) ) ) {
2005-04-17 02:20:36 +04:00
rcode = - EINVAL ;
goto cleanup ;
}
2020-04-26 05:42:44 +03:00
user_srbcmd = memdup_user ( user_srb , fibsize ) ;
if ( IS_ERR ( user_srbcmd ) ) {
rcode = PTR_ERR ( user_srbcmd ) ;
2020-05-13 12:37:03 +03:00
user_srbcmd = NULL ;
2005-04-17 02:20:36 +04:00
goto cleanup ;
}
2005-04-27 17:05:51 +04:00
flags = user_srbcmd - > flags ; /* from user in cpu order */
switch ( flags & ( SRB_DataIn | SRB_DataOut ) ) {
2005-04-17 02:20:36 +04:00
case SRB_DataOut :
data_dir = DMA_TO_DEVICE ;
break ;
case ( SRB_DataIn | SRB_DataOut ) :
data_dir = DMA_BIDIRECTIONAL ;
break ;
case SRB_DataIn :
data_dir = DMA_FROM_DEVICE ;
break ;
default :
data_dir = DMA_NONE ;
}
2006-06-09 09:23:48 +04:00
if ( user_srbcmd - > sg . count > ARRAY_SIZE ( sg_list ) ) {
2005-05-17 05:28:42 +04:00
dprintk ( ( KERN_DEBUG " aacraid: too many sg entries %d \n " ,
2017-02-03 02:53:29 +03:00
user_srbcmd - > sg . count ) ) ;
rcode = - EINVAL ;
goto cleanup ;
}
if ( ( data_dir = = DMA_NONE ) & & user_srbcmd - > sg . count ) {
dprintk ( ( KERN_DEBUG " aacraid:SG with no direction specified \n " ) ) ;
2005-05-17 05:28:42 +04:00
rcode = - EINVAL ;
goto cleanup ;
}
2007-03-15 20:27:32 +03:00
actual_fibsize = sizeof ( struct aac_srb ) - sizeof ( struct sgentry ) +
( ( user_srbcmd - > sg . count & 0xff ) * sizeof ( struct sgentry ) ) ;
actual_fibsize64 = actual_fibsize + ( user_srbcmd - > sg . count & 0xff ) *
( sizeof ( struct sgentry64 ) - sizeof ( struct sgentry ) ) ;
/* User made a mistake - should not continue */
if ( ( actual_fibsize ! = fibsize ) & & ( actual_fibsize64 ! = fibsize ) ) {
dprintk ( ( KERN_DEBUG " aacraid: Bad Size specified in "
" Raw SRB command calculated fibsize=%lu;%lu "
" user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu "
" issued fibsize=%d \n " ,
actual_fibsize , actual_fibsize64 , user_srbcmd - > sg . count ,
sizeof ( struct aac_srb ) , sizeof ( struct sgentry ) ,
sizeof ( struct sgentry64 ) , fibsize ) ) ;
rcode = - EINVAL ;
goto cleanup ;
}
2017-02-03 02:53:29 +03:00
2017-02-16 23:51:11 +03:00
chn = user_srbcmd - > channel ;
2017-02-03 02:53:29 +03:00
if ( chn < AAC_MAX_BUSES & & user_srbcmd - > id < AAC_MAX_TARGETS & &
dev - > hba_map [ chn ] [ user_srbcmd - > id ] . devtype = =
AAC_DEVTYPE_NATIVE_RAW ) {
is_native_device = 1 ;
hbacmd = ( struct aac_hba_cmd_req * ) srbfib - > hw_fib_va ;
memset ( hbacmd , 0 , 96 ) ; /* sizeof(*hbacmd) is not necessary */
/* iu_type is a parameter of aac_hba_send */
switch ( data_dir ) {
case DMA_TO_DEVICE :
hbacmd - > byte1 = 2 ;
break ;
case DMA_FROM_DEVICE :
case DMA_BIDIRECTIONAL :
hbacmd - > byte1 = 1 ;
break ;
case DMA_NONE :
default :
break ;
}
hbacmd - > lun [ 1 ] = cpu_to_le32 ( user_srbcmd - > lun ) ;
hbacmd - > it_nexus = dev - > hba_map [ chn ] [ user_srbcmd - > id ] . rmw_nexus ;
/*
* we fill in reply_qid later in aac_src_deliver_message
* we fill in iu_type , request_id later in aac_hba_send
* we fill in emb_data_desc_count , data_length later
* in sg list build
*/
memcpy ( hbacmd - > cdb , user_srbcmd - > cdb , sizeof ( hbacmd - > cdb ) ) ;
address = ( u64 ) srbfib - > hw_error_pa ;
hbacmd - > error_ptr_hi = cpu_to_le32 ( ( u32 ) ( address > > 32 ) ) ;
hbacmd - > error_ptr_lo = cpu_to_le32 ( ( u32 ) ( address & 0xffffffff ) ) ;
hbacmd - > error_length = cpu_to_le32 ( FW_ERROR_BUFFER_SIZE ) ;
hbacmd - > emb_data_desc_count =
cpu_to_le32 ( user_srbcmd - > sg . count ) ;
srbfib - > hbacmd_size = 64 +
user_srbcmd - > sg . count * sizeof ( struct aac_hba_sgl ) ;
} else {
is_native_device = 0 ;
aac_fib_init ( srbfib ) ;
/* raw_srb FIB is not FastResponseCapable */
srbfib - > hw_fib_va - > header . XferState & =
~ cpu_to_le32 ( FastResponseCapable ) ;
srbcmd = ( struct aac_srb * ) fib_data ( srbfib ) ;
// Fix up srb for endian and force some values
srbcmd - > function = cpu_to_le32 ( SRBF_ExecuteScsi ) ; // Force this
srbcmd - > channel = cpu_to_le32 ( user_srbcmd - > channel ) ;
srbcmd - > id = cpu_to_le32 ( user_srbcmd - > id ) ;
srbcmd - > lun = cpu_to_le32 ( user_srbcmd - > lun ) ;
srbcmd - > timeout = cpu_to_le32 ( user_srbcmd - > timeout ) ;
srbcmd - > flags = cpu_to_le32 ( flags ) ;
srbcmd - > retry_limit = 0 ; // Obsolete parameter
srbcmd - > cdb_size = cpu_to_le32 ( user_srbcmd - > cdb_size ) ;
memcpy ( srbcmd - > cdb , user_srbcmd - > cdb , sizeof ( srbcmd - > cdb ) ) ;
2007-03-15 20:27:32 +03:00
}
2017-02-03 02:53:29 +03:00
2007-03-15 20:27:32 +03:00
byte_count = 0 ;
2017-02-03 02:53:29 +03:00
if ( is_native_device ) {
struct user_sgmap * usg32 = & user_srbcmd - > sg ;
struct user_sgmap64 * usg64 =
( struct user_sgmap64 * ) & user_srbcmd - > sg ;
for ( i = 0 ; i < usg32 - > count ; i + + ) {
void * p ;
u64 addr ;
sg_count [ i ] = ( actual_fibsize64 = = fibsize ) ?
usg64 - > sg [ i ] . count : usg32 - > sg [ i ] . count ;
if ( sg_count [ i ] >
( dev - > scsi_host_ptr - > max_sectors < < 9 ) ) {
pr_err ( " aacraid: upsg->sg[%d].count=%u>%u \n " ,
i , sg_count [ i ] ,
dev - > scsi_host_ptr - > max_sectors < < 9 ) ;
rcode = - EINVAL ;
goto cleanup ;
}
2017-05-10 19:39:35 +03:00
p = kmalloc ( sg_count [ i ] , GFP_KERNEL ) ;
2017-02-03 02:53:29 +03:00
if ( ! p ) {
rcode = - ENOMEM ;
goto cleanup ;
}
if ( actual_fibsize64 = = fibsize ) {
addr = ( u64 ) usg64 - > sg [ i ] . addr [ 0 ] ;
addr + = ( ( u64 ) usg64 - > sg [ i ] . addr [ 1 ] ) < < 32 ;
} else {
addr = ( u64 ) usg32 - > sg [ i ] . addr ;
}
sg_user [ i ] = ( void __user * ) ( uintptr_t ) addr ;
sg_list [ i ] = p ; // save so we can clean up later
sg_indx = i ;
if ( flags & SRB_DataOut ) {
if ( copy_from_user ( p , sg_user [ i ] ,
sg_count [ i ] ) ) {
rcode = - EFAULT ;
goto cleanup ;
}
}
scsi: aacraid: Remove pci-dma-compat wrapper API
The legacy API wrappers in include/linux/pci-dma-compat.h should go away as
they create unnecessary midlayering for include/linux/dma-mapping.h API.
Instead use dma-mapping.h API directly.
The patch has been generated with the coccinelle script below.
Compile-tested.
@@@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@@@
- PCI_DMA_NONE
+ DMA_NONE
@@ expression E1, E2, E3; @@
- pci_alloc_consistent(E1, E2, E3)
+ dma_alloc_coherent(&E1->dev, E2, E3, GFP_)
@@ expression E1, E2, E3; @@
- pci_zalloc_consistent(E1, E2, E3)
+ dma_alloc_coherent(&E1->dev, E2, E3, GFP_)
@@ expression E1, E2, E3, E4; @@
- pci_free_consistent(E1, E2, E3, E4)
+ dma_free_coherent(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_map_single(E1, E2, E3, E4)
+ dma_map_single(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_unmap_single(E1, E2, E3, E4)
+ dma_unmap_single(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4, E5; @@
- pci_map_page(E1, E2, E3, E4, E5)
+ dma_map_page(&E1->dev, E2, E3, E4, E5)
@@ expression E1, E2, E3, E4; @@
- pci_unmap_page(E1, E2, E3, E4)
+ dma_unmap_page(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_map_sg(E1, E2, E3, E4)
+ dma_map_sg(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_unmap_sg(E1, E2, E3, E4)
+ dma_unmap_sg(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_dma_sync_single_for_cpu(E1, E2, E3, E4)
+ dma_sync_single_for_cpu(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_dma_sync_single_for_device(E1, E2, E3, E4)
+ dma_sync_single_for_device(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_dma_sync_sg_for_cpu(E1, E2, E3, E4)
+ dma_sync_sg_for_cpu(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_dma_sync_sg_for_device(E1, E2, E3, E4)
+ dma_sync_sg_for_device(&E1->dev, E2, E3, E4)
@@ expression E1, E2; @@
- pci_dma_mapping_error(E1, E2)
+ dma_mapping_error(&E1->dev, E2)
@@ expression E1, E2; @@
- pci_set_consistent_dma_mask(E1, E2)
+ dma_set_coherent_mask(&E1->dev, E2)
@@ expression E1, E2; @@
- pci_set_dma_mask(E1, E2)
+ dma_set_mask(&E1->dev, E2)
Link: https://lore.kernel.org/r/f8d4778440d55ba26c04eef0f7d63fb211a39443.1596045683.git.usuraj35@gmail.com
Signed-off-by: Suraj Upadhyay <usuraj35@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2020-07-29 21:06:27 +03:00
addr = dma_map_single ( & dev - > pdev - > dev , p , sg_count [ i ] ,
data_dir ) ;
2017-02-03 02:53:29 +03:00
hbacmd - > sge [ i ] . addr_hi = cpu_to_le32 ( ( u32 ) ( addr > > 32 ) ) ;
hbacmd - > sge [ i ] . addr_lo = cpu_to_le32 (
( u32 ) ( addr & 0xffffffff ) ) ;
hbacmd - > sge [ i ] . len = cpu_to_le32 ( sg_count [ i ] ) ;
hbacmd - > sge [ i ] . flags = 0 ;
byte_count + = sg_count [ i ] ;
}
if ( usg32 - > count > 0 ) /* embedded sglist */
hbacmd - > sge [ usg32 - > count - 1 ] . flags =
cpu_to_le32 ( 0x40000000 ) ;
hbacmd - > data_length = cpu_to_le32 ( byte_count ) ;
status = aac_hba_send ( HBA_IU_TYPE_SCSI_CMD_REQ , srbfib ,
NULL , NULL ) ;
} else if ( dev - > adapter_info . options & AAC_OPT_SGMAP_HOST64 ) {
2005-04-27 17:05:51 +04:00
struct user_sgmap64 * upsg = ( struct user_sgmap64 * ) & user_srbcmd - > sg ;
2005-07-08 00:40:00 +04:00
struct sgmap64 * psg = ( struct sgmap64 * ) & srbcmd - > sg ;
2005-04-17 02:20:36 +04:00
/*
* This should also catch if user used the 32 bit sgmap
*/
2007-03-15 20:27:32 +03:00
if ( actual_fibsize64 = = fibsize ) {
actual_fibsize = actual_fibsize64 ;
for ( i = 0 ; i < upsg - > count ; i + + ) {
u64 addr ;
void * p ;
2017-02-03 02:53:29 +03:00
sg_count [ i ] = upsg - > sg [ i ] . count ;
if ( sg_count [ i ] >
2009-12-21 16:09:27 +03:00
( ( dev - > adapter_info . options &
2008-05-28 23:32:55 +04:00
AAC_OPT_NEW_COMM ) ?
( dev - > scsi_host_ptr - > max_sectors < < 9 ) :
2009-12-21 16:09:27 +03:00
65536 ) ) {
2008-05-28 23:32:55 +04:00
rcode = - EINVAL ;
goto cleanup ;
}
2017-05-10 19:39:35 +03:00
p = kmalloc ( sg_count [ i ] , GFP_KERNEL ) ;
2008-01-09 00:08:04 +03:00
if ( ! p ) {
2007-03-15 20:27:32 +03:00
dprintk ( ( KERN_DEBUG " aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d \n " ,
2017-02-03 02:53:29 +03:00
sg_count [ i ] , i , upsg - > count ) ) ;
2007-03-15 20:27:32 +03:00
rcode = - ENOMEM ;
goto cleanup ;
}
addr = ( u64 ) upsg - > sg [ i ] . addr [ 0 ] ;
addr + = ( ( u64 ) upsg - > sg [ i ] . addr [ 1 ] ) < < 32 ;
2007-10-29 08:11:28 +03:00
sg_user [ i ] = ( void __user * ) ( uintptr_t ) addr ;
2007-03-15 20:27:32 +03:00
sg_list [ i ] = p ; // save so we can clean up later
sg_indx = i ;
2008-01-16 18:39:06 +03:00
if ( flags & SRB_DataOut ) {
2017-02-03 02:53:29 +03:00
if ( copy_from_user ( p , sg_user [ i ] ,
sg_count [ i ] ) ) {
2007-03-15 20:27:32 +03:00
dprintk ( ( KERN_DEBUG " aacraid: Could not copy sg data from user \n " ) ) ;
rcode = - EFAULT ;
goto cleanup ;
}
}
scsi: aacraid: Remove pci-dma-compat wrapper API
The legacy API wrappers in include/linux/pci-dma-compat.h should go away as
they create unnecessary midlayering for include/linux/dma-mapping.h API.
Instead use dma-mapping.h API directly.
The patch has been generated with the coccinelle script below.
Compile-tested.
@@@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@@@
- PCI_DMA_NONE
+ DMA_NONE
@@ expression E1, E2, E3; @@
- pci_alloc_consistent(E1, E2, E3)
+ dma_alloc_coherent(&E1->dev, E2, E3, GFP_)
@@ expression E1, E2, E3; @@
- pci_zalloc_consistent(E1, E2, E3)
+ dma_alloc_coherent(&E1->dev, E2, E3, GFP_)
@@ expression E1, E2, E3, E4; @@
- pci_free_consistent(E1, E2, E3, E4)
+ dma_free_coherent(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_map_single(E1, E2, E3, E4)
+ dma_map_single(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_unmap_single(E1, E2, E3, E4)
+ dma_unmap_single(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4, E5; @@
- pci_map_page(E1, E2, E3, E4, E5)
+ dma_map_page(&E1->dev, E2, E3, E4, E5)
@@ expression E1, E2, E3, E4; @@
- pci_unmap_page(E1, E2, E3, E4)
+ dma_unmap_page(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_map_sg(E1, E2, E3, E4)
+ dma_map_sg(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_unmap_sg(E1, E2, E3, E4)
+ dma_unmap_sg(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_dma_sync_single_for_cpu(E1, E2, E3, E4)
+ dma_sync_single_for_cpu(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_dma_sync_single_for_device(E1, E2, E3, E4)
+ dma_sync_single_for_device(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_dma_sync_sg_for_cpu(E1, E2, E3, E4)
+ dma_sync_sg_for_cpu(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_dma_sync_sg_for_device(E1, E2, E3, E4)
+ dma_sync_sg_for_device(&E1->dev, E2, E3, E4)
@@ expression E1, E2; @@
- pci_dma_mapping_error(E1, E2)
+ dma_mapping_error(&E1->dev, E2)
@@ expression E1, E2; @@
- pci_set_consistent_dma_mask(E1, E2)
+ dma_set_coherent_mask(&E1->dev, E2)
@@ expression E1, E2; @@
- pci_set_dma_mask(E1, E2)
+ dma_set_mask(&E1->dev, E2)
Link: https://lore.kernel.org/r/f8d4778440d55ba26c04eef0f7d63fb211a39443.1596045683.git.usuraj35@gmail.com
Signed-off-by: Suraj Upadhyay <usuraj35@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2020-07-29 21:06:27 +03:00
addr = dma_map_single ( & dev - > pdev - > dev , p ,
sg_count [ i ] , data_dir ) ;
2005-04-17 02:20:36 +04:00
2007-03-15 20:27:32 +03:00
psg - > sg [ i ] . addr [ 0 ] = cpu_to_le32 ( addr & 0xffffffff ) ;
psg - > sg [ i ] . addr [ 1 ] = cpu_to_le32 ( addr > > 32 ) ;
2017-02-03 02:53:29 +03:00
byte_count + = sg_count [ i ] ;
psg - > sg [ i ] . count = cpu_to_le32 ( sg_count [ i ] ) ;
2007-03-15 20:27:32 +03:00
}
} else {
struct user_sgmap * usg ;
2016-05-19 17:08:33 +03:00
usg = kmemdup ( upsg ,
actual_fibsize - sizeof ( struct aac_srb )
+ sizeof ( struct sgmap ) , GFP_KERNEL ) ;
2007-03-15 20:27:32 +03:00
if ( ! usg ) {
dprintk ( ( KERN_DEBUG " aacraid: Allocation error in Raw SRB command \n " ) ) ;
2005-04-17 02:20:36 +04:00
rcode = - ENOMEM ;
goto cleanup ;
}
2007-03-15 20:27:32 +03:00
actual_fibsize = actual_fibsize64 ;
for ( i = 0 ; i < usg - > count ; i + + ) {
u64 addr ;
void * p ;
2017-02-03 02:53:29 +03:00
sg_count [ i ] = usg - > sg [ i ] . count ;
if ( sg_count [ i ] >
2009-12-21 16:09:27 +03:00
( ( dev - > adapter_info . options &
2008-05-28 23:32:55 +04:00
AAC_OPT_NEW_COMM ) ?
( dev - > scsi_host_ptr - > max_sectors < < 9 ) :
2009-12-21 16:09:27 +03:00
65536 ) ) {
2012-01-09 01:44:19 +04:00
kfree ( usg ) ;
2008-05-28 23:32:55 +04:00
rcode = - EINVAL ;
goto cleanup ;
}
2017-05-10 19:39:35 +03:00
p = kmalloc ( sg_count [ i ] , GFP_KERNEL ) ;
2008-01-09 00:08:04 +03:00
if ( ! p ) {
2010-05-15 13:46:12 +04:00
dprintk ( ( KERN_DEBUG " aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d \n " ,
2017-02-03 02:53:29 +03:00
sg_count [ i ] , i , usg - > count ) ) ;
2010-05-15 13:46:12 +04:00
kfree ( usg ) ;
2007-03-15 20:27:32 +03:00
rcode = - ENOMEM ;
2005-04-17 02:20:36 +04:00
goto cleanup ;
}
2007-10-29 08:11:28 +03:00
sg_user [ i ] = ( void __user * ) ( uintptr_t ) usg - > sg [ i ] . addr ;
2007-03-15 20:27:32 +03:00
sg_list [ i ] = p ; // save so we can clean up later
sg_indx = i ;
2008-01-16 18:39:06 +03:00
if ( flags & SRB_DataOut ) {
2017-02-03 02:53:29 +03:00
if ( copy_from_user ( p , sg_user [ i ] ,
sg_count [ i ] ) ) {
2007-03-15 20:27:32 +03:00
kfree ( usg ) ;
dprintk ( ( KERN_DEBUG " aacraid: Could not copy sg data from user \n " ) ) ;
rcode = - EFAULT ;
goto cleanup ;
}
}
scsi: aacraid: Remove pci-dma-compat wrapper API
The legacy API wrappers in include/linux/pci-dma-compat.h should go away as
they create unnecessary midlayering for include/linux/dma-mapping.h API.
Instead use dma-mapping.h API directly.
The patch has been generated with the coccinelle script below.
Compile-tested.
@@@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@@@
- PCI_DMA_NONE
+ DMA_NONE
@@ expression E1, E2, E3; @@
- pci_alloc_consistent(E1, E2, E3)
+ dma_alloc_coherent(&E1->dev, E2, E3, GFP_)
@@ expression E1, E2, E3; @@
- pci_zalloc_consistent(E1, E2, E3)
+ dma_alloc_coherent(&E1->dev, E2, E3, GFP_)
@@ expression E1, E2, E3, E4; @@
- pci_free_consistent(E1, E2, E3, E4)
+ dma_free_coherent(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_map_single(E1, E2, E3, E4)
+ dma_map_single(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_unmap_single(E1, E2, E3, E4)
+ dma_unmap_single(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4, E5; @@
- pci_map_page(E1, E2, E3, E4, E5)
+ dma_map_page(&E1->dev, E2, E3, E4, E5)
@@ expression E1, E2, E3, E4; @@
- pci_unmap_page(E1, E2, E3, E4)
+ dma_unmap_page(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_map_sg(E1, E2, E3, E4)
+ dma_map_sg(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_unmap_sg(E1, E2, E3, E4)
+ dma_unmap_sg(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_dma_sync_single_for_cpu(E1, E2, E3, E4)
+ dma_sync_single_for_cpu(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_dma_sync_single_for_device(E1, E2, E3, E4)
+ dma_sync_single_for_device(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_dma_sync_sg_for_cpu(E1, E2, E3, E4)
+ dma_sync_sg_for_cpu(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_dma_sync_sg_for_device(E1, E2, E3, E4)
+ dma_sync_sg_for_device(&E1->dev, E2, E3, E4)
@@ expression E1, E2; @@
- pci_dma_mapping_error(E1, E2)
+ dma_mapping_error(&E1->dev, E2)
@@ expression E1, E2; @@
- pci_set_consistent_dma_mask(E1, E2)
+ dma_set_coherent_mask(&E1->dev, E2)
@@ expression E1, E2; @@
- pci_set_dma_mask(E1, E2)
+ dma_set_mask(&E1->dev, E2)
Link: https://lore.kernel.org/r/f8d4778440d55ba26c04eef0f7d63fb211a39443.1596045683.git.usuraj35@gmail.com
Signed-off-by: Suraj Upadhyay <usuraj35@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2020-07-29 21:06:27 +03:00
addr = dma_map_single ( & dev - > pdev - > dev , p ,
sg_count [ i ] , data_dir ) ;
2005-04-17 02:20:36 +04:00
2007-03-15 20:27:32 +03:00
psg - > sg [ i ] . addr [ 0 ] = cpu_to_le32 ( addr & 0xffffffff ) ;
psg - > sg [ i ] . addr [ 1 ] = cpu_to_le32 ( addr > > 32 ) ;
2017-02-03 02:53:29 +03:00
byte_count + = sg_count [ i ] ;
psg - > sg [ i ] . count = cpu_to_le32 ( sg_count [ i ] ) ;
2007-03-15 20:27:32 +03:00
}
kfree ( usg ) ;
2005-04-17 02:20:36 +04:00
}
srbcmd - > count = cpu_to_le32 ( byte_count ) ;
2015-03-26 17:41:23 +03:00
if ( user_srbcmd - > sg . count )
psg - > count = cpu_to_le32 ( sg_indx + 1 ) ;
else
psg - > count = 0 ;
2006-02-01 20:30:55 +03:00
status = aac_fib_send ( ScsiPortCommand64 , srbfib , actual_fibsize , FsaNormal , 1 , 1 , NULL , NULL ) ;
2005-04-17 02:20:36 +04:00
} else {
2005-04-27 17:05:51 +04:00
struct user_sgmap * upsg = & user_srbcmd - > sg ;
2005-04-17 02:20:36 +04:00
struct sgmap * psg = & srbcmd - > sg ;
2007-03-15 20:27:32 +03:00
if ( actual_fibsize64 = = fibsize ) {
struct user_sgmap64 * usg = ( struct user_sgmap64 * ) upsg ;
for ( i = 0 ; i < upsg - > count ; i + + ) {
2007-10-29 08:11:28 +03:00
uintptr_t addr ;
2007-03-15 20:27:32 +03:00
void * p ;
2017-02-03 02:53:29 +03:00
sg_count [ i ] = usg - > sg [ i ] . count ;
if ( sg_count [ i ] >
2009-12-21 16:09:27 +03:00
( ( dev - > adapter_info . options &
2008-05-28 23:32:55 +04:00
AAC_OPT_NEW_COMM ) ?
( dev - > scsi_host_ptr - > max_sectors < < 9 ) :
2009-12-21 16:09:27 +03:00
65536 ) ) {
2008-05-28 23:32:55 +04:00
rcode = - EINVAL ;
goto cleanup ;
}
2018-05-30 12:09:59 +03:00
p = kmalloc ( sg_count [ i ] , GFP_KERNEL ) ;
2017-02-03 02:53:29 +03:00
if ( ! p ) {
2007-03-15 20:27:32 +03:00
dprintk ( ( KERN_DEBUG " aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d \n " ,
2017-02-03 02:53:29 +03:00
sg_count [ i ] , i , usg - > count ) ) ;
2007-03-15 20:27:32 +03:00
rcode = - ENOMEM ;
2005-04-17 02:20:36 +04:00
goto cleanup ;
}
2007-03-15 20:27:32 +03:00
addr = ( u64 ) usg - > sg [ i ] . addr [ 0 ] ;
addr + = ( ( u64 ) usg - > sg [ i ] . addr [ 1 ] ) < < 32 ;
2007-10-29 08:11:28 +03:00
sg_user [ i ] = ( void __user * ) addr ;
2007-03-15 20:27:32 +03:00
sg_list [ i ] = p ; // save so we can clean up later
sg_indx = i ;
2008-01-16 18:39:06 +03:00
if ( flags & SRB_DataOut ) {
2017-02-03 02:53:29 +03:00
if ( copy_from_user ( p , sg_user [ i ] ,
sg_count [ i ] ) ) {
2007-03-15 20:27:32 +03:00
dprintk ( ( KERN_DEBUG " aacraid: Could not copy sg data from user \n " ) ) ;
rcode = - EFAULT ;
goto cleanup ;
}
}
scsi: aacraid: Remove pci-dma-compat wrapper API
The legacy API wrappers in include/linux/pci-dma-compat.h should go away as
they create unnecessary midlayering for include/linux/dma-mapping.h API.
Instead use dma-mapping.h API directly.
The patch has been generated with the coccinelle script below.
Compile-tested.
@@@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@@@
- PCI_DMA_NONE
+ DMA_NONE
@@ expression E1, E2, E3; @@
- pci_alloc_consistent(E1, E2, E3)
+ dma_alloc_coherent(&E1->dev, E2, E3, GFP_)
@@ expression E1, E2, E3; @@
- pci_zalloc_consistent(E1, E2, E3)
+ dma_alloc_coherent(&E1->dev, E2, E3, GFP_)
@@ expression E1, E2, E3, E4; @@
- pci_free_consistent(E1, E2, E3, E4)
+ dma_free_coherent(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_map_single(E1, E2, E3, E4)
+ dma_map_single(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_unmap_single(E1, E2, E3, E4)
+ dma_unmap_single(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4, E5; @@
- pci_map_page(E1, E2, E3, E4, E5)
+ dma_map_page(&E1->dev, E2, E3, E4, E5)
@@ expression E1, E2, E3, E4; @@
- pci_unmap_page(E1, E2, E3, E4)
+ dma_unmap_page(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_map_sg(E1, E2, E3, E4)
+ dma_map_sg(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_unmap_sg(E1, E2, E3, E4)
+ dma_unmap_sg(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_dma_sync_single_for_cpu(E1, E2, E3, E4)
+ dma_sync_single_for_cpu(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_dma_sync_single_for_device(E1, E2, E3, E4)
+ dma_sync_single_for_device(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_dma_sync_sg_for_cpu(E1, E2, E3, E4)
+ dma_sync_sg_for_cpu(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_dma_sync_sg_for_device(E1, E2, E3, E4)
+ dma_sync_sg_for_device(&E1->dev, E2, E3, E4)
@@ expression E1, E2; @@
- pci_dma_mapping_error(E1, E2)
+ dma_mapping_error(&E1->dev, E2)
@@ expression E1, E2; @@
- pci_set_consistent_dma_mask(E1, E2)
+ dma_set_coherent_mask(&E1->dev, E2)
@@ expression E1, E2; @@
- pci_set_dma_mask(E1, E2)
+ dma_set_mask(&E1->dev, E2)
Link: https://lore.kernel.org/r/f8d4778440d55ba26c04eef0f7d63fb211a39443.1596045683.git.usuraj35@gmail.com
Signed-off-by: Suraj Upadhyay <usuraj35@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2020-07-29 21:06:27 +03:00
addr = dma_map_single ( & dev - > pdev - > dev , p ,
usg - > sg [ i ] . count ,
data_dir ) ;
2007-03-15 20:27:32 +03:00
psg - > sg [ i ] . addr = cpu_to_le32 ( addr & 0xffffffff ) ;
byte_count + = usg - > sg [ i ] . count ;
2017-02-03 02:53:29 +03:00
psg - > sg [ i ] . count = cpu_to_le32 ( sg_count [ i ] ) ;
2005-04-17 02:20:36 +04:00
}
2007-03-15 20:27:32 +03:00
} else {
for ( i = 0 ; i < upsg - > count ; i + + ) {
dma_addr_t addr ;
void * p ;
2017-02-03 02:53:29 +03:00
sg_count [ i ] = upsg - > sg [ i ] . count ;
if ( sg_count [ i ] >
2009-12-21 16:09:27 +03:00
( ( dev - > adapter_info . options &
2008-05-28 23:32:55 +04:00
AAC_OPT_NEW_COMM ) ?
( dev - > scsi_host_ptr - > max_sectors < < 9 ) :
2009-12-21 16:09:27 +03:00
65536 ) ) {
2008-05-28 23:32:55 +04:00
rcode = - EINVAL ;
goto cleanup ;
}
2018-05-30 12:09:59 +03:00
p = kmalloc ( sg_count [ i ] , GFP_KERNEL ) ;
2008-01-09 00:08:04 +03:00
if ( ! p ) {
2007-03-15 20:27:32 +03:00
dprintk ( ( KERN_DEBUG " aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d \n " ,
2017-02-03 02:53:29 +03:00
sg_count [ i ] , i , upsg - > count ) ) ;
2007-03-15 20:27:32 +03:00
rcode = - ENOMEM ;
goto cleanup ;
}
2007-10-29 08:11:28 +03:00
sg_user [ i ] = ( void __user * ) ( uintptr_t ) upsg - > sg [ i ] . addr ;
2007-03-15 20:27:32 +03:00
sg_list [ i ] = p ; // save so we can clean up later
sg_indx = i ;
2008-01-16 18:39:06 +03:00
if ( flags & SRB_DataOut ) {
2017-02-03 02:53:29 +03:00
if ( copy_from_user ( p , sg_user [ i ] ,
sg_count [ i ] ) ) {
2007-03-15 20:27:32 +03:00
dprintk ( ( KERN_DEBUG " aacraid: Could not copy sg data from user \n " ) ) ;
rcode = - EFAULT ;
goto cleanup ;
}
}
scsi: aacraid: Remove pci-dma-compat wrapper API
The legacy API wrappers in include/linux/pci-dma-compat.h should go away as
they create unnecessary midlayering for include/linux/dma-mapping.h API.
Instead use dma-mapping.h API directly.
The patch has been generated with the coccinelle script below.
Compile-tested.
@@@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@@@
- PCI_DMA_NONE
+ DMA_NONE
@@ expression E1, E2, E3; @@
- pci_alloc_consistent(E1, E2, E3)
+ dma_alloc_coherent(&E1->dev, E2, E3, GFP_)
@@ expression E1, E2, E3; @@
- pci_zalloc_consistent(E1, E2, E3)
+ dma_alloc_coherent(&E1->dev, E2, E3, GFP_)
@@ expression E1, E2, E3, E4; @@
- pci_free_consistent(E1, E2, E3, E4)
+ dma_free_coherent(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_map_single(E1, E2, E3, E4)
+ dma_map_single(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_unmap_single(E1, E2, E3, E4)
+ dma_unmap_single(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4, E5; @@
- pci_map_page(E1, E2, E3, E4, E5)
+ dma_map_page(&E1->dev, E2, E3, E4, E5)
@@ expression E1, E2, E3, E4; @@
- pci_unmap_page(E1, E2, E3, E4)
+ dma_unmap_page(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_map_sg(E1, E2, E3, E4)
+ dma_map_sg(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_unmap_sg(E1, E2, E3, E4)
+ dma_unmap_sg(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_dma_sync_single_for_cpu(E1, E2, E3, E4)
+ dma_sync_single_for_cpu(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_dma_sync_single_for_device(E1, E2, E3, E4)
+ dma_sync_single_for_device(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_dma_sync_sg_for_cpu(E1, E2, E3, E4)
+ dma_sync_sg_for_cpu(&E1->dev, E2, E3, E4)
@@ expression E1, E2, E3, E4; @@
- pci_dma_sync_sg_for_device(E1, E2, E3, E4)
+ dma_sync_sg_for_device(&E1->dev, E2, E3, E4)
@@ expression E1, E2; @@
- pci_dma_mapping_error(E1, E2)
+ dma_mapping_error(&E1->dev, E2)
@@ expression E1, E2; @@
- pci_set_consistent_dma_mask(E1, E2)
+ dma_set_coherent_mask(&E1->dev, E2)
@@ expression E1, E2; @@
- pci_set_dma_mask(E1, E2)
+ dma_set_mask(&E1->dev, E2)
Link: https://lore.kernel.org/r/f8d4778440d55ba26c04eef0f7d63fb211a39443.1596045683.git.usuraj35@gmail.com
Signed-off-by: Suraj Upadhyay <usuraj35@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2020-07-29 21:06:27 +03:00
addr = dma_map_single ( & dev - > pdev - > dev , p ,
sg_count [ i ] , data_dir ) ;
2005-04-17 02:20:36 +04:00
2007-03-15 20:27:32 +03:00
psg - > sg [ i ] . addr = cpu_to_le32 ( addr ) ;
2017-02-03 02:53:29 +03:00
byte_count + = sg_count [ i ] ;
psg - > sg [ i ] . count = cpu_to_le32 ( sg_count [ i ] ) ;
2007-03-15 20:27:32 +03:00
}
2005-04-17 02:20:36 +04:00
}
srbcmd - > count = cpu_to_le32 ( byte_count ) ;
2015-03-26 17:41:23 +03:00
if ( user_srbcmd - > sg . count )
psg - > count = cpu_to_le32 ( sg_indx + 1 ) ;
else
psg - > count = 0 ;
2006-02-01 20:30:55 +03:00
status = aac_fib_send ( ScsiPortCommand , srbfib , actual_fibsize , FsaNormal , 1 , 1 , NULL , NULL ) ;
2005-04-17 02:20:36 +04:00
}
2017-02-03 02:53:29 +03:00
2009-12-21 16:09:27 +03:00
if ( status = = - ERESTARTSYS ) {
rcode = - ERESTARTSYS ;
2006-08-03 19:02:24 +04:00
goto cleanup ;
}
2005-04-17 02:20:36 +04:00
2017-02-03 02:53:29 +03:00
if ( status ! = 0 ) {
2008-01-16 18:39:06 +03:00
dprintk ( ( KERN_DEBUG " aacraid: Could not send raw srb fib to hba \n " ) ) ;
2005-06-18 00:38:04 +04:00
rcode = - ENXIO ;
2005-04-17 02:20:36 +04:00
goto cleanup ;
}
2008-01-16 18:39:06 +03:00
if ( flags & SRB_DataIn ) {
2005-04-17 02:20:36 +04:00
for ( i = 0 ; i < = sg_indx ; i + + ) {
2017-02-03 02:53:29 +03:00
if ( copy_to_user ( sg_user [ i ] , sg_list [ i ] , sg_count [ i ] ) ) {
2008-01-16 18:39:06 +03:00
dprintk ( ( KERN_DEBUG " aacraid: Could not copy sg data to user \n " ) ) ;
2005-04-17 02:20:36 +04:00
rcode = - EFAULT ;
goto cleanup ;
}
}
}
2017-02-03 02:53:29 +03:00
user_reply = arg + fibsize ;
if ( is_native_device ) {
struct aac_hba_resp * err =
& ( ( struct aac_native_hba * ) srbfib - > hw_fib_va ) - > resp . err ;
struct aac_srb_reply reply ;
2017-06-23 17:04:22 +03:00
memset ( & reply , 0 , sizeof ( reply ) ) ;
2017-02-03 02:53:29 +03:00
reply . status = ST_OK ;
if ( srbfib - > flags & FIB_CONTEXT_FLAG_FASTRESP ) {
/* fast response */
reply . srb_status = SRB_STATUS_SUCCESS ;
reply . scsi_status = 0 ;
reply . data_xfer_length = byte_count ;
2017-05-15 17:56:05 +03:00
reply . sense_data_size = 0 ;
memset ( reply . sense_data , 0 , AAC_SENSE_BUFFERSIZE ) ;
2017-02-03 02:53:29 +03:00
} else {
reply . srb_status = err - > service_response ;
reply . scsi_status = err - > status ;
reply . data_xfer_length = byte_count -
le32_to_cpu ( err - > residual_count ) ;
reply . sense_data_size = err - > sense_response_data_len ;
memcpy ( reply . sense_data , err - > sense_response_buf ,
AAC_SENSE_BUFFERSIZE ) ;
}
if ( copy_to_user ( user_reply , & reply ,
sizeof ( struct aac_srb_reply ) ) ) {
dprintk ( ( KERN_DEBUG " aacraid: Copy to user failed \n " ) ) ;
rcode = - EFAULT ;
goto cleanup ;
}
} else {
struct aac_srb_reply * reply ;
reply = ( struct aac_srb_reply * ) fib_data ( srbfib ) ;
if ( copy_to_user ( user_reply , reply ,
sizeof ( struct aac_srb_reply ) ) ) {
dprintk ( ( KERN_DEBUG " aacraid: Copy to user failed \n " ) ) ;
rcode = - EFAULT ;
goto cleanup ;
}
2005-04-17 02:20:36 +04:00
}
cleanup :
2005-04-27 17:05:51 +04:00
kfree ( user_srbcmd ) ;
2009-12-21 16:09:27 +03:00
if ( rcode ! = - ERESTARTSYS ) {
2017-02-03 02:53:29 +03:00
for ( i = 0 ; i < = sg_indx ; i + + )
kfree ( sg_list [ i ] ) ;
2006-08-03 19:02:24 +04:00
aac_fib_complete ( srbfib ) ;
aac_fib_free ( srbfib ) ;
}
2005-04-17 02:20:36 +04:00
return rcode ;
}
struct aac_pci_info {
2008-01-16 18:39:06 +03:00
u32 bus ;
u32 slot ;
2005-04-17 02:20:36 +04:00
} ;
2005-04-26 06:45:58 +04:00
/**
 * aac_get_pci_info	-	report adapter PCI location to userspace
 * @dev: adapter being queried
 * @arg: user pointer that receives a struct aac_pci_info
 *
 * Fills a struct aac_pci_info with the controller's PCI bus number and
 * slot and copies it out to the caller.
 *
 * Returns 0 on success or -EFAULT if the copy to user space fails.
 */
static int aac_get_pci_info(struct aac_dev *dev, void __user *arg)
{
	struct aac_pci_info info = {
		.bus  = dev->pdev->bus->number,
		.slot = PCI_SLOT(dev->pdev->devfn),
	};

	if (copy_to_user(arg, &info, sizeof(struct aac_pci_info))) {
		dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
		return -EFAULT;
	}
	return 0;
}
2017-02-03 02:53:35 +03:00
/**
 * aac_get_hba_info	-	report HBA identification data to userspace
 * @dev: adapter being queried
 * @arg: user pointer that receives a struct aac_hba_info
 *
 * Collects adapter index, PCI location (bus / device / function) and the
 * PCI vendor, device and subsystem IDs into a zero-initialized
 * struct aac_hba_info and copies it out to the caller.  Fields not filled
 * in here are deliberately left zero.
 *
 * Returns 0 on success or -EFAULT if the copy to user space fails.
 */
static int aac_get_hba_info(struct aac_dev *dev, void __user *arg)
{
	struct pci_dev *pdev = dev->pdev;
	struct aac_hba_info info;

	memset(&info, 0, sizeof(info));

	info.adapter_number	  = (u8)dev->id;
	info.system_io_bus_number = pdev->bus->number;
	/* devfn packs device (bits 7:3) and function (bits 2:0) */
	info.device_number	  = (pdev->devfn >> 3);
	info.function_number	  = (pdev->devfn & 0x0007);

	info.vendor_id		  = pdev->vendor;
	info.device_id		  = pdev->device;
	info.sub_vendor_id	  = pdev->subsystem_vendor;
	info.sub_system_id	  = pdev->subsystem_device;

	if (copy_to_user(arg, &info, sizeof(struct aac_hba_info))) {
		dprintk((KERN_DEBUG "aacraid: Could not copy hba info\n"));
		return -EFAULT;
	}
	return 0;
}
2017-02-03 02:53:34 +03:00
/* Argument block for FSACTL_RESET_IOP (ioctl ABI; do not change). */
struct aac_reset_iop {
	u8 reset_type;
};

/**
 * aac_send_reset_adapter	-	reset the adapter on userspace request
 * @dev: adapter to reset
 * @arg: user pointer to a struct aac_reset_iop selecting the reset type
 *
 * Marks the adapter as shutting down, then performs the reset with
 * ioctl_mutex dropped so the (potentially long-running) reset path does
 * not block other ioctl handling; the mutex is re-acquired before
 * returning since the caller (aac_do_ioctl) expects to unlock it.
 *
 * Returns -EFAULT if the argument cannot be copied in, otherwise the
 * result of aac_reset_adapter().
 */
static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg)
{
	struct aac_reset_iop reset;
	int retval;

	if (copy_from_user((void *)&reset, arg, sizeof(struct aac_reset_iop)))
		return -EFAULT;

	dev->adapter_shutdown = 1;

	/* Drop the ioctl lock across the reset; caller still holds it on entry. */
	mutex_unlock(&dev->ioctl_mutex);
	retval = aac_reset_adapter(dev, 0, reset.reset_type);
	mutex_lock(&dev->ioctl_mutex);

	return retval;
}
2005-04-17 02:20:36 +04:00
2019-02-07 19:07:20 +03:00
/**
 * aac_do_ioctl	-	dispatch an aacraid character-device ioctl
 * @dev: adapter the ioctl targets
 * @cmd: ioctl command code
 * @arg: userspace argument pointer
 *
 * Serializes all ioctl handling through dev->ioctl_mutex.  Requests are
 * rejected with -EACCES once the adapter has been marked shut down.
 * The device-specific handler (aac_dev_ioctl) gets first crack; only if
 * it returns -ENOTTY is the command dispatched to the generic handlers
 * below.  Unknown commands yield -ENOTTY.
 */
int aac_do_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg)
{
	int status;

	mutex_lock(&dev->ioctl_mutex);

	if (dev->adapter_shutdown) {
		status = -EACCES;
		goto cleanup;
	}

	/*
	 * HBA gets first crack
	 */
	status = aac_dev_ioctl(dev, cmd, arg);
	if (status != -ENOTTY)
		goto cleanup;

	switch (cmd) {
	case FSACTL_MINIPORT_REV_CHECK:
		status = check_revision(dev, arg);
		break;
	case FSACTL_SEND_LARGE_FIB:
		/* fall through: large and normal FIBs share one path */
	case FSACTL_SENDFIB:
		status = ioctl_send_fib(dev, arg);
		break;
	case FSACTL_OPEN_GET_ADAPTER_FIB:
		status = open_getadapter_fib(dev, arg);
		break;
	case FSACTL_GET_NEXT_ADAPTER_FIB:
		status = next_getadapter_fib(dev, arg);
		break;
	case FSACTL_CLOSE_GET_ADAPTER_FIB:
		status = close_getadapter_fib(dev, arg);
		break;
	case FSACTL_SEND_RAW_SRB:
		status = aac_send_raw_srb(dev, arg);
		break;
	case FSACTL_GET_PCI_INFO:
		status = aac_get_pci_info(dev, arg);
		break;
	case FSACTL_GET_HBA_INFO:
		status = aac_get_hba_info(dev, arg);
		break;
	case FSACTL_RESET_IOP:
		status = aac_send_reset_adapter(dev, arg);
		break;
	default:
		status = -ENOTTY;
		break;
	}

cleanup:
	mutex_unlock(&dev->ioctl_mutex);

	return status;
}